diff --git a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
index 3e2b2ac2a8..67d7207d6a 100644
--- a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
@@ -12,6 +12,8 @@ import scala.concurrent.Await
 import scala.concurrent.duration._
 import scala.util.Failure
 
+import language.postfixOps
+
 class AskSpec extends AkkaSpec with ScalaFutures {
 
   "The “ask” pattern" must {
diff --git a/akka-actor/src/main/java/akka/japi/pf/DeciderBuilder.java b/akka-actor/src/main/java/akka/japi/pf/DeciderBuilder.java
index 4c79703157..855a8c3c68 100644
--- a/akka-actor/src/main/java/akka/japi/pf/DeciderBuilder.java
+++ b/akka-actor/src/main/java/akka/japi/pf/DeciderBuilder.java
@@ -10,18 +10,19 @@ import static akka.actor.SupervisorStrategy.Directive;
  * Used for building a partial function for {@link akka.actor.Actor#supervisorStrategy() Actor.supervisorStrategy()}.
  *
  * Inside an actor you can use it like this with Java 8 to define your supervisorStrategy.
- *

+ *

* Example: + *

*
- * @Override
+ * @Override
  * private static SupervisorStrategy strategy =
  *   new OneForOneStrategy(10, Duration.create("1 minute"), DeciderBuilder.
- *     match(ArithmeticException.class, e -> resume()).
- *     match(NullPointerException.class, e -> restart()).
- *     match(IllegalArgumentException.class, e -> stop()).
- *     matchAny(o -> escalate()).build());
+ *     match(ArithmeticException.class, e -> resume()).
+ *     match(NullPointerException.class, e -> restart()).
+ *     match(IllegalArgumentException.class, e -> stop()).
+ *     matchAny(o -> escalate()).build());
  *
- * @Override
+ * @Override
  * public SupervisorStrategy supervisorStrategy() {
  *   return strategy;
  * }
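
For comparison, a minimal Scala sketch of the same supervision setup as the DeciderBuilder example above (class name and receive body are illustrative, not part of this patch):

    import akka.actor.{ Actor, OneForOneStrategy }
    import akka.actor.SupervisorStrategy._
    import scala.concurrent.duration._

    class Supervisor extends Actor {
      // Same decisions as the DeciderBuilder example: resume, restart, stop, escalate.
      override val supervisorStrategy =
        OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1.minute) {
          case _: ArithmeticException      => Resume
          case _: NullPointerException     => Restart
          case _: IllegalArgumentException => Stop
          case _: Exception                => Escalate
        }

      def receive = { case props: akka.actor.Props => sender() ! context.actorOf(props) }
    }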
diff --git a/akka-actor/src/main/java/akka/japi/pf/Match.java b/akka-actor/src/main/java/akka/japi/pf/Match.java
index 09c5e9ff30..2ac8b2d3c8 100644
--- a/akka-actor/src/main/java/akka/japi/pf/Match.java
+++ b/akka-actor/src/main/java/akka/japi/pf/Match.java
@@ -90,10 +90,11 @@ public class Match extends AbstractMatch {
 
   /**
    * Convenience function to make the Java code more readable.
-   * 

+ *

+ * *

    *   Matcher<X, Y> matcher = Matcher.create(...);
-   * 

+ *
   *   Y someY = matcher.match(obj);
   *

 *
diff --git a/akka-actor/src/main/java/akka/japi/pf/ReceiveBuilder.java b/akka-actor/src/main/java/akka/japi/pf/ReceiveBuilder.java
index 890b07f139..590bc3d35d 100644
--- a/akka-actor/src/main/java/akka/japi/pf/ReceiveBuilder.java
+++ b/akka-actor/src/main/java/akka/japi/pf/ReceiveBuilder.java
@@ -10,19 +10,20 @@ package akka.japi.pf;
  * There is both a match on type only, and a match on type and predicate.
  *
  * Inside an actor you can use it like this with Java 8 to define your receive method.
- *

+ *

* Example: + *

*
- * @Override
+ * @Override
  * public Actor() {
  *   receive(ReceiveBuilder.
- *     match(Double.class, d -> {
+ *     match(Double.class, d -> {
  *       sender().tell(d.isNaN() ? 0 : d, self());
  *     }).
- *     match(Integer.class, i -> {
+ *     match(Integer.class, i -> {
  *       sender().tell(i * 10, self());
  *     }).
- *     match(String.class, s -> s.startsWith("foo"), s -> {
+ *     match(String.class, s -> s.startsWith("foo"), s -> {
  *       sender().tell(s.toUpperCase(), self());
  *     }).build()
  *   );
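
The Scala counterpart of the ReceiveBuilder example above is an ordinary partial function; a minimal sketch (class name is illustrative):

    import akka.actor.Actor

    class Calculator extends Actor {
      def receive = {
        case d: Double                        => sender() ! (if (d.isNaN) 0 else d)
        case i: Int                           => sender() ! (i * 10)
        case s: String if s.startsWith("foo") => sender() ! s.toUpperCase
      }
    }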
diff --git a/akka-actor/src/main/java/akka/japi/pf/UnitMatch.java b/akka-actor/src/main/java/akka/japi/pf/UnitMatch.java
index 1ab252a039..bb8ec77634 100644
--- a/akka-actor/src/main/java/akka/japi/pf/UnitMatch.java
+++ b/akka-actor/src/main/java/akka/japi/pf/UnitMatch.java
@@ -110,7 +110,7 @@ public class UnitMatch extends AbstractMatch {
    * 

*


    *   UnitMatcher<X> matcher = UnitMatcher.create(...);
-   * 

+ *
   *   matcher.match(obj);
   *

 *
diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala
index 9de9247980..f2929166c1 100644
--- a/akka-actor/src/main/scala/akka/AkkaException.scala
+++ b/akka-actor/src/main/scala/akka/AkkaException.scala
@@ -5,11 +5,7 @@ package akka
 
 /**
- * Akka base Exception. Each Exception gets:
- * <ul>
- *   <li>a uuid for tracking purposes</li>
- *   <li>toString that includes exception name, message and uuid</li>
- * </ul>
+ * Akka base Exception.
  */
 @SerialVersionUID(1L)
 class AkkaException(message: String, cause: Throwable) extends RuntimeException(message, cause) with Serializable {
diff --git a/akka-actor/src/main/scala/akka/actor/AbstractActor.scala b/akka-actor/src/main/scala/akka/actor/AbstractActor.scala
index 9d95000dc4..9eeff702f7 100644
--- a/akka-actor/src/main/scala/akka/actor/AbstractActor.scala
+++ b/akka-actor/src/main/scala/akka/actor/AbstractActor.scala
@@ -109,7 +109,7 @@ abstract class AbstractLoggingActor extends AbstractActor with ActorLogging
 *   }
 * 
 * Note that the subclasses of `AbstractActorWithStash` by default request a Deque based mailbox since this class
- * implements the `RequiresMessageQueue` marker interface.
+ * implements the `RequiresMessageQueue<DequeBasedMessageQueueSemantics>` marker interface.
 * You can override the default mailbox provided when `DequeBasedMessageQueueSemantics` are requested via config:
 * 
  *   akka.actor.mailbox.requirements {
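
For readers unfamiliar with the stash, a minimal Scala sketch of the pattern the comment above describes (the `Stash` trait is what requires the deque-based mailbox; names are illustrative):

    import akka.actor.{ Actor, Stash }

    class Buffering extends Actor with Stash {
      def receive = {
        case "open" =>
          unstashAll()              // replay everything stashed so far
          context.become(open)
        case _ => stash()           // postpone until "open" arrives
      }

      def open: Receive = {
        case msg => sender() ! msg
      }
    }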
diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala
index 6ed9997404..f9bf5c4dcd 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala
@@ -27,7 +27,7 @@ object ActorPath {
 
   /**
    * Validates the given actor path element and throws an [[InvalidActorNameException]] if invalid.
-   * See [[isValidPathElement()]] for a non-throwing version.
+   * See [[#isValidPathElement]] for a non-throwing version.
    *
    * @param element actor path element to be validated
    */
@@ -35,7 +35,7 @@ object ActorPath {
 
   /**
    * Validates the given actor path element and throws an [[InvalidActorNameException]] if invalid.
-   * See [[isValidPathElement()]] for a non-throwing version.
+   * See [[#isValidPathElement]] for a non-throwing version.
    *
    * @param element actor path element to be validated
    * @param fullPath optional fullPath element that may be included for better error messages; null if not given
@@ -60,7 +60,7 @@ object ActorPath {
   /**
    * This method is used to validate a path element (Actor Name).
    * Since Actors form a tree, it is addressable using an URL, therefore an Actor Name has to conform to:
-   * [[http://www.ietf.org/rfc/rfc2396.txt RFC-2396]].
+   * RFC-2396.
    *
    * User defined Actor names may not start from a `$` sign - these are reserved for system names.
    */
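
A small sketch of the two validation entry points documented above; the expected results follow from the RFC-2396 rules and the reserved `$` prefix described in the comments:

    import akka.actor.ActorPath

    ActorPath.isValidPathElement("worker-1")     // true
    ActorPath.isValidPathElement("$system-name") // false, '$' is reserved for system names

    // Throwing variant: raises InvalidActorNameException instead of returning false.
    ActorPath.validatePathElement("worker-1")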
@@ -97,7 +97,7 @@ object ActorPath {
  * ActorPath defines a natural ordering (so that ActorRefs can be put into
  * collections with this requirement); this ordering is intended to be as fast
  * as possible, which owing to the bottom-up recursive nature of ActorPath
- * is sorted by path elements FROM RIGHT TO LEFT, where RootActorPath >
+ * is sorted by path elements FROM RIGHT TO LEFT, where RootActorPath >
  * ChildActorPath in case the number of elements is different.
  *
  * Two actor paths are compared equal when they have the same name and parent
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
index d71a95ed65..719218ce1c 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
@@ -118,7 +118,7 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable
    * Sends the specified message to this ActorRef, i.e. fire-and-forget
    * semantics, including the sender reference if possible.
    *
-   * Pass [[akka.actor.ActorRef$.noSender]] or `null` as sender if there is nobody to reply to
+   * Pass [[akka.actor.ActorRef]] `noSender` or `null` as sender if there is nobody to reply to
    */
   final def tell(msg: Any, sender: ActorRef): Unit = this.!(msg)(sender)
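
A minimal usage sketch of `tell` with and without a sender (actor and system names are illustrative):

    import akka.actor.{ Actor, ActorRef, ActorSystem, Props }

    class Echo extends Actor {
      def receive = { case msg => sender() ! msg }
    }

    val system = ActorSystem("demo")
    val echo   = system.actorOf(Props[Echo], "echo")

    echo.tell("hello", ActorRef.noSender) // fire-and-forget, replies go to dead letters
    echo ! "hello"                        // Scala API, picks up an implicit sender if present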
 
@@ -157,7 +157,7 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable
 /**
  * This trait represents the Scala Actor API
  * There are implicit conversions in ../actor/Implicits.scala
- * from ActorRef -> ScalaActorRef and back
+ * from ActorRef -> ScalaActorRef and back
  */
 trait ScalaActorRef { ref: ActorRef ⇒
 
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
index af3fdc6e22..24818061e0 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
@@ -358,9 +358,9 @@ private[akka] object SystemGuardian {
   /**
    * For the purpose of orderly shutdown it's possible
    * to register interest in the termination of systemGuardian
-   * and receive a notification [[akka.actor.Guardian.TerminationHook]]
+   * and receive a notification `TerminationHook`
    * before systemGuardian is stopped. The registered hook is supposed
-   * to reply with [[akka.actor.Guardian.TerminationHookDone]] and the
+   * to reply with `TerminationHookDone` and the
    * systemGuardian will not stop until all registered hooks have replied.
    */
   case object RegisterTerminationHook
diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
index d7e202bf40..67051a53c1 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
@@ -101,7 +101,7 @@ abstract class ActorSelection extends Serializable {
 
   /**
    * String representation of the actor selection suitable for storage and recreation.
-   * The output is similar to the URI fragment returned by [[akka.actor.ActorPath.toSerializationFormat]].
+   * The output is similar to the URI fragment returned by [[akka.actor.ActorPath#toSerializationFormat]].
    * @return URI fragment
    */
   def toSerializationFormat: String = {
diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
index 225c9b2f51..29ff241fc1 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
@@ -291,7 +291,7 @@ abstract class ActorSystem extends ActorRefFactory {
   def logConfiguration(): Unit
 
   /**
-   * Construct a path below the application guardian to be used with [[ActorSystem.actorSelection]].
+   * Construct a path below the application guardian to be used with [[ActorSystem#actorSelection]].
    */
   def /(name: String): ActorPath
 
@@ -301,7 +301,7 @@ abstract class ActorSystem extends ActorRefFactory {
   def child(child: String): ActorPath = /(child)
 
   /**
-   * Construct a path below the application guardian to be used with [[ActorSystem.actorSelection]].
+   * Construct a path below the application guardian to be used with [[ActorSystem#actorSelection]].
    */
   def /(name: Iterable[String]): ActorPath
 
@@ -326,7 +326,7 @@ abstract class ActorSystem extends ActorRefFactory {
   def eventStream: EventStream
 
   /**
-   * Convenient logging adapter for logging to the [[ActorSystem.eventStream]].
+   * Convenient logging adapter for logging to the [[ActorSystem#eventStream]].
    */
   def log: LoggingAdapter
 
@@ -369,7 +369,7 @@ abstract class ActorSystem extends ActorRefFactory {
    * The callbacks will be run sequentially in reverse order of registration, i.e.
    * last registration is run first.
    *
-   * @throws a RejectedExecutionException if the System has already shut down or if shutdown has been initiated.
+   * Throws a RejectedExecutionException if the System has already shut down or if shutdown has been initiated.
    *
    * Scala API
    */
@@ -382,7 +382,7 @@ abstract class ActorSystem extends ActorRefFactory {
    * The callbacks will be run sequentially in reverse order of registration, i.e.
    * last registration is run first.
    *
-   * @throws a RejectedExecutionException if the System has already shut down or if shutdown has been initiated.
+   * Throws a RejectedExecutionException if the System has already shut down or if shutdown has been initiated.
    */
   def registerOnTermination(code: Runnable): Unit
 
@@ -391,7 +391,7 @@ abstract class ActorSystem extends ActorRefFactory {
    * timeout has elapsed. This will block until after all on termination
    * callbacks have been run.
    *
-   * @throws TimeoutException in case of timeout
+   * Throws TimeoutException in case of timeout.
    */
   @deprecated("Use Await.result(whenTerminated, timeout) instead", "2.4")
   def awaitTermination(timeout: Duration): Unit
@@ -407,7 +407,7 @@ abstract class ActorSystem extends ActorRefFactory {
    * Stop this actor system. This will stop the guardian actor, which in turn
    * will recursively stop all its child actors, then the system guardian
    * (below which the logging actors reside) and the execute all registered
-   * termination handlers (see [[ActorSystem.registerOnTermination]]).
+   * termination handlers (see [[ActorSystem#registerOnTermination]]).
    */
   @deprecated("Use the terminate() method instead", "2.4")
   def shutdown(): Unit
@@ -426,7 +426,7 @@ abstract class ActorSystem extends ActorRefFactory {
    * Terminates this actor system. This will stop the guardian actor, which in turn
    * will recursively stop all its child actors, then the system guardian
    * (below which the logging actors reside) and the execute all registered
-   * termination handlers (see [[ActorSystem.registerOnTermination]]).
+   * termination handlers (see [[ActorSystem#registerOnTermination]]).
    */
   def terminate(): Future[Terminated]
 
@@ -835,7 +835,7 @@ private[akka] class ActorSystemImpl(
      * Adds a Runnable that will be executed on ActorSystem termination.
      * Note that callbacks are executed in reverse order of insertion.
      * @param r The callback to be executed on ActorSystem termination
-     * @throws RejectedExecutionException if called after ActorSystem has been terminated
+     * Throws RejectedExecutionException if called after ActorSystem has been terminated.
      */
     final def add(r: Runnable): Unit = {
       @tailrec def addRec(r: Runnable, p: Promise[T]): Unit = ref.get match {
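
A short sketch of the termination hooks and `terminate`/`whenTerminated` mentioned above (assuming a plain ActorSystem):

    import akka.actor.ActorSystem
    import scala.concurrent.Await
    import scala.concurrent.duration._

    val system = ActorSystem("demo")

    // Run in reverse registration order; registering after shutdown
    // throws RejectedExecutionException as documented above.
    system.registerOnTermination(println("resources released"))

    system.terminate()
    Await.result(system.whenTerminated, 10.seconds)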
diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala
index 6bd03c2787..955f127522 100644
--- a/akka-actor/src/main/scala/akka/actor/Address.scala
+++ b/akka-actor/src/main/scala/akka/actor/Address.scala
@@ -46,7 +46,7 @@ final case class Address private (protocol: String, system: String, host: Option
   /**
    * Returns the canonical String representation of this Address formatted as:
    *
-   * ://@:
+   * `protocol://system@host:port`
    */
   @transient
   override lazy val toString: String = {
@@ -61,7 +61,7 @@ final case class Address private (protocol: String, system: String, host: Option
   /**
    * Returns a String representation formatted as:
    *
-   * @:
+   * `system@host:port`
    */
   def hostPort: String = toString.substring(protocol.length + 3)
 }
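
A quick illustration of the two string formats documented above (values are illustrative):

    import akka.actor.Address

    val addr = Address("akka.tcp", "MySystem", "host.example.com", 2552)

    addr.toString // "akka.tcp://MySystem@host.example.com:2552"
    addr.hostPort // "MySystem@host.example.com:2552"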
diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala
index 2dbebab9fc..fde0247ecb 100644
--- a/akka-actor/src/main/scala/akka/actor/Deployer.scala
+++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala
@@ -60,7 +60,7 @@ final case class Deploy(
   /**
    * Do a merge between this and the other Deploy, where values from “this” take
    * precedence. The “path” of the other Deploy is not taken into account. All
-   * other members are merged using ``.withFallback(other.)``.
+   * other members are merged using `X.withFallback(other.X)`.
    */
   def withFallback(other: Deploy): Deploy = {
     Deploy(
diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala
index 2234e84b4a..09e3597ff1 100644
--- a/akka-actor/src/main/scala/akka/actor/FSM.scala
+++ b/akka-actor/src/main/scala/akka/actor/FSM.scala
@@ -70,7 +70,7 @@ object FSM {
   /**
    * Signifies that the [[akka.actor.FSM]] is shutting itself down because of
    * an error, e.g. if the state to transition into does not exist. You can use
-   * this to communicate a more precise cause to the [[akka.actor.FSM.onTermination]] block.
+   * this to communicate a more precise cause to the `onTermination` block.
    */
   final case class Failure(cause: Any) extends Reason
 
@@ -374,7 +374,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
    * Return this from a state function when no state change is to be effected.
    *
    * No transition event will be triggered by [[#stay]].
-   * If you want to trigger an event like `S -> S` for [[#onTransition]] to handle use [[#goto]] instead.
+   * If you want to trigger an event like `S -> S` for `onTransition` to handle use `goto` instead.
    *
    * @return descriptor for staying in current state
    */
@@ -410,7 +410,6 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
    * @param msg message to be delivered
    * @param timeout delay of first message delivery and between subsequent messages
    * @param repeat send once if false, scheduleAtFixedRate if true
-   * @return current state descriptor
    */
   final def setTimer(name: String, msg: Any, timeout: FiniteDuration, repeat: Boolean = false): Unit = {
     if (debugEvent)
@@ -1160,7 +1159,6 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
    * @param name identifier to be used with cancelTimer()
    * @param msg message to be delivered
    * @param timeout delay of first message delivery and between subsequent messages
-   * @return current state descriptor
    */
   final def setTimer(name: String, msg: Any, timeout: FiniteDuration): Unit =
     setTimer(name, msg, timeout, false)
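
A compact FSM sketch using the `setTimer` overloads documented above (state and message names are illustrative):

    import akka.actor.{ Actor, FSM }
    import scala.concurrent.duration._

    sealed trait State
    case object Idle extends State
    case object Active extends State

    class Ticker extends Actor with FSM[State, Int] {
      startWith(Idle, 0)

      when(Idle) {
        case Event("start", _) =>
          setTimer("tick", "tick", 1.second, repeat = true) // no state descriptor returned
          goto(Active)
      }

      when(Active) {
        case Event("tick", count) => stay using (count + 1)
        case Event("stop", _)     => cancelTimer("tick"); goto(Idle)
      }

      initialize()
    }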
diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
index a8ae39865d..40d4f51db6 100644
--- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
+++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
@@ -145,7 +145,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits {
 
   /**
    * When supervisorStrategy is not specified for an actor this
-   * [[Decider]] is used by default in the supervisor strategy.
+   * `Decider` is used by default in the supervisor strategy.
    * The child will be stopped when [[akka.actor.ActorInitializationException]],
    * [[akka.actor.ActorKilledException]], or [[akka.actor.DeathPactException]] is
    * thrown. It will be restarted for other `Exception` types.
diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala
index 92ad6e4ac1..0ea99a562a 100644
--- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala
+++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala
@@ -187,7 +187,7 @@ trait Cancellable {
  * when scheduling single-shot tasks, instead it always rounds up the task
  * delay to a full multiple of the TickDuration. This means that tasks are
  * scheduled possibly one tick later than they could be (if checking that
- * “now() + delay <= nextTick” were done).
+ * “now() + delay <= nextTick” were done).
  */
 class LightArrayRevolverScheduler(config: Config,
                                   log: LoggingAdapter,
diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala
index e597b16c5e..50198a61aa 100644
--- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala
+++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala
@@ -138,7 +138,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
     /**
      * Invokes the Method on the supplied instance
      *
-     * @throws the underlying exception if there's an InvocationTargetException thrown on the invocation
+     * Throws the underlying exception if there's an InvocationTargetException thrown on the invocation.
      */
     def apply(instance: AnyRef): AnyRef = try {
       parameters match {
@@ -217,8 +217,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
    *
    * NEVER EXPOSE "this" to someone else, always use "self[TypeOfInterface(s)]"
    *
-   * @throws IllegalStateException if called outside of the scope of a method on this TypedActor
-   * @throws ClassCastException if the supplied type T isn't the type of the proxy associated with this TypedActor
+   * Throws IllegalStateException if called outside of the scope of a method on this TypedActor.
+   *
+   * Throws ClassCastException if the supplied type T isn't the type of the proxy associated with this TypedActor.
    */
   def self[T <: AnyRef] = selfReference.get.asInstanceOf[T] match {
     case null ⇒ throw new IllegalStateException("Calling TypedActor.self outside of a TypedActor implementation method!")
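
A minimal sketch of the `self[T]` accessor described above (interface and implementation names are illustrative):

    import akka.actor.TypedActor

    trait Squarer {
      def square(i: Int): Int
    }

    class SquarerImpl extends Squarer {
      def square(i: Int): Int = {
        // Only valid while a TypedActor method is being executed on this instance,
        // otherwise an IllegalStateException is thrown.
        val me: Squarer = TypedActor.self[Squarer]
        i * i
      }
    }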
diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActorWithStash.scala b/akka-actor/src/main/scala/akka/actor/UntypedActorWithStash.scala
index 111a85f4cd..ae5450aee8 100644
--- a/akka-actor/src/main/scala/akka/actor/UntypedActorWithStash.scala
+++ b/akka-actor/src/main/scala/akka/actor/UntypedActorWithStash.scala
@@ -30,7 +30,7 @@ package akka.actor
  *   }
  * 
 * Note that the subclasses of `UntypedActorWithStash` by default request a Deque based mailbox since this class
- * implements the `RequiresMessageQueue` marker interface.
+ * implements the `RequiresMessageQueue<DequeBasedMessageQueueSemantics>` marker interface.
 * You can override the default mailbox provided when `DequeBasedMessageQueueSemantics` are requested via config:
 * 
  *   akka.actor.mailbox.requirements {
diff --git a/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala b/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala
index 6e7f4d773b..dc36cae14b 100644
--- a/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala
+++ b/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala
@@ -157,8 +157,8 @@ trait Inbox { this: ActorDSL.type ⇒
 
   /**
    * Create a new actor which will internally queue up messages it gets so that
-   * they can be interrogated with the [[akka.actor.dsl.Inbox!.Inbox!.receive]]
-   * and [[akka.actor.dsl.Inbox!.Inbox!.select]] methods. It will be created as
+   * they can be interrogated with the `akka.actor.dsl.Inbox!.Inbox!.receive`
+   * and `akka.actor.dsl.Inbox!.Inbox!.select` methods. It will be created as
    * a system actor in the ActorSystem which is implicitly (or explicitly)
    * supplied.
    */
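
A small sketch of the inbox usage the comment above refers to (assuming an implicit ActorSystem is in scope; names are illustrative):

    import akka.actor.ActorDSL._
    import akka.actor.{ ActorRef, ActorSystem }
    import scala.concurrent.duration._

    implicit val system = ActorSystem("demo")

    def query(target: ActorRef): Any = {
      val box = inbox()
      target.tell("ping", box.getRef())
      box.receive(3.seconds) // blocks until a reply arrives or times out
    }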
diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
index 0ea91952ea..cfe484c8ad 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
@@ -76,7 +76,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc
    * Returns a dispatcher as specified in configuration. Please note that this
    * method _may_ create and return a NEW dispatcher, _every_ call.
    *
-   * @throws ConfigurationException if the specified dispatcher cannot be found in the configuration
+   * Throws ConfigurationException if the specified dispatcher cannot be found in the configuration.
    */
   def lookup(id: String): MessageDispatcher = lookupConfigurator(id).dispatcher()
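
A usage sketch for `lookup`; it assumes a dispatcher with this id is defined in configuration, otherwise a ConfigurationException is thrown as noted above:

    import akka.actor.ActorSystem
    import scala.concurrent.Future

    val system = ActorSystem("demo")

    // May create and return a new dispatcher on every call, as documented above.
    implicit val blockingDispatcher = system.dispatchers.lookup("my-blocking-dispatcher")

    Future {
      // blocking work runs on the looked-up dispatcher
      Thread.sleep(100)
    }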
 
diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala
index 8618c104fd..6af5853aa1 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Future.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala
@@ -41,7 +41,7 @@ object ExecutionContexts {
    * Returns a new ExecutionContextExecutorService which will delegate execution to the underlying ExecutorService,
    * and which will use the default error reporter.
    *
-   * @param executor the ExecutorService which will be used for the ExecutionContext
+   * @param executorService the ExecutorService which will be used for the ExecutionContext
    * @return a new ExecutionContext
    */
   def fromExecutorService(executorService: ExecutorService): ExecutionContextExecutorService =
@@ -51,7 +51,7 @@ object ExecutionContexts {
    * Returns a new ExecutionContextExecutorService which will delegate execution to the underlying ExecutorService,
    * and which will use the provided error reporter.
    *
-   * @param executor the ExecutorService which will be used for the ExecutionContext
+   * @param executorService the ExecutorService which will be used for the ExecutionContext
    * @param errorReporter a Procedure that will log any exceptions passed to it
    * @return a new ExecutionContext
    */
@@ -272,7 +272,7 @@ abstract class Recover[+T] extends japi.RecoverBridge[T] {
    * becomes completed with a failure.
    *
    * @return a successful value for the passed in failure
-   * @throws the passed in failure to propagate it.
+   * Throws the passed in failure to propagate it.
    *
    * Java API
    */
@@ -350,7 +350,7 @@ abstract class Mapper[-T, +R] extends scala.runtime.AbstractFunction1[T, R] {
   /**
    * Override this method if you need to throw checked exceptions
    *
-   * @throws UnsupportedOperation by default
+   * Throws UnsupportedOperation by default.
    */
   @throws(classOf[Throwable])
   def checkedApply(parameter: T): R = throw new UnsupportedOperationException("Mapper.checkedApply has not been implemented")
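
A short sketch of wrapping a Java ExecutorService via the `fromExecutorService` factory documented above:

    import java.util.concurrent.Executors
    import akka.dispatch.ExecutionContexts
    import scala.concurrent.Future

    val pool = Executors.newFixedThreadPool(4)
    implicit val ec = ExecutionContexts.fromExecutorService(pool)

    Future(21 * 2).foreach(println)

    // Shut the underlying pool down when it is no longer needed.
    // pool.shutdown()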
diff --git a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
index 0dff418daa..697dcb3edd 100644
--- a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
@@ -182,7 +182,7 @@ private[akka] class EarliestFirstSystemMessageList(val head: SystemMessage) exte
  *
  * INTERNAL API
  *
- * ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅
+ * NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS
  */
 private[akka] sealed trait SystemMessage extends PossiblyHarmful with Serializable {
   // Next fields are only modifiable via the SystemMessageList value class
diff --git a/akka-actor/src/main/scala/akka/event/EventBusUnsubscribers.scala b/akka-actor/src/main/scala/akka/event/EventBusUnsubscribers.scala
index 53c146abf6..7bc3ee25c5 100644
--- a/akka-actor/src/main/scala/akka/event/EventBusUnsubscribers.scala
+++ b/akka-actor/src/main/scala/akka/event/EventBusUnsubscribers.scala
@@ -51,7 +51,7 @@ private[akka] class EventStreamUnsubscriber(eventStream: EventStream, debug: Boo
  * INTERNAL API
  *
  * Provides factory for [[akka.event.EventStreamUnsubscriber]] actors with **unique names**.
- * This is needed if someone spins up more [[EventStream]]s using the same [[ActorSystem]],
+ * This is needed if someone spins up more [[EventStream]]s using the same [[akka.actor.ActorSystem]],
  * each stream gets it's own unsubscriber.
  */
 private[akka] object EventStreamUnsubscriber {
diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala
index 447b2c3928..d77976a0b0 100644
--- a/akka-actor/src/main/scala/akka/event/Logging.scala
+++ b/akka-actor/src/main/scala/akka/event/Logging.scala
@@ -251,8 +251,8 @@ class DummyClassForStringSources
 * In case an [[akka.actor.ActorSystem]] is provided, the following apply:
 * <ul>
 * <li>[[akka.actor.Actor]] and [[akka.actor.ActorRef]] will be represented by their absolute physical path</li>
- * <li>providing a `String` as source will append "()" and use the result</li>
- * <li>providing a `Class` will extract its simple name, append "()" and use the result</li>
+ * <li>providing a `String` as source will append "(<system address>)" and use the result</li>
+ * <li>providing a `Class` will extract its simple name, append "(<system address>)" and use the result</li>
 * <li>anything else gives compile error unless implicit [[akka.event.LogSource]] is in scope for it</li>
 * </ul>
* @@ -385,7 +385,7 @@ object Logging { /** * Returns a 'safe' getSimpleName for the provided Class - * @param obj + * @param clazz * @return the simple name of the given Class */ def simpleName(clazz: Class[_]): String = { @@ -856,19 +856,19 @@ object Logging { * evaluate .toString only if the log level is actually enabled. Typically used * by obtaining an implementation from the Logging object: * - *
+ * {{{
  * val log = Logging(<bus>, <source object>)
  * ...
  * log.info("hello world!")
- * 
+ * }}}
 *
 * All log-level methods support simple interpolation templates with up to four
 * arguments placed by using {} within the template (first string
 * argument):
 *
- *
+ * {{{
  * log.error(exception, "Exception while processing {} in state {}", msg, state)
- * 
+ * }}} */ trait LoggingAdapter { @@ -1140,7 +1140,7 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter { /** * Scala API: * Mapped Diagnostic Context for application defined values - * which can be used in PatternLayout when [[akka.event.slf4j.Slf4jLogger]] is configured. + * which can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured. * Visit Logback Docs: MDC for more information. * * @return A Map containing the MDC values added by the application, or empty Map if no value was added. @@ -1150,7 +1150,7 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter { /** * Scala API: * Sets the values to be added to the MDC (Mapped Diagnostic Context) before the log is appended. - * These values can be used in PatternLayout when [[akka.event.slf4j.Slf4jLogger]] is configured. + * These values can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured. * Visit Logback Docs: MDC for more information. */ def mdc(mdc: MDC): Unit = _mdc = if (mdc != null) mdc else emptyMDC @@ -1158,17 +1158,18 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter { /** * Java API: * Mapped Diagnostic Context for application defined values - * which can be used in PatternLayout when [[akka.event.slf4j.Slf4jLogger]] is configured. + * which can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured. * Visit Logback Docs: MDC for more information. * Note tha it returns a COPY of the actual MDC values. * You cannot modify any value by changing the returned Map. * Code like the following won't have any effect unless you set back the modified Map. - *
+   *
+   * {{{
    *   Map mdc = log.getMDC();
    *   mdc.put("key", value);
    *   // NEEDED
    *   log.setMDC(mdc);
-   * 
+ * }}} * * @return A copy of the actual MDC values */ @@ -1177,7 +1178,7 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter { /** * Java API: * Sets the values to be added to the MDC (Mapped Diagnostic Context) before the log is appended. - * These values can be used in PatternLayout when [[akka.event.slf4j.Slf4jLogger]] is configured. + * These values can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured. * Visit Logback Docs: MDC for more information. */ def setMDC(jMdc: java.util.Map[String, Any]): Unit = mdc(if (jMdc != null) jMdc.asScala.toMap else emptyMDC) diff --git a/akka-actor/src/main/scala/akka/io/Inet.scala b/akka-actor/src/main/scala/akka/io/Inet.scala index 289722c601..105e00c519 100644 --- a/akka-actor/src/main/scala/akka/io/Inet.scala +++ b/akka-actor/src/main/scala/akka/io/Inet.scala @@ -78,7 +78,7 @@ object Inet { /** * Open and return new DatagramChannel. * - * [[scala.throws]] is needed because [[DatagramChannel.open]] method + * `throws` is needed because `DatagramChannel.open` method * can throw an exception. */ @throws(classOf[Exception]) @@ -95,7 +95,7 @@ object Inet { /** * [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option * - * For more information see [[java.net.Socket.setReceiveBufferSize]] + * For more information see [[java.net.Socket#setReceiveBufferSize]] */ final case class ReceiveBufferSize(size: Int) extends SocketOption { require(size > 0, "ReceiveBufferSize must be > 0") @@ -109,7 +109,7 @@ object Inet { /** * [[akka.io.Inet.SocketOption]] to enable or disable SO_REUSEADDR * - * For more information see [[java.net.Socket.setReuseAddress]] + * For more information see [[java.net.Socket#setReuseAddress]] */ final case class ReuseAddress(on: Boolean) extends SocketOption { override def beforeServerSocketBind(s: ServerSocket): Unit = s.setReuseAddress(on) @@ -120,7 +120,7 @@ object Inet { /** * [[akka.io.Inet.SocketOption]] to set the SO_SNDBUF option. * - * For more information see [[java.net.Socket.setSendBufferSize]] + * For more information see [[java.net.Socket#setSendBufferSize]] */ final case class SendBufferSize(size: Int) extends SocketOption { require(size > 0, "SendBufferSize must be > 0") @@ -132,7 +132,7 @@ object Inet { * type-of-service octet in the IP header for packets sent from this * socket. * - * For more information see [[java.net.Socket.setTrafficClass]] + * For more information see [[java.net.Socket#setTrafficClass]] */ final case class TrafficClass(tc: Int) extends SocketOption { require(0 <= tc && tc <= 255, "TrafficClass needs to be in the interval [0, 255]") @@ -145,21 +145,21 @@ object Inet { /** * [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option * - * For more information see [[java.net.Socket.setReceiveBufferSize]] + * For more information see [[java.net.Socket#setReceiveBufferSize]] */ val ReceiveBufferSize = SO.ReceiveBufferSize /** * [[akka.io.Inet.SocketOption]] to enable or disable SO_REUSEADDR * - * For more information see [[java.net.Socket.setReuseAddress]] + * For more information see [[java.net.Socket#setReuseAddress]] */ val ReuseAddress = SO.ReuseAddress /** * [[akka.io.Inet.SocketOption]] to set the SO_SNDBUF option. * - * For more information see [[java.net.Socket.setSendBufferSize]] + * For more information see [[java.net.Socket#setSendBufferSize]] */ val SendBufferSize = SO.SendBufferSize @@ -168,7 +168,7 @@ object Inet { * type-of-service octet in the IP header for packets sent from this * socket. 
* - * For more information see [[java.net.Socket.setTrafficClass]] + * For more information see [[java.net.Socket#setTrafficClass]] */ val TrafficClass = SO.TrafficClass } @@ -178,21 +178,21 @@ object Inet { /** * [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option * - * For more information see [[java.net.Socket.setReceiveBufferSize]] + * For more information see [[java.net.Socket#setReceiveBufferSize]] */ def receiveBufferSize(size: Int) = ReceiveBufferSize(size) /** * [[akka.io.Inet.SocketOption]] to enable or disable SO_REUSEADDR * - * For more information see [[java.net.Socket.setReuseAddress]] + * For more information see [[java.net.Socket#setReuseAddress]] */ def reuseAddress(on: Boolean) = ReuseAddress(on) /** * [[akka.io.Inet.SocketOption]] to set the SO_SNDBUF option. * - * For more information see [[java.net.Socket.setSendBufferSize]] + * For more information see [[java.net.Socket#setSendBufferSize]] */ def sendBufferSize(size: Int) = SendBufferSize(size) @@ -201,7 +201,7 @@ object Inet { * type-of-service octet in the IP header for packets sent from this * socket. * - * For more information see [[java.net.Socket.setTrafficClass]] + * For more information see [[java.net.Socket#setTrafficClass]] */ def trafficClass(tc: Int) = TrafficClass(tc) } diff --git a/akka-actor/src/main/scala/akka/io/Tcp.scala b/akka-actor/src/main/scala/akka/io/Tcp.scala index 9d77d53129..cb4f964a40 100644 --- a/akka-actor/src/main/scala/akka/io/Tcp.scala +++ b/akka-actor/src/main/scala/akka/io/Tcp.scala @@ -54,7 +54,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { /** * [[akka.io.Inet.SocketOption]] to enable or disable SO_KEEPALIVE * - * For more information see [[java.net.Socket.setKeepAlive]] + * For more information see `java.net.Socket.setKeepAlive` */ final case class KeepAlive(on: Boolean) extends SocketOption { override def afterConnect(s: Socket): Unit = s.setKeepAlive(on) @@ -65,7 +65,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * of TCP urgent data) By default, this option is disabled and TCP urgent * data is silently discarded. * - * For more information see [[java.net.Socket.setOOBInline]] + * For more information see `java.net.Socket.setOOBInline` */ final case class OOBInline(on: Boolean) extends SocketOption { override def afterConnect(s: Socket): Unit = s.setOOBInline(on) @@ -79,7 +79,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * * Please note, that TCP_NODELAY is enabled by default. * - * For more information see [[java.net.Socket.setTcpNoDelay]] + * For more information see `java.net.Socket.setTcpNoDelay` */ final case class TcpNoDelay(on: Boolean) extends SocketOption { override def afterConnect(s: Socket): Unit = s.setTcpNoDelay(on) @@ -109,7 +109,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * * @param remoteAddress is the address to connect to * @param localAddress optionally specifies a specific address to bind to - * @param options Please refer to the [[SO]] object for a list of all supported options. + * @param options Please refer to the `Tcp.SO` object for a list of all supported options. */ final case class Connect(remoteAddress: InetSocketAddress, localAddress: Option[InetSocketAddress] = None, @@ -134,7 +134,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * @param backlog This specifies the number of unaccepted connections the O/S * kernel will hold for this port before refusing connections. 
* - * @param options Please refer to the [[SO]] object for a list of all supported options. + * @param options Please refer to the `Tcp.SO` object for a list of all supported options. */ final case class Bind(handler: ActorRef, localAddress: InetSocketAddress, @@ -153,11 +153,11 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * * @param keepOpenOnPeerClosed If this is set to true then the connection * is not automatically closed when the peer closes its half, - * requiring an explicit [[Closed]] from our side when finished. + * requiring an explicit [[CloseCommand]] from our side when finished. * * @param useResumeWriting If this is set to true then the connection actor * will refuse all further writes after issuing a [[CommandFailed]] - * notification until [[ResumeWriting]] is received. This can + * notification until `ResumeWriting` is received. This can * be used to implement NACK-based write backpressure. */ final case class Register(handler: ActorRef, keepOpenOnPeerClosed: Boolean = false, useResumeWriting: Boolean = true) extends Command @@ -183,7 +183,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { /** * A normal close operation will first flush pending writes and then close the * socket. The sender of this command and the registered handler for incoming - * data will both be notified once the socket is closed using a [[Closed]] + * data will both be notified once the socket is closed using a `Closed` * message. */ case object Close extends CloseCommand { @@ -198,7 +198,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * A confirmed close operation will flush pending writes and half-close the * connection, waiting for the peer to close the other half. The sender of this * command and the registered handler for incoming data will both be notified - * once the socket is closed using a [[ConfirmedClosed]] message. + * once the socket is closed using a `ConfirmedClosed` message. */ case object ConfirmedClose extends CloseCommand { /** @@ -213,7 +213,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * command to the O/S kernel which should result in a TCP_RST packet being sent * to the peer. The sender of this command and the registered handler for * incoming data will both be notified once the socket is closed using a - * [[Aborted]] message. + * `Aborted` message. */ case object Abort extends CloseCommand { /** @@ -225,7 +225,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { /** * Each [[WriteCommand]] can optionally request a positive acknowledgment to be sent - * to the commanding actor. If such notification is not desired the [[WriteCommand#ack]] + * to the commanding actor. If such notification is not desired the [[SimpleWriteCommand#ack]] * must be set to an instance of this class. The token contained within can be used * to recognize which write failed when receiving a [[CommandFailed]] message. */ @@ -238,7 +238,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { object NoAck extends NoAck(null) /** - * Common interface for all write commands, currently [[Write]], [[WriteFile]] and [[CompoundWrite]]. + * Common interface for all write commands. */ sealed abstract class WriteCommand extends Command { /** @@ -310,8 +310,8 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { /** * Write data to the TCP connection. If no ack is needed use the special * `NoAck` object. 
The connection actor will reply with a [[CommandFailed]] - * message if the write could not be enqueued. If [[WriteCommand#wantsAck]] - * returns true, the connection actor will reply with the supplied [[WriteCommand#ack]] + * message if the write could not be enqueued. If [[SimpleWriteCommand#wantsAck]] + * returns true, the connection actor will reply with the supplied [[SimpleWriteCommand#ack]] * token once the write has been successfully enqueued to the O/S kernel. * Note that this does not in any way guarantee that the data will be * or have been sent! Unfortunately there is no way to determine whether @@ -336,9 +336,9 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { /** * Write `count` bytes starting at `position` from file at `filePath` to the connection. - * The count must be > 0. The connection actor will reply with a [[CommandFailed]] - * message if the write could not be enqueued. If [[WriteCommand#wantsAck]] - * returns true, the connection actor will reply with the supplied [[WriteCommand#ack]] + * The count must be > 0. The connection actor will reply with a [[CommandFailed]] + * message if the write could not be enqueued. If [[SimpleWriteCommand#wantsAck]] + * returns true, the connection actor will reply with the supplied [[SimpleWriteCommand#ack]] * token once the write has been successfully enqueued to the O/S kernel. * Note that this does not in any way guarantee that the data will be * or have been sent! Unfortunately there is no way to determine whether @@ -385,12 +385,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { /** * Sending this command to the connection actor will disable reading from the TCP * socket. TCP flow-control will then propagate backpressure to the sender side - * as buffers fill up on either end. To re-enable reading send [[ResumeReading]]. + * as buffers fill up on either end. To re-enable reading send `ResumeReading`. */ case object SuspendReading extends Command /** - * This command needs to be sent to the connection actor after a [[SuspendReading]] + * This command needs to be sent to the connection actor after a `SuspendReading` * command in order to resume reading from the socket. */ case object ResumeReading extends Command @@ -430,7 +430,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { /** * When `useResumeWriting` is in effect as indicated in the [[Register]] message, - * the [[ResumeWriting]] command will be acknowledged by this message type, upon + * the `ResumeWriting` command will be acknowledged by this message type, upon * which it is safe to send at least one write. This means that all writes preceding * the first [[CommandFailed]] message have been enqueued to the O/S kernel at this * point. @@ -446,7 +446,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { final case class Bound(localAddress: InetSocketAddress) extends Event /** - * The sender of an [[Unbind]] command will receive confirmation through this + * The sender of an `Unbind` command will receive confirmation through this * message once the listening socket has been closed. */ sealed trait Unbound extends Event @@ -458,12 +458,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { */ sealed trait ConnectionClosed extends Event with DeadLetterSuppression { /** - * `true` iff the connection has been closed in response to an [[Abort]] command. + * `true` iff the connection has been closed in response to an `Abort` command. 
*/ def isAborted: Boolean = false /** * `true` iff the connection has been fully closed in response to a - * [[ConfirmedClose]] command. + * `ConfirmedClose` command. */ def isConfirmed: Boolean = false /** @@ -483,18 +483,18 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { def getErrorCause: String = null } /** - * The connection has been closed normally in response to a [[Close]] command. + * The connection has been closed normally in response to a `Close` command. */ case object Closed extends ConnectionClosed /** - * The connection has been aborted in response to an [[Abort]] command. + * The connection has been aborted in response to an `Abort` command. */ case object Aborted extends ConnectionClosed { override def isAborted = true } /** * The connection has been half-closed by us and then half-close by the peer - * in response to a [[ConfirmedClose]] command. + * in response to a `ConfirmedClose` command. */ case object ConfirmedClosed extends ConnectionClosed { override def isConfirmed = true @@ -585,7 +585,7 @@ object TcpSO extends SoJavaFactories { /** * [[akka.io.Inet.SocketOption]] to enable or disable SO_KEEPALIVE * - * For more information see [[java.net.Socket.setKeepAlive]] + * For more information see `java.net.Socket.setKeepAlive` */ def keepAlive(on: Boolean) = KeepAlive(on) @@ -594,7 +594,7 @@ object TcpSO extends SoJavaFactories { * of TCP urgent data) By default, this option is disabled and TCP urgent * data is silently discarded. * - * For more information see [[java.net.Socket.setOOBInline]] + * For more information see `java.net.Socket.setOOBInline` */ def oobInline(on: Boolean) = OOBInline(on) @@ -604,7 +604,7 @@ object TcpSO extends SoJavaFactories { * * Please note, that TCP_NODELAY is enabled by default. * - * For more information see [[java.net.Socket.setTcpNoDelay]] + * For more information see `java.net.Socket.setTcpNoDelay` */ def tcpNoDelay(on: Boolean) = TcpNoDelay(on) } @@ -648,8 +648,8 @@ object TcpMessage { * @param handler The actor which will receive all incoming connection requests * in the form of [[Tcp.Connected]] messages. * - * @param localAddress The socket address to bind to; use port zero for - * automatic assignment (i.e. an ephemeral port, see [[Bound]]) + * @param endpoint The socket address to bind to; use port zero for + * automatic assignment (i.e. an ephemeral port, see [[Tcp.Bound]]) * * @param backlog This specifies the number of unaccepted connections the O/S * kernel will hold for this port before refusing connections. @@ -682,11 +682,11 @@ object TcpMessage { * * @param keepOpenOnPeerClosed If this is set to true then the connection * is not automatically closed when the peer closes its half, - * requiring an explicit [[Tcp.Closed]] from our side when finished. + * requiring an explicit `Tcp.ConnectionClosed from our side when finished. * * @param useResumeWriting If this is set to true then the connection actor * will refuse all further writes after issuing a [[Tcp.CommandFailed]] - * notification until [[Tcp.ResumeWriting]] is received. This can + * notification until [[Tcp]] `ResumeWriting` is received. This can * be used to implement NACK-based write backpressure. */ def register(handler: ActorRef, keepOpenOnPeerClosed: Boolean, useResumeWriting: Boolean): Command = @@ -699,14 +699,14 @@ object TcpMessage { /** * In order to close down a listening socket, send this message to that socket’s * actor (that is the actor which previously had sent the [[Tcp.Bound]] message). 
The - * listener socket actor will reply with a [[Tcp.Unbound]] message. + * listener socket actor will reply with a `Tcp.Unbound` message. */ def unbind: Command = Unbind /** * A normal close operation will first flush pending writes and then close the * socket. The sender of this command and the registered handler for incoming - * data will both be notified once the socket is closed using a [[Tcp.Closed]] + * data will both be notified once the socket is closed using a `Tcp.Closed` * message. */ def close: Command = Close @@ -715,7 +715,7 @@ object TcpMessage { * A confirmed close operation will flush pending writes and half-close the * connection, waiting for the peer to close the other half. The sender of this * command and the registered handler for incoming data will both be notified - * once the socket is closed using a [[Tcp.ConfirmedClosed]] message. + * once the socket is closed using a `Tcp.ConfirmedClosed` message. */ def confirmedClose: Command = ConfirmedClose @@ -724,13 +724,13 @@ object TcpMessage { * command to the O/S kernel which should result in a TCP_RST packet being sent * to the peer. The sender of this command and the registered handler for * incoming data will both be notified once the socket is closed using a - * [[Tcp.Aborted]] message. + * `Tcp.Aborted` message. */ def abort: Command = Abort /** * Each [[Tcp.WriteCommand]] can optionally request a positive acknowledgment to be sent - * to the commanding actor. If such notification is not desired the [[Tcp.WriteCommand#ack]] + * to the commanding actor. If such notification is not desired the [[Tcp.SimpleWriteCommand#ack]] * must be set to an instance of this class. The token contained within can be used * to recognize which write failed when receiving a [[Tcp.CommandFailed]] message. */ @@ -744,8 +744,8 @@ object TcpMessage { /** * Write data to the TCP connection. If no ack is needed use the special * `NoAck` object. The connection actor will reply with a [[Tcp.CommandFailed]] - * message if the write could not be enqueued. If [[Tcp.WriteCommand#wantsAck]] - * returns true, the connection actor will reply with the supplied [[Tcp.WriteCommand#ack]] + * message if the write could not be enqueued. If [[Tcp.SimpleWriteCommand#wantsAck]] + * returns true, the connection actor will reply with the supplied [[Tcp.SimpleWriteCommand#ack]] * token once the write has been successfully enqueued to the O/S kernel. * Note that this does not in any way guarantee that the data will be * or have been sent! Unfortunately there is no way to determine whether @@ -759,9 +759,9 @@ object TcpMessage { /** * Write `count` bytes starting at `position` from file at `filePath` to the connection. - * The count must be > 0. The connection actor will reply with a [[Tcp.CommandFailed]] - * message if the write could not be enqueued. If [[Tcp.WriteCommand#wantsAck]] - * returns true, the connection actor will reply with the supplied [[Tcp.WriteCommand#ack]] + * The count must be > 0. The connection actor will reply with a [[Tcp.CommandFailed]] + * message if the write could not be enqueued. If [[Tcp.SimpleWriteCommand#wantsAck]] + * returns true, the connection actor will reply with the supplied [[Tcp.SimpleWriteCommand#ack]] * token once the write has been successfully enqueued to the O/S kernel. * Note that this does not in any way guarantee that the data will be * or have been sent! 
Unfortunately there is no way to determine whether @@ -782,12 +782,12 @@ object TcpMessage { /** * Sending this command to the connection actor will disable reading from the TCP * socket. TCP flow-control will then propagate backpressure to the sender side - * as buffers fill up on either end. To re-enable reading send [[Tcp.ResumeReading]]. + * as buffers fill up on either end. To re-enable reading send `Tcp.ResumeReading`. */ def suspendReading: Command = SuspendReading /** - * This command needs to be sent to the connection actor after a [[Tcp.SuspendReading]] + * This command needs to be sent to the connection actor after a `Tcp.SuspendReading` * command in order to resume reading from the socket. */ def resumeReading: Command = ResumeReading diff --git a/akka-actor/src/main/scala/akka/io/Udp.scala b/akka-actor/src/main/scala/akka/io/Udp.scala index 61bddd2608..53125c158b 100644 --- a/akka-actor/src/main/scala/akka/io/Udp.scala +++ b/akka-actor/src/main/scala/akka/io/Udp.scala @@ -120,13 +120,13 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider { * Send this message to a listener actor (which sent a [[Bound]] message) to * have it stop reading datagrams from the network. If the O/S kernel’s receive * buffer runs full then subsequent datagrams will be silently discarded. - * Re-enable reading from the socket using the [[ResumeReading]] command. + * Re-enable reading from the socket using the `ResumeReading` command. */ case object SuspendReading extends Command /** * This message must be sent to the listener actor to re-enable reading from - * the socket after a [[SuspendReading]] command. + * the socket after a `SuspendReading` command. */ case object ResumeReading extends Command @@ -161,7 +161,7 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider { case object SimpleSenderReady extends SimpleSenderReady /** - * This message is sent by the listener actor in response to an [[Unbind]] command + * This message is sent by the listener actor in response to an `Unbind` command * after the socket has been closed. */ sealed trait Unbound @@ -312,13 +312,13 @@ object UdpMessage { * Send this message to a listener actor (which sent a [[Udp.Bound]] message) to * have it stop reading datagrams from the network. If the O/S kernel’s receive * buffer runs full then subsequent datagrams will be silently discarded. - * Re-enable reading from the socket using the [[Udp.ResumeReading]] command. + * Re-enable reading from the socket using the `Udp.ResumeReading` command. */ def suspendReading: Command = SuspendReading /** * This message must be sent to the listener actor to re-enable reading from - * the socket after a [[Udp.SuspendReading]] command. + * the socket after a `Udp.SuspendReading` command. */ def resumeReading: Command = ResumeReading } diff --git a/akka-actor/src/main/scala/akka/io/UdpConnected.scala b/akka-actor/src/main/scala/akka/io/UdpConnected.scala index d92da0e45d..f643b5c231 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnected.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnected.scala @@ -100,13 +100,13 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide * Send this message to a listener actor (which sent a [[Udp.Bound]] message) to * have it stop reading datagrams from the network. If the O/S kernel’s receive * buffer runs full then subsequent datagrams will be silently discarded. - * Re-enable reading from the socket using the [[ResumeReading]] command. 
+ * Re-enable reading from the socket using the `ResumeReading` command. */ case object SuspendReading extends Command /** * This message must be sent to the listener actor to re-enable reading from - * the socket after a [[SuspendReading]] command. + * the socket after a `SuspendReading` command. */ case object ResumeReading extends Command @@ -137,7 +137,7 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide /** * This message is sent by the connection actor to the actor which sent the - * [[Disconnect]] message when the UDP socket has been closed. + * `Disconnect` message when the UDP socket has been closed. */ sealed trait Disconnected extends Event case object Disconnected extends Disconnected @@ -231,13 +231,13 @@ object UdpConnectedMessage { * Send this message to a listener actor (which sent a [[Udp.Bound]] message) to * have it stop reading datagrams from the network. If the O/S kernel’s receive * buffer runs full then subsequent datagrams will be silently discarded. - * Re-enable reading from the socket using the [[UdpConnected.ResumeReading]] command. + * Re-enable reading from the socket using the `UdpConnected.ResumeReading` command. */ def suspendReading: Command = SuspendReading /** * This message must be sent to the listener actor to re-enable reading from - * the socket after a [[UdpConnected.SuspendReading]] command. + * the socket after a `UdpConnected.SuspendReading` command. */ def resumeReading: Command = ResumeReading diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index 556e678882..d6c02f2fee 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -108,9 +108,8 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * Wraps invocations of asynchronous calls that need to be protected * * @param body Call needing protected - * @tparam T return type from call * @return [[scala.concurrent.Future]] containing the call result or a - * [[scala.concurrent.TimeoutException]] if the call timed out + * `scala.concurrent.TimeoutException` if the call timed out * */ def withCircuitBreaker[T](body: ⇒ Future[T]): Future[T] = currentState.invoke(body) @@ -119,9 +118,8 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * Java API for [[#withCircuitBreaker]] * * @param body Call needing protected - * @tparam T return type from call * @return [[scala.concurrent.Future]] containing the call result or a - * [[scala.concurrent.TimeoutException]] if the call timed out + * `scala.concurrent.TimeoutException` if the call timed out */ def callWithCircuitBreaker[T](body: Callable[Future[T]]): Future[T] = withCircuitBreaker(body.call) @@ -129,12 +127,12 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * Wraps invocations of synchronous calls that need to be protected * * Calls are run in caller's thread. Because of the synchronous nature of - * this call the [[scala.concurrent.TimeoutException]] will only be thrown + * this call the `scala.concurrent.TimeoutException` will only be thrown * after the body has completed. * + * Throws java.util.concurrent.TimeoutException if the call timed out. 
+ * * @param body Call needing protected - * @tparam T return type from call - * @throws scala.concurrent.TimeoutException if the call timed out * @return The result of the call */ def withSyncCircuitBreaker[T](body: ⇒ T): T = @@ -143,11 +141,9 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite callTimeout) /** - * Java API for [[#withSyncCircuitBreaker]] + * Java API for [[#withSyncCircuitBreaker]]. Throws [[java.util.concurrent.TimeoutException]] if the call timed out. * * @param body Call needing protected - * @tparam T return type from call - * @throws scala.concurrent.TimeoutException if the call timed out * @return The result of the call */ def callWithSyncCircuitBreaker[T](body: Callable[T]): T = withSyncCircuitBreaker(body.call) @@ -223,11 +219,10 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite private[akka] def currentFailureCount: Int = Closed.get /** - * Implements consistent transition between states + * Implements consistent transition between states. Throws IllegalStateException if an invalid transition is attempted. * * @param fromState State being transitioning from * @param toState State being transitioning from - * @throws IllegalStateException if an invalid transition is attempted */ private def transition(fromState: State, toState: State): Unit = if (swapState(fromState, toState)) @@ -254,6 +249,8 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite */ private def attemptReset(): Unit = transition(Open, HalfOpen) + private val timeoutFuture = Future.failed(new TimeoutException("Circuit Breaker Timed out.") with NoStackTrace) + /** * Internal state abstraction */ @@ -264,7 +261,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * Add a listener function which is invoked on state entry * * @param listener listener implementation - * @tparam T return type of listener, not used - but supplied for type inference purposes */ def addListener(listener: Runnable): Unit = listeners add listener @@ -295,7 +291,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * call timeout is counted as a failed call, otherwise a successful call * * @param body Implementation of the call - * @tparam T Return type of the call's implementation * @return Future containing the result of the call */ def callThrough[T](body: ⇒ Future[T]): Future[T] = { @@ -308,14 +303,13 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite val p = Promise[T]() implicit val ec = sameThreadExecutionContext - p.future.onComplete({ + p.future.onComplete { case s: Success[_] ⇒ callSucceeds() case _ ⇒ callFails() - }) + } val timeout = scheduler.scheduleOnce(callTimeout) { - p.tryCompleteWith( - Future.failed(new TimeoutException("Circuit Breaker Timed out."))) + p tryCompleteWith timeoutFuture } materialize(body).onComplete { result ⇒ @@ -330,7 +324,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * Abstract entry point for all states * * @param body Implementation of the call that needs protected - * @tparam T Return type of protected call * @return Future containing result of protected call */ def invoke[T](body: ⇒ Future[T]): Future[T] @@ -373,7 +366,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * Implementation of invoke, which simply attempts the call * * @param body Implementation of the call that needs protected - * @tparam T Return type of 
protected call * @return Future containing result of protected call */ override def invoke[T](body: ⇒ Future[T]): Future[T] = callThrough(body) @@ -418,7 +410,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * If the call succeeds the breaker closes. * * @param body Implementation of the call that needs protected - * @tparam T Return type of protected call * @return Future containing result of protected call */ override def invoke[T](body: ⇒ Future[T]): Future[T] = @@ -462,7 +453,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite * Fail-fast on any invocation * * @param body Implementation of the call that needs protected - * @tparam T Return type of protected call * @return Future containing result of protected call */ override def invoke[T](body: ⇒ Future[T]): Future[T] = diff --git a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala index e80a15e34a..fbec79d4b2 100644 --- a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala @@ -1,8 +1,7 @@ -package akka.pattern - /** * Copyright (C) 2009-2015 Typesafe Inc. */ +package akka.pattern import scala.concurrent.duration.Duration import scala.concurrent.{ ExecutionContext, Promise, Future } diff --git a/akka-actor/src/main/scala/akka/routing/Resizer.scala b/akka-actor/src/main/scala/akka/routing/Resizer.scala index 7193cd9533..ff7c8f4669 100644 --- a/akka-actor/src/main/scala/akka/routing/Resizer.scala +++ b/akka-actor/src/main/scala/akka/routing/Resizer.scala @@ -100,7 +100,7 @@ case class DefaultResizer( *
<li> 0:   number of routees currently processing a message.</li>
 * <li> 1:   number of routees currently processing a message has
 *           some messages in mailbox.</li>
- * <li> > 1: number of routees with at least the configured `pressureThreshold`
+ * <li> &gt; 1: number of routees with at least the configured `pressureThreshold`
 *           messages in their mailbox. Note that estimating mailbox size of
 *           default UnboundedMailbox is O(N) operation.</li>
 * </ul>
@@ -180,7 +180,7 @@ case class DefaultResizer(
 * <li> 0:   number of routees currently processing a message.</li>
 * <li> 1:   number of routees currently processing a message has
 *           some messages in mailbox.</li>
- * <li> > 1: number of routees with at least the configured `pressureThreshold`
+ * <li> &gt; 1: number of routees with at least the configured `pressureThreshold`
 *           messages in their mailbox. Note that estimating mailbox size of
 *           default UnboundedMailbox is O(N) operation.</li>
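For orientation, a minimal sketch of how the `pressureThreshold` documented above is typically wired into a resizable pool. The `Worker` actor, the system name and the bound values are made up for illustration, and the snippet assumes the standard `akka.routing.DefaultResizer`/`RoundRobinPool` API rather than anything introduced by this patch:
{{{
import akka.actor.{ Actor, ActorSystem, Props }
import akka.routing.{ DefaultResizer, RoundRobinPool }

class Worker extends Actor {
  def receive = { case job => sender() ! s"done: $job" }
}

object ResizerSketch extends App {
  val system = ActorSystem("ResizerSketch")

  // pressureThreshold = 1: a routee counts as busy when it is processing a
  // message and has at least one more message waiting in its mailbox.
  val resizer = DefaultResizer(
    lowerBound = 2,
    upperBound = 10,
    pressureThreshold = 1)

  // With a resizer in place the pool size is adjusted between lowerBound
  // and upperBound based on the measured pressure.
  val router = system.actorOf(
    RoundRobinPool(nrOfInstances = 2, resizer = Some(resizer)).props(Props[Worker]),
    "workerPool")

  (1 to 100).foreach(i => router ! s"job-$i")
}
}}}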
  • * diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index 9d35e9918b..17e6625e80 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -115,8 +115,8 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { /** * Returns the Serializer configured for the given object, returns the NullSerializer if it's null. * - * @throws akka.ConfigurationException if no `serialization-bindings` is configured for the - * class of the object + * Throws akka.ConfigurationException if no `serialization-bindings` is configured for the + * class of the object. */ def findSerializerFor(o: AnyRef): Serializer = o match { case null ⇒ NullSerializer @@ -130,7 +130,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * ambiguity it is primarily using the most specific configured class, * and secondly the entry configured first. * - * @throws java.io.NotSerializableException if no `serialization-bindings` is configured for the class + * Throws java.io.NotSerializableException if no `serialization-bindings` is configured for the class. */ def serializerFor(clazz: Class[_]): Serializer = serializerMap.get(clazz) match { diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index d2f863b198..429a465764 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -139,7 +139,6 @@ object JavaSerializer { * * @param value - the current value under the call to callable.call() * @param callable - the operation to be performed - * @tparam S - the return type * @return the result of callable.call() */ def withValue[S](value: ExtendedActorSystem, callable: Callable[S]): S = super.withValue[S](value)(callable.call) diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index e507b3abd3..6a51b50adf 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -11,9 +11,8 @@ import annotation.tailrec /** * BoundedBlockingQueue wraps any Queue and turns the result into a BlockingQueue with a limited capacity. - * @param maxCapacity - the maximum capacity of this Queue, needs to be > 0 + * @param maxCapacity - the maximum capacity of this Queue, needs to be > 0 * @param backing - the backing Queue - * @tparam E - The type of the contents of this Queue */ class BoundedBlockingQueue[E <: AnyRef]( val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] { diff --git a/akka-actor/src/main/scala/akka/util/ByteIterator.scala b/akka-actor/src/main/scala/akka/util/ByteIterator.scala index 5a82a94e6a..65094a613e 100644 --- a/akka-actor/src/main/scala/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala/akka/util/ByteIterator.scala @@ -543,19 +543,19 @@ abstract class ByteIterator extends BufferedIterator[Byte] { /** * Get a specific number of Bytes from this iterator. In contrast to - * copyToArray, this method will fail if this.len < xs.length. + * copyToArray, this method will fail if this.len < xs.length. 
*/ def getBytes(xs: Array[Byte]): this.type = getBytes(xs, 0, xs.length) /** * Get a specific number of Bytes from this iterator. In contrast to - * copyToArray, this method will fail if length < n or if (xs.length - offset) < n. + * copyToArray, this method will fail if length < n or if (xs.length - offset) < n. */ def getBytes(xs: Array[Byte], offset: Int, n: Int): this.type /** * Get a specific number of Bytes from this iterator. In contrast to - * copyToArray, this method will fail if this.len < n. + * copyToArray, this method will fail if this.len < n. */ def getBytes(n: Int): Array[Byte] = { val bytes = new Array[Byte](n) @@ -565,7 +565,7 @@ abstract class ByteIterator extends BufferedIterator[Byte] { /** * Get a ByteString with specific number of Bytes from this iterator. In contrast to - * copyToArray, this method will fail if this.len < n. + * copyToArray, this method will fail if this.len < n. */ def getByteString(n: Int): ByteString = { val bs = clone.take(n).toByteString diff --git a/akka-actor/src/main/scala/akka/util/Index.scala b/akka-actor/src/main/scala/akka/util/Index.scala index b4c8de7b47..f90c4cb9ae 100644 --- a/akka-actor/src/main/scala/akka/util/Index.scala +++ b/akka-actor/src/main/scala/akka/util/Index.scala @@ -13,7 +13,7 @@ import scala.collection.mutable /** * An implementation of a ConcurrentMultiMap * Adds/remove is serialized over the specified key - * Reads are fully concurrent <-- el-cheapo + * Reads are fully concurrent <-- el-cheapo */ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { @@ -188,6 +188,6 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { /** * An implementation of a ConcurrentMultiMap * Adds/remove is serialized over the specified key - * Reads are fully concurrent <-- el-cheapo + * Reads are fully concurrent <-- el-cheapo */ class ConcurrentMultiMap[K, V](mapSize: Int, valueComparator: Comparator[V]) extends Index[K, V](mapSize, valueComparator) diff --git a/akka-actor/src/main/scala/akka/util/Reflect.scala b/akka-actor/src/main/scala/akka/util/Reflect.scala index 9fba43b3b1..6c523784bc 100644 --- a/akka-actor/src/main/scala/akka/util/Reflect.scala +++ b/akka-actor/src/main/scala/akka/util/Reflect.scala @@ -39,7 +39,6 @@ private[akka] object Reflect { /** * INTERNAL API * @param clazz the class which to instantiate an instance of - * @tparam T the type of the instance that will be created * @return a new instance from the default constructor of the given class */ private[akka] def instantiate[T](clazz: Class[T]): T = try clazz.newInstance catch { @@ -113,7 +112,6 @@ private[akka] object Reflect { /** * INTERNAL API * @param clazz the class which to instantiate an instance of - * @tparam T the type of the instance that will be created * @return a function which when applied will create a new instance from the default constructor of the given class */ private[akka] def instantiator[T](clazz: Class[T]): () ⇒ T = () ⇒ instantiate(clazz) diff --git a/akka-actor/src/main/scala/akka/util/StablePriorityQueue.scala b/akka-actor/src/main/scala/akka/util/StablePriorityQueue.scala index 89b6eec975..2b9752b411 100644 --- a/akka-actor/src/main/scala/akka/util/StablePriorityQueue.scala +++ b/akka-actor/src/main/scala/akka/util/StablePriorityQueue.scala @@ -10,7 +10,6 @@ import java.util.{ AbstractQueue, Comparator, Iterator, PriorityQueue } /** * PriorityQueueStabilizer wraps a priority queue so that it respects FIFO for elements of equal priority. 
- * @tparam E - The type of the elements of this Queue */ trait PriorityQueueStabilizer[E <: AnyRef] extends AbstractQueue[E] { val backingQueue: AbstractQueue[PriorityQueueStabilizer.WrappedElement[E]] @@ -58,9 +57,8 @@ object PriorityQueueStabilizer { /** * StablePriorityQueue is a priority queue that preserves order for elements of equal priority. - * @param capacity - the initial capacity of this Queue, needs to be > 0. + * @param capacity - the initial capacity of this Queue, needs to be > 0. * @param cmp - Comparator for comparing Queue elements - * @tparam E - The type of the elements of this Queue */ class StablePriorityQueue[E <: AnyRef](capacity: Int, cmp: Comparator[E]) extends PriorityQueueStabilizer[E] { val backingQueue = new PriorityQueue[PriorityQueueStabilizer.WrappedElement[E]]( @@ -70,9 +68,8 @@ class StablePriorityQueue[E <: AnyRef](capacity: Int, cmp: Comparator[E]) extend /** * StablePriorityBlockingQueue is a blocking priority queue that preserves order for elements of equal priority. - * @param capacity - the initial capacity of this Queue, needs to be > 0. + * @param capacity - the initial capacity of this Queue, needs to be > 0. * @param cmp - Comparator for comparing Queue elements - * @tparam E - The type of the elements of this Queue */ class StablePriorityBlockingQueue[E <: AnyRef](capacity: Int, cmp: Comparator[E]) extends PriorityQueueStabilizer[E] { val backingQueue = new PriorityBlockingQueue[PriorityQueueStabilizer.WrappedElement[E]]( diff --git a/akka-agent/src/main/scala/akka/agent/Agent.scala b/akka-agent/src/main/scala/akka/agent/Agent.scala index 1091eccdc5..8fa5802fc2 100644 --- a/akka-agent/src/main/scala/akka/agent/Agent.scala +++ b/akka-agent/src/main/scala/akka/agent/Agent.scala @@ -119,13 +119,11 @@ object Agent { * // use result ... * * }}} - *
    * * Agent is also monadic, which means that you can compose operations using * for-comprehensions. In monadic usage the original agents are not touched * but new agents are created. So the old values (agents) are still available * as-is. They are so-called 'persistent'. - *

    * * Example of monadic usage: * {{{ diff --git a/akka-camel/src/main/scala/akka/camel/Camel.scala b/akka-camel/src/main/scala/akka/camel/Camel.scala index 5c0dfe68ee..dd482cc081 100644 --- a/akka-camel/src/main/scala/akka/camel/Camel.scala +++ b/akka-camel/src/main/scala/akka/camel/Camel.scala @@ -80,9 +80,6 @@ class CamelSettings private[camel] (config: Config, dynamicAccess: DynamicAccess */ final val AutoAck: Boolean = config.getBoolean("akka.camel.consumer.auto-ack") - /** - * - */ final val JmxStatistics: Boolean = config.getBoolean("akka.camel.jmx") /** @@ -127,7 +124,6 @@ class CamelSettings private[camel] (config: Config, dynamicAccess: DynamicAccess * * @see akka.actor.ExtensionId * @see akka.actor.ExtensionIdProvider - * */ object CamelExtension extends ExtensionId[Camel] with ExtensionIdProvider { diff --git a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala index fa25eb1ed7..89044f158f 100644 --- a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala @@ -49,7 +49,6 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { * in a [[scala.util.Success]]. If an exception occurs during the conversion to the type T or when the header cannot be found, * the exception is returned in a [[scala.util.Failure]]. * - *

    * The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]] * using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]]. * @@ -60,7 +59,7 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { /** * Java API: Returns the header by given name parameter. The header is converted to type T as defined by the clazz parameter. * An exception is thrown when the conversion to the type T fails or when the header cannot be found. - *

    + * * The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]] * using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]]. */ @@ -133,7 +132,6 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { /** * Companion object of CamelMessage class. - * */ object CamelMessage { @@ -176,7 +174,6 @@ object CamelMessage { /** * Positive acknowledgement message (used for application-acknowledged message receipts). * When `autoAck` is set to false in the [[akka.camel.Consumer]], you can send an `Ack` to the sender of the CamelMessage. - * */ case object Ack { /** Java API to get the Ack singleton */ diff --git a/akka-camel/src/main/scala/akka/camel/Consumer.scala b/akka-camel/src/main/scala/akka/camel/Consumer.scala index 4c6807dfb5..63236f4224 100644 --- a/akka-camel/src/main/scala/akka/camel/Consumer.scala +++ b/akka-camel/src/main/scala/akka/camel/Consumer.scala @@ -14,8 +14,6 @@ import scala.language.existentials /** * Mixed in by Actor implementations that consume message from Camel endpoints. - * - * */ trait Consumer extends Actor with CamelSupport { import Consumer._ diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala index c640b1a53c..68740ae349 100644 --- a/akka-camel/src/main/scala/akka/camel/Producer.scala +++ b/akka-camel/src/main/scala/akka/camel/Producer.scala @@ -54,7 +54,7 @@ trait ProducerSupport extends Actor with CamelSupport { * actually sent it is pre-processed by calling transformOutgoingMessage. If oneway * is true, an in-only message exchange is initiated, otherwise an in-out message exchange. * - * @see Producer#produce(Any, ExchangePattern) + * @see Producer#produce */ protected def produce: Receive = { case CamelProducerObjects(endpoint, processor) ⇒ @@ -119,7 +119,7 @@ trait ProducerSupport extends Actor with CamelSupport { * response is received asynchronously, the receiveAfterProduce is called * asynchronously. The original sender is preserved. * - * @see CamelMessage#canonicalize(Any) + * @see CamelMessage#canonicalize * @param endpoint the endpoint * @param processor the processor * @param msg message to produce diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala index 5e6caae747..8113379df5 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala @@ -1,8 +1,7 @@ -package akka.camel.internal - /** * Copyright (C) 2009-2015 Typesafe Inc. */ +package akka.camel.internal import akka.actor.ActorRef diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala index 1a5220018b..a4a9baa9dd 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala @@ -9,7 +9,7 @@ import akka.camel.{ FailureResult, AkkaCamelException, CamelMessage } /** * INTERNAL API - * Adapter for converting an [[org.apache.camel.Exchange]] to and from [[akka.camel.CamelMessage]] and [[akka.camel.Failure]] objects. + * Adapter for converting an [[org.apache.camel.Exchange]] to and from [[akka.camel.CamelMessage]] and [[akka.camel.FailureResult]] objects. 
* The org.apache.camel.Message is mutable and not suitable to be used directly as messages between Actors. * This adapter is used to convert to immutable messages to be used with Actors, and convert the immutable messages back * to org.apache.camel.Message when using Camel. @@ -62,8 +62,6 @@ private[camel] class CamelExchangeAdapter(val exchange: Exchange) { * on the AkkaCamelException. * * If the exchange is out-capable then the headers of Exchange.getOut are used, otherwise the headers of Exchange.getIn are used. - * - * @see AkkaCamelException */ def toAkkaCamelException: AkkaCamelException = toAkkaCamelException(Map.empty) @@ -78,8 +76,6 @@ private[camel] class CamelExchangeAdapter(val exchange: Exchange) { * * @param headers additional headers to set on the exception in addition to those * in the exchange. - * - * @see AkkaCamelException */ def toAkkaCamelException(headers: Map[String, Any]): AkkaCamelException = { import scala.collection.JavaConversions._ @@ -88,8 +84,6 @@ private[camel] class CamelExchangeAdapter(val exchange: Exchange) { /** * Creates an immutable Failure object from the adapted Exchange so it can be used internally between Actors. - * - * @see Failure */ def toFailureMessage: FailureResult = toFailureResult(Map.empty) @@ -98,8 +92,6 @@ private[camel] class CamelExchangeAdapter(val exchange: Exchange) { * * @param headers additional headers to set on the created CamelMessage in addition to those * in the Camel message. - * - * @see Failure */ def toFailureResult(headers: Map[String, Any]): FailureResult = { import scala.collection.JavaConversions._ diff --git a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala index c7864d35b1..65b27af5be 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala @@ -47,7 +47,7 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel /** * Starts camel and underlying camel context and template. * Only the creator of Camel should start and stop it. - * @see akka.camel.DefaultCamel#shutdown() + * @see akka.camel.internal.DefaultCamel#shutdown */ def start(): this.type = { context.start() @@ -61,7 +61,7 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel * Only the creator of Camel should shut it down. * There is no need to stop Camel instance, which you get from the CamelExtension, as its lifecycle is bound to the actor system. * - * @see akka.camel.DefaultCamel#start() + * @see akka.camel.internal.DefaultCamel#start */ def shutdown(): Unit = { try context.stop() finally { diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index e6b51b358f..1172977778 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -30,8 +30,6 @@ import scala.util.{ Failure, Success, Try } * `ActorComponent` to the [[org.apache.camel.CamelContext]] under the 'actor' component name. * Messages are sent to [[akka.camel.Consumer]] actors through a [[akka.camel.internal.component.ActorEndpoint]] that * this component provides. 
- * - * */ private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends DefaultComponent { /** @@ -51,8 +49,6 @@ private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends D * Actors are referenced using actor endpoint URIs of the following format: * [actorPath]?[options]%s, * where [actorPath] refers to the actor path to the actor. - * - * */ private[camel] class ActorEndpoint(uri: String, comp: ActorComponent, @@ -87,7 +83,6 @@ private[camel] class ActorEndpoint(uri: String, /** * INTERNAL API * Configures the `ActorEndpoint`. This needs to be a `bean` for Camel purposes. - * */ private[camel] trait ActorEndpointConfig { def path: ActorEndpointPath @@ -99,12 +94,10 @@ private[camel] trait ActorEndpointConfig { } /** - * Sends the in-message of an exchange to an untyped actor, identified by an [[akka.camel.internal.component.ActorEndPoint]] - * - * @see akka.camel.component.ActorComponent - * @see akka.camel.component.ActorEndpoint - * + * Sends the in-message of an exchange to an untyped actor, identified by an [[akka.camel.internal.component.ActorEndpoint]] * + * @see akka.camel.internal.component.ActorComponent + * @see akka.camel.internal.component.ActorEndpoint */ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) extends DefaultProducer(endpoint) with AsyncProcessor { /** diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala index de95337f29..1ea80f1f3e 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala @@ -11,8 +11,6 @@ import org.apache.camel.impl.DefaultCamelContext /** * Subclass this abstract class to create an untyped producer actor. This class is meant to be used from Java. - * - * */ abstract class UntypedProducerActor extends UntypedActor with ProducerSupport { /** diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala index d655d9735e..7e041c3255 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala @@ -202,7 +202,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { } /** - * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus.Up]]. + * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus]] `Up`. */ def receiveState(state: CurrentClusterState): Unit = nodes = (state.members -- state.unreachable) collect { case m if m.status == MemberStatus.Up ⇒ m.address } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala index 9ca51114f9..0c39dcc136 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala @@ -10,7 +10,7 @@ import akka.util.Helpers.ConfigOps /** * Default [[ClusterMetricsSupervisor]] strategy: - * A configurable [[OneForOneStrategy]] with restart-on-throwable decider. + * A configurable [[akka.actor.OneForOneStrategy]] with restart-on-throwable decider. 
*/ class ClusterMetricsStrategy(config: Config) extends OneForOneStrategy( maxNrOfRetries = config.getInt("maxNrOfRetries"), @@ -25,7 +25,7 @@ object ClusterMetricsStrategy { import akka.actor.SupervisorStrategy._ /** - * [[SupervisorStrategy.Decider]] which allows to survive intermittent Sigar native method calls failures. + * [[akka.actor.SupervisorStrategy]] `Decider` which allows to survive intermittent Sigar native method calls failures. */ val metricsDecider: SupervisorStrategy.Decider = { case _: ActorInitializationException ⇒ Stop diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala index 2ea389ad11..396b48cc36 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala @@ -33,7 +33,7 @@ final case class EWMA(value: Double, alpha: Double) { * Calculates the exponentially weighted moving average for a given monitored data set. * * @param xn the new data point - * @return a new [[akka.cluster.EWMA]] with the updated value + * @return a new EWMA with the updated value */ def :+(xn: Double): EWMA = { val newValue = (alpha * xn) + (1 - alpha) * value diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala index c01a4e0498..7c2a1e3457 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala @@ -109,7 +109,7 @@ object StandardMetrics { final val SystemLoadAverage = "system-load-average" final val Processors = "processors" // In latest Linux kernels: CpuCombined + CpuStolen + CpuIdle = 1.0 or 100%. - /** Sum of User + Sys + Nice + Wait. See [[org.hyperic.sigar.CpuPerc]] */ + /** Sum of User + Sys + Nice + Wait. See `org.hyperic.sigar.CpuPerc` */ final val CpuCombined = "cpu-combined" /** The amount of CPU 'stolen' from this virtual machine by the hypervisor for other tasks (such as running another virtual machine). */ final val CpuStolen = "cpu-stolen" @@ -146,9 +146,9 @@ object StandardMetrics { } /** - * The amount of used and committed memory will always be <= max if max is defined. - * A memory allocation may fail if it attempts to increase the used memory such that used > committed - * even if used <= max is true (e.g. when the system virtual memory is low). + * The amount of used and committed memory will always be <= max if max is defined. + * A memory allocation may fail if it attempts to increase the used memory such that used > committed + * even if used <= max is true (e.g. when the system virtual memory is low). 
* * @param address [[akka.actor.Address]] of the node the metrics are gathered at * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC @@ -269,7 +269,7 @@ private[metrics] trait MetricNumericConverter { * * @param address [[akka.actor.Address]] of the node the metrics are gathered at * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC - * @param metrics the set of sampled [[akka.actor.Metric]] + * @param metrics the set of sampled [[akka.cluster.metrics.Metric]] */ @SerialVersionUID(1L) final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Metric] = Set.empty[Metric]) { @@ -344,7 +344,7 @@ private[metrics] object MetricsGossip { private[metrics] final case class MetricsGossip(nodes: Set[NodeMetrics]) { /** - * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus.Up]] + * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus]] `Up`. */ def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node)) diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala index ffdc9b0485..e40d2b47fa 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala @@ -237,7 +237,7 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP * theoretically. Note that 99% CPU utilization can be optimal or indicative of failure. * * In the data stream, this will sometimes return with a valid metric value, and sometimes as a NaN or Infinite. - * Documented bug https://bugzilla.redhat.com/show_bug.cgi?id=749121 and several others. + * Documented bug 749121 and several others. * * Creates a new instance each time. */ @@ -248,8 +248,8 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP /** * (SIGAR) Returns the stolen CPU time. Relevant to virtual hosting environments. - * For details please see: [[http://en.wikipedia.org/wiki/CPU_time#Subdivision Wikipedia - CPU time subdivision]] and - * [[https://www.datadoghq.com/2013/08/understanding-aws-stolen-cpu-and-how-it-affects-your-apps/ Understanding AWS stolen CPU and how it affects your apps]] + * For details please see: Wikipedia - CPU time subdivision and + * Understanding AWS stolen CPU and how it affects your apps * * Creates a new instance each time. */ diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala index 3bf11d6ff6..19da06186d 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala @@ -15,14 +15,14 @@ import scala.util.Failure import scala.util.Try /** - * Provide sigar instance as [[SigarProxy]]. + * Provide sigar instance as `SigarProxy`. * * User can provision sigar classes and native library in one of the following ways: * - * 1) Use [[https://github.com/kamon-io/sigar-loader Kamon sigar-loader]] as a project dependency for the user project. + * 1) Use Kamon sigar-loader as a project dependency for the user project. * Metrics extension will extract and load sigar library on demand with help of Kamon sigar provisioner. 
* - * 2) Use [[https://github.com/kamon-io/sigar-loader Kamon sigar-loader]] as java agent: `java -javaagent:/path/to/sigar-loader.jar` + * 2) Use Kamon sigar-loader as java agent: `java -javaagent:/path/to/sigar-loader.jar` * Kamon sigar loader agent will extract and load sigar library during JVM start. * * 3) Place `sigar.jar` on the `classpath` and sigar native library for the o/s on the `java.library.path` @@ -79,7 +79,7 @@ object SigarProvider { /** * Release underlying sigar proxy resources. * - * Note: [[SigarProxy]] is not [[Sigar]] during tests. + * Note: `SigarProxy` is not `Sigar` during tests. */ def close(sigar: SigarProxy) = { if (sigar.isInstanceOf[Sigar]) sigar.asInstanceOf[Sigar].close() @@ -87,7 +87,7 @@ object SigarProvider { } /** - * Provide sigar instance as [[SigarProxy]] with configured location via [[ClusterMetricsSettings]]. + * Provide sigar instance as `SigarProxy` with configured location via [[ClusterMetricsSettings]]. */ case class DefaultSigarProvider(settings: ClusterMetricsSettings) extends SigarProvider { def extractFolder = settings.NativeLibraryExtractFolder diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala index 8075c7d53e..9f74c76dba 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala @@ -19,7 +19,7 @@ import scala.annotation.tailrec import scala.collection.JavaConverters.{ asJavaIterableConverter, asScalaBufferConverter, setAsJavaSetConverter } /** - * Protobuf serializer for [[ClusterMetricsMessage]] types. + * Protobuf serializer for [[akka.cluster.metrics.ClusterMetricsMessage]] types. */ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala index 1544993fbe..66773593f4 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala @@ -1433,7 +1433,7 @@ object ShardCoordinator { * supposed to discard their known location of the shard, i.e. start buffering * incoming messages for the shard. They reply with [[BeginHandOffAck]]. * When all have replied the `ShardCoordinator` continues by sending - * [[HandOff]] to the `ShardRegion` responsible for the shard. + * `HandOff` to the `ShardRegion` responsible for the shard. */ @SerialVersionUID(1L) final case class BeginHandOff(shard: ShardId) extends CoordinatorMessage /** @@ -1441,14 +1441,14 @@ object ShardCoordinator { */ @SerialVersionUID(1L) final case class BeginHandOffAck(shard: ShardId) extends CoordinatorCommand /** - * When all `ShardRegion` actors have acknoledged the [[BeginHandOff]] the - * ShardCoordinator` sends this message to the `ShardRegion` responsible for the + * When all `ShardRegion` actors have acknoledged the `BeginHandOff` the + * `ShardCoordinator` sends this message to the `ShardRegion` responsible for the * shard. The `ShardRegion` is supposed to stop all entries in that shard and when * all entries have terminated reply with `ShardStopped` to the `ShardCoordinator`. 
*/ @SerialVersionUID(1L) final case class HandOff(shard: ShardId) extends CoordinatorMessage /** - * Reply to [[HandOff]] when all entries in the shard have been terminated. + * Reply to `HandOff` when all entries in the shard have been terminated. */ @SerialVersionUID(1L) final case class ShardStopped(shard: ShardId) extends CoordinatorCommand @@ -1527,8 +1527,8 @@ object ShardCoordinator { /** * INTERNAL API. Rebalancing process is performed by this actor. - * It sends [[BeginHandOff]] to all `ShardRegion` actors followed by - * [[HandOff]] to the `ShardRegion` responsible for the shard. + * It sends `BeginHandOff` to all `ShardRegion` actors followed by + * `HandOff` to the `ShardRegion` responsible for the shard. * When the handoff is completed it sends [[RebalanceDone]] to its * parent `ShardCoordinator`. If the process takes longer than the * `handOffTimeout` it also sends [[RebalanceDone]]. diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala index 38070f6e00..ba28f6b117 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala @@ -194,9 +194,9 @@ class ClusterClient( } /** - * Extension that starts [[ClusterReceptionist]] and accompanying [[DistributedPubSubMediator]] + * Extension that starts [[ClusterReceptionist]] and accompanying [[akka.cluster.pubsub.DistributedPubSubMediator]] * with settings defined in config section `akka.cluster.client.receptionist`. - * The [[DistributedPubSubMediator]] is started by the [[DistributedPubSubExtension]]. + * The [[akka.cluster.pubsub.DistributedPubSubMediator]] is started by the [[akka.cluster.pubsub.DistributedPubSubExtension]]. */ object ClusterReceptionistExtension extends ExtensionId[ClusterReceptionistExtension] with ExtensionIdProvider { override def get(system: ActorSystem): ClusterReceptionistExtension = super.get(system) @@ -347,12 +347,12 @@ object ClusterReceptionist { * The receptionist can be started with the [[ClusterReceptionistExtension]] or as an * ordinary actor (use the factory method [[ClusterReceptionist#props]]). * - * The receptionist forwards messages from the client to the associated [[DistributedPubSubMediator]], + * The receptionist forwards messages from the client to the associated [[akka.cluster.pubsub.DistributedPubSubMediator]], * i.e. the client can send messages to any actor in the cluster that is registered in the * `DistributedPubSubMediator`. Messages from the client are wrapped in - * [[DistributedPubSubMediator.Send]], [[DistributedPubSubMediator.SendToAll]] - * or [[DistributedPubSubMediator.Publish]] with the semantics described in - * [[DistributedPubSubMediator]]. + * [[akka.cluster.pubsub.DistributedPubSubMediator.Send]], [[akka.cluster.pubsub.DistributedPubSubMediator.SendToAll]] + * or [[akka.cluster.pubsub.DistributedPubSubMediator.Publish]] with the semantics described in + * [[akka.cluster.pubsub.DistributedPubSubMediator]]. * * Response messages from the destination actor are tunneled via the receptionist * to avoid inbound connections from other cluster nodes to the client, i.e. 
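To make the wrapping described above concrete, a hedged sketch of the registration side and the client side. The host names, ports, actor names and the `Set[ActorSelection]`-based `ClusterClient.props` / `registerService` calls follow the pre-2.4-final (contrib-style) API and are assumptions of this sketch, not something defined by this patch; the `Send`, `SendToAll` and `Publish` wrappers are the messages the receptionist translates into the corresponding `DistributedPubSubMediator` envelopes:
{{{
import akka.actor.{ Actor, ActorSystem, Props }
import akka.cluster.client.{ ClusterClient, ClusterReceptionistExtension }

class ServiceA extends Actor {
  def receive = { case msg => println(s"serviceA got: $msg") }
}

object ClusterClientSketch extends App {
  // --- on a node inside the cluster (receptionist assumed enabled via config) ---
  val clusterSystem = ActorSystem("ClusterSystem")
  val serviceA = clusterSystem.actorOf(Props[ServiceA], "serviceA")
  // make the actor reachable for external clients through the receptionist
  ClusterReceptionistExtension(clusterSystem).registerService(serviceA)

  // --- on a system outside the cluster ---
  val clientSystem = ActorSystem("ClientSystem")
  val initialContacts = Set(
    clientSystem.actorSelection("akka.tcp://ClusterSystem@host1:2552/user/receptionist"),
    clientSystem.actorSelection("akka.tcp://ClusterSystem@host2:2552/user/receptionist"))
  val client = clientSystem.actorOf(ClusterClient.props(initialContacts), "client")

  // Each wrapper is forwarded to the mediator with the matching semantics:
  client ! ClusterClient.Send("/user/serviceA", "hello", localAffinity = true)
  client ! ClusterClient.SendToAll("/user/serviceA", "hi all")
  client ! ClusterClient.Publish("content", "news")
}
}}}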
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala index 1f4354ce67..bf43737429 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala @@ -258,12 +258,12 @@ object DistributedPubSubMediator { } /** - * Mediator uses [[Router]] to send messages to multiple destinations, Router in general - * unwraps messages from [[RouterEnvelope]] and sends the contents to [[Routee]]s. + * Mediator uses [[akka.routing.Router]] to send messages to multiple destinations, Router in general + * unwraps messages from [[akka.routing.RouterEnvelope]] and sends the contents to [[akka.routing.Routee]]s. * * Using mediator services should not have an undesired effect of unwrapping messages - * out of [[RouterEnvelope]]. For this reason user messages are wrapped in - * [[MediatorRouterEnvelope]] which will be unwrapped by the [[Router]] leaving original + * out of [[akka.routing.RouterEnvelope]]. For this reason user messages are wrapped in + * [[MediatorRouterEnvelope]] which will be unwrapped by the [[akka.routing.Router]] leaving original * user message. */ def wrapIfNeeded: Any ⇒ Any = { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala index dfad366be4..6312ea0604 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala @@ -106,6 +106,7 @@ class ClusterSingletonManagerLeaveSpec extends MultiNodeSpec(ClusterSingletonMan within(10.seconds) { awaitAssert(cluster.state.members.count(m ⇒ m.status == MemberStatus.Up) should be(3)) } + enterBarrier("all-up") runOn(second) { cluster.leave(node(first).address) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index dc56109ae3..a8bf06bbe1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -50,7 +50,7 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { * * Each cluster [[Member]] is identified by its [[akka.actor.Address]], and * the cluster address of this actor system is [[#selfAddress]]. A member also has a status; - * initially [[MemberStatus.Joining]] followed by [[MemberStatus.Up]]. + * initially [[MemberStatus]] `Joining` followed by [[MemberStatus]] `Up`. */ class Cluster(val system: ExtendedActorSystem) extends Extension { @@ -212,11 +212,11 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { * The `to` classes can be [[akka.cluster.ClusterEvent.ClusterDomainEvent]] * or subclasses. * - * If `initialStateMode` is [[ClusterEvent.InitialStateAsEvents]] the events corresponding + * If `initialStateMode` is `ClusterEvent.InitialStateAsEvents` the events corresponding * to the current state will be sent to the subscriber to mimic what you would * have seen if you were listening to the events when they occurred in the past. 
* - * If `initialStateMode` is [[ClusterEvent.InitialStateAsSnapshot]] a snapshot of + * If `initialStateMode` is `ClusterEvent.InitialStateAsSnapshot` a snapshot of * [[akka.cluster.ClusterEvent.CurrentClusterState]] will be sent to the subscriber as the * first message. * @@ -274,8 +274,8 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { /** * Send command to issue state transition to LEAVING for the node specified by 'address'. - * The member will go through the status changes [[MemberStatus.Leaving]] (not published to - * subscribers) followed by [[MemberStatus.Exiting]] and finally [[MemberStatus.Removed]]. + * The member will go through the status changes [[MemberStatus]] `Leaving` (not published to + * subscribers) followed by [[MemberStatus]] `Exiting` and finally [[MemberStatus]] `Removed`. * * Note that this command can be issued to any member in the cluster, not necessarily the * one that is leaving. The cluster extension, but not the actor system or JVM, of the @@ -300,7 +300,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { /** * The supplied thunk will be run, once, when current cluster member is `Up`. - * Typically used together with configuration option `akka.cluster.min-nr-of-members' + * Typically used together with configuration option `akka.cluster.min-nr-of-members` * to defer some action, such as starting actors, until the cluster has reached * a certain size. */ @@ -309,7 +309,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { /** * Java API: The supplied callback will be run, once, when current cluster member is `Up`. - * Typically used together with configuration option `akka.cluster.min-nr-of-members' + * Typically used together with configuration option `akka.cluster.min-nr-of-members` * to defer some action, such as starting actors, until the cluster has reached * a certain size. */ @@ -344,7 +344,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. * * Should not called by the user. The user can issue a LEAVE command which will tell the node - * to go through graceful handoff process `LEAVE -> EXITING -> REMOVED -> SHUTDOWN`. + * to go through graceful handoff process `LEAVE -> EXITING -> REMOVED -> SHUTDOWN`. 
*/ private[cluster] def shutdown(): Unit = { if (_isTerminated.compareAndSet(false, true)) { diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index f91cf9f972..6d5fa627d8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -87,19 +87,19 @@ private[cluster] object InternalClusterAction { case object JoinSeedNode /** - * @see JoinSeedNode + * see JoinSeedNode */ @SerialVersionUID(1L) case object InitJoin extends ClusterMessage /** - * @see JoinSeedNode + * see JoinSeedNode */ @SerialVersionUID(1L) final case class InitJoinAck(address: Address) extends ClusterMessage /** - * @see JoinSeedNode + * see JoinSeedNode */ @SerialVersionUID(1L) final case class InitJoinNack(address: Address) extends ClusterMessage diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 6e154c3f5a..8332ae8b86 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -129,7 +129,7 @@ object ClusterEvent { } /** - * Member status changed to [[MemberStatus.Exiting]] and will be removed + * Member status changed to `MemberStatus.Exiting` and will be removed * when all members have seen the `Exiting` status. */ final case class MemberExited(member: Member) extends MemberEvent { @@ -178,7 +178,7 @@ object ClusterEvent { final case object ClusterShuttingDown extends ClusterDomainEvent /** - * Java API: get the singleton instance of [[ClusterShuttingDown]] event + * Java API: get the singleton instance of `ClusterShuttingDown` event */ def getClusterShuttingDownInstance = ClusterShuttingDown diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala index 80126a269d..bcf35858b1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala @@ -119,7 +119,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto } /** - * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus.Up]]. + * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus]] `Up`. */ def receiveState(state: CurrentClusterState): Unit = nodes = state.members collect { case m if m.status == Up ⇒ m.address } @@ -128,7 +128,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto * Samples the latest metrics for the node, updates metrics statistics in * [[akka.cluster.MetricsGossip]], and publishes the change to the event bus. * - * @see [[akka.cluster.ClusterMetricsCollector.collect( )]] + * @see [[akka.cluster.ClusterMetricsCollector#collect]] */ def collect(): Unit = { latestGossip :+= collector.sample() @@ -189,7 +189,7 @@ private[cluster] object MetricsGossip { private[cluster] final case class MetricsGossip(nodes: Set[NodeMetrics]) { /** - * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus.Up]] + * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus]] `Up`. 
*/ def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node)) @@ -386,7 +386,7 @@ object Metric extends MetricNumericConverter { * * @param address [[akka.actor.Address]] of the node the metrics are gathered at * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC - * @param metrics the set of sampled [[akka.actor.Metric]] + * @param metrics the set of sampled [[akka.cluster.Metric]] */ @SerialVersionUID(1L) @deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4") diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/Aggregator.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/Aggregator.scala index 10696d4fc1..64ed357bdc 100644 --- a/akka-contrib/src/main/scala/akka/contrib/pattern/Aggregator.scala +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/Aggregator.scala @@ -88,7 +88,6 @@ object WorkList { * Singly linked list entry implementation for WorkList. * @param ref The item reference, None for head entry * @param permanent If the entry is to be kept after processing - * @tparam T The type of the item */ class Entry[T](val ref: Option[T], val permanent: Boolean) { var next: Entry[T] = null @@ -101,7 +100,6 @@ object WorkList { * The list is not thread safe! However it is expected to be reentrant. This means a processing function can add/remove * entries from the list while processing. Most important, a processing function can remove its own entry from the list. * The first remove must return true and any subsequent removes must return false. - * @tparam T The type of the item */ class WorkList[T] { diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala index d1ed3b2149..cba19c70b6 100644 --- a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala @@ -173,22 +173,22 @@ import ReliableProxy._ * transition callbacks to those actors which subscribe using the * ``SubscribeTransitionCallBack`` and ``UnsubscribeTransitionCallBack`` * messages; see [[akka.actor.FSM]] for more documentation. The proxy will - * transition into [[ReliableProxy.Active]] state when ACKs - * are outstanding and return to the [[ReliableProxy.Idle]] + * transition into `ReliableProxy.Active` state when ACKs + * are outstanding and return to the `ReliableProxy.Idle` * state when every message send so far has been confirmed by the peer end-point. * - * The initial state of the proxy is [[ReliableProxy.Connecting]]. In this state the + * The initial state of the proxy is `ReliableProxy.Connecting`. In this state the * proxy will repeatedly send [[akka.actor.Identify]] messages to `ActorSelection(targetPath)` * in order to obtain a new `ActorRef` for the target. When an [[akka.actor.ActorIdentity]] * for the target is received a new tunnel will be created, a [[ReliableProxy.TargetChanged]] * message containing the target `ActorRef` will be sent to the proxy's transition subscribers - * and the proxy will transition into either the [[ReliableProxy.Idle]] or [[ReliableProxy.Active]] + * and the proxy will transition into either the `ReliableProxy.Idle` or `ReliableProxy.Active` * state, depending if there are any outstanding messages that need to be delivered. If * `maxConnectAttempts` is defined this actor will stop itself after `Identify` is sent * `maxConnectAttempts` times. 
* * While in the `Idle` or `Active` states, if a communication failure causes the tunnel to - * terminate via Remote Deathwatch the proxy will transition into the [[ReliableProxy.Connecting]] + * terminate via Remote Deathwatch the proxy will transition into the `ReliableProxy.Connecting` * state as described above. After reconnecting `TargetChanged` will be sent only if the target * `ActorRef` has changed. * diff --git a/akka-docs/rst/general/configuration.rst b/akka-docs/rst/general/configuration.rst index 3009ecceb7..e86f45e40c 100644 --- a/akka-docs/rst/general/configuration.rst +++ b/akka-docs/rst/general/configuration.rst @@ -204,7 +204,7 @@ before or after using them to construct an actor system: .. parsed-literal:: - Welcome to Scala version @scalaVersion@ (Java HotSpot(TM) 64-Bit Server VM, Java 1.6.0_27). + Welcome to Scala version @scalaVersion@ (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0). Type in expressions to have them evaluated. Type :help for more information. diff --git a/akka-docs/rst/intro/getting-started.rst b/akka-docs/rst/intro/getting-started.rst index 933ff1d316..6f38a94d34 100644 --- a/akka-docs/rst/intro/getting-started.rst +++ b/akka-docs/rst/intro/getting-started.rst @@ -4,9 +4,11 @@ Getting Started Prerequisites ------------- -Akka requires that you have `Java 1.6 `_ or +Akka requires that you have `Java 8 `_ or later installed on you machine. +`Typesafe `_ provides versions of Akka that are compatible with Java 6, 7 and 8. + Getting Started Guides and Template Projects -------------------------------------------- @@ -33,8 +35,7 @@ Akka is very modular and consists of several JARs containing different features. - ``akka-cluster`` – Cluster membership management, elastic routers. -- ``akka-osgi`` – base bundle for using Akka in OSGi containers, containing the - ``akka-actor`` classes +- ``akka-osgi`` – utilities for using Akka in OSGi containers - ``akka-osgi-aries`` – Aries blueprint for provisioning actor systems diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java b/akka-docs/rst/java/code/docs/actorlambda/ActorDocTest.java similarity index 99% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java rename to akka-docs/rst/java/code/docs/actorlambda/ActorDocTest.java index 56b47d4dc9..926c564973 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java +++ b/akka-docs/rst/java/code/docs/actorlambda/ActorDocTest.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. 
*/ -package docs.actor; +package docs.actorlambda; import akka.actor.*; import akka.japi.pf.ReceiveBuilder; @@ -13,8 +13,8 @@ import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import scala.PartialFunction; import scala.runtime.BoxedUnit; -import static docs.actor.Messages.Swap.Swap; -import static docs.actor.Messages.*; +import static docs.actorlambda.Messages.Swap.Swap; +import static docs.actorlambda.Messages.*; import static akka.japi.Util.immutableSeq; import java.util.concurrent.TimeUnit; diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java b/akka-docs/rst/java/code/docs/actorlambda/FaultHandlingTest.java similarity index 99% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java rename to akka-docs/rst/java/code/docs/actorlambda/FaultHandlingTest.java index a189b651d9..bfd225c297 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +++ b/akka-docs/rst/java/code/docs/actorlambda/FaultHandlingTest.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2015 Typesafe Inc. */ -package docs.actor; +package docs.actorlambda; //#testkit import akka.actor.*; diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/InitializationDocTest.java b/akka-docs/rst/java/code/docs/actorlambda/InitializationDocTest.java similarity index 99% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/InitializationDocTest.java rename to akka-docs/rst/java/code/docs/actorlambda/InitializationDocTest.java index e64c3e4999..cefe176db9 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/InitializationDocTest.java +++ b/akka-docs/rst/java/code/docs/actorlambda/InitializationDocTest.java @@ -1,8 +1,7 @@ -package docs.actor; /** * Copyright (C) 2009-2015 Typesafe Inc. */ - +package docs.actorlambda; import akka.actor.AbstractActor; import akka.actor.ActorRef; diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/Messages.java b/akka-docs/rst/java/code/docs/actorlambda/Messages.java similarity index 99% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/Messages.java rename to akka-docs/rst/java/code/docs/actorlambda/Messages.java index 4a5febbca2..788c94b383 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/Messages.java +++ b/akka-docs/rst/java/code/docs/actorlambda/Messages.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. */ -package docs.actor; +package docs.actorlambda; import java.util.ArrayList; import java.util.Collections; diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/MyActor.java b/akka-docs/rst/java/code/docs/actorlambda/MyActor.java similarity index 96% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/MyActor.java rename to akka-docs/rst/java/code/docs/actorlambda/MyActor.java index 2064849a85..9dec6d186c 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/MyActor.java +++ b/akka-docs/rst/java/code/docs/actorlambda/MyActor.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. 
*/ -package docs.actor; +package docs.actorlambda; //#imports import akka.actor.AbstractActor; diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/SampleActor.java b/akka-docs/rst/java/code/docs/actorlambda/SampleActor.java similarity index 97% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/SampleActor.java rename to akka-docs/rst/java/code/docs/actorlambda/SampleActor.java index 19a9d60ffb..6e7e1e362d 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/SampleActor.java +++ b/akka-docs/rst/java/code/docs/actorlambda/SampleActor.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. */ -package docs.actor; +package docs.actorlambda; //#sample-actor import akka.actor.AbstractActor; diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/SampleActorTest.java b/akka-docs/rst/java/code/docs/actorlambda/SampleActorTest.java similarity index 98% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/SampleActorTest.java rename to akka-docs/rst/java/code/docs/actorlambda/SampleActorTest.java index c6fa1b095d..d04b864f03 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/SampleActorTest.java +++ b/akka-docs/rst/java/code/docs/actorlambda/SampleActorTest.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. */ -package docs.actor; +package docs.actorlambda; import akka.actor.ActorRef; import akka.actor.ActorSystem; diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java b/akka-docs/rst/java/code/docs/actorlambda/fsm/Buncher.java similarity index 91% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java rename to akka-docs/rst/java/code/docs/actorlambda/fsm/Buncher.java index a5e1612bee..9e34d15cde 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java +++ b/akka-docs/rst/java/code/docs/actorlambda/fsm/Buncher.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. */ -package docs.actor.fsm; +package docs.actorlambda.fsm; //#simple-imports import akka.actor.AbstractFSM; @@ -14,11 +14,11 @@ import java.util.List; import scala.concurrent.duration.Duration; //#simple-imports -import static docs.actor.fsm.Buncher.Data; -import static docs.actor.fsm.Buncher.State.*; -import static docs.actor.fsm.Buncher.State; -import static docs.actor.fsm.Buncher.Uninitialized.*; -import static docs.actor.fsm.Events.*; +import static docs.actorlambda.fsm.Buncher.Data; +import static docs.actorlambda.fsm.Buncher.State.*; +import static docs.actorlambda.fsm.Buncher.State; +import static docs.actorlambda.fsm.Buncher.Uninitialized.*; +import static docs.actorlambda.fsm.Events.*; //#simple-fsm public class Buncher extends AbstractFSM { diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/BuncherTest.java b/akka-docs/rst/java/code/docs/actorlambda/fsm/BuncherTest.java similarity index 87% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/BuncherTest.java rename to akka-docs/rst/java/code/docs/actorlambda/fsm/BuncherTest.java index a17d6e270e..a443c0a6c4 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/BuncherTest.java +++ b/akka-docs/rst/java/code/docs/actorlambda/fsm/BuncherTest.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. 
*/ -package docs.actor.fsm; +package docs.actorlambda.fsm; import akka.actor.ActorRef; import akka.actor.ActorSystem; @@ -13,11 +13,11 @@ import org.junit.BeforeClass; import org.junit.Test; import java.util.LinkedList; -import docs.actor.fsm.*; -import static docs.actor.fsm.Events.Batch; -import static docs.actor.fsm.Events.Queue; -import static docs.actor.fsm.Events.SetTarget; -import static docs.actor.fsm.Events.Flush.Flush; +import docs.actorlambda.fsm.*; +import static docs.actorlambda.fsm.Events.Batch; +import static docs.actorlambda.fsm.Events.Queue; +import static docs.actorlambda.fsm.Events.SetTarget; +import static docs.actorlambda.fsm.Events.Flush.Flush; //#test-code public class BuncherTest { diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Events.java b/akka-docs/rst/java/code/docs/actorlambda/fsm/Events.java similarity index 98% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Events.java rename to akka-docs/rst/java/code/docs/actorlambda/fsm/Events.java index b0252a0c7f..c39bc15bf5 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Events.java +++ b/akka-docs/rst/java/code/docs/actorlambda/fsm/Events.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. */ -package docs.actor.fsm; +package docs.actorlambda.fsm; import akka.actor.ActorRef; import java.util.List; diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java b/akka-docs/rst/java/code/docs/actorlambda/fsm/FSMDocTest.java similarity index 94% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java rename to akka-docs/rst/java/code/docs/actorlambda/fsm/FSMDocTest.java index bd2bf84761..1fa74e1962 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +++ b/akka-docs/rst/java/code/docs/actorlambda/fsm/FSMDocTest.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2015 Typesafe Inc. */ -package docs.actor.fsm; +package docs.actorlambda.fsm; import akka.actor.*; import akka.testkit.JavaTestKit; @@ -14,8 +14,8 @@ import scala.concurrent.duration.Duration; import static org.junit.Assert.*; -import static docs.actor.fsm.FSMDocTest.StateType.*; -import static docs.actor.fsm.FSMDocTest.Messages.*; +import static docs.actorlambda.fsm.FSMDocTest.StateType.*; +import static docs.actorlambda.fsm.FSMDocTest.Messages.*; import static java.util.concurrent.TimeUnit.*; public class FSMDocTest { @@ -173,7 +173,7 @@ public class FSMDocTest { expectMsgEquals(Active); expectMsgEquals(Data.Foo); String msg = expectMsgClass(String.class); - assertThat(msg, CoreMatchers.startsWith("LogEntry(SomeState,Foo,Actor[akka://FSMDocTest/system/")); + assertTrue(msg.startsWith("LogEntry(SomeState,Foo,Actor[akka://FSMDocTest/system/")); }}; } } diff --git a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/rst/java/code/docs/actorlambda/japi/FaultHandlingDocSample.java similarity index 97% rename from akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/japi/FaultHandlingDocSample.java rename to akka-docs/rst/java/code/docs/actorlambda/japi/FaultHandlingDocSample.java index dadc4ec986..a623b9dbae 100644 --- a/akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/japi/FaultHandlingDocSample.java +++ b/akka-docs/rst/java/code/docs/actorlambda/japi/FaultHandlingDocSample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2015 Typesafe Inc. 
*/ -package docs.actor.japi; +package docs.actorlambda.japi; //#all //#imports @@ -28,10 +28,10 @@ import static akka.actor.SupervisorStrategy.escalate; import static akka.pattern.Patterns.ask; import static akka.pattern.Patterns.pipe; -import static docs.actor.japi.FaultHandlingDocSample.WorkerApi.*; -import static docs.actor.japi.FaultHandlingDocSample.CounterServiceApi.*; -import static docs.actor.japi.FaultHandlingDocSample.CounterApi.*; -import static docs.actor.japi.FaultHandlingDocSample.StorageApi.*; +import static docs.actorlambda.japi.FaultHandlingDocSample.WorkerApi.*; +import static docs.actorlambda.japi.FaultHandlingDocSample.CounterServiceApi.*; +import static docs.actorlambda.japi.FaultHandlingDocSample.CounterApi.*; +import static docs.actorlambda.japi.FaultHandlingDocSample.StorageApi.*; //#imports diff --git a/akka-samples/akka-docs-udp-multicast/src/main/java/docs/io/JavaUdpMulticast.java b/akka-docs/rst/java/code/docs/io/JavaUdpMulticast.java similarity index 100% rename from akka-samples/akka-docs-udp-multicast/src/main/java/docs/io/JavaUdpMulticast.java rename to akka-docs/rst/java/code/docs/io/JavaUdpMulticast.java diff --git a/akka-samples/akka-docs-udp-multicast/src/test/java/docs/io/JavaUdpMulticastTest.java b/akka-docs/rst/java/code/docs/io/JavaUdpMulticastTest.java similarity index 100% rename from akka-samples/akka-docs-udp-multicast/src/test/java/docs/io/JavaUdpMulticastTest.java rename to akka-docs/rst/java/code/docs/io/JavaUdpMulticastTest.java diff --git a/akka-docs/rst/java/io-udp.rst b/akka-docs/rst/java/io-udp.rst index a91024b461..f186c4d26f 100644 --- a/akka-docs/rst/java/io-udp.rst +++ b/akka-docs/rst/java/io-udp.rst @@ -95,12 +95,12 @@ To select a Protocol Family you must extend ``akka.io.Inet.DatagramChannelCreato class which implements ``akka.io.Inet.SocketOption``. Provide custom logic for opening a datagram channel by overriding :meth:`create` method. -.. includecode:: ../../../akka-samples/akka-docs-udp-multicast/src/main/java/docs/io/JavaUdpMulticast.java#inet6-protocol-family +.. includecode:: code/docs/io/JavaUdpMulticast.java#inet6-protocol-family Another socket option will be needed to join a multicast group. -.. includecode:: ../../../akka-samples/akka-docs-udp-multicast/src/main/java/docs/io/JavaUdpMulticast.java#multicast-group +.. includecode:: code/docs/io/JavaUdpMulticast.java#multicast-group Socket options must be provided to :meth:`UdpMessage.bind` command. -.. includecode:: ../../../akka-samples/akka-docs-udp-multicast/src/main/java/docs/io/JavaUdpMulticast.java#bind +.. includecode:: code/docs/io/JavaUdpMulticast.java#bind diff --git a/akka-docs/rst/java/lambda-actors.rst b/akka-docs/rst/java/lambda-actors.rst index b221531e38..c9ca3501fa 100644 --- a/akka-docs/rst/java/lambda-actors.rst +++ b/akka-docs/rst/java/lambda-actors.rst @@ -51,7 +51,7 @@ function there is a builder named ``ReceiveBuilder`` that you can use. Here is an example: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/MyActor.java +.. includecode:: code/docs/actorlambda/MyActor.java :include: imports,my-actor Please note that the Akka Actor ``receive`` message loop is exhaustive, which @@ -80,8 +80,8 @@ creating an actor including associated deployment information (e.g. which dispatcher to use, see more below). Here are some examples of how to create a :class:`Props` instance. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#import-props -.. 
includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#creating-props +.. includecode:: code/docs/actorlambda/ActorDocTest.java#import-props +.. includecode:: code/docs/actorlambda/ActorDocTest.java#creating-props The second variant shows how to pass constructor arguments to the :class:`Actor` being created, but it should only be used outside of actors as @@ -96,7 +96,7 @@ found. Dangerous Variants ^^^^^^^^^^^^^^^^^^ -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#creating-props-deprecated +.. includecode:: code/docs/actorlambda/ActorDocTest.java#creating-props-deprecated This method is not recommended to be used within another actor because it encourages to close over the enclosing scope, resulting in non-serializable @@ -128,14 +128,14 @@ associated with using the ``Props.create(...)`` method which takes a by-name argument, since within a companion object the given code block will not retain a reference to its enclosing scope: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#props-factory +.. includecode:: code/docs/actorlambda/ActorDocTest.java#props-factory Another good practice is to declare what messages an Actor can receive as close to the actor definition as possible (e.g. as static classes inside the Actor or using other suitable class), which makes it easier to know what it can receive. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#messages-in-companion +.. includecode:: code/docs/actorlambda/ActorDocTest.java#messages-in-companion Creating Actors with Props -------------------------- @@ -144,14 +144,14 @@ Actors are created by passing a :class:`Props` instance into the :meth:`actorOf` factory method which is available on :class:`ActorSystem` and :class:`ActorContext`. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#import-actorRef -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#system-actorOf +.. includecode:: code/docs/actorlambda/ActorDocTest.java#import-actorRef +.. includecode:: code/docs/actorlambda/ActorDocTest.java#system-actorOf Using the :class:`ActorSystem` will create top-level actors, supervised by the actor system’s provided guardian actor, while using an actor’s context will create a child actor. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#context-actorOf +.. includecode:: code/docs/actorlambda/ActorDocTest.java#context-actorOf :exclude: plus-some-behavior It is recommended to create a hierarchy of children, grand-children and so on @@ -321,7 +321,7 @@ termination (see `Stopping Actors`_). This service is provided by the Registering a monitor is easy: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#watch +.. includecode:: code/docs/actorlambda/ActorDocTest.java#watch It should be noted that the :class:`Terminated` message is generated independent of the order in which registration and termination occur. @@ -348,7 +348,7 @@ Start Hook Right after starting the actor, its :meth:`preStart` method is invoked. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#preStart +.. 
includecode:: code/docs/actorlambda/ActorDocTest.java#preStart This method is called when the actor is first created. During restarts it is called by the default implementation of :meth:`postRestart`, which means that @@ -427,7 +427,7 @@ actors may look up other actors by specifying absolute or relative paths—logical or physical—and receive back an :class:`ActorSelection` with the result: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#selection-local +.. includecode:: code/docs/actorlambda/ActorDocTest.java#selection-local .. note:: @@ -453,7 +453,7 @@ structure, i.e. the supervisor. The path elements of an actor selection may contain wildcard patterns allowing for broadcasting of messages to that section: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#selection-wildcard +.. includecode:: code/docs/actorlambda/ActorDocTest.java#selection-wildcard Messages can be sent via the :class:`ActorSelection` and the path of the :class:`ActorSelection` is looked up when delivering each message. If the selection @@ -469,8 +469,8 @@ actors which are traversed in the sense that if a concrete name lookup fails negative result is generated. Please note that this does not mean that delivery of that reply is guaranteed, it still is a normal message. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#import-identify -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#identify +.. includecode:: code/docs/actorlambda/ActorDocTest.java#import-identify +.. includecode:: code/docs/actorlambda/ActorDocTest.java#identify You can also acquire an :class:`ActorRef` for an :class:`ActorSelection` with the ``resolveOne`` method of the :class:`ActorSelection`. It returns a ``Future`` @@ -480,7 +480,7 @@ didn't complete within the supplied `timeout`. Remote actor addresses may also be looked up, if :ref:`remoting ` is enabled: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#selection-remote +.. includecode:: code/docs/actorlambda/ActorDocTest.java#selection-remote An example demonstrating actor look-up is given in :ref:`remote-sample-java`. @@ -537,7 +537,7 @@ Tell: Fire-forget This is the preferred way of sending messages. No blocking waiting for a message. This gives the best concurrency and scalability characteristics. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#tell +.. includecode:: code/docs/actorlambda/ActorDocTest.java#tell The sender reference is passed along with the message and available within the receiving actor via its :meth:`sender()` method while processing this @@ -577,7 +577,7 @@ more below. To complete the future with an exception you need send a Failure message to the sender. This is *not done automatically* when an actor throws an exception while processing a message. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#reply-exception +.. includecode:: code/docs/actorlambda/ActorDocTest.java#reply-exception If the actor does not complete the future, it will expire after the timeout period, specified as parameter to the ``ask`` method; this will complete the @@ -608,7 +608,7 @@ original sender address/reference is maintained even though the message is going through a 'mediator'. 
This can be useful when writing actors that work as routers, load-balancers, replicators etc. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#forward +.. includecode:: code/docs/actorlambda/ActorDocTest.java#forward Receive messages ================ @@ -616,13 +616,13 @@ Receive messages An Actor either has to set its initial receive behavior in the constructor by calling the :meth:`receive` method in the :class:`AbstractActor`: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java +.. includecode:: code/docs/actorlambda/ActorDocTest.java :include: receive-constructor :exclude: and-some-behavior or by implementing the :meth:`receive` method in the :class:`Actor` interface: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#receive +.. includecode:: code/docs/actorlambda/ActorDocTest.java#receive Both the argument to the :class:`AbstractActor` :meth:`receive` method and the return type of the :class:`Actor` :meth:`receive` method is a ``PartialFunction`` @@ -634,7 +634,7 @@ function there is a builder named ``ReceiveBuilder`` that you can use. Here is an example: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/MyActor.java +.. includecode:: code/docs/actorlambda/MyActor.java :include: imports,my-actor .. _LambdaActor.Reply: @@ -649,7 +649,7 @@ for replying later, or passing on to other actors. If there is no sender (a message was sent without an actor or future context) then the sender defaults to a 'dead-letter' actor ref. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/MyActor.java#reply +.. includecode:: code/docs/actorlambda/MyActor.java#reply Receive timeout @@ -667,7 +667,7 @@ timeout there must have been an idle period beforehand as configured via this me Once set, the receive timeout stays in effect (i.e. continues firing repeatedly after inactivity periods). Pass in `Duration.Undefined` to switch off this feature. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#receive-timeout +.. includecode:: code/docs/actorlambda/ActorDocTest.java#receive-timeout .. _stopping-actors-lambda: @@ -704,7 +704,7 @@ whole system. The :meth:`postStop()` hook is invoked after an actor is fully stopped. This enables cleaning up of resources: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#postStop +.. includecode:: code/docs/actorlambda/ActorDocTest.java#postStop :exclude: clean-up-some-resources .. note:: @@ -735,7 +735,7 @@ termination of several actors: .. includecode:: code/docs/actor/UntypedActorDocTest.java#gracefulStop -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#gracefulStop-actor +.. includecode:: code/docs/actorlambda/ActorDocTest.java#gracefulStop-actor When ``gracefulStop()`` returns successfully, the actor’s ``postStop()`` hook will have been executed: there exists a happens-before edge between the end of @@ -775,7 +775,7 @@ popped. To hotswap the Actor behavior using ``become``: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#hot-swap-actor +.. 
includecode:: code/docs/actorlambda/ActorDocTest.java#hot-swap-actor This variant of the :meth:`become` method is useful for many different things, such as to implement a Finite State Machine (FSM, for an example see `Dining @@ -791,7 +791,7 @@ of “pop” operations (i.e. :meth:`unbecome`) matches the number of “push” in the long run, otherwise this amounts to a memory leak (which is why this behavior is not the default). -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#swapper +.. includecode:: code/docs/actorlambda/ActorDocTest.java#swapper Stash @@ -816,7 +816,7 @@ order as they have been received originally. An actor that extends Here is an example of the ``AbstractActorWithStash`` class in action: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/ActorDocTest.java#stash +.. includecode:: code/docs/actorlambda/ActorDocTest.java#stash Invoking ``stash()`` adds the current message (the message that the actor received last) to the actor's stash. It is typically invoked @@ -949,7 +949,7 @@ for example in the presence of circular dependencies. In this case the actor sho and use ``become()`` or a finite state-machine state transition to encode the initialized and uninitialized states of the actor. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/InitializationDocTest.java#messageInit +.. includecode:: code/docs/actorlambda/InitializationDocTest.java#messageInit If the actor may receive messages before it has been initialized, a useful tool can be the ``Stash`` to save messages until the initialization finishes, and replaying them after the actor became initialized. diff --git a/akka-docs/rst/java/lambda-fault-tolerance-sample.rst b/akka-docs/rst/java/lambda-fault-tolerance-sample.rst index 7fdcb1dbc8..278d825038 100644 --- a/akka-docs/rst/java/lambda-fault-tolerance-sample.rst +++ b/akka-docs/rst/java/lambda-fault-tolerance-sample.rst @@ -49,5 +49,5 @@ Step Description Full Source Code of the Fault Tolerance Sample ------------------------------------------------------ -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/japi/FaultHandlingDocSample.java#all +.. includecode:: code/docs/actorlambda/japi/FaultHandlingDocSample.java#all diff --git a/akka-docs/rst/java/lambda-fault-tolerance.rst b/akka-docs/rst/java/lambda-fault-tolerance.rst index 7a9d27ddff..e05ce67cde 100644 --- a/akka-docs/rst/java/lambda-fault-tolerance.rst +++ b/akka-docs/rst/java/lambda-fault-tolerance.rst @@ -32,7 +32,7 @@ in more depth. For the sake of demonstration let us consider the following strategy: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: strategy I have chosen a few well-known exception types in order to demonstrate the @@ -109,49 +109,49 @@ Test Application The following section shows the effects of the different directives in practice, wherefor a test setup is needed. First off, we need a suitable supervisor: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: supervisor This supervisor will be used to create a child, with which we can experiment: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. 
includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: child The test is easier by using the utilities described in :ref:`akka-testkit`, where ``TestProbe`` provides an actor ref useful for receiving and inspecting replies. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: testkit Let us create actors: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: create The first test shall demonstrate the ``Resume`` directive, so we try it out by setting some non-initial state in the actor and have it fail: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: resume As you can see the value 42 survives the fault handling directive. Now, if we change the failure to a more serious ``NullPointerException``, that will no longer be the case: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: restart And finally in case of the fatal ``IllegalArgumentException`` the child will be terminated by the supervisor: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: stop Up to now the supervisor was completely unaffected by the child’s failure, because the directives set did handle it. In case of an ``Exception``, this is not true anymore and the supervisor escalates the failure. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: escalate-kill The supervisor itself is supervised by the top-level actor provided by the @@ -164,12 +164,12 @@ child not to survive this failure. In case this is not desired (which depends on the use case), we need to use a different supervisor which overrides this behavior. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: supervisor2 With this parent, the child survives the escalated restart, as demonstrated in the last test: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/FaultHandlingTest.java +.. includecode:: code/docs/actorlambda/FaultHandlingTest.java :include: escalate-restart diff --git a/akka-docs/rst/java/lambda-fsm.rst b/akka-docs/rst/java/lambda-fsm.rst index 35da0a7e2f..eaf3f9c589 100644 --- a/akka-docs/rst/java/lambda-fsm.rst +++ b/akka-docs/rst/java/lambda-fsm.rst @@ -38,11 +38,11 @@ send them on after the burst ended or a flush request is received. First, consider all of the below to use these import statements: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java#simple-imports +.. includecode:: code/docs/actorlambda/fsm/Buncher.java#simple-imports The contract of our “Buncher” actor is that it accepts or produces the following messages: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Events.java +.. 
includecode:: code/docs/actorlambda/fsm/Events.java :include: simple-events :exclude: boilerplate @@ -53,7 +53,7 @@ The contract of our “Buncher” actor is that it accepts or produces the follo The actor can be in two states: no message queued (aka ``Idle``) or some message queued (aka ``Active``). The states and the state data is defined like this: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java +.. includecode:: code/docs/actorlambda/fsm/Buncher.java :include: simple-state :exclude: boilerplate @@ -64,7 +64,7 @@ reference to send the batches to and the actual queue of messages. Now let’s take a look at the skeleton for our FSM actor: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java +.. includecode:: code/docs/actorlambda/fsm/Buncher.java :include: simple-fsm :exclude: transition-elided,unhandled-elided @@ -93,7 +93,7 @@ shall work identically in both states, we make use of the fact that any event which is not handled by the ``when()`` block is passed to the ``whenUnhandled()`` block: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java#unhandled-elided +.. includecode:: code/docs/actorlambda/fsm/Buncher.java#unhandled-elided The first case handled here is adding ``Queue()`` requests to the internal queue and going to the ``Active`` state (this does the obvious thing of staying @@ -107,7 +107,7 @@ target, for which we use the ``onTransition`` mechanism: you can declare multiple such blocks and all of them will be tried for matching behavior in case a state transition occurs (i.e. only when the state actually changes). -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java#transition-elided +.. includecode:: code/docs/actorlambda/fsm/Buncher.java#transition-elided The transition callback is a partial function which takes as input a pair of states—the current and the next state. During the state change, the old state @@ -117,7 +117,7 @@ available as ``nextStateData``. To verify that this buncher actually works, it is quite easy to write a test using the :ref:`akka-testkit`, here using JUnit as an example: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/BuncherTest.java +.. includecode:: code/docs/actorlambda/fsm/BuncherTest.java :include: test-code Reference @@ -129,7 +129,7 @@ The AbstractFSM Class The :class:`AbstractFSM` abstract class is the base class used to implement an FSM. It implements Actor since an Actor is created to drive the FSM. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java +.. includecode:: code/docs/actorlambda/fsm/Buncher.java :include: simple-fsm :exclude: fsm-body @@ -181,7 +181,7 @@ The :meth:`stateFunction` argument is a :class:`PartialFunction[Event, State]`, which is conveniently given using the state function builder syntax as demonstrated below: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/Buncher.java +.. includecode:: code/docs/actorlambda/fsm/Buncher.java :include: when-syntax .. warning:: @@ -193,7 +193,7 @@ It is recommended practice to declare the states as an enum and then verify that ``when`` clause for each of the states. If you want to leave the handling of a state “unhandled” (more below), it still needs to be declared like this: -.. 
includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java#NullFunction +.. includecode:: code/docs/actorlambda/fsm/FSMDocTest.java#NullFunction Defining the Initial State -------------------------- @@ -213,7 +213,7 @@ If a state doesn't handle a received event a warning is logged. If you want to do something else in this case you can specify that with :func:`whenUnhandled(stateFunction)`: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +.. includecode:: code/docs/actorlambda/fsm/FSMDocTest.java :include: unhandled-syntax Within this handler the state of the FSM may be queried using the @@ -257,7 +257,7 @@ of the modifiers described in the following: All modifiers can be chained to achieve a nice and concise description: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +.. includecode:: code/docs/actorlambda/fsm/FSMDocTest.java :include: modifier-syntax The parentheses are not actually needed in all cases, but they visually @@ -294,14 +294,14 @@ The handler is a partial function which takes a pair of states as input; no resulting state is needed as it is not possible to modify the transition in progress. -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +.. includecode:: code/docs/actorlambda/fsm/FSMDocTest.java :include: transition-syntax It is also possible to pass a function object accepting two states to :func:`onTransition`, in case your transition handling logic is implemented as a method: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +.. includecode:: code/docs/actorlambda/fsm/FSMDocTest.java :include: alt-transition-syntax The handlers registered with this method are stacked, so you can intersperse @@ -377,14 +377,14 @@ state data which is available during termination handling. the same way as a state transition (but note that the ``return`` statement may not be used within a :meth:`when` block). -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +.. includecode:: code/docs/actorlambda/fsm/FSMDocTest.java :include: stop-syntax You can use :func:`onTermination(handler)` to specify custom code that is executed when the FSM is stopped. The handler is a partial function which takes a :class:`StopEvent(reason, stateName, stateData)` as argument: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +.. includecode:: code/docs/actorlambda/fsm/FSMDocTest.java :include: termination-syntax As for the :func:`whenUnhandled` case, this handler is not stacked, so each @@ -418,7 +418,7 @@ Event Tracing The setting ``akka.actor.debug.fsm`` in :ref:`configuration` enables logging of an event trace by :class:`LoggingFSM` instances: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +.. includecode:: code/docs/actorlambda/fsm/FSMDocTest.java :include: logging-fsm :exclude: body-elided @@ -439,7 +439,7 @@ The :class:`AbstractLoggingFSM` class adds one more feature to the FSM: a rollin log which may be used during debugging (for tracing how the FSM entered a certain failure state) or for other creative uses: -.. includecode:: ../../../akka-samples/akka-docs-java-lambda/src/test/java/docs/actor/fsm/FSMDocTest.java +.. 
includecode:: code/docs/actorlambda/fsm/FSMDocTest.java :include: logging-fsm The :meth:`logDepth` defaults to zero, which turns off the event log. diff --git a/akka-samples/akka-docs-udp-multicast/src/main/scala/ScalaUdpMulticast.scala b/akka-docs/rst/scala/code/docs/io/ScalaUdpMulticast.scala similarity index 87% rename from akka-samples/akka-docs-udp-multicast/src/main/scala/ScalaUdpMulticast.scala rename to akka-docs/rst/scala/code/docs/io/ScalaUdpMulticast.scala index 1e08f3b10d..a617a79277 100644 --- a/akka-samples/akka-docs-udp-multicast/src/main/scala/ScalaUdpMulticast.scala +++ b/akka-docs/rst/scala/code/docs/io/ScalaUdpMulticast.scala @@ -4,13 +4,13 @@ package docs.io -import java.net.{InetAddress, InetSocketAddress, NetworkInterface, StandardProtocolFamily} +import java.net.{ InetAddress, InetSocketAddress, NetworkInterface, StandardProtocolFamily } import java.net.DatagramSocket import java.nio.channels.DatagramChannel -import akka.actor.{Actor, ActorLogging, ActorRef} -import akka.io.Inet.{DatagramChannelCreator, SocketOption, SocketOptionV2} -import akka.io.{IO, Udp} +import akka.actor.{ Actor, ActorLogging, ActorRef } +import akka.io.Inet.{ DatagramChannelCreator, SocketOption, SocketOptionV2 } +import akka.io.{ IO, Udp } import akka.util.ByteString //#inet6-protocol-family diff --git a/akka-samples/akka-docs-udp-multicast/src/test/scala/ScalaUdpMulticastSpec.scala b/akka-docs/rst/scala/code/docs/io/ScalaUdpMulticastSpec.scala similarity index 89% rename from akka-samples/akka-docs-udp-multicast/src/test/scala/ScalaUdpMulticastSpec.scala rename to akka-docs/rst/scala/code/docs/io/ScalaUdpMulticastSpec.scala index 0b9c22c67c..a4ba2dff71 100644 --- a/akka-samples/akka-docs-udp-multicast/src/test/scala/ScalaUdpMulticastSpec.scala +++ b/akka-docs/rst/scala/code/docs/io/ScalaUdpMulticastSpec.scala @@ -4,14 +4,14 @@ package docs.io -import java.net.{Inet6Address, InetSocketAddress, NetworkInterface, StandardProtocolFamily} +import java.net.{ Inet6Address, InetSocketAddress, NetworkInterface, StandardProtocolFamily } import java.nio.channels.DatagramChannel import scala.util.Random -import akka.actor.{ActorSystem, Props} +import akka.actor.{ ActorSystem, Props } import akka.io.Udp import akka.testkit.TestKit -import org.scalatest.{BeforeAndAfter, WordSpecLike} +import org.scalatest.{ BeforeAndAfter, WordSpecLike } import scala.collection.JavaConversions.enumerationAsScalaIterator @@ -42,7 +42,7 @@ class ScalaUdpMulticastSpec extends TestKit(ActorSystem("ScalaUdpMulticastSpec") } } - def afterAll = { + def afterAll(): Unit = { TestKit.shutdownActorSystem(system) } diff --git a/akka-docs/rst/scala/io-udp.rst b/akka-docs/rst/scala/io-udp.rst index 7390baaf60..0d1e10c14b 100644 --- a/akka-docs/rst/scala/io-udp.rst +++ b/akka-docs/rst/scala/io-udp.rst @@ -95,12 +95,12 @@ To select a Protocol Family you must extend ``akka.io.Inet.DatagramChannelCreato class which extends ``akka.io.Inet.SocketOption``. Provide custom logic for opening a datagram channel by overriding :meth:`create` method. -.. includecode:: ../../../akka-samples/akka-docs-udp-multicast/src/main/scala/ScalaUdpMulticast.scala#inet6-protocol-family +.. includecode:: code/docs/io/ScalaUdpMulticast.scala#inet6-protocol-family Another socket option will be needed to join a multicast group. -.. includecode:: ../../../akka-samples/akka-docs-udp-multicast/src/main/scala/ScalaUdpMulticast.scala#multicast-group +.. 
includecode:: code/docs/io/ScalaUdpMulticast.scala#multicast-group Socket options must be provided to :class:`UdpMessage.Bind` message. -.. includecode:: ../../../akka-samples/akka-docs-udp-multicast/src/main/scala/ScalaUdpMulticast.scala#bind +.. includecode:: code/docs/io/ScalaUdpMulticast.scala#bind diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala index 4e05343b87..0e17bdee6c 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -32,7 +32,7 @@ import akka.actor.DeadLetterSuppression * The conductor is the one orchestrating the test: it governs the * [[akka.remote.testconductor.Controller]]’s port to which all * [[akka.remote.testconductor.Player]]s connect, it issues commands to their - * [[akka.remote.testconductor.NetworkFailureInjector]] and provides support + * `akka.remote.testconductor.NetworkFailureInjector` and provides support * for barriers using the [[akka.remote.testconductor.BarrierCoordinator]]. * All of this is bundled inside the [[akka.remote.testconductor.TestConductorExt]] * extension. @@ -292,7 +292,7 @@ private[akka] object ServerFSM { * node name translations). * * In the Ready state, messages from the client are forwarded to the controller - * and [[akka.remote.testconductor.Send]] requests are sent, but the latter is + * and `Send` requests are sent, but the latter is * treated specially: all client operations are to be confirmed by a * [[akka.remote.testconductor.Done]] message, and there can be only one such * request outstanding at a given time (i.e. a Send fails if the previous has diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index 8e2836d703..dda89a2913 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -103,7 +103,7 @@ abstract class ActorSystemActivator extends BundleActivator { } /** - * By default, the [[akka.actor.ActorSystem]] name will be set to `bundle--ActorSystem`. Override this + * By default, the [[akka.actor.ActorSystem]] name will be set to `bundle-<bundle id>-ActorSystem`. Override this * method to define another name for your [[akka.actor.ActorSystem]] instance. * * @param context the bundle context diff --git a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala index e2d19b087a..57c8bc5749 100644 --- a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala +++ b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala @@ -27,7 +27,7 @@ class OsgiActorSystemFactory(val context: BundleContext, val fallbackClassLoader /** * Creates the [[akka.actor.ActorSystem]], using the name specified. * - * A default name (`bundle--ActorSystem`) is assigned when you pass along [[scala.None]] instead. + * A default name (`bundle-<bundle id>-ActorSystem`) is assigned when you pass along [[scala.None]] instead. 
*/ def createActorSystem(name: Option[String]): ActorSystem = ActorSystem(actorSystemName(name), actorSystemConfig(context), classloader) @@ -43,7 +43,7 @@ class OsgiActorSystemFactory(val context: BundleContext, val fallbackClassLoader /** * Determine the name for the [[akka.actor.ActorSystem]] - * Returns a default value of `bundle--ActorSystem` is no name is being specified + * Returns a default value of `bundle-<bundle id>-ActorSystem` is no name is being specified */ def actorSystemName(name: Option[String]): String = name.getOrElse("bundle-%s-ActorSystem".format(context.getBundle.getBundleId)) diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/japi/snapshot/JavaSnapshotStoreSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/japi/snapshot/JavaSnapshotStoreSpec.scala index 8a280d93b4..65148c7919 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/japi/snapshot/JavaSnapshotStoreSpec.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/japi/snapshot/JavaSnapshotStoreSpec.scala @@ -11,7 +11,7 @@ import org.scalatest.junit.JUnitRunner /** * JAVA API * - * This spec aims to verify custom akka-persistence [[SnapshotStore]] implementations. + * This spec aims to verify custom akka-persistence [[akka.persistence.snapshot.SnapshotStore]] implementations. * Plugin authors are highly encouraged to include it in their plugin's test suites. * * In case your snapshot-store plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll` diff --git a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala index af123748f5..b39df1425b 100644 --- a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala @@ -19,7 +19,7 @@ object AtLeastOnceDelivery { * Snapshot of current `AtLeastOnceDelivery` state. Can be retrieved with * [[AtLeastOnceDelivery#getDeliverySnapshot]] and saved with [[PersistentActor#saveSnapshot]]. * During recovery the snapshot received in [[SnapshotOffer]] should be set - * with [[AtLeastOnceDelivery.setDeliverySnapshot]]. + * with [[AtLeastOnceDelivery#setDeliverySnapshot]]. */ @SerialVersionUID(1L) case class AtLeastOnceDeliverySnapshot(currentDeliveryId: Long, unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery]) diff --git a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala index bffa470ced..28b8548c25 100644 --- a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala +++ b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala @@ -23,7 +23,7 @@ private[persistence] object JournalProtocol { sealed trait Response extends Message /** - * Reply message to a failed [[DeleteMessages]] request. + * Reply message to a failed [[DeleteMessagesTo]] request. */ final case class DeleteMessagesFailure(cause: Throwable) extends Response @@ -54,7 +54,7 @@ private[persistence] object JournalProtocol { /** * Reply message to a failed [[WriteMessages]] request. This reply is sent to the requestor - * before all subsequent [[WriteMessagFailure]] replies. + * before all subsequent [[WriteMessageFailure]] replies. * * @param cause failure cause. 
*/ diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala index 7ea21d038e..28b9b3b3a8 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala @@ -35,9 +35,9 @@ private[persistence] final case class NonPersistentRepr(payload: Any, sender: Ac /** * Plugin API: representation of a persistent message in the journal plugin API. * - * @see [[journal.SyncWriteJournal]] - * @see [[journal.AsyncWriteJournal]] - * @see [[journal.AsyncRecovery]] + * @see [[akka.persistence.journal.SyncWriteJournal]] + * @see [[akka.persistence.journal.AsyncWriteJournal]] + * @see [[akka.persistence.journal.AsyncRecovery]] */ trait PersistentRepr extends PersistentEnvelope with Message { import scala.collection.JavaConverters._ diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala index 9fa3d881cf..198e217baf 100644 --- a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala +++ b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala @@ -21,7 +21,7 @@ import scala.collection.immutable.VectorBuilder trait Message extends Serializable /** - * Protobuf serializer for [[PersistentRepr]] and [[AtLeastOnceDelivery]] messages. + * Protobuf serializer for [[akka.persistence.PersistentRepr]] and [[akka.persistence.AtLeastOnceDelivery]] messages. */ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer { import PersistentRepr.Undefined diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala index 42f5a990f6..975d653ec0 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ package akka.remote import akka.remote.testkit.MultiNodeConfig @@ -97,15 +100,15 @@ abstract class RemoteReDeploymentMultiJvmSpec extends MultiNodeSpec(RemoteReDepl "terminate the child when its parent system is replaced by a new one" in { val echo = system.actorOf(echoProps(testActor), "echo") - val address = node(second).address + enterBarrier("echo-started") runOn(second) { system.actorOf(Props[Parent], "parent") ! 
((Props[Hello], "hello")) - expectMsg("HelloParent") + expectMsg(15.seconds, "HelloParent") } runOn(first) { - expectMsg("PreStart") + expectMsg(15.seconds, "PreStart") } enterBarrier("first-deployed") @@ -136,14 +139,16 @@ abstract class RemoteReDeploymentMultiJvmSpec extends MultiNodeSpec(RemoteReDepl val p = TestProbe()(sys) sys.actorOf(echoProps(p.ref), "echo") p.send(sys.actorOf(Props[Parent], "parent"), (Props[Hello], "hello")) - p.expectMsg("HelloParent") + p.expectMsg(15.seconds, "HelloParent") } enterBarrier("re-deployed") runOn(first) { - if (expectQuarantine) expectMsg("PreStart") - else expectMsgAllOf("PostStop", "PreStart") + within(15.seconds) { + if (expectQuarantine) expectMsg("PreStart") + else expectMsgAllOf("PostStop", "PreStart") + } } enterBarrier("the-end") @@ -154,4 +159,4 @@ abstract class RemoteReDeploymentMultiJvmSpec extends MultiNodeSpec(RemoteReDepl } -} \ No newline at end of file +} diff --git a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala index 0436d162d0..658d32b36f 100644 --- a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala +++ b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala @@ -13,8 +13,8 @@ import akka.ConfigurationException * Interface for a registry of Akka failure detectors. New resources are implicitly registered when heartbeat is first * called with the resource given as parameter. * - * @tparam A - * The type of the key that identifies a resource to be monitored by a failure detector + * type parameter A: + * - The type of the key that identifies a resource to be monitored by a failure detector */ trait FailureDetectorRegistry[A] { @@ -56,7 +56,7 @@ private[akka] object FailureDetectorLoader { /** * Loads and instantiates a given [[FailureDetector]] implementation. The class to be loaded must have a constructor - * that accepts a [[com.typesafe.config.Config]] and an [[EventStream]] parameter. Will throw ConfigurationException + * that accepts a [[com.typesafe.config.Config]] and an [[akka.event.EventStream]] parameter. Will throw ConfigurationException * if the implementation cannot be loaded. * * @param fqcn Fully qualified class name of the implementation to be loaded. @@ -76,8 +76,8 @@ private[akka] object FailureDetectorLoader { /** * Loads and instantiates a given [[FailureDetector]] implementation. The class to be loaded must have a constructor - * that accepts a [[com.typesafe.config.Config]] and an [[EventStream]] parameter. Will throw ConfigurationException - * if the implementation cannot be loaded. Use [[FailureDetectorLoader#load]] if no implicit [[ActorContext]] is + * that accepts a [[com.typesafe.config.Config]] and an [[akka.event.EventStream]] parameter. Will throw ConfigurationException + * if the implementation cannot be loaded. Use [[FailureDetectorLoader#load]] if no implicit [[akka.actor.ActorContext]] is * available. * * @param fqcn Fully qualified class name of the implementation to be loaded. 
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala index 2f4d2d47f5..26b655738f 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala @@ -65,7 +65,7 @@ private[akka] object RemoteWatcher { * intercepts Watch and Unwatch system messages and sends corresponding * [[RemoteWatcher.WatchRemote]] and [[RemoteWatcher.UnwatchRemote]] to this actor. * - * For a new node to be watched this actor periodically sends [[RemoteWatcher.Heartbeat]] + * For a new node to be watched this actor periodically sends `RemoteWatcher.Heartbeat` * to the peer actor on the other node, which replies with [[RemoteWatcher.HeartbeatRsp]] * message back. The failure detector on the watching side monitors these heartbeat messages. * If arrival of hearbeat messages stops it will be detected and this actor will publish diff --git a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala index 178776221d..be365af7c5 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala @@ -189,11 +189,11 @@ object TestTransport { * @param logCallback * Function that will be called independently of the current active behavior * - * @tparam A - * Parameter type of the wrapped function. If it takes multiple parameters it must be wrapped in a tuple. + * type parameter A: + * - Parameter type of the wrapped function. If it takes multiple parameters it must be wrapped in a tuple. * - * @tparam B - * Type parameter of the future that the original function returns. + * type parameter B: + * - Type parameter of the future that the original function returns. */ class SwitchableLoggedBehavior[A, B](defaultBehavior: Behavior[A, B], logCallback: (A) ⇒ Unit) extends Behavior[A, B] { diff --git a/akka-remote/src/main/scala/akka/remote/transport/Transport.scala b/akka-remote/src/main/scala/akka/remote/transport/Transport.scala index a3d2f605aa..ba8b48c9dd 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/Transport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/Transport.scala @@ -24,7 +24,7 @@ object Transport { /** * Message sent to a [[akka.remote.transport.Transport.AssociationEventListener]] registered to a transport - * (via the Promise returned by [[akka.remote.transport.Transport.listen]]) when an inbound association request arrives. + * (via the Promise returned by [[akka.remote.transport.Transport#listen]]) when an inbound association request arrives. * * @param association * The handle for the inbound association. @@ -153,7 +153,7 @@ object AssociationHandle { /** * Message sent to the listener registered to an association (via the Promise returned by - * [[akka.remote.transport.AssociationHandle.readHandlerPromise]]) when an inbound payload arrives. + * [[akka.remote.transport.AssociationHandle#readHandlerPromise]]) when an inbound payload arrives. * * @param payload * The raw bytes that were sent by the remote endpoint. 
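Note (illustrative, not part of the patch): the AssociationHandle scaladoc touched above describes InboundPayload events being delivered to the listener with which readHandlerPromise was completed. A hedged sketch of such a listener is shown below; the class name and the println body are assumptions for illustration only, and the exact event/listener types are taken from akka.remote.transport.AssociationHandle as described in that scaladoc.

    import akka.remote.transport.AssociationHandle.{ HandleEvent, HandleEventListener, InboundPayload }

    // Illustrative only: a listener that merely prints the size of each inbound
    // payload. In real code it would be registered by completing
    // handle.readHandlerPromise with an instance of it.
    class PrintingHandleEventListener extends HandleEventListener {
      override def notify(ev: HandleEvent): Unit = ev match {
        case InboundPayload(payload) => println(s"received ${payload.length} bytes")
        case other                   => println(s"ignoring handle event: $other")
      }
    }
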
diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 83d4cdc6a7..ee3ab57fa7 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -99,14 +99,14 @@ class Ticket1978AES128CounterSecureRNGSpec extends Ticket1978CommunicationSpec(g class Ticket1978AES256CounterSecureRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterSecureRNG", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) /** - * Both of the Inet variants require access to the Internet to access random.org. + * Both of the `Inet` variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978AES128CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterInetRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) with InetRNGSpec /** - * Both of the Inet variants require access to the Internet to access random.org. + * Both of the `Inet` variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978AES256CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterInetRNG", "TLS_RSA_WITH_AES_256_CBC_SHA")) diff --git a/akka-samples/akka-docs-java-lambda/build.sbt b/akka-samples/akka-docs-java-lambda/build.sbt deleted file mode 100644 index c1a7ebbc5f..0000000000 --- a/akka-samples/akka-docs-java-lambda/build.sbt +++ /dev/null @@ -1,17 +0,0 @@ -name := "akka-docs-java-lambda" - -version := "2.4-SNAPSHOT" - -scalaVersion := "2.11.5" - -compileOrder := CompileOrder.ScalaThenJava - -javacOptions ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint") - -testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a") - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.4-SNAPSHOT", - "com.typesafe.akka" %% "akka-testkit" % "2.4-SNAPSHOT" % "test", - "junit" % "junit" % "4.11" % "test", - "com.novocode" % "junit-interface" % "0.10" % "test") diff --git a/akka-samples/akka-docs-java-lambda/pom.xml b/akka-samples/akka-docs-java-lambda/pom.xml deleted file mode 100644 index 599ef9ebbb..0000000000 --- a/akka-samples/akka-docs-java-lambda/pom.xml +++ /dev/null @@ -1,53 +0,0 @@ - - 4.0.0 - - - UTF-8 - - - sample - akka-docs-java-lambda - jar - 2.4-SNAPSHOT - - - - com.typesafe.akka - akka-actor_2.11 - 2.4-SNAPSHOT - - - com.typesafe.akka - akka-testkit_2.11 - 2.4-SNAPSHOT - - - junit - junit - 4.11 - test - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.1 - - 1.8 - 1.8 - true - - -Xlint - - - - - - - diff --git a/akka-samples/akka-docs-java-lambda/project/build.properties b/akka-samples/akka-docs-java-lambda/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-docs-java-lambda/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-docs-udp-multicast/build.sbt b/akka-samples/akka-docs-udp-multicast/build.sbt deleted file mode 100644 index f01ecc5913..0000000000 --- a/akka-samples/akka-docs-udp-multicast/build.sbt +++ /dev/null @@ -1,19 +0,0 @@ -name := "akka-docs-udp-multicast" - -version := "2.4-SNAPSHOT" - -scalaVersion := "2.11.5" - -compileOrder := CompileOrder.ScalaThenJava - -javacOptions ++= Seq("-source", "1.7", "-target", "1.7", "-Xlint") - 
-testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a") - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.4-SNAPSHOT", - "com.typesafe.akka" %% "akka-testkit" % "2.4-SNAPSHOT", - "org.scalatest" %% "scalatest" % "2.2.1" % "test", - "junit" % "junit" % "4.11" % "test", - "com.novocode" % "junit-interface" % "0.10" % "test" -) diff --git a/akka-samples/akka-docs-udp-multicast/project/build.properties b/akka-samples/akka-docs-udp-multicast/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-docs-udp-multicast/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-cluster-java/build.sbt b/akka-samples/akka-sample-cluster-java/build.sbt index 620dda5585..bbebc998cd 100644 --- a/akka-samples/akka-sample-cluster-java/build.sbt +++ b/akka-samples/akka-sample-cluster-java/build.sbt @@ -10,9 +10,9 @@ val project = Project( name := "akka-sample-cluster-java", version := "2.4-SNAPSHOT", scalaVersion := "2.11.5", - scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), - javacOptions in Compile ++= Seq("-source", "1.6", "-target", "1.6", "-Xlint:unchecked", "-Xlint:deprecation"), - javacOptions in doc in Compile := Seq("-source", "1.6"), // javadoc does not support -target and -Xlint flags + scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.8", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), + javacOptions in Compile ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-Xlint:deprecation"), + javacOptions in doc in Compile := Seq("-source", "1.8", "-Xdoclint:none"), libraryDependencies ++= Seq( "com.typesafe.akka" %% "akka-actor" % akkaVersion, "com.typesafe.akka" %% "akka-remote" % akkaVersion, diff --git a/akka-samples/akka-sample-cluster-scala/build.sbt b/akka-samples/akka-sample-cluster-scala/build.sbt index 4a33f0e2c2..9ec5792599 100644 --- a/akka-samples/akka-sample-cluster-scala/build.sbt +++ b/akka-samples/akka-sample-cluster-scala/build.sbt @@ -10,8 +10,8 @@ val project = Project( name := "akka-sample-cluster-scala", version := "2.4-SNAPSHOT", scalaVersion := "2.11.5", - scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), - javacOptions in Compile ++= Seq("-source", "1.6", "-target", "1.6", "-Xlint:unchecked", "-Xlint:deprecation"), + scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.8", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), + javacOptions in Compile ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-Xlint:deprecation"), libraryDependencies ++= Seq( "com.typesafe.akka" %% "akka-actor" % akkaVersion, "com.typesafe.akka" %% "akka-remote" % akkaVersion, diff --git a/akka-samples/akka-sample-fsm-java-lambda/build.sbt b/akka-samples/akka-sample-fsm-java-lambda/build.sbt index 26bb0271b3..adbf84cacd 100644 --- a/akka-samples/akka-sample-fsm-java-lambda/build.sbt +++ b/akka-samples/akka-sample-fsm-java-lambda/build.sbt @@ -2,9 +2,11 @@ name := "akka-docs-java-lambda" version := "2.4-SNAPSHOT" -scalaVersion := "2.11.5" +scalaVersion := "2.11.6" -javacOptions ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint") +javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint") + +javacOptions in 
doc ++= Seq("-encoding", "UTF-8", "-source", "1.8") testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a") diff --git a/akka-samples/akka-sample-persistence-java-lambda/build.sbt b/akka-samples/akka-sample-persistence-java-lambda/build.sbt index d5f81b571d..ca45c9fa46 100644 --- a/akka-samples/akka-sample-persistence-java-lambda/build.sbt +++ b/akka-samples/akka-sample-persistence-java-lambda/build.sbt @@ -4,7 +4,9 @@ version := "1.0" scalaVersion := "2.11.5" -javacOptions ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint") +javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint") + +javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-Xdoclint:none") libraryDependencies ++= Seq( "com.typesafe.akka" %% "akka-persistence-experimental" % "2.4-SNAPSHOT" diff --git a/akka-samples/akka-sample-supervision-java-lambda/build.sbt b/akka-samples/akka-sample-supervision-java-lambda/build.sbt index f114d4369e..ccf1af4a19 100644 --- a/akka-samples/akka-sample-supervision-java-lambda/build.sbt +++ b/akka-samples/akka-sample-supervision-java-lambda/build.sbt @@ -4,7 +4,9 @@ version := "1.0" scalaVersion := "2.11.5" -javacOptions ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint") +javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint") + +javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-Xdoclint:none") testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a") diff --git a/akka-testkit/src/main/java/akka/testkit/JavaTestKit.java b/akka-testkit/src/main/java/akka/testkit/JavaTestKit.java index 20d057d9da..3a67cc855b 100644 --- a/akka-testkit/src/main/java/akka/testkit/JavaTestKit.java +++ b/akka-testkit/src/main/java/akka/testkit/JavaTestKit.java @@ -332,7 +332,7 @@ public class JavaTestKit { * *

        * 
    -   * final String out = new ExpectMsg("match hint") {
    +   * final String out = new ExpectMsg<String>("match hint") {
        *   protected String match(Object in) {
        *     if (in instanceof Integer)
        *       return "match";
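
The hunk above only adds the missing type parameter in the Javadoc sample. For comparison, a minimal sketch of the same assertion with the Scala `TestKit` (the enclosing object and the probe parameter are illustrative, not part of the patch):

    import akka.testkit.TestKit

    object MatchHintSketch {
      // same shape as the Java example: turn an incoming Integer into the string "match"
      def expectMatch(probe: TestKit): String =
        probe.expectMsgPF(hint = "match hint") {
          case _: Integer => "match"
        }
    }
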
    diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/AveragingGauge.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/AveragingGauge.scala
    index ac4609df16..c9fe58cbf3 100644
    --- a/akka-testkit/src/test/scala/akka/testkit/metrics/AveragingGauge.scala
    +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/AveragingGauge.scala
    @@ -8,7 +8,7 @@ import com.codahale.metrics.Gauge
     /**
      * Gauge which exposes the Arithmetic Mean of values given to it.
      *
    - * Can be used to expose average of a series of values to [[com.codahale.metrics.ScheduledReporter]]s.
     + * Can be used to expose the average of a series of values to a `com.codahale.metrics.ScheduledReporter`.
      */
     class AveragingGauge extends Gauge[Double] {
     
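
Since the `ScheduledReporter` reference above is now plain text, here is a self-contained sketch (not the Akka implementation) of what such an averaging gauge amounts to against the plain Dropwizard Metrics API:

    import java.util.concurrent.atomic.LongAdder
    import com.codahale.metrics.Gauge

    // Exposes the arithmetic mean of all recorded values; any ScheduledReporter
    // (console, graphite, ...) can pick it up like a regular Gauge[Double].
    class MeanGauge extends Gauge[Double] {
      private val sum   = new LongAdder
      private val count = new LongAdder

      def add(value: Long): Unit = { sum.add(value); count.increment() }

      override def getValue: Double = {
        val n = count.sum()
        if (n == 0) 0.0 else sum.sum().toDouble / n
      }
    }
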
    diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
    index 6a06c8ec30..f89864c2da 100644
    --- a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
    +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
    @@ -10,7 +10,7 @@ import org.{ HdrHistogram ⇒ hdr }
      * Adapts Gil Tene's HdrHistogram to Metric's Metric interface.
      *
      * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
    - *                              integer that is >= 2.
    + *                              integer that is >= 2.
      * @param numberOfSignificantValueDigits The number of significant decimal digits to which the histogram will
      *                                       maintain value resolution and separation. Must be a non-negative
      *                                       integer between 0 and 5.
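
The escaped `&gt;=` only changes how the constraint renders; the constraint itself comes from the HdrHistogram constructor this class wraps. An illustrative construction (the values are examples, not Akka's defaults):

    import org.HdrHistogram.Histogram

    object HdrSketch extends App {
      // highestTrackableValue must be >= 2, significant digits between 0 and 5
      val latencies = new Histogram(3600000L, 3)
      latencies.recordValue(42L)
      println(latencies.getValueAtPercentile(99.0))
    }
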
    diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala
    index 6d67c00470..25d18a385e 100644
    --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala
    +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala
    @@ -23,7 +23,7 @@ import scala.reflect.ClassTag
      * please refer to JMH if that's what you're writing.
      * This trait instead aims to give an high level overview as well as data for trend-analysis of long running tests.
      *
    - * Reporting defaults to [[ConsoleReporter]].
    + * Reporting defaults to `ConsoleReporter`.
      * In order to send registry to Graphite run sbt with the following property: `-Dakka.registry.reporting.0=graphite`.
      */
     private[akka] trait MetricsKit extends MetricsKitOps {
    @@ -195,7 +195,7 @@ private[akka] object MetricsKit {
       }
     }
     
    -/** Provides access to custom Akka [[Metric]]s, with named methods. */
     +/** Provides access to custom Akka metrics (implementations of `com.codahale.metrics.Metric`), with named methods. */
     trait AkkaMetricRegistry {
       this: MetricRegistry ⇒
     
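
For context on the `ConsoleReporter` default mentioned in the first hunk above, this is roughly what that reporting looks like with plain Dropwizard Metrics (the registry contents and the 30-second period are illustrative):

    import java.util.concurrent.TimeUnit
    import com.codahale.metrics.{ ConsoleReporter, MetricRegistry }

    object ConsoleReportingSketch {
      val registry = new MetricRegistry

      val reporter: ConsoleReporter =
        ConsoleReporter.forRegistry(registry)
          .convertRatesTo(TimeUnit.SECONDS)
          .convertDurationsTo(TimeUnit.MILLISECONDS)
          .build()

      // dump all registered metrics to stdout every 30 seconds
      def startReporting(): Unit = reporter.start(30, TimeUnit.SECONDS)
    }
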
    diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
    index dadf79faa0..68e94f452c 100644
    --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
    +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
    @@ -19,7 +19,7 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL {
     
       type MetricKey = MetricKeyDSL#MetricKey
     
    -  /** Simple thread-safe counter, backed by [[LongAdder]] so can pretty efficiently work even when hit by multiple threads */
     +  /** Simple thread-safe counter, backed by `java.util.concurrent.LongAdder`, so it remains efficient even when updated by multiple threads */
       def counter(key: MetricKey): Counter = registry.counter(key.toString)
     
       /** Simple averaging Gauge, which exposes an arithmetic mean of the values added to it. */
    @@ -49,7 +49,7 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL {
       /**
        * Use when measuring for 9x'th percentiles as well as min / max / mean values.
        *
    -   * Backed by [[ExponentiallyDecayingReservoir]].
    +   * Backed by codahale `ExponentiallyDecayingReservoir`.
        */
       def histogram(key: MetricKey): Histogram = {
         registry.histogram((key / "histogram").toString)
    @@ -62,10 +62,10 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL {
       }
     
       /**
    -   * Enable memory measurements - will be logged by [[ScheduledReporter]]s if enabled.
     +   * Enable memory measurements - they will be logged by `ScheduledReporter`s if enabled.
        * Must not be triggered multiple times - pass around the `MemoryUsageSnapshotting` if you need to measure different points.
        *
    -   * Also allows to [[MemoryUsageSnapshotting.getHeapSnapshot]] to obtain memory usage numbers at given point in time.
     +   * Also allows calling `MemoryUsageSnapshotting.getHeapSnapshot` to obtain memory usage numbers at a given point in time.
        */
       def measureMemory(key: MetricKey): MemoryUsageGaugeSet with MemoryUsageSnapshotting = {
         val gaugeSet = new jvm.MemoryUsageGaugeSet() with MemoryUsageSnapshotting {
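
The references above now name the Dropwizard classes directly; a sketch of the same building blocks used against that library (the metric key string is made up):

    import java.util.concurrent.atomic.LongAdder
    import com.codahale.metrics.{ ExponentiallyDecayingReservoir, Histogram, MetricRegistry }

    object MetricsWiringSketch {
      val registry = new MetricRegistry

      // a raw LongAdder works as a cheap thread-safe counter under concurrent updates
      val hits = new LongAdder
      hits.increment()

      // histogram suited for 9x'th percentiles as well as min / max / mean
      val latency = new Histogram(new ExponentiallyDecayingReservoir())
      registry.register("example.latency/histogram", latency)
      latency.update(17)
    }
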
    diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
    index 6d79f45aa0..97e67b781c 100644
    --- a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
    +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
    @@ -11,7 +11,7 @@ import akka.testkit.metrics._
     import scala.reflect.ClassTag
     
     /**
    - * Used to report [[Metric]] types that the original [[ConsoleReporter]] is unaware of (cannot re-use directly because of private constructor).
     + * Used to report custom `akka.testkit.metrics` metric types that the original `com.codahale.metrics.ConsoleReporter` is unaware of (cannot re-use directly because of private constructor).
      */
     class AkkaConsoleReporter(
       registry: AkkaMetricRegistry,
    diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaGraphiteReporter.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaGraphiteReporter.scala
    index d3457f1c18..736b5dc716 100644
    --- a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaGraphiteReporter.scala
    +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaGraphiteReporter.scala
    @@ -12,7 +12,7 @@ import akka.testkit.metrics._
     import scala.concurrent.duration._
     
     /**
    - * Used to report [[Metric]] types that the original [[com.codahale.metrics.graphite.GraphiteReporter]] is unaware of (cannot re-use directly because of private constructor).
    + * Used to report `com.codahale.metrics.Metric` types that the original `com.codahale.metrics.graphite.GraphiteReporter` is unaware of (cannot re-use directly because of private constructor).
      */
     class AkkaGraphiteReporter(
       registry: AkkaMetricRegistry,
    diff --git a/akka-typed/src/main/scala/akka/typed/ActorContext.scala b/akka-typed/src/main/scala/akka/typed/ActorContext.scala
    index d86ff35c50..f0a60e6711 100644
    --- a/akka-typed/src/main/scala/akka/typed/ActorContext.scala
    +++ b/akka-typed/src/main/scala/akka/typed/ActorContext.scala
    @@ -113,13 +113,13 @@ trait ActorContext[T] {
       def watch(other: akka.actor.ActorRef): akka.actor.ActorRef
     
       /**
    -   * Revoke the registration established by [[#watch[U]* watch]]. A [[Terminated]]
    +   * Revoke the registration established by `watch`. A [[Terminated]]
        * notification will not subsequently be received for the referenced Actor.
        */
       def unwatch[U](other: ActorRef[U]): ActorRef[U]
     
       /**
    -   * Revoke the registration established by [[#watch(* watch]]. A [[Terminated]]
    +   * Revoke the registration established by `watch`. A [[Terminated]]
        * notification will not subsequently be received for the referenced Actor.
        */
       def unwatch(other: akka.actor.ActorRef): akka.actor.ActorRef
    @@ -135,7 +135,7 @@ trait ActorContext[T] {
       /**
        * Schedule the sending of the given message to the given target Actor after
        * the given time period has elapsed. The scheduled action can be cancelled
    -   * by invoking [[akka.actor.Cancellable!.cancel* cancel]] on the returned
     +   * by invoking `cancel` on the returned [[akka.actor.Cancellable]]
        * handle.
        */
       def schedule[U](delay: FiniteDuration, target: ActorRef[U], msg: U): untyped.Cancellable
    @@ -157,9 +157,9 @@ trait ActorContext[T] {
     /**
      * An [[ActorContext]] for synchronous execution of a [[Behavior]] that
      * provides only stubs for the effects an Actor can perform and replaces
    - * created child Actors by [[Inbox$.sync* a synchronous Inbox]].
    + * created child Actors by a synchronous Inbox (see `Inbox.sync`).
      *
    - * @see [[EffectfulActorContext]] for more advanced uses.
    + * See [[EffectfulActorContext]] for more advanced uses.
      */
     class StubbedActorContext[T](
       val name: String,
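
The `schedule` signature visible in the context above returns an `akka.actor.Cancellable`; a small sketch of the contract the reworded scaladoc describes (the `Tick` message and the one-second delay are made up):

    import scala.concurrent.duration._
    import akka.typed.{ ActorContext, ActorRef }

    object ScheduleSketch {
      case object Tick

      def scheduleTick(ctx: ActorContext[_], target: ActorRef[Tick.type]): Unit = {
        val handle = ctx.schedule(1.second, target, Tick)
        // the returned handle is how the scheduled send can later be revoked
        handle.cancel()
      }
    }
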
    diff --git a/akka-typed/src/main/scala/akka/typed/ActorRef.scala b/akka-typed/src/main/scala/akka/typed/ActorRef.scala
    index 127088c34f..a56edcb5c3 100644
    --- a/akka-typed/src/main/scala/akka/typed/ActorRef.scala
    +++ b/akka-typed/src/main/scala/akka/typed/ActorRef.scala
    @@ -13,7 +13,7 @@ import language.implicitConversions
      * Actor instance. Sending a message to an Actor that has terminated before
      * receiving the message will lead to that message being discarded; such
      * messages are delivered to the [[akka.actor.DeadLetter]] channel of the
    - * [[akka.actor.ActorSystem!.eventStream EventStream]] on a best effort basis
    + * [[akka.event.EventStream]] on a best effort basis
      * (i.e. this delivery is not reliable).
      */
     abstract class ActorRef[-T] extends java.lang.Comparable[ActorRef[_]] { this: ScalaActorRef[T] ⇒
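
Dead letters end up on the untyped system's event stream, so they can be observed with the regular untyped API; a minimal sketch (system and actor names are illustrative):

    import akka.actor.{ Actor, ActorSystem, DeadLetter, Props }

    class DeadLetterListener extends Actor {
      def receive = {
        case DeadLetter(msg, from, to) =>
          println(s"dead letter from $from to $to: $msg")
      }
    }

    object DeadLetterSketch extends App {
      val system   = ActorSystem("example")
      val listener = system.actorOf(Props[DeadLetterListener], "dead-letter-listener")
      system.eventStream.subscribe(listener, classOf[DeadLetter])
    }
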
    diff --git a/akka-typed/src/main/scala/akka/typed/ActorSystem.scala b/akka-typed/src/main/scala/akka/typed/ActorSystem.scala
    index cbef998b56..7d6d8fb51b 100644
    --- a/akka-typed/src/main/scala/akka/typed/ActorSystem.scala
    +++ b/akka-typed/src/main/scala/akka/typed/ActorSystem.scala
    @@ -23,7 +23,7 @@ import akka.dispatch.Dispatchers
     
     /**
      * An ActorSystem is home to a hierarchy of Actors. It is created using
    - * [[ActorSystem$.apply[T]*]] from a [[Props]] object that describes the root
    + * [[ActorSystem$]] `apply` from a [[Props]] object that describes the root
      * Actor of this hierarchy and which will create all other Actors beneath it.
      * A system also implements the [[ActorRef]] type, and sending a message to
      * the system directs that message to the root Actor.
    diff --git a/akka-typed/src/main/scala/akka/typed/Ask.scala b/akka-typed/src/main/scala/akka/typed/Ask.scala
    index bf7da75a75..715c266c9b 100644
    --- a/akka-typed/src/main/scala/akka/typed/Ask.scala
    +++ b/akka-typed/src/main/scala/akka/typed/Ask.scala
    @@ -20,8 +20,8 @@ import akka.actor.Status
      * implementation will fabricate a (hidden) [[ActorRef]] that is bound to a
      * [[scala.concurrent.Promise]]. This ActorRef will need to be injected in the
      * message that is sent to the target Actor in order to function as a reply-to
    - * address, therefore the argument to the [[AskPattern$.Askable ask
    - * operator]] is not the message itself but a function that given the reply-to
    + * address, therefore the argument to the ask / `?`
    + * operator is not the message itself but a function that given the reply-to
      * address will create the message.
      *
      * {{{
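
A sketch of the “function of the reply-to address” shape described above; `Request` and `Reply` are invented protocol types, and the actual `?` call is kept in a comment because it additionally needs the implicits required by the typed ask support:

    import akka.typed.ActorRef

    final case class Reply(answer: String)
    final case class Request(query: String, replyTo: ActorRef[Reply])

    // With the typed ask support in scope, the operator is handed the message
    // *constructor*, and the fabricated reply-to reference is filled in for us:
    //
    //   service ? (replyTo => Request("meaning of life?", replyTo))   // : Future[Reply]
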
    diff --git a/akka-typed/src/main/scala/akka/typed/Behavior.scala b/akka-typed/src/main/scala/akka/typed/Behavior.scala
    index 0a7306b0f6..9783fc0248 100644
    --- a/akka-typed/src/main/scala/akka/typed/Behavior.scala
    +++ b/akka-typed/src/main/scala/akka/typed/Behavior.scala
    @@ -34,7 +34,7 @@ abstract class Behavior[T] {
        *  * returning `Same` designates to reuse the current Behavior
        *  * returning `Unhandled` keeps the same Behavior and signals that the message was not yet handled
        *
    -   * Code calling this method should use [[Behavior$.canonicalize]] to replace
    +   * Code calling this method should use [[Behavior$]] `canonicalize` to replace
        * the special objects with real Behaviors.
        */
       def management(ctx: ActorContext[T], msg: Signal): Behavior[T]
    @@ -49,7 +49,7 @@ abstract class Behavior[T] {
        *  * returning `Same` designates to reuse the current Behavior
        *  * returning `Unhandled` keeps the same Behavior and signals that the message was not yet handled
        *
    -   * Code calling this method should use [[Behavior$.canonicalize]] to replace
    +   * Code calling this method should use [[Behavior$]] `canonicalize` to replace
        * the special objects with real Behaviors.
        */
       def message(ctx: ActorContext[T], msg: T): Behavior[T]
    @@ -103,7 +103,7 @@ final case class PostRestart(failure: Throwable) extends Signal
      * registered watchers after this signal has been processed.
      *
      * IMPORTANT NOTE: if the actor terminated by switching to the
    - * [[ScalaDSL$.Stopped]] behavior then this signal will be ignored (i.e. the
    + * `Stopped` behavior then this signal will be ignored (i.e. the
      * Stopped behvavior will do nothing in reaction to it).
      */
     @SerialVersionUID(1L)
    @@ -136,7 +136,7 @@ final case object ReceiveTimeout extends Signal
     /**
      * Lifecycle signal that is fired when an Actor that was watched has terminated.
      * Watching is performed by invoking the
    - * [[akka.typed.ActorContext!.watch[U]* watch]] method. The DeathWatch service is
    + * [[akka.typed.ActorContext]] `watch` method. The DeathWatch service is
      * idempotent, meaning that registering twice has the same effect as registering
      * once. Registration does not need to happen before the Actor terminates, a
      * notification is guaranteed to arrive after both registration and termination
    @@ -267,7 +267,7 @@ object Behavior {
     
       /**
        * Given a possibly special behavior (same or unhandled) and a
    -   * “current” behavior (which defines the meaning of encountering a [[ScalaDSL$.Same]]
    +   * “current” behavior (which defines the meaning of encountering a `Same`
        * behavior) this method unwraps the behavior such that the innermost behavior
        * is returned, i.e. it removes the decorations.
        */
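
A self-contained toy model (not the Akka types) of the contract described above: the value returned by a behavior may be one of the special markers, and callers canonicalize it back to the behavior they already had:

    object CanonicalizeSketch {
      sealed trait Step[+T]
      final case class Become[T](state: T) extends Step[T]
      case object Same      extends Step[Nothing]
      case object Unhandled extends Step[Nothing]

      // the special objects never become the "current" behavior;
      // they are replaced by the behavior that produced them
      def canonicalize[T](returned: Step[T], current: Step[T]): Step[T] =
        returned match {
          case Same | Unhandled => current
          case real             => real
        }
    }
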
    diff --git a/akka-typed/src/main/scala/akka/typed/ScalaDSL.scala b/akka-typed/src/main/scala/akka/typed/ScalaDSL.scala
    index 4ff7478c3a..eb066cf3c2 100644
    --- a/akka-typed/src/main/scala/akka/typed/ScalaDSL.scala
    +++ b/akka-typed/src/main/scala/akka/typed/ScalaDSL.scala
    @@ -101,10 +101,10 @@ object ScalaDSL {
       def Ignore[T]: Behavior[T] = ignoreBehavior.asInstanceOf[Behavior[T]]
     
       /**
    -   * Algebraic Data Type modeling either a [[ScalaDSL$.Msg message]] or a
    -   * [[ScalaDSL$.Sig signal]], including the [[ActorContext]]. This type is
    +   * Algebraic Data Type modeling either a [[Msg message]] or a
    +   * [[Sig signal]], including the [[ActorContext]]. This type is
        * used by several of the behaviors defined in this DSL, see for example
    -   * [[ScalaDSL$.Full]].
    +   * [[Full]].
        */
       sealed trait MessageOrSignal[T]
       /**
    @@ -185,7 +185,7 @@ object ScalaDSL {
       /**
        * This type of Behavior is created from a partial function from the declared
        * message type to the next behavior, flagging all unmatched messages as
    -   * [[ScalaDSL$.Unhandled]]. All system signals are
    +   * [[Unhandled]]. All system signals are
        * ignored by this behavior, which implies that a failure of a child actor
        * will be escalated unconditionally.
        *
    @@ -226,7 +226,7 @@ object ScalaDSL {
       }
     
       /**
    -   * This type of behavior is a variant of [[ScalaDSL$.Total]] that does not
    +   * This type of behavior is a variant of [[Total]] that does not
        * allow the actor to change behavior. It is an efficient choice for stateless
        * actors, possibly entering such a behavior after finishing its
        * initialization (which may be modeled using any of the other behavior types).
    @@ -250,7 +250,7 @@ object ScalaDSL {
        * messages that were sent to the synchronous self reference will be lost.
        *
        * This decorator is useful for passing messages between the left and right
    -   * sides of [[ScalaDSL$.And]] and [[ScalaDSL$.Or]] combinators.
    +   * sides of [[And]] and [[Or]] combinators.
        */
       final case class SynchronousSelf[T](f: ActorRef[T] ⇒ Behavior[T]) extends Behavior[T] {
         private val inbox = Inbox.sync[T]("syncbox")
    @@ -324,7 +324,7 @@ object ScalaDSL {
        * A behavior combinator that feeds incoming messages and signals either into
        * the left or right sub-behavior and allows them to evolve independently of
        * each other. The message or signal is passed first into the left sub-behavior
    -   * and only if that results in [[ScalaDSL$.Unhandled]] is it passed to the right
    +   * and only if that results in [[Unhandled]] is it passed to the right
        * sub-behavior. When one of the sub-behaviors terminates the other takes over
        * exclusively. When both sub-behaviors respond to a [[Failed]] signal, the
        * response with the higher precedence is chosen (see [[Failed$]]).
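
A short sketch of the constructs the scaladoc above refers to (`Total`, returning a new behavior, and `Stopped`); the `Command` protocol is invented, and the constructor shapes are assumed to be those of the experimental `ScalaDSL` at this point in time:

    import akka.typed.Behavior
    import akka.typed.ScalaDSL._

    object CounterSketch {
      sealed trait Command
      case object Increment extends Command
      case object Shutdown  extends Command

      def counting(n: Int): Behavior[Command] = Total[Command] {
        case Increment => counting(n + 1) // "become" a new behavior with updated state
        case Shutdown  => Stopped         // watchers then receive Terminated, as noted above
      }
    }
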
    diff --git a/akka-typed/src/main/scala/akka/typed/StepWise.scala b/akka-typed/src/main/scala/akka/typed/StepWise.scala
    index 492f540a1e..fb3d8cead2 100644
    --- a/akka-typed/src/main/scala/akka/typed/StepWise.scala
    +++ b/akka-typed/src/main/scala/akka/typed/StepWise.scala
    @@ -33,7 +33,7 @@ import scala.util.control.NonFatal
      *
      * This way of writing Actors can be very useful when writing Actor-based
      * test procedures for actor systems, hence also the possibility to expect
    - * failures (see [[StepWise$.Steps!.expectFailure]]).
    + * failures (see [[StepWise.Steps#expectFailure]]).
      */
     object StepWise {
       import Behavior._
    @@ -173,3 +173,5 @@ object StepWise {
           case Nil ⇒ Stopped
         }
     }
    +
    +abstract class StepWise
    \ No newline at end of file
    diff --git a/akka-typed/src/main/scala/akka/typed/patterns/Receptionist.scala b/akka-typed/src/main/scala/akka/typed/patterns/Receptionist.scala
    index a20c715c0d..19c32d966d 100644
    --- a/akka-typed/src/main/scala/akka/typed/patterns/Receptionist.scala
    +++ b/akka-typed/src/main/scala/akka/typed/patterns/Receptionist.scala
    @@ -18,7 +18,7 @@ import akka.util.TypedMultiMap
     object Receptionist {
     
       /**
    -   * Internal representation of [[Receptionist$.ServiceKey]] which is needed
    +   * Internal representation of [[Receptionist.ServiceKey]] which is needed
        * in order to use a TypedMultiMap (using keys with a type parameter does not
        * work in Scala 2.x).
        */
    @@ -41,7 +41,7 @@ object Receptionist {
        */
       sealed trait Command
       /**
    -   * Associate the given [[ActorRef]] with the given [[ServiceKey]]. Multiple
    +   * Associate the given [[akka.typed.ActorRef]] with the given [[ServiceKey]]. Multiple
        * registrations can be made for the same key. Unregistration is implied by
        * the end of the referenced Actor’s lifecycle.
        */
    @@ -53,7 +53,7 @@ object Receptionist {
       final case class Find[T](key: ServiceKey[T])(val replyTo: ActorRef[Listing[T]]) extends Command
     
       /**
    -   * Confirmtion that the given [[ActorRef]] has been associated with the [[ServiceKey]].
     +   * Confirmation that the given [[akka.typed.ActorRef]] has been associated with the [[ServiceKey]].
        */
       final case class Registered[T](key: ServiceKey[T], address: ActorRef[T])
       /**
    @@ -85,3 +85,5 @@ object Receptionist {
           behavior(map valueRemoved ref)
       }
     }
    +
    +abstract class Receptionist
    \ No newline at end of file
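
The registration protocol above, spelled out in one place; the shapes follow the spec changes below, while `ServiceA`, `ServiceKeyA` and the actor references are illustrative:

    import akka.typed.ActorRef
    import akka.typed.patterns.Receptionist._

    object ReceptionistProtocolSketch {
      trait ServiceA
      case object ServiceKeyA extends ServiceKey[ServiceA]

      def registerAndLookup(
        receptionist: ActorRef[Command],
        service:      ActorRef[ServiceA],
        onRegistered: ActorRef[Registered[ServiceA]],
        onListing:    ActorRef[Listing[ServiceA]]): Unit = {

        // replied to with Registered(ServiceKeyA, service)
        receptionist ! Register(ServiceKeyA, service)(onRegistered)

        // replied to with Listing(ServiceKeyA, Set(service))
        receptionist ! Find(ServiceKeyA)(onListing)
      }
    }
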
    diff --git a/akka-typed/src/test/scala/akka/typed/patterns/ReceptionistSpec.scala b/akka-typed/src/test/scala/akka/typed/patterns/ReceptionistSpec.scala
    index 386dcb3c81..6a8a3fc5fa 100644
    --- a/akka-typed/src/test/scala/akka/typed/patterns/ReceptionistSpec.scala
    +++ b/akka-typed/src/test/scala/akka/typed/patterns/ReceptionistSpec.scala
    @@ -27,9 +27,11 @@ class ReceptionistSpec extends TypedSpec {
           val a = Inbox.sync[ServiceA]("a")
           val r = Inbox.sync[Registered[_]]("r")
           ctx.run(Register(ServiceKeyA, a.ref)(r.ref))
    +      ctx.getAllEffects() should be(Effect.Watched(a.ref) :: Nil)
           r.receiveMsg() should be(Registered(ServiceKeyA, a.ref))
           val q = Inbox.sync[Listing[ServiceA]]("q")
           ctx.run(Find(ServiceKeyA)(q.ref))
    +      ctx.getAllEffects() should be(Nil)
           q.receiveMsg() should be(Listing(ServiceKeyA, Set(a.ref)))
           assertEmpty(a, r, q)
         }
    diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala
    index f0dbe5af37..7ebf6f0164 100644
    --- a/project/AkkaBuild.scala
    +++ b/project/AkkaBuild.scala
    @@ -203,9 +203,10 @@ object AkkaBuild extends Build {
         settings = parentSettings ++ ActivatorDist.settings,
         // FIXME osgiDiningHakkersSampleMavenTest temporarily removed from aggregate due to #16703
         aggregate = if (!CommandLineOptions.aggregateSamples) Nil else
    -      Seq(sampleCamelJava, sampleCamelScala, sampleClusterJava, sampleClusterScala, sampleFsmScala,
    -        sampleMainJava, sampleMainScala, sampleMultiNodeScala,
    -        samplePersistenceJava, samplePersistenceScala, sampleRemoteJava, sampleRemoteScala)
    +      Seq(sampleCamelJava, sampleCamelScala, sampleClusterJava, sampleClusterScala, sampleFsmScala, sampleFsmJavaLambda,
    +        sampleMainJava, sampleMainScala, sampleMainJavaLambda, sampleMultiNodeScala,
    +        samplePersistenceJava, samplePersistenceScala, samplePersistenceJavaLambda,
    +        sampleRemoteJava, sampleRemoteScala, sampleSupervisionJavaLambda)
       )
     
       lazy val sampleCamelJava = Sample.project("akka-sample-camel-java")
    @@ -215,17 +216,22 @@ object AkkaBuild extends Build {
       lazy val sampleClusterScala = Sample.project("akka-sample-cluster-scala")
     
       lazy val sampleFsmScala = Sample.project("akka-sample-fsm-scala")
    +  lazy val sampleFsmJavaLambda = Sample.project("akka-sample-fsm-java-lambda")
     
       lazy val sampleMainJava = Sample.project("akka-sample-main-java")
       lazy val sampleMainScala = Sample.project("akka-sample-main-scala")
    +  lazy val sampleMainJavaLambda = Sample.project("akka-sample-main-java-lambda")
     
       lazy val sampleMultiNodeScala = Sample.project("akka-sample-multi-node-scala")
     
       lazy val samplePersistenceJava = Sample.project("akka-sample-persistence-java")
       lazy val samplePersistenceScala = Sample.project("akka-sample-persistence-scala")
    +  lazy val samplePersistenceJavaLambda = Sample.project("akka-sample-persistence-java-lambda")
     
       lazy val sampleRemoteJava = Sample.project("akka-sample-remote-java")
       lazy val sampleRemoteScala = Sample.project("akka-sample-remote-scala")
    +  
    +  lazy val sampleSupervisionJavaLambda = Sample.project("akka-sample-supervision-java-lambda")
     
       lazy val osgiDiningHakkersSampleMavenTest = Project(id = "akka-sample-osgi-dining-hakkers-maven-test",
         base = file("akka-samples/akka-sample-osgi-dining-hakkers-maven-test"),
    @@ -305,13 +311,13 @@ object AkkaBuild extends Build {
       lazy val defaultSettings = baseSettings ++ resolverSettings ++ TestExtras.Filter.settings ++
         Protobuf.settings ++ Seq(
         // compile options
    -    scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"),
    +    scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.8", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"),
         scalacOptions in Compile ++= (if (allWarnings) Seq("-deprecation") else Nil),
         scalacOptions in Test := (scalacOptions in Test).value.filterNot(_ == "-Xlog-reflective-calls"),
         // -XDignore.symbol.file suppresses sun.misc.Unsafe warnings
    -    javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.6", "-target", "1.6", "-Xlint:unchecked", "-XDignore.symbol.file"),
    +    javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-XDignore.symbol.file"),
         javacOptions in compile ++= (if (allWarnings) Seq("-Xlint:deprecation") else Nil),
    -    javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.6"),
    +    javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.8"),
         incOptions := incOptions.value.withNameHashing(true),
     
         crossVersion := CrossVersion.binary,
    diff --git a/project/Unidoc.scala b/project/Unidoc.scala
    index 08c75cf723..21ef389c2f 100644
    --- a/project/Unidoc.scala
    +++ b/project/Unidoc.scala
    @@ -2,7 +2,7 @@ package akka
     
     import sbt._
     import sbtunidoc.Plugin.UnidocKeys._
    -import sbtunidoc.Plugin.{ ScalaUnidoc, JavaUnidoc, scalaJavaUnidocSettings, genjavadocSettings, scalaUnidocSettings }
    +import sbtunidoc.Plugin.{ ScalaUnidoc, JavaUnidoc, scalaJavaUnidocSettings, genjavadocExtraSettings, scalaUnidocSettings }
     import sbt.Keys._
     import sbt.File
     import scala.annotation.tailrec
    @@ -22,7 +22,10 @@ object Unidoc {
     
       val genjavadocEnabled = sys.props.get("akka.genjavadoc.enabled").getOrElse("false").toBoolean
       val (unidocSettings, javadocSettings) =
    -    if (genjavadocEnabled) (scalaJavaUnidocSettings, genjavadocSettings ++ Seq(unidocGenjavadocVersion in Global := "0.8"))
    +    if (genjavadocEnabled)
    +      (scalaJavaUnidocSettings, genjavadocExtraSettings ++ Seq(
    +        scalacOptions in Compile += "-P:genjavadoc:fabricateParams=true",
    +        unidocGenjavadocVersion in Global := "0.9"))
         else (scalaUnidocSettings, Nil)
     
       lazy val scaladocDiagramsEnabled = sys.props.get("akka.scaladoc.diagrams").getOrElse("true").toBoolean
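
The choice above is driven entirely by a JVM property; a sketch of the toggle in isolation (the property name and the extra scalac option are the ones visible in the hunk, while the sbt invocation is an assumption about how it is typically switched on):

    import sbt._
    import sbt.Keys._

    object GenjavadocToggleSketch {
      // e.g. enabled with: sbt -Dakka.genjavadoc.enabled=true unidoc
      val genjavadocEnabled: Boolean =
        sys.props.get("akka.genjavadoc.enabled").getOrElse("false").toBoolean

      val extraDocSettings: Seq[Setting[_]] =
        if (genjavadocEnabled) Seq(scalacOptions in Compile += "-P:genjavadoc:fabricateParams=true")
        else Nil
    }
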
    diff --git a/scripts/build/extra-build-steps.sh b/scripts/build/extra-build-steps.sh
    deleted file mode 100755
    index 94ea02f99d..0000000000
    --- a/scripts/build/extra-build-steps.sh
    +++ /dev/null
    @@ -1,136 +0,0 @@
    -#!/usr/bin/env bash
    -
    -# defaults
    -declare -r default_java_home="/usr/local/share/java/jdk6"
    -declare -r default_java8_home="/usr/local/share/java/jdk8"
    -declare -r default_sbt_jar="/usr/share/sbt-launcher-packaging/bin/sbt-launch.jar"
    -declare -r default_ivy_home="~/.ivy2"
    -
    -# get the source location for this script; handles symlinks
    -function get_script_path {
    -  local source="${BASH_SOURCE[0]}"
    -  while [ -h "${source}" ] ; do
    -    source="$(readlink "${source}")";
    -  done
    -  echo ${source}
    -}
    -
    -# path, name, and dir for this script
    -declare -r script_path=$(get_script_path)
    -declare -r script_name=$(basename "${script_path}")
    -declare -r script_dir="$(cd -P "$(dirname "${script_path}")" && pwd)"
    -
    -# print usage info
    -function usage {
    -  cat <&2
    -}
    -
    -# fail the script with an error message
    -function fail {
    -  echoerr "$@"
    -  exit 1
    -}
    -
    -# try to run a command or otherwise fail with a message
    -function try {
    -  "${@:1:$#-1}" || fail "${@:$#}"
    -}
    -
    -# try to run a command or otherwise fail
    -function check {
    -  type -P "$@" &> /dev/null || fail "command not found: $@"
    -}
    -
    -# run mvn clean test using the specified java home in the specified directory
    -function mvncleantest {
    -  tmp="$script_dir/../../$2"
    -  try cd  "$tmp" "can't step into project directory: $tmp"
    -  export JAVA_HOME="$1"
    -  try mvn clean test "mvn execution in $2 failed"
    -}
    -
    -# run sbt clean test using the specified java home in the specified directory
    -function sbtcleantest {
    -  tmp="$script_dir/../../$2"
    -  try cd  "$tmp" "can't step into project directory: $tmp"
    -  orig_path="$PATH"
    -  export PATH="$1/bin:$PATH"
    -  try java -jar $sbt_jar -Dsbt.ivy.home=$ivy_home clean test "sbt execution in $2 failed"
    -  export PATH="$orig_path"
    -}
    -
    -# initialize variables with defaults and override from environment
    -declare java_home="$default_java_home"
    -if [ $AKKA_BUILD_JAVA_HOME ]; then
    -  java_home="$AKKA_BUILD_JAVA_HOME"
    -fi
    -
    -declare java8_home="$default_java8_home"
    -if [ $AKKA_BUILD_JAVA8_HOME ]; then
    -  java8_home="$AKKA_BUILD_JAVA8_HOME"
    -fi
    -
    -declare sbt_jar="$default_sbt_jar"
    -if [ $AKKA_BUILD_SBT_JAR ]; then
    -  sbt_jar="$AKKA_BUILD_SBT_JAR"
    -fi
    -
    -declare ivy_home="$default_ivy_home"
    -if [ $AKKA_BUILD_IVY_HOME ]; then
    -  ivy_home="$AKKA_BUILD_IVY_HOME"
    -fi
    -
    -# process options and set flags
    -while true; do
    -  case "$1" in
    -    -h | --help ) usage; exit 1 ;;
    -    --java_home ) java_home=$2; shift 2 ;;
    -    --java8_home ) java8_home=$2; shift 2 ;;
    -    * ) break ;;
    -  esac
    -done
    -
    -declare -r java_path="$java_home/bin/java"
    -declare -r java8_path="$java8_home/bin/java"
    -
    -# check that java paths work
    -check "$java_path"
    -check "$java8_path"
    -
    -# check for a mvn command
    -check mvn
    -
    -# now do some work
    -mvncleantest "$java8_home" "akka-samples/akka-docs-java-lambda"
    -
    -mvncleantest "$java8_home" "akka-samples/akka-sample-fsm-java-lambda"
    -
    -mvncleantest "$java8_home" "akka-samples/akka-sample-persistence-java-lambda"
    -
    -mvncleantest "$java8_home" "akka-samples/akka-sample-supervision-java-lambda"
    -
    -sample_dir=akka-samples/akka-sample-main-java-lambda
    -tmp="$script_dir/../../$sample_dir"
    -try cd  "$tmp" "can't step into project directory: $tmp"
    -export JAVA_HOME="$java8_home"
    -try mvn clean compile exec:java -Dexec.mainClass="akka.Main" -Dexec.args="sample.hello.HelloWorld" "mvn execution in $sample_dir failed"
    -try mvn exec:java -Dexec.mainClass="sample.hello.Main2" "mvn execution in $sample_dir failed"
    -
    -sbtcleantest "$java8_home" "akka-samples/akka-docs-udp-multicast"