= #17380 fix doc comments for java8 doclint

* actor and cluster-metrics comments
* agent/camel/cluster/osgi/persistence/remote comments
* comments in contrib/persistence-tck/multi-node/typed
Authored by Roland Kuhn on 2015-05-15 16:53:24 +02:00, committed by Patrik Nordwall
parent bd280e3252
commit 18688fc84b
90 changed files with 287 additions and 329 deletions


@@ -12,6 +12,8 @@ import scala.concurrent.Await
  import scala.concurrent.duration._
  import scala.util.Failure
+ import language.postfixOps
  class AskSpec extends AkkaSpec with ScalaFutures {
  "The “ask” pattern" must {

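As a rough illustration of the ask pattern exercised in this spec (a sketch; `someActor` and the timeout value are assumptions, not taken from the commit):

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(5.seconds)
val replyFuture = someActor ? "request"                  // send and get a Future of the reply
val reply = Await.result(replyFuture, timeout.duration)  // blocking is fine in tests and examples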

@@ -5,11 +5,7 @@
  package akka
  /**
- * Akka base Exception. Each Exception gets:
- * <ul>
- * <li>a uuid for tracking purposes</li>
- * <li>toString that includes exception name, message and uuid</li>
- * </ul>
+ * Akka base Exception.
  */
  @SerialVersionUID(1L)
  class AkkaException(message: String, cause: Throwable) extends RuntimeException(message, cause) with Serializable {


@@ -109,7 +109,7 @@ abstract class AbstractLoggingActor extends AbstractActor with ActorLogging
  * }
  * </pre>
  * Note that the subclasses of `AbstractActorWithStash` by default request a Deque based mailbox since this class
- * implements the `RequiresMessageQueue<DequeBasedMessageQueueSemantics>` marker interface.
+ * implements the `RequiresMessageQueue&lt;DequeBasedMessageQueueSemantics&gt;` marker interface.
  * You can override the default mailbox provided when `DequeBasedMessageQueueSemantics` are requested via config:
  * <pre>
  * akka.actor.mailbox.requirements {


@@ -27,7 +27,7 @@ object ActorPath {
  /**
  * Validates the given actor path element and throws an [[InvalidActorNameException]] if invalid.
- * See [[isValidPathElement()]] for a non-throwing version.
+ * See [[#isValidPathElement]] for a non-throwing version.
  *
  * @param element actor path element to be validated
  */
@@ -35,7 +35,7 @@ object ActorPath {
  /**
  * Validates the given actor path element and throws an [[InvalidActorNameException]] if invalid.
- * See [[isValidPathElement()]] for a non-throwing version.
+ * See [[#isValidPathElement]] for a non-throwing version.
  *
  * @param element actor path element to be validated
  * @param fullPath optional fullPath element that may be included for better error messages; null if not given
@@ -60,7 +60,7 @@ object ActorPath {
  /**
  * This method is used to validate a path element (Actor Name).
  * Since Actors form a tree, it is addressable using an URL, therefore an Actor Name has to conform to:
- * [[http://www.ietf.org/rfc/rfc2396.txt RFC-2396]].
+ * <a href="http://www.ietf.org/rfc/rfc2396.txt">RFC-2396</a>.
  *
  * User defined Actor names may not start from a `$` sign - these are reserved for system names.
  */
@@ -97,7 +97,7 @@ object ActorPath {
  * ActorPath defines a natural ordering (so that ActorRefs can be put into
  * collections with this requirement); this ordering is intended to be as fast
  * as possible, which owing to the bottom-up recursive nature of ActorPath
- * is sorted by path elements FROM RIGHT TO LEFT, where RootActorPath >
+ * is sorted by path elements FROM RIGHT TO LEFT, where RootActorPath &gt;
  * ChildActorPath in case the number of elements is different.
  *
  * Two actor paths are compared equal when they have the same name and parent

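A minimal sketch of the element validation described in these comments (the example names are illustrative):

import akka.actor.ActorPath

ActorPath.isValidPathElement("worker-1")   // true: a plain user-defined name
ActorPath.isValidPathElement("$internal")  // false: names starting with '$' are reserved for system names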

@@ -118,7 +118,7 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable
  * Sends the specified message to this ActorRef, i.e. fire-and-forget
  * semantics, including the sender reference if possible.
  *
- * Pass [[akka.actor.ActorRef$.noSender]] or `null` as sender if there is nobody to reply to
+ * Pass [[akka.actor.ActorRef]] `noSender` or `null` as sender if there is nobody to reply to
  */
  final def tell(msg: Any, sender: ActorRef): Unit = this.!(msg)(sender)
@@ -157,7 +157,7 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable
  /**
  * This trait represents the Scala Actor API
  * There are implicit conversions in ../actor/Implicits.scala
- * from ActorRef -> ScalaActorRef and back
+ * from ActorRef -&gt; ScalaActorRef and back
  */
  trait ScalaActorRef { ref: ActorRef ⇒

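A hedged usage sketch of the fire-and-forget send documented above (`target` is an assumed ActorRef):

import akka.actor.ActorRef

target.tell("ping", ActorRef.noSender)  // Java-style API: explicit sender, here "nobody"
target ! "ping"                         // Scala API: picks up the implicit sender if one is in scope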

@@ -358,9 +358,9 @@ private[akka] object SystemGuardian {
  /**
  * For the purpose of orderly shutdown it's possible
  * to register interest in the termination of systemGuardian
- * and receive a notification [[akka.actor.Guardian.TerminationHook]]
+ * and receive a notification `TerminationHook`
  * before systemGuardian is stopped. The registered hook is supposed
- * to reply with [[akka.actor.Guardian.TerminationHookDone]] and the
+ * to reply with `TerminationHookDone` and the
  * systemGuardian will not stop until all registered hooks have replied.
  */
  case object RegisterTerminationHook


@@ -101,7 +101,7 @@ abstract class ActorSelection extends Serializable {
  /**
  * String representation of the actor selection suitable for storage and recreation.
- * The output is similar to the URI fragment returned by [[akka.actor.ActorPath.toSerializationFormat]].
+ * The output is similar to the URI fragment returned by [[akka.actor.ActorPath#toSerializationFormat]].
  * @return URI fragment
  */
  def toSerializationFormat: String = {


@@ -291,7 +291,7 @@ abstract class ActorSystem extends ActorRefFactory {
  def logConfiguration(): Unit
  /**
- * Construct a path below the application guardian to be used with [[ActorSystem.actorSelection]].
+ * Construct a path below the application guardian to be used with [[ActorSystem#actorSelection]].
  */
  def /(name: String): ActorPath
@@ -301,7 +301,7 @@ abstract class ActorSystem extends ActorRefFactory {
  def child(child: String): ActorPath = /(child)
  /**
- * Construct a path below the application guardian to be used with [[ActorSystem.actorSelection]].
+ * Construct a path below the application guardian to be used with [[ActorSystem#actorSelection]].
  */
  def /(name: Iterable[String]): ActorPath
@@ -326,7 +326,7 @@ abstract class ActorSystem extends ActorRefFactory {
  def eventStream: EventStream
  /**
- * Convenient logging adapter for logging to the [[ActorSystem.eventStream]].
+ * Convenient logging adapter for logging to the [[ActorSystem#eventStream]].
  */
  def log: LoggingAdapter
@@ -369,7 +369,7 @@ abstract class ActorSystem extends ActorRefFactory {
  * The callbacks will be run sequentially in reverse order of registration, i.e.
  * last registration is run first.
  *
- * @throws a RejectedExecutionException if the System has already shut down or if shutdown has been initiated.
+ * Throws a RejectedExecutionException if the System has already shut down or if shutdown has been initiated.
  *
  * Scala API
  */
@@ -382,7 +382,7 @@ abstract class ActorSystem extends ActorRefFactory {
  * The callbacks will be run sequentially in reverse order of registration, i.e.
  * last registration is run first.
  *
- * @throws a RejectedExecutionException if the System has already shut down or if shutdown has been initiated.
+ * Throws a RejectedExecutionException if the System has already shut down or if shutdown has been initiated.
  */
  def registerOnTermination(code: Runnable): Unit
@@ -391,7 +391,7 @@ abstract class ActorSystem extends ActorRefFactory {
  * timeout has elapsed. This will block until after all on termination
  * callbacks have been run.
  *
- * @throws TimeoutException in case of timeout
+ * Throws TimeoutException in case of timeout.
  */
  @deprecated("Use Await.result(whenTerminated, timeout) instead", "2.4")
  def awaitTermination(timeout: Duration): Unit
@@ -407,7 +407,7 @@ abstract class ActorSystem extends ActorRefFactory {
  * Stop this actor system. This will stop the guardian actor, which in turn
  * will recursively stop all its child actors, then the system guardian
  * (below which the logging actors reside) and the execute all registered
- * termination handlers (see [[ActorSystem.registerOnTermination]]).
+ * termination handlers (see [[ActorSystem#registerOnTermination]]).
  */
  @deprecated("Use the terminate() method instead", "2.4")
  def shutdown(): Unit
@@ -426,7 +426,7 @@ abstract class ActorSystem extends ActorRefFactory {
  * Terminates this actor system. This will stop the guardian actor, which in turn
  * will recursively stop all its child actors, then the system guardian
  * (below which the logging actors reside) and the execute all registered
- * termination handlers (see [[ActorSystem.registerOnTermination]]).
+ * termination handlers (see [[ActorSystem#registerOnTermination]]).
  */
  def terminate(): Future[Terminated]
@@ -835,7 +835,7 @@ private[akka] class ActorSystemImpl(
  * Adds a Runnable that will be executed on ActorSystem termination.
  * Note that callbacks are executed in reverse order of insertion.
  * @param r The callback to be executed on ActorSystem termination
- * @throws RejectedExecutionException if called after ActorSystem has been terminated
+ * Throws RejectedExecutionException if called after ActorSystem has been terminated.
  */
  final def add(r: Runnable): Unit = {
  @tailrec def addRec(r: Runnable, p: Promise[T]): Unit = ref.get match {

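A small sketch of the termination hooks and terminate() described above (the system name is illustrative):

import akka.actor.ActorSystem
import scala.concurrent.Await
import scala.concurrent.duration._

val system = ActorSystem("example")
system.registerOnTermination(println("registered first, runs last"))
system.registerOnTermination(println("registered second, runs first"))
Await.result(system.terminate(), 10.seconds)  // replaces the deprecated shutdown()/awaitTermination()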

@@ -46,7 +46,7 @@ final case class Address private (protocol: String, system: String, host: Option
  /**
  * Returns the canonical String representation of this Address formatted as:
  *
- * <protocol>://<system>@<host>:<port>
+ * `protocol://system@host:port`
  */
  @transient
  override lazy val toString: String = {
@@ -61,7 +61,7 @@
  /**
  * Returns a String representation formatted as:
  *
- * <system>@<host>:<port>
+ * `system@host:port`
  */
  def hostPort: String = toString.substring(protocol.length + 3)
  }

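For reference, the formats documented above look roughly like this in practice (host and port are made up):

import akka.actor.Address

val addr = Address("akka.tcp", "MySystem", "host.example.com", 2552)
addr.toString  // "akka.tcp://MySystem@host.example.com:2552"
addr.hostPort  // "MySystem@host.example.com:2552"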

@@ -60,7 +60,7 @@ final case class Deploy(
  /**
  * Do a merge between this and the other Deploy, where values from this take
  * precedence. The path of the other Deploy is not taken into account. All
- * other members are merged using ``<X>.withFallback(other.<X>)``.
+ * other members are merged using `X.withFallback(other.X)`.
  */
  def withFallback(other: Deploy): Deploy = {
  Deploy(


@@ -70,7 +70,7 @@ object FSM {
  /**
  * Signifies that the [[akka.actor.FSM]] is shutting itself down because of
  * an error, e.g. if the state to transition into does not exist. You can use
- * this to communicate a more precise cause to the [[akka.actor.FSM.onTermination]] block.
+ * this to communicate a more precise cause to the `onTermination` block.
  */
  final case class Failure(cause: Any) extends Reason
@@ -374,7 +374,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
  * Return this from a state function when no state change is to be effected.
  *
  * No transition event will be triggered by [[#stay]].
- * If you want to trigger an event like `S -> S` for [[#onTransition]] to handle use [[#goto]] instead.
+ * If you want to trigger an event like `S -&gt; S` for `onTransition` to handle use `goto` instead.
  *
  * @return descriptor for staying in current state
  */
@@ -410,7 +410,6 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
  * @param msg message to be delivered
  * @param timeout delay of first message delivery and between subsequent messages
  * @param repeat send once if false, scheduleAtFixedRate if true
- * @return current state descriptor
  */
  final def setTimer(name: String, msg: Any, timeout: FiniteDuration, repeat: Boolean = false): Unit = {
  if (debugEvent)
@@ -1160,7 +1159,6 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
  * @param name identifier to be used with cancelTimer()
  * @param msg message to be delivered
  * @param timeout delay of first message delivery and between subsequent messages
- * @return current state descriptor
  */
  final def setTimer(name: String, msg: Any, timeout: FiniteDuration): Unit =
  setTimer(name, msg, timeout, false)

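A hedged sketch of setTimer as documented above, embedded in a minimal FSM (state and message names are assumptions):

import akka.actor.FSM
import scala.concurrent.duration._

sealed trait HbState; case object Idle extends HbState
case object Tick

class Heartbeat extends FSM[HbState, Unit] {
  startWith(Idle, ())
  setTimer("tick", Tick, 1.second, repeat = true) // periodic self-message; returns Unit, not a state descriptor
  when(Idle) {
    case Event(Tick, _) => stay()
  }
  initialize()
}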

@@ -145,7 +145,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits {
  /**
  * When supervisorStrategy is not specified for an actor this
- * [[Decider]] is used by default in the supervisor strategy.
+ * `Decider` is used by default in the supervisor strategy.
  * The child will be stopped when [[akka.actor.ActorInitializationException]],
  * [[akka.actor.ActorKilledException]], or [[akka.actor.DeathPactException]] is
  * thrown. It will be restarted for other `Exception` types.

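A hedged sketch of overriding that default decider in a parent actor (the exception choices are illustrative):

import akka.actor.{ Actor, OneForOneStrategy }
import akka.actor.SupervisorStrategy._

class Parent extends Actor {
  override val supervisorStrategy = OneForOneStrategy() {
    case _: ArithmeticException => Resume   // keep the child and its state
    case _: Exception           => Restart  // default-like behaviour for everything else
  }
  def receive = { case _ => () }
}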

@@ -187,7 +187,7 @@ trait Cancellable {
  * when scheduling single-shot tasks, instead it always rounds up the task
  * delay to a full multiple of the TickDuration. This means that tasks are
  * scheduled possibly one tick later than they could be (if checking that
- * now() + delay <= nextTick were done).
+ * now() + delay &lt;= nextTick were done).
  */
  class LightArrayRevolverScheduler(config: Config,
  log: LoggingAdapter,


@@ -138,7 +138,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
  /**
  * Invokes the Method on the supplied instance
  *
- * @throws the underlying exception if there's an InvocationTargetException thrown on the invocation
+ * Throws the underlying exception if there's an InvocationTargetException thrown on the invocation.
  */
  def apply(instance: AnyRef): AnyRef = try {
  parameters match {
@@ -217,8 +217,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
  *
  * NEVER EXPOSE "this" to someone else, always use "self[TypeOfInterface(s)]"
  *
- * @throws IllegalStateException if called outside of the scope of a method on this TypedActor
- * @throws ClassCastException if the supplied type T isn't the type of the proxy associated with this TypedActor
+ * Throws IllegalStateException if called outside of the scope of a method on this TypedActor.
+ *
+ * Throws ClassCastException if the supplied type T isn't the type of the proxy associated with this TypedActor.
  */
  def self[T <: AnyRef] = selfReference.get.asInstanceOf[T] match {
  case null ⇒ throw new IllegalStateException("Calling TypedActor.self outside of a TypedActor implementation method!")


@@ -30,7 +30,7 @@ package akka.actor
  * }
  * </pre>
  * Note that the subclasses of `UntypedActorWithStash` by default request a Deque based mailbox since this class
- * implements the `RequiresMessageQueue<DequeBasedMessageQueueSemantics>` marker interface.
+ * implements the `RequiresMessageQueue&lt;DequeBasedMessageQueueSemantics&gt;` marker interface.
  * You can override the default mailbox provided when `DequeBasedMessageQueueSemantics` are requested via config:
  * <pre>
  * akka.actor.mailbox.requirements {


@@ -157,8 +157,8 @@ trait Inbox { this: ActorDSL.type ⇒
  /**
  * Create a new actor which will internally queue up messages it gets so that
- * they can be interrogated with the [[akka.actor.dsl.Inbox!.Inbox!.receive]]
- * and [[akka.actor.dsl.Inbox!.Inbox!.select]] methods. It will be created as
+ * they can be interrogated with the `akka.actor.dsl.Inbox!.Inbox!.receive`
+ * and `akka.actor.dsl.Inbox!.Inbox!.select` methods. It will be created as
  * a system actor in the ActorSystem which is implicitly (or explicitly)
  * supplied.
  */


@@ -76,7 +76,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc
  * Returns a dispatcher as specified in configuration. Please note that this
  * method _may_ create and return a NEW dispatcher, _every_ call.
  *
- * @throws ConfigurationException if the specified dispatcher cannot be found in the configuration
+ * Throws ConfigurationException if the specified dispatcher cannot be found in the configuration.
  */
  def lookup(id: String): MessageDispatcher = lookupConfigurator(id).dispatcher()

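A short sketch of looking up a configured dispatcher as described above (`system` and the dispatcher id are assumptions):

// application.conf (assumed):
//   my-dispatcher { type = Dispatcher, executor = "fork-join-executor" }
val myDispatcher = system.dispatchers.lookup("my-dispatcher") // may build a new dispatcher on every call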

@@ -41,7 +41,7 @@ object ExecutionContexts {
  * Returns a new ExecutionContextExecutorService which will delegate execution to the underlying ExecutorService,
  * and which will use the default error reporter.
  *
- * @param executor the ExecutorService which will be used for the ExecutionContext
+ * @param executorService the ExecutorService which will be used for the ExecutionContext
  * @return a new ExecutionContext
  */
  def fromExecutorService(executorService: ExecutorService): ExecutionContextExecutorService =
@@ -51,7 +51,7 @@ object ExecutionContexts {
  * Returns a new ExecutionContextExecutorService which will delegate execution to the underlying ExecutorService,
  * and which will use the provided error reporter.
  *
- * @param executor the ExecutorService which will be used for the ExecutionContext
+ * @param executorService the ExecutorService which will be used for the ExecutionContext
  * @param errorReporter a Procedure that will log any exceptions passed to it
  * @return a new ExecutionContext
  */
@@ -272,7 +272,7 @@ abstract class Recover[+T] extends japi.RecoverBridge[T] {
  * becomes completed with a failure.
  *
  * @return a successful value for the passed in failure
- * @throws the passed in failure to propagate it.
+ * Throws the passed in failure to propagate it.
  *
  * Java API
  */
@@ -350,7 +350,7 @@ abstract class Mapper[-T, +R] extends scala.runtime.AbstractFunction1[T, R] {
  /**
  * Override this method if you need to throw checked exceptions
  *
- * @throws UnsupportedOperation by default
+ * Throws UnsupportedOperation by default.
  */
  @throws(classOf[Throwable])
  def checkedApply(parameter: T): R = throw new UnsupportedOperationException("Mapper.checkedApply has not been implemented")

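A minimal sketch of wrapping an ExecutorService as described above:

import java.util.concurrent.Executors
import akka.dispatch.ExecutionContexts

val pool = Executors.newFixedThreadPool(4)
val ec = ExecutionContexts.fromExecutorService(pool) // uses the default error reporter
// the caller still owns the pool's lifecycle: call pool.shutdown() when done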

@@ -182,7 +182,7 @@ private[akka] class EarliestFirstSystemMessageList(val head: SystemMessage) exte
  *
  * INTERNAL API
  *
- * ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅
+ * <b>NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS</b>
  */
  private[akka] sealed trait SystemMessage extends PossiblyHarmful with Serializable {
  // Next fields are only modifiable via the SystemMessageList value class


@@ -51,7 +51,7 @@ private[akka] class EventStreamUnsubscriber(eventStream: EventStream, debug: Boo
  * INTERNAL API
  *
  * Provides factory for [[akka.event.EventStreamUnsubscriber]] actors with **unique names**.
- * This is needed if someone spins up more [[EventStream]]s using the same [[ActorSystem]],
+ * This is needed if someone spins up more [[EventStream]]s using the same [[akka.actor.ActorSystem]],
  * each stream gets it's own unsubscriber.
  */
  private[akka] object EventStreamUnsubscriber {


@@ -251,8 +251,8 @@ class DummyClassForStringSources
  * In case an [[akka.actor.ActorSystem]] is provided, the following apply:
  * <ul>
  * <li>[[akka.actor.Actor]] and [[akka.actor.ActorRef]] will be represented by their absolute physical path</li>
- * <li>providing a `String` as source will append "(<system address>)" and use the result</li>
- * <li>providing a `Class` will extract its simple name, append "(<system address>)" and use the result</li>
+ * <li>providing a `String` as source will append "(&lt;system address&gt;)" and use the result</li>
+ * <li>providing a `Class` will extract its simple name, append "(&lt;system address&gt;)" and use the result</li>
  * <li>anything else gives compile error unless implicit [[akka.event.LogSource]] is in scope for it</li>
  * </ul>
  *
@@ -385,7 +385,7 @@ object Logging {
  /**
  * Returns a 'safe' getSimpleName for the provided Class
- * @param obj
+ * @param clazz
  * @return the simple name of the given Class
  */
  def simpleName(clazz: Class[_]): String = {
@@ -856,19 +856,19 @@ object Logging {
  * evaluate .toString only if the log level is actually enabled. Typically used
  * by obtaining an implementation from the Logging object:
  *
- * <code><pre>
+ * {{{
  * val log = Logging(&lt;bus&gt;, &lt;source object&gt;)
  * ...
  * log.info("hello world!")
- * </pre></code>
+ * }}}
  *
  * All log-level methods support simple interpolation templates with up to four
  * arguments placed by using <code>{}</code> within the template (first string
  * argument):
  *
- * <code><pre>
+ * {{{
  * log.error(exception, "Exception while processing {} in state {}", msg, state)
- * </pre></code>
+ * }}}
  */
  trait LoggingAdapter {
@@ -1140,7 +1140,7 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter {
  /**
  * Scala API:
  * Mapped Diagnostic Context for application defined values
- * which can be used in PatternLayout when [[akka.event.slf4j.Slf4jLogger]] is configured.
+ * which can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured.
  * Visit <a href="http://logback.qos.ch/manual/mdc.html">Logback Docs: MDC</a> for more information.
  *
  * @return A Map containing the MDC values added by the application, or empty Map if no value was added.
@@ -1150,7 +1150,7 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter {
  /**
  * Scala API:
  * Sets the values to be added to the MDC (Mapped Diagnostic Context) before the log is appended.
- * These values can be used in PatternLayout when [[akka.event.slf4j.Slf4jLogger]] is configured.
+ * These values can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured.
  * Visit <a href="http://logback.qos.ch/manual/mdc.html">Logback Docs: MDC</a> for more information.
  */
  def mdc(mdc: MDC): Unit = _mdc = if (mdc != null) mdc else emptyMDC
@@ -1158,17 +1158,18 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter {
  /**
  * Java API:
  * Mapped Diagnostic Context for application defined values
- * which can be used in PatternLayout when [[akka.event.slf4j.Slf4jLogger]] is configured.
+ * which can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured.
  * Visit <a href="http://logback.qos.ch/manual/mdc.html">Logback Docs: MDC</a> for more information.
  * Note tha it returns a <b>COPY</b> of the actual MDC values.
  * You cannot modify any value by changing the returned Map.
  * Code like the following won't have any effect unless you set back the modified Map.
- * <code><pre>
+ *
+ * {{{
  * Map mdc = log.getMDC();
  * mdc.put("key", value);
  * // NEEDED
  * log.setMDC(mdc);
- * </pre></code>
+ * }}}
  *
  * @return A copy of the actual MDC values
  */
@@ -1177,7 +1178,7 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter {
  /**
  * Java API:
  * Sets the values to be added to the MDC (Mapped Diagnostic Context) before the log is appended.
- * These values can be used in PatternLayout when [[akka.event.slf4j.Slf4jLogger]] is configured.
+ * These values can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured.
  * Visit <a href="http://logback.qos.ch/manual/mdc.html">Logback Docs: MDC</a> for more information.
  */
  def setMDC(jMdc: java.util.Map[String, Any]): Unit = mdc(if (jMdc != null) jMdc.asScala.toMap else emptyMDC)

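A brief usage sketch of the LoggingAdapter templates documented above (`system`, `msg` and `state` are assumptions):

import akka.event.Logging

val log = Logging(system.eventStream, "my-source")
log.info("processing {} in state {}", msg, state) // placeholders are only rendered if INFO is enabled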

@@ -78,7 +78,7 @@ object Inet {
  /**
  * Open and return new DatagramChannel.
  *
- * [[scala.throws]] is needed because [[DatagramChannel.open]] method
+ * `throws` is needed because `DatagramChannel.open` method
  * can throw an exception.
  */
  @throws(classOf[Exception])
@@ -95,7 +95,7 @@
  /**
  * [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option
  *
- * For more information see [[java.net.Socket.setReceiveBufferSize]]
+ * For more information see [[java.net.Socket#setReceiveBufferSize]]
  */
  final case class ReceiveBufferSize(size: Int) extends SocketOption {
  require(size > 0, "ReceiveBufferSize must be > 0")
@@ -109,7 +109,7 @@
  /**
  * [[akka.io.Inet.SocketOption]] to enable or disable SO_REUSEADDR
  *
- * For more information see [[java.net.Socket.setReuseAddress]]
+ * For more information see [[java.net.Socket#setReuseAddress]]
  */
  final case class ReuseAddress(on: Boolean) extends SocketOption {
  override def beforeServerSocketBind(s: ServerSocket): Unit = s.setReuseAddress(on)
@@ -120,7 +120,7 @@
  /**
  * [[akka.io.Inet.SocketOption]] to set the SO_SNDBUF option.
  *
- * For more information see [[java.net.Socket.setSendBufferSize]]
+ * For more information see [[java.net.Socket#setSendBufferSize]]
  */
  final case class SendBufferSize(size: Int) extends SocketOption {
  require(size > 0, "SendBufferSize must be > 0")
@@ -132,7 +132,7 @@
  * type-of-service octet in the IP header for packets sent from this
  * socket.
  *
- * For more information see [[java.net.Socket.setTrafficClass]]
+ * For more information see [[java.net.Socket#setTrafficClass]]
  */
  final case class TrafficClass(tc: Int) extends SocketOption {
  require(0 <= tc && tc <= 255, "TrafficClass needs to be in the interval [0, 255]")
@@ -145,21 +145,21 @@
  /**
  * [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option
  *
- * For more information see [[java.net.Socket.setReceiveBufferSize]]
+ * For more information see [[java.net.Socket#setReceiveBufferSize]]
  */
  val ReceiveBufferSize = SO.ReceiveBufferSize
  /**
  * [[akka.io.Inet.SocketOption]] to enable or disable SO_REUSEADDR
  *
- * For more information see [[java.net.Socket.setReuseAddress]]
+ * For more information see [[java.net.Socket#setReuseAddress]]
  */
  val ReuseAddress = SO.ReuseAddress
  /**
  * [[akka.io.Inet.SocketOption]] to set the SO_SNDBUF option.
  *
- * For more information see [[java.net.Socket.setSendBufferSize]]
+ * For more information see [[java.net.Socket#setSendBufferSize]]
  */
  val SendBufferSize = SO.SendBufferSize
@@ -168,7 +168,7 @@
  * type-of-service octet in the IP header for packets sent from this
  * socket.
  *
- * For more information see [[java.net.Socket.setTrafficClass]]
+ * For more information see [[java.net.Socket#setTrafficClass]]
  */
  val TrafficClass = SO.TrafficClass
  }
@@ -178,21 +178,21 @@
  /**
  * [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option
  *
- * For more information see [[java.net.Socket.setReceiveBufferSize]]
+ * For more information see [[java.net.Socket#setReceiveBufferSize]]
  */
  def receiveBufferSize(size: Int) = ReceiveBufferSize(size)
  /**
  * [[akka.io.Inet.SocketOption]] to enable or disable SO_REUSEADDR
  *
- * For more information see [[java.net.Socket.setReuseAddress]]
+ * For more information see [[java.net.Socket#setReuseAddress]]
  */
  def reuseAddress(on: Boolean) = ReuseAddress(on)
  /**
  * [[akka.io.Inet.SocketOption]] to set the SO_SNDBUF option.
  *
- * For more information see [[java.net.Socket.setSendBufferSize]]
+ * For more information see [[java.net.Socket#setSendBufferSize]]
  */
  def sendBufferSize(size: Int) = SendBufferSize(size)
@@ -201,7 +201,7 @@
  * type-of-service octet in the IP header for packets sent from this
  * socket.
  *
- * For more information see [[java.net.Socket.setTrafficClass]]
+ * For more information see [[java.net.Socket#setTrafficClass]]
  */
  def trafficClass(tc: Int) = TrafficClass(tc)
  }

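A hedged sketch of passing these socket options along with a connect request (an implicit ActorSystem named `system` is assumed, and the reply goes to the implicit sender):

import java.net.InetSocketAddress
import akka.io.{ IO, Inet, Tcp }

IO(Tcp) ! Tcp.Connect(
  new InetSocketAddress("example.com", 80),
  options = List(Inet.SO.ReceiveBufferSize(64 * 1024), Inet.SO.ReuseAddress(true)))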

@@ -54,7 +54,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  /**
  * [[akka.io.Inet.SocketOption]] to enable or disable SO_KEEPALIVE
  *
- * For more information see [[java.net.Socket.setKeepAlive]]
+ * For more information see `java.net.Socket.setKeepAlive`
  */
  final case class KeepAlive(on: Boolean) extends SocketOption {
  override def afterConnect(s: Socket): Unit = s.setKeepAlive(on)
@@ -65,7 +65,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  * of TCP urgent data) By default, this option is disabled and TCP urgent
  * data is silently discarded.
  *
- * For more information see [[java.net.Socket.setOOBInline]]
+ * For more information see `java.net.Socket.setOOBInline`
  */
  final case class OOBInline(on: Boolean) extends SocketOption {
  override def afterConnect(s: Socket): Unit = s.setOOBInline(on)
@@ -79,7 +79,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  *
  * Please note, that TCP_NODELAY is enabled by default.
  *
- * For more information see [[java.net.Socket.setTcpNoDelay]]
+ * For more information see `java.net.Socket.setTcpNoDelay`
  */
  final case class TcpNoDelay(on: Boolean) extends SocketOption {
  override def afterConnect(s: Socket): Unit = s.setTcpNoDelay(on)
@@ -109,7 +109,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  *
  * @param remoteAddress is the address to connect to
  * @param localAddress optionally specifies a specific address to bind to
- * @param options Please refer to the [[SO]] object for a list of all supported options.
+ * @param options Please refer to the `Tcp.SO` object for a list of all supported options.
  */
  final case class Connect(remoteAddress: InetSocketAddress,
  localAddress: Option[InetSocketAddress] = None,
@@ -134,7 +134,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  * @param backlog This specifies the number of unaccepted connections the O/S
  * kernel will hold for this port before refusing connections.
  *
- * @param options Please refer to the [[SO]] object for a list of all supported options.
+ * @param options Please refer to the `Tcp.SO` object for a list of all supported options.
  */
  final case class Bind(handler: ActorRef,
  localAddress: InetSocketAddress,
@@ -153,11 +153,11 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  *
  * @param keepOpenOnPeerClosed If this is set to true then the connection
  * is not automatically closed when the peer closes its half,
- * requiring an explicit [[Closed]] from our side when finished.
+ * requiring an explicit [[CloseCommand]] from our side when finished.
  *
  * @param useResumeWriting If this is set to true then the connection actor
  * will refuse all further writes after issuing a [[CommandFailed]]
- * notification until [[ResumeWriting]] is received. This can
+ * notification until `ResumeWriting` is received. This can
  * be used to implement NACK-based write backpressure.
  */
  final case class Register(handler: ActorRef, keepOpenOnPeerClosed: Boolean = false, useResumeWriting: Boolean = true) extends Command
@@ -183,7 +183,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  /**
  * A normal close operation will first flush pending writes and then close the
  * socket. The sender of this command and the registered handler for incoming
- * data will both be notified once the socket is closed using a [[Closed]]
+ * data will both be notified once the socket is closed using a `Closed`
  * message.
  */
  case object Close extends CloseCommand {
@@ -198,7 +198,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  * A confirmed close operation will flush pending writes and half-close the
  * connection, waiting for the peer to close the other half. The sender of this
  * command and the registered handler for incoming data will both be notified
- * once the socket is closed using a [[ConfirmedClosed]] message.
+ * once the socket is closed using a `ConfirmedClosed` message.
  */
  case object ConfirmedClose extends CloseCommand {
  /**
@@ -213,7 +213,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  * command to the O/S kernel which should result in a TCP_RST packet being sent
  * to the peer. The sender of this command and the registered handler for
  * incoming data will both be notified once the socket is closed using a
- * [[Aborted]] message.
+ * `Aborted` message.
  */
  case object Abort extends CloseCommand {
  /**
@@ -225,7 +225,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  /**
  * Each [[WriteCommand]] can optionally request a positive acknowledgment to be sent
- * to the commanding actor. If such notification is not desired the [[WriteCommand#ack]]
+ * to the commanding actor. If such notification is not desired the [[SimpleWriteCommand#ack]]
  * must be set to an instance of this class. The token contained within can be used
  * to recognize which write failed when receiving a [[CommandFailed]] message.
  */
@@ -238,7 +238,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  object NoAck extends NoAck(null)
  /**
- * Common interface for all write commands, currently [[Write]], [[WriteFile]] and [[CompoundWrite]].
+ * Common interface for all write commands.
  */
  sealed abstract class WriteCommand extends Command {
  /**
@@ -310,8 +310,8 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  /**
  * Write data to the TCP connection. If no ack is needed use the special
  * `NoAck` object. The connection actor will reply with a [[CommandFailed]]
- * message if the write could not be enqueued. If [[WriteCommand#wantsAck]]
- * returns true, the connection actor will reply with the supplied [[WriteCommand#ack]]
+ * message if the write could not be enqueued. If [[SimpleWriteCommand#wantsAck]]
+ * returns true, the connection actor will reply with the supplied [[SimpleWriteCommand#ack]]
  * token once the write has been successfully enqueued to the O/S kernel.
  * <b>Note that this does not in any way guarantee that the data will be
  * or have been sent!</b> Unfortunately there is no way to determine whether
@@ -336,9 +336,9 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  /**
  * Write `count` bytes starting at `position` from file at `filePath` to the connection.
- * The count must be > 0. The connection actor will reply with a [[CommandFailed]]
- * message if the write could not be enqueued. If [[WriteCommand#wantsAck]]
- * returns true, the connection actor will reply with the supplied [[WriteCommand#ack]]
+ * The count must be &gt; 0. The connection actor will reply with a [[CommandFailed]]
+ * message if the write could not be enqueued. If [[SimpleWriteCommand#wantsAck]]
+ * returns true, the connection actor will reply with the supplied [[SimpleWriteCommand#ack]]
  * token once the write has been successfully enqueued to the O/S kernel.
  * <b>Note that this does not in any way guarantee that the data will be
  * or have been sent!</b> Unfortunately there is no way to determine whether
@@ -385,12 +385,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  /**
  * Sending this command to the connection actor will disable reading from the TCP
  * socket. TCP flow-control will then propagate backpressure to the sender side
- * as buffers fill up on either end. To re-enable reading send [[ResumeReading]].
+ * as buffers fill up on either end. To re-enable reading send `ResumeReading`.
  */
  case object SuspendReading extends Command
  /**
- * This command needs to be sent to the connection actor after a [[SuspendReading]]
+ * This command needs to be sent to the connection actor after a `SuspendReading`
  * command in order to resume reading from the socket.
  */
  case object ResumeReading extends Command
@@ -430,7 +430,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  /**
  * When `useResumeWriting` is in effect as indicated in the [[Register]] message,
- * the [[ResumeWriting]] command will be acknowledged by this message type, upon
+ * the `ResumeWriting` command will be acknowledged by this message type, upon
  * which it is safe to send at least one write. This means that all writes preceding
  * the first [[CommandFailed]] message have been enqueued to the O/S kernel at this
  * point.
@@ -446,7 +446,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  final case class Bound(localAddress: InetSocketAddress) extends Event
  /**
- * The sender of an [[Unbind]] command will receive confirmation through this
+ * The sender of an `Unbind` command will receive confirmation through this
  * message once the listening socket has been closed.
  */
  sealed trait Unbound extends Event
@@ -458,12 +458,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  */
  sealed trait ConnectionClosed extends Event with DeadLetterSuppression {
  /**
- * `true` iff the connection has been closed in response to an [[Abort]] command.
+ * `true` iff the connection has been closed in response to an `Abort` command.
  */
  def isAborted: Boolean = false
  /**
  * `true` iff the connection has been fully closed in response to a
- * [[ConfirmedClose]] command.
+ * `ConfirmedClose` command.
  */
  def isConfirmed: Boolean = false
  /**
@@ -483,18 +483,18 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
  def getErrorCause: String = null
  }
  /**
- * The connection has been closed normally in response to a [[Close]] command.
+ * The connection has been closed normally in response to a `Close` command.
  */
  case object Closed extends ConnectionClosed
  /**
- * The connection has been aborted in response to an [[Abort]] command.
+ * The connection has been aborted in response to an `Abort` command.
  */
  case object Aborted extends ConnectionClosed {
  override def isAborted = true
  }
  /**
  * The connection has been half-closed by us and then half-close by the peer
- * in response to a [[ConfirmedClose]] command.
+ * in response to a `ConfirmedClose` command.
  */
  case object ConfirmedClosed extends ConnectionClosed {
  override def isConfirmed = true
@@ -585,7 +585,7 @@ object TcpSO extends SoJavaFactories {
  /**
  * [[akka.io.Inet.SocketOption]] to enable or disable SO_KEEPALIVE
  *
- * For more information see [[java.net.Socket.setKeepAlive]]
+ * For more information see `java.net.Socket.setKeepAlive`
  */
  def keepAlive(on: Boolean) = KeepAlive(on)
@@ -594,7 +594,7 @@ object TcpSO extends SoJavaFactories {
  * of TCP urgent data) By default, this option is disabled and TCP urgent
  * data is silently discarded.
  *
- * For more information see [[java.net.Socket.setOOBInline]]
+ * For more information see `java.net.Socket.setOOBInline`
  */
  def oobInline(on: Boolean) = OOBInline(on)
@@ -604,7 +604,7 @@ object TcpSO extends SoJavaFactories {
  *
  * Please note, that TCP_NODELAY is enabled by default.
  *
- * For more information see [[java.net.Socket.setTcpNoDelay]]
+ * For more information see `java.net.Socket.setTcpNoDelay`
  */
  def tcpNoDelay(on: Boolean) = TcpNoDelay(on)
  }
@@ -648,8 +648,8 @@ object TcpMessage {
  * @param handler The actor which will receive all incoming connection requests
  * in the form of [[Tcp.Connected]] messages.
  *
- * @param localAddress The socket address to bind to; use port zero for
- * automatic assignment (i.e. an ephemeral port, see [[Bound]])
+ * @param endpoint The socket address to bind to; use port zero for
+ * automatic assignment (i.e. an ephemeral port, see [[Tcp.Bound]])
  *
  * @param backlog This specifies the number of unaccepted connections the O/S
  * kernel will hold for this port before refusing connections.
@@ -682,11 +682,11 @@ object TcpMessage {
  *
  * @param keepOpenOnPeerClosed If this is set to true then the connection
  * is not automatically closed when the peer closes its half,
- * requiring an explicit [[Tcp.Closed]] from our side when finished.
+ * requiring an explicit `Tcp.ConnectionClosed` from our side when finished.
  *
  * @param useResumeWriting If this is set to true then the connection actor
  * will refuse all further writes after issuing a [[Tcp.CommandFailed]]
- * notification until [[Tcp.ResumeWriting]] is received. This can
+ * notification until [[Tcp]] `ResumeWriting` is received. This can
  * be used to implement NACK-based write backpressure.
  */
  def register(handler: ActorRef, keepOpenOnPeerClosed: Boolean, useResumeWriting: Boolean): Command =
@@ -699,14 +699,14 @@ object TcpMessage {
  /**
  * In order to close down a listening socket, send this message to that sockets
  * actor (that is the actor which previously had sent the [[Tcp.Bound]] message). The
- * listener socket actor will reply with a [[Tcp.Unbound]] message.
+ * listener socket actor will reply with a `Tcp.Unbound` message.
  */
  def unbind: Command = Unbind
  /**
  * A normal close operation will first flush pending writes and then close the
  * socket. The sender of this command and the registered handler for incoming
- * data will both be notified once the socket is closed using a [[Tcp.Closed]]
+ * data will both be notified once the socket is closed using a `Tcp.Closed`
  * message.
  */
  def close: Command = Close
@@ -715,7 +715,7 @@ object TcpMessage {
  * A confirmed close operation will flush pending writes and half-close the
  * connection, waiting for the peer to close the other half. The sender of this
  * command and the registered handler for incoming data will both be notified
- * once the socket is closed using a [[Tcp.ConfirmedClosed]] message.
+ * once the socket is closed using a `Tcp.ConfirmedClosed` message.
  */
  def confirmedClose: Command = ConfirmedClose
@@ -724,13 +724,13 @@ object TcpMessage {
  * command to the O/S kernel which should result in a TCP_RST packet being sent
  * to the peer. The sender of this command and the registered handler for
  * incoming data will both be notified once the socket is closed using a
- * [[Tcp.Aborted]] message.
+ * `Tcp.Aborted` message.
  */
  def abort: Command = Abort
  /**
  * Each [[Tcp.WriteCommand]] can optionally request a positive acknowledgment to be sent
- * to the commanding actor. If such notification is not desired the [[Tcp.WriteCommand#ack]]
+ * to the commanding actor. If such notification is not desired the [[Tcp.SimpleWriteCommand#ack]]
  * must be set to an instance of this class. The token contained within can be used
  * to recognize which write failed when receiving a [[Tcp.CommandFailed]] message.
  */
@@ -744,8 +744,8 @@ object TcpMessage {
  /**
  * Write data to the TCP connection. If no ack is needed use the special
  * `NoAck` object. The connection actor will reply with a [[Tcp.CommandFailed]]
- * message if the write could not be enqueued. If [[Tcp.WriteCommand#wantsAck]]
- * returns true, the connection actor will reply with the supplied [[Tcp.WriteCommand#ack]]
+ * message if the write could not be enqueued. If [[Tcp.SimpleWriteCommand#wantsAck]]
+ * returns true, the connection actor will reply with the supplied [[Tcp.SimpleWriteCommand#ack]]
  * token once the write has been successfully enqueued to the O/S kernel.
* <b>Note that this does not in any way guarantee that the data will be * <b>Note that this does not in any way guarantee that the data will be
* or have been sent!</b> Unfortunately there is no way to determine whether * or have been sent!</b> Unfortunately there is no way to determine whether
@ -759,9 +759,9 @@ object TcpMessage {
/** /**
* Write `count` bytes starting at `position` from file at `filePath` to the connection. * Write `count` bytes starting at `position` from file at `filePath` to the connection.
* The count must be > 0. The connection actor will reply with a [[Tcp.CommandFailed]] * The count must be &gt; 0. The connection actor will reply with a [[Tcp.CommandFailed]]
* message if the write could not be enqueued. If [[Tcp.WriteCommand#wantsAck]] * message if the write could not be enqueued. If [[Tcp.SimpleWriteCommand#wantsAck]]
* returns true, the connection actor will reply with the supplied [[Tcp.WriteCommand#ack]] * returns true, the connection actor will reply with the supplied [[Tcp.SimpleWriteCommand#ack]]
* token once the write has been successfully enqueued to the O/S kernel. * token once the write has been successfully enqueued to the O/S kernel.
* <b>Note that this does not in any way guarantee that the data will be * <b>Note that this does not in any way guarantee that the data will be
* or have been sent!</b> Unfortunately there is no way to determine whether * or have been sent!</b> Unfortunately there is no way to determine whether
@ -782,12 +782,12 @@ object TcpMessage {
/** /**
* Sending this command to the connection actor will disable reading from the TCP * Sending this command to the connection actor will disable reading from the TCP
* socket. TCP flow-control will then propagate backpressure to the sender side * socket. TCP flow-control will then propagate backpressure to the sender side
* as buffers fill up on either end. To re-enable reading send [[Tcp.ResumeReading]]. * as buffers fill up on either end. To re-enable reading send `Tcp.ResumeReading`.
*/ */
def suspendReading: Command = SuspendReading def suspendReading: Command = SuspendReading
/** /**
* This command needs to be sent to the connection actor after a [[Tcp.SuspendReading]] * This command needs to be sent to the connection actor after a `Tcp.SuspendReading`
* command in order to resume reading from the socket. * command in order to resume reading from the socket.
*/ */
def resumeReading: Command = ResumeReading def resumeReading: Command = ResumeReading
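Taken together, the commands documented above are typically used along these lines (a sketch only, assuming the standard akka.io.Tcp workflow; names and addresses are placeholders):

import java.net.InetSocketAddress
import akka.actor.Actor
import akka.io.{ IO, Tcp }

// Minimal echo server: bind to an ephemeral port, register for each connection, write back what arrives.
class EchoServer extends Actor {
  import context.system

  IO(Tcp) ! Tcp.Bind(self, new InetSocketAddress("127.0.0.1", 0)) // port 0 = ephemeral, reported via Bound

  def receive = {
    case Tcp.Bound(localAddress) => // listener is ready; localAddress carries the assigned port
    case Tcp.Connected(remote, local) =>
      sender() ! Tcp.Register(self) // route Tcp.Received events for this connection to this actor
    case Tcp.Received(data) =>
      sender() ! Tcp.Write(data) // echo back; the default ack is NoAck, so no write acknowledgement is requested
    case Tcp.PeerClosed =>
      context.stop(self)
  }
}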


@ -120,13 +120,13 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider {
* Send this message to a listener actor (which sent a [[Bound]] message) to * Send this message to a listener actor (which sent a [[Bound]] message) to
* have it stop reading datagrams from the network. If the O/S kernel's receive * have it stop reading datagrams from the network. If the O/S kernel's receive
* buffer runs full then subsequent datagrams will be silently discarded. * buffer runs full then subsequent datagrams will be silently discarded.
* Re-enable reading from the socket using the [[ResumeReading]] command. * Re-enable reading from the socket using the `ResumeReading` command.
*/ */
case object SuspendReading extends Command case object SuspendReading extends Command
/** /**
* This message must be sent to the listener actor to re-enable reading from * This message must be sent to the listener actor to re-enable reading from
* the socket after a [[SuspendReading]] command. * the socket after a `SuspendReading` command.
*/ */
case object ResumeReading extends Command case object ResumeReading extends Command
@ -161,7 +161,7 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider {
case object SimpleSenderReady extends SimpleSenderReady case object SimpleSenderReady extends SimpleSenderReady
/** /**
* This message is sent by the listener actor in response to an [[Unbind]] command * This message is sent by the listener actor in response to an `Unbind` command
* after the socket has been closed. * after the socket has been closed.
*/ */
sealed trait Unbound sealed trait Unbound
@ -312,13 +312,13 @@ object UdpMessage {
* Send this message to a listener actor (which sent a [[Udp.Bound]] message) to * Send this message to a listener actor (which sent a [[Udp.Bound]] message) to
* have it stop reading datagrams from the network. If the O/S kernel's receive * have it stop reading datagrams from the network. If the O/S kernel's receive
* buffer runs full then subsequent datagrams will be silently discarded. * buffer runs full then subsequent datagrams will be silently discarded.
* Re-enable reading from the socket using the [[Udp.ResumeReading]] command. * Re-enable reading from the socket using the `Udp.ResumeReading` command.
*/ */
def suspendReading: Command = SuspendReading def suspendReading: Command = SuspendReading
/** /**
* This message must be sent to the listener actor to re-enable reading from * This message must be sent to the listener actor to re-enable reading from
* the socket after a [[Udp.SuspendReading]] command. * the socket after a `Udp.SuspendReading` command.
*/ */
def resumeReading: Command = ResumeReading def resumeReading: Command = ResumeReading
} }
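A rough sketch of the suspend/resume flow described above for a UDP listener (assumed usage of the akka.io.Udp API, not part of this commit; the bind address is a placeholder):

import java.net.InetSocketAddress
import akka.actor.{ Actor, ActorRef }
import akka.io.{ IO, Udp }

// Listener that suspends reading while it processes each datagram, then resumes.
class ThrottledUdpListener extends Actor {
  import context.system

  IO(Udp) ! Udp.Bind(self, new InetSocketAddress("127.0.0.1", 0))

  def receive = {
    case Udp.Bound(local) => context.become(ready(sender()))
  }

  def ready(socket: ActorRef): Receive = {
    case Udp.Received(data, remote) =>
      socket ! Udp.SuspendReading // stop reading; further datagrams may be dropped by the kernel
      // ... process `data` here ...
      socket ! Udp.ResumeReading  // start reading again
    case Udp.Unbound =>
      context.stop(self)          // sent by the listener after an Unbind
  }
}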


@ -100,13 +100,13 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide
* Send this message to a listener actor (which sent a [[Udp.Bound]] message) to * Send this message to a listener actor (which sent a [[Udp.Bound]] message) to
* have it stop reading datagrams from the network. If the O/S kernel's receive * have it stop reading datagrams from the network. If the O/S kernel's receive
* buffer runs full then subsequent datagrams will be silently discarded. * buffer runs full then subsequent datagrams will be silently discarded.
* Re-enable reading from the socket using the [[ResumeReading]] command. * Re-enable reading from the socket using the `ResumeReading` command.
*/ */
case object SuspendReading extends Command case object SuspendReading extends Command
/** /**
* This message must be sent to the listener actor to re-enable reading from * This message must be sent to the listener actor to re-enable reading from
* the socket after a [[SuspendReading]] command. * the socket after a `SuspendReading` command.
*/ */
case object ResumeReading extends Command case object ResumeReading extends Command
@ -137,7 +137,7 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide
/** /**
* This message is sent by the connection actor to the actor which sent the * This message is sent by the connection actor to the actor which sent the
* [[Disconnect]] message when the UDP socket has been closed. * `Disconnect` message when the UDP socket has been closed.
*/ */
sealed trait Disconnected extends Event sealed trait Disconnected extends Event
case object Disconnected extends Disconnected case object Disconnected extends Disconnected
@ -231,13 +231,13 @@ object UdpConnectedMessage {
* Send this message to a listener actor (which sent a [[Udp.Bound]] message) to * Send this message to a listener actor (which sent a [[Udp.Bound]] message) to
* have it stop reading datagrams from the network. If the O/S kernel's receive * have it stop reading datagrams from the network. If the O/S kernel's receive
* buffer runs full then subsequent datagrams will be silently discarded. * buffer runs full then subsequent datagrams will be silently discarded.
* Re-enable reading from the socket using the [[UdpConnected.ResumeReading]] command. * Re-enable reading from the socket using the `UdpConnected.ResumeReading` command.
*/ */
def suspendReading: Command = SuspendReading def suspendReading: Command = SuspendReading
/** /**
* This message must be sent to the listener actor to re-enable reading from * This message must be sent to the listener actor to re-enable reading from
* the socket after a [[UdpConnected.SuspendReading]] command. * the socket after a `UdpConnected.SuspendReading` command.
*/ */
def resumeReading: Command = ResumeReading def resumeReading: Command = ResumeReading


@ -108,9 +108,8 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* Wraps invocations of asynchronous calls that need to be protected * Wraps invocations of asynchronous calls that need to be protected
* *
* @param body Call needing protection * @param body Call needing protection
* @tparam T return type from call
* @return [[scala.concurrent.Future]] containing the call result or a * @return [[scala.concurrent.Future]] containing the call result or a
* [[scala.concurrent.TimeoutException]] if the call timed out * `scala.concurrent.TimeoutException` if the call timed out
* *
*/ */
def withCircuitBreaker[T](body: Future[T]): Future[T] = currentState.invoke(body) def withCircuitBreaker[T](body: Future[T]): Future[T] = currentState.invoke(body)
@ -119,9 +118,8 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* Java API for [[#withCircuitBreaker]] * Java API for [[#withCircuitBreaker]]
* *
* @param body Call needing protection * @param body Call needing protection
* @tparam T return type from call
* @return [[scala.concurrent.Future]] containing the call result or a * @return [[scala.concurrent.Future]] containing the call result or a
* [[scala.concurrent.TimeoutException]] if the call timed out * `scala.concurrent.TimeoutException` if the call timed out
*/ */
def callWithCircuitBreaker[T](body: Callable[Future[T]]): Future[T] = withCircuitBreaker(body.call) def callWithCircuitBreaker[T](body: Callable[Future[T]]): Future[T] = withCircuitBreaker(body.call)
@ -129,12 +127,12 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* Wraps invocations of synchronous calls that need to be protected * Wraps invocations of synchronous calls that need to be protected
* *
* Calls are run in caller's thread. Because of the synchronous nature of * Calls are run in caller's thread. Because of the synchronous nature of
* this call the [[scala.concurrent.TimeoutException]] will only be thrown * this call the `scala.concurrent.TimeoutException` will only be thrown
* after the body has completed. * after the body has completed.
* *
* Throws java.util.concurrent.TimeoutException if the call timed out.
*
* @param body Call needing protection * @param body Call needing protection
* @tparam T return type from call
* @throws scala.concurrent.TimeoutException if the call timed out
* @return The result of the call * @return The result of the call
*/ */
def withSyncCircuitBreaker[T](body: T): T = def withSyncCircuitBreaker[T](body: T): T =
@ -143,11 +141,9 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
callTimeout) callTimeout)
/** /**
* Java API for [[#withSyncCircuitBreaker]] * Java API for [[#withSyncCircuitBreaker]]. Throws [[java.util.concurrent.TimeoutException]] if the call timed out.
* *
* @param body Call needing protection * @param body Call needing protection
* @tparam T return type from call
* @throws scala.concurrent.TimeoutException if the call timed out
* @return The result of the call * @return The result of the call
*/ */
def callWithSyncCircuitBreaker[T](body: Callable[T]): T = withSyncCircuitBreaker(body.call) def callWithSyncCircuitBreaker[T](body: Callable[T]): T = withSyncCircuitBreaker(body.call)
@ -223,11 +219,10 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
private[akka] def currentFailureCount: Int = Closed.get private[akka] def currentFailureCount: Int = Closed.get
/** /**
* Implements consistent transition between states * Implements consistent transition between states. Throws IllegalStateException if an invalid transition is attempted.
* *
* @param fromState State being transitioned from * @param fromState State being transitioned from
* @param toState State being transitioned to * @param toState State being transitioned to
* @throws IllegalStateException if an invalid transition is attempted
*/ */
private def transition(fromState: State, toState: State): Unit = private def transition(fromState: State, toState: State): Unit =
if (swapState(fromState, toState)) if (swapState(fromState, toState))
@ -254,6 +249,8 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
*/ */
private def attemptReset(): Unit = transition(Open, HalfOpen) private def attemptReset(): Unit = transition(Open, HalfOpen)
private val timeoutFuture = Future.failed(new TimeoutException("Circuit Breaker Timed out.") with NoStackTrace)
/** /**
* Internal state abstraction * Internal state abstraction
*/ */
@ -264,7 +261,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* Add a listener function which is invoked on state entry * Add a listener function which is invoked on state entry
* *
* @param listener listener implementation * @param listener listener implementation
* @tparam T return type of listener, not used - but supplied for type inference purposes
*/ */
def addListener(listener: Runnable): Unit = listeners add listener def addListener(listener: Runnable): Unit = listeners add listener
@ -295,7 +291,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* call timeout is counted as a failed call, otherwise a successful call * call timeout is counted as a failed call, otherwise a successful call
* *
* @param body Implementation of the call * @param body Implementation of the call
* @tparam T Return type of the call's implementation
* @return Future containing the result of the call * @return Future containing the result of the call
*/ */
def callThrough[T](body: Future[T]): Future[T] = { def callThrough[T](body: Future[T]): Future[T] = {
@ -308,14 +303,13 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
val p = Promise[T]() val p = Promise[T]()
implicit val ec = sameThreadExecutionContext implicit val ec = sameThreadExecutionContext
p.future.onComplete({ p.future.onComplete {
case s: Success[_] callSucceeds() case s: Success[_] callSucceeds()
case _ callFails() case _ callFails()
}) }
val timeout = scheduler.scheduleOnce(callTimeout) { val timeout = scheduler.scheduleOnce(callTimeout) {
p.tryCompleteWith( p tryCompleteWith timeoutFuture
Future.failed(new TimeoutException("Circuit Breaker Timed out.")))
} }
materialize(body).onComplete { result materialize(body).onComplete { result
@ -330,7 +324,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* Abstract entry point for all states * Abstract entry point for all states
* *
* @param body Implementation of the call that needs to be protected * @param body Implementation of the call that needs to be protected
* @tparam T Return type of protected call
* @return Future containing result of protected call * @return Future containing result of protected call
*/ */
def invoke[T](body: Future[T]): Future[T] def invoke[T](body: Future[T]): Future[T]
@ -373,7 +366,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* Implementation of invoke, which simply attempts the call * Implementation of invoke, which simply attempts the call
* *
* @param body Implementation of the call that needs to be protected * @param body Implementation of the call that needs to be protected
* @tparam T Return type of protected call
* @return Future containing result of protected call * @return Future containing result of protected call
*/ */
override def invoke[T](body: Future[T]): Future[T] = callThrough(body) override def invoke[T](body: Future[T]): Future[T] = callThrough(body)
@ -418,7 +410,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* If the call succeeds the breaker closes. * If the call succeeds the breaker closes.
* *
* @param body Implementation of the call that needs to be protected * @param body Implementation of the call that needs to be protected
* @tparam T Return type of protected call
* @return Future containing result of protected call * @return Future containing result of protected call
*/ */
override def invoke[T](body: Future[T]): Future[T] = override def invoke[T](body: Future[T]): Future[T] =
@ -462,7 +453,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
* Fail-fast on any invocation * Fail-fast on any invocation
* *
* @param body Implementation of the call that needs to be protected * @param body Implementation of the call that needs to be protected
* @tparam T Return type of protected call
* @return Future containing result of protected call * @return Future containing result of protected call
*/ */
override def invoke[T](body: Future[T]): Future[T] = override def invoke[T](body: Future[T]): Future[T] =
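For orientation, typical use of this class might look as follows (a sketch, assuming the public akka.pattern.CircuitBreaker API; the timeouts and the protected call are arbitrary):

import scala.concurrent.Future
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.pattern.CircuitBreaker

object CircuitBreakerExample extends App {
  val system = ActorSystem("breaker-demo")
  import system.dispatcher // ExecutionContext for the breaker and its futures

  // Open after 5 consecutive failures, time individual calls out after 10 seconds,
  // and attempt a reset (half-open) after 1 minute.
  val breaker = new CircuitBreaker(system.scheduler, maxFailures = 5, callTimeout = 10.seconds, resetTimeout = 1.minute)
    .onOpen(println("circuit breaker opened"))
    .onClose(println("circuit breaker closed"))

  // Asynchronous protection: the returned future fails with a TimeoutException on timeout.
  val asyncResult: Future[String] = breaker.withCircuitBreaker(Future("dangerous call"))

  // Synchronous protection: runs in the caller's thread and throws on timeout.
  val syncResult: String = breaker.withSyncCircuitBreaker("dangerous call")
}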


@ -1,8 +1,7 @@
package akka.pattern
/** /**
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com> * Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/ */
package akka.pattern
import scala.concurrent.duration.Duration import scala.concurrent.duration.Duration
import scala.concurrent.{ ExecutionContext, Promise, Future } import scala.concurrent.{ ExecutionContext, Promise, Future }


@ -100,7 +100,7 @@ case class DefaultResizer(
* <li> 0: number of routees currently processing a message.</li> * <li> 0: number of routees currently processing a message.</li>
* <li> 1: number of routees currently processing a message has * <li> 1: number of routees currently processing a message has
* some messages in mailbox.</li> * some messages in mailbox.</li>
* <li> > 1: number of routees with at least the configured `pressureThreshold` * <li> &gt; 1: number of routees with at least the configured `pressureThreshold`
* messages in their mailbox. Note that estimating mailbox size of * messages in their mailbox. Note that estimating mailbox size of
* default UnboundedMailbox is O(N) operation.</li> * default UnboundedMailbox is O(N) operation.</li>
* </ul> * </ul>
@ -180,7 +180,7 @@ case class DefaultResizer(
* <li> 0: number of routees currently processing a message.</li> * <li> 0: number of routees currently processing a message.</li>
* <li> 1: number of routees currently processing a message has * <li> 1: number of routees currently processing a message has
* some messages in mailbox.</li> * some messages in mailbox.</li>
* <li> > 1: number of routees with at least the configured `pressureThreshold` * <li> &gt; 1: number of routees with at least the configured `pressureThreshold`
* messages in their mailbox. Note that estimating mailbox size of * messages in their mailbox. Note that estimating mailbox size of
* default UnboundedMailbox is O(N) operation.</li> * default UnboundedMailbox is O(N) operation.</li>
* </ul> * </ul>
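The pressure levels above drive the resizing decisions; a resizer is usually attached to a pool router roughly like this (illustrative sketch, assuming the akka.routing pool API; the Worker actor is made up):

import akka.actor.{ Actor, ActorSystem, Props }
import akka.routing.{ DefaultResizer, RoundRobinPool }

object ResizerExample extends App {
  class Worker extends Actor {
    def receive = { case msg => sender() ! msg } // trivial routee, for illustration only
  }

  val system = ActorSystem("resizer-demo")

  // pressureThreshold = 1: a routee counts as pressured when it is busy and has
  // messages waiting in its mailbox (see the list above); the pool grows up to
  // 15 routees under pressure and shrinks back towards 2 when idle.
  val resizer = DefaultResizer(lowerBound = 2, upperBound = 15, pressureThreshold = 1)

  val workers = system.actorOf(
    RoundRobinPool(nrOfInstances = 2, resizer = Some(resizer)).props(Props(new Worker)),
    name = "workers")
}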


@ -115,8 +115,8 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
/** /**
* Returns the Serializer configured for the given object, returns the NullSerializer if it's null. * Returns the Serializer configured for the given object, returns the NullSerializer if it's null.
* *
* @throws akka.ConfigurationException if no `serialization-bindings` is configured for the * Throws akka.ConfigurationException if no `serialization-bindings` is configured for the
* class of the object * class of the object.
*/ */
def findSerializerFor(o: AnyRef): Serializer = o match { def findSerializerFor(o: AnyRef): Serializer = o match {
case null NullSerializer case null NullSerializer
@ -130,7 +130,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
* ambiguity it is primarily using the most specific configured class, * ambiguity it is primarily using the most specific configured class,
* and secondly the entry configured first. * and secondly the entry configured first.
* *
* @throws java.io.NotSerializableException if no `serialization-bindings` is configured for the class * Throws java.io.NotSerializableException if no `serialization-bindings` is configured for the class.
*/ */
def serializerFor(clazz: Class[_]): Serializer = def serializerFor(clazz: Class[_]): Serializer =
serializerMap.get(clazz) match { serializerMap.get(clazz) match {
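Both lookups above are usually reached through the SerializationExtension; a minimal round-trip sketch (the payload is arbitrary, and which serializer is picked depends on the configured serialization-bindings):

import akka.actor.ActorSystem
import akka.serialization.SerializationExtension

object SerializationExample extends App {
  val system = ActorSystem("serialization-demo")
  val serialization = SerializationExtension(system)

  val original = "hello"

  // Pick the serializer bound to this object's class; throws if nothing is configured for it.
  val serializer = serialization.findSerializerFor(original)
  val bytes = serializer.toBinary(original)

  // Round-trip: the manifest class tells the serializer what to deserialize into.
  val restored = serializer.fromBinary(bytes, Some(classOf[String]))
  println(restored == original) // true
}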


@ -139,7 +139,6 @@ object JavaSerializer {
* *
* @param value - the current value under the call to callable.call() * @param value - the current value under the call to callable.call()
* @param callable - the operation to be performed * @param callable - the operation to be performed
* @tparam S - the return type
* @return the result of callable.call() * @return the result of callable.call()
*/ */
def withValue[S](value: ExtendedActorSystem, callable: Callable[S]): S = super.withValue[S](value)(callable.call) def withValue[S](value: ExtendedActorSystem, callable: Callable[S]): S = super.withValue[S](value)(callable.call)


@ -11,9 +11,8 @@ import annotation.tailrec
/** /**
* BoundedBlockingQueue wraps any Queue and turns the result into a BlockingQueue with a limited capacity. * BoundedBlockingQueue wraps any Queue and turns the result into a BlockingQueue with a limited capacity.
* @param maxCapacity - the maximum capacity of this Queue, needs to be > 0 * @param maxCapacity - the maximum capacity of this Queue, needs to be &gt; 0
* @param backing - the backing Queue * @param backing - the backing Queue
* @tparam E - The type of the contents of this Queue
*/ */
class BoundedBlockingQueue[E <: AnyRef]( class BoundedBlockingQueue[E <: AnyRef](
val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] { val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] {


@ -543,19 +543,19 @@ abstract class ByteIterator extends BufferedIterator[Byte] {
/** /**
* Get a specific number of Bytes from this iterator. In contrast to * Get a specific number of Bytes from this iterator. In contrast to
* copyToArray, this method will fail if this.len < xs.length. * copyToArray, this method will fail if this.len &lt; xs.length.
*/ */
def getBytes(xs: Array[Byte]): this.type = getBytes(xs, 0, xs.length) def getBytes(xs: Array[Byte]): this.type = getBytes(xs, 0, xs.length)
/** /**
* Get a specific number of Bytes from this iterator. In contrast to * Get a specific number of Bytes from this iterator. In contrast to
* copyToArray, this method will fail if length < n or if (xs.length - offset) < n. * copyToArray, this method will fail if length &lt; n or if (xs.length - offset) &lt; n.
*/ */
def getBytes(xs: Array[Byte], offset: Int, n: Int): this.type def getBytes(xs: Array[Byte], offset: Int, n: Int): this.type
/** /**
* Get a specific number of Bytes from this iterator. In contrast to * Get a specific number of Bytes from this iterator. In contrast to
* copyToArray, this method will fail if this.len < n. * copyToArray, this method will fail if this.len &lt; n.
*/ */
def getBytes(n: Int): Array[Byte] = { def getBytes(n: Int): Array[Byte] = {
val bytes = new Array[Byte](n) val bytes = new Array[Byte](n)
@ -565,7 +565,7 @@ abstract class ByteIterator extends BufferedIterator[Byte] {
/** /**
* Get a ByteString with specific number of Bytes from this iterator. In contrast to * Get a ByteString with specific number of Bytes from this iterator. In contrast to
* copyToArray, this method will fail if this.len < n. * copyToArray, this method will fail if this.len &lt; n.
*/ */
def getByteString(n: Int): ByteString = { def getByteString(n: Int): ByteString = {
val bs = clone.take(n).toByteString val bs = clone.take(n).toByteString
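A small illustration of the getBytes/getByteString variants above (a sketch; the payload is arbitrary):

import akka.util.ByteString

object ByteIteratorExample extends App {
  val iter = ByteString("hello world").iterator

  val head: Array[Byte] = iter.getBytes(5)     // consumes "hello"; fails if fewer than 5 bytes remain
  iter.getByte                                 // skip the space
  val rest: ByteString = iter.getByteString(5) // remaining "world" as a ByteString

  println(new String(head, "UTF-8") + " / " + rest.utf8String)
}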


@ -13,7 +13,7 @@ import scala.collection.mutable
/** /**
* An implementation of a ConcurrentMultiMap * An implementation of a ConcurrentMultiMap
* Adds/remove is serialized over the specified key * Adds/remove is serialized over the specified key
* Reads are fully concurrent <-- el-cheapo * Reads are fully concurrent &lt;-- el-cheapo
*/ */
class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) {
@ -188,6 +188,6 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) {
/** /**
* An implementation of a ConcurrentMultiMap * An implementation of a ConcurrentMultiMap
* Adds/remove is serialized over the specified key * Adds/remove is serialized over the specified key
* Reads are fully concurrent <-- el-cheapo * Reads are fully concurrent &lt;-- el-cheapo
*/ */
class ConcurrentMultiMap[K, V](mapSize: Int, valueComparator: Comparator[V]) extends Index[K, V](mapSize, valueComparator) class ConcurrentMultiMap[K, V](mapSize: Int, valueComparator: Comparator[V]) extends Index[K, V](mapSize, valueComparator)
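Sketch of how such an index is typically used (assuming Index's put/valueIterator operations; the keys and values here are arbitrary):

import java.util.Comparator
import akka.util.Index

object IndexExample extends App {
  // Values stored under one key are ordered and deduplicated with this comparator.
  val subscribers = new Index[String, String](mapSize = 32, valueComparator = new Comparator[String] {
    def compare(a: String, b: String): Int = a compareTo b
  })

  subscribers.put("topic-a", "alice") // adds/removes for the same key are serialized
  subscribers.put("topic-a", "bob")
  subscribers.put("topic-b", "carol")

  // Reads are fully concurrent.
  println(subscribers.valueIterator("topic-a").toList) // List(alice, bob)
}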


@ -39,7 +39,6 @@ private[akka] object Reflect {
/** /**
* INTERNAL API * INTERNAL API
* @param clazz the class which to instantiate an instance of * @param clazz the class which to instantiate an instance of
* @tparam T the type of the instance that will be created
* @return a new instance from the default constructor of the given class * @return a new instance from the default constructor of the given class
*/ */
private[akka] def instantiate[T](clazz: Class[T]): T = try clazz.newInstance catch { private[akka] def instantiate[T](clazz: Class[T]): T = try clazz.newInstance catch {
@ -113,7 +112,6 @@ private[akka] object Reflect {
/** /**
* INTERNAL API * INTERNAL API
* @param clazz the class which to instantiate an instance of * @param clazz the class which to instantiate an instance of
* @tparam T the type of the instance that will be created
* @return a function which when applied will create a new instance from the default constructor of the given class * @return a function which when applied will create a new instance from the default constructor of the given class
*/ */
private[akka] def instantiator[T](clazz: Class[T]): () T = () instantiate(clazz) private[akka] def instantiator[T](clazz: Class[T]): () T = () instantiate(clazz)


@ -10,7 +10,6 @@ import java.util.{ AbstractQueue, Comparator, Iterator, PriorityQueue }
/** /**
* PriorityQueueStabilizer wraps a priority queue so that it respects FIFO for elements of equal priority. * PriorityQueueStabilizer wraps a priority queue so that it respects FIFO for elements of equal priority.
* @tparam E - The type of the elements of this Queue
*/ */
trait PriorityQueueStabilizer[E <: AnyRef] extends AbstractQueue[E] { trait PriorityQueueStabilizer[E <: AnyRef] extends AbstractQueue[E] {
val backingQueue: AbstractQueue[PriorityQueueStabilizer.WrappedElement[E]] val backingQueue: AbstractQueue[PriorityQueueStabilizer.WrappedElement[E]]
@ -58,9 +57,8 @@ object PriorityQueueStabilizer {
/** /**
* StablePriorityQueue is a priority queue that preserves order for elements of equal priority. * StablePriorityQueue is a priority queue that preserves order for elements of equal priority.
* @param capacity - the initial capacity of this Queue, needs to be > 0. * @param capacity - the initial capacity of this Queue, needs to be &gt; 0.
* @param cmp - Comparator for comparing Queue elements * @param cmp - Comparator for comparing Queue elements
* @tparam E - The type of the elements of this Queue
*/ */
class StablePriorityQueue[E <: AnyRef](capacity: Int, cmp: Comparator[E]) extends PriorityQueueStabilizer[E] { class StablePriorityQueue[E <: AnyRef](capacity: Int, cmp: Comparator[E]) extends PriorityQueueStabilizer[E] {
val backingQueue = new PriorityQueue[PriorityQueueStabilizer.WrappedElement[E]]( val backingQueue = new PriorityQueue[PriorityQueueStabilizer.WrappedElement[E]](
@ -70,9 +68,8 @@ class StablePriorityQueue[E <: AnyRef](capacity: Int, cmp: Comparator[E]) extend
/** /**
* StablePriorityBlockingQueue is a blocking priority queue that preserves order for elements of equal priority. * StablePriorityBlockingQueue is a blocking priority queue that preserves order for elements of equal priority.
* @param capacity - the initial capacity of this Queue, needs to be > 0. * @param capacity - the initial capacity of this Queue, needs to be &gt; 0.
* @param cmp - Comparator for comparing Queue elements * @param cmp - Comparator for comparing Queue elements
* @tparam E - The type of the elements of this Queue
*/ */
class StablePriorityBlockingQueue[E <: AnyRef](capacity: Int, cmp: Comparator[E]) extends PriorityQueueStabilizer[E] { class StablePriorityBlockingQueue[E <: AnyRef](capacity: Int, cmp: Comparator[E]) extends PriorityQueueStabilizer[E] {
val backingQueue = new PriorityBlockingQueue[PriorityQueueStabilizer.WrappedElement[E]]( val backingQueue = new PriorityBlockingQueue[PriorityQueueStabilizer.WrappedElement[E]](
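Illustration of the stability property described above (a sketch assuming the queue exposes the usual java.util.Queue operations via AbstractQueue; the contents are arbitrary):

import java.util.Comparator
import akka.util.StablePriorityQueue

object StableQueueExample extends App {
  // Shorter strings have higher priority; equal-length strings keep insertion (FIFO) order.
  val byLength = new Comparator[String] {
    def compare(a: String, b: String): Int = Integer.compare(a.length, b.length)
  }

  val queue = new StablePriorityQueue[String](11, byLength) // initial capacity 11

  queue.offer("bbb")
  queue.offer("ccc") // same priority as "bbb", so it must come out after it
  queue.offer("a")

  println(Iterator.continually(queue.poll()).takeWhile(_ ne null).toList) // List(a, bbb, ccc)
}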


@ -119,13 +119,11 @@ object Agent {
* // use result ... * // use result ...
* *
* }}} * }}}
* <br/>
* *
* Agent is also monadic, which means that you can compose operations using * Agent is also monadic, which means that you can compose operations using
* for-comprehensions. In monadic usage the original agents are not touched * for-comprehensions. In monadic usage the original agents are not touched
* but new agents are created. So the old values (agents) are still available * but new agents are created. So the old values (agents) are still available
* as-is. They are so-called 'persistent'. * as-is. They are so-called 'persistent'.
* <br/><br/>
* *
* Example of monadic usage: * Example of monadic usage:
* {{{ * {{{


@ -80,9 +80,6 @@ class CamelSettings private[camel] (config: Config, dynamicAccess: DynamicAccess
*/ */
final val AutoAck: Boolean = config.getBoolean("akka.camel.consumer.auto-ack") final val AutoAck: Boolean = config.getBoolean("akka.camel.consumer.auto-ack")
/**
*
*/
final val JmxStatistics: Boolean = config.getBoolean("akka.camel.jmx") final val JmxStatistics: Boolean = config.getBoolean("akka.camel.jmx")
/** /**
@ -127,7 +124,6 @@ class CamelSettings private[camel] (config: Config, dynamicAccess: DynamicAccess
* *
* @see akka.actor.ExtensionId * @see akka.actor.ExtensionId
* @see akka.actor.ExtensionIdProvider * @see akka.actor.ExtensionIdProvider
*
*/ */
object CamelExtension extends ExtensionId[Camel] with ExtensionIdProvider { object CamelExtension extends ExtensionId[Camel] with ExtensionIdProvider {


@ -49,7 +49,6 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) {
* in a [[scala.util.Success]]. If an exception occurs during the conversion to the type <code>T</code> or when the header cannot be found, * in a [[scala.util.Success]]. If an exception occurs during the conversion to the type <code>T</code> or when the header cannot be found,
* the exception is returned in a [[scala.util.Failure]]. * the exception is returned in a [[scala.util.Failure]].
* *
* <p>
* The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]] * The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]]
* using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]]. * using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]].
* *
@ -60,7 +59,7 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) {
/** /**
* Java API: Returns the header by given <code>name</code> parameter. The header is converted to type <code>T</code> as defined by the <code>clazz</code> parameter. * Java API: Returns the header by given <code>name</code> parameter. The header is converted to type <code>T</code> as defined by the <code>clazz</code> parameter.
* An exception is thrown when the conversion to the type <code>T</code> fails or when the header cannot be found. * An exception is thrown when the conversion to the type <code>T</code> fails or when the header cannot be found.
* <p> *
* The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]] * The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]]
* using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]]. * using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]].
*/ */
@ -133,7 +132,6 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) {
/** /**
* Companion object of CamelMessage class. * Companion object of CamelMessage class.
*
*/ */
object CamelMessage { object CamelMessage {
@ -176,7 +174,6 @@ object CamelMessage {
/** /**
* Positive acknowledgement message (used for application-acknowledged message receipts). * Positive acknowledgement message (used for application-acknowledged message receipts).
* When `autoAck` is set to false in the [[akka.camel.Consumer]], you can send an `Ack` to the sender of the CamelMessage. * When `autoAck` is set to false in the [[akka.camel.Consumer]], you can send an `Ack` to the sender of the CamelMessage.
*
*/ */
case object Ack { case object Ack {
/** Java API to get the Ack singleton */ /** Java API to get the Ack singleton */


@ -14,8 +14,6 @@ import scala.language.existentials
/** /**
* Mixed in by Actor implementations that consume message from Camel endpoints. * Mixed in by Actor implementations that consume message from Camel endpoints.
*
*
*/ */
trait Consumer extends Actor with CamelSupport { trait Consumer extends Actor with CamelSupport {
import Consumer._ import Consumer._
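For orientation, a consumer endpoint typically looks like this (a sketch; the jetty URI and reply text are placeholders and assume the camel-jetty component is on the classpath):

import akka.camel.{ CamelMessage, Consumer }

// Exposes an HTTP endpoint through Camel and answers each request from the actor.
class HttpConsumer extends Consumer {
  def endpointUri = "jetty:http://0.0.0.0:8877/camel/echo"

  def receive = {
    case msg: CamelMessage =>
      sender() ! ("received: " + msg.body) // the reply becomes the HTTP response body
  }
}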


@ -54,7 +54,7 @@ trait ProducerSupport extends Actor with CamelSupport {
* actually sent it is pre-processed by calling <code>transformOutgoingMessage</code>. If <code>oneway</code> * actually sent it is pre-processed by calling <code>transformOutgoingMessage</code>. If <code>oneway</code>
* is <code>true</code>, an in-only message exchange is initiated, otherwise an in-out message exchange. * is <code>true</code>, an in-only message exchange is initiated, otherwise an in-out message exchange.
* *
* @see Producer#produce(Any, ExchangePattern) * @see Producer#produce
*/ */
protected def produce: Receive = { protected def produce: Receive = {
case CamelProducerObjects(endpoint, processor) case CamelProducerObjects(endpoint, processor)
@ -119,7 +119,7 @@ trait ProducerSupport extends Actor with CamelSupport {
* response is received asynchronously, the <code>receiveAfterProduce</code> is called * response is received asynchronously, the <code>receiveAfterProduce</code> is called
* asynchronously. The original sender is preserved. * asynchronously. The original sender is preserved.
* *
* @see CamelMessage#canonicalize(Any) * @see CamelMessage#canonicalize
* @param endpoint the endpoint * @param endpoint the endpoint
* @param processor the processor * @param processor the processor
* @param msg message to produce * @param msg message to produce


@ -1,8 +1,7 @@
package akka.camel.internal
/** /**
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com> * Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/ */
package akka.camel.internal
import akka.actor.ActorRef import akka.actor.ActorRef


@ -9,7 +9,7 @@ import akka.camel.{ FailureResult, AkkaCamelException, CamelMessage }
/** /**
* INTERNAL API * INTERNAL API
* Adapter for converting an [[org.apache.camel.Exchange]] to and from [[akka.camel.CamelMessage]] and [[akka.camel.Failure]] objects. * Adapter for converting an [[org.apache.camel.Exchange]] to and from [[akka.camel.CamelMessage]] and [[akka.camel.FailureResult]] objects.
* The org.apache.camel.Message is mutable and not suitable to be used directly as messages between Actors. * The org.apache.camel.Message is mutable and not suitable to be used directly as messages between Actors.
* This adapter is used to convert to immutable messages to be used with Actors, and convert the immutable messages back * This adapter is used to convert to immutable messages to be used with Actors, and convert the immutable messages back
* to org.apache.camel.Message when using Camel. * to org.apache.camel.Message when using Camel.
@ -62,8 +62,6 @@ private[camel] class CamelExchangeAdapter(val exchange: Exchange) {
* on the AkkaCamelException. * on the AkkaCamelException.
* *
* If the exchange is out-capable then the headers of Exchange.getOut are used, otherwise the headers of Exchange.getIn are used. * If the exchange is out-capable then the headers of Exchange.getOut are used, otherwise the headers of Exchange.getIn are used.
*
* @see AkkaCamelException
*/ */
def toAkkaCamelException: AkkaCamelException = toAkkaCamelException(Map.empty) def toAkkaCamelException: AkkaCamelException = toAkkaCamelException(Map.empty)
@ -78,8 +76,6 @@ private[camel] class CamelExchangeAdapter(val exchange: Exchange) {
* *
* @param headers additional headers to set on the exception in addition to those * @param headers additional headers to set on the exception in addition to those
* in the exchange. * in the exchange.
*
* @see AkkaCamelException
*/ */
def toAkkaCamelException(headers: Map[String, Any]): AkkaCamelException = { def toAkkaCamelException(headers: Map[String, Any]): AkkaCamelException = {
import scala.collection.JavaConversions._ import scala.collection.JavaConversions._
@ -88,8 +84,6 @@ private[camel] class CamelExchangeAdapter(val exchange: Exchange) {
/** /**
* Creates an immutable Failure object from the adapted Exchange so it can be used internally between Actors. * Creates an immutable Failure object from the adapted Exchange so it can be used internally between Actors.
*
* @see Failure
*/ */
def toFailureMessage: FailureResult = toFailureResult(Map.empty) def toFailureMessage: FailureResult = toFailureResult(Map.empty)
@ -98,8 +92,6 @@ private[camel] class CamelExchangeAdapter(val exchange: Exchange) {
* *
* @param headers additional headers to set on the created CamelMessage in addition to those * @param headers additional headers to set on the created CamelMessage in addition to those
* in the Camel message. * in the Camel message.
*
* @see Failure
*/ */
def toFailureResult(headers: Map[String, Any]): FailureResult = { def toFailureResult(headers: Map[String, Any]): FailureResult = {
import scala.collection.JavaConversions._ import scala.collection.JavaConversions._


@ -47,7 +47,7 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel
/** /**
* Starts camel and underlying camel context and template. * Starts camel and underlying camel context and template.
* Only the creator of Camel should start and stop it. * Only the creator of Camel should start and stop it.
* @see akka.camel.DefaultCamel#shutdown() * @see akka.camel.internal.DefaultCamel#shutdown
*/ */
def start(): this.type = { def start(): this.type = {
context.start() context.start()
@ -61,7 +61,7 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel
* Only the creator of Camel should shut it down. * Only the creator of Camel should shut it down.
* There is no need to stop Camel instance, which you get from the CamelExtension, as its lifecycle is bound to the actor system. * There is no need to stop Camel instance, which you get from the CamelExtension, as its lifecycle is bound to the actor system.
* *
* @see akka.camel.DefaultCamel#start() * @see akka.camel.internal.DefaultCamel#start
*/ */
def shutdown(): Unit = { def shutdown(): Unit = {
try context.stop() finally { try context.stop() finally {


@ -30,8 +30,6 @@ import scala.util.{ Failure, Success, Try }
* `ActorComponent` to the [[org.apache.camel.CamelContext]] under the 'actor' component name. * `ActorComponent` to the [[org.apache.camel.CamelContext]] under the 'actor' component name.
* Messages are sent to [[akka.camel.Consumer]] actors through a [[akka.camel.internal.component.ActorEndpoint]] that * Messages are sent to [[akka.camel.Consumer]] actors through a [[akka.camel.internal.component.ActorEndpoint]] that
* this component provides. * this component provides.
*
*
*/ */
private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends DefaultComponent { private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends DefaultComponent {
/** /**
@ -51,8 +49,6 @@ private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends D
* Actors are referenced using actor endpoint URIs of the following format: * Actors are referenced using actor endpoint URIs of the following format:
* <code>[actorPath]?[options]%s</code>, * <code>[actorPath]?[options]%s</code>,
* where <code>[actorPath]</code> refers to the actor path to the actor. * where <code>[actorPath]</code> refers to the actor path to the actor.
*
*
*/ */
private[camel] class ActorEndpoint(uri: String, private[camel] class ActorEndpoint(uri: String,
comp: ActorComponent, comp: ActorComponent,
@ -87,7 +83,6 @@ private[camel] class ActorEndpoint(uri: String,
/** /**
* INTERNAL API * INTERNAL API
* Configures the `ActorEndpoint`. This needs to be a `bean` for Camel purposes. * Configures the `ActorEndpoint`. This needs to be a `bean` for Camel purposes.
*
*/ */
private[camel] trait ActorEndpointConfig { private[camel] trait ActorEndpointConfig {
def path: ActorEndpointPath def path: ActorEndpointPath
@ -99,12 +94,10 @@ private[camel] trait ActorEndpointConfig {
} }
/** /**
* Sends the in-message of an exchange to an untyped actor, identified by an [[akka.camel.internal.component.ActorEndPoint]] * Sends the in-message of an exchange to an untyped actor, identified by an [[akka.camel.internal.component.ActorEndpoint]]
*
* @see akka.camel.component.ActorComponent
* @see akka.camel.component.ActorEndpoint
*
* *
* @see akka.camel.internal.component.ActorComponent
* @see akka.camel.internal.component.ActorEndpoint
*/ */
private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) extends DefaultProducer(endpoint) with AsyncProcessor { private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) extends DefaultProducer(endpoint) with AsyncProcessor {
/** /**

View file

@ -11,8 +11,6 @@ import org.apache.camel.impl.DefaultCamelContext
/** /**
* Subclass this abstract class to create an untyped producer actor. This class is meant to be used from Java. * Subclass this abstract class to create an untyped producer actor. This class is meant to be used from Java.
*
*
*/ */
abstract class UntypedProducerActor extends UntypedActor with ProducerSupport { abstract class UntypedProducerActor extends UntypedActor with ProducerSupport {
/** /**


@ -202,7 +202,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging {
} }
/** /**
* Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus.Up]]. * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus]] `Up`.
*/ */
def receiveState(state: CurrentClusterState): Unit = def receiveState(state: CurrentClusterState): Unit =
nodes = (state.members -- state.unreachable) collect { case m if m.status == MemberStatus.Up m.address } nodes = (state.members -- state.unreachable) collect { case m if m.status == MemberStatus.Up m.address }


@ -10,7 +10,7 @@ import akka.util.Helpers.ConfigOps
/** /**
* Default [[ClusterMetricsSupervisor]] strategy: * Default [[ClusterMetricsSupervisor]] strategy:
* A configurable [[OneForOneStrategy]] with restart-on-throwable decider. * A configurable [[akka.actor.OneForOneStrategy]] with restart-on-throwable decider.
*/ */
class ClusterMetricsStrategy(config: Config) extends OneForOneStrategy( class ClusterMetricsStrategy(config: Config) extends OneForOneStrategy(
maxNrOfRetries = config.getInt("maxNrOfRetries"), maxNrOfRetries = config.getInt("maxNrOfRetries"),
@ -25,7 +25,7 @@ object ClusterMetricsStrategy {
import akka.actor.SupervisorStrategy._ import akka.actor.SupervisorStrategy._
/** /**
* [[SupervisorStrategy.Decider]] which makes it possible to survive intermittent Sigar native method call failures. * [[akka.actor.SupervisorStrategy]] `Decider` which makes it possible to survive intermittent Sigar native method call failures.
*/ */
val metricsDecider: SupervisorStrategy.Decider = { val metricsDecider: SupervisorStrategy.Decider = {
case _: ActorInitializationException Stop case _: ActorInitializationException Stop


@ -33,7 +33,7 @@ final case class EWMA(value: Double, alpha: Double) {
* Calculates the exponentially weighted moving average for a given monitored data set. * Calculates the exponentially weighted moving average for a given monitored data set.
* *
* @param xn the new data point * @param xn the new data point
* @return a new [[akka.cluster.EWMA]] with the updated value * @return a new EWMA with the updated value
*/ */
def :+(xn: Double): EWMA = { def :+(xn: Double): EWMA = {
val newValue = (alpha * xn) + (1 - alpha) * value val newValue = (alpha * xn) + (1 - alpha) * value
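A short illustration of the update operator (a sketch; the sample values are arbitrary):

import akka.cluster.metrics.EWMA

object EwmaExample extends App {
  // alpha close to 1.0 reacts quickly to new samples, close to 0.0 smooths heavily.
  var smoothed = EWMA(value = 0.0, alpha = 0.1)

  // Each `:+` returns a new EWMA carrying the updated value.
  Seq(100.0, 110.0, 90.0, 400.0).foreach { sample =>
    smoothed = smoothed :+ sample
  }

  println(smoothed.value)
}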


@ -109,7 +109,7 @@ object StandardMetrics {
final val SystemLoadAverage = "system-load-average" final val SystemLoadAverage = "system-load-average"
final val Processors = "processors" final val Processors = "processors"
// In latest Linux kernels: CpuCombined + CpuStolen + CpuIdle = 1.0 or 100%. // In latest Linux kernels: CpuCombined + CpuStolen + CpuIdle = 1.0 or 100%.
/** Sum of User + Sys + Nice + Wait. See [[org.hyperic.sigar.CpuPerc]] */ /** Sum of User + Sys + Nice + Wait. See `org.hyperic.sigar.CpuPerc` */
final val CpuCombined = "cpu-combined" final val CpuCombined = "cpu-combined"
/** The amount of CPU 'stolen' from this virtual machine by the hypervisor for other tasks (such as running another virtual machine). */ /** The amount of CPU 'stolen' from this virtual machine by the hypervisor for other tasks (such as running another virtual machine). */
final val CpuStolen = "cpu-stolen" final val CpuStolen = "cpu-stolen"
@ -146,9 +146,9 @@ object StandardMetrics {
} }
/** /**
* The amount of used and committed memory will always be <= max if max is defined. * The amount of used and committed memory will always be &lt;= max if max is defined.
* A memory allocation may fail if it attempts to increase the used memory such that used > committed * A memory allocation may fail if it attempts to increase the used memory such that used &gt; committed
* even if used <= max is true (e.g. when the system virtual memory is low). * even if used &lt;= max is true (e.g. when the system virtual memory is low).
* *
* @param address [[akka.actor.Address]] of the node the metrics are gathered at * @param address [[akka.actor.Address]] of the node the metrics are gathered at
* @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC
@ -269,7 +269,7 @@ private[metrics] trait MetricNumericConverter {
* *
* @param address [[akka.actor.Address]] of the node the metrics are gathered at * @param address [[akka.actor.Address]] of the node the metrics are gathered at
* @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC
* @param metrics the set of sampled [[akka.actor.Metric]] * @param metrics the set of sampled [[akka.cluster.metrics.Metric]]
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Metric] = Set.empty[Metric]) { final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Metric] = Set.empty[Metric]) {
@ -344,7 +344,7 @@ private[metrics] object MetricsGossip {
private[metrics] final case class MetricsGossip(nodes: Set[NodeMetrics]) { private[metrics] final case class MetricsGossip(nodes: Set[NodeMetrics]) {
/** /**
* Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus.Up]] * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus]] `Up`.
*/ */
def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node)) def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node))
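Node metrics such as the heap values above are usually consumed by subscribing to the metrics extension (a sketch assuming the akka.cluster.metrics event API; the listener logic is illustrative):

import akka.actor.Actor
import akka.cluster.metrics.{ ClusterMetricsChanged, ClusterMetricsExtension, StandardMetrics }

// Subscribes to the periodic metrics gossip and prints the heap usage of each reporting node.
class MetricsListener extends Actor {
  val extension = ClusterMetricsExtension(context.system)

  override def preStart(): Unit = extension.subscribe(self)
  override def postStop(): Unit = extension.unsubscribe(self)

  def receive = {
    case ClusterMetricsChanged(nodeMetrics) =>
      nodeMetrics.foreach {
        case StandardMetrics.HeapMemory(address, _, used, committed, _) =>
          println(s"$address heap used: ${used / 1024 / 1024} MB (committed: ${committed / 1024 / 1024} MB)")
        case _ => // node without heap metrics
      }
  }
}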


@ -237,7 +237,7 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP
* theoretically. Note that 99% CPU utilization can be optimal or indicative of failure. * theoretically. Note that 99% CPU utilization can be optimal or indicative of failure.
* *
* In the data stream, this will sometimes return with a valid metric value, and sometimes as a NaN or Infinite. * In the data stream, this will sometimes return with a valid metric value, and sometimes as a NaN or Infinite.
* Documented bug https://bugzilla.redhat.com/show_bug.cgi?id=749121 and several others. * Documented bug <a href="https://bugzilla.redhat.com/show_bug.cgi?id=749121">749121</a> and several others.
* *
* Creates a new instance each time. * Creates a new instance each time.
*/ */
@ -248,8 +248,8 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP
/** /**
* (SIGAR) Returns the stolen CPU time. Relevant to virtual hosting environments. * (SIGAR) Returns the stolen CPU time. Relevant to virtual hosting environments.
* For details please see: [[http://en.wikipedia.org/wiki/CPU_time#Subdivision Wikipedia - CPU time subdivision]] and * For details please see: <a href="http://en.wikipedia.org/wiki/CPU_time#Subdivision">Wikipedia - CPU time subdivision</a> and
* [[https://www.datadoghq.com/2013/08/understanding-aws-stolen-cpu-and-how-it-affects-your-apps/ Understanding AWS stolen CPU and how it affects your apps]] * <a href="https://www.datadoghq.com/2013/08/understanding-aws-stolen-cpu-and-how-it-affects-your-apps/">Understanding AWS stolen CPU and how it affects your apps</a>
* *
* Creates a new instance each time. * Creates a new instance each time.
*/ */


@ -15,14 +15,14 @@ import scala.util.Failure
import scala.util.Try import scala.util.Try
/** /**
* Provide sigar instance as [[SigarProxy]]. * Provide sigar instance as `SigarProxy`.
* *
* User can provision sigar classes and native library in one of the following ways: * User can provision sigar classes and native library in one of the following ways:
* *
* 1) Use [[https://github.com/kamon-io/sigar-loader Kamon sigar-loader]] as a project dependency for the user project. * 1) Use <a href="https://github.com/kamon-io/sigar-loader">Kamon sigar-loader</a> as a project dependency for the user project.
* Metrics extension will extract and load sigar library on demand with help of Kamon sigar provisioner. * Metrics extension will extract and load sigar library on demand with help of Kamon sigar provisioner.
* *
* 2) Use [[https://github.com/kamon-io/sigar-loader Kamon sigar-loader]] as java agent: `java -javaagent:/path/to/sigar-loader.jar` * 2) Use <a href="https://github.com/kamon-io/sigar-loader">Kamon sigar-loader</a> as java agent: `java -javaagent:/path/to/sigar-loader.jar`
* Kamon sigar loader agent will extract and load sigar library during JVM start. * Kamon sigar loader agent will extract and load sigar library during JVM start.
* *
* 3) Place `sigar.jar` on the `classpath` and sigar native library for the o/s on the `java.library.path` * 3) Place `sigar.jar` on the `classpath` and sigar native library for the o/s on the `java.library.path`
@ -79,7 +79,7 @@ object SigarProvider {
/** /**
* Release underlying sigar proxy resources. * Release underlying sigar proxy resources.
* *
* Note: [[SigarProxy]] is not [[Sigar]] during tests. * Note: `SigarProxy` is not `Sigar` during tests.
*/ */
def close(sigar: SigarProxy) = { def close(sigar: SigarProxy) = {
if (sigar.isInstanceOf[Sigar]) sigar.asInstanceOf[Sigar].close() if (sigar.isInstanceOf[Sigar]) sigar.asInstanceOf[Sigar].close()
@ -87,7 +87,7 @@ object SigarProvider {
} }
/** /**
* Provide sigar instance as [[SigarProxy]] with configured location via [[ClusterMetricsSettings]]. * Provide sigar instance as `SigarProxy` with configured location via [[ClusterMetricsSettings]].
*/ */
case class DefaultSigarProvider(settings: ClusterMetricsSettings) extends SigarProvider { case class DefaultSigarProvider(settings: ClusterMetricsSettings) extends SigarProvider {
def extractFolder = settings.NativeLibraryExtractFolder def extractFolder = settings.NativeLibraryExtractFolder


@ -19,7 +19,7 @@ import scala.annotation.tailrec
import scala.collection.JavaConverters.{ asJavaIterableConverter, asScalaBufferConverter, setAsJavaSetConverter } import scala.collection.JavaConverters.{ asJavaIterableConverter, asScalaBufferConverter, setAsJavaSetConverter }
/** /**
* Protobuf serializer for [[ClusterMetricsMessage]] types. * Protobuf serializer for [[akka.cluster.metrics.ClusterMetricsMessage]] types.
*/ */
class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer { class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer {


@ -1433,7 +1433,7 @@ object ShardCoordinator {
* supposed to discard their known location of the shard, i.e. start buffering * supposed to discard their known location of the shard, i.e. start buffering
* incoming messages for the shard. They reply with [[BeginHandOffAck]]. * incoming messages for the shard. They reply with [[BeginHandOffAck]].
* When all have replied the `ShardCoordinator` continues by sending * When all have replied the `ShardCoordinator` continues by sending
* [[HandOff]] to the `ShardRegion` responsible for the shard. * `HandOff` to the `ShardRegion` responsible for the shard.
*/ */
@SerialVersionUID(1L) final case class BeginHandOff(shard: ShardId) extends CoordinatorMessage @SerialVersionUID(1L) final case class BeginHandOff(shard: ShardId) extends CoordinatorMessage
/** /**
@ -1441,14 +1441,14 @@ object ShardCoordinator {
*/ */
@SerialVersionUID(1L) final case class BeginHandOffAck(shard: ShardId) extends CoordinatorCommand @SerialVersionUID(1L) final case class BeginHandOffAck(shard: ShardId) extends CoordinatorCommand
/** /**
* When all `ShardRegion` actors have acknowledged the [[BeginHandOff]] the * When all `ShardRegion` actors have acknowledged the `BeginHandOff` the
* ShardCoordinator` sends this message to the `ShardRegion` responsible for the * `ShardCoordinator` sends this message to the `ShardRegion` responsible for the
* shard. The `ShardRegion` is supposed to stop all entries in that shard and when * shard. The `ShardRegion` is supposed to stop all entries in that shard and when
* all entries have terminated reply with `ShardStopped` to the `ShardCoordinator`. * all entries have terminated reply with `ShardStopped` to the `ShardCoordinator`.
*/ */
@SerialVersionUID(1L) final case class HandOff(shard: ShardId) extends CoordinatorMessage @SerialVersionUID(1L) final case class HandOff(shard: ShardId) extends CoordinatorMessage
/** /**
* Reply to [[HandOff]] when all entries in the shard have been terminated. * Reply to `HandOff` when all entries in the shard have been terminated.
*/ */
@SerialVersionUID(1L) final case class ShardStopped(shard: ShardId) extends CoordinatorCommand @SerialVersionUID(1L) final case class ShardStopped(shard: ShardId) extends CoordinatorCommand
@ -1527,8 +1527,8 @@ object ShardCoordinator {
/** /**
* INTERNAL API. Rebalancing process is performed by this actor. * INTERNAL API. Rebalancing process is performed by this actor.
* It sends [[BeginHandOff]] to all `ShardRegion` actors followed by * It sends `BeginHandOff` to all `ShardRegion` actors followed by
* [[HandOff]] to the `ShardRegion` responsible for the shard. * `HandOff` to the `ShardRegion` responsible for the shard.
* When the handoff is completed it sends [[RebalanceDone]] to its * When the handoff is completed it sends [[RebalanceDone]] to its
* parent `ShardCoordinator`. If the process takes longer than the * parent `ShardCoordinator`. If the process takes longer than the
* `handOffTimeout` it also sends [[RebalanceDone]]. * `handOffTimeout` it also sends [[RebalanceDone]].
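To make the hand-off exchange above easier to follow, here is a rough region-side sketch. It is not the actual `ShardRegion` implementation; the message types are local stand-ins for the coordinator protocol described above, with `String` in place of `ShardId`.

import akka.actor.Actor

// Local stand-ins for the coordinator messages (illustrative only)
final case class BeginHandOff(shard: String)
final case class BeginHandOffAck(shard: String)
final case class HandOff(shard: String)
final case class ShardStopped(shard: String)

class HandOffAwareRegion extends Actor {
  def receive: Receive = {
    case BeginHandOff(shard) ⇒
      // discard the known location of `shard` and start buffering its messages
      sender() ! BeginHandOffAck(shard)
    case HandOff(shard) ⇒
      // stop all entries of `shard`; once they have terminated, confirm
      sender() ! ShardStopped(shard)
  }
}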


@ -258,12 +258,12 @@ object DistributedPubSubMediator {
} }
/** /**
* Mediator uses [[Router]] to send messages to multiple destinations, Router in general * Mediator uses [[akka.routing.Router]] to send messages to multiple destinations, Router in general
* unwraps messages from [[RouterEnvelope]] and sends the contents to [[Routee]]s. * unwraps messages from [[akka.routing.RouterEnvelope]] and sends the contents to [[akka.routing.Routee]]s.
* *
* Using mediator services should not have an undesired effect of unwrapping messages * Using mediator services should not have an undesired effect of unwrapping messages
* out of [[RouterEnvelope]]. For this reason user messages are wrapped in * out of [[akka.routing.RouterEnvelope]]. For this reason user messages are wrapped in
* [[MediatorRouterEnvelope]] which will be unwrapped by the [[Router]] leaving original * [[MediatorRouterEnvelope]] which will be unwrapped by the [[akka.routing.Router]] leaving original
* user message. * user message.
*/ */
def wrapIfNeeded: Any ⇒ Any = { def wrapIfNeeded: Any ⇒ Any = {
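A condensed sketch of the wrapping rule that the comment above describes (illustrative; the real method lives on the mediator, and `MediatorRouterEnvelope` is re-declared here only to keep the example self-contained):

import akka.routing.RouterEnvelope

object MediatorWrapping {
  // Shields a user's own RouterEnvelope from being unwrapped by the mediator's Router
  final case class MediatorRouterEnvelope(message: Any) extends RouterEnvelope

  val wrapIfNeeded: Any ⇒ Any = {
    case msg: RouterEnvelope ⇒ MediatorRouterEnvelope(msg) // wrap, so the Router only strips this outer layer
    case msg                 ⇒ msg                         // plain messages pass through untouched
  }
}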


@ -50,7 +50,7 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider {
* *
* Each cluster [[Member]] is identified by its [[akka.actor.Address]], and * Each cluster [[Member]] is identified by its [[akka.actor.Address]], and
* the cluster address of this actor system is [[#selfAddress]]. A member also has a status; * the cluster address of this actor system is [[#selfAddress]]. A member also has a status;
* initially [[MemberStatus.Joining]] followed by [[MemberStatus.Up]]. * initially [[MemberStatus]] `Joining` followed by [[MemberStatus]] `Up`.
*/ */
class Cluster(val system: ExtendedActorSystem) extends Extension { class Cluster(val system: ExtendedActorSystem) extends Extension {
@ -212,11 +212,11 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
* The `to` classes can be [[akka.cluster.ClusterEvent.ClusterDomainEvent]] * The `to` classes can be [[akka.cluster.ClusterEvent.ClusterDomainEvent]]
* or subclasses. * or subclasses.
* *
* If `initialStateMode` is [[ClusterEvent.InitialStateAsEvents]] the events corresponding * If `initialStateMode` is `ClusterEvent.InitialStateAsEvents` the events corresponding
* to the current state will be sent to the subscriber to mimic what you would * to the current state will be sent to the subscriber to mimic what you would
* have seen if you were listening to the events when they occurred in the past. * have seen if you were listening to the events when they occurred in the past.
* *
* If `initialStateMode` is [[ClusterEvent.InitialStateAsSnapshot]] a snapshot of * If `initialStateMode` is `ClusterEvent.InitialStateAsSnapshot` a snapshot of
* [[akka.cluster.ClusterEvent.CurrentClusterState]] will be sent to the subscriber as the * [[akka.cluster.ClusterEvent.CurrentClusterState]] will be sent to the subscriber as the
* first message. * first message.
* *
@ -274,8 +274,8 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
/** /**
* Send command to issue state transition to LEAVING for the node specified by 'address'. * Send command to issue state transition to LEAVING for the node specified by 'address'.
* The member will go through the status changes [[MemberStatus.Leaving]] (not published to * The member will go through the status changes [[MemberStatus]] `Leaving` (not published to
* subscribers) followed by [[MemberStatus.Exiting]] and finally [[MemberStatus.Removed]]. * subscribers) followed by [[MemberStatus]] `Exiting` and finally [[MemberStatus]] `Removed`.
* *
* Note that this command can be issued to any member in the cluster, not necessarily the * Note that this command can be issued to any member in the cluster, not necessarily the
* one that is leaving. The cluster extension, but not the actor system or JVM, of the * one that is leaving. The cluster extension, but not the actor system or JVM, of the
@ -300,7 +300,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
/** /**
* The supplied thunk will be run, once, when current cluster member is `Up`. * The supplied thunk will be run, once, when current cluster member is `Up`.
* Typically used together with configuration option `akka.cluster.min-nr-of-members' * Typically used together with configuration option `akka.cluster.min-nr-of-members`
* to defer some action, such as starting actors, until the cluster has reached * to defer some action, such as starting actors, until the cluster has reached
* a certain size. * a certain size.
*/ */
@ -309,7 +309,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
/** /**
* Java API: The supplied callback will be run, once, when current cluster member is `Up`. * Java API: The supplied callback will be run, once, when current cluster member is `Up`.
* Typically used together with configuration option `akka.cluster.min-nr-of-members' * Typically used together with configuration option `akka.cluster.min-nr-of-members`
* to defer some action, such as starting actors, until the cluster has reached * to defer some action, such as starting actors, until the cluster has reached
* a certain size. * a certain size.
*/ */
@ -344,7 +344,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
* Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks.
* *
* Should not be called by the user. The user can issue a LEAVE command which will tell the node * Should not be called by the user. The user can issue a LEAVE command which will tell the node
* to go through graceful handoff process `LEAVE -> EXITING -> REMOVED -> SHUTDOWN`. * to go through graceful handoff process `LEAVE -&gt; EXITING -&gt; REMOVED -&gt; SHUTDOWN`.
*/ */
private[cluster] def shutdown(): Unit = { private[cluster] def shutdown(): Unit = {
if (_isTerminated.compareAndSet(false, true)) { if (_isTerminated.compareAndSet(false, true)) {
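The subscription and lifecycle operations documented above are typically combined as follows; a minimal sketch (the actor, system name, and log lines are made up):

import akka.actor.{ Actor, ActorSystem, Props }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

// Subscribes with InitialStateAsEvents, so past transitions are replayed as events
class ClusterWatcher extends Actor {
  val cluster = Cluster(context.system)

  override def preStart(): Unit =
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive: Receive = {
    case MemberUp(member)          ⇒ println(s"$member is Up")
    case MemberExited(member)      ⇒ println(s"$member is Exiting")
    case UnreachableMember(member) ⇒ println(s"$member marked unreachable")
    case _: MemberEvent            ⇒ // ignore other transitions
  }
}

object ClusterWatcherApp extends App {
  val system = ActorSystem("ClusterSystem")
  val cluster = Cluster(system)
  // Defer starting actors until this member is Up, e.g. once akka.cluster.min-nr-of-members is reached
  cluster.registerOnMemberUp {
    system.actorOf(Props[ClusterWatcher], "watcher")
  }
  // A graceful departure (LEAVE -> EXITING -> REMOVED) can later be requested from any member:
  // cluster.leave(cluster.selfAddress)
}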


@ -87,19 +87,19 @@ private[cluster] object InternalClusterAction {
case object JoinSeedNode case object JoinSeedNode
/** /**
* @see JoinSeedNode * see JoinSeedNode
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
case object InitJoin extends ClusterMessage case object InitJoin extends ClusterMessage
/** /**
* @see JoinSeedNode * see JoinSeedNode
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
final case class InitJoinAck(address: Address) extends ClusterMessage final case class InitJoinAck(address: Address) extends ClusterMessage
/** /**
* @see JoinSeedNode * see JoinSeedNode
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
final case class InitJoinNack(address: Address) extends ClusterMessage final case class InitJoinNack(address: Address) extends ClusterMessage


@ -129,7 +129,7 @@ object ClusterEvent {
} }
/** /**
* Member status changed to [[MemberStatus.Exiting]] and will be removed * Member status changed to `MemberStatus.Exiting` and will be removed
* when all members have seen the `Exiting` status. * when all members have seen the `Exiting` status.
*/ */
final case class MemberExited(member: Member) extends MemberEvent { final case class MemberExited(member: Member) extends MemberEvent {
@ -178,7 +178,7 @@ object ClusterEvent {
final case object ClusterShuttingDown extends ClusterDomainEvent final case object ClusterShuttingDown extends ClusterDomainEvent
/** /**
* Java API: get the singleton instance of [[ClusterShuttingDown]] event * Java API: get the singleton instance of `ClusterShuttingDown` event
*/ */
def getClusterShuttingDownInstance = ClusterShuttingDown def getClusterShuttingDownInstance = ClusterShuttingDown


@ -119,7 +119,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto
} }
/** /**
* Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus.Up]]. * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus]] `Up`.
*/ */
def receiveState(state: CurrentClusterState): Unit = def receiveState(state: CurrentClusterState): Unit =
nodes = state.members collect { case m if m.status == Up ⇒ m.address } nodes = state.members collect { case m if m.status == Up ⇒ m.address }
@ -128,7 +128,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto
* Samples the latest metrics for the node, updates metrics statistics in * Samples the latest metrics for the node, updates metrics statistics in
* [[akka.cluster.MetricsGossip]], and publishes the change to the event bus. * [[akka.cluster.MetricsGossip]], and publishes the change to the event bus.
* *
* @see [[akka.cluster.ClusterMetricsCollector.collect( )]] * @see [[akka.cluster.ClusterMetricsCollector#collect]]
*/ */
def collect(): Unit = { def collect(): Unit = {
latestGossip :+= collector.sample() latestGossip :+= collector.sample()
@ -189,7 +189,7 @@ private[cluster] object MetricsGossip {
private[cluster] final case class MetricsGossip(nodes: Set[NodeMetrics]) { private[cluster] final case class MetricsGossip(nodes: Set[NodeMetrics]) {
/** /**
* Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus.Up]] * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus]] `Up`.
*/ */
def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node)) def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node))
@ -386,7 +386,7 @@ object Metric extends MetricNumericConverter {
* *
* @param address [[akka.actor.Address]] of the node the metrics are gathered at * @param address [[akka.actor.Address]] of the node the metrics are gathered at
* @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC
* @param metrics the set of sampled [[akka.actor.Metric]] * @param metrics the set of sampled [[akka.cluster.Metric]]
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4") @deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")


@ -88,7 +88,6 @@ object WorkList {
* Singly linked list entry implementation for WorkList. * Singly linked list entry implementation for WorkList.
* @param ref The item reference, None for head entry * @param ref The item reference, None for head entry
* @param permanent If the entry is to be kept after processing * @param permanent If the entry is to be kept after processing
* @tparam T The type of the item
*/ */
class Entry[T](val ref: Option[T], val permanent: Boolean) { class Entry[T](val ref: Option[T], val permanent: Boolean) {
var next: Entry[T] = null var next: Entry[T] = null
@ -101,7 +100,6 @@ object WorkList {
* The list is not thread safe! However it is expected to be reentrant. This means a processing function can add/remove * The list is not thread safe! However it is expected to be reentrant. This means a processing function can add/remove
* entries from the list while processing. Most important, a processing function can remove its own entry from the list. * entries from the list while processing. Most important, a processing function can remove its own entry from the list.
* The first remove must return true and any subsequent removes must return false. * The first remove must return true and any subsequent removes must return false.
* @tparam T The type of the item
*/ */
class WorkList[T] { class WorkList[T] {


@ -173,22 +173,22 @@ import ReliableProxy._
* transition callbacks to those actors which subscribe using the * transition callbacks to those actors which subscribe using the
* ``SubscribeTransitionCallBack`` and ``UnsubscribeTransitionCallBack`` * ``SubscribeTransitionCallBack`` and ``UnsubscribeTransitionCallBack``
* messages; see [[akka.actor.FSM]] for more documentation. The proxy will * messages; see [[akka.actor.FSM]] for more documentation. The proxy will
* transition into [[ReliableProxy.Active]] state when ACKs * transition into `ReliableProxy.Active` state when ACKs
* are outstanding and return to the [[ReliableProxy.Idle]] * are outstanding and return to the `ReliableProxy.Idle`
* state when every message sent so far has been confirmed by the peer end-point. * state when every message sent so far has been confirmed by the peer end-point.
* *
* The initial state of the proxy is [[ReliableProxy.Connecting]]. In this state the * The initial state of the proxy is `ReliableProxy.Connecting`. In this state the
* proxy will repeatedly send [[akka.actor.Identify]] messages to `ActorSelection(targetPath)` * proxy will repeatedly send [[akka.actor.Identify]] messages to `ActorSelection(targetPath)`
* in order to obtain a new `ActorRef` for the target. When an [[akka.actor.ActorIdentity]] * in order to obtain a new `ActorRef` for the target. When an [[akka.actor.ActorIdentity]]
* for the target is received a new tunnel will be created, a [[ReliableProxy.TargetChanged]] * for the target is received a new tunnel will be created, a [[ReliableProxy.TargetChanged]]
* message containing the target `ActorRef` will be sent to the proxy's transition subscribers * message containing the target `ActorRef` will be sent to the proxy's transition subscribers
* and the proxy will transition into either the [[ReliableProxy.Idle]] or [[ReliableProxy.Active]] * and the proxy will transition into either the `ReliableProxy.Idle` or `ReliableProxy.Active`
* state, depending if there are any outstanding messages that need to be delivered. If * state, depending if there are any outstanding messages that need to be delivered. If
* `maxConnectAttempts` is defined this actor will stop itself after `Identify` is sent * `maxConnectAttempts` is defined this actor will stop itself after `Identify` is sent
* `maxConnectAttempts` times. * `maxConnectAttempts` times.
* *
* While in the `Idle` or `Active` states, if a communication failure causes the tunnel to * While in the `Idle` or `Active` states, if a communication failure causes the tunnel to
* terminate via Remote Deathwatch the proxy will transition into the [[ReliableProxy.Connecting]] * terminate via Remote Deathwatch the proxy will transition into the `ReliableProxy.Connecting`
* state as described above. After reconnecting `TargetChanged` will be sent only if the target * state as described above. After reconnecting `TargetChanged` will be sent only if the target
* `ActorRef` has changed. * `ActorRef` has changed.
* *
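A minimal usage sketch for the proxy described above (the parent actor and the target path are made up; `100.millis` is the resend interval for unacknowledged messages):

import scala.concurrent.duration._
import akka.actor.{ Actor, ActorPath }
import akka.contrib.pattern.ReliableProxy

class ProxyParent(target: ActorPath) extends Actor {
  // The proxy starts in Connecting, identifies the target, then moves between Idle and Active
  val proxy = context.actorOf(ReliableProxy.props(target, 100.millis))

  def receive: Receive = {
    case msg ⇒ proxy ! msg // resent until acknowledged by the peer end-point
  }
}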


@ -32,7 +32,7 @@ import akka.actor.DeadLetterSuppression
* The conductor is the one orchestrating the test: it governs the * The conductor is the one orchestrating the test: it governs the
* [[akka.remote.testconductor.Controller]]s port to which all * [[akka.remote.testconductor.Controller]]s port to which all
* [[akka.remote.testconductor.Player]]s connect, it issues commands to their * [[akka.remote.testconductor.Player]]s connect, it issues commands to their
* [[akka.remote.testconductor.NetworkFailureInjector]] and provides support * `akka.remote.testconductor.NetworkFailureInjector` and provides support
* for barriers using the [[akka.remote.testconductor.BarrierCoordinator]]. * for barriers using the [[akka.remote.testconductor.BarrierCoordinator]].
* All of this is bundled inside the [[akka.remote.testconductor.TestConductorExt]] * All of this is bundled inside the [[akka.remote.testconductor.TestConductorExt]]
* extension. * extension.
@ -292,7 +292,7 @@ private[akka] object ServerFSM {
* node name translations). * node name translations).
* *
* In the Ready state, messages from the client are forwarded to the controller * In the Ready state, messages from the client are forwarded to the controller
* and [[akka.remote.testconductor.Send]] requests are sent, but the latter is * and `Send` requests are sent, but the latter is
* treated specially: all client operations are to be confirmed by a * treated specially: all client operations are to be confirmed by a
* [[akka.remote.testconductor.Done]] message, and there can be only one such * [[akka.remote.testconductor.Done]] message, and there can be only one such
* request outstanding at a given time (i.e. a Send fails if the previous has * request outstanding at a given time (i.e. a Send fails if the previous has


@ -103,7 +103,7 @@ abstract class ActorSystemActivator extends BundleActivator {
} }
/** /**
* By default, the [[akka.actor.ActorSystem]] name will be set to `bundle-<bundle id>-ActorSystem`. Override this * By default, the [[akka.actor.ActorSystem]] name will be set to `bundle-&lt;bundle id&gt;-ActorSystem`. Override this
* method to define another name for your [[akka.actor.ActorSystem]] instance. * method to define another name for your [[akka.actor.ActorSystem]] instance.
* *
* @param context the bundle context * @param context the bundle context


@ -27,7 +27,7 @@ class OsgiActorSystemFactory(val context: BundleContext, val fallbackClassLoader
/** /**
* Creates the [[akka.actor.ActorSystem]], using the name specified. * Creates the [[akka.actor.ActorSystem]], using the name specified.
* *
* A default name (`bundle-<bundle id>-ActorSystem`) is assigned when you pass along [[scala.None]] instead. * A default name (`bundle-&lt;bundle id&gt;-ActorSystem`) is assigned when you pass along [[scala.None]] instead.
*/ */
def createActorSystem(name: Option[String]): ActorSystem = def createActorSystem(name: Option[String]): ActorSystem =
ActorSystem(actorSystemName(name), actorSystemConfig(context), classloader) ActorSystem(actorSystemName(name), actorSystemConfig(context), classloader)
@ -43,7 +43,7 @@ class OsgiActorSystemFactory(val context: BundleContext, val fallbackClassLoader
/** /**
* Determine the name for the [[akka.actor.ActorSystem]] * Determine the name for the [[akka.actor.ActorSystem]]
* Returns a default value of `bundle-<bundle id>-ActorSystem` if no name is specified * Returns a default value of `bundle-&lt;bundle id&gt;-ActorSystem` if no name is specified
*/ */
def actorSystemName(name: Option[String]): String = def actorSystemName(name: Option[String]): String =
name.getOrElse("bundle-%s-ActorSystem".format(context.getBundle.getBundleId)) name.getOrElse("bundle-%s-ActorSystem".format(context.getBundle.getBundleId))
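A rough sketch of a bundle activator using the defaults described above (the actor, the system name, and the override shown are illustrative; treat the exact hook names as assumptions to verify against the activator's API):

import akka.actor.{ Actor, ActorSystem, Props }
import akka.osgi.ActorSystemActivator
import org.osgi.framework.BundleContext

class Echo extends Actor {
  def receive: Receive = { case msg ⇒ sender() ! msg }
}

class Activator extends ActorSystemActivator {
  def configure(context: BundleContext, system: ActorSystem): Unit = {
    registerService(context, system)           // expose the ActorSystem as an OSGi service
    system.actorOf(Props[Echo], name = "echo")
  }

  // Replaces the default `bundle-<bundle id>-ActorSystem` name
  override def getActorSystemName(context: BundleContext): String = "echo-system"
}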


@ -11,7 +11,7 @@ import org.scalatest.junit.JUnitRunner
/** /**
* JAVA API * JAVA API
* *
* This spec aims to verify custom akka-persistence [[SnapshotStore]] implementations. * This spec aims to verify custom akka-persistence [[akka.persistence.snapshot.SnapshotStore]] implementations.
* Plugin authors are highly encouraged to include it in their plugin's test suites. * Plugin authors are highly encouraged to include it in their plugin's test suites.
* *
* In case your snapshot-store plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll` * In case your snapshot-store plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll`


@ -19,7 +19,7 @@ object AtLeastOnceDelivery {
* Snapshot of current `AtLeastOnceDelivery` state. Can be retrieved with * Snapshot of current `AtLeastOnceDelivery` state. Can be retrieved with
* [[AtLeastOnceDelivery#getDeliverySnapshot]] and saved with [[PersistentActor#saveSnapshot]]. * [[AtLeastOnceDelivery#getDeliverySnapshot]] and saved with [[PersistentActor#saveSnapshot]].
* During recovery the snapshot received in [[SnapshotOffer]] should be set * During recovery the snapshot received in [[SnapshotOffer]] should be set
* with [[AtLeastOnceDelivery.setDeliverySnapshot]]. * with [[AtLeastOnceDelivery#setDeliverySnapshot]].
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
case class AtLeastOnceDeliverySnapshot(currentDeliveryId: Long, unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery]) case class AtLeastOnceDeliverySnapshot(currentDeliveryId: Long, unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery])
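The snapshot round-trip described above looks roughly like this in a persistent actor (sketch only; the trigger message and persistenceId are made up):

import akka.persistence.{ AtLeastOnceDelivery, PersistentActor, SnapshotOffer }
import akka.persistence.AtLeastOnceDelivery.AtLeastOnceDeliverySnapshot

case object SaveDeliveryState // hypothetical trigger

class ReliableSender extends PersistentActor with AtLeastOnceDelivery {
  override def persistenceId: String = "reliable-sender-1"

  override def receiveCommand: Receive = {
    case SaveDeliveryState ⇒ saveSnapshot(getDeliverySnapshot) // capture unconfirmed deliveries
    case _                 ⇒ // domain commands elided
  }

  override def receiveRecover: Receive = {
    case SnapshotOffer(_, snapshot: AtLeastOnceDeliverySnapshot) ⇒
      setDeliverySnapshot(snapshot) // restore unconfirmed deliveries during recovery
  }
}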


@ -23,7 +23,7 @@ private[persistence] object JournalProtocol {
sealed trait Response extends Message sealed trait Response extends Message
/** /**
* Reply message to a failed [[DeleteMessages]] request. * Reply message to a failed [[DeleteMessagesTo]] request.
*/ */
final case class DeleteMessagesFailure(cause: Throwable) final case class DeleteMessagesFailure(cause: Throwable)
extends Response extends Response
@ -54,7 +54,7 @@ private[persistence] object JournalProtocol {
/** /**
* Reply message to a failed [[WriteMessages]] request. This reply is sent to the requestor * Reply message to a failed [[WriteMessages]] request. This reply is sent to the requestor
* before all subsequent [[WriteMessagFailure]] replies. * before all subsequent [[WriteMessageFailure]] replies.
* *
* @param cause failure cause. * @param cause failure cause.
*/ */


@ -35,9 +35,9 @@ private[persistence] final case class NonPersistentRepr(payload: Any, sender: Ac
/** /**
* Plugin API: representation of a persistent message in the journal plugin API. * Plugin API: representation of a persistent message in the journal plugin API.
* *
* @see [[journal.SyncWriteJournal]] * @see [[akka.persistence.journal.SyncWriteJournal]]
* @see [[journal.AsyncWriteJournal]] * @see [[akka.persistence.journal.AsyncWriteJournal]]
* @see [[journal.AsyncRecovery]] * @see [[akka.persistence.journal.AsyncRecovery]]
*/ */
trait PersistentRepr extends PersistentEnvelope with Message { trait PersistentRepr extends PersistentEnvelope with Message {
import scala.collection.JavaConverters._ import scala.collection.JavaConverters._


@ -21,7 +21,7 @@ import scala.collection.immutable.VectorBuilder
trait Message extends Serializable trait Message extends Serializable
/** /**
* Protobuf serializer for [[PersistentRepr]] and [[AtLeastOnceDelivery]] messages. * Protobuf serializer for [[akka.persistence.PersistentRepr]] and [[akka.persistence.AtLeastOnceDelivery]] messages.
*/ */
class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer { class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer {
import PersistentRepr.Undefined import PersistentRepr.Undefined


@ -13,8 +13,8 @@ import akka.ConfigurationException
* Interface for a registry of Akka failure detectors. New resources are implicitly registered when heartbeat is first * Interface for a registry of Akka failure detectors. New resources are implicitly registered when heartbeat is first
* called with the resource given as parameter. * called with the resource given as parameter.
* *
* @tparam A * type parameter A:
* The type of the key that identifies a resource to be monitored by a failure detector * - The type of the key that identifies a resource to be monitored by a failure detector
*/ */
trait FailureDetectorRegistry[A] { trait FailureDetectorRegistry[A] {
@ -56,7 +56,7 @@ private[akka] object FailureDetectorLoader {
/** /**
* Loads and instantiates a given [[FailureDetector]] implementation. The class to be loaded must have a constructor * Loads and instantiates a given [[FailureDetector]] implementation. The class to be loaded must have a constructor
* that accepts a [[com.typesafe.config.Config]] and an [[EventStream]] parameter. Will throw ConfigurationException * that accepts a [[com.typesafe.config.Config]] and an [[akka.event.EventStream]] parameter. Will throw ConfigurationException
* if the implementation cannot be loaded. * if the implementation cannot be loaded.
* *
* @param fqcn Fully qualified class name of the implementation to be loaded. * @param fqcn Fully qualified class name of the implementation to be loaded.
@ -76,8 +76,8 @@ private[akka] object FailureDetectorLoader {
/** /**
* Loads and instantiates a given [[FailureDetector]] implementation. The class to be loaded must have a constructor * Loads and instantiates a given [[FailureDetector]] implementation. The class to be loaded must have a constructor
* that accepts a [[com.typesafe.config.Config]] and an [[EventStream]] parameter. Will throw ConfigurationException * that accepts a [[com.typesafe.config.Config]] and an [[akka.event.EventStream]] parameter. Will throw ConfigurationException
* if the implementation cannot be loaded. Use [[FailureDetectorLoader#load]] if no implicit [[ActorContext]] is * if the implementation cannot be loaded. Use [[FailureDetectorLoader#load]] if no implicit [[akka.actor.ActorContext]] is
* available. * available.
* *
* @param fqcn Fully qualified class name of the implementation to be loaded. * @param fqcn Fully qualified class name of the implementation to be loaded.
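How such a registry is typically driven, as a small sketch (keyed by Address here; any key type works, per the type parameter described above):

import akka.actor.Address
import akka.remote.FailureDetectorRegistry

object FailureDetectorUsage {
  def onHeartbeatReceived(registry: FailureDetectorRegistry[Address], from: Address): Unit = {
    registry.heartbeat(from) // the first heartbeat for `from` implicitly registers it
    if (!registry.isAvailable(from)) {
      // the failure detector currently considers `from` unreachable
    }
  }
}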


@ -65,7 +65,7 @@ private[akka] object RemoteWatcher {
* intercepts Watch and Unwatch system messages and sends corresponding * intercepts Watch and Unwatch system messages and sends corresponding
* [[RemoteWatcher.WatchRemote]] and [[RemoteWatcher.UnwatchRemote]] to this actor. * [[RemoteWatcher.WatchRemote]] and [[RemoteWatcher.UnwatchRemote]] to this actor.
* *
* For a new node to be watched this actor periodically sends [[RemoteWatcher.Heartbeat]] * For a new node to be watched this actor periodically sends `RemoteWatcher.Heartbeat`
* to the peer actor on the other node, which replies with [[RemoteWatcher.HeartbeatRsp]] * to the peer actor on the other node, which replies with [[RemoteWatcher.HeartbeatRsp]]
* message back. The failure detector on the watching side monitors these heartbeat messages. * message back. The failure detector on the watching side monitors these heartbeat messages.
* If arrival of heartbeat messages stops it will be detected and this actor will publish * If arrival of heartbeat messages stops it will be detected and this actor will publish


@ -189,11 +189,11 @@ object TestTransport {
* @param logCallback * @param logCallback
* Function that will be called independently of the current active behavior * Function that will be called independently of the current active behavior
* *
* @tparam A * type parameter A:
* Parameter type of the wrapped function. If it takes multiple parameters it must be wrapped in a tuple. * - Parameter type of the wrapped function. If it takes multiple parameters it must be wrapped in a tuple.
* *
* @tparam B * type parameter B:
* Type parameter of the future that the original function returns. * - Type parameter of the future that the original function returns.
*/ */
class SwitchableLoggedBehavior[A, B](defaultBehavior: Behavior[A, B], logCallback: (A) ⇒ Unit) extends Behavior[A, B] { class SwitchableLoggedBehavior[A, B](defaultBehavior: Behavior[A, B], logCallback: (A) ⇒ Unit) extends Behavior[A, B] {


@ -24,7 +24,7 @@ object Transport {
/** /**
* Message sent to a [[akka.remote.transport.Transport.AssociationEventListener]] registered to a transport * Message sent to a [[akka.remote.transport.Transport.AssociationEventListener]] registered to a transport
* (via the Promise returned by [[akka.remote.transport.Transport.listen]]) when an inbound association request arrives. * (via the Promise returned by [[akka.remote.transport.Transport#listen]]) when an inbound association request arrives.
* *
* @param association * @param association
* The handle for the inbound association. * The handle for the inbound association.
@ -153,7 +153,7 @@ object AssociationHandle {
/** /**
* Message sent to the listener registered to an association (via the Promise returned by * Message sent to the listener registered to an association (via the Promise returned by
* [[akka.remote.transport.AssociationHandle.readHandlerPromise]]) when an inbound payload arrives. * [[akka.remote.transport.AssociationHandle#readHandlerPromise]]) when an inbound payload arrives.
* *
* @param payload * @param payload
* The raw bytes that were sent by the remote endpoint. * The raw bytes that were sent by the remote endpoint.


@ -99,14 +99,14 @@ class Ticket1978AES128CounterSecureRNGSpec extends Ticket1978CommunicationSpec(g
class Ticket1978AES256CounterSecureRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterSecureRNG", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) class Ticket1978AES256CounterSecureRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterSecureRNG", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"))
/** /**
* Both of the <quote>Inet</quote> variants require access to the Internet to access random.org. * Both of the `Inet` variants require access to the Internet to access random.org.
*/ */
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class Ticket1978AES128CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterInetRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) class Ticket1978AES128CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterInetRNG", "TLS_RSA_WITH_AES_128_CBC_SHA"))
with InetRNGSpec with InetRNGSpec
/** /**
* Both of the <quote>Inet</quote> variants require access to the Internet to access random.org. * Both of the `Inet` variants require access to the Internet to access random.org.
*/ */
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class Ticket1978AES256CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterInetRNG", "TLS_RSA_WITH_AES_256_CBC_SHA")) class Ticket1978AES256CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterInetRNG", "TLS_RSA_WITH_AES_256_CBC_SHA"))


@ -8,7 +8,7 @@ import com.codahale.metrics.Gauge
/** /**
* Gauge which exposes the Arithmetic Mean of values given to it. * Gauge which exposes the Arithmetic Mean of values given to it.
* *
* Can be used to expose average of a series of values to [[com.codahale.metrics.ScheduledReporter]]s. * Can be used to expose average of a series of values to `com.codahale.metrics.ScheduledReporter`.
*/ */
class AveragingGauge extends Gauge[Double] { class AveragingGauge extends Gauge[Double] {


@ -10,7 +10,7 @@ import org.{ HdrHistogram ⇒ hdr }
* Adapts Gil Tene's HdrHistogram to Metric's Metric interface. * Adapts Gil Tene's HdrHistogram to Metric's Metric interface.
* *
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is >= 2. * integer that is &gt;= 2.
* @param numberOfSignificantValueDigits The number of significant decimal digits to which the histogram will * @param numberOfSignificantValueDigits The number of significant decimal digits to which the histogram will
* maintain value resolution and separation. Must be a non-negative * maintain value resolution and separation. Must be a non-negative
* integer between 0 and 5. * integer between 0 and 5.
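The two constructor parameters documented above map directly onto HdrHistogram's own API; a small sketch:

import org.HdrHistogram.Histogram

object HistogramExample extends App {
  // Track values up to one hour expressed in microseconds (must be >= 2),
  // with 3 significant decimal digits of resolution (0 to 5 allowed)
  val histogram = new Histogram(3600L * 1000 * 1000, 3)
  histogram.recordValue(1250)
  histogram.recordValue(87000)
  println(histogram.getValueAtPercentile(99.0))
}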


@ -23,7 +23,7 @@ import scala.reflect.ClassTag
* please refer to <a href="http://openjdk.java.net/projects/code-tools/jmh/">JMH</a> if that's what you're writing. * please refer to <a href="http://openjdk.java.net/projects/code-tools/jmh/">JMH</a> if that's what you're writing.
* This trait instead aims to give a high-level overview as well as data for trend-analysis of long-running tests. * This trait instead aims to give a high-level overview as well as data for trend-analysis of long-running tests.
* *
* Reporting defaults to [[ConsoleReporter]]. * Reporting defaults to `ConsoleReporter`.
* In order to send registry to Graphite run sbt with the following property: `-Dakka.registry.reporting.0=graphite`. * In order to send registry to Graphite run sbt with the following property: `-Dakka.registry.reporting.0=graphite`.
*/ */
private[akka] trait MetricsKit extends MetricsKitOps { private[akka] trait MetricsKit extends MetricsKitOps {
@ -195,7 +195,7 @@ private[akka] object MetricsKit {
} }
} }
/** Provides access to custom Akka [[Metric]]s, with named methods. */ /** Provides access to custom Akka `com.codahale.metrics.Metric`, with named methods. */
trait AkkaMetricRegistry { trait AkkaMetricRegistry {
this: MetricRegistry ⇒ this: MetricRegistry ⇒


@ -19,7 +19,7 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL {
type MetricKey = MetricKeyDSL#MetricKey type MetricKey = MetricKeyDSL#MetricKey
/** Simple thread-safe counter, backed by [[LongAdder]] so can pretty efficiently work even when hit by multiple threads */ /** Simple thread-safe counter, backed by `java.util.concurrent.LongAdder` so can pretty efficiently work even when hit by multiple threads */
def counter(key: MetricKey): Counter = registry.counter(key.toString) def counter(key: MetricKey): Counter = registry.counter(key.toString)
/** Simple averaging Gauge, which exposes an arithmetic mean of the values added to it. */ /** Simple averaging Gauge, which exposes an arithmetic mean of the values added to it. */
@ -49,7 +49,7 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL {
/** /**
* Use when measuring for 9x'th percentiles as well as min / max / mean values. * Use when measuring for 9x'th percentiles as well as min / max / mean values.
* *
* Backed by [[ExponentiallyDecayingReservoir]]. * Backed by codahale `ExponentiallyDecayingReservoir`.
*/ */
def histogram(key: MetricKey): Histogram = { def histogram(key: MetricKey): Histogram = {
registry.histogram((key / "histogram").toString) registry.histogram((key / "histogram").toString)
@ -62,10 +62,10 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL {
} }
/** /**
* Enable memory measurements - will be logged by [[ScheduledReporter]]s if enabled. * Enable memory measurements - will be logged by `ScheduledReporter`s if enabled.
* Must not be triggered multiple times - pass around the `MemoryUsageSnapshotting` if you need to measure different points. * Must not be triggered multiple times - pass around the `MemoryUsageSnapshotting` if you need to measure different points.
* *
* Also allows using [[MemoryUsageSnapshotting.getHeapSnapshot]] to obtain memory usage numbers at a given point in time. * Also allows using `MemoryUsageSnapshotting.getHeapSnapshot` to obtain memory usage numbers at a given point in time.
*/ */
def measureMemory(key: MetricKey): MemoryUsageGaugeSet with MemoryUsageSnapshotting = { def measureMemory(key: MetricKey): MemoryUsageGaugeSet with MemoryUsageSnapshotting = {
val gaugeSet = new jvm.MemoryUsageGaugeSet() with MemoryUsageSnapshotting { val gaugeSet = new jvm.MemoryUsageGaugeSet() with MemoryUsageSnapshotting {


@ -11,7 +11,7 @@ import akka.testkit.metrics._
import scala.reflect.ClassTag import scala.reflect.ClassTag
/** /**
* Used to report [[Metric]] types that the original [[ConsoleReporter]] is unaware of (cannot re-use directly because of private constructor). * Used to report `akka.testkit.metric.Metric` types that the original `com.codahale.metrics.ConsoleReporter` is unaware of (cannot re-use directly because of private constructor).
*/ */
class AkkaConsoleReporter( class AkkaConsoleReporter(
registry: AkkaMetricRegistry, registry: AkkaMetricRegistry,


@ -12,7 +12,7 @@ import akka.testkit.metrics._
import scala.concurrent.duration._ import scala.concurrent.duration._
/** /**
* Used to report [[Metric]] types that the original [[com.codahale.metrics.graphite.GraphiteReporter]] is unaware of (cannot re-use directly because of private constructor). * Used to report `com.codahale.metrics.Metric` types that the original `com.codahale.metrics.graphite.GraphiteReporter` is unaware of (cannot re-use directly because of private constructor).
*/ */
class AkkaGraphiteReporter( class AkkaGraphiteReporter(
registry: AkkaMetricRegistry, registry: AkkaMetricRegistry,


@ -113,13 +113,13 @@ trait ActorContext[T] {
def watch(other: akka.actor.ActorRef): akka.actor.ActorRef def watch(other: akka.actor.ActorRef): akka.actor.ActorRef
/** /**
* Revoke the registration established by [[#watch[U]* watch]]. A [[Terminated]] * Revoke the registration established by `watch`. A [[Terminated]]
* notification will not subsequently be received for the referenced Actor. * notification will not subsequently be received for the referenced Actor.
*/ */
def unwatch[U](other: ActorRef[U]): ActorRef[U] def unwatch[U](other: ActorRef[U]): ActorRef[U]
/** /**
* Revoke the registration established by [[#watch(* watch]]. A [[Terminated]] * Revoke the registration established by `watch`. A [[Terminated]]
* notification will not subsequently be received for the referenced Actor. * notification will not subsequently be received for the referenced Actor.
*/ */
def unwatch(other: akka.actor.ActorRef): akka.actor.ActorRef def unwatch(other: akka.actor.ActorRef): akka.actor.ActorRef
@ -135,7 +135,7 @@ trait ActorContext[T] {
/** /**
* Schedule the sending of the given message to the given target Actor after * Schedule the sending of the given message to the given target Actor after
* the given time period has elapsed. The scheduled action can be cancelled * the given time period has elapsed. The scheduled action can be cancelled
* by invoking [[akka.actor.Cancellable!.cancel* cancel]] on the returned * by invoking [[akka.actor.Cancellable]] `cancel` on the returned
* handle. * handle.
*/ */
def schedule[U](delay: FiniteDuration, target: ActorRef[U], msg: U): untyped.Cancellable def schedule[U](delay: FiniteDuration, target: ActorRef[U], msg: U): untyped.Cancellable
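A rough sketch of driving the facilities above from a behavior (names such as `Full`, `Msg`, `Same` and `Stopped` are the ScalaDSL constructs referenced elsewhere in this commit; treat the exact shapes as assumptions):

import scala.concurrent.duration._
import akka.typed._
import akka.typed.ScalaDSL._

object SchedulingExample {
  def behavior: Behavior[String] = Full {
    case Msg(ctx, "start") ⇒
      val timer = ctx.schedule(1.second, ctx.self, "tick") // returns an akka.actor.Cancellable; keep it to cancel
      Same
    case Msg(_, "tick") ⇒
      Stopped
  }
}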
@ -157,9 +157,9 @@ trait ActorContext[T] {
/** /**
* An [[ActorContext]] for synchronous execution of a [[Behavior]] that * An [[ActorContext]] for synchronous execution of a [[Behavior]] that
* provides only stubs for the effects an Actor can perform and replaces * provides only stubs for the effects an Actor can perform and replaces
* created child Actors by [[Inbox$.sync* a synchronous Inbox]]. * created child Actors by a synchronous Inbox (see `Inbox.sync`).
* *
* @see [[EffectfulActorContext]] for more advanced uses. * See [[EffectfulActorContext]] for more advanced uses.
*/ */
class StubbedActorContext[T]( class StubbedActorContext[T](
val name: String, val name: String,


@ -13,7 +13,7 @@ import language.implicitConversions
* Actor instance. Sending a message to an Actor that has terminated before * Actor instance. Sending a message to an Actor that has terminated before
* receiving the message will lead to that message being discarded; such * receiving the message will lead to that message being discarded; such
* messages are delivered to the [[akka.actor.DeadLetter]] channel of the * messages are delivered to the [[akka.actor.DeadLetter]] channel of the
* [[akka.actor.ActorSystem!.eventStream EventStream]] on a best effort basis * [[akka.event.EventStream]] on a best effort basis
* (i.e. this delivery is not reliable). * (i.e. this delivery is not reliable).
*/ */
abstract class ActorRef[-T] extends java.lang.Comparable[ActorRef[_]] { this: ScalaActorRef[T] ⇒ abstract class ActorRef[-T] extends java.lang.Comparable[ActorRef[_]] { this: ScalaActorRef[T] ⇒


@ -23,7 +23,7 @@ import akka.dispatch.Dispatchers
/** /**
* An ActorSystem is home to a hierarchy of Actors. It is created using * An ActorSystem is home to a hierarchy of Actors. It is created using
* [[ActorSystem$.apply[T]*]] from a [[Props]] object that describes the root * [[ActorSystem$]] `apply` from a [[Props]] object that describes the root
* Actor of this hierarchy and which will create all other Actors beneath it. * Actor of this hierarchy and which will create all other Actors beneath it.
* A system also implements the [[ActorRef]] type, and sending a message to * A system also implements the [[ActorRef]] type, and sending a message to
* the system directs that message to the root Actor. * the system directs that message to the root Actor.


@ -20,8 +20,8 @@ import akka.actor.Status
* implementation will fabricate a (hidden) [[ActorRef]] that is bound to a * implementation will fabricate a (hidden) [[ActorRef]] that is bound to a
* [[scala.concurrent.Promise]]. This ActorRef will need to be injected in the * [[scala.concurrent.Promise]]. This ActorRef will need to be injected in the
* message that is sent to the target Actor in order to function as a reply-to * message that is sent to the target Actor in order to function as a reply-to
* address, therefore the argument to the [[AskPattern$.Askable ask * address, therefore the argument to the ask / `?`
* operator]] is not the message itself but a function that given the reply-to * operator is not the message itself but a function that given the reply-to
* address will create the message. * address will create the message.
* *
* {{{ * {{{


@ -34,7 +34,7 @@ abstract class Behavior[T] {
* * returning `Same` designates to reuse the current Behavior * * returning `Same` designates to reuse the current Behavior
* * returning `Unhandled` keeps the same Behavior and signals that the message was not yet handled * * returning `Unhandled` keeps the same Behavior and signals that the message was not yet handled
* *
* Code calling this method should use [[Behavior$.canonicalize]] to replace * Code calling this method should use [[Behavior$]] `canonicalize` to replace
* the special objects with real Behaviors. * the special objects with real Behaviors.
*/ */
def management(ctx: ActorContext[T], msg: Signal): Behavior[T] def management(ctx: ActorContext[T], msg: Signal): Behavior[T]
@ -49,7 +49,7 @@ abstract class Behavior[T] {
* * returning `Same` designates to reuse the current Behavior * * returning `Same` designates to reuse the current Behavior
* * returning `Unhandled` keeps the same Behavior and signals that the message was not yet handled * * returning `Unhandled` keeps the same Behavior and signals that the message was not yet handled
* *
* Code calling this method should use [[Behavior$.canonicalize]] to replace * Code calling this method should use [[Behavior$]] `canonicalize` to replace
* the special objects with real Behaviors. * the special objects with real Behaviors.
*/ */
def message(ctx: ActorContext[T], msg: T): Behavior[T] def message(ctx: ActorContext[T], msg: T): Behavior[T]
@ -103,7 +103,7 @@ final case class PostRestart(failure: Throwable) extends Signal
* registered watchers after this signal has been processed. * registered watchers after this signal has been processed.
* *
* <b>IMPORTANT NOTE:</b> if the actor terminated by switching to the * <b>IMPORTANT NOTE:</b> if the actor terminated by switching to the
* [[ScalaDSL$.Stopped]] behavior then this signal will be ignored (i.e. the * `Stopped` behavior then this signal will be ignored (i.e. the
* Stopped behavior will do nothing in reaction to it). * Stopped behavior will do nothing in reaction to it).
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
@ -136,7 +136,7 @@ final case object ReceiveTimeout extends Signal
/** /**
* Lifecycle signal that is fired when an Actor that was watched has terminated. * Lifecycle signal that is fired when an Actor that was watched has terminated.
* Watching is performed by invoking the * Watching is performed by invoking the
* [[akka.typed.ActorContext!.watch[U]* watch]] method. The DeathWatch service is * [[akka.typed.ActorContext]] `watch` method. The DeathWatch service is
* idempotent, meaning that registering twice has the same effect as registering * idempotent, meaning that registering twice has the same effect as registering
* once. Registration does not need to happen before the Actor terminates, a * once. Registration does not need to happen before the Actor terminates, a
* notification is guaranteed to arrive after both registration and termination * notification is guaranteed to arrive after both registration and termination
@ -267,7 +267,7 @@ object Behavior {
/** /**
* Given a possibly special behavior (same or unhandled) and a * Given a possibly special behavior (same or unhandled) and a
* current behavior (which defines the meaning of encountering a [[ScalaDSL$.Same]] * current behavior (which defines the meaning of encountering a `Same`
* behavior) this method unwraps the behavior such that the innermost behavior * behavior) this method unwraps the behavior such that the innermost behavior
* is returned, i.e. it removes the decorations. * is returned, i.e. it removes the decorations.
*/ */


@ -101,10 +101,10 @@ object ScalaDSL {
def Ignore[T]: Behavior[T] = ignoreBehavior.asInstanceOf[Behavior[T]] def Ignore[T]: Behavior[T] = ignoreBehavior.asInstanceOf[Behavior[T]]
/** /**
* Algebraic Data Type modeling either a [[ScalaDSL$.Msg message]] or a * Algebraic Data Type modeling either a [[Msg message]] or a
* [[ScalaDSL$.Sig signal]], including the [[ActorContext]]. This type is * [[Sig signal]], including the [[ActorContext]]. This type is
* used by several of the behaviors defined in this DSL, see for example * used by several of the behaviors defined in this DSL, see for example
* [[ScalaDSL$.Full]]. * [[Full]].
*/ */
sealed trait MessageOrSignal[T] sealed trait MessageOrSignal[T]
/** /**
@ -185,7 +185,7 @@ object ScalaDSL {
/** /**
* This type of Behavior is created from a partial function from the declared * This type of Behavior is created from a partial function from the declared
* message type to the next behavior, flagging all unmatched messages as * message type to the next behavior, flagging all unmatched messages as
* [[ScalaDSL$.Unhandled]]. All system signals are * [[Unhandled]]. All system signals are
* ignored by this behavior, which implies that a failure of a child actor * ignored by this behavior, which implies that a failure of a child actor
* will be escalated unconditionally. * will be escalated unconditionally.
* *
@ -226,7 +226,7 @@ object ScalaDSL {
} }
/** /**
* This type of behavior is a variant of [[ScalaDSL$.Total]] that does not * This type of behavior is a variant of [[Total]] that does not
* allow the actor to change behavior. It is an efficient choice for stateless * allow the actor to change behavior. It is an efficient choice for stateless
* actors, possibly entering such a behavior after finishing its * actors, possibly entering such a behavior after finishing its
* initialization (which may be modeled using any of the other behavior types). * initialization (which may be modeled using any of the other behavior types).
@ -250,7 +250,7 @@ object ScalaDSL {
* messages that were sent to the synchronous self reference will be lost. * messages that were sent to the synchronous self reference will be lost.
* *
* This decorator is useful for passing messages between the left and right * This decorator is useful for passing messages between the left and right
* sides of [[ScalaDSL$.And]] and [[ScalaDSL$.Or]] combinators. * sides of [[And]] and [[Or]] combinators.
*/ */
final case class SynchronousSelf[T](f: ActorRef[T] ⇒ Behavior[T]) extends Behavior[T] { final case class SynchronousSelf[T](f: ActorRef[T] ⇒ Behavior[T]) extends Behavior[T] {
private val inbox = Inbox.sync[T]("syncbox") private val inbox = Inbox.sync[T]("syncbox")
@ -324,7 +324,7 @@ object ScalaDSL {
* A behavior combinator that feeds incoming messages and signals either into * A behavior combinator that feeds incoming messages and signals either into
* the left or right sub-behavior and allows them to evolve independently of * the left or right sub-behavior and allows them to evolve independently of
* each other. The message or signal is passed first into the left sub-behavior * each other. The message or signal is passed first into the left sub-behavior
* and only if that results in [[ScalaDSL$.Unhandled]] is it passed to the right * and only if that results in [[Unhandled]] is it passed to the right
* sub-behavior. When one of the sub-behaviors terminates the other takes over * sub-behavior. When one of the sub-behaviors terminates the other takes over
* exclusively. When both sub-behaviors respond to a [[Failed]] signal, the * exclusively. When both sub-behaviors respond to a [[Failed]] signal, the
* response with the higher precedence is chosen (see [[Failed$]]). * response with the higher precedence is chosen (see [[Failed$]]).
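For orientation, a minimal sketch of the `Total` behavior mentioned above (every message is handled, either by returning a new behavior or `Unhandled`; names per the ScalaDSL referenced in this commit):

import akka.typed.Behavior
import akka.typed.ScalaDSL._

object CounterExample {
  def counting(n: Int): Behavior[String] = Total {
    case "inc"   ⇒ counting(n + 1)
    case "print" ⇒ println(n); Same
    case _       ⇒ Unhandled
  }
}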


@ -33,7 +33,7 @@ import scala.util.control.NonFatal
* *
* This way of writing Actors can be very useful when writing Actor-based * This way of writing Actors can be very useful when writing Actor-based
* test procedures for actor systems, hence also the possibility to expect * test procedures for actor systems, hence also the possibility to expect
* failures (see [[StepWise$.Steps!.expectFailure]]). * failures (see [[StepWise.Steps#expectFailure]]).
*/ */
object StepWise { object StepWise {
import Behavior._ import Behavior._
@ -173,3 +173,5 @@ object StepWise {
case Nil ⇒ Stopped case Nil ⇒ Stopped
} }
} }
abstract class StepWise


@ -18,7 +18,7 @@ import akka.util.TypedMultiMap
object Receptionist { object Receptionist {
/** /**
* Internal representation of [[Receptionist$.ServiceKey]] which is needed * Internal representation of [[Receptionist.ServiceKey]] which is needed
* in order to use a TypedMultiMap (using keys with a type parameter does not * in order to use a TypedMultiMap (using keys with a type parameter does not
* work in Scala 2.x). * work in Scala 2.x).
*/ */
@ -41,7 +41,7 @@ object Receptionist {
*/ */
sealed trait Command sealed trait Command
/** /**
* Associate the given [[ActorRef]] with the given [[ServiceKey]]. Multiple * Associate the given [[akka.typed.ActorRef]] with the given [[ServiceKey]]. Multiple
* registrations can be made for the same key. Unregistration is implied by * registrations can be made for the same key. Unregistration is implied by
* the end of the referenced Actor's lifecycle. * the end of the referenced Actor's lifecycle.
*/ */
@ -53,7 +53,7 @@ object Receptionist {
final case class Find[T](key: ServiceKey[T])(val replyTo: ActorRef[Listing[T]]) extends Command final case class Find[T](key: ServiceKey[T])(val replyTo: ActorRef[Listing[T]]) extends Command
/** /**
* Confirmation that the given [[ActorRef]] has been associated with the [[ServiceKey]]. * Confirmation that the given [[akka.typed.ActorRef]] has been associated with the [[ServiceKey]].
*/ */
final case class Registered[T](key: ServiceKey[T], address: ActorRef[T]) final case class Registered[T](key: ServiceKey[T], address: ActorRef[T])
/** /**
@ -85,3 +85,5 @@ object Receptionist {
behavior(map valueRemoved ref) behavior(map valueRemoved ref)
} }
} }
abstract class Receptionist


@ -27,9 +27,11 @@ class ReceptionistSpec extends TypedSpec {
val a = Inbox.sync[ServiceA]("a") val a = Inbox.sync[ServiceA]("a")
val r = Inbox.sync[Registered[_]]("r") val r = Inbox.sync[Registered[_]]("r")
ctx.run(Register(ServiceKeyA, a.ref)(r.ref)) ctx.run(Register(ServiceKeyA, a.ref)(r.ref))
ctx.getAllEffects() should be(Effect.Watched(a.ref) :: Nil)
r.receiveMsg() should be(Registered(ServiceKeyA, a.ref)) r.receiveMsg() should be(Registered(ServiceKeyA, a.ref))
val q = Inbox.sync[Listing[ServiceA]]("q") val q = Inbox.sync[Listing[ServiceA]]("q")
ctx.run(Find(ServiceKeyA)(q.ref)) ctx.run(Find(ServiceKeyA)(q.ref))
ctx.getAllEffects() should be(Nil)
q.receiveMsg() should be(Listing(ServiceKeyA, Set(a.ref))) q.receiveMsg() should be(Listing(ServiceKeyA, Set(a.ref)))
assertEmpty(a, r, q) assertEmpty(a, r, q)
} }


@ -317,7 +317,7 @@ object AkkaBuild extends Build {
// -XDignore.symbol.file suppresses sun.misc.Unsafe warnings // -XDignore.symbol.file suppresses sun.misc.Unsafe warnings
javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-XDignore.symbol.file"), javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-XDignore.symbol.file"),
javacOptions in compile ++= (if (allWarnings) Seq("-Xlint:deprecation") else Nil), javacOptions in compile ++= (if (allWarnings) Seq("-Xlint:deprecation") else Nil),
javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-Xdoclint:none"), javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.8"),
incOptions := incOptions.value.withNameHashing(true), incOptions := incOptions.value.withNameHashing(true),
crossVersion := CrossVersion.binary, crossVersion := CrossVersion.binary,


@ -2,7 +2,7 @@ package akka
import sbt._ import sbt._
import sbtunidoc.Plugin.UnidocKeys._ import sbtunidoc.Plugin.UnidocKeys._
import sbtunidoc.Plugin.{ ScalaUnidoc, JavaUnidoc, scalaJavaUnidocSettings, genjavadocSettings, scalaUnidocSettings } import sbtunidoc.Plugin.{ ScalaUnidoc, JavaUnidoc, scalaJavaUnidocSettings, genjavadocExtraSettings, scalaUnidocSettings }
import sbt.Keys._ import sbt.Keys._
import sbt.File import sbt.File
import scala.annotation.tailrec import scala.annotation.tailrec
@ -22,7 +22,10 @@ object Unidoc {
val genjavadocEnabled = sys.props.get("akka.genjavadoc.enabled").getOrElse("false").toBoolean val genjavadocEnabled = sys.props.get("akka.genjavadoc.enabled").getOrElse("false").toBoolean
val (unidocSettings, javadocSettings) = val (unidocSettings, javadocSettings) =
if (genjavadocEnabled) (scalaJavaUnidocSettings, genjavadocSettings ++ Seq(unidocGenjavadocVersion in Global := "0.8")) if (genjavadocEnabled)
(scalaJavaUnidocSettings, genjavadocExtraSettings ++ Seq(
scalacOptions in Compile += "-P:genjavadoc:fabricateParams=true",
unidocGenjavadocVersion in Global := "0.9-SNAPSHOT"))
else (scalaUnidocSettings, Nil) else (scalaUnidocSettings, Nil)
lazy val scaladocDiagramsEnabled = sys.props.get("akka.scaladoc.diagrams").getOrElse("true").toBoolean lazy val scaladocDiagramsEnabled = sys.props.get("akka.scaladoc.diagrams").getOrElse("true").toBoolean