Fix invalid scaladoc links which cannot be found (#353)

* Add fully qualified package names where links could not be resolved
* Fix invalid syntax in links to methods
Naoki Yamada 2023-07-06 19:07:36 +09:00 committed by kerr
parent 15f02d696f
commit 201992d984
100 changed files with 251 additions and 250 deletions
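For reference, the two classes of fix are sketched below in scaladoc form (the trait is hypothetical; the links are taken from the diff). A bare name like [[Behaviors.unhandled]] only resolves near its defining package, so the commit fully qualifies it; a link whose label contains type parameters needs the two-part target-plus-label form.

    /**
     * Resolvable only near the defining package: [[Behaviors.unhandled]]
     * Resolvable from anywhere: [[pekko.actor.typed.scaladsl.Behaviors.unhandled]]
     *
     * A link with an explicit display label, the valid form when the label
     * contains type parameters:
     * [[pekko.actor.typed.ActorRef ActorRef[U]]]
     */
    trait Example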

View file

@ -134,7 +134,7 @@ abstract class BehaviorTestKit[T] {
/**
* Returns the current behavior as it was returned from processing the previous message.
* For example if [[Behaviors.unhandled]] is returned it will be kept here, but not in
* For example if [[pekko.actor.typed.javadsl.Behaviors.unhandled]] is returned it will be kept here, but not in
* [[currentBehavior]].
*/
def returnedBehavior: Behavior[T]

View file

@ -100,7 +100,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef
/**
* Obtain time remaining for execution of the innermost enclosing `within`
* block or throw an [[AssertionError]] if no `within` block surrounds this
* block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this
* call.
*/
def getRemaining: Duration
@ -143,7 +143,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef
/**
* Receive one message from the test actor and assert that it equals the
* given object. Wait time is bounded by the given duration, with an
* [[AssertionError]] being thrown in case of timeout.
* [[java.lang.AssertionError]] being thrown in case of timeout.
*
* @return the received object
*/
@ -152,7 +152,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef
/**
* Receive one message from the test actor and assert that it equals the
* given object. Wait time is bounded by the given duration, with an
* [[AssertionError]] being thrown in case of timeout.
* [[java.lang.AssertionError]] being thrown in case of timeout.
*
* @return the received object
*/
@ -190,7 +190,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef
/**
* Receive one message of type `M`. Wait time is bounded by the `max` duration,
* with an [[AssertionError]] raised in case of timeout.
* with an [[java.lang.AssertionError]] raised in case of timeout.
*/
def receiveMessage(max: Duration): M
@ -235,7 +235,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef
/**
* Expect the given actor to be stopped or stop within the given timeout or
* throw an [[AssertionError]].
* throw an [[java.lang.AssertionError]].
*
* Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor".
*/

View file

@ -126,7 +126,7 @@ trait BehaviorTestKit[T] {
/**
* Returns the current behavior as it was returned from processing the previous message.
* For example if [[Behaviors.unhandled]] is returned it will be kept here, but not in
* For example if [[pekko.actor.typed.scaladsl.Behaviors.unhandled]] is returned it will be kept here, but not in
* [[currentBehavior]].
*/
def returnedBehavior: Behavior[T]

View file

@ -86,7 +86,7 @@ object TestProbe {
/**
* Obtain time remaining for execution of the innermost enclosing `within`
* block or throw an [[AssertionError]] if no `within` block surrounds this
* block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this
* call.
*/
def remaining: FiniteDuration
@ -128,7 +128,7 @@ object TestProbe {
/**
* Receive one message from the test actor and assert that it equals the
* given object. Wait time is bounded by the given duration, with an
* [[AssertionError]] being thrown in case of timeout.
* [[java.lang.AssertionError]] being thrown in case of timeout.
*
* @return the received object
*/
@ -137,7 +137,7 @@ object TestProbe {
/**
* Receive one message from the test actor and assert that it equals the
* given object. Wait time is bounded by the given duration, with an
* [[AssertionError]] being thrown in case of timeout.
* [[java.lang.AssertionError]] being thrown in case of timeout.
*
* @return the received object
*/
@ -172,7 +172,7 @@ object TestProbe {
/**
* Receive one message of type `M`. Wait time is bounded by the `max` duration,
* with an [[AssertionError]] raised in case of timeout.
* with an [[java.lang.AssertionError]] raised in case of timeout.
*/
def receiveMessage(max: FiniteDuration): M
@ -225,7 +225,7 @@ object TestProbe {
/**
* Expect the given actor to be stopped or stop within the given timeout or
* throw an [[AssertionError]].
* throw an [[java.lang.AssertionError]].
*/
def expectTerminated[U](actorRef: ActorRef[U], max: FiniteDuration): Unit
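A minimal sketch of how these AssertionError-raising assertions are typically driven, assuming the standard typed ActorTestKit:

    import org.apache.pekko.actor.testkit.typed.scaladsl.ActorTestKit
    import scala.concurrent.duration._

    val testKit = ActorTestKit()
    val probe = testKit.createTestProbe[String]()

    probe.ref ! "ping"
    probe.expectMessage(1.second, "ping") // java.lang.AssertionError on timeout or mismatch
    testKit.shutdownTestKit()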

View file

@ -49,8 +49,8 @@ trait ActorRef[-T] extends RecipientRef[T] with java.lang.Comparable[ActorRef[_]
/**
* Unsafe utility method for widening the type accepted by this ActorRef;
* provided to avoid having to use `asInstanceOf` on the full reference type,
* which would unfortunately also work on non-ActorRefs. Use it with caution, it may cause a [[ClassCastException]] when you send a message
* to the widened [[ActorRef[U]]].
* which would unfortunately also work on non-ActorRefs. Use it with caution, it may cause a [[java.lang.ClassCastException]] when you send a message
* to the widened [[ActorRef ActorRef[U]]].
*/
def unsafeUpcast[U >: T @uncheckedVariance]: ActorRef[U]
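A sketch of the hazard this doc warns about: the widening itself always succeeds, and the failure surfaces only when the original behavior processes the ill-typed message.

    import org.apache.pekko.actor.typed.ActorRef

    def demo(ref: ActorRef[String]): Unit = {
      val wide: ActorRef[Any] = ref.unsafeUpcast[Any]
      // Compiles, but the underlying String-typed behavior may fail with
      // java.lang.ClassCastException when it handles the Int
      wide ! 42
    }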

View file

@ -64,7 +64,7 @@ abstract class Behavior[T](private[pekko] val _tag: Int) { behavior =>
* provided as an alternative to the universally available `asInstanceOf`, which
* casts the entire type rather than just the type parameter.
* Typically used to upcast a type, for instance from `Nothing` to some type `U`.
* Use it with caution, it may lead to a [[ClassCastException]] when you send a message
* Use it with caution, it may lead to a [[java.lang.ClassCastException]] when you send a message
* to the resulting [[Behavior[U]]].
*/
@InternalApi private[pekko] final def unsafeCast[U]: Behavior[U] = this.asInstanceOf[Behavior[U]]

View file

@ -34,7 +34,7 @@ trait Scheduler {
* Scala API: Schedules a Runnable to be run once with a delay, i.e. a time period that
* has to pass before the runnable is executed.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `Behaviors.withTimers` or `ActorContext.scheduleOnce` should be preferred.
@ -45,7 +45,7 @@ trait Scheduler {
* Java API: Schedules a Runnable to be run once with a delay, i.e. a time period that
* has to pass before the runnable is executed.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `Behaviors.withTimers` or `ActorContext.scheduleOnce` should be preferred.
@ -67,7 +67,7 @@ trait Scheduler {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `Behaviors.withTimers` should be preferred.
@ -91,7 +91,7 @@ trait Scheduler {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling in actors `Behaviors.withTimers` should be preferred.
@ -128,7 +128,7 @@ trait Scheduler {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `Behaviors.withTimers` should be preferred.
@ -162,7 +162,7 @@ trait Scheduler {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling in actors `Behaviors.withTimers` should be preferred.
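A minimal sketch of the Scala API described above; the Runnable is supplied via SAM conversion, and the documented IllegalArgumentException applies when the delay exceeds the scheduler's reach:

    import org.apache.pekko.actor.typed.Scheduler
    import scala.concurrent.ExecutionContext
    import scala.concurrent.duration._

    def tickOnce(scheduler: Scheduler)(implicit ec: ExecutionContext): Unit =
      // java.lang.IllegalArgumentException if delay / tickNanos > Int.MaxValue
      scheduler.scheduleOnce(5.seconds, () => println("tick"))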

View file

@ -45,7 +45,7 @@ import pekko.util.OptionConverters._
* messages may be routed to two different workers and processed independently of each other.
*
* A worker actor (consumer) and its `ConsumerController` is dynamically registered to the
* `WorkPullingProducerController` via a [[ServiceKey]]. It will register itself to the
* `WorkPullingProducerController` via a [[pekko.actor.typed.receptionist.ServiceKey]]. It will register itself to the
* [[pekko.actor.typed.receptionist.Receptionist]], and the `WorkPullingProducerController`
* subscribes to the same key to find active workers. In this way workers can be dynamically
* added or removed from any node in the cluster.
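A sketch of the worker-side registration described above (WorkerCommand is a hypothetical protocol):

    import org.apache.pekko.actor.typed.receptionist.{ Receptionist, ServiceKey }
    import org.apache.pekko.actor.typed.scaladsl.Behaviors

    sealed trait WorkerCommand
    val workerKey: ServiceKey[WorkerCommand] = ServiceKey[WorkerCommand]("worker")

    val worker = Behaviors.setup[WorkerCommand] { ctx =>
      // the WorkPullingProducerController subscribes to the same key to find workers
      ctx.system.receptionist ! Receptionist.Register(workerKey, ctx.self)
      Behaviors.empty
    }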

View file

@ -173,7 +173,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi
* *Warning*: This method is not thread-safe and must not be accessed from threads other
* than the ordinary actor message processing thread, such as [[java.util.concurrent.CompletionStage]] callbacks.
*
* @throws IllegalArgumentException if the given actor ref is not a direct child of this actor
* @throws java.lang.IllegalArgumentException if the given actor ref is not a direct child of this actor
*/
def stop[U](child: ActorRef[U]): Unit
@ -184,7 +184,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi
*
* `watch` is idempotent if it is not mixed with `watchWith`.
*
* It will fail with an [[IllegalStateException]] if the same subject was watched before using `watchWith`.
* It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watchWith`.
* To clear the termination message, unwatch first.
*
* *Warning*: This method is not thread-safe and must not be accessed from threads other
@ -199,7 +199,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi
*
* `watchWith` is idempotent if it is called with the same `msg` and not mixed with `watch`.
*
* It will fail with an [[IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with
* It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with
* another termination message. To change the termination message, unwatch first.
*
* *Warning*: This method is not thread-safe and must not be accessed from threads other

View file

@ -78,7 +78,7 @@ import java.util.function.{ Function => JFunction, Predicate }
* Return the first element of the message buffer without removing it.
*
* @return the first element or throws `NoSuchElementException` if the buffer is empty
* @throws `NoSuchElementException` if the buffer is empty
* @throws java.util.NoSuchElementException if the buffer is empty
*/
def head: T

View file

@ -156,7 +156,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi
* *Warning*: This method is not thread-safe and must not be accessed from threads other
* than the ordinary actor message processing thread, such as [[scala.concurrent.Future]] callbacks.
*
* @throws IllegalArgumentException if the given actor ref is not a direct child of this actor
* @throws java.lang.IllegalArgumentException if the given actor ref is not a direct child of this actor
*/
def stop[U](child: ActorRef[U]): Unit
@ -167,7 +167,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi
*
* `watch` is idempotent if it is not mixed with `watchWith`.
*
* It will fail with an [[IllegalStateException]] if the same subject was watched before using `watchWith`.
* It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watchWith`.
* To clear the termination message, unwatch first.
*
* *Warning*: This method is not thread-safe and must not be accessed from threads other
@ -182,7 +182,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi
*
* `watchWith` is idempotent if it is called with the same `msg` and not mixed with `watch`.
*
* It will fail with an [[IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with
* It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with
* another termination message. To change the termination message, unwatch first.
*
* *Warning*: This method is not thread-safe and must not be accessed from threads other
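A sketch of the watch/watchWith mixing rule (message types hypothetical):

    import org.apache.pekko.actor.typed.scaladsl.Behaviors

    sealed trait Msg
    case object ChildDied extends Msg

    val parent = Behaviors.setup[Msg] { ctx =>
      val child = ctx.spawn(Behaviors.empty[String], "child")
      ctx.watchWith(child, ChildDied)
      // calling ctx.watch(child) here would throw java.lang.IllegalStateException:
      // the subject is already watched with a custom termination message
      Behaviors.receiveMessage { case ChildDied => Behaviors.stopped }
    }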

View file

@ -90,7 +90,7 @@ import pekko.annotation.{ DoNotInherit, InternalApi }
* Return the first element of the message buffer without removing it.
*
* @return the first element or throws `NoSuchElementException` if the buffer is empty
* @throws `NoSuchElementException` if the buffer is empty
* @throws java.util.NoSuchElementException if the buffer is empty
*/
def head: T
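A sketch of guarding head against the empty-buffer case, assuming the standard Behaviors.withStash factory:

    import org.apache.pekko.actor.typed.scaladsl.Behaviors

    val behavior = Behaviors.withStash[String](capacity = 100) { buffer =>
      Behaviors.receiveMessage { msg =>
        buffer.stash(msg)
        // head throws java.util.NoSuchElementException on an empty buffer, so guard it
        if (buffer.nonEmpty) println(buffer.head)
        Behaviors.same
      }
    }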

View file

@ -16,7 +16,7 @@ package org.apache.pekko.japi.pf;
import static org.apache.pekko.actor.SupervisorStrategy.Directive;
/**
* Used for building a partial function for {@link org.apache.pekko.actor.Actor#supervisorStrategy()
* Used for building a partial function for {@link org.apache.pekko.actor.Actor#supervisorStrategy
* Actor.supervisorStrategy()}. Inside an actor you can use it like this with Java 8 to define
* your supervisorStrategy.
*

View file

@ -28,7 +28,7 @@ public class FSMStopBuilder<S, D> {
private UnitPFBuilder<FSM.StopEvent<S, D>> builder = new UnitPFBuilder<FSM.StopEvent<S, D>>();
/**
* Add a case statement that matches on an {@link FSM.Reason}.
* Add a case statement that matches on an {@link org.apache.pekko.actor.FSM.Reason}.
*
* @param reason the reason for the termination
* @param apply an action to apply to the event and state data if there is a match

View file

@ -126,7 +126,7 @@ public class Match<I, R> extends AbstractMatch<I, R> {
*
* @param i the argument to apply the match to
* @return the result of the application
* @throws MatchError if there is no match
* @throws scala.MatchError if there is no match
*/
public R match(I i) throws MatchError {
return statements.apply(i);
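For reference, the Scala-side behavior being linked to: applying a partial function outside its domain raises scala.MatchError.

    val pf: PartialFunction[Int, String] = { case 1 => "one" }
    pf(1) // "one"
    // pf(2) throws scala.MatchError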

View file

@ -19,7 +19,7 @@ import scala.runtime.BoxedUnit;
/**
* Used for building a partial function for {@link
* org.apache.pekko.actor.AbstractActor#createReceive() AbstractActor.createReceive()}.
* org.apache.pekko.actor.AbstractActor#createReceive AbstractActor.createReceive()}.
*
* <p>There is both a match on type only, and a match on type and predicate.
*

View file

@ -127,8 +127,8 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
}
/**
* Set initial state. Call this method from the constructor before the [[#initialize]] method.
* If different state is needed after a restart this method, followed by [[#initialize]], can
* Set initial state. Call this method from the constructor before the [[initialize]] method.
* If different state is needed after a restart this method, followed by [[initialize]], can
* be used in the actor life cycle hooks [[pekko.actor.Actor#preStart]] and [[pekko.actor.Actor#postRestart]].
*
* @param stateName initial state designator
@ -138,8 +138,8 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
startWith(stateName, stateData, null: FiniteDuration)
/**
* Set initial state. Call this method from the constructor before the [[#initialize]] method.
* If different state is needed after a restart this method, followed by [[#initialize]], can
* Set initial state. Call this method from the constructor before the [[initialize]] method.
* If different state is needed after a restart this method, followed by [[initialize]], can
* be used in the actor life cycle hooks [[pekko.actor.Actor#preStart]] and [[pekko.actor.Actor#postRestart]].
*
* @param stateName initial state designator
@ -150,8 +150,8 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
super.startWith(stateName, stateData, Option(timeout))
/**
* Set initial state. Call this method from the constructor before the [[#initialize]] method.
* If different state is needed after a restart this method, followed by [[#initialize]], can
* Set initial state. Call this method from the constructor before the [[initialize]] method.
* If different state is needed after a restart this method, followed by [[initialize]], can
* be used in the actor life cycle hooks [[pekko.actor.Actor#preStart]] and [[pekko.actor.Actor#postRestart]].
*
* @param stateName initial state designator
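A minimal sketch of the documented ordering, with hypothetical states: startWith in the constructor first, initialize() after the state definitions.

    import org.apache.pekko.actor.FSM

    sealed trait State; case object Idle extends State; case object Active extends State
    case object Uninitialized

    class Machine extends FSM[State, Uninitialized.type] {
      startWith(Idle, Uninitialized) // must be called before initialize()
      when(Idle)   { case Event("go", _)   => goto(Active) }
      when(Active) { case Event("stop", _) => stop() }
      initialize()
    }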

View file

@ -201,7 +201,7 @@ trait ActorContext extends ActorRefFactory with ClassicActorContextProvider {
*
* `watch` is idempotent if it is not mixed with `watchWith`.
*
* It will fail with an [[IllegalStateException]] if the same subject was watched before using `watchWith`.
* It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watchWith`.
* To clear the termination message, unwatch first.
*
* *Warning*: This method is not thread-safe and must not be accessed from threads other
@ -218,7 +218,7 @@ trait ActorContext extends ActorRefFactory with ClassicActorContextProvider {
*
* `watchWith` is idempotent if it is called with the same `msg` and not mixed with `watch`.
*
* It will fail with an [[IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with
* It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with
* another termination message. To change the termination message, unwatch first.
*
* *Warning*: This method is not thread-safe and must not be accessed from threads other

View file

@ -224,7 +224,7 @@ trait ActorRefFactory {
*
* @throws pekko.ConfigurationException if deployment, dispatcher
* or mailbox configuration is wrong
* @throws UnsupportedOperationException if invoked on an ActorSystem that
* @throws java.lang.UnsupportedOperationException if invoked on an ActorSystem that
* uses a custom user guardian
*/
def actorOf(props: Props): ActorRef
@ -240,7 +240,7 @@ trait ActorRefFactory {
* invalid or already in use
* @throws pekko.ConfigurationException if deployment, dispatcher
* or mailbox configuration is wrong
* @throws UnsupportedOperationException if invoked on an ActorSystem that
* @throws java.lang.UnsupportedOperationException if invoked on an ActorSystem that
* uses a custom user guardian
*/
def actorOf(props: Props, name: String): ActorRef

View file

@ -476,8 +476,8 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
register(stateName, stateFunction, Option(stateTimeout))
/**
* Set initial state. Call this method from the constructor before the [[#initialize]] method.
* If different state is needed after a restart this method, followed by [[#initialize]], can
* Set initial state. Call this method from the constructor before the [[initialize]] method.
* If different state is needed after a restart this method, followed by [[initialize]], can
* be used in the actor life cycle hooks [[pekko.actor.Actor#preStart]] and [[pekko.actor.Actor#postRestart]].
*
* @param stateName initial state designator
@ -941,7 +941,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
}
/**
* By default [[FSM.Failure]] is logged at error level and other reason
* By default [[pekko.actor.FSM.Failure]] is logged at error level and other reason
* types are not logged. It is possible to override this behavior.
*/
protected def logTermination(reason: Reason): Unit = reason match {

View file

@ -75,7 +75,7 @@ trait Scheduler {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `with Timers` should be preferred.
@ -116,7 +116,7 @@ trait Scheduler {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `AbstractActorWithTimers` should be preferred.
@ -215,7 +215,7 @@ trait Scheduler {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `with Timers` should be preferred.
@ -251,7 +251,7 @@ trait Scheduler {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `AbstractActorWithTimers` should be preferred.
@ -415,7 +415,7 @@ trait Scheduler {
* Scala API: Schedules a message to be sent once with a delay, i.e. a time period that has
* to pass before the message is sent.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `with Timers` should be preferred.
@ -433,7 +433,7 @@ trait Scheduler {
* Java API: Schedules a message to be sent once with a delay, i.e. a time period that has
* to pass before the message is sent.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `AbstractActorWithTimers` should be preferred.
@ -452,7 +452,7 @@ trait Scheduler {
* Scala API: Schedules a function to be run once with a delay, i.e. a time period that has
* to pass before the function is run.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `with Timers` should be preferred.
@ -466,7 +466,7 @@ trait Scheduler {
* Scala API: Schedules a Runnable to be run once with a delay, i.e. a time period that
* has to pass before the runnable is executed.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `with Timers` should be preferred.
@ -477,7 +477,7 @@ trait Scheduler {
* Java API: Schedules a Runnable to be run once with a delay, i.e. a time period that
* has to pass before the runnable is executed.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* reach (calculated as: `delay / tickNanos > Int.MaxValue`).
*
* Note: For scheduling within actors `AbstractActorWithTimers` should be preferred.

View file

@ -176,8 +176,8 @@ private[pekko] trait StashSupport {
* Adds the current message (the message that the actor received last) to the
* actor's stash.
*
* @throws StashOverflowException in case of a stash capacity violation
* @throws IllegalStateException if the same message is stashed more than once
* @throws pekko.actor.StashOverflowException in case of a stash capacity violation
* @throws java.lang.IllegalStateException if the same message is stashed more than once
*/
def stash(): Unit = {
val currMsg = actorCell.currentMessage
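A sketch of the classic stash pattern these exceptions guard (protocol hypothetical):

    import org.apache.pekko.actor.{ Actor, Stash }

    class Worker extends Actor with Stash {
      def receive: Receive = {
        case "open" =>
          unstashAll() // replay stashed messages to the new behavior
          context.become(open)
        case _ =>
          // pekko.actor.StashOverflowException on a capacity violation;
          // java.lang.IllegalStateException if the same message is stashed twice
          stash()
      }
      def open: Receive = { case msg => sender() ! msg }
    }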

View file

@ -23,7 +23,7 @@ import pekko.util.unused
private[pekko] trait DeathWatch { this: ActorCell =>
/**
* This map holds a [[None]] for actors for which we send a [[Terminated]] notification on termination,
* This map holds a [[scala.None]] for actors for which we send a [[Terminated]] notification on termination,
* ``Some(message)`` for actors for which we send a custom termination message.
*/
private var watching: Map[ActorRef, Option[Any]] = Map.empty

View file

@ -23,15 +23,15 @@ import pekko.annotation.InternalApi
import pekko.util.OptionConverters._
/**
* Marker supertype for a setup part that can be put inside [[ActorSystemSetup]], if a specific concrete setup
* Marker supertype for a setup part that can be put inside [[pekko.actor.setup.ActorSystemSetup]], if a specific concrete setup
* is not specified in the actor system setup that means defaults are used (usually from the config file) - no concrete
* setup instance should be mandatory in the [[ActorSystemSetup]] that an actor system is created with.
* setup instance should be mandatory in the [[pekko.actor.setup.ActorSystemSetup]] that an actor system is created with.
*/
abstract class Setup {
/**
* Construct an [[ActorSystemSetup]] with this setup combined with another one. Allows for
* fluent creation of settings. If `other` is a setting of the same concrete [[Setup]] as this
* Construct an [[pekko.actor.setup.ActorSystemSetup]] with this setup combined with another one. Allows for
* fluent creation of settings. If `other` is a setting of the same concrete [[pekko.actor.setup.Setup]] as this
* it will replace this.
*/
final def and(other: Setup): ActorSystemSetup = ActorSystemSetup(this, other)
@ -43,13 +43,13 @@ object ActorSystemSetup {
val empty = new ActorSystemSetup(Map.empty)
/**
* Scala API: Create an [[ActorSystemSetup]] containing all the provided settings
* Scala API: Create an [[pekko.actor.setup.ActorSystemSetup]] containing all the provided settings
*/
def apply(settings: Setup*): ActorSystemSetup =
new ActorSystemSetup(settings.map(s => s.getClass -> s).toMap)
/**
* Java API: Create an [[ActorSystemSetup]] containing all the provided settings
* Java API: Create an [[pekko.actor.setup.ActorSystemSetup]] containing all the provided settings
*/
@varargs
def create(settings: Setup*): ActorSystemSetup = apply(settings: _*)
@ -58,20 +58,20 @@ object ActorSystemSetup {
/**
* A set of setup settings for programmatic configuration of the actor system.
*
* Constructor is *Internal API*. Use the factory methods [[ActorSystemSetup#create]] and [[pekko.actor.Actor#apply]] to create
* Constructor is *Internal API*. Use the factory methods [[ActorSystemSetup#create]] and [[ActorSystemSetup#apply]] to create
* instances.
*/
final class ActorSystemSetup private[pekko] (@InternalApi private[pekko] val setups: Map[Class[_], AnyRef]) {
/**
* Java API: Extract a concrete [[Setup]] of type `T` if it is defined in the settings.
* Java API: Extract a concrete [[pekko.actor.setup.Setup]] of type `T` if it is defined in the settings.
*/
def get[T <: Setup](clazz: Class[T]): Optional[T] = {
setups.get(clazz).map(_.asInstanceOf[T]).toJava
}
/**
* Scala API: Extract a concrete [[Setup]] of type `T` if it is defined in the settings.
* Scala API: Extract a concrete [[pekko.actor.setup.Setup]] of type `T` if it is defined in the settings.
*/
def get[T <: Setup: ClassTag]: Option[T] = {
val clazz = implicitly[ClassTag[T]].runtimeClass
@ -79,7 +79,7 @@ final class ActorSystemSetup private[pekko] (@InternalApi private[pekko] val set
}
/**
* Add a concrete [[Setup]]. If a setting of the same concrete [[Setup]] already is
* Add a concrete [[pekko.actor.setup.Setup]]. If a setting of the same concrete [[pekko.actor.setup.Setup]] already is
* present it will be replaced.
*/
def withSetup[T <: Setup](t: T): ActorSystemSetup = {
@ -88,7 +88,7 @@ final class ActorSystemSetup private[pekko] (@InternalApi private[pekko] val set
/**
* alias for `withSetup` allowing for fluent combination of settings: `a and b and c`, where `a`, `b` and `c` are
* concrete [[Setup]] instances. If a setting of the same concrete [[Setup]] already is
* concrete [[pekko.actor.setup.Setup]] instances. If a setting of the same concrete [[pekko.actor.setup.Setup]] already is
* present it will be replaced.
*/
def and[T <: Setup](t: T): ActorSystemSetup = withSetup(t)
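A sketch of the fluent combination described above, assuming the standard BootstrapSetup:

    import com.typesafe.config.ConfigFactory
    import org.apache.pekko.actor.{ ActorSystem, BootstrapSetup }
    import org.apache.pekko.actor.setup.ActorSystemSetup

    val bootstrap = BootstrapSetup(ConfigFactory.load())
    val setup: ActorSystemSetup = ActorSystemSetup(bootstrap) // or: bootstrap.and(someOtherSetup)
    val system = ActorSystem("demo", setup)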

View file

@ -384,7 +384,7 @@ trait QueueSelector {
/**
* Must be deterministic: return the same value for the same input.
* @return given a `Runnable` a number between 0 .. `queues` (exclusive)
* @throws NullPointerException when `command` is `null`
* @throws java.lang.NullPointerException when `command` is `null`
*/
def getQueue(command: Runnable, queues: Int): Int
}

View file

@ -57,7 +57,7 @@ object DnsProtocol {
def srvRequestType(): RequestType = Srv
/**
* Sending this to the [[AsyncDnsManager]] will either lead to a [[Resolved]] or a [[pekko.actor.Status.Failure]] response.
* Sending this to the [[internal.AsyncDnsManager]] will either lead to a [[Resolved]] or a [[pekko.actor.Status.Failure]] response.
* If request type are both, both resolutions must succeed or the response is a failure.
*/
final case class Resolve(name: String, requestType: RequestType) extends ConsistentHashable {
@ -108,7 +108,8 @@ object DnsProtocol {
/**
* Return the host, taking into account the "java.net.preferIPv6Addresses" system property.
* @throws UnknownHostException
*
* @throws java.net.UnknownHostException
*/
@throws[UnknownHostException]
def address(): InetAddress = _address match {

View file

@ -623,7 +623,7 @@ object Patterns {
* Return an empty [[Optional]] instance for no delay.
* A scheduler (eg context.system.scheduler) must be provided to delay each retry.
* You could provide a function to generate the next delay duration after the first attempt,
* this function should never return `null`, otherwise an [[IllegalArgumentException]] will be thrown.
* this function should never return `null`, otherwise an [[java.lang.IllegalArgumentException]] will be thrown.
*
* If attempts are exhausted the returned future is simply the result of invoking attempt.
* Note that the attempt function will be invoked on the given execution context for subsequent tries and

View file

@ -123,10 +123,10 @@ trait RetrySupport {
* Given a function from Unit to Future, returns an internally retrying Future.
* The first attempt will be made immediately, each subsequent attempt will be made after
* the 'delay' returned by `delayFunction` (its input, the next attempt count, starts from 1).
* Returns [[None]] for no delay.
* Returns [[scala.None]] for no delay.
* A scheduler (eg context.system.scheduler) must be provided to delay each retry.
* You could provide a function to generate the next delay duration after the first attempt,
* this function should never return `null`, otherwise an [[IllegalArgumentException]] will be thrown.
* this function should never return `null`, otherwise an [[java.lang.IllegalArgumentException]] will be thrown.
*
* If attempts are exhausted the returned future is simply the result of invoking attempt.
* Note that the attempt function will be invoked on the given execution context for subsequent
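A sketch of the delayFunction variant under these rules (return None for no delay, never null), assuming the classic pekko.pattern.retry overload:

    import org.apache.pekko.actor.ActorSystem
    import org.apache.pekko.pattern.retry
    import scala.concurrent.Future
    import scala.concurrent.duration._

    implicit val system: ActorSystem = ActorSystem("demo")
    import system.dispatcher
    implicit val scheduler: org.apache.pekko.actor.Scheduler = system.scheduler

    def attempt(): Future[Int] = Future(42)

    // linear backoff: 100ms, 200ms, ... between the 5 attempts
    val result: Future[Int] =
      retry(() => attempt(), 5, attempted => Some((100 * attempted).millis))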

View file

@ -92,7 +92,7 @@ object StatusReply {
def error[T](errorMessage: String): StatusReply[T] = Error(errorMessage)
/**
* Java API: Create an error response with a user defined [[Throwable]].
* Java API: Create an error response with a user defined [[java.lang.Throwable]].
*
* Prefer the string based error response over this one when possible to avoid tightly coupled logic across
* actors and passing internal failure details on to callers that can not do much to handle them.
@ -150,7 +150,7 @@ object StatusReply {
def apply[T](errorMessage: String): StatusReply[T] = error(new ErrorMessage(errorMessage))
/**
* Scala API: Create an error response with a user defined [[Throwable]].
* Scala API: Create an error response with a user defined [[java.lang.Throwable]].
*
* Prefer the string based error response over this one when possible to avoid tightly coupled logic across
* actors and passing internal failure details on to callers that can not do much to handle them.
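A sketch contrasting the two error forms (Done used as a placeholder value type):

    import org.apache.pekko.Done
    import org.apache.pekko.pattern.StatusReply

    // preferred: message-only, keeps failure details loosely coupled
    val soft: StatusReply[Done] = StatusReply.error("validation failed")
    // Throwable-based, for callers that genuinely need the exception
    val hard: StatusReply[Done] = StatusReply.error(new IllegalStateException("boom"))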

View file

@ -35,7 +35,7 @@ trait RoutingLogic extends NoSerializationVerificationNeeded {
/**
* Pick the destination for a given message. Normally it picks one of the
* passed `routees`, but in the end it is up to the implementation to
* return whatever [[Routee]] to use for sending a specific message.
* return whatever [[pekko.routing.Routee]] to use for sending a specific message.
*
* When implemented from Java it can be good to know that
* `routees.apply(index)` can be used to get an element

View file

@ -128,7 +128,7 @@ object Serialization {
* local actor refs, or if serializer library e.g. custom serializer/deserializer
* in Jackson need access to the current `ActorSystem`.
*
* @throws IllegalStateException if the information was not set
* @throws java.lang.IllegalStateException if the information was not set
*/
def getCurrentTransportInformation(): Information = {
Serialization.currentTransportInformation.value match {
@ -541,7 +541,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
}
/**
* @throws `NoSuchElementException` if no serializer with given `id`
* @throws java.util.NoSuchElementException if no serializer with given `id`
*/
private def getSerializerById(id: Int): Serializer = {
if (0 <= id && id < quickSerializerByIdentity.length) {

View file

@ -249,7 +249,7 @@ trait BaseSerializer extends Serializer {
/**
* Globally unique serialization identifier configured in the `reference.conf`.
*
* See [[Serializer.identifier]].
* See [[pekko.serialization.Serializer.identifier]].
*/
override val identifier: Int = identifierFromConfig

View file

@ -34,7 +34,7 @@ object ReplicatedEntityProvider {
/**
* Java API:
*
* Provides full control over the [[ReplicatedEntity]] and the [[Entity]]
* Provides full control over the [[ReplicatedEntity]] and the [[javadsl.Entity]]
* Most use cases can use the [[createPerDataCenter]] and [[createPerRole]]
*
* @tparam M The type of messages the replicated entity accepts
@ -53,10 +53,10 @@ object ReplicatedEntityProvider {
/**
* Scala API:
*
* Provides full control over the [[ReplicatedEntity]] and the [[Entity]]
* Provides full control over the [[ReplicatedEntity]] and the [[scaladsl.Entity]]
* Most use cases can use the [[perDataCenter]] and [[perRole]]
*
* @param typeName The type name used in the [[EntityTypeKey]]
* @param typeName The type name used in the [[scaladsl.EntityTypeKey]]
* @tparam M The type of messages the replicated entity accepts
*/
def apply[M: ClassTag](typeName: String, allReplicaIds: Set[ReplicaId])(
@ -75,7 +75,7 @@ object ReplicatedEntityProvider {
/**
* Scala API
*
* Create a [[ReplicatedEntityProvider]] that uses the defaults for [[Entity]] when running in
* Create a [[ReplicatedEntityProvider]] that uses the defaults for [[scaladsl.Entity]] when running in
* ClusterSharding. A replica will be run per data center.
*/
def perDataCenter[M: ClassTag, E](typeName: String, allReplicaIds: Set[ReplicaId])(
@ -91,7 +91,7 @@ object ReplicatedEntityProvider {
/**
* Scala API
*
* Create a [[ReplicatedEntityProvider]] that uses the defaults for [[Entity]] when running in
* Create a [[ReplicatedEntityProvider]] that uses the defaults for [[scaladsl.Entity]] when running in
* ClusterSharding. The replicas in allReplicaIds should be roles used by nodes. A replica for each
* entity will run on each role.
*/
@ -108,7 +108,7 @@ object ReplicatedEntityProvider {
/**
* Java API
*
* Create a [[ReplicatedEntityProvider]] that uses the defaults for [[Entity]] when running in
* Create a [[ReplicatedEntityProvider]] that uses the defaults for [[scaladsl.Entity]] when running in
* ClusterSharding. A replica will be run per data center.
*/
def createPerDataCenter[M](
@ -128,7 +128,7 @@ object ReplicatedEntityProvider {
/**
* Java API
*
* Create a [[ReplicatedEntityProvider]] that uses the defaults for [[Entity]] when running in
* Create a [[ReplicatedEntityProvider]] that uses the defaults for [[scaladsl.Entity]] when running in
* ClusterSharding.
*
* Map replicas to roles and then there will be a replica per role e.g. to match to availability zones/racks

View file

@ -118,7 +118,7 @@ abstract class HashCodeNoEnvelopeMessageExtractor[M](val numberOfShards: Int) ex
*
* @param entityId The business domain identifier of the entity.
* @param message The message to be send to the entity.
* @throws `InvalidMessageException` if message is null.
* @throws pekko.actor.InvalidMessageException if message is null.
*/
final case class ShardingEnvelope[M](entityId: String, message: M) extends WrappedMessage {
if (message == null) throw InvalidMessageException("[null] is not an allowed message")
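A sketch of routing through the envelope (message protocol hypothetical):

    import org.apache.pekko.actor.typed.ActorRef
    import org.apache.pekko.cluster.sharding.typed.ShardingEnvelope

    final case class Increment(delta: Int)

    def send(region: ActorRef[ShardingEnvelope[Increment]]): Unit =
      // a null message would fail with pekko.actor.InvalidMessageException
      region ! ShardingEnvelope("counter-1", Increment(1))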

View file

@ -33,13 +33,13 @@ import pekko.cluster.sharding.typed.delivery.internal.ShardingConsumerController
* `ShardingConsumerController` is the entity that is initialized in `ClusterSharding`. It will manage
* the lifecycle and message delivery to the destination consumer actor.
*
* The destination consumer actor will start the flow by sending an initial [[ConsumerController.Start]]
* The destination consumer actor will start the flow by sending an initial [[pekko.actor.typed.delivery.ConsumerController.Start]]
* message via the `ActorRef` provided in the factory function of the consumer `Behavior`.
* The `ActorRef` in the `Start` message is typically constructed as a message adapter to map the
* [[ConsumerController.Delivery]] to the protocol of the consumer actor.
* [[pekko.actor.typed.delivery.ConsumerController.Delivery]] to the protocol of the consumer actor.
*
* Received messages from the producer are wrapped in [[ConsumerController.Delivery]] when sent to the consumer,
* which is supposed to reply with [[ConsumerController.Confirmed]] when it has processed the message.
* Received messages from the producer are wrapped in [[pekko.actor.typed.delivery.ConsumerController.Delivery]] when sent to the consumer,
* which is supposed to reply with [[pekko.actor.typed.delivery.ConsumerController.Confirmed]] when it has processed the message.
* Next message from a specific producer is not delivered until the previous is confirmed. However, since
* there can be several producers, e.g. one per node, sending to the same destination entity there can be
* several `Delivery` in flight at the same time.

View file

@ -76,7 +76,7 @@ import pekko.util.OptionConverters._
* Until sent messages have been confirmed the `ShardingProducerController` keeps them in memory to be able to
* resend them. If the JVM of the `ShardingProducerController` crashes those unconfirmed messages are lost.
* To make sure the messages can be delivered also in that scenario the `ShardingProducerController` can be
* used with a [[DurableProducerQueue]]. Then the unconfirmed messages are stored in a durable way so
* used with a [[pekko.actor.typed.delivery.DurableProducerQueue]]. Then the unconfirmed messages are stored in a durable way so
* that they can be redelivered when the producer is started again. An implementation of the
* `DurableProducerQueue` is provided by `EventSourcedProducerQueue` in `pekko-persistence-typed`.
*

View file

@ -439,13 +439,13 @@ object EntityTypeKey {
/**
* A reference to a sharded Entity, which allows `ActorRef`-like usage.
*
* An [[EntityRef]] is NOT an [[ActorRef]], by design, in order to be explicit about the fact that the life-cycle
* An [[EntityRef]] is NOT an [[pekko.actor.typed.ActorRef ActorRef]], by design, in order to be explicit about the fact that the life-cycle
* of a sharded Entity is very different than a plain Actor. Most notably, this is shown by features of Entities
* such as re-balancing (an active Entity to a different node) or passivation. Both of which are aimed to be completely
* transparent to users of such Entity. In other words, if this were to be a plain ActorRef, it would be possible to
* apply DeathWatch to it, which in turn would then trigger when the sharded Actor stopped, breaking the illusion that
* Entity refs are "always there". Please note that while not encouraged, it is possible to expose an Actor's `self`
* [[ActorRef]] and watch it in case such notification is desired.
* [[pekko.actor.typed.ActorRef ActorRef]] and watch it in case such notification is desired.
*
* Not for user extension.
*/

View file

@ -416,13 +416,13 @@ object EntityTypeKey {
/**
* A reference to a sharded Entity, which allows `ActorRef`-like usage.
*
* An [[EntityRef]] is NOT an [[ActorRef]], by design, in order to be explicit about the fact that the life-cycle
* An [[EntityRef]] is NOT an [[pekko.actor.typed.ActorRef ActorRef]], by design, in order to be explicit about the fact that the life-cycle
* of a sharded Entity is very different than a plain Actor. Most notably, this is shown by features of Entities
* such as re-balancing (an active Entity to a different node) or passivation. Both of which are aimed to be completely
* transparent to users of such Entity. In other words, if this were to be a plain ActorRef, it would be possible to
* apply DeathWatch to it, which in turn would then trigger when the sharded Actor stopped, breaking the illusion that
* Entity refs are "always there". Please note that while not encouraged, it is possible to expose an Actor's `self`
* [[ActorRef]] and watch it in case such notification is desired.
* [[pekko.actor.typed.ActorRef ActorRef]] and watch it in case such notification is desired.
* Not for user extension.
*/
@DoNotInherit trait EntityRef[-M] extends RecipientRef[M] { this: InternalRecipientRef[M] =>
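A sketch of the ActorRef-like usage (entity protocol hypothetical); note an EntityRef itself cannot be death-watched:

    import org.apache.pekko.actor.typed.ActorSystem
    import org.apache.pekko.cluster.sharding.typed.scaladsl.{ ClusterSharding, EntityTypeKey }

    final case class Hello(name: String)
    val TypeKey: EntityTypeKey[Hello] = EntityTypeKey[Hello]("Greeter")

    def greet(system: ActorSystem[_]): Unit =
      ClusterSharding(system).entityRefFor(TypeKey, "greeter-1") ! Hello("world")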
@ -544,7 +544,7 @@ object ClusterShardingSetup {
}
/**
* Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[ActorSystem]]
* Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[pekko.actor.typed.ActorSystem]]
* to replace the default implementation of the [[ClusterSharding]] extension. Intended
* for tests that need to replace extension with stub/mock implementations.
*/

View file

@ -20,7 +20,7 @@ import pekko.cluster.sharding.typed.javadsl.EntityRef
import pekko.cluster.sharding.typed.javadsl.EntityTypeKey
/**
* For testing purposes this `EntityRef` can be used in place of a real [[EntityRef]].
* For testing purposes this `EntityRef` can be used in place of a real [[pekko.cluster.sharding.typed.javadsl.EntityRef]].
* It forwards all messages to the `probe`.
*/
object TestEntityRef {

View file

@ -20,7 +20,7 @@ import pekko.cluster.sharding.typed.scaladsl.EntityRef
import pekko.cluster.sharding.typed.scaladsl.EntityTypeKey
/**
* For testing purposes this `EntityRef` can be used in place of a real [[EntityRef]].
* For testing purposes this `EntityRef` can be used in place of a real [[pekko.cluster.sharding.typed.scaladsl.EntityRef]].
* It forwards all messages to the `probe`.
*/
object TestEntityRef {

View file

@ -304,7 +304,7 @@ object ShardRegion {
* Intended for testing purposes, to see when cluster sharding is "ready" or to monitor
* the state of the shard regions.
*
* For the statistics for the entire cluster, see [[GetClusterShardingStats$]].
* For the statistics for the entire cluster, see [[GetClusterShardingStats]].
*/
@SerialVersionUID(1L) case object GetShardRegionStats extends ShardRegionQuery with ClusterShardingSerializable

View file

@ -31,7 +31,7 @@ import pekko.cluster.sharding.external.ShardLocations
trait ExternalShardAllocationClient {
/**
* Update the given shard's location. The [[Address]] should
* Update the given shard's location. The [[pekko.actor.Address]] should
* match one of the nodes in the cluster. If the node has not joined
* the cluster yet it will be moved to that node after the first cluster
* sharding rebalance.
@ -44,7 +44,7 @@ trait ExternalShardAllocationClient {
/**
* Update all of the provided ShardLocations.
* The [[Address]] should match one of the nodes in the cluster. If the node has not joined
* The [[pekko.actor.Address]] should match one of the nodes in the cluster. If the node has not joined
* the cluster yet it will be moved to that node after the first cluster
* sharding rebalance it does.
*

View file

@ -31,7 +31,7 @@ import pekko.cluster.sharding.external.ShardLocations
trait ExternalShardAllocationClient {
/**
* Update the given shard's location. The [[Address]] should
* Update the given shard's location. The [[pekko.actor.Address]] should
* match one of the nodes in the cluster. If the node has not joined
* the cluster yet it will be moved to that node after the first cluster
* sharding rebalance it does.
@ -44,7 +44,7 @@ trait ExternalShardAllocationClient {
/**
* Update all of the provided ShardLocations.
* The [[Address]] should match one of the nodes in the cluster. If the node has not joined
* The [[pekko.actor.Address]] should match one of the nodes in the cluster. If the node has not joined
* the cluster yet it will be moved to that node after the first cluster
* sharding rebalance it does.
*

View file

@ -114,7 +114,7 @@ object DistributedDataSetup {
}
/**
* Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[ActorSystem]]
* Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[pekko.actor.typed.ActorSystem]]
* to replace the default implementation of the [[DistributedData]] extension. Intended
* for tests that need to replace extension with stub/mock implementations.
*/

View file

@ -47,7 +47,7 @@ import pekko.util.Timeout
* than the ordinary actor message processing thread, such as [[java.util.concurrent.CompletionStage]]
* callbacks. It must not be shared between several actor instances.
*
* @param context The [[ActorContext]] of the requesting actor. The `ReplicatorMessageAdapter` can
* @param context The [[pekko.actor.typed.javadsl.ActorContext]] of the requesting actor. The `ReplicatorMessageAdapter` can
* only be used in this actor.
* @param replicator The replicator to interact with, typically `DistributedData.get(system).replicator`.
* @param unexpectedAskTimeout The timeout to use for `ask` operations. This should be longer than

View file

@ -103,7 +103,7 @@ object Replicator {
extends Command
/**
* Reply from `Get`. The data value is retrieved with [[dd.Replicator.GetSuccess.get]] using the typed key.
* Reply from `Get`. The data value is retrieved with [[pekko.cluster.ddata.Replicator.GetSuccess.get]] using the typed key.
*/
type GetResponse[A <: ReplicatedData] = dd.Replicator.GetResponse[A]
object GetSuccess {
@ -117,7 +117,7 @@ object Replicator {
/**
* The [[Get]] request could not be fulfill according to the given
* [[ReadConsistency consistency level]] and [[ReadConsistency#timeout timeout]].
* [[ReadConsistency consistency level]] and [[pekko.cluster.ddata.Replicator.ReadConsistency#timeout timeout]].
*/
type GetFailure[A <: ReplicatedData] = dd.Replicator.GetFailure[A]
object GetFailure {
@ -199,7 +199,7 @@ object Replicator {
/**
* The direct replication of the [[Update]] could not be fulfill according to
* the given [[WriteConsistency consistency level]] and
* [[WriteConsistency#timeout timeout]].
* [[pekko.cluster.ddata.Replicator.WriteConsistency#timeout timeout]].
*
* The `Update` was still performed locally and possibly replicated to some nodes.
* It will eventually be disseminated to other replicas, unless the local replica
@ -277,7 +277,7 @@ object Replicator {
type SubscribeResponse[A <: ReplicatedData] = dd.Replicator.SubscribeResponse[A]
/**
* The data value is retrieved with [[dd.Replicator.Changed.get]] using the typed key.
* The data value is retrieved with [[pekko.cluster.ddata.Replicator.Changed.get]] using the typed key.
*
* @see [[Subscribe]]
*/
@ -286,7 +286,7 @@ object Replicator {
}
/**
* The data value is retrieved with [[dd.Replicator.Changed.get]] using the typed key.
* The data value is retrieved with [[pekko.cluster.ddata.Replicator.Changed.get]] using the typed key.
*
* @see [[Subscribe]]
*/

View file

@ -50,7 +50,7 @@ object ReplicatorMessageAdapter {
* than the ordinary actor message processing thread, such as [[scala.concurrent.Future]] callbacks.
* It must not be shared between several actor instances.
*
* @param context The [[ActorContext]] of the requesting actor. The `ReplicatorMessageAdapter` can
* @param context The [[pekko.actor.typed.scaladsl.ActorContext]] of the requesting actor. The `ReplicatorMessageAdapter` can
* only be used in this actor.
* @param replicator The replicator to interact with, typically `DistributedData(system).replicator`.
* @param unexpectedAskTimeout The timeout to use for `ask` operations. This should be longer than

View file

@ -229,7 +229,7 @@ object ClusterSetup {
}
/**
* Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[ActorSystem]]
* Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[pekko.actor.typed.ActorSystem]]
* to replace the default implementation of the [[Cluster]] extension. Intended
* for tests that need to replace extension with stub/mock implementations.
*/

View file

@ -223,7 +223,7 @@ abstract class ClusterSingleton extends Extension {
* Start if needed and provide a proxy to a named singleton
*
* If there already is a manager running for the given `singletonName` on this node, no additional manager is started.
* If there already is a proxy running for the given `singletonName` on this node, an [[ActorRef]] to that is returned.
* If there already is a proxy running for the given `singletonName` on this node, an [[pekko.actor.typed.ActorRef]] to that is returned.
*
* @return A proxy actor that can be used to communicate with the singleton in the cluster
*/
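A sketch of starting (or re-obtaining) the singleton proxy, assuming the typed ClusterSingleton extension:

    import org.apache.pekko.actor.typed.{ ActorRef, ActorSystem }
    import org.apache.pekko.actor.typed.scaladsl.Behaviors
    import org.apache.pekko.cluster.typed.{ ClusterSingleton, SingletonActor }

    def startCounter(system: ActorSystem[_]): ActorRef[String] =
      // idempotent per singleton name: repeated init returns a proxy to the same singleton
      ClusterSingleton(system).init(SingletonActor(Behaviors.ignore[String], "counter"))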
@ -349,7 +349,7 @@ object ClusterSingletonSetup {
}
/**
* Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[ActorSystem]]
* Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[pekko.actor.typed.ActorSystem]]
* to replace the default implementation of the [[ClusterSingleton]] extension. Intended
* for tests that need to replace extension with stub/mock implementations.
*/

View file

@ -102,7 +102,7 @@ object JoinConfigCompatChecker {
* INTERNAL API
* Builds a new Config object containing only the required entries defined by `requiredKeys`
*
* This method is used from the joining side to prepare the [[Config]] instance that will be sent over the wire.
* This method is used from the joining side to prepare the [[com.typesafe.config.Config]] instance that will be sent over the wire.
* We don't send the full config to avoid unnecessary data transfer, but also to avoid leaking any sensitive
* information that users may have added to their configuration.
*/

View file

@ -318,7 +318,7 @@ abstract class ServiceDiscovery {
* the passed `resolveTimeout` should never be exceeded, as it signals the application's
* eagerness to wait for a result for this specific lookup.
*
* The returned future should be failed once resolveTimeout has passed with a [[DiscoveryTimeoutException]].
* The returned future should be failed once resolveTimeout has passed with a [[ServiceDiscovery.DiscoveryTimeoutException]].
*/
def lookup(query: Lookup, resolveTimeout: java.time.Duration): CompletionStage[Resolved] = {
import pekko.util.FutureConverters._

View file

@ -24,7 +24,7 @@ import pekko.stream.javadsl.Source
trait CurrentEventsByPersistenceIdQuery extends ReadJournal {
/**
* Same type of query as [[EventsByPersistenceIdQuery#eventsByPersistenceId]]
* Same type of query as [[pekko.persistence.query.javadsl.EventsByPersistenceIdQuery#eventsByPersistenceId]]
* but the event stream is completed immediately when it reaches the end of
* the "result set". Events that are stored after the query is completed are
* not included in the event stream.

View file

@ -24,7 +24,7 @@ import pekko.stream.javadsl.Source
trait CurrentEventsByTagQuery extends ReadJournal {
/**
* Same type of query as [[EventsByTagQuery#eventsByTag]] but the event stream
* Same type of query as [[pekko.persistence.query.javadsl.EventsByTagQuery#eventsByTag]] but the event stream
* is completed immediately when it reaches the end of the "result set". Depending
* on journal implementation, this may mean all events up to when the query is
* started, or it may include events that are persisted while the query is still

View file

@ -36,7 +36,7 @@ trait DurableStateStoreQuery[A] extends DurableStateStore[A] {
* This will return changes that occurred up to when the `Source` returned by this call is materialized. Changes to
* objects made since materialization are not guaranteed to be included in the results.
*
* The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* [[pekko.persistence.query.DeletedDurableState]].
*
* @param tag The tag to get changes for.
@ -58,7 +58,7 @@ trait DurableStateStoreQuery[A] extends DurableStateStore[A] {
* in quick succession are likely to be skipped, with only the last update resulting in a change from this
* source.
*
* The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* [[pekko.persistence.query.DeletedDurableState]].
*
* @param tag The tag to get changes for.

View file

@ -35,7 +35,7 @@ trait EventsByPersistenceIdQuery extends ReadJournal {
* The stream is not completed when it reaches the end of the currently stored events,
* but it continues to push new events when new events are persisted.
* Corresponding query that is completed when it reaches the end of the currently
* stored events is provided by [[CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]].
* stored events is provided by [[pekko.persistence.query.javadsl.CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]].
*/
def eventsByPersistenceId(
persistenceId: String,

View file

@ -24,7 +24,7 @@ import pekko.stream.scaladsl.Source
trait CurrentEventsByPersistenceIdQuery extends ReadJournal {
/**
* Same type of query as [[EventsByPersistenceIdQuery#eventsByPersistenceId]]
* Same type of query as [[pekko.persistence.query.scaladsl.EventsByPersistenceIdQuery#eventsByPersistenceId]]
* but the event stream is completed immediately when it reaches the end of
* the "result set". Events that are stored after the query is completed are
* not included in the event stream.

View file

@ -24,7 +24,7 @@ import pekko.stream.scaladsl.Source
trait CurrentEventsByTagQuery extends ReadJournal {
/**
* Same type of query as [[EventsByTagQuery#eventsByTag]] but the event stream
* Same type of query as [[pekko.persistence.query.scaladsl.EventsByTagQuery#eventsByTag]] but the event stream
* is completed immediately when it reaches the end of the "result set". Depending
* on journal implementation, this may mean all events up to when the query is
* started, or it may include events that are persisted while the query is still

View file

@ -29,7 +29,7 @@ trait DurableStateStorePagedPersistenceIdsQuery[A] extends DurableStateStore[A]
* Not all plugins may support in-database paging, and may simply use drop/take Pekko streams operators
* to manipulate the result set according to the paging parameters.
*
* @param afterId The ID to start returning results from, or [[None]] to return all ids. This should be an id
* @param afterId The ID to start returning results from, or [[scala.None]] to return all ids. This should be an id
* returned from a previous invocation of this command. Callers should not assume that ids are
* returned in sorted order.
* @param limit The maximum results to return. Use Long.MaxValue to return all results. Must be greater than zero.

View file

@ -36,7 +36,7 @@ trait DurableStateStoreQuery[A] extends DurableStateStore[A] {
* This will return changes that occurred up to when the `Source` returned by this call is materialized. Changes to
* objects made since materialization are not guaranteed to be included in the results.
*
* The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* [[pekko.persistence.query.DeletedDurableState]].
*
* @param tag The tag to get changes for.
@@ -58,7 +58,7 @@ trait DurableStateStoreQuery[A] extends DurableStateStore[A] {
* in quick succession are likely to be skipped, with only the last update resulting in a change from this
* source.
*
* The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* [[pekko.persistence.query.DeletedDurableState]].
*
* @param tag The tag to get changes for.

View file

@@ -35,7 +35,7 @@ trait EventsByPersistenceIdQuery extends ReadJournal {
* The stream is not completed when it reaches the end of the currently stored events,
* but it continues to push new events when new events are persisted.
* The corresponding query that is completed when it reaches the end of the currently
* stored events is provided by [[CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]].
* stored events is provided by [[pekko.persistence.query.scaladsl.CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]].
*/
def eventsByPersistenceId(
persistenceId: String,

View file

@@ -28,7 +28,7 @@ trait PagedPersistenceIdsQuery extends ReadJournal {
* Not all plugins may support in-database paging, and may simply use drop/take Pekko streams operators
* to manipulate the result set according to the paging parameters.
*
* @param afterId The ID to start returning results from, or [[None]] to return all ids. This should be an id
* @param afterId The ID to start returning results from, or [[scala.None]] to return all ids. This should be an id
* returned from a previous invocation of this command. Callers should not assume that ids are
* returned in sorted order.
* @param limit The maximum results to return. Use Long.MaxValue to return all results. Must be greater than zero.
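
A paging sketch under the same caveat (ids are not necessarily sorted; the plugin id is hypothetical):

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.persistence.query.PersistenceQuery
import org.apache.pekko.persistence.query.scaladsl.PagedPersistenceIdsQuery

implicit val system: ActorSystem = ActorSystem("example")
val journal = PersistenceQuery(system)
  .readJournalFor[PagedPersistenceIdsQuery]("my.read.journal") // hypothetical plugin id

// First page of up to 100 ids, then the page after the last id seen.
val firstPage = journal.currentPersistenceIds(afterId = None, limit = 100)
val nextPage  = journal.currentPersistenceIds(afterId = Some("last-seen-id"), limit = 100)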

View file

@@ -31,7 +31,7 @@ import pekko.stream.javadsl.Source
trait CurrentEventsBySliceQuery extends ReadJournal {
/**
* Same type of query as [[EventsBySliceQuery.eventsBySlices]] but the event stream is completed immediately when it
* Same type of query as [[pekko.persistence.query.typed.javadsl.EventsBySliceQuery.eventsBySlices]] but the event stream is completed immediately when it
* reaches the end of the "result set". Depending on journal implementation, this may mean all events up to when the
* query is started, or it may include events that are persisted while the query is still streaming results. For
* eventually consistent stores, it may only include all events up to some point before the query is started.

View file

@@ -44,7 +44,7 @@ trait DurableStateStoreBySliceQuery[A] extends DurableStateStore[A] {
* This will return changes that occurred up to when the `Source` returned by this call is materialized. Changes to
* objects made since materialization are not guaranteed to be included in the results.
*
* The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* [[pekko.persistence.query.DeletedDurableState]].
*/
def currentChangesBySlices(
@@ -66,7 +66,7 @@ trait DurableStateStoreBySliceQuery[A] extends DurableStateStore[A] {
* change for each object since the offset will be emitted. In particular, multiple updates to a given object in quick
* succession are likely to be skipped, with only the last update resulting in a change from this source.
*
* The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* [[pekko.persistence.query.DeletedDurableState]].
*/
def changesBySlices(

View file

@@ -32,7 +32,7 @@ import pekko.stream.scaladsl.Source
trait CurrentEventsBySliceQuery extends ReadJournal {
/**
* Same type of query as [[EventsBySliceQuery.eventsBySlices]] but the event stream is completed immediately when it
* Same type of query as [[pekko.persistence.query.typed.scaladsl.EventsBySliceQuery.eventsBySlices]] but the event stream is completed immediately when it
* reaches the end of the "result set". Depending on journal implementation, this may mean all events up to when the
* query is started, or it may include events that are persisted while the query is still streaming results. For
* eventually consistent stores, it may only include all events up to some point before the query is started.

View file

@@ -45,7 +45,7 @@ trait DurableStateStoreBySliceQuery[A] extends DurableStateStore[A] {
* This will return changes that occurred up to when the `Source` returned by this call is materialized. Changes to
* objects made since materialization are not guaranteed to be included in the results.
*
* The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* [[pekko.persistence.query.DeletedDurableState]].
*/
def currentChangesBySlices(
@@ -67,7 +67,7 @@ trait DurableStateStoreBySliceQuery[A] extends DurableStateStore[A] {
* change for each object since the offset will be emitted. In particular, multiple updates to a given object in quick
* succession are likely to be skipped, with only the last update resulting in a change from this source.
*
* The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or
* [[pekko.persistence.query.DeletedDurableState]].
*/
def changesBySlices(

View file

@@ -371,7 +371,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) {
*
* NOTE! Also clears sequence numbers in storage!
*
* @see [[PersistenceTestKit.clearAllPreservingSeqNumbers()]]
* @see [[clearAllPreservingSeqNumbers]]
*/
def clearAll(): Unit = scalaTestkit.clearAll()
@@ -380,21 +380,21 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) {
*
* NOTE! Also clears sequence number in storage!
*
* @see [[PersistenceTestKit.clearByIdPreservingSeqNumbers()]]
* @see [[clearByIdPreservingSeqNumbers]]
*/
def clearByPersistenceId(persistenceId: String): Unit = scalaTestkit.clearByPersistenceId(persistenceId)
/**
* Clear all data in storage preserving sequence numbers.
*
* @see [[PersistenceTestKit.clearAll()]]
* @see [[clearAll]]
*/
def clearAllPreservingSeqNumbers(): Unit = scalaTestkit.clearAllPreservingSeqNumbers()
/**
* Clear all data in storage for a particular persistence id, preserving sequence numbers.
*
* @see [[PersistenceTestKit.clearByPersistenceId()]]
* @see [[clearByPersistenceId]]
*/
def clearByIdPreservingSeqNumbers(persistenceId: String): Unit =
scalaTestkit.clearByIdPreservingSeqNumbers(persistenceId)
@@ -439,7 +439,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) {
}
/**
* Restores the default policy if it was changed by [[PersistenceTestKit.withPolicy()]].
* Restores the default policy if it was changed by [[withPolicy]].
*/
def resetPolicy(): Unit = scalaTestkit.resetPolicy()
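
A usage sketch of the clear operations above, assuming the in-memory journal from PersistenceTestKitPlugin is enabled:

import com.typesafe.config.ConfigFactory
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.persistence.testkit.PersistenceTestKitPlugin
import org.apache.pekko.persistence.testkit.scaladsl.PersistenceTestKit

val system = ActorSystem("test",
  PersistenceTestKitPlugin.config.withFallback(ConfigFactory.defaultApplication()))
val testKit = PersistenceTestKit(system)

testKit.clearAll()                     // also resets sequence numbers
testKit.clearAllPreservingSeqNumbers() // keeps sequence numbers intact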

View file

@@ -266,7 +266,7 @@ class SnapshotTestKit(scalaTestkit: ScalaTestKit) {
}
/**
* Restores the default policy if it was changed by [[SnapshotTestKit.withPolicy()]].
* Restores the default policy if it was changed by [[withPolicy]].
*/
def resetPolicy(): Unit = scalaTestkit.resetPolicy()

View file

@@ -149,7 +149,7 @@ final class PersistenceTestKitReadJournal(system: ExtendedActorSystem, @unused c
* Not all plugins may support in-database paging, and may simply use drop/take Pekko streams operators
* to manipulate the result set according to the paging parameters.
*
* @param afterId The ID to start returning results from, or [[None]] to return all ids. This should be an id
* @param afterId The ID to start returning results from, or [[scala.None]] to return all ids. This should be an id
* returned from a previous invocation of this command. Callers should not assume that ids are
* returned in sorted order.
* @param limit The maximum results to return. Use Long.MaxValue to return all results. Must be greater than zero.

View file

@@ -41,7 +41,7 @@ object PersistenceId {
* in Lagom's `javadsl.PersistentEntity`. For compatibility with Lagom's `javadsl.PersistentEntity`
* you should use `""` as the separator.
*
* @throws IllegalArgumentException if the `entityTypeHint` or `entityId` contains `|`
* @throws java.lang.IllegalArgumentException if the `entityTypeHint` or `entityId` contains `|`
*/
def apply(entityTypeHint: String, entityId: String): PersistenceId =
apply(entityTypeHint, entityId, DefaultSeparator)
@@ -65,7 +65,7 @@ object PersistenceId {
* in Lagom's `javadsl.PersistentEntity`. For compatibility with Lagom's `javadsl.PersistentEntity`
* you should use `""` as the separator.
*
* @throws IllegalArgumentException if the `entityTypeHint` or `entityId` contains `separator`
* @throws java.lang.IllegalArgumentException if the `entityTypeHint` or `entityId` contains `separator`
*/
def apply(entityTypeHint: String, entityId: String, separator: String): PersistenceId = {
if (separator.nonEmpty) {
@@ -99,7 +99,7 @@ object PersistenceId {
* in Lagom's `javadsl.PersistentEntity`. For compatibility with Lagom's `javadsl.PersistentEntity`
* you should use `""` as the separator.
*
* @throws IllegalArgumentException if the `entityTypeHint` or `entityId` contains `|`
* @throws java.lang.IllegalArgumentException if the `entityTypeHint` or `entityId` contains `|`
*/
def of(entityTypeHint: String, entityId: String): PersistenceId =
apply(entityTypeHint, entityId)
@@ -123,7 +123,7 @@ object PersistenceId {
* in Lagom's `javadsl.PersistentEntity`. For compatibility with Lagom's `javadsl.PersistentEntity`
* you should use `""` as the separator.
*
* @throws IllegalArgumentException if the `entityTypeHint` or `entityId` contains `separator`
* @throws java.lang.IllegalArgumentException if the `entityTypeHint` or `entityId` contains `separator`
*/
def of(entityTypeHint: String, entityId: String, separator: String): PersistenceId =
apply(entityTypeHint, entityId, separator)
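
A brief illustration of the contract described above:

import org.apache.pekko.persistence.typed.PersistenceId

val pid = PersistenceId("ShoppingCart", "cart-1")
println(pid.id) // "ShoppingCart|cart-1" with the default separator

// Throws IllegalArgumentException: the entity id must not contain the separator.
// PersistenceId("ShoppingCart", "cart|1")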

View file

@@ -35,7 +35,7 @@ import pekko.persistence.typed.scaladsl.RetentionCriteria
import pekko.util.JavaDurationConverters._
/**
* [[DurableProducerQueue]] that can be used with [[pekko.actor.typed.delivery.ProducerController]]
* [[pekko.actor.typed.delivery.DurableProducerQueue]] that can be used with [[pekko.actor.typed.delivery.ProducerController]]
* for reliable delivery of messages. It is implemented with Event Sourcing and stores one
* event before sending the message to the destination and one event for the confirmation
* that the message has been delivered and processed.

View file

@@ -287,7 +287,7 @@ abstract class EventSourcedBehaviorWithEnforcedReplies[Command, Event, State](
* Use [[EventSourcedBehaviorWithEnforcedReplies#newCommandHandlerWithReplyBuilder]] instead, or
* extend [[EventSourcedBehavior]] instead of [[EventSourcedBehaviorWithEnforcedReplies]].
*
* @throws UnsupportedOperationException use newCommandHandlerWithReplyBuilder instead
* @throws java.lang.UnsupportedOperationException use newCommandHandlerWithReplyBuilder instead
*/
override protected def newCommandHandlerBuilder(): CommandHandlerBuilder[Command, Event, State] =
throw new UnsupportedOperationException("Use newCommandHandlerWithReplyBuilder instead")

View file

@@ -202,7 +202,7 @@ abstract class DurableStateBehaviorWithEnforcedReplies[Command, State](
* Use [[DurableStateBehaviorWithEnforcedReplies#newCommandHandlerWithReplyBuilder]] instead, or
* extend [[DurableStateBehavior]] instead of [[DurableStateBehaviorWithEnforcedReplies]].
*
* @throws UnsupportedOperationException use newCommandHandlerWithReplyBuilder instead
* @throws java.lang.UnsupportedOperationException use newCommandHandlerWithReplyBuilder instead
*/
override protected def newCommandHandlerBuilder(): CommandHandlerBuilder[Command, State] =
throw new UnsupportedOperationException("Use newCommandHandlerWithReplyBuilder instead")

View file

@@ -371,7 +371,7 @@ private[persistence] trait Eventsourced
/**
* Recovery handler that receives persisted events during recovery. If a state snapshot
* has been captured and saved, this handler will receive a [[SnapshotOffer]] message
* has been captured and saved, this handler will receive a [[pekko.persistence.SnapshotOffer]] message
* followed by events that are younger than the offered snapshot.
*
* This handler must not have side-effects other than changing persistent actor state i.e. it
@@ -381,7 +381,7 @@ private[persistence] trait Eventsourced
* If there is a problem with recovering the state of the actor from the journal, the error
* will be logged and the actor will be stopped.
*
* @see [[Recovery]]
* @see [[pekko.persistence.Recovery]]
*/
def receiveRecover: Receive
@@ -519,11 +519,11 @@ private[persistence] trait Eventsourced
/**
* Permanently deletes all persistent messages with sequence numbers less than or equal to `toSequenceNr`.
*
* If the delete is successful a [[DeleteMessagesSuccess]] will be sent to the actor.
* If the delete fails a [[DeleteMessagesFailure]] will be sent to the actor.
* If the delete is successful a [[pekko.persistence.DeleteMessagesSuccess]] will be sent to the actor.
* If the delete fails a [[pekko.persistence.DeleteMessagesFailure]] will be sent to the actor.
*
* The given `toSequenceNr` must be less than or equal to [[Eventsourced#lastSequenceNr]], otherwise
* [[DeleteMessagesFailure]] is sent to the actor without performing the delete. All persistent
* [[pekko.persistence.DeleteMessagesFailure]] is sent to the actor without performing the delete. All persistent
* messages may be deleted without specifying the actual sequence number by using `Long.MaxValue`
* as the `toSequenceNr`.
*

View file

@@ -104,7 +104,7 @@ trait PersistenceRecovery {
// #persistence-recovery
/**
* Called when the persistent actor is started for the first time.
* The returned [[Recovery]] object defines how the Actor will recover its persistent state before
* The returned [[pekko.persistence.Recovery]] object defines how the Actor will recover its persistent state before
* handling the first incoming message.
*
* To skip recovery completely return `Recovery.none`.
@@ -117,7 +117,7 @@ trait PersistenceRecovery {
trait PersistenceStash extends Stash with StashFactory {
/**
* The returned [[StashOverflowStrategy]] object determines how to handle the message that failed to stash
* The returned [[pekko.persistence.StashOverflowStrategy]] object determines how to handle the message that failed to stash
* when the internal Stash capacity is exceeded.
*/
def internalStashOverflowStrategy: StashOverflowStrategy =
@@ -129,9 +129,9 @@ trait RuntimePluginConfig {
/**
* Additional configuration of the journal plugin servicing this persistent actor.
* When empty, the whole configuration of the journal plugin will be taken from the [[com.typesafe.config.Config]] loaded into the
* [[ActorSystem]].
* [[pekko.actor.ActorSystem]].
* When configured, the journal plugin configuration will be taken from this [[com.typesafe.config.Config]] merged with the [[com.typesafe.config.Config]]
* loaded into the [[ActorSystem]].
* loaded into the [[pekko.actor.ActorSystem]].
*
* @return an additional configuration used to configure the journal plugin.
*/
@@ -140,9 +140,9 @@ trait RuntimePluginConfig {
/**
* Additional configuration of the snapshot plugin servicing this persistent actor.
* When empty, the whole configuration of the snapshot plugin will be taken from the [[com.typesafe.config.Config]] loaded into the
* [[ActorSystem]].
* [[pekko.actor.ActorSystem]].
* When configured, the snapshot plugin configuration will be taken from this [[com.typesafe.config.Config]] merged with the [[com.typesafe.config.Config]]
* loaded into the [[ActorSystem]].
* loaded into the [[pekko.actor.ActorSystem]].
*
* @return an additional configuration used to configure the snapshot plugin.
*/
@@ -178,7 +178,7 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider {
/**
* INTERNAL API
* @throws IllegalArgumentException if config path for the `pluginId` doesn't exist
* @throws java.lang.IllegalArgumentException if config path for the `pluginId` doesn't exist
*/
@InternalApi private[pekko] def verifyPluginConfigExists(
config: Config,
@@ -190,7 +190,7 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider {
/**
* INTERNAL API
* @throws IllegalArgumentException if `pluginId` is empty (undefined)
* @throws java.lang.IllegalArgumentException if `pluginId` is empty (undefined)
*/
@InternalApi private[pekko] def verifyPluginConfigIsDefined(pluginId: String, pluginType: String): Unit = {
if (isEmpty(pluginId))
@@ -282,13 +282,13 @@ class Persistence(val system: ExtendedActorSystem) extends Extension {
})
/**
* @throws IllegalArgumentException if `configPath` doesn't exist
* @throws java.lang.IllegalArgumentException if `configPath` doesn't exist
*/
private def verifyJournalPluginConfigExists(pluginConfig: Config, configPath: String): Unit =
verifyPluginConfigExists(pluginConfig.withFallback(system.settings.config), configPath, "Journal")
/**
* @throws IllegalArgumentException if `configPath` doesn't exist
* @throws java.lang.IllegalArgumentException if `configPath` doesn't exist
*/
private def verifySnapshotPluginConfigExists(pluginConfig: Config, configPath: String): Unit =
verifyPluginConfigExists(pluginConfig.withFallback(system.settings.config), configPath, "Snapshot store")

View file

@@ -36,8 +36,8 @@ trait Snapshotter extends Actor {
def snapshotSequenceNr: Long
/**
* Instructs the snapshot store to load the specified snapshot and send it via an [[SnapshotOffer]]
* to the running [[PersistentActor]].
* Instructs the snapshot store to load the specified snapshot and send it via an [[pekko.persistence.SnapshotOffer SnapshotOffer]]
* to the running [[pekko.persistence.PersistentActor PersistentActor]].
*/
def loadSnapshot(persistenceId: String, criteria: SnapshotSelectionCriteria, toSequenceNr: Long): Unit =
snapshotStore ! LoadSnapshot(persistenceId, criteria, toSequenceNr)
@@ -45,8 +45,8 @@ trait Snapshotter extends Actor {
/**
* Saves a `snapshot` of this snapshotter's state.
*
* The [[PersistentActor]] will be notified about the success or failure of this
* via an [[SaveSnapshotSuccess]] or [[SaveSnapshotFailure]] message.
* The [[pekko.persistence.PersistentActor PersistentActor]] will be notified about the success or failure of this
* via an [[pekko.persistence.SaveSnapshotSuccess SaveSnapshotSuccess]] or [[pekko.persistence.SaveSnapshotFailure SaveSnapshotFailure]] message.
*/
def saveSnapshot(snapshot: Any): Unit = {
snapshotStore ! SaveSnapshot(SnapshotMetadata(snapshotterId, snapshotSequenceNr), snapshot)
@@ -55,8 +55,8 @@ trait Snapshotter extends Actor {
/**
* Deletes the snapshot identified by `sequenceNr`.
*
* The [[PersistentActor]] will be notified about the status of the deletion
* via an [[DeleteSnapshotSuccess]] or [[DeleteSnapshotFailure]] message.
* The [[pekko.persistence.PersistentActor PersistentActor]] will be notified about the status of the deletion
* via an [[pekko.persistence.DeleteSnapshotSuccess DeleteSnapshotSuccess]] or [[pekko.persistence.DeleteSnapshotFailure DeleteSnapshotFailure]] message.
*/
def deleteSnapshot(sequenceNr: Long): Unit = {
snapshotStore ! DeleteSnapshot(SnapshotMetadata(snapshotterId, sequenceNr))
@@ -65,8 +65,8 @@ trait Snapshotter extends Actor {
/**
* Deletes all snapshots matching `criteria`.
*
* The [[PersistentActor]] will be notified about the status of the deletion
* via an [[DeleteSnapshotsSuccess]] or [[DeleteSnapshotsFailure]] message.
* The [[pekko.persistence.PersistentActor PersistentActor]] will be notified about the status of the deletion
* via an [[pekko.persistence.DeleteSnapshotsSuccess DeleteSnapshotsSuccess]] or [[pekko.persistence.DeleteSnapshotsFailure DeleteSnapshotsFailure]] message.
*/
def deleteSnapshots(criteria: SnapshotSelectionCriteria): Unit = {
snapshotStore ! DeleteSnapshots(snapshotterId, criteria)
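
A compact sketch of a classic PersistentActor exercising this Snapshotter API (the persistence id and snapshot cadence are illustrative):

import org.apache.pekko.persistence.{ PersistentActor, SaveSnapshotFailure, SaveSnapshotSuccess, SnapshotOffer }

class CounterActor extends PersistentActor {
  override def persistenceId: String = "counter-1"
  private var count = 0

  override def receiveRecover: Receive = {
    case SnapshotOffer(_, snapshot: Int) => count = snapshot // offered snapshot first
    case n: Int                          => count += n       // then younger events
  }

  override def receiveCommand: Receive = {
    case n: Int =>
      persist(n) { e =>
        count += e
        if (count % 100 == 0) saveSnapshot(count)
      }
    case SaveSnapshotSuccess(_)        => // snapshot stored
    case SaveSnapshotFailure(_, cause) => // log the cause and keep going
  }
}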

View file

@@ -514,14 +514,14 @@ abstract class AbstractPersistentFSM[S <: FSMState, D, E]
data => action.accept(data)
/**
* Adapter from Java [[Class]] to [[scala.reflect.ClassTag]]
* Adapter from Java [[java.lang.Class]] to [[scala.reflect.ClassTag]]
* @return domain event [[scala.reflect.ClassTag]]
*/
final override def domainEventClassTag: ClassTag[E] =
ClassTag(domainEventClass)
/**
* Domain event's [[Class]]
* Domain event's [[java.lang.Class]]
* Used for identifying domain events during recovery
*/
def domainEventClass: Class[E]

View file

@@ -52,7 +52,7 @@ trait AsyncRecovery {
* @param recoveryCallback called to replay a single message. Can be called from any
* thread.
*
* @see [[AsyncWriteJournal]]
* @see [[pekko.persistence.journal.AsyncWriteJournal]]
*/
def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(
recoveryCallback: PersistentRepr => Unit): Future[Unit]

View file

@@ -25,13 +25,13 @@ trait DurableStateStoreProvider {
/**
* The `DurableStateStore` implementation for the Scala API.
* This corresponds to the instance that is returned by [[DurableStateStoreRegistry#durableStateStoreFor]].
* This corresponds to the instance that is returned by [[org.apache.pekko.persistence.state.DurableStateStoreRegistry.durableStateStoreFor DurableStateStoreRegistry#durableStateStoreFor]].
*/
def scaladslDurableStateStore(): scaladsl.DurableStateStore[Any]
/**
* The `DurableStateStore` implementation for the Java API.
* This corresponds to the instance that is returned by [[DurableStateStoreRegistry#getDurableStateStoreFor]].
* This corresponds to the instance that is returned by [[org.apache.pekko.persistence.state.DurableStateStoreRegistry.getDurableStateStoreFor DurableStateStoreRegistry#getDurableStateStoreFor]].
*/
def javadslDurableStateStore(): javadsl.DurableStateStore[AnyRef]
}

View file

@@ -491,14 +491,14 @@ object TestSubscriber {
}
/**
* Expect and return the signalled [[Throwable]].
* Expect and return the signalled [[java.lang.Throwable]].
*/
def expectError(): Throwable = probe.expectMsgType[OnError].cause
/**
* Fluent DSL
*
* Expect given [[Throwable]].
* Expect given [[java.lang.Throwable]].
*/
def expectError(cause: Throwable): Self = {
probe.expectMsg(OnError(cause))
@@ -510,7 +510,7 @@ object TestSubscriber {
*
* By default `1` demand will be signalled in order to wake up a possibly lazy upstream.
*
* See also [[#expectSubscriptionAndError(Boolean)]] if no demand should be signalled.
* See also [[#expectSubscriptionAndError(signalDemand:Boolean)* #expectSubscriptionAndError(signalDemand: Boolean)]] if no demand should be signalled.
*/
def expectSubscriptionAndError(): Throwable = {
expectSubscriptionAndError(true)
@@ -522,7 +522,7 @@ object TestSubscriber {
* Depending on the `signalDemand` parameter demand may be signalled immediately after obtaining the subscription
* in order to wake up a possibly lazy upstream. You can disable this by setting the `signalDemand` parameter to `false`.
*
* See also [[#expectSubscriptionAndError]].
* See also [[#expectSubscriptionAndError()* #expectSubscriptionAndError()]].
*/
def expectSubscriptionAndError(signalDemand: Boolean): Throwable = {
val sub = expectSubscription()
@@ -537,7 +537,7 @@ object TestSubscriber {
*
* By default `1` demand will be signalled in order to wake up a possibly lazy upstream.
*
* See also [[#expectSubscriptionAndComplete(cause: Throwable, signalDemand: Boolean)]] if no demand should be signalled.
* See also [[#expectSubscriptionAndError(cause:Throwable,signalDemand:Boolean)* #expectSubscriptionAndError(cause: Throwable, signalDemand: Boolean)]] if no demand should be signalled.
*/
def expectSubscriptionAndError(cause: Throwable): Self =
expectSubscriptionAndError(cause, signalDemand = true)
@@ -548,7 +548,7 @@ object TestSubscriber {
* Expect subscription followed by immediate stream completion.
* By default `1` demand will be signalled in order to wake up a possibly lazy upstream
*
* See also [[#expectSubscriptionAndError(cause: Throwable)]].
* See also [[#expectSubscriptionAndError(cause:Throwable)* #expectSubscriptionAndError(cause: Throwable)]].
*/
def expectSubscriptionAndError(cause: Throwable, signalDemand: Boolean): Self = {
val sub = expectSubscription()
@@ -563,7 +563,7 @@ object TestSubscriber {
* Expect subscription followed by immediate stream completion.
* By default `1` demand will be signalled in order to wake up a possibly lazy upstream
*
* See also [[#expectSubscriptionAndComplete(signalDemand: Boolean)]] if no demand should be signalled.
* See also [[#expectSubscriptionAndComplete(signalDemand:Boolean)* #expectSubscriptionAndComplete(signalDemand: Boolean)]] if no demand should be signalled.
*/
def expectSubscriptionAndComplete(): Self =
expectSubscriptionAndComplete(true)
@@ -576,7 +576,7 @@ object TestSubscriber {
* Depending on the `signalDemand` parameter demand may be signalled immediately after obtaining the subscription
* in order to wake up a possibly lazy upstream. You can disable this by setting the `signalDemand` parameter to `false`.
*
* See also [[#expectSubscriptionAndComplete]].
* See also [[#expectSubscriptionAndComplete()* #expectSubscriptionAndComplete]].
*/
def expectSubscriptionAndComplete(signalDemand: Boolean): Self = {
val sub = expectSubscription()
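
A usage sketch with the stream testkit probe (implicit ActorSystem assumed):

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.Source
import org.apache.pekko.stream.testkit.scaladsl.TestSink

implicit val system: ActorSystem = ActorSystem("example")
val boom = new RuntimeException("boom")

// Demand is signalled first, then the failure is asserted.
Source.failed[Int](boom)
  .runWith(TestSink.probe[Int])
  .expectSubscriptionAndError(boom)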

View file

@@ -87,7 +87,7 @@ object ActorSource {
* if the stream is to be drained before completion or should complete immediately.
*
* A message that is matched by `failureMatcher` fails the stream. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -126,7 +126,7 @@ object ActorSource {
* The stream will complete with failure if a message is sent before the acknowledgement has been replied back.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*

View file

@@ -44,7 +44,7 @@ object ActorSource {
* completion.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -79,7 +79,7 @@ object ActorSource {
* if the stream is to be drained before completion or should complete immediately.
*
* A message that is matched by `failureMatcher` fails the stream. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -91,7 +91,7 @@ object ActorSource {
* @param completionMatcher a partial function applied to the messages received by the materialized actor,
* a matching message will complete the stream with the returned [[CompletionStrategy]]
* @param failureMatcher a partial function applied to the messages received by the materialized actor,
* a matching message will fail the stream with the returned [[Throwable]]
* a matching message will fail the stream with the returned [[java.lang.Throwable]]
*/
def actorRefWithBackpressure[T, Ack](
ackTo: ActorRef[Ack],
@@ -113,7 +113,7 @@ object ActorSource {
* The stream will complete with failure if a message is sent before the acknowledgement has been replied back.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
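
A sketch of the matcher-driven variant described above (the Protocol messages are hypothetical; an implicit classic ActorSystem supplies the materializer):

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.actor.typed.ActorRef
import org.apache.pekko.stream.{ CompletionStrategy, OverflowStrategy }
import org.apache.pekko.stream.scaladsl.Sink
import org.apache.pekko.stream.typed.scaladsl.ActorSource

implicit val system: ActorSystem = ActorSystem("example")

sealed trait Protocol
final case class Message(msg: String) extends Protocol
case object Complete extends Protocol
final case class Fail(ex: Exception) extends Protocol

val ref: ActorRef[Protocol] =
  ActorSource
    .actorRef[Protocol](
      completionMatcher = { case Complete => CompletionStrategy.draining },
      failureMatcher = { case Fail(ex) => ex },
      bufferSize = 8,
      overflowStrategy = OverflowStrategy.dropHead)
    .to(Sink.foreach(println))
    .run()

ref ! Message("hello")
ref ! Complete // buffer is drained, then the stream completes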

View file

@@ -53,7 +53,7 @@ trait Graph[+S <: Shape, +M] {
/**
* Specifies the name of the Graph.
* If the name is null or empty the name is ignored, i.e. [[#none]] is returned.
* If the name is null or empty the name is ignored, i.e. [[Attributes.none]] is returned.
*/
def named(name: String): Graph[S, M] = addAttributes(Attributes.name(name))

View file

@@ -49,7 +49,7 @@ final case class IOResult(
def wasSuccessful: Boolean = status.isSuccess
/**
* Java API: If the IO operation resulted in an error, returns the corresponding [[Throwable]]
* Java API: If the IO operation resulted in an error, returns the corresponding [[java.lang.Throwable]]
* or throws [[UnsupportedOperationException]] otherwise.
*/
@deprecated("status is always set to Success(Done)", "Akka 2.6.0")

View file

@@ -100,7 +100,7 @@ abstract class Materializer {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* supported by the `Scheduler`.
*
* @return A [[pekko.actor.Cancellable]] that allows cancelling the timer. Cancelling is best effort, if the event
@@ -136,7 +136,7 @@ abstract class Materializer {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* supported by the `Scheduler`.
*
* @return A [[pekko.actor.Cancellable]] that allows cancelling the timer. Cancelling is best effort, if the event
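
A small sketch of driving this scheduler directly (system name and task are illustrative):

import scala.concurrent.duration._
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.Materializer

val system = ActorSystem("example")
val materializer = Materializer(system)

// Repeats the task with a fixed delay between invocations.
val timer = materializer.scheduleWithFixedDelay(1.second, 5.seconds, () => println("tick"))

timer.cancel() // best-effort cancellation, as described above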

View file

@@ -32,12 +32,12 @@ import pekko.stream.scaladsl.{ Sink, Source }
*/
object SinkRef {
/** Implicitly converts a [[SinkRef]] to a [[Sink]]. The same can be achieved by calling `.sink` on the reference. */
/** Implicitly converts a [[SinkRef]] to a [[scaladsl.Sink]]. The same can be achieved by calling `.sink` on the reference. */
implicit def convertRefToSink[T](sinkRef: SinkRef[T]): Sink[T, NotUsed] = sinkRef.sink()
}
/**
* A [[SinkRef]] allows sharing a "reference" to a [[Sink]] with others, with the main purpose of crossing a network boundary.
* A [[SinkRef]] allows sharing a "reference" to a [[scaladsl.Sink]] with others, with the main purpose of crossing a network boundary.
* Usually obtaining a SinkRef would be done via Actor messaging, in which one system asks a remote one
* to accept some data from it, and the remote one decides to accept the request to send data in a back-pressured
* streaming fashion -- using a sink ref.
@@ -54,7 +54,7 @@ object SinkRef {
@DoNotInherit
trait SinkRef[In] {
/** Scala API: Get [[Sink]] underlying this sink ref. */
/** Scala API: Get [[scaladsl.Sink]] underlying this sink ref. */
def sink(): Sink[In, NotUsed]
/** Java API: Get [[javadsl.Sink]] underlying this sink ref. */

View file

@@ -50,7 +50,7 @@ import pekko.util.ByteString
/**
* INTERNAL API: Use [[pekko.stream.scaladsl.JsonFraming]] instead.
*
* **Mutable** framing implementation that, given any number of [[ByteString]] chunks, can emit JSON objects contained within them.
* **Mutable** framing implementation that, given any number of [[pekko.util.ByteString]] chunks, can emit JSON objects contained within them.
* Typically JSON objects are separated by new-lines or commas; however, a top-level JSON Array can also be understood and chunked up
* into valid JSON objects by this framing implementation.
*

View file

@@ -23,7 +23,7 @@ object Compression {
/**
* Creates a Flow that decompresses a gzip-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk.
*/
def gunzip(maxBytesPerChunk: Int): Flow[ByteString, ByteString, NotUsed] =
scaladsl.Compression.gunzip(maxBytesPerChunk).asJava
@@ -31,7 +31,7 @@ object Compression {
/**
* Creates a Flow that decompresses a deflate-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk.
*/
def inflate(maxBytesPerChunk: Int): Flow[ByteString, ByteString, NotUsed] =
inflate(maxBytesPerChunk, false)
@@ -39,7 +39,7 @@ object Compression {
/**
* Same as [[inflate]] with configurable maximum output length and nowrap
*
* @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk.
* @param nowrap if true then use GZIP compatible decompression
*/
def inflate(maxBytesPerChunk: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] =
@@ -47,7 +47,7 @@ object Compression {
/**
* Creates a flow that gzip-compresses a stream of ByteStrings. Note that the compressor
* will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]]
* will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]]
* coming out of the flow can be fully decompressed without waiting for additional data. This may
* come at a compression performance cost for very small chunks.
*/
@@ -64,7 +64,7 @@ object Compression {
/**
* Creates a flow that deflate-compresses a stream of ByteString. Note that the compressor
* will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]]
* will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]]
* coming out of the flow can be fully decompressed without waiting for additional data. This may
* come at a compression performance cost for very small chunks.
*/

View file

@@ -31,7 +31,7 @@ import pekko.stream.scaladsl.SourceToCompletionStage
object FileIO {
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file.
* Overwrites existing files by truncating their contents; if you want to append to an existing file, use
* [[toFile(File, util.Set[OpenOption])]] with [[java.nio.file.StandardOpenOption.APPEND]].
*
@@ -47,7 +47,7 @@ object FileIO {
def toFile(f: File): javadsl.Sink[ByteString, CompletionStage[IOResult]] = toPath(f.toPath)
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file path.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path.
* Overwrites existing files by truncating their contents; if you want to append to an existing file, use
* [[toPath(Path, util.Set[OpenOption])]] with [[java.nio.file.StandardOpenOption.APPEND]].
*
@@ -69,7 +69,7 @@ object FileIO {
new Sink(scaladsl.FileIO.toPath(f).toCompletionStage())
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file.
*
* Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
* and a possible exception if the IO operation was not completed successfully.
@@ -85,7 +85,7 @@ object FileIO {
toPath(f.toPath)
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file path.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path.
*
* Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
* and a possible exception if the IO operation was not completed successfully.
@@ -106,7 +106,7 @@ object FileIO {
new Sink(scaladsl.FileIO.toPath(f, options.asScala.toSet).toCompletionStage())
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file path.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path.
*
* Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
* and a possible exception if the IO operation was not completed successfully.
@@ -132,7 +132,7 @@ object FileIO {
/**
* Creates a Source from a file's contents.
* Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes,
* Emitted elements are [[pekko.util.ByteString]] elements, chunked by default by 8192 bytes,
* except the last element, which will be up to 8192 in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or
@@ -149,7 +149,7 @@ object FileIO {
/**
* Creates a Source from a file's contents.
* Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes,
* Emitted elements are [[pekko.util.ByteString]] elements, chunked by default by 8192 bytes,
* except the last element, which will be up to 8192 in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or
@@ -165,7 +165,7 @@ object FileIO {
/**
* Creates a synchronous Source from a file's contents.
* Emitted elements are `chunkSize` sized [[ByteString]] elements,
* Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements,
* except the last element, which will be up to `chunkSize` in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or
@@ -184,7 +184,7 @@ object FileIO {
/**
* Creates a synchronous Source from a file's contents.
* Emitted elements are `chunkSize` sized [[ByteString]] elements,
* Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements,
* except the last element, which will be up to `chunkSize` in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or
@@ -202,7 +202,7 @@ object FileIO {
/**
* Creates a synchronous Source from a file's contents.
* Emitted elements are `chunkSize` sized [[ByteString]] elements,
* Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements,
* except the last element, which will be up to `chunkSize` in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or

View file

@@ -432,7 +432,7 @@ object Source {
* completion.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -528,7 +528,7 @@ object Source {
* The stream will complete with failure if a message is sent before the acknowledgement has been replied back.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -562,7 +562,7 @@ object Source {
* The stream will complete with failure if a message is sent before the acknowledgement has been replied back.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*

View file

@@ -36,7 +36,7 @@ import pekko.stream.scaladsl.SourceToCompletionStage
object StreamConverters {
/**
* Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function.
* Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function.
*
* Materializes a [[CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
* and a possible exception if the IO operation was not completed successfully.
@@ -55,7 +55,7 @@ object StreamConverters {
fromOutputStream(f, autoFlush = false)
/**
* Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function.
* Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function.
*
* Materializes a [[CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
* and a possible exception if the IO operation was not completed successfully.

View file

@@ -25,7 +25,7 @@ object Compression {
/**
* Creates a flow that gzip-compresses a stream of ByteStrings. Note that the compressor
* will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]]
* will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]]
* coming out of the flow can be fully decompressed without waiting for additional data. This may
* come at a compression performance cost for very small chunks.
*
@@ -44,14 +44,14 @@ object Compression {
/**
* Creates a Flow that decompresses a gzip-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk.
*/
def gunzip(maxBytesPerChunk: Int = MaxBytesPerChunkDefault): Flow[ByteString, ByteString, NotUsed] =
Flow[ByteString].via(new GzipDecompressor(maxBytesPerChunk)).named("gunzip")
/**
* Creates a flow that deflate-compresses a stream of ByteString. Note that the compressor
* will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]]
* will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]]
* coming out of the flow can be fully decompressed without waiting for additional data. This may
* come at a compression performance cost for very small chunks.
*
@@ -71,7 +71,7 @@ object Compression {
/**
* Creates a Flow that decompresses a deflate-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk.
*/
def inflate(maxBytesPerChunk: Int = MaxBytesPerChunkDefault): Flow[ByteString, ByteString, NotUsed] =
inflate(maxBytesPerChunk, false)
@@ -79,7 +79,7 @@ object Compression {
/**
* Creates a Flow that decompresses a deflate-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk.
* @param nowrap if true then use GZIP compatible decompression
*/
def inflate(maxBytesPerChunk: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] =
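
A round-trip sketch of the operators above (implicit ActorSystem assumed):

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.{ Compression, Source }
import org.apache.pekko.util.ByteString

implicit val system: ActorSystem = ActorSystem("example")

// gzip then gunzip; every emitted chunk is independently decompressible.
val roundTrip =
  Source.single(ByteString("hello " * 1000))
    .via(Compression.gzip)
    .via(Compression.gunzip())
    .runFold(ByteString.empty)(_ ++ _) // Future[ByteString]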

View file

@@ -87,7 +87,7 @@ object FileIO {
Source.fromGraph(new FileSource(f, chunkSize, startPosition)).withAttributes(DefaultAttributes.fileSource)
/**
* Creates a Sink which writes incoming [[ByteString]] elements to the given file. Overwrites existing files
* Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file. Overwrites existing files
* by truncating their contents by default.
*
* Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
@@ -106,7 +106,7 @@ object FileIO {
toPath(f.toPath, options)
/**
* Creates a Sink which writes incoming [[ByteString]] elements to the given file path. Overwrites existing files
* Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file path. Overwrites existing files
* by truncating their contents by default.
*
* Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
@@ -130,7 +130,7 @@ object FileIO {
toPath(f, options, startPosition = 0)
/**
* Creates a Sink which writes incoming [[ByteString]] elements to the given file path. Overwrites existing files
* Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file path. Overwrites existing files
* by truncating their contents by default.
*
* Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
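
A copy-a-file sketch using the sinks above together with FileIO.fromPath (paths hypothetical, implicit ActorSystem assumed):

import java.nio.file.Paths
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.FileIO

implicit val system: ActorSystem = ActorSystem("example")

// Reads 8192-byte ByteString chunks and truncates/overwrites the target.
val ioResult =
  FileIO.fromPath(Paths.get("in.dat"))
    .runWith(FileIO.toPath(Paths.get("out.dat"))) // Future[IOResult]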

View file

@@ -24,7 +24,7 @@ import pekko.util.ByteString
import scala.util.control.NonFatal
/** Provides JSON framing operators that can separate valid JSON objects from incoming [[ByteString]] objects. */
/** Provides JSON framing operators that can separate valid JSON objects from incoming [[pekko.util.ByteString]] objects. */
object JsonFraming {
/** Thrown if upstream completes with a partial object in the buffer. */
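
A sketch of the framing in action (implicit ActorSystem assumed):

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.{ JsonFraming, Source }
import org.apache.pekko.util.ByteString

implicit val system: ActorSystem = ActorSystem("example")

// Two concatenated objects arrive in one chunk and come out as two frames.
Source.single(ByteString("""{"a":1}{"b":2}"""))
  .via(JsonFraming.objectScanner(maximumObjectLength = 1024))
  .runForeach(frame => println(frame.utf8String))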

View file

@@ -219,7 +219,7 @@ object Sink {
/**
* A `Sink` that materializes into a `Future` of the optional first value received.
* If the stream completes before signaling at least a single element, the value of the Future will be [[None]].
* If the stream completes before signaling at least a single element, the value of the Future will be [[scala.None]].
* If the stream signals an error before signaling at least a single element, the Future will be failed with the stream's exception.
*
* See also [[head]].
@@ -243,7 +243,7 @@ object Sink {
/**
* A `Sink` that materializes into a `Future` of the optional last value received.
* If the stream completes before signaling at least a single element, the value of the Future will be [[None]].
* If the stream completes before signaling at least a single element, the value of the Future will be [[scala.None]].
* If the stream signals an error, the Future will be failed with the stream's exception.
*
* See also [[last]], [[takeLast]].
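
For instance (implicit ActorSystem assumed):

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.{ Sink, Source }

implicit val system: ActorSystem = ActorSystem("example")

val noFirst = Source.empty[Int].runWith(Sink.headOption) // completes with None
val first   = Source(1 to 3).runWith(Sink.headOption)    // completes with Some(1)
val last    = Source(1 to 3).runWith(Sink.lastOption)    // completes with Some(3)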

View file

@@ -648,7 +648,7 @@ object Source {
* completion.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -738,7 +738,7 @@ object Source {
* The stream will complete with failure if a message is sent before the acknowledgement has been replied back.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*

View file

@@ -74,7 +74,7 @@ object StreamConverters {
Source.fromGraph(new OutputStreamSourceStage(writeTimeout))
/**
* Creates a Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function.
* Creates a Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function.
*
* Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the stream's completion,
* and a possible exception if the IO operation was not completed successfully.

View file

@@ -53,7 +53,7 @@ import scala.concurrent.Future
* elements a given transformation step might buffer before handing elements
* downstream, which means that transformation functions may be invoked more
* often than for corresponding transformations on strict collections like
* [[List]]. *An important consequence* is that elements that were produced
* [[scala.collection.immutable.List]]. *An important consequence* is that elements that were produced
* into a stream may be discarded by later processors, e.g. when using the
* [[#take]] operator.
*

View file

@@ -203,7 +203,7 @@ object GraphStageLogic {
/**
* Minimal actor to work with other actors and watch them in a synchronous way
*
* Not for user instantiation, use [[#getStageActor]].
* Not for user instantiation, use [[GraphStageLogic.getStageActor]].
*/
final class StageActor @InternalApi() private[pekko] (
materializer: Materializer,
@@ -1158,9 +1158,9 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
/**
* Obtain a callback object that can be used asynchronously to re-enter the
* current [[GraphStage]] with an asynchronous notification. The [[invoke]] method of the returned
* current [[GraphStage]] with an asynchronous notification. The [[AsyncCallback.invoke]] method of the returned
* [[AsyncCallback]] is safe to be called from other threads. It will in the background thread-safely
* delegate to the passed callback function. I.e. [[invoke]] will be called by another thread and
* delegate to the passed callback function. I.e. [[AsyncCallback.invoke]] will be called by another thread and
* the passed handler will be invoked eventually in a thread-safe way by the execution environment.
*
* In case stream is not yet materialized [[AsyncCallback]] will buffer events until stream is available.
@@ -1268,9 +1268,9 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
/**
* Java API: Obtain a callback object that can be used asynchronously to re-enter the
* current [[GraphStage]] with an asynchronous notification. The [[invoke]] method of the returned
* current [[GraphStage]] with an asynchronous notification. The [[AsyncCallback.invoke]] method of the returned
* [[AsyncCallback]] is safe to be called from other threads. It will in the background thread-safely
* delegate to the passed callback function. I.e. [[invoke]] will be called by another thread and
* delegate to the passed callback function. I.e. [[AsyncCallback.invoke]] will be called by another thread and
* the passed handler will be invoked eventually in a thread-safe way by the execution environment.
*
* [[AsyncCallback.invokeWithFeedback]] has an internal promise that will be failed if event cannot be processed due to stream completion.
@@ -1310,18 +1310,18 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
_subInletsAndOutlets -= outlet
/**
* Initialize a [[StageActorRef]] which can be used to interact with the outside world "as-if" it were an [[Actor]].
* Initialize a [[GraphStageLogic.StageActorRef]] which can be used to interact with the outside world "as-if" it were an [[pekko.actor.Actor]].
* The messages are looped through the [[getAsyncCallback]] mechanism of [[GraphStage]] so they are safe to modify
* internal state of this operator.
*
* This method must be called (at the earliest) after the [[GraphStageLogic]] constructor has finished running,
* for example from the [[preStart]] callback the graph operator logic provides.
*
* Use the created [[StageActorRef]] to get messages and watch other actors in a synchronous way.
* Use the created [[GraphStageLogic.StageActorRef]] to get messages and watch other actors in a synchronous way.
*
* The [[StageActorRef]]'s lifecycle is bound to the operator, in other words when the operator is finished,
* the Actor will be terminated as well. The entity backing the [[StageActorRef]] is not a real Actor,
* but the [[GraphStageLogic]] itself, therefore it does not react to [[PoisonPill]].
* The [[GraphStageLogic.StageActorRef]]'s lifecycle is bound to the operator, in other words when the operator is finished,
* the Actor will be terminated as well. The entity backing the [[GraphStageLogic.StageActorRef]] is not a real Actor,
* but the [[GraphStageLogic]] itself, therefore it does not react to [[pekko.actor.PoisonPill]].
*
* To be thread safe this method must only be called from either the constructor of the graph operator during
* materialization or one of the methods invoked by the graph operator machinery, such as `onPush` and `onPull`.
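
The async-callback mechanism from the hunks above, sketched as a custom source stage that emits the result of a Future; the stage and its names are illustrative, not part of the library:

import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }
import org.apache.pekko.stream.{ Attributes, Outlet, SourceShape }
import org.apache.pekko.stream.stage.{ GraphStage, GraphStageLogic, OutHandler }

// Emits the value of a Future and completes; fails the stage on a failed Future.
final class FutureSource[T](future: => Future[T]) extends GraphStage[SourceShape[T]] {
  val out: Outlet[T] = Outlet("FutureSource.out")
  val shape: SourceShape[T] = SourceShape(out)

  def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with OutHandler {
      setHandler(out, this)

      override def preStart(): Unit = {
        // The returned callback may be invoked from the future's thread;
        // the handler below is still run thread-safely inside the stage.
        val callback = getAsyncCallback[Try[T]] {
          case Success(value) => emit(out, value); completeStage()
          case Failure(ex)    => failStage(ex)
        }
        future.onComplete(callback.invoke)(materializer.executionContext)
      }

      def onPull(): Unit = () // element is emitted once the future completes
    }
}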

View file

@@ -255,7 +255,7 @@ trait TestKitBase {
/**
* Obtain time remaining for execution of the innermost enclosing `within`
* block or throw an [[AssertionError]] if no `within` block surrounds this
* block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this
* call.
*/
def remaining: FiniteDuration = end match {
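
An illustration of the contract (the probe setup is a minimal sketch):

import scala.concurrent.duration._
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.testkit.TestKit

val kit = new TestKit(ActorSystem("example"))
import kit._

within(3.seconds) {
  expectNoMessage(100.millis)
  println(s"time left in the within block: $remaining")
}
// Calling `remaining` outside of a `within` block throws an AssertionError.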

View file

@@ -149,7 +149,7 @@ class TestKit(system: ActorSystem) {
/**
* Obtain time remaining for execution of the innermost enclosing `within`
* block or throw an [[AssertionError]] if no `within` block surrounds this
* block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this
* call.
*/
@deprecated("Use getRemaining which returns java.time.Duration instead.", since = "Akka 2.5.12")
@@ -157,7 +157,7 @@ class TestKit(system: ActorSystem) {
/**
* Obtain time remaining for execution of the innermost enclosing `within`
* block or throw an [[AssertionError]] if no `within` block surrounds this
* block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this
* call.
*/
def getRemaining: java.time.Duration = tp.remaining.asJava