Fix invalid scaladoc links which cannot be found (#353)

* Add the necessary package qualifiers so link targets resolve
* Fix invalid syntax in links to methods
This commit is contained in:
Naoki Yamada 2023-07-06 19:07:36 +09:00 committed by kerr
parent 15f02d696f
commit 201992d984
100 changed files with 251 additions and 250 deletions

View file

@@ -53,7 +53,7 @@ trait Graph[+S <: Shape, +M] {
/**
* Specifies the name of the Graph.
* If the name is null or empty the name is ignored, i.e. [[#none]] is returned.
* If the name is null or empty the name is ignored, i.e. [[Attributes.none]] is returned.
*/
def named(name: String): Graph[S, M] = addAttributes(Attributes.name(name))

View file

@@ -49,7 +49,7 @@ final case class IOResult(
def wasSuccessful: Boolean = status.isSuccess
/**
* Java API: If the IO operation resulted in an error, returns the corresponding [[Throwable]]
* Java API: If the IO operation resulted in an error, returns the corresponding [[java.lang.Throwable]]
* or throws [[UnsupportedOperationException]] otherwise.
*/
@deprecated("status is always set to Success(Done)", "Akka 2.6.0")

View file

@@ -100,7 +100,7 @@ abstract class Materializer {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* supported by the `Scheduler`.
*
* @return A [[pekko.actor.Cancellable]] that allows cancelling the timer. Cancelling is best effort, if the event
@@ -136,7 +136,7 @@ abstract class Materializer {
* If the `Runnable` throws an exception the repeated scheduling is aborted,
* i.e. the function will not be invoked any more.
*
* @throws IllegalArgumentException if the given delays exceed the maximum
* @throws java.lang.IllegalArgumentException if the given delays exceed the maximum
* supported by the `Scheduler`.
*
* @return A [[pekko.actor.Cancellable]] that allows cancelling the timer. Cancelling is best effort, if the event

View file

@@ -32,12 +32,12 @@ import pekko.stream.scaladsl.{ Sink, Source }
*/
object SinkRef {
/** Implicitly converts a [[SinkRef]] to a [[Sink]]. The same can be achieved by calling `.sink` on the reference. */
/** Implicitly converts a [[SinkRef]] to a [[scaladsl.Sink]]. The same can be achieved by calling `.sink` on the reference. */
implicit def convertRefToSink[T](sinkRef: SinkRef[T]): Sink[T, NotUsed] = sinkRef.sink()
}
/**
* A [[SinkRef]] allows sharing a "reference" to a [[Sink]] with others, with the main purpose of crossing a network boundary.
* A [[SinkRef]] allows sharing a "reference" to a [[scaladsl.Sink]] with others, with the main purpose of crossing a network boundary.
* Usually obtaining a SinkRef would be done via Actor messaging, in which one system asks a remote one,
* to accept some data from it, and the remote one decides to accept the request to send data in a back-pressured
* streaming fashion -- using a sink ref.
@@ -54,7 +54,7 @@ object SinkRef {
@DoNotInherit
trait SinkRef[In] {
/** Scala API: Get [[Sink]] underlying to this source ref. */
/** Scala API: Get [[scaladsl.Sink]] underlying to this source ref. */
def sink(): Sink[In, NotUsed]
/** Java API: Get [[javadsl.Sink]] underlying to this source ref. */

View file

@@ -50,7 +50,7 @@ import pekko.util.ByteString
/**
* INTERNAL API: Use [[pekko.stream.scaladsl.JsonFraming]] instead.
*
* **Mutable** framing implementation that given any number of [[ByteString]] chunks, can emit JSON objects contained within them.
* **Mutable** framing implementation that given any number of [[pekko.util.ByteString]] chunks, can emit JSON objects contained within them.
* Typically JSON objects are separated by new-lines or commas, however a top-level JSON Array can also be understood and chunked up
* into valid JSON objects by this framing implementation.
*

View file

@@ -23,7 +23,7 @@ object Compression {
/**
* Creates a Flow that decompresses gzip-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk.
*/
def gunzip(maxBytesPerChunk: Int): Flow[ByteString, ByteString, NotUsed] =
scaladsl.Compression.gunzip(maxBytesPerChunk).asJava
@@ -31,7 +31,7 @@ object Compression {
/**
* Creates a Flow that decompresses deflate-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk.
*/
def inflate(maxBytesPerChunk: Int): Flow[ByteString, ByteString, NotUsed] =
inflate(maxBytesPerChunk, false)
@@ -39,7 +39,7 @@ object Compression {
/**
* Same as [[inflate]] with configurable maximum output length and nowrap
*
* @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk.
* @param nowrap if true then use GZIP compatible decompression
*/
def inflate(maxBytesPerChunk: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] =
@@ -47,7 +47,7 @@ object Compression {
/**
* Creates a flow that gzip-compresses a stream of ByteStrings. Note that the compressor
* will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]]
* will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]]
* coming out of the flow can be fully decompressed without waiting for additional data. This may
* come at a compression performance cost for very small chunks.
*/
@@ -64,7 +64,7 @@ object Compression {
/**
* Creates a flow that deflate-compresses a stream of ByteString. Note that the compressor
* will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]]
* will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]]
* coming out of the flow can be fully decompressed without waiting for additional data. This may
* come at a compression performance cost for very small chunks.
*/

View file

@@ -31,7 +31,7 @@ import pekko.stream.scaladsl.SourceToCompletionStage
object FileIO {
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file.
* Overwrites existing files by truncating their contents, if you want to append to an existing file use
* [[toFile(File, util.Set[OpenOption])]] with [[java.nio.file.StandardOpenOption.APPEND]].
*
@@ -47,7 +47,7 @@ object FileIO {
def toFile(f: File): javadsl.Sink[ByteString, CompletionStage[IOResult]] = toPath(f.toPath)
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file path.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path.
* Overwrites existing files by truncating their contents, if you want to append to an existing file
* [[toPath(Path, util.Set[OpenOption])]] with [[java.nio.file.StandardOpenOption.APPEND]].
*
@@ -69,7 +69,7 @@ object FileIO {
new Sink(scaladsl.FileIO.toPath(f).toCompletionStage())
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file.
*
* Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
* and a possible exception if IO operation was not completed successfully.
@@ -85,7 +85,7 @@ object FileIO {
toPath(f.toPath)
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file path.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path.
*
* Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
* and a possible exception if IO operation was not completed successfully.
@@ -106,7 +106,7 @@ object FileIO {
new Sink(scaladsl.FileIO.toPath(f, options.asScala.toSet).toCompletionStage())
/**
* Creates a Sink that writes incoming [[ByteString]] elements to the given file path.
* Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path.
*
* Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
* and a possible exception if IO operation was not completed successfully.
@@ -132,7 +132,7 @@ object FileIO {
/**
* Creates a Source from a files contents.
* Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes,
* Emitted elements are [[pekko.util.ByteString]] elements, chunked by default by 8192 bytes,
* except the last element, which will be up to 8192 in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or
@@ -149,7 +149,7 @@ object FileIO {
/**
* Creates a Source from a files contents.
* Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes,
* Emitted elements are [[pekko.util.ByteString]] elements, chunked by default by 8192 bytes,
* except the last element, which will be up to 8192 in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or
@@ -165,7 +165,7 @@ object FileIO {
/**
* Creates a synchronous Source from a files contents.
* Emitted elements are `chunkSize` sized [[ByteString]] elements,
* Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements,
* except the last element, which will be up to `chunkSize` in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or
@@ -184,7 +184,7 @@ object FileIO {
/**
* Creates a synchronous Source from a files contents.
* Emitted elements are `chunkSize` sized [[ByteString]] elements,
* Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements,
* except the last element, which will be up to `chunkSize` in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or
@@ -202,7 +202,7 @@ object FileIO {
/**
* Creates a synchronous Source from a files contents.
* Emitted elements are `chunkSize` sized [[ByteString]] elements,
* Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements,
* except the last element, which will be up to `chunkSize` in size.
*
* You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or

View file

@@ -432,7 +432,7 @@ object Source {
* completion.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -528,7 +528,7 @@ object Source {
* The stream will complete with failure if a message is sent before the acknowledgement has been replied back.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -562,7 +562,7 @@ object Source {
* The stream will complete with failure if a message is sent before the acknowledgement has been replied back.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*

View file

@@ -36,7 +36,7 @@ import pekko.stream.scaladsl.SourceToCompletionStage
object StreamConverters {
/**
* Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function.
* Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function.
*
* Materializes a [[CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
* and a possible exception if IO operation was not completed successfully.
@@ -55,7 +55,7 @@ object StreamConverters {
fromOutputStream(f, autoFlush = false)
/**
* Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function.
* Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function.
*
* Materializes a [[CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
* and a possible exception if IO operation was not completed successfully.

View file

@@ -25,7 +25,7 @@ object Compression {
/**
* Creates a flow that gzip-compresses a stream of ByteStrings. Note that the compressor
* will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]]
* will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]]
* coming out of the flow can be fully decompressed without waiting for additional data. This may
* come at a compression performance cost for very small chunks.
*
@@ -44,14 +44,14 @@ object Compression {
/**
* Creates a Flow that decompresses a gzip-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk.
*/
def gunzip(maxBytesPerChunk: Int = MaxBytesPerChunkDefault): Flow[ByteString, ByteString, NotUsed] =
Flow[ByteString].via(new GzipDecompressor(maxBytesPerChunk)).named("gunzip")
/**
* Creates a flow that deflate-compresses a stream of ByteString. Note that the compressor
* will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]]
* will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]]
* coming out of the flow can be fully decompressed without waiting for additional data. This may
* come at a compression performance cost for very small chunks.
*
@@ -71,7 +71,7 @@ object Compression {
/**
* Creates a Flow that decompresses a deflate-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk.
*/
def inflate(maxBytesPerChunk: Int = MaxBytesPerChunkDefault): Flow[ByteString, ByteString, NotUsed] =
inflate(maxBytesPerChunk, false)
@@ -79,7 +79,7 @@ object Compression {
/**
* Creates a Flow that decompresses a deflate-compressed stream of data.
*
* @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk.
* @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk.
* @param nowrap if true then use GZIP compatible decompression
*/
def inflate(maxBytesPerChunk: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] =

View file

@@ -87,7 +87,7 @@ object FileIO {
Source.fromGraph(new FileSource(f, chunkSize, startPosition)).withAttributes(DefaultAttributes.fileSource)
/**
* Creates a Sink which writes incoming [[ByteString]] elements to the given file. Overwrites existing files
* Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file. Overwrites existing files
* by truncating their contents as default.
*
* Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
@@ -106,7 +106,7 @@ object FileIO {
toPath(f.toPath, options)
/**
* Creates a Sink which writes incoming [[ByteString]] elements to the given file path. Overwrites existing files
* Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file path. Overwrites existing files
* by truncating their contents as default.
*
* Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
@@ -130,7 +130,7 @@ object FileIO {
toPath(f, options, startPosition = 0)
/**
* Creates a Sink which writes incoming [[ByteString]] elements to the given file path. Overwrites existing files
* Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file path. Overwrites existing files
* by truncating their contents as default.
*
* Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,

View file

@@ -24,7 +24,7 @@ import pekko.util.ByteString
import scala.util.control.NonFatal
/** Provides JSON framing operators that can separate valid JSON objects from incoming [[ByteString]] objects. */
/** Provides JSON framing operators that can separate valid JSON objects from incoming [[pekko.util.ByteString]] objects. */
object JsonFraming {
/** Thrown if upstream completes with a partial object in the buffer. */

View file

@@ -219,7 +219,7 @@ object Sink {
/**
* A `Sink` that materializes into a `Future` of the optional first value received.
* If the stream completes before signaling at least a single element, the value of the Future will be [[None]].
* If the stream completes before signaling at least a single element, the value of the Future will be [[scala.None]].
* If the stream signals an error errors before signaling at least a single element, the Future will be failed with the streams exception.
*
* See also [[head]].
@@ -243,7 +243,7 @@ object Sink {
/**
* A `Sink` that materializes into a `Future` of the optional last value received.
* If the stream completes before signaling at least a single element, the value of the Future will be [[None]].
* If the stream completes before signaling at least a single element, the value of the Future will be [[scala.None]].
* If the stream signals an error, the Future will be failed with the stream's exception.
*
* See also [[last]], [[takeLast]].

View file

@@ -648,7 +648,7 @@ object Source {
* completion.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*
@@ -738,7 +738,7 @@ object Source {
* The stream will complete with failure if a message is sent before the acknowledgement has been replied back.
*
* The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted
* [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received
* a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`,
* the failure will be signaled downstream immediately (instead of the completion signal).
*

View file

@@ -74,7 +74,7 @@ object StreamConverters {
Source.fromGraph(new OutputStreamSourceStage(writeTimeout))
/**
* Creates a Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function.
* Creates a Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function.
*
* Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
* and a possible exception if IO operation was not completed successfully.

View file

@@ -53,7 +53,7 @@ import scala.concurrent.Future
* elements a given transformation step might buffer before handing elements
* downstream, which means that transformation functions may be invoked more
* often than for corresponding transformations on strict collections like
* [[List]]. *An important consequence* is that elements that were produced
* [[scala.collection.immutable.List]]. *An important consequence* is that elements that were produced
* into a stream may be discarded by later processors, e.g. when using the
* [[#take]] operator.
*

View file

@@ -203,7 +203,7 @@ object GraphStageLogic {
/**
* Minimal actor to work with other actors and watch them in a synchronous ways
*
* Not for user instantiation, use [[#getStageActor]].
* Not for user instantiation, use [[GraphStageLogic.getStageActor]].
*/
final class StageActor @InternalApi() private[pekko] (
materializer: Materializer,
@@ -1158,9 +1158,9 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
/**
* Obtain a callback object that can be used asynchronously to re-enter the
* current [[GraphStage]] with an asynchronous notification. The [[invoke]] method of the returned
* current [[GraphStage]] with an asynchronous notification. The [[AsyncCallback.invoke]] method of the returned
* [[AsyncCallback]] is safe to be called from other threads. It will in the background thread-safely
* delegate to the passed callback function. I.e. [[invoke]] will be called by other thread and
* delegate to the passed callback function. I.e. [[AsyncCallback.invoke]] will be called by other thread and
* the passed handler will be invoked eventually in a thread-safe way by the execution environment.
*
* In case stream is not yet materialized [[AsyncCallback]] will buffer events until stream is available.
@@ -1268,9 +1268,9 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
/**
* Java API: Obtain a callback object that can be used asynchronously to re-enter the
* current [[GraphStage]] with an asynchronous notification. The [[invoke]] method of the returned
* current [[GraphStage]] with an asynchronous notification. The [[AsyncCallback.invoke]] method of the returned
* [[AsyncCallback]] is safe to be called from other threads. It will in the background thread-safely
* delegate to the passed callback function. I.e. [[invoke]] will be called by other thread and
* delegate to the passed callback function. I.e. [[AsyncCallback.invoke]] will be called by other thread and
* the passed handler will be invoked eventually in a thread-safe way by the execution environment.
*
* [[AsyncCallback.invokeWithFeedback]] has an internal promise that will be failed if event cannot be processed due to stream completion.
@@ -1310,18 +1310,18 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
_subInletsAndOutlets -= outlet
/**
* Initialize a [[StageActorRef]] which can be used to interact with from the outside world "as-if" an [[Actor]].
* Initialize a [[GraphStageLogic.StageActorRef]] which can be used to interact with from the outside world "as-if" an [[pekko.actor.Actor]].
* The messages are looped through the [[getAsyncCallback]] mechanism of [[GraphStage]] so they are safe to modify
* internal state of this operator.
*
* This method must (the earliest) be called after the [[GraphStageLogic]] constructor has finished running,
* for example from the [[preStart]] callback the graph operator logic provides.
*
* Created [[StageActorRef]] to get messages and watch other actors in synchronous way.
* Created [[GraphStageLogic.StageActorRef]] to get messages and watch other actors in synchronous way.
*
* The [[StageActorRef]]'s lifecycle is bound to the operator, in other words when the operator is finished,
* the Actor will be terminated as well. The entity backing the [[StageActorRef]] is not a real Actor,
* but the [[GraphStageLogic]] itself, therefore it does not react to [[PoisonPill]].
* The [[GraphStageLogic.StageActorRef]]'s lifecycle is bound to the operator, in other words when the operator is finished,
* the Actor will be terminated as well. The entity backing the [[GraphStageLogic.StageActorRef]] is not a real Actor,
* but the [[GraphStageLogic]] itself, therefore it does not react to [[pekko.actor.PoisonPill]].
*
* To be thread safe this method must only be called from either the constructor of the graph operator during
* materialization or one of the methods invoked by the graph operator machinery, such as `onPush` and `onPull`.