Fix typos via a Levenshtein-style corrector
This commit is contained in:
parent e0a1110794
commit fd41299943
45 changed files with 49 additions and 49 deletions
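None of the corrector's own code appears in this commit; as a rough illustration of the approach named in the title, here is a minimal Levenshtein-style corrector sketch in Scala. Everything in it (object name, dictionary, distance threshold) is an illustrative assumption, not the actual tool.

```scala
// Hypothetical sketch of a Levenshtein-style typo corrector; not the tool used for this commit.
object TypoSuggester {

  // Classic dynamic-programming edit distance (insertions, deletions, substitutions).
  def levenshtein(a: String, b: String): Int = {
    val dp = Array.ofDim[Int](a.length + 1, b.length + 1)
    for (i <- 0 to a.length) dp(i)(0) = i
    for (j <- 0 to b.length) dp(0)(j) = j
    for (i <- 1 to a.length; j <- 1 to b.length) {
      val cost = if (a(i - 1) == b(j - 1)) 0 else 1
      dp(i)(j) = math.min(math.min(dp(i - 1)(j) + 1, dp(i)(j - 1) + 1), dp(i - 1)(j - 1) + cost)
    }
    dp(a.length)(b.length)
  }

  // Propose the closest dictionary word within a small edit-distance budget.
  def suggest(token: String, dictionary: Set[String], maxDistance: Int = 2): Option[String] = {
    val candidates = dictionary.toSeq
      .map(word => word -> levenshtein(token, word))
      .filter { case (_, d) => d <= maxDistance }
    if (candidates.isEmpty) None else Some(candidates.minBy { case (_, d) => d }._1)
  }
}

// e.g. TypoSuggester.suggest("pacakges", Set("packages", "package")) yields Some("packages"),
// matching the first fix below.
```

In practice such a tool would also tokenize comments and strings and only flag tokens whose best dictionary match is unique.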
@@ -15,7 +15,7 @@ ignored-files = [
   "FlowZipWithIndexSpec.scala"
 ]
 
-//ignored pacakges
+//ignored packages
 ignored-packages = [
   "doc",
   "jdoc"
@@ -114,7 +114,7 @@ class ActorLifeCycleSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitS
       system.stop(supervisor)
     }
 
-    "log failues in postStop" in {
+    "log failures in postStop" in {
       val a = system.actorOf(Props(new Actor {
         def receive = Actor.emptyBehavior
         override def postStop: Unit = { throw new Exception("hurrah") }
@@ -98,7 +98,7 @@ class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString("""
         akka.actor.internal-dispatcher = akka.actor.default-dispatcher
         """))
       try {
-        // that the user guardian runs on the overriden dispatcher instead of internal
+        // that the user guardian runs on the overridden dispatcher instead of internal
         // isn't really a guarantee any internal actor has been made running on the right one
         // but it's better than no test coverage at all
         userGuardianDispatcher(sys) should ===("akka.actor.default-dispatcher")
@@ -277,7 +277,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {
     }
 
     // #28266 reproducer
-    "get the timeout when scheduled immedately on restart" in {
+    "get the timeout when scheduled immediately on restart" in {
       val probe = TestProbe()
       val ref = system.actorOf(Props(new RestartingParent(probe.ref)))
       probe.expectMsg("starting")
@@ -334,7 +334,7 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT
       resizer.resize(routees(7)) should be(2)
     }
 
-    "ignore further away sample data when optmizing" in {
+    "ignore further away sample data when optimizing" in {
       val resizer = DefaultOptimalSizeExploringResizer(
         explorationProbability = 0,
         numOfAdjacentSizesToConsiderDuringOptimization = 4)
@@ -245,7 +245,7 @@ class InterceptSpec extends ScalaTestWithActorTestKit with WordSpecLike with Log
       probe.expectMessage("after b")
     }
 
-    "intercept with recursivly setup" in {
+    "intercept with recursively setup" in {
       val probe = TestProbe[String]()
       val interceptor = snitchingInterceptor(probe.ref)
 
@@ -218,7 +218,7 @@ class OrElseSpec extends WordSpec with Matchers with LogCapturing {
       testFirstMatching(CompositionWithPartialFunction.ping(Map.empty))
     }
 
-    "use first matching behavor via delegating interceptor" in {
+    "use first matching behavior via delegating interceptor" in {
       testFirstMatching(CompositionWithInterceptor.ping())
     }
   }
@@ -33,7 +33,7 @@ object DatabasePool extends ExtensionId[DatabasePool] {
 @silent
 //#extension
 class DatabasePool(system: ActorSystem[_]) extends Extension {
-  // database configuration can be laoded from config
+  // database configuration can be loaded from config
   // from the actor system
   private val _connection = new ExpensiveDatabaseConnection()
 
@@ -26,7 +26,7 @@ import scala.reflect.ClassTag
 }
 
 /**
- * Support for Mapped Dagnostic Context for logging
+ * Support for Mapped Diagnostic Context for logging
  *
  * INTERNAL API
  */
@@ -261,7 +261,7 @@ object Behaviors {
     Supervisor(Behavior.validateAsInitial(wrapped), strategy)(ClassTag(clazz))
 
   /**
-   * Specify the [[SupervisorStrategy]] to be invoked when the wrapped behaior throws.
+   * Specify the [[SupervisorStrategy]] to be invoked when the wrapped behavior throws.
    *
    * All non-fatal (see [[scala.util.control.NonFatal]]) exceptions types will be handled using the given strategy.
    */
@@ -316,7 +316,7 @@ akka {
       # exploration will be +- 5
       explore-step-size = 0.1
 
-      # Probability of doing an exploration v.s. optmization.
+      # Probability of doing an exploration v.s. optimization.
       chance-of-exploration = 0.4
 
       # When downsizing after a long streak of underutilization, the resizer
@@ -359,7 +359,7 @@ class ClusterShardingSpec
     }
 
     "use the stopMessage for leaving/rebalance" in {
-      // use many entites to reduce the risk that all are hashed to the same shard/node
+      // use many entities to reduce the risk that all are hashed to the same shard/node
       val numberOfEntities = 100
       val probe1 = TestProbe[String]()
       (1 to numberOfEntities).foreach { n =>
@@ -138,7 +138,7 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider {
       // which is possible for OR CRDTs - done with an adapter to leverage the existing NodesRemoved message
       timers.startTimerWithFixedDelay(RemoveTick, setup.settings.pruningInterval)
 
-      // default tomstone keepalive is 24h (based on prune-gossip-tombstones-after) and keeping the actorrefs
+      // default tombstone keepalive is 24h (based on prune-gossip-tombstones-after) and keeping the actorrefs
      // around isn't very costly so don't prune often
       timers.startTimerWithFixedDelay(PruneTombstonesTick, setup.keepTombstonesFor / 24)
 
@@ -184,7 +184,7 @@ abstract class MultiDcHeartbeatTakingOverSpec
    * INTERNAL API
    * Returns `Up` (or in "later" status, like Leaving etc, but never `Joining` or `WeaklyUp`) members,
    * sorted by Member.ageOrdering (from oldest to youngest). This restriction on status is needed to
-   * strongly guaratnee the order of "oldest" members, as they're linearized by the order in which they become Up
+   * strongly guarantee the order of "oldest" members, as they're linearized by the order in which they become Up
    * (since marking that transition is a Leader action).
    */
   private def membersByAge(dataCenter: ClusterSettings.DataCenter): immutable.SortedSet[Member] =
@@ -143,7 +143,7 @@ abstract class MultiDcSunnyWeatherSpec
    * INTERNAL API
    * Returns `Up` (or in "later" status, like Leaving etc, but never `Joining` or `WeaklyUp`) members,
    * sorted by Member.ageOrdering (from oldest to youngest). This restriction on status is needed to
-   * strongly guaratnee the order of "oldest" members, as they're linearized by the order in which they become Up
+   * strongly guarantee the order of "oldest" members, as they're linearized by the order in which they become Up
    * (since marking that transition is a Leader action).
    */
   private def membersByAge(dataCenter: ClusterSettings.DataCenter): immutable.SortedSet[Member] =
@@ -569,7 +569,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
         else if (entry.getOperation == rd.ORSetDeltaOp.Full)
           ORSet.FullStateDeltaOp(orsetFromProto(entry.getUnderlying))
         else
-          throw new NotSerializableException(s"Unknow ORSet delta operation ${entry.getOperation}")
+          throw new NotSerializableException(s"Unknown ORSet delta operation ${entry.getOperation}")
       }
       .to(immutable.Vector)
     ORSet.DeltaGroup(ops)
@@ -253,7 +253,7 @@ class JepsenInspiredInsertSpec
       failureWriteAcks should be(Nil)
     }
     runOn(n2, n3) {
-      // without delays all could teoretically have been written before the blackhole
+      // without delays all could theoretically have been written before the blackhole
       if (delayMillis != 0)
         failureWriteAcks should not be (Nil)
     }
@@ -58,7 +58,7 @@ class ReplicatedDataSerializerSpec
 
   /**
    * Given a blob created with the previous serializer (with only string keys for maps). If we deserialize it and then
-   * serialize it again and arive at the same BLOB we can assume that we are compatible in both directions.
+   * serialize it again and arrive at the same BLOB we can assume that we are compatible in both directions.
    */
   def checkCompatibility(oldBlobAsBase64: String, obj: AnyRef): Unit = {
     val oldBlob = Base64.getDecoder.decode(oldBlobAsBase64)
@@ -764,7 +764,7 @@ A simple `toJournal:MyModel=>MyDataModel` and `fromJournal:MyDataModel=>MyModel`
 understand JSON it is possible to write an EventAdapter `toJournal:Any=>JSON` such that the Journal can *directly* store the
 json instead of serializing the object to its binary representation.
 
-Implementing an EventAdapter is rather stright forward:
+Implementing an EventAdapter is rather straightforward:
 
 Scala
 : @@snip [PersistenceEventAdapterDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #identity-event-adapter }
@@ -171,7 +171,7 @@ if you want a backpressured actor interface.
 
 The stream can be completed successfully by sending `akka.actor.Status.Success` to the actor reference.
 If the content is `akka.stream.CompletionStrategy.immediately` the completion will be signaled immediately.
-If the content is `akka.stream.CompletionStrategy.draining` already buffered elements will be signaled before siganling completion.
+If the content is `akka.stream.CompletionStrategy.draining` already buffered elements will be signaled before signaling completion.
 Any other content will be ignored and fall back to the draining behaviour.
 
 The stream can be completed with failure by sending `akka.actor.Status.Failure` to the
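For context on the documentation hunk above, a minimal sketch of the completion protocol it describes, using the Akka 2.6-era `Source.actorRef` (the two-argument overload shown here matches that era and is deprecated in later releases; all names are illustrative):

```scala
import akka.actor.{ ActorSystem, Status }
import akka.stream.{ CompletionStrategy, OverflowStrategy }
import akka.stream.scaladsl.{ Sink, Source }

object DrainingCompletionSketch extends App {
  implicit val system: ActorSystem = ActorSystem("draining-sketch")

  // Materializes an ActorRef; messages sent to it are emitted by the source.
  val ref = Source
    .actorRef[Int](bufferSize = 16, overflowStrategy = OverflowStrategy.dropHead)
    .to(Sink.foreach(println))
    .run()

  ref ! 1
  ref ! 2
  // With `draining`, the two buffered elements are emitted before completion is signaled;
  // with `immediately`, completion would be signaled right away.
  ref ! Status.Success(CompletionStrategy.draining)
}
```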
@@ -200,7 +200,7 @@ Scala
 Java
 : @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/GracefulStopDocTest.java) { #master-actor-watch }
 
-An alternative to `watch` is `watchWith`, which allows specifying a custom message instead of the `Terminted`.
+An alternative to `watch` is `watchWith`, which allows specifying a custom message instead of the `Terminated`.
 This is often preferred over using `watch` and the `Terminated` signal because additional information can
 be included in the message that can be used later when receiving it.
 
@@ -33,7 +33,7 @@ Until @github[#26338](#26338), [this simple example]($github.base_url$/akka-clus
 ### The DistributedPubSub extension
 
 The mediator can either be started and accessed with the `akka.cluster.pubsub.DistributedPubSub` extension as shown below,
-or started as an ordinary actor, see the full Akka Classic documentation @ref:[Clasic Distributed PubSub Extension](../distributed-pub-sub.md#distributedpubsub-extension).
+or started as an ordinary actor, see the full Akka Classic documentation @ref:[Classic Distributed PubSub Extension](../distributed-pub-sub.md#distributedpubsub-extension).
 
 Scala
 : @@snip [DistributedPubSubExample.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala) { #mediator }
@@ -42,7 +42,7 @@ Actors register to a topic for Pub-Sub mode, or register to a path for point-to-
 
 ## Publish
 
-Pub-Sub mode. For the full Akka Classic documentation of this feature see @ref:[Clasic Distributed PubSub Publish](../distributed-pub-sub.md#publish).
+Pub-Sub mode. For the full Akka Classic documentation of this feature see @ref:[Classic Distributed PubSub Publish](../distributed-pub-sub.md#publish).
 
 ### Subscribers
 
@@ -71,7 +71,7 @@ Scala
 
 ## Send
 
-Messages can be sent in point-to-point or broadcast mode. For the full Akka Classic documentation of this feature see @ref:[Clasic Distributed PubSub Send](../distributed-pub-sub.md#send).
+Messages can be sent in point-to-point or broadcast mode. For the full Akka Classic documentation of this feature see @ref:[Classic Distributed PubSub Send](../distributed-pub-sub.md#send).
 
 First, an actor must register a destination to send to:
 
@@ -88,4 +88,4 @@ can explicitly remove entries with `DistributedPubSubMediator.Remove`.
 
 ## Delivery Guarantee
 
-For the full Akka Classic documentation of this see @ref:[Clasic Distributed PubSub Delivery Guarantee](../distributed-pub-sub.md#delivery-guarantee).
+For the full Akka Classic documentation of this see @ref:[Classic Distributed PubSub Delivery Guarantee](../distributed-pub-sub.md#delivery-guarantee).
@@ -322,7 +322,7 @@ Java
 
 @@@ div {.group-java}
 
-## Lamdas versus method references
+## Lambdas versus method references
 
 It's recommended to keep the message matching with the `ReceiveBuilder` as short and clean as possible
 and delegate to methods. This improves readability and ease of method navigation with an IDE.
@@ -64,7 +64,7 @@ public class SchedulerDocTest extends AbstractJavaTest {
         .matchEquals(
             "Tick",
             m -> {
-              // Do someting
+              // Do something
             })
         .build();
   }
@@ -42,7 +42,7 @@ class ClusterDocSpec extends AkkaSpec(ClusterDocSpec.config) with CompileOnlySpe
       }
     }
 
-    "demonstrate programatic joining to seed nodes" in compileOnlySpec {
+    "demonstrate programmatic joining to seed nodes" in compileOnlySpec {
       //#join-seed-nodes
       import akka.actor.Address
       import akka.cluster.Cluster
@@ -65,7 +65,7 @@ class StreamBuffersRateSpec extends AkkaSpec {
       //#buffering-abstraction-leak
     }
 
-    "explcit buffers" in {
+    "explicit buffers" in {
       trait Job
       def inboundJobsConnector(): Source[Job, NotUsed] = Source.empty
       //#explicit-buffers-backpressure
@@ -12,7 +12,7 @@ import scala.concurrent.duration._
 
 class RecipeFlattenSeq extends RecipeSpec {
 
-  "Recipe for flatteing a stream of seqs" must {
+  "Recipe for flattening a stream of seqs" must {
 
     "work" in {
 
@@ -15,7 +15,7 @@ trait MayVerb {
    * Configurable number of frames to be shown when a MAY test fails (is canceled).
    *
    * Defaults to `3`.
-   * Must be geater than `0`.
+   * Must be greater than `0`.
    */
   def mayVerbStacktraceContextFrames = 3
 
@@ -497,7 +497,7 @@ abstract class AbstractPersistentFSM[S <: FSMState, D, E]
   /**
    * Adapter from Java 8 Functional Interface to Scala Function
    * @param action - Java 8 lambda expression defining the action
-   * @return action represented as a Scala Functin
+   * @return action represented as a Scala Function
    */
   final def exec(action: Consumer[D]): D => Unit =
     data => action.accept(data)
@@ -885,7 +885,7 @@ public final class Descriptors {
       // Refuse to init if someone added a new declared type.
       if (Type.values().length != FieldDescriptorProto.Type.values().length) {
         throw new RuntimeException(
-            "descriptor.proto has a new declared type but Desrciptors.java " +
+            "descriptor.proto has a new declared type but Descriptors.java " +
             "wasn't updated.");
       }
     }
@@ -216,7 +216,7 @@ public class CountMinSketch {
   /**
    * Hash item using pair independent hash functions.
    *
-   * <p>Implemetation based on "Less Hashing, Same Performance: Building a Better Bloom Filter"
+   * <p>Implementation based on "Less Hashing, Same Performance: Building a Better Bloom Filter"
    * https://www.eecs.harvard.edu/~michaelm/postscripts/tr-02-05.pdf
    *
    * @param item what should be hashed
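The paper cited in the comment above underpins a common trick: simulate many independent hash functions using only two base hashes. A small illustrative sketch of that idea (not Akka's actual `CountMinSketch` code; the 16-bit split and odd-forcing are assumptions for the example):

```scala
// Illustrative sketch of pair-wise hashing: g_i(x) = h1(x) + i * h2(x) (mod m).
// Derives k index functions from a single 32-bit hash split into two halves.
def derivedIndexes(item: String, k: Int, m: Int): Seq[Int] = {
  val h = item.hashCode
  val h1 = h & 0xffff        // low 16 bits as the first base hash
  val h2 = (h >>> 16) | 1    // high 16 bits, forced odd, as the second
  (0 until k).map(i => Math.floorMod(h1 + i * h2, m))
}
```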
@@ -18,7 +18,7 @@ import scala.reflect.ClassTag
  *
  * See also Section 5.2 of http://dimacs.rutgers.edu/~graham/pubs/papers/cm-full.pdf
  * for a discussion about the assumptions made and guarantees about the Heavy Hitters made in this model.
- * We assume the Cash Register model in which there are only additions, which simplifies HH detecion significantly.
+ * We assume the Cash Register model in which there are only additions, which simplifies HH detection significantly.
  *
  * This class is a hybrid data structure containing a hashmap and a heap pointing to slots in the hashmap. The capacity
  * of the hashmap is twice that of the heap to reduce clumping of entries on collisions.
@@ -117,7 +117,7 @@ class RemoteRouterSpec extends AkkaSpec(s"""
       masterSystem.stop(router)
     }
 
-    "deploy its children on remote host driven by programatic definition" in {
+    "deploy its children on remote host driven by programmatic definition" in {
       val probe = TestProbe()(masterSystem)
       val router = masterSystem.actorOf(
         new RemoteRouterConfig(RoundRobinPool(2), Seq(Address(protocol, sysName, "localhost", port)))
@@ -54,7 +54,7 @@ class RemoteInstrumentsSerializationSpec extends AkkaSpec("akka.loglevel = DEBUG
       p.expectNoMessage(100.millis)
     }
 
-    "skip exitsing remote instruments not in the message" in {
+    "skip existing remote instruments not in the message" in {
       ensureDebugLog("Skipping local RemoteInstrument 10 that has no matching data in the message") {
         val p = TestProbe()
         val instruments = Seq(testInstrument(7, "!"), testInstrument(10, ".."), testInstrument(21, "???"))
@@ -108,7 +108,7 @@ class RemoteRouterSpec
       masterSystem.stop(router)
     }
 
-    "deploy its children on remote host driven by programatic definition" in {
+    "deploy its children on remote host driven by programmatic definition" in {
       val probe = TestProbe()(masterSystem)
       val router =
         masterSystem.actorOf(
@@ -12,7 +12,7 @@ akka.serialization.jackson {
   jackson-modules += "akka.serialization.jackson.AkkaJacksonModule"
   # AkkaTypedJacksonModule optionally included if akka-actor-typed is in classpath
   jackson-modules += "akka.serialization.jackson.AkkaTypedJacksonModule"
-  // FIXME how does that optinal loading work??
+  // FIXME how does that optional loading work??
   # AkkaStreamsModule optionally included if akka-streams is in classpath
   jackson-modules += "akka.serialization.jackson.AkkaStreamJacksonModule"
   jackson-modules += "com.fasterxml.jackson.module.paramnames.ParameterNamesModule"
@@ -868,7 +868,7 @@ class TcpSpec extends StreamSpec("""
 
       Tcp()
         .bindAndHandleWithTls(
-          // just echo charactes until we reach '\n', then complete stream
+          // just echo characters until we reach '\n', then complete stream
          // also - byte is our framing
          Flow[ByteString].mapConcat(_.utf8String.toList).takeWhile(_ != '\n').map(c => ByteString(c)),
          address.getHostName,
@@ -953,7 +953,7 @@ class TcpSpec extends StreamSpec("""
 
      Tcp()
        .bindAndHandleTls(
-          // just echo charactes until we reach '\n', then complete stream
+          // just echo characters until we reach '\n', then complete stream
          // also - byte is our framing
          Flow[ByteString].mapConcat(_.utf8String.toList).takeWhile(_ != '\n').map(c => ByteString(c)),
          address.getHostName,
@@ -52,7 +52,7 @@ class FlowGroupedWithinSpec extends StreamSpec with ScriptedTest {
       c.expectNoMessage(200.millis)
     }
 
-    "deliver bufferd elements onComplete before the timeout" taggedAs TimingTest in {
+    "deliver buffered elements onComplete before the timeout" taggedAs TimingTest in {
       val c = TestSubscriber.manualProbe[immutable.Seq[Int]]()
       Source(1 to 3).groupedWithin(1000, 10.second).to(Sink.fromSubscriber(c)).run()
       val cSub = c.expectSubscription
@@ -206,7 +206,7 @@ class GraphStageTimersSpec extends StreamSpec {
       override def preStart(): Unit = scheduleOnce("tick", 100.millis)
 
       setHandler(in, new InHandler {
-        override def onPush() = () // Ingore
+        override def onPush() = () // Ignore
       })
 
       setHandler(out, new OutHandler {
@@ -80,7 +80,7 @@ akka {
   }
 
   io.tcp {
-    # The outgoing bytes are accumulated in a buffer while waiting for acknoledgment
+    # The outgoing bytes are accumulated in a buffer while waiting for acknowledgment
     # of pending write. This improves throughput for small messages (frames) without
     # sacrificing latency. While waiting for the ack the stage will eagerly pull
     # from upstream until the buffer exceeds this size. That means that the buffer may hold
@@ -19,7 +19,7 @@ import JavaFlowAndRsConverters.Implicits._
  *
  * Please note that either of these types are designed for *inter-op* and usually should not be used directly
  * in applications. The intended use case is for shared libraries, like database drivers or similar to provide
- * the inter-operable types, such that other librarie can co-operate with them directly, if that is your use case
+ * the inter-operable types, such that other libraries can co-operate with them directly, if that is your use case
  * and you're using the j.u.c.Flow types, use the [[akka.stream.scaladsl.JavaFlowSupport]] sources/sinks/flows instead.
  *
  * The interfaces as well as semantic contract of these sets of interfaces.
@@ -117,7 +117,7 @@ object PartitionHub {
    * cancelled are simply removed from the dynamic set of consumers.
    *
    * This `statefulSink` should be used when there is a need to keep mutable state in the partition function,
-   * e.g. for implemening round-robin or sticky session kind of routing. If state is not needed the [[#of]] can
+   * e.g. for implementing round-robin or sticky session kind of routing. If state is not needed the [[#of]] can
    * be more convenient to use.
    *
    * @param partitioner Function that decides where to route an element. It is a factory of a function to
@@ -581,7 +581,7 @@ object Source {
    * The stream can be completed successfully by sending the actor reference a [[akka.actor.Status.Success]].
    * If the content is [[akka.stream.CompletionStrategy.immediately]] the completion will be signaled immediately,
    * otherwise if the content is [[akka.stream.CompletionStrategy.draining]] (or anything else)
-   * already buffered element will be signaled before siganling completion.
+   * already buffered element will be signaled before signaling completion.
    *
    * The stream can be completed with failure by sending a [[akka.actor.Status.Failure]] to the
    * actor reference. In case the Actor is still draining its internal buffer (after having received
@@ -758,7 +758,7 @@ object PartitionHub {
    * cancelled are simply removed from the dynamic set of consumers.
    *
    * This `statefulSink` should be used when there is a need to keep mutable state in the partition function,
-   * e.g. for implemening round-robin or sticky session kind of routing. If state is not needed the [[#sink]] can
+   * e.g. for implementing round-robin or sticky session kind of routing. If state is not needed the [[#sink]] can
    * be more convenient to use.
    *
    * @param partitioner Function that decides where to route an element. It is a factory of a function to
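As a companion to the `statefulSink` scaladoc fixed above, a sketch of the round-robin routing it mentions, following the general pattern from the Akka Streams hub documentation (the stream contents and object name here are illustrative):

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Keep, PartitionHub, Sink, Source }

object RoundRobinHubSketch extends App {
  implicit val system: ActorSystem = ActorSystem("hub-sketch")

  // The factory is invoked once per materialization, so the mutable counter is not shared.
  def roundRobin(): (PartitionHub.ConsumerInfo, String) => Long = {
    var i = -1L
    (info, _) => {
      i += 1
      info.consumerIdByIdx((i % info.size).toInt)
    }
  }

  val fromHub: Source[String, NotUsed] =
    Source(1 to 10)
      .map(n => s"msg-$n")
      .toMat(PartitionHub.statefulSink(() => roundRobin(), startAfterNrOfConsumers = 2))(Keep.right)
      .run()

  // Two consumers attach; elements alternate between them.
  fromHub.runForeach(s => println(s"consumer-1: $s"))
  fromHub.runForeach(s => println(s"consumer-2: $s"))
}
```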
@@ -712,7 +712,7 @@ object Source {
    * The stream can be completed successfully by sending the actor reference a [[akka.actor.Status.Success]].
    * If the content is [[akka.stream.CompletionStrategy.immediately]] the completion will be signaled immediately,
    * otherwise if the content is [[akka.stream.CompletionStrategy.draining]] (or anything else)
-   * already buffered element will be signaled before siganling completion.
+   * already buffered element will be signaled before signaling completion.
    *
    * The stream can be completed with failure by sending a [[akka.actor.Status.Failure]] to the
    * actor reference. In case the Actor is still draining its internal buffer (after having received