format source with scalafmt, #26511
parent 2ba9b988df
commit 75579bed17
779 changed files with 15729 additions and 13096 deletions
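
Every hunk below makes the same kind of change: argument lists that were manually aligned under their opening parenthesis are rebroken into scalafmt's "config style", with a newline after the opening parenthesis and one argument per line at a fixed two-space step. The .scalafmt.conf that drove this run is not part of this excerpt; a minimal sketch of settings that push scalafmt toward this style (real scalafmt option names, but assumed values, not the committed configuration) might look like:

    # .scalafmt.conf: hypothetical sketch, not the committed Akka config
    version = "1.6.0"                 # assumed scalafmt version, not taken from this commit
    maxColumn = 120                   # assumed width; calls that exceed it are broken up
    optIn.configStyleArguments = true # newline after '(' and one argument per line

Such a run is typically applied tree-wide (for example with the sbt-scalafmt plugin's scalafmt tasks), which is why a pure formatting commit like this one touches 779 files at once.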
@@ -110,8 +110,9 @@ class FaultHandlingDocSpec(_system: ActorSystem)
   def this() =
     this(
-      ActorSystem("FaultHandlingDocSpec",
-                  ConfigFactory.parseString("""
+      ActorSystem(
+        "FaultHandlingDocSpec",
+        ConfigFactory.parseString("""
       akka {
         loggers = ["akka.testkit.TestEventListener"]
         loglevel = "WARNING"

@@ -74,11 +74,13 @@ abstract class FactorialFrontend2 extends Actor {
   import akka.cluster.metrics.HeapMetricsSelector

   val backend = context.actorOf(
-    ClusterRouterGroup(AdaptiveLoadBalancingGroup(HeapMetricsSelector),
-                       ClusterRouterGroupSettings(totalInstances = 100,
-                                                  routeesPaths = List("/user/factorialBackend"),
-                                                  allowLocalRoutees = true,
-                                                  useRoles = Set("backend"))).props(),
+    ClusterRouterGroup(
+      AdaptiveLoadBalancingGroup(HeapMetricsSelector),
+      ClusterRouterGroupSettings(
+        totalInstances = 100,
+        routeesPaths = List("/user/factorialBackend"),
+        allowLocalRoutees = true,
+        useRoles = Set("backend"))).props(),
     name = "factorialBackendRouter2")

   //#router-lookup-in-code
@@ -93,11 +95,13 @@ abstract class FactorialFrontend3 extends Actor {
   import akka.cluster.metrics.SystemLoadAverageMetricsSelector

   val backend = context.actorOf(
-    ClusterRouterPool(AdaptiveLoadBalancingPool(SystemLoadAverageMetricsSelector),
-                      ClusterRouterPoolSettings(totalInstances = 100,
-                                                maxInstancesPerNode = 3,
-                                                allowLocalRoutees = false,
-                                                useRoles = Set("backend"))).props(Props[FactorialBackend]),
+    ClusterRouterPool(
+      AdaptiveLoadBalancingPool(SystemLoadAverageMetricsSelector),
+      ClusterRouterPoolSettings(
+        totalInstances = 100,
+        maxInstancesPerNode = 3,
+        allowLocalRoutees = false,
+        useRoles = Set("backend"))).props(Props[FactorialBackend]),
     name = "factorialBackendRouter3")
   //#router-deploy-in-code
 }
@@ -23,9 +23,10 @@ abstract class ClusterSingletonSupervision extends Actor {
   import akka.actor.{ PoisonPill, Props }
   import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
   context.system.actorOf(
-    ClusterSingletonManager.props(singletonProps = Props(classOf[SupervisorActor], props, supervisorStrategy),
-                                  terminationMessage = PoisonPill,
-                                  settings = ClusterSingletonManagerSettings(context.system)),
+    ClusterSingletonManager.props(
+      singletonProps = Props(classOf[SupervisorActor], props, supervisorStrategy),
+      terminationMessage = PoisonPill,
+      settings = ClusterSingletonManagerSettings(context.system)),
     name = name)
   //#singleton-supervisor-actor-usage
 }
@@ -19,11 +19,12 @@ class BackoffSupervisorDocSpec {
     val childProps = Props(classOf[EchoActor])

     val supervisor = BackoffSupervisor.props(
-      BackoffOpts.onStop(childProps,
-                         childName = "myEcho",
-                         minBackoff = 3.seconds,
-                         maxBackoff = 30.seconds,
-                         randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+      BackoffOpts.onStop(
+        childProps,
+        childName = "myEcho",
+        minBackoff = 3.seconds,
+        maxBackoff = 30.seconds,
+        randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
       ))

     system.actorOf(supervisor, name = "echoSupervisor")
@@ -38,11 +39,12 @@ class BackoffSupervisorDocSpec {
     val childProps = Props(classOf[EchoActor])

     val supervisor = BackoffSupervisor.props(
-      BackoffOpts.onFailure(childProps,
-                            childName = "myEcho",
-                            minBackoff = 3.seconds,
-                            maxBackoff = 30.seconds,
-                            randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+      BackoffOpts.onFailure(
+        childProps,
+        childName = "myEcho",
+        minBackoff = 3.seconds,
+        maxBackoff = 30.seconds,
+        randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
       ))

     system.actorOf(supervisor, name = "echoSupervisor")
@@ -58,11 +60,12 @@ class BackoffSupervisorDocSpec {
     //#backoff-custom-stop
     val supervisor = BackoffSupervisor.props(
       BackoffOpts
-        .onStop(childProps,
-                childName = "myEcho",
-                minBackoff = 3.seconds,
-                maxBackoff = 30.seconds,
-                randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+        .onStop(
+          childProps,
+          childName = "myEcho",
+          minBackoff = 3.seconds,
+          maxBackoff = 30.seconds,
+          randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
         )
         .withManualReset // the child must send BackoffSupervisor.Reset to its parent
         .withDefaultStoppingStrategy // Stop at any Exception thrown
@@ -81,11 +84,12 @@ class BackoffSupervisorDocSpec {
     //#backoff-custom-fail
     val supervisor = BackoffSupervisor.props(
       BackoffOpts
-        .onFailure(childProps,
-                   childName = "myEcho",
-                   minBackoff = 3.seconds,
-                   maxBackoff = 30.seconds,
-                   randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+        .onFailure(
+          childProps,
+          childName = "myEcho",
+          minBackoff = 3.seconds,
+          maxBackoff = 30.seconds,
+          randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
         )
         .withAutoReset(10.seconds) // reset if the child does not throw any errors within 10 seconds
         .withSupervisorStrategy(OneForOneStrategy() {
@@ -74,18 +74,20 @@ object PersistenceMultiDocSpec {
   override def journalPluginConfig =
     ConfigFactory
       .empty()
-      .withValue(s"journal-plugin-$runtimeDistinction",
-                 context.system.settings.config
-                   .getValue("journal-plugin") // or a very different configuration coming from an external service.
+      .withValue(
+        s"journal-plugin-$runtimeDistinction",
+        context.system.settings.config
+          .getValue("journal-plugin") // or a very different configuration coming from an external service.
       )

   // Configuration which contains the snapshot store plugin id defined above
   override def snapshotPluginConfig =
     ConfigFactory
       .empty()
-      .withValue(s"snapshot-plugin-$runtimeDistinction",
-                 context.system.settings.config
-                   .getValue("snapshot-store-plugin") // or a very different configuration coming from an external service.
+      .withValue(
+        s"snapshot-plugin-$runtimeDistinction",
+        context.system.settings.config
+          .getValue("snapshot-store-plugin") // or a very different configuration coming from an external service.
       )

 }
@@ -93,10 +93,11 @@ class PersistencePluginDocSpec extends WordSpec {
       //#snapshot-store-plugin-config
     """

-    val system = ActorSystem("PersistencePluginDocSpec",
-                             ConfigFactory
-                               .parseString(providerConfig)
-                               .withFallback(ConfigFactory.parseString(PersistencePluginDocSpec.config)))
+    val system = ActorSystem(
+      "PersistencePluginDocSpec",
+      ConfigFactory
+        .parseString(providerConfig)
+        .withFallback(ConfigFactory.parseString(PersistencePluginDocSpec.config)))
     try {
       Persistence(system)
     } finally {
@@ -227,8 +228,9 @@ object PersistenceTCKDoc {
     override def supportsRejectingNonSerializableObjects: CapabilityFlag =
       true // or CapabilityFlag.on

-    val storageLocations = List(new File(system.settings.config.getString("akka.persistence.journal.leveldb.dir")),
-                                new File(config.getString("akka.persistence.snapshot-store.local.dir")))
+    val storageLocations = List(
+      new File(system.settings.config.getString("akka.persistence.journal.leveldb.dir")),
+      new File(config.getString("akka.persistence.snapshot-store.local.dir")))

     override def beforeAll(): Unit = {
       super.beforeAll()
@@ -73,9 +73,10 @@ object PersistenceQueryDocSpec {
         throw new IllegalArgumentException("LevelDB does not support " + offset.getClass.getName + " offsets")
     }

-    override def eventsByPersistenceId(persistenceId: String,
-                                       fromSequenceNr: Long,
-                                       toSequenceNr: Long): Source[EventEnvelope, NotUsed] = {
+    override def eventsByPersistenceId(
+        persistenceId: String,
+        fromSequenceNr: Long,
+        toSequenceNr: Long): Source[EventEnvelope, NotUsed] = {
       // implement in a similar way as eventsByTag
       ???
     }
@@ -111,9 +112,10 @@ object PersistenceQueryDocSpec {
     override def eventsByTag(tag: String, offset: Offset = Sequence(0L)): javadsl.Source[EventEnvelope, NotUsed] =
       scaladslReadJournal.eventsByTag(tag, offset).asJava

-    override def eventsByPersistenceId(persistenceId: String,
-                                       fromSequenceNr: Long = 0L,
-                                       toSequenceNr: Long = Long.MaxValue): javadsl.Source[EventEnvelope, NotUsed] =
+    override def eventsByPersistenceId(
+        persistenceId: String,
+        fromSequenceNr: Long = 0L,
+        toSequenceNr: Long = Long.MaxValue): javadsl.Source[EventEnvelope, NotUsed] =
       scaladslReadJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava

     override def persistenceIds(): javadsl.Source[String, NotUsed] =
@@ -400,8 +400,9 @@ router-dispatcher {}
     for (i <- 1 to 100) router10b ! i
     val threads10b = Thread.getAllStackTraces.keySet.asScala.filter { _.getName contains "router10b" }
     val threads10bNr = threads10b.size
-    require(threads10bNr == 5,
-            s"Expected 5 threads for router10b, had $threads10bNr! Got: ${threads10b.map(_.getName)}")
+    require(
+      threads10bNr == 5,
+      s"Expected 5 threads for router10b, had $threads10bNr! Got: ${threads10b.map(_.getName)}")

     //#smallest-mailbox-pool-1
     val router11: ActorRef =
@@ -537,10 +538,10 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender {
   "demonstrate dispatcher" in {
     //#dispatchers
     val router: ActorRef = system.actorOf(
-                                          // “head” router actor will run on "router-dispatcher" dispatcher
-                                          // Worker routees will run on "pool-dispatcher" dispatcher
-                                          RandomPool(5, routerDispatcher = "router-dispatcher").props(Props[Worker]),
-                                          name = "poolWithDispatcher")
+      // “head” router actor will run on "router-dispatcher" dispatcher
+      // Worker routees will run on "pool-dispatcher" dispatcher
+      RandomPool(5, routerDispatcher = "router-dispatcher").props(Props[Worker]),
+      name = "poolWithDispatcher")
     //#dispatchers
   }

@@ -595,8 +596,9 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender {
     //#remoteRoutees
     import akka.actor.{ Address, AddressFromURIString }
     import akka.remote.routing.RemoteRouterConfig
-    val addresses = Seq(Address("akka.tcp", "remotesys", "otherhost", 1234),
-                        AddressFromURIString("akka.tcp://othersys@anotherhost:1234"))
+    val addresses = Seq(
+      Address("akka.tcp", "remotesys", "otherhost", 1234),
+      AddressFromURIString("akka.tcp://othersys@anotherhost:1234"))
     val routerRemote = system.actorOf(RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo]))
     //#remoteRoutees
   }
@@ -117,8 +117,9 @@ class GraphDSLDocSpec extends AkkaSpec {

     //#graph-dsl-components-create
     object PriorityWorkerPool {
-      def apply[In, Out](worker: Flow[In, Out, Any],
-                         workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = {
+      def apply[In, Out](
+          worker: Flow[In, Out, Any],
+          workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = {

         GraphDSL.create() { implicit b =>
           import GraphDSL.Implicits._
@@ -138,9 +139,10 @@ class GraphDSLDocSpec extends AkkaSpec {
           // We now expose the input ports of the priorityMerge and the output
           // of the resultsMerge as our PriorityWorkerPool ports
           // -- all neatly wrapped in our domain specific Shape
-          PriorityWorkerPoolShape(jobsIn = priorityMerge.in(0),
-                                  priorityJobsIn = priorityMerge.preferred,
-                                  resultsOut = resultsMerge.out)
+          PriorityWorkerPoolShape(
+            jobsIn = priorityMerge.in(0),
+            priorityJobsIn = priorityMerge.preferred,
+            resultsOut = resultsMerge.out)
         }
       }
@@ -416,21 +416,20 @@ class GraphStageDocSpec extends AkkaSpec {
       val promise = Promise[A]()
       val logic = new GraphStageLogic(shape) {

-        setHandler(in,
-                   new InHandler {
-                     override def onPush(): Unit = {
-                       val elem = grab(in)
-                       promise.success(elem)
-                       push(out, elem)
+        setHandler(in, new InHandler {
+          override def onPush(): Unit = {
+            val elem = grab(in)
+            promise.success(elem)
+            push(out, elem)

-                       // replace handler with one that only forwards elements
-                       setHandler(in, new InHandler {
-                         override def onPush(): Unit = {
-                           push(out, grab(in))
-                         }
-                       })
-                     }
-                   })
+            // replace handler with one that only forwards elements
+            setHandler(in, new InHandler {
+              override def onPush(): Unit = {
+                push(out, grab(in))
+              }
+            })
+          }
+        })

         setHandler(out, new OutHandler {
           override def onPull(): Unit = {
@@ -477,44 +476,46 @@ class GraphStageDocSpec extends AkkaSpec {
           pull(in)
         }

-        setHandler(in,
-                   new InHandler {
-                     override def onPush(): Unit = {
-                       val elem = grab(in)
-                       buffer.enqueue(elem)
-                       if (downstreamWaiting) {
-                         downstreamWaiting = false
-                         val bufferedElem = buffer.dequeue()
-                         push(out, bufferedElem)
-                       }
-                       if (!bufferFull) {
-                         pull(in)
-                       }
-                     }
+        setHandler(
+          in,
+          new InHandler {
+            override def onPush(): Unit = {
+              val elem = grab(in)
+              buffer.enqueue(elem)
+              if (downstreamWaiting) {
+                downstreamWaiting = false
+                val bufferedElem = buffer.dequeue()
+                push(out, bufferedElem)
+              }
+              if (!bufferFull) {
+                pull(in)
+              }
+            }

-                     override def onUpstreamFinish(): Unit = {
-                       if (buffer.nonEmpty) {
-                         // emit the rest if possible
-                         emitMultiple(out, buffer.toIterator)
-                       }
-                       completeStage()
-                     }
-                   })
+            override def onUpstreamFinish(): Unit = {
+              if (buffer.nonEmpty) {
+                // emit the rest if possible
+                emitMultiple(out, buffer.toIterator)
+              }
+              completeStage()
+            }
+          })

-        setHandler(out,
-                   new OutHandler {
-                     override def onPull(): Unit = {
-                       if (buffer.isEmpty) {
-                         downstreamWaiting = true
-                       } else {
-                         val elem = buffer.dequeue
-                         push(out, elem)
-                       }
-                       if (!bufferFull && !hasBeenPulled(in)) {
-                         pull(in)
-                       }
-                     }
-                   })
+        setHandler(
+          out,
+          new OutHandler {
+            override def onPull(): Unit = {
+              if (buffer.isEmpty) {
+                downstreamWaiting = true
+              } else {
+                val elem = buffer.dequeue
+                push(out, elem)
+              }
+              if (!bufferFull && !hasBeenPulled(in)) {
+                pull(in)
+              }
+            }
+          })
       }

     }
@@ -113,9 +113,10 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec {
       // value to the left is used)
       val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
         producer.toMat(
-          PartitionHub.sink((size, elem) => math.abs(elem.hashCode % size),
-                            startAfterNrOfConsumers = 2,
-                            bufferSize = 256))(Keep.right)
+          PartitionHub.sink(
+            (size, elem) => math.abs(elem.hashCode % size),
+            startAfterNrOfConsumers = 2,
+            bufferSize = 256))(Keep.right)

       // By running/materializing the producer, we get back a Source, which
       // gives us access to the elements published by the producer.
@@ -169,9 +170,10 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec {
       // Note that this is a moving target since the elements are consumed concurrently.
       val runnableGraph: RunnableGraph[Source[Int, NotUsed]] =
         producer.toMat(
-          PartitionHub.statefulSink(() => (info, elem) => info.consumerIds.minBy(id => info.queueSize(id)),
-                                    startAfterNrOfConsumers = 2,
-                                    bufferSize = 16))(Keep.right)
+          PartitionHub.statefulSink(
+            () => (info, elem) => info.consumerIds.minBy(id => info.queueSize(id)),
+            startAfterNrOfConsumers = 2,
+            bufferSize = 16))(Keep.right)

       val fromProducer: Source[Int, NotUsed] = runnableGraph.run()
@@ -206,11 +206,12 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {

     val probe = TestProbe()
     val receiver = system.actorOf(Props(new AckingReceiver(probe.ref, ackWith = AckMessage)))
-    val sink = Sink.actorRefWithAck(receiver,
-                                    onInitMessage = InitMessage,
-                                    ackMessage = AckMessage,
-                                    onCompleteMessage = OnCompleteMessage,
-                                    onFailureMessage = onErrorMessage)
+    val sink = Sink.actorRefWithAck(
+      receiver,
+      onInitMessage = InitMessage,
+      ackMessage = AckMessage,
+      onCompleteMessage = OnCompleteMessage,
+      onFailureMessage = onErrorMessage)

     words.map(_.toLowerCase).runWith(sink)
@@ -295,13 +296,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
     //#external-service-mapAsyncUnordered

     probe.receiveN(7).toSet should be(
-      Set("rolandkuhn@somewhere.com",
-          "patriknw@somewhere.com",
-          "bantonsson@somewhere.com",
-          "drewhk@somewhere.com",
-          "ktosopl@somewhere.com",
-          "mmartynas@somewhere.com",
-          "akkateam@somewhere.com"))
+      Set(
+        "rolandkuhn@somewhere.com",
+        "patriknw@somewhere.com",
+        "bantonsson@somewhere.com",
+        "drewhk@somewhere.com",
+        "ktosopl@somewhere.com",
+        "mmartynas@somewhere.com",
+        "akkateam@somewhere.com"))
   }

   "careful managed blocking with mapAsync" in {
@@ -332,13 +334,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
     //#blocking-mapAsync

     probe.receiveN(7).toSet should be(
-      Set("rolandkuhn".hashCode.toString,
-          "patriknw".hashCode.toString,
-          "bantonsson".hashCode.toString,
-          "drewhk".hashCode.toString,
-          "ktosopl".hashCode.toString,
-          "mmartynas".hashCode.toString,
-          "akkateam".hashCode.toString))
+      Set(
+        "rolandkuhn".hashCode.toString,
+        "patriknw".hashCode.toString,
+        "bantonsson".hashCode.toString,
+        "drewhk".hashCode.toString,
+        "ktosopl".hashCode.toString,
+        "mmartynas".hashCode.toString,
+        "akkateam".hashCode.toString))
   }

   "careful managed blocking with map" in {
@@ -452,16 +455,17 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
     //#sometimes-slow-mapAsyncUnordered

     probe.receiveN(10).toSet should be(
-      Set("after: A",
-          "after: B",
-          "after: C",
-          "after: D",
-          "after: E",
-          "after: F",
-          "after: G",
-          "after: H",
-          "after: I",
-          "after: J"))
+      Set(
+        "after: A",
+        "after: B",
+        "after: C",
+        "after: D",
+        "after: E",
+        "after: F",
+        "after: G",
+        "after: H",
+        "after: I",
+        "after: J"))
   }

   "illustrate use of source queue" in {
@@ -35,10 +35,11 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec {
   "demonstrate a restart with backoff source" in compileOnlySpec {

     //#restart-with-backoff-source
-    val restartSource = RestartSource.withBackoff(minBackoff = 3.seconds,
-                                                  maxBackoff = 30.seconds,
-                                                  randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
-                                                  maxRestarts = 20 // limits the amount of restarts to 20
+    val restartSource = RestartSource.withBackoff(
+      minBackoff = 3.seconds,
+      maxBackoff = 30.seconds,
+      randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
+      maxRestarts = 20 // limits the amount of restarts to 20
     ) { () =>
       // Create a source from a future of a source
       Source.fromFutureSource {
@@ -35,27 +35,28 @@ class RecipeByteStrings extends RecipeSpec {
           emitChunk()
         }
       })
-      setHandler(in,
-                 new InHandler {
-                   override def onPush(): Unit = {
-                     val elem = grab(in)
-                     buffer ++= elem
-                     emitChunk()
-                   }
+      setHandler(
+        in,
+        new InHandler {
+          override def onPush(): Unit = {
+            val elem = grab(in)
+            buffer ++= elem
+            emitChunk()
+          }

-                   override def onUpstreamFinish(): Unit = {
-                     if (buffer.isEmpty) completeStage()
-                     else {
-                       // There are elements left in buffer, so
-                       // we keep accepting downstream pulls and push from buffer until emptied.
-                       //
-                       // It might be though, that the upstream finished while it was pulled, in which
-                       // case we will not get an onPull from the downstream, because we already had one.
-                       // In that case we need to emit from the buffer.
-                       if (isAvailable(out)) emitChunk()
-                     }
-                   }
-                 })
+          override def onUpstreamFinish(): Unit = {
+            if (buffer.isEmpty) completeStage()
+            else {
+              // There are elements left in buffer, so
+              // we keep accepting downstream pulls and push from buffer until emptied.
+              //
+              // It might be though, that the upstream finished while it was pulled, in which
+              // case we will not get an onPull from the downstream, because we already had one.
+              // In that case we need to emit from the buffer.
+              if (isAvailable(out)) emitChunk()
+            }
+          }
+        })

       private def emitChunk(): Unit = {
         if (buffer.isEmpty) {
@@ -93,21 +94,19 @@ class RecipeByteStrings extends RecipeSpec {
     override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
       private var count = 0

-      setHandlers(in,
-                  out,
-                  new InHandler with OutHandler {
+      setHandlers(in, out, new InHandler with OutHandler {

-                    override def onPull(): Unit = {
-                      pull(in)
-                    }
+        override def onPull(): Unit = {
+          pull(in)
+        }

-                    override def onPush(): Unit = {
-                      val chunk = grab(in)
-                      count += chunk.size
-                      if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes"))
-                      else push(out, chunk)
-                    }
-                  })
+        override def onPush(): Unit = {
+          val chunk = grab(in)
+          count += chunk.size
+          if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes"))
+          else push(out, chunk)
+        }
+      })
     }
   }

@@ -37,10 +37,11 @@ class RecipeGlobalRateLimit extends RecipeSpec {

     private var waitQueue = immutable.Queue.empty[ActorRef]
     private var permitTokens = maxAvailableTokens
-    private val replenishTimer = system.scheduler.schedule(initialDelay = tokenRefreshPeriod,
-                                                           interval = tokenRefreshPeriod,
-                                                           receiver = self,
-                                                           ReplenishTokens)
+    private val replenishTimer = system.scheduler.schedule(
+      initialDelay = tokenRefreshPeriod,
+      interval = tokenRefreshPeriod,
+      receiver = self,
+      ReplenishTokens)

     override def receive: Receive = open
@@ -55,22 +55,23 @@ object HoldOps {
       private var currentValue: T = _
       private var waitingFirstValue = true

-      setHandlers(in,
-                  out,
-                  new InHandler with OutHandler {
-                    override def onPush(): Unit = {
-                      currentValue = grab(in)
-                      if (waitingFirstValue) {
-                        waitingFirstValue = false
-                        if (isAvailable(out)) push(out, currentValue)
-                      }
-                      pull(in)
-                    }
+      setHandlers(
+        in,
+        out,
+        new InHandler with OutHandler {
+          override def onPush(): Unit = {
+            currentValue = grab(in)
+            if (waitingFirstValue) {
+              waitingFirstValue = false
+              if (isAvailable(out)) push(out, currentValue)
+            }
+            pull(in)
+          }

-                    override def onPull(): Unit = {
-                      if (!waitingFirstValue) push(out, currentValue)
-                    }
-                  })
+          override def onPull(): Unit = {
+            if (!waitingFirstValue) push(out, currentValue)
+          }
+        })

       override def preStart(): Unit = {
         pull(in)
@@ -17,11 +17,12 @@ class RecipeParseLines extends RecipeSpec {

   "work" in {
     val rawData = Source(
-      List(ByteString("Hello World"),
-           ByteString("\r"),
-           ByteString("!\r"),
-           ByteString("\nHello Akka!\r\nHello Streams!"),
-           ByteString("\r\n\r\n")))
+      List(
+        ByteString("Hello World"),
+        ByteString("\r"),
+        ByteString("!\r"),
+        ByteString("\nHello Akka!\r\nHello Streams!"),
+        ByteString("\r\n\r\n")))

     //#parse-lines
     import akka.stream.scaladsl.Framing
@@ -40,9 +40,8 @@ class RecipeReduceByKey extends RecipeSpec {
     def words = Source(List("hello", "world", "and", "hello", "universe", "akka") ++ List.fill(1000)("rocks!"))

     //#reduce-by-key-general
-    def reduceByKey[In, K, Out](maximumGroupSize: Int,
-                                groupKey: (In) => K,
-                                map: (In) => Out)(reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = {
+    def reduceByKey[In, K, Out](maximumGroupSize: Int, groupKey: (In) => K, map: (In) => Out)(
+        reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = {

       Flow[In]
         .groupBy[K](maximumGroupSize, groupKey)
@@ -20,9 +20,10 @@ object SourceOrFlow {
       //#log
       .log(name = "myStream")
       .addAttributes(
-        Attributes.logLevels(onElement = Attributes.LogLevels.Off,
-                             onFailure = Attributes.LogLevels.Error,
-                             onFinish = Attributes.LogLevels.Info))
+        Attributes.logLevels(
+          onElement = Attributes.LogLevels.Off,
+          onFailure = Attributes.LogLevels.Error,
+          onFinish = Attributes.LogLevels.Info))
     //#log
   }

@@ -322,8 +322,9 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
     import akka.testkit.EventFilter
     import com.typesafe.config.ConfigFactory

-    implicit val system = ActorSystem("testsystem",
-                                      ConfigFactory.parseString("""
+    implicit val system = ActorSystem(
+      "testsystem",
+      ConfigFactory.parseString("""
       akka.loggers = ["akka.testkit.TestEventListener"]
       """))
     try {