Merge branch 'master' into wip-sync-artery-dev-patriknw

This commit is contained in:
Patrik Nordwall 2016-08-31 08:59:49 +02:00
commit 90cce8579a
78 changed files with 3577 additions and 1303 deletions

View file

@@ -64,7 +64,7 @@ The steps are exactly the same for everyone involved in the project (be it core
1. Create a branch on your fork and work on the feature. For example: `git checkout -b wip-custom-headers-akka-http`
- Please make sure to follow the general quality guidelines (specified below) when developing your patch.
- Please write additional tests covering your feature and adjust existing ones if needed before submitting your Pull Request. The `validatePullRequest` sbt task ([explained below](#validatePullRequest)) may come in handy to verify your changes are correct.
-1. Once your feature is complete, prepare the commit following our [commit message guidelines](#commit-message-guidelines). For example, a good commit message would be: `Adding compression support for Manifests #22222` (note the reference to the ticket it aimed to resolve).
+1. Once your feature is complete, prepare the commit following our [Creating Commits And Writing Commit Messages](#creating-commits-and-writing-commit-messages). For example, a good commit message would be: `Adding compression support for Manifests #22222` (note the reference to the ticket it aimed to resolve).
1. Now it's finally time to [submit the Pull Request](https://help.github.com/articles/using-pull-requests)!
1. If you have not already done so, you will be asked by our CLA bot to [sign the Lightbend CLA](http://www.lightbend.com/contribute/cla) online. CLA stands for Contributor License Agreement and is a way of protecting the project from intellectual property disputes.
1. If you're not already on the contributors white-list, the @akka-ci bot will ask `Can one of the repo owners verify this patch?`, to which a core member will reply by commenting `OK TO TEST`. This is just a sanity check to prevent malicious code from being run on the Jenkins cluster.

View file

@@ -1,7 +1,7 @@
Akka
====
We believe that writing correct concurrent & distributed, resilient and elastic applications is too hard.
Most of the time it's because we are using the wrong tools and the wrong level of abstraction.
Akka is here to change that.
@@ -17,7 +17,7 @@ Learn more at [akka.io](http://akka.io/).
Reference Documentation
-----------------------
The reference documentation is available at [doc.akka.io](http://doc.akka.io),
for [Scala](http://doc.akka.io/docs/akka/current/scala.html) and [Java](http://doc.akka.io/docs/akka/current/java.html).
@@ -31,10 +31,11 @@ You can join these groups and chats to discuss and ask Akka related questions:
In addition to that, you may enjoy following:
- The [news](http://akka.io/news) section of the page, which is updated whenever a new version is released
- The [Akka Team Blog](http://blog.akka.io)
- [@akkateam](https://twitter.com/akkateam) on Twitter
- Questions tagged [#akka on StackOverflow](http://stackoverflow.com/questions/tagged/akka)
- Projects built with Akka: [![akka-dependency-badge]][akka-dependency-scaladex]
Contributing
------------
@@ -54,3 +55,6 @@ License
-------
Akka is Open Source and available under the Apache 2 License.
[akka-dependency-badge]: https://index.scala-lang.org/count.svg?q=dependencies:akka/*&subject=scaladex:&color=blue&style=flat-square "Built with Akka"
[akka-dependency-scaladex]: https://index.scala-lang.org/search?q=dependencies:akka/*

View file

@@ -73,6 +73,22 @@ class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter {
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.halfOpenLatch)
}
"still be in open state after calling success method" in {
val breaker = CircuitBreakerSpec.longResetTimeoutCb()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.openLatch)
breaker().succeed()
checkLatch(breaker.openLatch)
}
"still be in open state after calling fail method" in {
val breaker = CircuitBreakerSpec.longResetTimeoutCb()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.openLatch)
breaker().fail()
checkLatch(breaker.openLatch)
}
}
"A synchronous circuit breaker that is half-open" must {
@@ -91,6 +107,22 @@ class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter {
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.openLatch)
}
"open on calling fail method" in {
val breaker = CircuitBreakerSpec.shortResetTimeoutCb()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.halfOpenLatch)
breaker().fail()
checkLatch(breaker.openLatch)
}
"close on calling success method" in {
val breaker = CircuitBreakerSpec.shortResetTimeoutCb()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.halfOpenLatch)
breaker().succeed()
checkLatch(breaker.closedLatch)
}
}
"A synchronous circuit breaker that is closed" must {
@@ -107,6 +139,14 @@ class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter {
breaker().currentFailureCount should ===(1)
}
"increment failure count on fail method" in {
val breaker = CircuitBreakerSpec.longCallTimeoutCb()
breaker().currentFailureCount should ===(0)
breaker().fail()
checkLatch(breaker.openLatch)
breaker().currentFailureCount should ===(1)
}
"reset failure count after success" in {
val breaker = CircuitBreakerSpec.multiFailureCb()
breaker().currentFailureCount should ===(0)
@@ -119,6 +159,18 @@ class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter {
breaker().currentFailureCount should ===(0)
}
"reset failure count after success method" in {
val breaker = CircuitBreakerSpec.multiFailureCb()
breaker().currentFailureCount should ===(0)
intercept[TestException] {
val ct = Thread.currentThread() // Ensure that the thunk is executed in the test's thread
breaker().withSyncCircuitBreaker({ if (Thread.currentThread() eq ct) throwException else "fail" })
}
breaker().currentFailureCount should ===(1)
breaker().succeed()
breaker().currentFailureCount should ===(0)
}
"throw TimeoutException on callTimeout" in {
val breaker = CircuitBreakerSpec.shortCallTimeoutCb()
intercept[TimeoutException] {

View file

@@ -21,11 +21,7 @@ import scala.collection.mutable.Builder
class ByteStringSpec extends WordSpec with Matchers with Checkers {
-// // uncomment when developing locally to get better coverage
-// implicit override val generatorDrivenConfig =
-// PropertyCheckConfig(
-// minSuccessful = 1000,
-// minSize = 0, maxSize = 100)
+implicit val betterGeneratorDrivenConfig = PropertyCheckConfig().copy(minSuccessful = 1000)
def genSimpleByteString(min: Int, max: Int) = for {
n ← Gen.choose(min, max)
@@ -365,13 +361,48 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers {
ByteStrings(ByteString1.fromString(""), ByteString1.fromString("ab")).dropRight(Int.MinValue) should ===(ByteString("ab"))
}
"slice" in {
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(0, 1) should ===(ByteString("a"))
ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).slice(1, 1) should ===(ByteString(""))
// We explicitly test all edge cases to always test them, refs bug #21237
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(-10, 10) should ===(ByteString("a"))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(-10, 0) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(-10, 1) should ===(ByteString("a"))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(0, 1) should ===(ByteString("a"))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(0, 10) should ===(ByteString("a"))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(1, 10) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(1, -2) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(-10, -100) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(-100, -10) should ===(ByteString(""))
// Get an empty if `from` is greater than `until`
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(1, 0) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(2, 2) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(2, 3) should ===(ByteString("c"))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(2, 4) should ===(ByteString("cd"))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(3, 4) should ===(ByteString("d"))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(10, 100) should ===(ByteString(""))
// Can obtain expected results from 6 basic patterns
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, 10) should ===(ByteString("abcd"))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, 0) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, 4) should ===(ByteString("abcd"))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(0, 4) should ===(ByteString("abcd"))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(1, -2) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(0, 10) should ===(ByteString("abcd"))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, -100) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-100, -10) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(1, -2) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(-10, -100) should ===(ByteString(""))
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(-100, -10) should ===(ByteString(""))
// various edge cases using raw ByteString1
ByteString1.fromString("cd").slice(100, 10) should ===(ByteString(""))
ByteString1.fromString("cd").slice(100, 1000) should ===(ByteString(""))
ByteString1.fromString("cd").slice(-10, -5) should ===(ByteString(""))
ByteString1.fromString("cd").slice(-2, -5) should ===(ByteString(""))
ByteString1.fromString("cd").slice(-2, 1) should ===(ByteString("c"))
ByteString1.fromString("abcd").slice(1, -1) should ===(ByteString(""))
// Get an empty if `from` is greater than `until`
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(4, 0) should ===(ByteString(""))
}
"dropRight" in {
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(0) should ===(ByteString("a"))

View file

@@ -166,6 +166,54 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
*/
def callWithSyncCircuitBreaker[T](body: Callable[T]): T = withSyncCircuitBreaker(body.call)
/**
* Mark a successful call through CircuitBreaker. Sometimes the callee of CircuitBreaker sends back a message to the
* caller Actor. In such a case, it is convenient to mark a successful call instead of using Future
* via [[withCircuitBreaker]]
*/
def succeed(): Unit = {
currentState.callSucceeds()
}
/**
* Mark a failed call through CircuitBreaker. Sometimes the callee of CircuitBreaker sends back a message to the
* caller Actor. In such a case, it is convenient to mark a failed call instead of using Future
* via [[withCircuitBreaker]]
*/
def fail(): Unit = {
currentState.callFails()
}
/**
* Return true if the internal state is Closed. WARNING: It is a "power API" call which you should use with care.
* Ordinary use cases of CircuitBreaker expect a remote call to return a Future, as in withCircuitBreaker.
* So, if you check the state by yourself, and make a remote call outside CircuitBreaker, you should
* manage the state yourself.
*/
def isClosed: Boolean = {
currentState == Closed
}
/**
* Return true if the internal state is Open. WARNING: It is a "power API" call which you should use with care.
* Ordinary use cases of CircuitBreaker expect a remote call to return a Future, as in withCircuitBreaker.
* So, if you check the state by yourself, and make a remote call outside CircuitBreaker, you should
* manage the state yourself.
*/
def isOpen: Boolean = {
currentState == Open
}
/**
* Return true if the internal state is HalfOpen. WARNING: It is a "power API" call which you should use with care.
* Ordinary use cases of CircuitBreaker expect a remote call to return a Future, as in withCircuitBreaker.
* So, if you check the state by yourself, and make a remote call outside CircuitBreaker, you should
* manage the state yourself.
*/
def isHalfOpen: Boolean = {
currentState == HalfOpen
}
/**
* Adds a callback to execute when circuit breaker opens
*
@@ -189,7 +237,6 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
/**
* Adds a callback to execute when circuit breaker transitions to half-open
*
* The callback is run in the [[scala.concurrent.ExecutionContext]] supplied in the constructor.
*
* @param callback Handler to be invoked on state change

View file

@@ -239,7 +239,7 @@ class BoundedBlockingQueue[E <: AnyRef](
elements(last).asInstanceOf[E]
}
-def remove() {
+override def remove() {
if (last < 0) throw new IllegalStateException
val target = elements(last)
last = -1 //To avoid 2 subsequent removes without a next in between

View file

@@ -163,8 +163,8 @@ object ByteString {
else toByteString1.drop(n)
override def slice(from: Int, until: Int): ByteString =
-if ((from == 0) && (until == length)) this
-else if (from > length) ByteString.empty
+if (from <= 0 && until >= length) this
+else if (from >= length || until <= 0 || from >= until) ByteString.empty
else toByteString1.slice(from, until)
private[akka] override def writeToOutputStream(os: ObjectOutputStream): Unit =
@@ -252,11 +252,8 @@ object ByteString {
if (n <= 0) ByteString.empty
else ByteString1(bytes, startIndex, Math.min(n, length))
-override def slice(from: Int, until: Int): ByteString = {
-if (from <= 0 && until >= length) this // we can do < / > since we're Compact
-else if (until <= from) ByteString1.empty
-else ByteString1(bytes, startIndex + from, until - from)
-}
+override def slice(from: Int, until: Int): ByteString =
+drop(from).take(until - Math.max(0, from))
override def copyToBuffer(buffer: ByteBuffer): Int =
writeToBuffer(buffer)
@@ -466,7 +463,7 @@ object ByteString {
}
override def slice(from: Int, until: Int): ByteString =
-if ((from == 0) && (until == length)) this
+if (from <= 0 && until >= length) this
else if (from > length || until <= from) ByteString.empty
else drop(from).dropRight(length - until)
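The net effect of the new bounds handling, as a REPL-style sketch (the expected values follow from the tests above, not from any new API):

import akka.util.ByteString

val bs = ByteString("abcd")
bs.slice(-10, 10) // ByteString("abcd") – out-of-range bounds are clamped rather than throwing
bs.slice(2, 3)    // ByteString("c")
bs.slice(1, -2)   // ByteString.empty – a non-positive `until` yields the empty ByteString
bs.slice(3, 1)    // ByteString.empty – `from` >= `until` yields the empty ByteString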

View file

@@ -32,7 +32,7 @@ trait PriorityQueueStabilizer[E <: AnyRef] extends AbstractQueue[E] {
private[this] val backingIterator = backingQueue.iterator()
def hasNext: Boolean = backingIterator.hasNext
def next(): E = backingIterator.next().element
-def remove() = backingIterator.remove()
+override def remove() = backingIterator.remove()
}
override def poll(): E = {

View file

@@ -9,8 +9,6 @@ import scala.collection.immutable.HashMap
private[akka] final case class WildcardIndex[T](wildcardTree: WildcardTree[T] = WildcardTree[T](), doubleWildcardTree: WildcardTree[T] = WildcardTree[T]()) {
-val empty = WildcardTree[T]()
def insert(elems: Array[String], d: T): WildcardIndex[T] = elems.lastOption match {
case Some("**") copy(doubleWildcardTree = doubleWildcardTree.insert(elems.iterator, d))
case Some(_) copy(wildcardTree = wildcardTree.insert(elems.iterator, d))
@@ -20,7 +18,7 @@ private[akka] final case class WildcardIndex[T](wildcardTree: WildcardTree[T] =
def find(elems: Iterable[String]): Option[T] =
(if (wildcardTree.isEmpty) {
if (doubleWildcardTree.isEmpty) {
-empty
+WildcardTree[T]() // empty
} else {
doubleWildcardTree.findWithTerminalDoubleWildcard(elems.iterator)
}
@@ -33,6 +31,8 @@ private[akka] final case class WildcardIndex[T](wildcardTree: WildcardTree[T] =
}
}).data
def isEmpty: Boolean = wildcardTree.isEmpty && doubleWildcardTree.isEmpty
}
private[akka] object WildcardTree {
@@ -42,7 +42,7 @@
private[akka] final case class WildcardTree[T](data: Option[T] = None, children: Map[String, WildcardTree[T]] = HashMap[String, WildcardTree[T]]()) {
-lazy val isEmpty: Boolean = data.isEmpty && children.isEmpty
+def isEmpty: Boolean = data.isEmpty && children.isEmpty
def insert(elems: Iterator[String], d: T): WildcardTree[T] =
if (!elems.hasNext) {

View file

@@ -107,3 +107,35 @@ Java
will return a :class:`CircuitBreaker` where callbacks are executed in the caller's thread.
This can be useful if the asynchronous :class:`Future` behavior is unnecessary, for
example invoking a synchronous-only API.
------------
Tell Pattern
------------
The above ``Call Protection`` pattern works well when the return from a remote call is wrapped in a ``Future``.
However, when a remote call sends back a message or timeout to the caller ``Actor``, the ``Call Protection`` pattern
is awkward. CircuitBreaker doesn't support it natively at the moment, so you need to use the low-level power-user APIs
described below: the ``succeed`` and ``fail`` methods, as well as ``isClosed``, ``isOpen`` and ``isHalfOpen``.
.. note::
The below examples don't make a remote call when the state is ``HalfOpen``. Using the power-user APIs, it is
your responsibility to judge when to make remote calls in ``HalfOpen``.
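If you do want to probe the service with a single trial call while half-open, one possible variation
(a sketch, not part of the shipped examples) is to also allow the call in the ``HalfOpen`` state::

  case "call" if breaker.isClosed || breaker.isHalfOpen =>
    recipient ! "message"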
^^^^^^^
Scala
^^^^^^^
.. includecode:: code/docs/circuitbreaker/CircuitBreakerDocSpec.scala
:include: circuit-breaker-tell-pattern
^^^^^^^
Java
^^^^^^^
.. includecode:: code/docs/circuitbreaker/TellPatternJavaActor.java
:include: circuit-breaker-tell-pattern

View file

@@ -8,10 +8,9 @@ package docs.circuitbreaker
import scala.concurrent.duration._
import akka.pattern.CircuitBreaker
import akka.pattern.pipe
-import akka.actor.Actor
-import akka.actor.ActorLogging
+import akka.actor.{Actor, ActorLogging, ActorRef}
import scala.concurrent.Future
import akka.event.Logging
//#imports1
@@ -45,3 +44,35 @@ class DangerousActor extends Actor with ActorLogging {
}
class TellPatternActor(recipient: ActorRef) extends Actor with ActorLogging {
import context.dispatcher
val breaker =
new CircuitBreaker(
context.system.scheduler,
maxFailures = 5,
callTimeout = 10.seconds,
resetTimeout = 1.minute).onOpen(notifyMeOnOpen())
def notifyMeOnOpen(): Unit =
log.warning("My CircuitBreaker is now open, and will not close for one minute")
//#circuit-breaker-tell-pattern
import akka.actor.ReceiveTimeout
def receive = {
case "call" if breaker.isClosed => {
recipient ! "message"
}
case "response" => {
breaker.succeed()
}
case err: Throwable => {
breaker.fail()
}
case ReceiveTimeout => {
breaker.fail()
}
}
//#circuit-breaker-tell-pattern
}
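// For the ReceiveTimeout case above to ever fire, the actor must also set a receive
// timeout somewhere, e.g. in its constructor (a sketch; the value is illustrative):
//   context.setReceiveTimeout(10.seconds)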

View file

@@ -0,0 +1,51 @@
/**
* Copyright (C) 2009-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package docs.circuitbreaker;
import akka.actor.ActorRef;
import akka.actor.ReceiveTimeout;
import akka.actor.UntypedActor;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.pattern.CircuitBreaker;
import scala.concurrent.duration.Duration;
public class TellPatternJavaActor extends UntypedActor {
private final ActorRef target;
private final CircuitBreaker breaker;
private final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
public TellPatternJavaActor(ActorRef targetActor) {
this.target = targetActor;
this.breaker = new CircuitBreaker(
getContext().dispatcher(), getContext().system().scheduler(),
5, Duration.create(10, "s"), Duration.create(1, "m"))
.onOpen(new Runnable() {
public void run() {
notifyMeOnOpen();
}
});
}
public void notifyMeOnOpen() {
log.warning("My CircuitBreaker is now open, and will not close for one minute");
}
//#circuit-breaker-tell-pattern
@Override
public void onReceive(Object payload) {
if ( "call".equals(payload) && breaker.isClosed() ) {
target.tell("message", getSelf());
} else if ( "response".equals(payload) ) {
breaker.succeed();
} else if ( payload instanceof Throwable ) {
breaker.fail();
} else if ( payload instanceof ReceiveTimeout ) {
breaker.fail();
}
}
//#circuit-breaker-tell-pattern
}

View file

@@ -0,0 +1,140 @@
/**
* Copyright (C) 2015-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package docs.stream;
import akka.Done;
import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.actor.Cancellable;
import akka.japi.Pair;
import akka.stream.ActorMaterializer;
import akka.stream.KillSwitches;
import akka.stream.Materializer;
import akka.stream.UniqueKillSwitch;
import akka.stream.javadsl.*;
import akka.testkit.JavaTestKit;
import docs.AbstractJavaTest;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import scala.concurrent.duration.FiniteDuration;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
public class HubDocTest extends AbstractJavaTest {
static ActorSystem system;
static Materializer materializer;
@BeforeClass
public static void setup() {
system = ActorSystem.create("GraphDSLDocTest");
materializer = ActorMaterializer.create(system);
}
@AfterClass
public static void tearDown() {
JavaTestKit.shutdownActorSystem(system);
system = null;
materializer = null;
}
@Test
public void dynamicMerge() {
//#merge-hub
// A simple consumer that will print to the console for now
Sink<String, CompletionStage<Done>> consumer = Sink.foreach(System.out::println);
// Attach a MergeHub Source to the consumer. This will materialize to a
// corresponding Sink.
RunnableGraph<Sink<String, NotUsed>> runnableGraph =
MergeHub.of(String.class, 16).to(consumer);
// By running/materializing the consumer we get back a Sink, and hence
// now have access to feed elements into it. This Sink can be materialized
// any number of times, and every element that enters the Sink will
// be consumed by our consumer.
Sink<String, NotUsed> toConsumer = runnableGraph.run(materializer);
Source.single("Hello!").runWith(toConsumer, materializer);
Source.single("Hub!").runWith(toConsumer, materializer);
//#merge-hub
}
@Test
public void dynamicBroadcast() {
// Used to be able to clean up the running stream
ActorMaterializer materializer = ActorMaterializer.create(system);
//#broadcast-hub
// A simple producer that publishes a new "message" every second
Source<String, Cancellable> producer = Source.tick(
FiniteDuration.create(1, TimeUnit.SECONDS),
FiniteDuration.create(1, TimeUnit.SECONDS),
"New message"
);
// Attach a BroadcastHub Sink to the producer. This will materialize to a
// corresponding Source.
// (We need to use toMat and Keep.right since by default the materialized
// value to the left is used)
RunnableGraph<Source<String, NotUsed>> runnableGraph =
producer.toMat(BroadcastHub.of(String.class, 256), Keep.right());
// By running/materializing the producer, we get back a Source, which
// gives us access to the elements published by the producer.
Source<String, NotUsed> fromProducer = runnableGraph.run(materializer);
// Print out messages from the producer in two independent consumers
fromProducer.runForeach(msg -> System.out.println("consumer1: " + msg), materializer);
fromProducer.runForeach(msg -> System.out.println("consumer2: " + msg), materializer);
//#broadcast-hub
// Cleanup
materializer.shutdown();
}
@Test
public void mergeBroadcastCombination() {
//#pub-sub-1
// Obtain a Sink and Source which will publish and receive from the "bus" respectively.
Pair<Sink<String, NotUsed>, Source<String, NotUsed>> sinkAndSource =
MergeHub.of(String.class, 16)
.toMat(BroadcastHub.of(String.class, 256), Keep.both())
.run(materializer);
Sink<String, NotUsed> sink = sinkAndSource.first();
Source<String, NotUsed> source = sinkAndSource.second();
//#pub-sub-1
//#pub-sub-2
// Ensure that the Broadcast output is dropped if there are no listening parties.
// If this dropping Sink is not attached, then the broadcast hub will not drop any
// elements itself when there are no subscribers, backpressuring the producer instead.
source.runWith(Sink.ignore(), materializer);
//#pub-sub-2
//#pub-sub-3
// We create now a Flow that represents a publish-subscribe channel using the above
// started stream as its "topic". We add two more features, external cancellation of
// the registration and automatic cleanup for very slow subscribers.
Flow<String, String, UniqueKillSwitch> busFlow =
Flow.fromSinkAndSource(sink, source)
.joinMat(KillSwitches.singleBidi(), Keep.right())
.backpressureTimeout(FiniteDuration.create(1, TimeUnit.SECONDS));
//#pub-sub-3
//#pub-sub-4
UniqueKillSwitch killSwitch =
Source.repeat("Hello World!")
.viaMat(busFlow, Keep.right())
.to(Sink.foreach(System.out::println))
.run(materializer);
// Shut down externally
killSwitch.shutdown();
//#pub-sub-4
}
}

View file

@@ -46,13 +46,14 @@ class KillSwitchDocTest extends AbstractJavaTest {
public void uniqueKillSwitchShutdownExample() throws Exception {
//#unique-shutdown
final Source<Integer, NotUsed> countingSrc =
Source.from(new ArrayList<>(Arrays.asList(1, 2, 3, 4)))
.delay(FiniteDuration.apply(1, TimeUnit.SECONDS), DelayOverflowStrategy.backpressure());
final Sink<Integer, CompletionStage<Integer>> lastSnk = Sink.last();
final Pair<UniqueKillSwitch, CompletionStage<Integer>> stream = countingSrc
.viaMat(KillSwitches.single(), Keep.right())
.toMat(lastSnk, Keep.both()).run(mat);
final UniqueKillSwitch killSwitch = stream.first();
final CompletionStage<Integer> completionStage = stream.second();
@@ -60,20 +61,22 @@ class KillSwitchDocTest extends AbstractJavaTest {
doSomethingElse();
killSwitch.shutdown();
final int finalCount =
completionStage.toCompletableFuture().get(1, TimeUnit.SECONDS);
assertEquals(2, finalCount);
//#unique-shutdown
}
public static void uniqueKillSwitchAbortExample() throws Exception {
//#unique-abort
final Source<Integer, NotUsed> countingSrc =
Source.from(new ArrayList<>(Arrays.asList(1, 2, 3, 4)))
.delay(FiniteDuration.apply(1, TimeUnit.SECONDS), DelayOverflowStrategy.backpressure());
final Sink<Integer, CompletionStage<Integer>> lastSnk = Sink.last();
final Pair<UniqueKillSwitch, CompletionStage<Integer>> stream = countingSrc
.viaMat(KillSwitches.single(), Keep.right())
.toMat(lastSnk, Keep.both()).run(mat);
final UniqueKillSwitch killSwitch = stream.first();
final CompletionStage<Integer> completionStage = stream.second();
@@ -81,31 +84,36 @@ class KillSwitchDocTest extends AbstractJavaTest {
final Exception error = new Exception("boom!");
killSwitch.abort(error);
final int result =
completionStage.toCompletableFuture().exceptionally(e -> -1).get(1, TimeUnit.SECONDS);
assertEquals(-1, result);
//#unique-abort
}
public void sharedKillSwitchShutdownExample() throws Exception {
//#shared-shutdown
final Source<Integer, NotUsed> countingSrc =
Source.from(new ArrayList<>(Arrays.asList(1, 2, 3, 4)))
.delay(FiniteDuration.apply(1, TimeUnit.SECONDS), DelayOverflowStrategy.backpressure());
final Sink<Integer, CompletionStage<Integer>> lastSnk = Sink.last();
final SharedKillSwitch killSwitch = KillSwitches.shared("my-kill-switch");
final CompletionStage<Integer> completionStage = countingSrc
.viaMat(killSwitch.flow(), Keep.right())
.toMat(lastSnk, Keep.right()).run(mat);
final CompletionStage<Integer> completionStageDelayed = countingSrc
.delay(FiniteDuration.apply(1, TimeUnit.SECONDS), DelayOverflowStrategy.backpressure())
.viaMat(killSwitch.flow(), Keep.right())
.toMat(lastSnk, Keep.right()).run(mat);
doSomethingElse();
killSwitch.shutdown();
final int finalCount =
completionStage.toCompletableFuture().get(1, TimeUnit.SECONDS);
final int finalCountDelayed =
completionStageDelayed.toCompletableFuture().get(1, TimeUnit.SECONDS);
assertEquals(2, finalCount);
assertEquals(1, finalCountDelayed);
//#shared-shutdown
@@ -113,23 +121,27 @@ class KillSwitchDocTest extends AbstractJavaTest {
public static void sharedKillSwitchAbortExample() throws Exception {
//#shared-abort
final Source<Integer, NotUsed> countingSrc =
Source.from(new ArrayList<>(Arrays.asList(1, 2, 3, 4)))
.delay(FiniteDuration.apply(1, TimeUnit.SECONDS), DelayOverflowStrategy.backpressure());
final Sink<Integer, CompletionStage<Integer>> lastSnk = Sink.last();
final SharedKillSwitch killSwitch = KillSwitches.shared("my-kill-switch");
final CompletionStage<Integer> completionStage1 = countingSrc
.viaMat(killSwitch.flow(), Keep.right())
.toMat(lastSnk, Keep.right()).run(mat);
final CompletionStage<Integer> completionStage2 = countingSrc
.viaMat(killSwitch.flow(), Keep.right())
.toMat(lastSnk, Keep.right()).run(mat);
final Exception error = new Exception("boom!");
killSwitch.abort(error);
final int result1 =
completionStage1.toCompletableFuture().exceptionally(e -> -1).get(1, TimeUnit.SECONDS);
final int result2 =
completionStage2.toCompletableFuture().exceptionally(e -> -1).get(1, TimeUnit.SECONDS);
assertEquals(-1, result1);
assertEquals(-1, result2);
//#shared-abort

View file

@@ -240,6 +240,14 @@ Subscribers will receive ``Replicator.DataDeleted``.
.. includecode:: code/docs/ddata/DistributedDataDocTest.java#delete
.. warning::
As deleted keys continue to be included in the stored data on each node as well as in gossip
messages, a continuous series of updates and deletes of top-level entities will result in
growing memory usage until an ActorSystem runs out of memory. To use Akka Distributed Data
where frequent adds and removes are required, you should use a fixed number of top-level data
types that support both updates and removals, for example ``ORMap`` or ``ORSet``.
Data Types
==========

View file

@@ -58,13 +58,13 @@ to the Actor as a message:
.. warning::
Be sure to consume the response entities ``dataBytes:Source[ByteString,Unit]`` by for example connecting it
-to a ``Sink`` (for example ``response.entity.dataBytes.runWith(Sink.ignore)`` if you don't care about the
+to a ``Sink`` (for example ``response.discardEntityBytes(Materializer)`` if you don't care about the
response entity), since otherwise Akka HTTP (and the underlying Streams infrastructure) will understand the
lack of entity consumption as a back-pressure signal and stop reading from the underlying TCP connection!
This is a feature of Akka HTTP that allows consuming entities (and pulling them through the network) in
a streaming fashion, and only *on demand* when the client is ready to consume the bytes -
-it may be a bit suprising at first though.
+it may be a bit surprising at first though.
There are tickets open about automatically dropping entities if not consumed (`#18716`_ and `#18540`_),
so these may be implemented in the near future.

View file

@@ -4,18 +4,19 @@ Implications of the streaming nature of Request/Response Entities
-----------------------------------------------------------------
Akka HTTP is streaming *all the way through*, which means that the back-pressure mechanisms enabled by Akka Streams
are exposed through all layers – from the TCP layer, through the HTTP server, all the way up to the user-facing ``HttpRequest``
and ``HttpResponse`` and their ``HttpEntity`` APIs.
-This has suprising implications if you are used to non-streaming / not-reactive HTTP clients.
+This has surprising implications if you are used to non-streaming / not-reactive HTTP clients.
Specifically it means that: "*lack of consumption of the HTTP Entity, is signaled as back-pressure to the other
side of the connection*". This is a feature, as it allows one only to consume the entity, and back-pressure servers/clients
from overwhelming our application, possibly causing un-necessary buffering of the entity in memory.
.. warning::
Consuming (or discarding) the Entity of a request is mandatory!
If *accidentally* left neither consumed nor discarded Akka HTTP will
-asume the incoming data should remain back-pressured, and will stall the incoming data via TCP back-pressure mechanisms.
+assume the incoming data should remain back-pressured, and will stall the incoming data via TCP back-pressure mechanisms.
A client should consume the Entity regardless of the status of the ``HttpResponse``.
Client-Side handling of streaming HTTP Entities
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -25,7 +26,7 @@ Consuming the HTTP Response Entity (Client)
The most common use-case of course is consuming the response entity, which can be done via
running the underlying ``dataBytes`` Source. This is as simple as running the dataBytes source,
-(or on the server-side using directives such as
+(or on the server-side using directives such as ``BasicDirectives.extractDataBytes``).
It is encouraged to use various streaming techniques to utilise the underlying infrastructure to its fullest,
for example by framing the incoming chunks, parsing them line-by-line and then connecting the flow into another
@@ -34,16 +35,16 @@ destination Sink, such as a File or other Akka Streams connector:
.. includecode:: ../code/docs/http/javadsl/HttpClientExampleDocTest.java#manual-entity-consume-example-1
however sometimes the need may arise to consume the entire entity as ``Strict`` entity (which means that it is
completely loaded into memory). Akka HTTP provides a special ``toStrict(timeout, materializer)`` method which can be used to
eagerly consume the entity and make it available in memory:
.. includecode:: ../code/docs/http/javadsl/HttpClientExampleDocTest.java#manual-entity-consume-example-2
Discarding the HTTP Response Entity (Client)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes when calling HTTP services we do not care about their response payload (e.g. all we care about is the response code),
yet as explained above the entity still has to be consumed in some way, otherwise we'll be exerting back-pressure on the
underlying TCP connection.
The ``discardEntityBytes`` convenience method serves the purpose of easily discarding the entity if it has no purpose for us.
@@ -83,22 +84,22 @@ Discarding the HTTP Request Entity (Server)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes, depending on some validation (e.g. checking if given user is allowed to perform uploads or not)
you may want to decide to discard the uploaded entity.
Please note that discarding means that the entire upload will proceed, even though you are not interested in the data
being streamed to the server - this may be useful if you are simply not interested in the given entity, however
you don't want to abort the entire connection (which we'll demonstrate as well), since there may be more requests
pending on the same connection still.
In order to discard the data bytes explicitly you can invoke the ``discardEntityBytes`` method of the incoming ``HttpRequest``:
.. includecode:: ../code/docs/http/javadsl/server/HttpServerExampleDocTest.java#discard-discardEntityBytes
A related concept is *cancelling* the incoming ``entity.getDataBytes()`` stream, which results in Akka HTTP
*abruptly closing the connection from the Client*. This may be useful when you detect that the given user should not be allowed to make any
uploads at all, and you want to drop the connection (instead of reading and ignoring the incoming data).
This can be done by attaching the incoming ``entity.getDataBytes()`` to a ``Sink.cancelled`` which will cancel
the entity stream, which in turn will cause the underlying connection to be shut-down by the server
effectively hard-aborting the incoming request:
.. includecode:: ../code/docs/http/javadsl/server/HttpServerExampleDocTest.java#discard-close-connections
@@ -112,10 +113,10 @@ Under certain conditions it is possible to detect an entity is very unlikely to
and issue warnings or discard the entity automatically. This advanced feature has not been implemented yet, see the below
note and issues for further discussion and ideas.
.. note::
An advanced feature code named "auto draining" has been discussed and proposed for Akka HTTP, and we're hoping
to implement or help the community implement it.
You can read more about it in `issue #18716 <https://github.com/akka/akka/issues/18716>`_
as well as `issue #18540 <https://github.com/akka/akka/issues/18540>`_ ; as always, contributions are very welcome!

View file

@@ -1252,6 +1252,24 @@ If materialized values need to be collected ``prependMat`` is available.
**completes** when all upstreams complete
orElse
^^^^^^
If the primary source completes without emitting any elements, the elements from the secondary source
are emitted. If the primary source emits any elements, the secondary source is cancelled.
Note that both sources are materialized directly and the secondary source is backpressured until it becomes
the source of elements or is cancelled.
Signals errors downstream, regardless of which of the two sources emitted the error.
**emits** when an element is available from the first stream, or when the first stream closed without emitting any elements and an element
is available from the second stream
**backpressures** when downstream backpressures
**completes** when the primary stream completes after emitting at least one element, when the primary stream completes
without emitting any elements and the secondary stream has already completed, or when the secondary stream completes
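A minimal sketch of these semantics (shown in Scala for brevity; an implicit materializer is assumed to be in scope)::

  val primary = Source.empty[String]
  val fallback = Source.single("fallback")
  primary.orElse(fallback).runForeach(println) // prints "fallback"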
interleave
^^^^^^^^^^
Emits a specifiable number of elements from the original source, then from the provided source and repeats. If one

View file

@@ -61,3 +61,80 @@ Refer to the below for usage examples.
A ``UniqueKillSwitch`` is always a result of a materialization, whilst ``SharedKillSwitch`` needs to be constructed
before any materialization takes place.
Dynamic fan-in and fan-out with MergeHub and BroadcastHub
---------------------------------------------------------
There are many cases when consumers or producers of a certain service (represented as a Sink, Source, or possibly Flow)
are dynamic and not known in advance. The Graph DSL cannot express this: all connections of the graph
must be known in advance and must be connected upfront. To allow dynamic fan-in and fan-out streaming, the Hubs
should be used. They provide means to construct :class:`Sink` and :class:`Source` pairs that are "attached" to each
other, but one of them can be materialized multiple times to implement dynamic fan-in or fan-out.
Using the MergeHub
^^^^^^^^^^^^^^^^^^
A :class:`MergeHub` makes it possible to implement a dynamic fan-in junction point in a graph where elements coming from
different producers are emitted in a First-Comes-First-Served fashion. If the consumer cannot keep up then *all* of the
producers are backpressured. The hub itself comes as a :class:`Source` to which the single consumer can be attached.
It is not possible to attach any producers until this :class:`Source` has been materialized (started). This is ensured
by the fact that we only get the corresponding :class:`Sink` as a materialized value. Usage might look like this:
.. includecode:: ../code/docs/stream/HubDocTest.java#merge-hub
This sequence, while it might look odd at first, ensures proper startup order. Once we get the :class:`Sink`,
we can use it as many times as wanted. Everything that is fed to it will be delivered to the consumer we attached
previously until it cancels.
Using the BroadcastHub
^^^^^^^^^^^^^^^^^^^^^^
A :class:`BroadcastHub` can be used to consume elements from a common producer by a dynamic set of consumers. The
rate of the producer will be automatically adapted to the slowest consumer. In this case, the hub is a :class:`Sink`
to which the single producer must be attached first. Consumers can only be attached once the :class:`Sink` has
been materialized (i.e. the producer has been started). One example of using the :class:`BroadcastHub`:
.. includecode:: ../code/docs/stream/HubDocTest.java#broadcast-hub
The resulting :class:`Source` can be materialized any number of times, each materialization effectively attaching
a new subscriber. If there are no subscribers attached to this hub then it will not drop any elements but instead
backpressure the upstream producer until subscribers arrive. This behavior can be tweaked by using the combinators
``.buffer`` for example with a drop strategy, or just attaching a subscriber that drops all messages. If there
are no other subscribers, this will ensure that the producer is kept drained (dropping all elements) and once a new
subscriber arrives it will adaptively slow down, ensuring no more messages are dropped.
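For example, either of the following keeps the hub drained when nobody is listening (a sketch in Scala for brevity,
assuming ``fromProducer`` is the materialized ``Source`` from the example above and an implicit materializer is in
scope; the buffer size is illustrative)::

  // a subscriber that simply drops every element
  fromProducer.runWith(Sink.ignore)
  // or: decouple slow subscribers with a dropping buffer
  fromProducer.buffer(256, OverflowStrategy.dropHead).runForeach(println)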
Combining dynamic stages to build a simple Publish-Subscribe service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The features provided by the Hub implementations are limited by default. This is by design, as various combinations
can be used to express additional features like unsubscribing producers or consumers externally. We show here
an example that builds a :class:`Flow` representing a publish-subscribe channel. The input of the :class:`Flow` is
published to all subscribers while the output streams all the elements published.
First, we connect a :class:`MergeHub` and a :class:`BroadcastHub` together to form a publish-subscribe channel. Once
we materialize this small stream, we get back a pair of :class:`Source` and :class:`Sink` that together define
the publish and subscribe sides of our channel.
.. includecode:: ../code/docs/stream/HubDocTest.java#pub-sub-1
We now use a few tricks to add more features. First of all, we attach a ``Sink.ignore``
at the broadcast side of the channel to keep it drained when there are no subscribers. If this behavior is not the
desired one this line can be simply dropped.
.. includecode:: ../code/docs/stream/HubDocTest.java#pub-sub-2
We now wrap the :class:`Sink` and :class:`Source` in a :class:`Flow` using ``Flow.fromSinkAndSource``. This bundles
up the two sides of the channel into one and forces users of it to always define a publisher and subscriber side
(even if the subscriber side is just dropping). It also allows us to very simply attach a :class:`KillSwitch` as
a :class:`BidiStage` which in turn makes it possible to close both the original :class:`Sink` and :class:`Source` at the
same time.
Finally, we add ``backpressureTimeout`` on the consumer side to ensure that subscribers that block the channel for more
than the configured timeout are forcefully removed (and their stream failed).
.. includecode:: ../code/docs/stream/HubDocTest.java#pub-sub-3
The resulting Flow now has a type of ``Flow[String, String, UniqueKillSwitch]`` representing a publish-subscribe
channel which can be used any number of times to attach new producers or consumers. In addition, it materializes
to a :class:`UniqueKillSwitch` (see :ref:`unique-kill-switch-java`) that can be used to deregister a single user externally:
.. includecode:: ../code/docs/stream/HubDocTest.java#pub-sub-4

View file

@@ -207,8 +207,9 @@ class HttpClientExampleSpec extends WordSpec with Matchers with CompileOnlySpec
def receive = {
case HttpResponse(StatusCodes.OK, headers, entity, _) =>
log.info("Got response, body: " + entity.dataBytes.runFold(ByteString(""))(_ ++ _))
-case HttpResponse(code, _, _, _) =>
+case resp @ HttpResponse(code, _, _, _) =>
log.info("Request failed, response code: " + code)
resp.discardEntityBytes()
}
}

View file

@@ -0,0 +1,109 @@
/**
* Copyright (C) 2015-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package docs.stream
import akka.NotUsed
import akka.stream.{ ActorMaterializer, KillSwitches, UniqueKillSwitch }
import akka.stream.scaladsl._
import akka.testkit.AkkaSpec
import docs.CompileOnlySpec
import scala.concurrent.duration._
class HubsDocSpec extends AkkaSpec with CompileOnlySpec {
implicit val materializer = ActorMaterializer()
"Hubs" must {
"demonstrate creating a dynamic merge" in {
def println(s: String) = testActor ! s
//#merge-hub
// A simple consumer that will print to the console for now
val consumer = Sink.foreach(println)
// Attach a MergeHub Source to the consumer. This will materialize to a
// corresponding Sink.
val runnableGraph: RunnableGraph[Sink[String, NotUsed]] =
MergeHub.source[String](perProducerBufferSize = 16).to(consumer)
// By running/materializing the consumer we get back a Sink, and hence
// now have access to feed elements into it. This Sink can be materialized
// any number of times, and every element that enters the Sink will
// be consumed by our consumer.
val toConsumer: Sink[String, NotUsed] = runnableGraph.run()
// Feeding two independent sources into the hub.
Source.single("Hello!").runWith(toConsumer)
Source.single("Hub!").runWith(toConsumer)
//#merge-hub
expectMsgAllOf("Hello!", "Hub!")
}
"demonstrate creating a dynamic broadcast" in compileOnlySpec {
//#broadcast-hub
// A simple producer that publishes a new "message" every second
val producer = Source.tick(1.second, 1.second, "New message")
// Attach a BroadcastHub Sink to the producer. This will materialize to a
// corresponding Source.
// (We need to use toMat and Keep.right since by default the materialized
// value to the left is used)
val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
producer.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.right)
// By running/materializing the producer, we get back a Source, which
// gives us access to the elements published by the producer.
val fromProducer: Source[String, NotUsed] = runnableGraph.run()
// Print out messages from the producer in two independent consumers
fromProducer.runForeach(msg => println("consumer1: " + msg))
fromProducer.runForeach(msg => println("consumer2: " + msg))
//#broadcast-hub
}
"demonstrate combination" in {
def println(s: String) = testActor ! s
//#pub-sub-1
// Obtain a Sink and Source which will publish and receive from the "bus" respectively.
val (sink, source) =
MergeHub.source[String](perProducerBufferSize = 16)
.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
.run()
//#pub-sub-1
//#pub-sub-2
// Ensure that the Broadcast output is dropped if there are no listening parties.
// If this dropping Sink is not attached, then the broadcast hub will not drop any
// elements itself when there are no subscribers, backpressuring the producer instead.
source.runWith(Sink.ignore)
//#pub-sub-2
//#pub-sub-3
// We create now a Flow that represents a publish-subscribe channel using the above
// started stream as its "topic". We add two more features, external cancellation of
// the registration and automatic cleanup for very slow subscribers.
val busFlow: Flow[String, String, UniqueKillSwitch] =
Flow.fromSinkAndSource(sink, source)
.joinMat(KillSwitches.singleBidi[String, String])(Keep.right)
.backpressureTimeout(3.seconds)
//#pub-sub-3
//#pub-sub-4
val switch: UniqueKillSwitch =
Source.repeat("Hello world!")
.viaMat(busFlow)(Keep.right)
.to(Sink.foreach(println))
.run()
// Shut down externally
switch.shutdown()
//#pub-sub-4
}
}
}

View file

@@ -240,6 +240,14 @@ Subscribers will receive ``Replicator.DataDeleted``.
.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#delete
.. warning::
As deleted keys continue to be included in the stored data on each node as well as in gossip
messages, a continuous series of updates and deletes of top-level entities will result in
growing memory usage until an ActorSystem runs out of memory. To use Akka Distributed Data
where frequent adds and removes are required, you should use a fixed number of top-level data
types that support both updates and removals, for example ``ORMap`` or ``ORSet``.
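For instance, a single top-level ``ORSet`` key supports repeated additions and removals without accumulating
top-level tombstones (a sketch; the key name and element are illustrative, and an implicit ``Cluster`` and a
``replicator`` reference are assumed to be in scope)::

  val DataKey = ORSetKey[String]("shopping-cart")
  replicator ! Update(DataKey, ORSet.empty[String], WriteLocal)(_ + "apple")
  replicator ! Update(DataKey, ORSet.empty[String], WriteLocal)(_ - "apple")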
Data Types
==========

View file

@@ -8,7 +8,7 @@ The request-level API is the most convenient way of using Akka HTTP's client-sid
Depending on your preference you can pick the flow-based or the future-based variant.
.. note::
It is recommended to first read the :ref:`implications-of-streaming-http-entities` section,
as it explains the underlying full-stack streaming concepts, which may be unexpected when coming
from a background with non-"streaming first" HTTP Clients.
@@ -68,13 +68,13 @@ Example
.. warning::
Be sure to consume the response entities ``dataBytes:Source[ByteString,Unit]`` by for example connecting it
-to a ``Sink`` (for example ``response.entity.dataBytes.runWith(Sink.ignore)`` if you don't care about the
+to a ``Sink`` (for example ``response.discardEntityBytes()`` if you don't care about the
response entity), since otherwise Akka HTTP (and the underlying Streams infrastructure) will understand the
lack of entity consumption as a back-pressure signal and stop reading from the underlying TCP connection!
This is a feature of Akka HTTP that allows consuming entities (and pulling them through the network) in
a streaming fashion, and only *on demand* when the client is ready to consume the bytes -
-it may be a bit suprising at first though.
+it may be a bit surprising at first though.
There are tickets open about automatically dropping entities if not consumed (`#18716`_ and `#18540`_),
so these may be implemented in the near future.
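For example, with the future-based variant (a sketch, assuming an implicit ``ActorSystem``, materializer and
execution context are in scope)::

  Http().singleRequest(HttpRequest(uri = "http://example.com/")).foreach { response =>
    // only the status code matters here, so drain and ignore the entity
    println(response.status)
    response.discardEntityBytes()
  }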

View file

@@ -4,18 +4,19 @@ Implications of the streaming nature of Request/Response Entities
-----------------------------------------------------------------
Akka HTTP is streaming *all the way through*, which means that the back-pressure mechanisms enabled by Akka Streams
are exposed through all layers – from the TCP layer, through the HTTP server, all the way up to the user-facing ``HttpRequest``
and ``HttpResponse`` and their ``HttpEntity`` APIs.
-This has suprising implications if you are used to non-streaming / not-reactive HTTP clients.
+This has surprising implications if you are used to non-streaming / not-reactive HTTP clients.
Specifically it means that: "*lack of consumption of the HTTP Entity, is signaled as back-pressure to the other
side of the connection*". This is a feature, as it allows one only to consume the entity, and back-pressure servers/clients
from overwhelming our application, possibly causing un-necessary buffering of the entity in memory.
.. warning::
Consuming (or discarding) the Entity of a request is mandatory!
If *accidentally* left neither consumed nor discarded Akka HTTP will
-asume the incoming data should remain back-pressured, and will stall the incoming data via TCP back-pressure mechanisms.
+assume the incoming data should remain back-pressured, and will stall the incoming data via TCP back-pressure mechanisms.
A client should consume the Entity regardless of the status of the ``HttpResponse``.
Client-Side handling of streaming HTTP Entities
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -25,7 +26,7 @@ Consuming the HTTP Response Entity (Client)
The most common use-case of course is consuming the response entity, which can be done via
running the underlying ``dataBytes`` Source. This is as simple as running the dataBytes source,
-(or on the server-side using directives such as
+(or on the server-side using directives such as ``BasicDirectives.extractDataBytes``).
It is encouraged to use various streaming techniques to utilise the underlying infrastructure to its fullest,
for example by framing the incoming chunks, parsing them line-by-line and then connecting the flow into another
@@ -35,17 +36,17 @@ destination Sink, such as a File or other Akka Streams connector:
:include: manual-entity-consume-example-1
however sometimes the need may arise to consume the entire entity as ``Strict`` entity (which means that it is
completely loaded into memory). Akka HTTP provides a special ``toStrict(timeout)`` method which can be used to
eagerly consume the entity and make it available in memory:
.. includecode:: ../code/docs/http/scaladsl/HttpClientExampleSpec.scala
:include: manual-entity-consume-example-2
Discarding the HTTP Response Entity (Client)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes when calling HTTP services we do not care about their response payload (e.g. all we care about is the response code),
yet as explained above the entity still has to be consumed in some way, otherwise we'll be exerting back-pressure on the
underlying TCP connection.
The ``discardEntityBytes`` convenience method serves the purpose of easily discarding the entity if it has no purpose for us.
@@ -89,23 +90,23 @@ Discarding the HTTP Request Entity (Server)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes, depending on some validation (e.g. checking if given user is allowed to perform uploads or not)
you may want to decide to discard the uploaded entity.
Please note that discarding means that the entire upload will proceed, even though you are not interested in the data
being streamed to the server - this may be useful if you are simply not interested in the given entity, however
you don't want to abort the entire connection (which we'll demonstrate as well), since there may be more requests
pending on the same connection still.
pending on the same connection still.
In order to discard the data bytes explicitly you can invoke the ``discardEntityBytes`` method of the incoming ``HttpRequest``:
.. includecode:: ../code/docs/http/scaladsl/HttpServerExampleSpec.scala
:include: discard-discardEntityBytes
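A hedged sketch of such a route (the path, verb and response text are illustrative only; ``extractMaterializer`` brings the materializer that ``discardEntityBytes`` requires into scope):

.. code-block:: scala

   import akka.http.scaladsl.server.Directives._

   val route = (put & path("upload")) {
     extractMaterializer { implicit mat =>
       extractRequest { request =>
         request.discardEntityBytes() // the upload proceeds, but the data is drained and ignored
         complete("Uploaded and discarded")
       }
     }
   }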
A related concept is *cancelling* the incoming ``entity.dataBytes`` stream, which results in Akka HTTP
A related concept is *cancelling* the incoming ``entity.dataBytes`` stream, which results in Akka HTTP
*abruptly closing the connection from the Client*. This may be useful when you detect that the given user should not be allowed to make any
uploads at all, and you want to drop the connection (instead of reading and ignoring the incoming data).
This can be done by attaching the incoming ``entity.dataBytes`` to a ``Sink.cancelled`` which will cancel
the entity stream, which in turn will cause the underlying connection to be shut-down by the server
This can be done by attaching the incoming ``entity.dataBytes`` to a ``Sink.cancelled`` which will cancel
the entity stream, which in turn will cause the underlying connection to be shut down by the server,
effectively hard-aborting the incoming request:
.. includecode:: ../code/docs/http/scaladsl/HttpServerExampleSpec.scala
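The same idea as a short sketch (the status code and route shape are illustrative only):

.. code-block:: scala

   import akka.http.scaladsl.model.StatusCodes
   import akka.http.scaladsl.server.Directives._
   import akka.stream.scaladsl.Sink

   val route = extractMaterializer { implicit mat =>
     extractRequest { request =>
       // cancelling the dataBytes stream causes the server to abruptly close the connection
       request.entity.dataBytes.runWith(Sink.cancelled)
       complete(StatusCodes.Forbidden)
     }
   }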
@ -120,10 +121,10 @@ Under certain conditions it is possible to detect an entity is very unlikely to
and issue warnings or discard the entity automatically. This advanced feature has not been implemented yet; see the
note and issues below for further discussion and ideas.
.. note::
An advanced feature code named "auto draining" has been discussed and proposed for Akka HTTP, and we're hoping
.. note::
An advanced feature code-named "auto draining" has been discussed and proposed for Akka HTTP, and we're hoping
to implement or help the community implement it.
You can read more about it in `issue #18716 <https://github.com/akka/akka/issues/18716>`_
You can read more about it in `issue #18716 <https://github.com/akka/akka/issues/18716>`_
as well as `issue #18540 <https://github.com/akka/akka/issues/18540>`_ ; as always, contributions are very welcome!

View file

@ -11,7 +11,7 @@ Signature
Description
-----------
Provides the value of ``X-Forwarded-For``, ``Remote-Address``, or ``X-Real-IP`` headers as an instance of ``HttpIp``.
Provides the value of ``X-Forwarded-For``, ``Remote-Address``, or ``X-Real-IP`` headers as an instance of ``RemoteAddress``.
The akka-http server engine adds the ``Remote-Address`` header to every request automatically if the respective
setting ``akka.http.server.remote-address-header`` is set to ``on``. By default it is set to ``off``.
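For illustration, a small sketch of using the directive (``toOption`` yields the underlying ``InetAddress``, if one is known):

.. code-block:: scala

   import akka.http.scaladsl.server.Directives._

   val route = extractClientIP { ip =>
     complete(s"Client's IP is ${ip.toOption.map(_.getHostAddress).getOrElse("unknown")}")
   }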

View file

@ -1254,6 +1254,24 @@ If materialized values needs to be collected ``prependMat`` is available.
**completes** when all upstreams complete
orElse
^^^^^^
If the primary source completes without emitting any elements, the elements from the secondary source
are emitted. If the primary source emits any elements, the secondary source is cancelled.
Note that both sources are materialized directly and the secondary source is backpressured until it becomes
the source of elements or is cancelled.
Signals errors downstream, regardless of which of the two sources emitted the error.
**emits** when an element is available from the first stream, or when the first stream closed without emitting any elements
and an element is available from the second stream
**backpressures** when downstream backpressures
**completes** when the primary stream completes after emitting at least one element, when the primary stream completes
without emitting and the secondary stream has already completed, or when the secondary stream completes
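A minimal usage sketch of the operator:

.. code-block:: scala

   import akka.stream.scaladsl.{ Sink, Source }

   val primary   = Source.empty[Int]
   val secondary = Source(List(1, 2, 3))

   // primary never emits, so the elements of secondary are passed through
   primary.orElse(secondary).runWith(Sink.foreach(println)) // prints 1, 2, 3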
interleave
^^^^^^^^^^
Emits a specifiable number of elements from the original source, then from the provided source and repeats. If one

View file

@ -61,3 +61,80 @@ Refer to the below for usage examples.
A ``UniqueKillSwitch`` is always a result of a materialization, whilst ``SharedKillSwitch`` needs to be constructed
before any materialization takes place.
Dynamic fan-in and fan-out with MergeHub and BroadcastHub
---------------------------------------------------------
There are many cases when consumers or producers of a certain service (represented as a Sink, Source, or possibly Flow)
are dynamic and not known in advance. The Graph DSL cannot represent this: all connections of the graph
must be known in advance and must be connected upfront. To allow dynamic fan-in and fan-out streaming, the Hubs
should be used. They provide means to construct :class:`Sink` and :class:`Source` pairs that are "attached" to each
other, but one of them can be materialized multiple times to implement dynamic fan-in or fan-out.
Using the MergeHub
^^^^^^^^^^^^^^^^^^
A :class:`MergeHub` makes it possible to implement a dynamic fan-in junction point in a graph where elements coming from
different producers are emitted in a first-come-first-served fashion. If the consumer cannot keep up then *all* of the
producers are backpressured. The hub itself comes as a :class:`Source` to which the single consumer can be attached.
It is not possible to attach any producers until this :class:`Source` has been materialized (started). This is ensured
by the fact that we only get the corresponding :class:`Sink` as a materialized value. Usage might look like this:
.. includecode:: ../code/docs/stream/HubsDocSpec.scala#merge-hub
This sequence, while it might look odd at first, ensures proper startup order. Once we get the :class:`Sink`,
we can use it as many times as wanted. Everything that is fed to it will be delivered to the consumer we attached
previously until it cancels.
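Condensed from the included example, the pattern looks roughly like this (a sketch; the buffer size is arbitrary and an implicit materializer is assumed):

.. code-block:: scala

   import akka.NotUsed
   import akka.stream.scaladsl.{ MergeHub, Sink, Source }

   // materializing the consumer side yields the shared Sink as the materialized value
   val toConsumer: Sink[String, NotUsed] =
     MergeHub.source[String](perProducerBufferSize = 16)
       .to(Sink.foreach(println))
       .run()

   // any number of producers may now attach to the shared Sink
   Source.single("Hello!").runWith(toConsumer)
   Source.single("Hub!").runWith(toConsumer)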
Using the BroadcastHub
^^^^^^^^^^^^^^^^^^^^^^
A :class:`BroadcastHub` can be used to consume elements from a common producer by a dynamic set of consumers. The
rate of the producer will be automatically adapted to the slowest consumer. In this case, the hub is a :class:`Sink`
to which the single producer must be attached first. Consumers can only be attached once the :class:`Sink` has
been materialized (i.e. the producer has been started). One example of using the :class:`BroadcastHub`:
.. includecode:: ../code/docs/stream/HubsDocSpec.scala#broadcast-hub
The resulting :class:`Source` can be materialized any number of times, each materialization effectively attaching
a new subscriber. If there are no subscribers attached to this hub then it will not drop any elements but instead
backpressure the upstream producer until subscribers arrive. This behavior can be tweaked by using combinators such as
``.buffer`` with a drop strategy, or by attaching a subscriber that drops all messages. If there
are no other subscribers, this ensures that the producer is kept drained (dropping all elements), and once a new
subscriber arrives it will adaptively slow down, ensuring no more messages are dropped.
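Again condensed into a sketch (the tick interval and buffer size are arbitrary; an implicit materializer is assumed):

.. code-block:: scala

   import akka.NotUsed
   import akka.stream.scaladsl.{ BroadcastHub, Keep, Sink, Source }
   import scala.concurrent.duration._

   val producer = Source.tick(1.second, 1.second, "New message")

   // materializing the producer side yields a Source that can be materialized many times
   val fromProducer: Source[String, NotUsed] =
     producer.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.right).run()

   fromProducer.runWith(Sink.foreach(msg => println("consumer1: " + msg)))
   fromProducer.runWith(Sink.foreach(msg => println("consumer2: " + msg)))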
Combining dynamic stages to build a simple Publish-Subscribe service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The features provided by the Hub implementations are limited by default. This is by design, as various combinations
can be used to express additional features like unsubscribing producers or consumers externally. We show here
an example that builds a :class:`Flow` representing a publish-subscribe channel. The input of the :class:`Flow` is
published to all subscribers while the output streams all the elements published.
First, we connect a :class:`MergeHub` and a :class:`BroadcastHub` together to form a publish-subscribe channel. Once
we materialize this small stream, we get back a pair of :class:`Source` and :class:`Sink` that together define
the publish and subscribe sides of our channel.
.. includecode:: ../code/docs/stream/HubsDocSpec.scala#pub-sub-1
We now use a few tricks to add more features. First of all, we attach a ``Sink.ignore``
at the broadcast side of the channel to keep it drained when there are no subscribers. If this is not the
desired behavior, this line can simply be dropped.
.. includecode:: ../code/docs/stream/HubsDocSpec.scala#pub-sub-2
We now wrap the :class:`Sink` and :class:`Source` in a :class:`Flow` using ``Flow.fromSinkAndSource``. This bundles
up the two sides of the channel into one and forces users of it to always define a publisher and subscriber side
(even if the subscriber side is just dropping). It also allows us to very simply attach a :class:`KillSwitch` as
a :class:`BidiStage` which in turn makes it possible to close both the original :class:`Sink` and :class:`Source` at the
same time.
Finally, we add ``backpressureTimeout`` on the consumer side to ensure that subscribers that block the channel for more
than 3 seconds are forcefully removed (and their stream failed).
.. includecode:: ../code/docs/stream/HubsDocSpec.scala#pub-sub-3
The resulting Flow now has a type of ``Flow[String, String, UniqueKillSwitch]`` representing a publish-subscribe
channel which can be used any number of times to attach new producers or consumers. In addition, it materializes
to a :class:`UniqueKillSwitch` (see :ref:`unique-kill-switch-scala`) that can be used to deregister a single user externally:
.. includecode:: ../code/docs/stream/HubsDocSpec.scala#pub-sub-4
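Putting the snippets together, the complete channel might look roughly like this (a sketch mirroring the steps above; the sizes and the timeout are the ones mentioned in the text, and an implicit materializer is assumed):

.. code-block:: scala

   import scala.concurrent.duration._
   import akka.stream.{ KillSwitches, UniqueKillSwitch }
   import akka.stream.scaladsl._

   // pub-sub-1: the merge side feeds the broadcast side
   val (sink, source) =
     MergeHub.source[String](perProducerBufferSize = 16)
       .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
       .run()

   // pub-sub-2: keep the channel drained while there are no subscribers
   source.runWith(Sink.ignore)

   // pub-sub-3: bundle up both sides, add a kill switch and a backpressure timeout
   val busFlow: Flow[String, String, UniqueKillSwitch] =
     Flow.fromSinkAndSource(sink, source)
       .joinMat(KillSwitches.singleBidi[String, String])(Keep.right)
       .backpressureTimeout(3.seconds)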

View file

@ -14,6 +14,7 @@ import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.StreamSupport;
public abstract class Query {
/**
@ -99,6 +100,16 @@ public abstract class Query {
return new JavaQuery(UriJavaAccessor.queryApply(params));
}
/**
* Returns a Query from the given parameters.
*/
public static Query create(Iterable<Pair<String, String>> params) {
@SuppressWarnings("unchecked")
final Pair<String, String>[] paramsArray =
StreamSupport.stream(params.spliterator(), false).toArray(Pair[]::new);
return create(paramsArray);
}
/**
* Returns a Query from the given parameters.
*/

View file

@ -135,25 +135,6 @@ akka.http {
# doesn't have to be fiddled with in most applications.
request-header-size-hint = 512
# The proxy configurations to be used for requests with the specified
# scheme.
proxy {
# Proxy settings for unencrypted HTTP requests
# Set to 'none' to always connect directly, 'default' to use the system
# settings as described in http://docs.oracle.com/javase/6/docs/technotes/guides/net/proxies.html
# or specify the proxy host, port and non proxy hosts as demonstrated
# in the following example:
# http {
# host = myproxy.com
# port = 8080
# non-proxy-hosts = ["*.direct-access.net"]
# }
http = default
# Proxy settings for HTTPS requests (currently unsupported)
https = default
}
# Socket options to set for the listening socket. If a setting is left
# undefined, it will use whatever the default on the system is.
socket-options {

View file

@ -81,7 +81,7 @@ private[http] object One2OneBidiFlow {
push(out, element)
if (pullSuppressed) {
pullSuppressed = false
pull(in)
if (!isClosed(in)) pull(in) // don't pull if the input completed while the pull was suppressed
}
} else throw new UnexpectedOutputException(element)
}

View file

@ -92,7 +92,7 @@ object HttpHeader {
case h if clazz.isInstance(h) return OptionVal.Some[T](h.asInstanceOf[T])
case _ // continue ...
}
OptionVal.None.asInstanceOf[OptionVal[T]]
OptionVal.None
}
sealed trait ParsingResult {

View file

@ -611,7 +611,7 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
case ResponseRenderingOutput.HttpData(bytes) bytes
case _: ResponseRenderingOutput.SwitchToWebSocket throw new IllegalStateException("Didn't expect websocket response")
}
.groupedWithin(1000, 100.millis)
.groupedWithin(1000, 200.millis)
.watchTermination()(Keep.right)
.toMat(Sink.head)(Keep.both).run()
@ -620,7 +620,10 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
case Some(close)
// we try to find out if the renderer has already flagged completion even without the upstream being completed
try {
Await.ready(wasCompletedFuture, 100.millis)
// note how this relates to the groupedWithin timeout above which will always
// close the stream, so only streams closed before that were _actually_ closed
// by the server blueprint
Await.ready(wasCompletedFuture, 150.millis)
Some(true)
} catch {
case NonFatal(_) Some(false)

View file

@ -112,6 +112,34 @@ class One2OneBidiFlowSpec extends AkkaSpec {
seen.get should ===(x + 8)
out.sendComplete() // To please assertAllStagesStopped
}
"not pull when input is closed before surpressed pull can be acted on" in assertAllStagesStopped {
val in = TestPublisher.probe[Int]()
val out = TestSubscriber.probe[Int]()
val wrappedIn = TestSubscriber.probe[Int]()
val wrappedOut = TestPublisher.probe[Int]()
Source.fromPublisher(in).via(
One2OneBidiFlow(maxPending = 1) join Flow.fromSinkAndSource(
Sink.fromSubscriber(wrappedIn),
Source.fromPublisher(wrappedOut))
).runWith(Sink.fromSubscriber(out))
out.request(2)
wrappedOut.expectRequest()
wrappedIn.request(2)
in.expectRequest()
in.sendNext(1)
wrappedIn.expectNext(1)
// now we have reached the maxPending limit
in.sendComplete()
wrappedOut.sendNext(1)
out.expectNext(1)
wrappedIn.expectComplete()
wrappedOut.sendComplete()
out.expectComplete()
}
}

View file

@ -19,6 +19,10 @@ class JavaApiSpec extends FreeSpec with MustMatchers {
Uri.create("/abc")
.query(Query.create(Pair.create("name", "paul"))) must be(Uri.create("/abc?name=paul"))
}
"query(Iterable)" in {
Uri.create("/abc")
.query(Query.create(Iterable(Pair.create("name", "tom")).asJava)) must be(Uri.create("/abc?name=tom"))
}
"addSegment" in {
Uri.create("/abc")
.addPathSegment("def") must be(Uri.create("/abc/def"))

View file

@ -72,7 +72,7 @@ public class MiscDirectivesTest extends JUnitRouteTest {
route
.run(HttpRequest.create())
.assertStatusCode(StatusCodes.NOT_FOUND);
.assertStatusCode(StatusCodes.OK);
}
@Test

View file

@ -30,6 +30,11 @@ class MiscDirectivesSpec extends RoutingSpec {
extractClientIP { echoComplete }
} ~> check { responseAs[String] shouldEqual "1.2.3.4" }
}
"extract unknown when no headers" in {
Get() ~> {
extractClientIP { echoComplete }
} ~> check { responseAs[String] shouldEqual "unknown" }
}
}
"the selectPreferredLanguage directive" should {

View file

@ -37,7 +37,8 @@ class TimeoutDirectivesSpec extends IntegrationRoutingSpec {
val route =
path("timeout") {
withRequestTimeout(500.millis) {
// needs to be long because of the race between withRequestTimeout and withRequestTimeoutResponse
withRequestTimeout(1.second) {
withRequestTimeoutResponse(request timeoutResponse) {
val response: Future[String] = slowFuture() // very slow
complete(response)
@ -46,7 +47,7 @@ class TimeoutDirectivesSpec extends IntegrationRoutingSpec {
} ~
path("equivalent") {
// updates timeout and handler at the same time
withRequestTimeout(500.millis, request timeoutResponse) {
withRequestTimeout(1.second, request timeoutResponse) {
val response: Future[String] = slowFuture() // very slow
complete(response)
}
@ -56,6 +57,11 @@ class TimeoutDirectivesSpec extends IntegrationRoutingSpec {
import response._
status should ===(StatusCodes.EnhanceYourCalm)
}
Get("/equivalent") ~!> route ~!> { response
import response._
status should ===(StatusCodes.EnhanceYourCalm)
}
}
def slowFuture(): Future[String] = Promise[String].future

View file

@ -104,7 +104,8 @@ object MiscDirectives extends MiscDirectives {
private val _extractClientIP: Directive1[RemoteAddress] =
headerValuePF { case `X-Forwarded-For`(Seq(address, _*)) address } |
headerValuePF { case `Remote-Address`(address) address } |
headerValuePF { case `X-Real-Ip`(address) address }
headerValuePF { case `X-Real-Ip`(address) address } |
provide(RemoteAddress.Unknown)
private val _requestEntityEmpty: Directive0 =
extract(_.request.entity.isKnownEmpty).flatMap(if (_) pass else reject)

View file

@ -6,11 +6,13 @@ package akka.persistence
import akka.actor.{ OneForOneStrategy, _ }
import akka.persistence.journal.AsyncWriteJournal
import akka.testkit.{ EventFilter, ImplicitSender, TestEvent }
import akka.testkit.{ EventFilter, ImplicitSender, TestEvent, TestProbe }
import scala.collection.immutable
import scala.util.control.NoStackTrace
import scala.util.{ Failure, Try }
import akka.persistence.journal.inmem.InmemJournal
import scala.concurrent.Future
object PersistentActorFailureSpec {
@ -181,9 +183,13 @@ class PersistentActorFailureSpec extends PersistenceSpec(PersistenceSpec.config(
expectMsg(List("corrupt"))
// recover by creating another with same name
system.actorOf(Props(classOf[Supervisor], testActor)) ! props
// note that if we used testActor as failure detector passed in
// the props we'd have a race on our hands (#21229)
val failProbe = TestProbe()
val sameNameProps = Props(classOf[OnRecoveryFailurePersistentActor], name, failProbe.ref)
system.actorOf(Props(classOf[Supervisor], testActor)) ! sameNameProps
val ref = expectMsgType[ActorRef]
expectMsg("recovery-failure:blahonga 1 1")
failProbe.expectMsg("recovery-failure:blahonga 1 1")
watch(ref)
expectTerminated(ref)
}

View file

@ -1,7 +1,7 @@
package akka.persistence
import akka.actor.Status.Failure
import akka.actor.{ Actor, ActorRef, Props }
import akka.actor.{ Actor, ActorLogging, ActorRef, Props }
import akka.persistence.journal.SteppingInmemJournal
import akka.testkit.{ AkkaSpec, ImplicitSender, TestProbe }
import com.typesafe.config.ConfigFactory
@ -32,7 +32,7 @@ object PersistentActorRecoveryTimeoutSpec {
}
}
class TestReceiveTimeoutActor(receiveTimeout: FiniteDuration, probe: ActorRef) extends NamedPersistentActor("recovery-timeout-actor-2") {
class TestReceiveTimeoutActor(receiveTimeout: FiniteDuration, probe: ActorRef) extends NamedPersistentActor("recovery-timeout-actor-2") with ActorLogging {
override def preStart(): Unit = {
context.setReceiveTimeout(receiveTimeout)
@ -50,6 +50,7 @@ object PersistentActorRecoveryTimeoutSpec {
}
override protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = {
log.error(cause, "Recovery of TestReceiveTimeoutActor failed")
probe ! Failure(cause)
}
}

View file

@ -1,236 +0,0 @@
/**
* Copyright (C) 2015-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.javadsl;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import akka.NotUsed;
import org.junit.ClassRule;
import org.junit.Test;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
import akka.japi.Pair;
import akka.stream.*;
import akka.testkit.AkkaSpec;
import akka.stream.javadsl.GraphDSL.Builder;
import akka.japi.function.*;
import akka.util.ByteString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertArrayEquals;
import akka.testkit.AkkaJUnitActorSystemResource;
public class BidiFlowTest extends StreamTest {
public BidiFlowTest() {
super(actorSystemResource);
}
@ClassRule
public static AkkaJUnitActorSystemResource actorSystemResource = new AkkaJUnitActorSystemResource(
"FlowTest", AkkaSpec.testConf());
private final BidiFlow<Integer, Long, ByteString, String, NotUsed> bidi = BidiFlow
.fromGraph(GraphDSL.create(
new Function<GraphDSL.Builder<NotUsed>, BidiShape<Integer, Long, ByteString, String>>() {
@Override
public BidiShape<Integer, Long, ByteString, String> apply(Builder<NotUsed> b)
throws Exception {
final FlowShape<Integer, Long> top = b.add(Flow
.of(Integer.class).map(new Function<Integer, Long>() {
@Override
public Long apply(Integer arg) {
return (long) ((int) arg) + 2;
}
}));
final FlowShape<ByteString, String> bottom = b.add(Flow
.of(ByteString.class).map(new Function<ByteString, String>() {
@Override
public String apply(ByteString arg) {
return arg.decodeString("UTF-8");
}
}));
return new BidiShape<Integer, Long, ByteString, String>(top
.in(), top.out(), bottom.in(), bottom.out());
}
}));
private final BidiFlow<Long, Integer, String, ByteString, NotUsed> inverse = BidiFlow
.fromGraph(
GraphDSL.create(
new Function<GraphDSL.Builder<NotUsed>, BidiShape<Long, Integer, String, ByteString>>() {
@Override
public BidiShape<Long, Integer, String, ByteString> apply(Builder<NotUsed> b)
throws Exception {
final FlowShape<Long, Integer> top = b.add(Flow.of(Long.class)
.map(new Function<Long, Integer>() {
@Override
public Integer apply(Long arg) {
return (int) ((long) arg) + 2;
}
}));
final FlowShape<String, ByteString> bottom = b.add(Flow
.of(String.class).map(new Function<String, ByteString>() {
@Override
public ByteString apply(String arg) {
return ByteString.fromString(arg);
}
}));
return new BidiShape<Long, Integer, String, ByteString>(top
.in(), top.out(), bottom.in(), bottom.out());
}
}));
private final BidiFlow<Integer, Long, ByteString, String, CompletionStage<Integer>> bidiMat =
BidiFlow.fromGraph(
GraphDSL.create(
Sink.<Integer>head(),
(b, sink) -> {
b.from(b.add(Source.single(42))).to(sink);
final FlowShape<Integer, Long> top = b.add(Flow
.of(Integer.class).map(i -> (long)(i + 2)));
final FlowShape<ByteString, String> bottom = b.add(Flow
.of(ByteString.class).map(bytes -> bytes.decodeString("UTF-8")));
return new BidiShape<Integer, Long, ByteString, String>(top
.in(), top.out(), bottom.in(), bottom.out());
}
));
private final String str = "Hello World";
private final ByteString bytes = ByteString.fromString(str);
private final List<Integer> list = new ArrayList<Integer>();
{
list.add(1);
list.add(2);
list.add(3);
}
private final FiniteDuration oneSec = Duration.create(1, TimeUnit.SECONDS);
@Test
public void mustWorkInIsolation() throws Exception {
final Pair<CompletionStage<Long>, CompletionStage<String>> p =
RunnableGraph.fromGraph(GraphDSL
.create(Sink.<Long> head(), Sink.<String> head(),
Keep.both(),
(b, st, sb) -> {
final BidiShape<Integer, Long, ByteString, String> s =
b.add(bidi);
b.from(b.add(Source.single(1))).toInlet(s.in1());
b.from(s.out1()).to(st);
b.from(b.add(Source.single(bytes))).toInlet(s.in2());
b.from(s.out2()).to(sb);
return ClosedShape.getInstance();
})).run(materializer);
final Long rt = p.first().toCompletableFuture().get(1, TimeUnit.SECONDS);
final String rb = p.second().toCompletableFuture().get(1, TimeUnit.SECONDS);
assertEquals((Long) 3L, rt);
assertEquals(str, rb);
}
@Test
public void mustWorkAsAFlowThatIsOpenOnTheLeft() throws Exception {
final Flow<Integer, String, NotUsed> f = bidi.join(Flow.of(Long.class).map(
new Function<Long, ByteString>() {
@Override public ByteString apply(Long arg) {
return ByteString.fromString("Hello " + arg);
}
}));
final CompletionStage<List<String>> result = Source.from(list).via(f).limit(10).runWith(Sink.<String>seq(), materializer);
assertEquals(Arrays.asList("Hello 3", "Hello 4", "Hello 5"), result.toCompletableFuture().get(1, TimeUnit.SECONDS));
}
@Test
public void mustWorkAsAFlowThatIsOpenOnTheRight() throws Exception {
final Flow<ByteString, Long, NotUsed> f = Flow.of(String.class).map(
new Function<String, Integer>() {
@Override public Integer apply(String arg) {
return Integer.valueOf(arg);
}
}).join(bidi);
final List<ByteString> inputs = Arrays.asList(ByteString.fromString("1"), ByteString.fromString("2"));
final CompletionStage<List<Long>> result = Source.from(inputs).via(f).limit(10).runWith(Sink.<Long>seq(), materializer);
assertEquals(Arrays.asList(3L, 4L), result.toCompletableFuture().get(1, TimeUnit.SECONDS));
}
@Test
public void mustWorkWhenAtopItsInverse() throws Exception {
final Flow<Integer,String,NotUsed> f = bidi.atop(inverse).join(Flow.of(Integer.class).map(
new Function<Integer, String>() {
@Override public String apply(Integer arg) {
return arg.toString();
}
}));
final CompletionStage<List<String>> result = Source.from(list).via(f).limit(10).runWith(Sink.<String>seq(), materializer);
assertEquals(Arrays.asList("5", "6", "7"), result.toCompletableFuture().get(1, TimeUnit.SECONDS));
}
@Test
public void mustWorkWhenReversed() throws Exception {
final Flow<Integer,String,NotUsed> f = Flow.of(Integer.class).map(
new Function<Integer, String>() {
@Override public String apply(Integer arg) {
return arg.toString();
}
}).join(inverse.reversed()).join(bidi.reversed());
final CompletionStage<List<String>> result = Source.from(list).via(f).limit(10).runWith(Sink.<String>seq(), materializer);
assertEquals(Arrays.asList("5", "6", "7"), result.toCompletableFuture().get(1, TimeUnit.SECONDS));
}
@Test
public void mustMaterializeToItsValue() throws Exception {
final CompletionStage<Integer> f = RunnableGraph.fromGraph(
GraphDSL.create(bidiMat, (b, shape) -> {
final FlowShape<String, Integer> left = b.add(Flow.of(String.class).map(Integer::valueOf));
final FlowShape<Long, ByteString> right = b.add(Flow.of(Long.class).map(s -> ByteString.fromString("Hello " + s)));
b.from(shape.out2()).via(left).toInlet(shape.in1())
.from(shape.out1()).via(right).toInlet(shape.in2());
return ClosedShape.getInstance();
})).run(materializer);
assertEquals((Integer) 42, f.toCompletableFuture().get(1, TimeUnit.SECONDS));
}
@Test
public void mustCombineMaterializationValues() throws Exception {
final Flow<String, Integer, CompletionStage<Integer>> left = Flow.fromGraph(GraphDSL.create(
Sink.<Integer>head(), (b, sink) -> {
final UniformFanOutShape<Integer, Integer> bcast = b.add(Broadcast.<Integer>create(2));
final UniformFanInShape<Integer, Integer> merge = b.add(Merge.<Integer>create(2));
final FlowShape<String, Integer> flow = b.add(Flow.of(String.class).map(Integer::valueOf));
b.from(bcast).to(sink)
.from(b.add(Source.single(1))).viaFanOut(bcast).toFanIn(merge)
.from(flow).toFanIn(merge);
return new FlowShape<String, Integer>(flow.in(), merge.out());
}));
final Flow<Long, ByteString, CompletionStage<List<Long>>> right = Flow.fromGraph(GraphDSL.create(
Sink.<List<Long>>head(), (b, sink) -> {
final FlowShape<Long, List<Long>> flow = b.add(Flow.of(Long.class).grouped(10));
b.from(flow).to(sink);
return new FlowShape<Long, ByteString>(flow.in(), b.add(Source.single(ByteString.fromString("10"))).out());
}));
final Pair<Pair<CompletionStage<Integer>, CompletionStage<Integer>>, CompletionStage<List<Long>>> result =
left.joinMat(bidiMat, Keep.both()).joinMat(right, Keep.both()).run(materializer);
final CompletionStage<Integer> l = result.first().first();
final CompletionStage<Integer> m = result.first().second();
final CompletionStage<List<Long>> r = result.second();
assertEquals((Integer) 1, l.toCompletableFuture().get(1, TimeUnit.SECONDS));
assertEquals((Integer) 42, m.toCompletableFuture().get(1, TimeUnit.SECONDS));
final Long[] rr = r.toCompletableFuture().get(1, TimeUnit.SECONDS).toArray(new Long[2]);
Arrays.sort(rr);
assertArrayEquals(new Long[] { 3L, 12L }, rr);
}
public void mustSuitablyOverrideAttributeHandlingMethods() {
@SuppressWarnings("unused")
final BidiFlow<Integer, Long, ByteString, String, NotUsed> b =
bidi.withAttributes(Attributes.name("")).addAttributes(Attributes.asyncBoundary()).named("");
}
}

View file

@ -1,377 +0,0 @@
/**
* Copyright (C) 2014-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.javadsl;
import akka.NotUsed;
import akka.japi.Pair;
import akka.pattern.PatternsCS;
import akka.japi.tuple.Tuple4;
import akka.stream.*;
import akka.stream.javadsl.GraphDSL.Builder;
import akka.stream.stage.*;
import akka.japi.function.*;
import akka.testkit.AkkaSpec;
import akka.testkit.JavaTestKit;
import akka.testkit.TestProbe;
import akka.testkit.AkkaJUnitActorSystemResource;
import org.junit.ClassRule;
import org.junit.Test;
import org.reactivestreams.Publisher;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import java.util.*;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
public class GraphDSLTest extends StreamTest {
public GraphDSLTest() {
super(actorSystemResource);
}
@ClassRule
public static AkkaJUnitActorSystemResource actorSystemResource = new AkkaJUnitActorSystemResource("GraphDSLTest",
AkkaSpec.testConf());
@SuppressWarnings("serial")
public <T> Creator<Stage<T, T>> op() {
return new akka.japi.function.Creator<Stage<T, T>>() {
@Override
public PushPullStage<T, T> create() throws Exception {
return new PushPullStage<T, T>() {
@Override
public SyncDirective onPush(T element, Context<T> ctx) {
return ctx.push(element);
}
@Override
public SyncDirective onPull(Context<T> ctx) {
return ctx.pull();
}
};
}
};
}
@Test
public void mustBeAbleToUseMerge() throws Exception {
final Flow<String, String, NotUsed> f1 =
Flow.of(String.class).transform(GraphDSLTest.this.<String> op()).named("f1");
final Flow<String, String, NotUsed> f2 =
Flow.of(String.class).transform(GraphDSLTest.this.<String> op()).named("f2");
@SuppressWarnings("unused")
final Flow<String, String, NotUsed> f3 =
Flow.of(String.class).transform(GraphDSLTest.this.<String> op()).named("f3");
final Source<String, NotUsed> in1 = Source.from(Arrays.asList("a", "b", "c"));
final Source<String, NotUsed> in2 = Source.from(Arrays.asList("d", "e", "f"));
final Sink<String, Publisher<String>> publisher = Sink.asPublisher(AsPublisher.WITHOUT_FANOUT);
final Source<String, NotUsed> source = Source.fromGraph(
GraphDSL.create(new Function<GraphDSL.Builder<NotUsed>, SourceShape<String>>() {
@Override
public SourceShape<String> apply(Builder<NotUsed> b) throws Exception {
final UniformFanInShape<String, String> merge = b.add(Merge.<String>create(2));
b.from(b.add(in1)).via(b.add(f1)).toInlet(merge.in(0));
b.from(b.add(in2)).via(b.add(f2)).toInlet(merge.in(1));
return new SourceShape<String>(merge.out());
}
}));
// collecting
final Publisher<String> pub = source.runWith(publisher, materializer);
final CompletionStage<List<String>> all = Source.fromPublisher(pub).limit(100).runWith(Sink.<String>seq(), materializer);
final List<String> result = all.toCompletableFuture().get(3, TimeUnit.SECONDS);
assertEquals(new HashSet<Object>(Arrays.asList("a", "b", "c", "d", "e", "f")), new HashSet<String>(result));
}
@Test
public void mustBeAbleToUseZip() {
final JavaTestKit probe = new JavaTestKit(system);
final Iterable<String> input1 = Arrays.asList("A", "B", "C");
final Iterable<Integer> input2 = Arrays.asList(1, 2, 3);
RunnableGraph.fromGraph( GraphDSL.create(
new Function<Builder<NotUsed>,ClosedShape>() {
@Override
public ClosedShape apply(final Builder<NotUsed> b) throws Exception {
final Source<String, NotUsed> in1 = Source.from(input1);
final Source<Integer, NotUsed> in2 = Source.from(input2);
final FanInShape2<String, Integer, Pair<String,Integer>> zip = b.add(Zip.<String, Integer>create());
final Sink<Pair<String, Integer>, NotUsed> out = createSink(probe);
b.from(b.add(in1)).toInlet(zip.in0());
b.from(b.add(in2)).toInlet(zip.in1());
b.from(zip.out()).to(b.add(out));
return ClosedShape.getInstance();
}
})).run(materializer);
List<Object> output = Arrays.asList(probe.receiveN(3));
@SuppressWarnings("unchecked")
List<Pair<String, Integer>> expected = Arrays.asList(new Pair<String, Integer>("A", 1), new Pair<String, Integer>(
"B", 2), new Pair<String, Integer>("C", 3));
assertEquals(expected, output);
}
@Test
public void mustBeAbleToUseUnzip() {
final JavaTestKit probe1 = new JavaTestKit(system);
final JavaTestKit probe2 = new JavaTestKit(system);
@SuppressWarnings("unchecked")
final List<Pair<String, Integer>> input = Arrays.asList(new Pair<String, Integer>("A", 1),
new Pair<String, Integer>("B", 2), new Pair<String, Integer>("C", 3));
final Iterable<String> expected1 = Arrays.asList("A", "B", "C");
final Iterable<Integer> expected2 = Arrays.asList(1, 2, 3);
RunnableGraph.fromGraph(GraphDSL.create(
new Function<Builder<NotUsed>, ClosedShape>() {
@Override
public ClosedShape apply(final Builder<NotUsed> b) throws Exception {
final SourceShape<Pair<String, Integer>> in = b.add(Source.from(input));
final FanOutShape2<Pair<String, Integer>, String, Integer> unzip = b.add(Unzip.<String, Integer>create());
final SinkShape<String> out1 = b.add(GraphDSLTest.<String>createSink(probe1));
final SinkShape<Integer> out2 = b.add(GraphDSLTest.<Integer>createSink(probe2));
b.from(in).toInlet(unzip.in());
b.from(unzip.out0()).to(out1);
b.from(unzip.out1()).to(out2);
return ClosedShape.getInstance();
}
})).run(materializer);
List<Object> output1 = Arrays.asList(probe1.receiveN(3));
List<Object> output2 = Arrays.asList(probe2.receiveN(3));
assertEquals(expected1, output1);
assertEquals(expected2, output2);
}
private static <T> Sink<T, NotUsed> createSink(final JavaTestKit probe){
return Sink.actorRef(probe.getRef(), "onComplete");
}
@Test
public void mustBeAbleToUseUnzipWith() throws Exception {
final JavaTestKit probe1 = new JavaTestKit(system);
final JavaTestKit probe2 = new JavaTestKit(system);
RunnableGraph.fromGraph(GraphDSL.create(
new Function<Builder<NotUsed>, ClosedShape>() {
@Override
public ClosedShape apply(final Builder<NotUsed> b) throws Exception {
final Source<Integer, NotUsed> in = Source.single(1);
final FanOutShape2<Integer, String, Integer> unzip = b.add(UnzipWith.create(
new Function<Integer, Pair<String, Integer>>() {
@Override
public Pair<String, Integer> apply(Integer l) throws Exception {
return new Pair<String, Integer>(l + "!", l);
}
})
);
final SinkShape<String> out1 = b.add(GraphDSLTest.<String>createSink(probe1));
final SinkShape<Integer> out2 = b.add(GraphDSLTest.<Integer>createSink(probe2));
b.from(b.add(in)).toInlet(unzip.in());
b.from(unzip.out0()).to(out1);
b.from(unzip.out1()).to(out2);
return ClosedShape.getInstance();
}
}
)).run(materializer);
Duration d = Duration.create(3, TimeUnit.SECONDS);
Object output1 = probe1.receiveOne(d);
Object output2 = probe2.receiveOne(d);
assertEquals("1!", output1);
assertEquals(1, output2);
}
@Test
public void mustBeAbleToUseUnzip4With() throws Exception {
final JavaTestKit probe1 = new JavaTestKit(system);
final JavaTestKit probe2 = new JavaTestKit(system);
final JavaTestKit probe3 = new JavaTestKit(system);
final JavaTestKit probe4 = new JavaTestKit(system);
RunnableGraph.fromGraph(GraphDSL.create(
new Function<Builder<NotUsed>, ClosedShape>() {
@Override
public ClosedShape apply(final Builder<NotUsed> b) throws Exception {
final Source<Integer, NotUsed> in = Source.single(1);
final FanOutShape4<Integer, String, Integer, String, Integer> unzip = b.add(UnzipWith.create4(
new Function<Integer, Tuple4<String, Integer, String, Integer>>() {
@Override
public Tuple4<String, Integer, String, Integer> apply(Integer l) throws Exception {
return new Tuple4<String, Integer, String, Integer>(l.toString(), l, l + "+" + l, l + l);
}
})
);
final SinkShape<String> out1 = b.add(GraphDSLTest.<String>createSink(probe1));
final SinkShape<Integer> out2 = b.add(GraphDSLTest.<Integer>createSink(probe2));
final SinkShape<String> out3 = b.add(GraphDSLTest.<String>createSink(probe3));
final SinkShape<Integer> out4 = b.add(GraphDSLTest.<Integer>createSink(probe4));
b.from(b.add(in)).toInlet(unzip.in());
b.from(unzip.out0()).to(out1);
b.from(unzip.out1()).to(out2);
b.from(unzip.out2()).to(out3);
b.from(unzip.out3()).to(out4);
return ClosedShape.getInstance();
}
})).run(materializer);
Duration d = Duration.create(3, TimeUnit.SECONDS);
Object output1 = probe1.receiveOne(d);
Object output2 = probe2.receiveOne(d);
Object output3 = probe3.receiveOne(d);
Object output4 = probe4.receiveOne(d);
assertEquals("1", output1);
assertEquals(1, output2);
assertEquals("1+1", output3);
assertEquals(2, output4);
}
@Test
public void mustBeAbleToUseZipWith() throws Exception {
final Source<Integer, NotUsed> in1 = Source.single(1);
final Source<Integer, NotUsed> in2 = Source.single(10);
final Graph<FanInShape2<Integer, Integer, Integer>, NotUsed> sumZip = ZipWith.create(
new Function2<Integer, Integer, Integer>() {
@Override public Integer apply(Integer l, Integer r) throws Exception {
return l + r;
}
});
final CompletionStage<Integer> future = RunnableGraph.fromGraph(GraphDSL.create(Sink.<Integer>head(),
(b, out) -> {
final FanInShape2<Integer, Integer, Integer> zip = b.add(sumZip);
b.from(b.add(in1)).toInlet(zip.in0());
b.from(b.add(in2)).toInlet(zip.in1());
b.from(zip.out()).to(out);
return ClosedShape.getInstance();
})).run(materializer);
final Integer result = future.toCompletableFuture().get(3, TimeUnit.SECONDS);
assertEquals(11, (int) result);
}
@Test
public void mustBeAbleToUseZipN() throws Exception {
final Source<Integer, NotUsed> in1 = Source.single(1);
final Source<Integer, NotUsed> in2 = Source.single(10);
final Graph<UniformFanInShape<Integer, List<Integer>>, NotUsed> sumZip = ZipN.create(2);
final CompletionStage<List<Integer>> future = RunnableGraph.fromGraph(GraphDSL.create(Sink.<List<Integer>>head(),
(b, out) -> {
final UniformFanInShape<Integer, List<Integer>> zip = b.add(sumZip);
b.from(b.add(in1)).toInlet(zip.in(0));
b.from(b.add(in2)).toInlet(zip.in(1));
b.from(zip.out()).to(out);
return ClosedShape.getInstance();
})).run(materializer);
final List<Integer> result = future.toCompletableFuture().get(3, TimeUnit.SECONDS);
assertEquals(Arrays.asList(1, 10), result);
}
@Test
public void mustBeAbleToUseZipWithN() throws Exception {
final Source<Integer, NotUsed> in1 = Source.single(1);
final Source<Integer, NotUsed> in2 = Source.single(10);
final Graph<UniformFanInShape<Integer, Integer>, NotUsed> sumZip = ZipWithN.create(
new Function<List<Integer>, Integer>() {
@Override public Integer apply(List<Integer> list) throws Exception {
Integer sum = 0;
for(Integer i : list) {
sum += i;
}
return sum;
}
}, 2);
final CompletionStage<Integer> future = RunnableGraph.fromGraph(GraphDSL.create(Sink.<Integer>head(),
(b, out) -> {
final UniformFanInShape<Integer, Integer> zip = b.add(sumZip);
b.from(b.add(in1)).toInlet(zip.in(0));
b.from(b.add(in2)).toInlet(zip.in(1));
b.from(zip.out()).to(out);
return ClosedShape.getInstance();
})).run(materializer);
final Integer result = future.toCompletableFuture().get(3, TimeUnit.SECONDS);
assertEquals(11, (int) result);
}
@Test
public void mustBeAbleToUseZip4With() throws Exception {
final Source<Integer, NotUsed> in1 = Source.single(1);
final Source<Integer, NotUsed> in2 = Source.single(10);
final Source<Integer, NotUsed> in3 = Source.single(100);
final Source<Integer, NotUsed> in4 = Source.single(1000);
final Graph<FanInShape4<Integer, Integer, Integer, Integer, Integer>, NotUsed> sumZip = ZipWith.create4(
new Function4<Integer, Integer, Integer, Integer, Integer>() {
@Override public Integer apply(Integer i1, Integer i2, Integer i3, Integer i4) throws Exception {
return i1 + i2 + i3 + i4;
}
});
final CompletionStage<Integer> future = RunnableGraph.fromGraph(
GraphDSL.create(Sink.<Integer>head(), (b, out) -> {
final FanInShape4<Integer, Integer, Integer, Integer, Integer> zip = b.add(sumZip);
b.from(b.add(in1)).toInlet(zip.in0());
b.from(b.add(in2)).toInlet(zip.in1());
b.from(b.add(in3)).toInlet(zip.in2());
b.from(b.add(in4)).toInlet(zip.in3());
b.from(zip.out()).to(out);
return ClosedShape.getInstance();
})).run(materializer);
final Integer result = future.toCompletableFuture().get(3, TimeUnit.SECONDS);
assertEquals(1111, (int) result);
}
@Test
public void mustBeAbleToUseMatValue() throws Exception {
@SuppressWarnings("unused")
final Source<Integer, NotUsed> in1 = Source.single(1);
final TestProbe probe = TestProbe.apply(system);
final CompletionStage<Integer> future = RunnableGraph.fromGraph(
GraphDSL.create(Sink.<Integer> head(), (b, out) -> {
b.from(b.add(Source.single(1))).to(out);
b.from(b.materializedValue()).to(b.add(Sink.foreach(mat -> PatternsCS.pipe(mat, system.dispatcher()).to(probe.ref()))));
return ClosedShape.getInstance();
})).run(materializer);
final Integer result = future.toCompletableFuture().get(3, TimeUnit.SECONDS);
assertEquals(1, (int) result);
probe.expectMsg(1);
}
}

View file

@ -39,7 +39,7 @@ class DslConsistencySpec extends WordSpec with Matchers {
Set("create", "apply", "ops", "appendJava", "andThen", "andThenMat", "isIdentity", "withAttributes", "transformMaterializing") ++
Set("asScala", "asJava", "deprecatedAndThen", "deprecatedAndThenMat")
val graphHelpers = Set("zipGraph", "zipWithGraph", "mergeGraph", "mergeSortedGraph", "interleaveGraph", "concatGraph", "prependGraph", "alsoToGraph")
val graphHelpers = Set("zipGraph", "zipWithGraph", "mergeGraph", "mergeSortedGraph", "interleaveGraph", "concatGraph", "prependGraph", "alsoToGraph", "orElseGraph")
val allowMissing: Map[Class[_], Set[String]] = Map(
jFlowClass graphHelpers,
jSourceClass graphHelpers,

View file

@ -4,6 +4,7 @@
package akka.stream.io
import java.nio.file.{ Files, Path, StandardOpenOption }
import akka.actor.ActorSystem
import akka.stream.impl.ActorMaterializerImpl
import akka.stream.impl.StreamSupervisor
@ -15,6 +16,9 @@ import akka.stream.ActorMaterializer
import akka.stream.ActorMaterializerSettings
import akka.stream.ActorAttributes
import akka.util.{ ByteString, Timeout }
import com.google.common.jimfs.{ Configuration, Jimfs }
import org.scalatest.BeforeAndAfterAll
import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.duration._
@ -23,6 +27,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) {
val settings = ActorMaterializerSettings(system).withDispatcher("akka.actor.default-dispatcher")
implicit val materializer = ActorMaterializer(settings)
val fs = Jimfs.newFileSystem("FileSinkSpec", Configuration.unix())
val TestLines = {
val b = ListBuffer[String]()
@ -136,7 +141,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) {
}
private def targetFile(block: Path Unit, create: Boolean = true) {
val targetFile = Files.createTempFile("synchronous-file-sink", ".tmp")
val targetFile = Files.createTempFile(fs.getPath("/"), "synchronous-file-sink", ".tmp")
if (!create) Files.delete(targetFile)
try block(targetFile) finally Files.delete(targetFile)
}
@ -146,4 +151,8 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) {
new String(out) should ===(contents)
}
override def afterTermination(): Unit = {
fs.close()
}
}

View file

@ -3,9 +3,10 @@
*/
package akka.stream.io
import java.nio.file.Files
import java.nio.file.{ FileSystems, Files }
import java.nio.charset.StandardCharsets.UTF_8
import java.util.Random
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.ActorMaterializerSettings
@ -22,6 +23,8 @@ import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestDuration
import akka.util.ByteString
import akka.util.Timeout
import com.google.common.jimfs.{ Configuration, Jimfs }
import scala.concurrent.Await
import scala.concurrent.duration._
@ -34,6 +37,8 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
val settings = ActorMaterializerSettings(system).withDispatcher("akka.actor.default-dispatcher")
implicit val materializer = ActorMaterializer(settings)
val fs = Jimfs.newFileSystem("FileSourceSpec", Configuration.unix())
val TestText = {
("a" * 1000) +
("b" * 1000) +
@ -44,14 +49,14 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
}
val testFile = {
val f = Files.createTempFile("file-source-spec", ".tmp")
val f = Files.createTempFile(fs.getPath("/"), "file-source-spec", ".tmp")
Files.newBufferedWriter(f, UTF_8).append(TestText).close()
f
}
val notExistingFile = {
// this way we make sure it doesn't accidentally exist
val f = Files.createTempFile("not-existing-file", ".tmp")
val f = Files.createTempFile(fs.getPath("/"), "not-existing-file", ".tmp")
Files.delete(f)
f
}
@ -59,7 +64,7 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
val LinesCount = 2000 + new Random().nextInt(300)
val manyLines = {
val f = Files.createTempFile(s"file-source-spec-lines_$LinesCount", "tmp")
val f = Files.createTempFile(fs.getPath("/"), s"file-source-spec-lines_$LinesCount", "tmp")
val w = Files.newBufferedWriter(f, UTF_8)
(1 to LinesCount).foreach { l
w.append("a" * l).append("\n")
@ -206,8 +211,7 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
}
override def afterTermination(): Unit = {
Files.delete(testFile)
Files.delete(manyLines)
fs.close()
}
}

View file

@ -0,0 +1,271 @@
/**
* Copyright (C) 2014-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.scaladsl
import scala.util.control.NoStackTrace
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._
import akka.NotUsed
import akka.stream.ActorMaterializer
import akka.stream.ActorAttributes.supervisionStrategy
import akka.stream.Supervision.{ restartingDecider, resumingDecider }
import akka.stream.impl.ReactiveStreamsCompliance
import akka.testkit.{ AkkaSpec, TestLatch }
import akka.stream.testkit._, Utils._
import org.scalatest.concurrent.PatienceConfiguration.Timeout
class FlowFoldAsyncSpec extends StreamSpec {
implicit val materializer = ActorMaterializer()
implicit def ec = materializer.executionContext
val timeout = Timeout(3.seconds)
"A FoldAsync" must {
val input = 1 to 100
val expected = input.sum
val inputSource = Source(input)
val foldSource = inputSource.foldAsync[Int](0) { (a, b)
Future(a + b)
}
val flowDelayMS = 100L
val foldFlow = Flow[Int].foldAsync(0) {
(a, b) Future { Thread.sleep(flowDelayMS); a + b }
}
val foldSink = Sink.foldAsync[Int, Int](0) { (a, b) Future(a + b) }
"work when using Source.foldAsync" in assertAllStagesStopped {
foldSource.runWith(Sink.head).futureValue(timeout) should ===(expected)
}
"work when using Sink.foldAsync" in assertAllStagesStopped {
inputSource.runWith(foldSink).futureValue(timeout) should ===(expected)
}
"work when using Flow.foldAsync" in assertAllStagesStopped {
val flowTimeout =
Timeout((flowDelayMS * input.size).milliseconds + 3.seconds)
inputSource.via(foldFlow).runWith(Sink.head).
futureValue(flowTimeout) should ===(expected)
}
"work when using Source.foldAsync + Flow.foldAsync + Sink.foldAsync" in assertAllStagesStopped {
foldSource.via(foldFlow).runWith(foldSink).
futureValue(timeout) should ===(expected)
}
"propagate an error" in assertAllStagesStopped {
val error = new Exception with NoStackTrace
val future = inputSource.map(x if (x > 50) throw error else x).runFoldAsync[NotUsed](NotUsed)(noneAsync)
the[Exception] thrownBy Await.result(future, 3.seconds) should be(error)
}
"complete future with failure when folding function throws" in assertAllStagesStopped {
val error = new Exception with NoStackTrace
val future = inputSource.runFoldAsync(0) { (x, y)
if (x > 50) Future.failed(error) else Future(x + y)
}
the[Exception] thrownBy Await.result(future, 3.seconds) should be(error)
}
"not blow up with high request counts" in {
val probe = TestSubscriber.manualProbe[Long]()
var i = 0
Source.fromIterator(() Iterator.fill[Int](10000) { i += 1; i }).
foldAsync(1L) { (a, b) Future(a + b) }.
runWith(Sink.asPublisher(true)).subscribe(probe)
val subscription = probe.expectSubscription()
subscription.request(Int.MaxValue)
probe.expectNext(50005001L)
probe.expectComplete()
}
"signal future failure" in assertAllStagesStopped {
val probe = TestSubscriber.probe[Int]()
implicit val ec = system.dispatcher
Source(1 to 5).foldAsync(0) { (_, n)
Future(if (n == 3) throw TE("err1") else n)
}.to(Sink.fromSubscriber(probe)).run()
val sub = probe.expectSubscription()
sub.request(10)
probe.expectError().getMessage should be("err1")
}
"signal error from foldAsync" in assertAllStagesStopped {
val latch = TestLatch(1)
val c = TestSubscriber.manualProbe[Int]()
implicit val ec = system.dispatcher
val p = Source(1 to 5).mapAsync(4)(n
if (n == 3) throw new RuntimeException("err2") with NoStackTrace
else {
Future {
Await.ready(latch, 10.seconds)
n
}
}).
to(Sink.fromSubscriber(c)).run()
val sub = c.expectSubscription()
sub.request(10)
c.expectError().getMessage should be("err2")
latch.countDown()
}
"resume after future failure" in assertAllStagesStopped {
val probe = TestSubscriber.probe[(Int, Int)]()
implicit val ec = system.dispatcher
Source(1 to 5).foldAsync(0 1) {
case ((i, res), n)
Future {
if (n == 3) throw new RuntimeException("err3") with NoStackTrace
else n (i + (res * n))
}
}.withAttributes(supervisionStrategy(resumingDecider)).
to(Sink.fromSubscriber(probe)).run()
val sub = probe.expectSubscription()
sub.request(10)
probe.expectNext(5 74)
probe.expectComplete()
}
"restart after future failure" in assertAllStagesStopped {
val probe = TestSubscriber.probe[(Int, Int)]()
implicit val ec = system.dispatcher
Source(1 to 5).foldAsync(0 1) {
case ((i, res), n)
Future {
if (n == 3) throw new RuntimeException("err3") with NoStackTrace
else n (i + (res * n))
}
}.withAttributes(supervisionStrategy(restartingDecider)).
to(Sink.fromSubscriber(probe)).run()
val sub = probe.expectSubscription()
sub.request(10)
probe.expectNext(5 24)
probe.expectComplete()
}
"resume after multiple failures" in assertAllStagesStopped {
val futures: List[Future[String]] = List(
Future.failed(Utils.TE("failure1")),
Future.failed(Utils.TE("failure2")),
Future.failed(Utils.TE("failure3")),
Future.failed(Utils.TE("failure4")),
Future.failed(Utils.TE("failure5")),
Future.successful("happy!"))
Source(futures).mapAsync(2)(identity).
withAttributes(supervisionStrategy(resumingDecider)).runWith(Sink.head).
futureValue(timeout) should ===("happy!")
}
"finish after future failure" in assertAllStagesStopped {
Source(1 to 3).foldAsync(1) { (_, n)
Future {
if (n == 3) throw new RuntimeException("err3b") with NoStackTrace
else n
}
}.withAttributes(supervisionStrategy(resumingDecider))
.grouped(10).runWith(Sink.head).
futureValue(Timeout(1.second)) should ===(Seq(2))
}
"resume when foldAsync throws" in {
val c = TestSubscriber.manualProbe[(Int, Int)]()
implicit val ec = system.dispatcher
val p = Source(1 to 5).foldAsync(0 1) {
case ((i, res), n)
if (n == 3) throw new RuntimeException("err4") with NoStackTrace
else Future(n (i + (res * n)))
}.withAttributes(supervisionStrategy(resumingDecider)).
to(Sink.fromSubscriber(c)).run()
val sub = c.expectSubscription()
sub.request(10)
c.expectNext(5 74)
c.expectComplete()
}
"restart when foldAsync throws" in {
val c = TestSubscriber.manualProbe[(Int, Int)]()
implicit val ec = system.dispatcher
val p = Source(1 to 5).foldAsync(0 1) {
case ((i, res), n)
if (n == 3) throw new RuntimeException("err4") with NoStackTrace
else Future(n (i + (res * n)))
}.withAttributes(supervisionStrategy(restartingDecider)).
to(Sink.fromSubscriber(c)).run()
val sub = c.expectSubscription()
sub.request(10)
c.expectNext(5 24)
c.expectComplete()
}
"signal NPE when future is completed with null" in {
val c = TestSubscriber.manualProbe[String]()
val p = Source(List("a", "b")).foldAsync("") { (_, elem)
Future.successful(null.asInstanceOf[String])
}.to(Sink.fromSubscriber(c)).run()
val sub = c.expectSubscription()
sub.request(10)
c.expectError().getMessage should be(ReactiveStreamsCompliance.ElementMustNotBeNullMsg)
}
"resume when future is completed with null" in {
val c = TestSubscriber.manualProbe[String]()
val p = Source(List("a", "b", "c")).foldAsync("") { (str, elem)
if (elem == "b") Future.successful(null.asInstanceOf[String])
else Future.successful(str + elem)
}.withAttributes(supervisionStrategy(resumingDecider)).
to(Sink.fromSubscriber(c)).run()
val sub = c.expectSubscription()
sub.request(10)
c.expectNext("ac") // 1: "" + "a"; 2: null => resume "a"; 3: "a" + "c"
c.expectComplete()
}
"restart when future is completed with null" in {
val c = TestSubscriber.manualProbe[String]()
val p = Source(List("a", "b", "c")).foldAsync("") { (str, elem)
if (elem == "b") Future.successful(null.asInstanceOf[String])
else Future.successful(str + elem)
}.withAttributes(supervisionStrategy(restartingDecider)).
to(Sink.fromSubscriber(c)).run()
val sub = c.expectSubscription()
sub.request(10)
c.expectNext("c") // 1: "" + "a"; 2: null => restart ""; 3: "" + "c"
c.expectComplete()
}
"should handle cancel properly" in assertAllStagesStopped {
val pub = TestPublisher.manualProbe[Int]()
val sub = TestSubscriber.manualProbe[Int]()
Source.fromPublisher(pub).
foldAsync(0) { (_, n) Future.successful(n) }.
runWith(Sink.fromSubscriber(sub))
val upstream = pub.expectSubscription()
upstream.expectRequest()
sub.expectSubscription().cancel()
upstream.expectCancellation()
}
}
// Keep
def noneAsync[L, R]: (L, R) Future[NotUsed] = { (_: Any, _: Any)
Future.successful(NotUsed)
}.asInstanceOf[(L, R) Future[NotUsed]]
}

View file

@ -19,9 +19,9 @@ class FlowFoldSpec extends StreamSpec {
"A Fold" must {
val input = 1 to 100
val expected = input.sum
val inputSource = Source(input).filter(_ true).map(identity)
val foldSource = inputSource.fold[Int](0)(_ + _).filter(_ true).map(identity)
val foldFlow = Flow[Int].filter(_ true).map(identity).fold(0)(_ + _).filter(_ true).map(identity)
val inputSource = Source(input)
val foldSource = inputSource.fold[Int](0)(_ + _)
val foldFlow = Flow[Int].fold(0)(_ + _)
val foldSink = Sink.fold[Int, Int](0)(_ + _)
"work when using Source.runFold" in assertAllStagesStopped {

View file

@ -48,10 +48,12 @@ class FlowMapAsyncSpec extends StreamSpec {
"produce future elements in order" in {
val c = TestSubscriber.manualProbe[Int]()
implicit val ec = system.dispatcher
val p = Source(1 to 50).mapAsync(4)(n Future {
Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10))
n
}).to(Sink.fromSubscriber(c)).run()
val p = Source(1 to 50).mapAsync(4)(n
if (n % 3 == 0) Future.successful(n)
else Future {
Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10))
n
}).to(Sink.fromSubscriber(c)).run()
val sub = c.expectSubscription()
sub.request(1000)
for (n 1 to 50) c.expectNext(n)
@ -99,6 +101,27 @@ class FlowMapAsyncSpec extends StreamSpec {
latch.countDown()
}
"signal future failure asap" in assertAllStagesStopped {
val latch = TestLatch(1)
val done = Source(1 to 5)
.map { n
if (n == 1) n
else {
// slow upstream should not block the error
Await.ready(latch, 10.seconds)
n
}
}
.mapAsync(4) { n
if (n == 1) Future.failed(new RuntimeException("err1") with NoStackTrace)
else Future.successful(n)
}.runWith(Sink.ignore)
intercept[RuntimeException] {
Await.result(done, remainingOrDefault)
}.getMessage should be("err1")
latch.countDown()
}
"signal error from mapAsync" in assertAllStagesStopped {
val latch = TestLatch(1)
val c = TestSubscriber.manualProbe[Int]()

View file

@ -55,10 +55,15 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec {
val probe = TestProbe()
val c = TestSubscriber.manualProbe[Int]()
implicit val ec = system.dispatcher
val p = Source(1 to 20).mapAsyncUnordered(4)(n Future {
probe.ref ! n
n
}).to(Sink.fromSubscriber(c)).run()
val p = Source(1 to 20).mapAsyncUnordered(4)(n
if (n % 3 == 0) {
probe.ref ! n
Future.successful(n)
} else
Future {
probe.ref ! n
n
}).to(Sink.fromSubscriber(c)).run()
val sub = c.expectSubscription()
c.expectNoMsg(200.millis)
probe.expectNoMsg(Duration.Zero)
@ -93,6 +98,27 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec {
latch.countDown()
}
"signal future failure asap" in assertAllStagesStopped {
val latch = TestLatch(1)
val done = Source(1 to 5)
.map { n
if (n == 1) n
else {
// slow upstream should not block the error
Await.ready(latch, 10.seconds)
n
}
}
.mapAsyncUnordered(4) { n
if (n == 1) Future.failed(new RuntimeException("err1") with NoStackTrace)
else Future.successful(n)
}.runWith(Sink.ignore)
intercept[RuntimeException] {
Await.result(done, remainingOrDefault)
}.getMessage should be("err1")
latch.countDown()
}
"signal error from mapAsyncUnordered" in assertAllStagesStopped {
val latch = TestLatch(1)
val c = TestSubscriber.manualProbe[Int]()

View file

@ -0,0 +1,151 @@
/**
* Copyright (C) 2016 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.scaladsl
import akka.stream.testkit.Utils.TE
import akka.stream.testkit.{ TestPublisher, TestSubscriber }
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings }
import akka.testkit.AkkaSpec
import scala.collection.immutable.Seq
class FlowOrElseSpec extends AkkaSpec {
val settings = ActorMaterializerSettings(system)
implicit val materializer = ActorMaterializer(settings)
"An OrElse flow" should {
"pass elements from the first input" in {
val source1 = Source(Seq(1, 2, 3))
val source2 = Source(Seq(4, 5, 6))
val sink = Sink.seq[Int]
source1.orElse(source2).runWith(sink).futureValue shouldEqual Seq(1, 2, 3)
}
"pass elements from the second input if the first completes with no elements emitted" in {
val source1 = Source.empty[Int]
val source2 = Source(Seq(4, 5, 6))
val sink = Sink.seq[Int]
source1.orElse(source2).runWith(sink).futureValue shouldEqual Seq(4, 5, 6)
}
"pass elements from input one through and cancel input 2" in new OrElseProbedFlow {
outProbe.request(1)
inProbe1.expectRequest()
inProbe1.sendNext('a')
outProbe.expectNext('a')
inProbe1.sendComplete()
inProbe2.expectCancellation()
outProbe.expectComplete()
}
"pass elements from input two when input 1 has completed without elements" in new OrElseProbedFlow {
outProbe.request(1)
inProbe1.sendComplete()
inProbe2.expectRequest()
inProbe2.sendNext('a')
outProbe.expectNext('a')
inProbe2.sendComplete()
outProbe.expectComplete()
}
"pass elements from input two when input 1 has completed without elements (lazyEmpty)" in {
val inProbe1 = TestPublisher.lazyEmpty[Char]
val source1 = Source.fromPublisher(inProbe1)
val inProbe2 = TestPublisher.probe[Char]()
val source2 = Source.fromPublisher(inProbe2)
val outProbe = TestSubscriber.probe[Char]()
val sink = Sink.fromSubscriber(outProbe)
source1.orElse(source2).runWith(sink)
outProbe.request(1)
inProbe2.expectRequest()
inProbe2.sendNext('a')
outProbe.expectNext('a')
inProbe2.sendComplete()
outProbe.expectComplete()
}
"pass all available requested elements from input two when input 1 has completed without elements" in new OrElseProbedFlow {
outProbe.request(5)
inProbe1.sendComplete()
inProbe2.expectRequest()
inProbe2.sendNext('a')
outProbe.expectNext('a')
inProbe2.sendNext('b')
outProbe.expectNext('b')
inProbe2.sendNext('c')
outProbe.expectNext('c')
inProbe2.sendComplete()
outProbe.expectComplete()
}
"complete when both inputs completes without emitting elements" in new OrElseProbedFlow {
outProbe.ensureSubscription()
inProbe1.sendComplete()
inProbe2.sendComplete()
outProbe.expectComplete()
}
"complete when both inputs completes without emitting elements, regardless of order" in new OrElseProbedFlow {
outProbe.ensureSubscription()
inProbe2.sendComplete()
outProbe.expectNoMsg() // make sure it did not complete here
inProbe1.sendComplete()
outProbe.expectComplete()
}
"continue passing primary through when secondary completes" in new OrElseProbedFlow {
outProbe.ensureSubscription()
outProbe.request(1)
inProbe2.sendComplete()
inProbe1.expectRequest()
inProbe1.sendNext('a')
outProbe.expectNext('a')
inProbe1.sendComplete()
outProbe.expectComplete()
}
"fail when input 1 fails" in new OrElseProbedFlow {
outProbe.ensureSubscription()
inProbe1.sendError(TE("in1 failed"))
inProbe2.expectCancellation()
outProbe.expectError()
}
"fail when input 2 fails" in new OrElseProbedFlow {
outProbe.ensureSubscription()
inProbe2.sendError(TE("in2 failed"))
inProbe1.expectCancellation()
outProbe.expectError()
}
trait OrElseProbedFlow {
val inProbe1 = TestPublisher.probe[Char]()
val source1 = Source.fromPublisher(inProbe1)
val inProbe2 = TestPublisher.probe[Char]()
val source2 = Source.fromPublisher(inProbe2)
val outProbe = TestSubscriber.probe[Char]()
val sink = Sink.fromSubscriber(outProbe)
source1.orElse(source2).runWith(sink)
}
}
}

View file

@ -0,0 +1,351 @@
/**
* Copyright (C) 2015-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.scaladsl
import akka.stream.{ ActorMaterializer, KillSwitches, ThrottleMode }
import akka.stream.testkit.{ StreamSpec, TestPublisher, TestSubscriber }
import akka.stream.testkit.Utils.{ TE, assertAllStagesStopped }
import akka.testkit.EventFilter
import scala.collection.immutable
import scala.concurrent.Await
import scala.concurrent.duration._
class HubSpec extends StreamSpec {
implicit val mat = ActorMaterializer()
"MergeHub" must {
"work in the happy case" in assertAllStagesStopped {
val (sink, result) = MergeHub.source[Int](16).take(20).toMat(Sink.seq)(Keep.both).run()
Source(1 to 10).runWith(sink)
Source(11 to 20).runWith(sink)
result.futureValue.sorted should ===(1 to 20)
}
"notify new producers if consumer cancels before first producer" in assertAllStagesStopped {
val sink = Sink.cancelled[Int].runWith(MergeHub.source[Int](16))
val upstream = TestPublisher.probe[Int]()
Source.fromPublisher(upstream).runWith(sink)
upstream.expectCancellation()
}
"notify existing producers if consumer cancels after a few elements" in assertAllStagesStopped {
val (sink, result) = MergeHub.source[Int](16).take(5).toMat(Sink.seq)(Keep.both).run()
val upstream = TestPublisher.probe[Int]()
Source.fromPublisher(upstream).runWith(sink)
for (i ← 1 to 5) upstream.sendNext(i)
upstream.expectCancellation()
result.futureValue.sorted should ===(1 to 5)
}
"notify new producers if consumer cancels after a few elements" in assertAllStagesStopped {
val (sink, result) = MergeHub.source[Int](16).take(5).toMat(Sink.seq)(Keep.both).run()
val upstream1 = TestPublisher.probe[Int]()
val upstream2 = TestPublisher.probe[Int]()
Source.fromPublisher(upstream1).runWith(sink)
for (i ← 1 to 5) upstream1.sendNext(i)
upstream1.expectCancellation()
result.futureValue.sorted should ===(1 to 5)
Source.fromPublisher(upstream2).runWith(sink)
upstream2.expectCancellation()
}
"respect buffer size" in assertAllStagesStopped {
val downstream = TestSubscriber.manualProbe[Int]()
val sink = Sink.fromSubscriber(downstream).runWith(MergeHub.source[Int](3))
Source(1 to 10).map { i ⇒ testActor ! i; i }.runWith(sink)
val sub = downstream.expectSubscription()
sub.request(1)
// Demand starts from 3
expectMsg(1)
expectMsg(2)
expectMsg(3)
expectNoMsg(100.millis)
// One element consumed (it was requested), demand 0 remains at producer
downstream.expectNext(1)
// Requesting next element, results in next element to be consumed.
sub.request(1)
downstream.expectNext(2)
// Two elements have been consumed, so threshold of 2 is reached, additional 2 demand is dispatched.
// There is 2 demand at the producer now
expectMsg(4)
expectMsg(5)
expectNoMsg(100.millis)
// Two additional elements have been sent:
// - 3, 4, 5 are pending
// - demand is 0 at the producer
// - next demand batch is after two elements have been consumed again
// Requesting next gives the next element
// Demand is not yet refreshed for the producer as there is one more element until threshold is met
sub.request(1)
downstream.expectNext(3)
expectNoMsg(100.millis)
sub.request(1)
downstream.expectNext(4)
expectMsg(6)
expectMsg(7)
sub.cancel()
}
"work with long streams" in assertAllStagesStopped {
val (sink, result) = MergeHub.source[Int](16).take(20000).toMat(Sink.seq)(Keep.both).run()
Source(1 to 10000).runWith(sink)
Source(10001 to 20000).runWith(sink)
result.futureValue.sorted should ===(1 to 20000)
}
"work with long streams when buffer size is 1" in assertAllStagesStopped {
val (sink, result) = MergeHub.source[Int](1).take(20000).toMat(Sink.seq)(Keep.both).run()
Source(1 to 10000).runWith(sink)
Source(10001 to 20000).runWith(sink)
result.futureValue.sorted should ===(1 to 20000)
}
"work with long streams when consumer is slower" in assertAllStagesStopped {
val (sink, result) =
MergeHub.source[Int](16)
.take(2000)
.throttle(10, 1.millisecond, 200, ThrottleMode.shaping)
.toMat(Sink.seq)(Keep.both)
.run()
Source(1 to 1000).runWith(sink)
Source(1001 to 2000).runWith(sink)
result.futureValue.sorted should ===(1 to 2000)
}
"work with long streams if one of the producers is slower" in assertAllStagesStopped {
val (sink, result) =
MergeHub.source[Int](16)
.take(2000)
.toMat(Sink.seq)(Keep.both)
.run()
Source(1 to 1000).throttle(10, 1.millisecond, 100, ThrottleMode.shaping).runWith(sink)
Source(1001 to 2000).runWith(sink)
result.futureValue.sorted should ===(1 to 2000)
}
"work with different producers separated over time" in assertAllStagesStopped {
val downstream = TestSubscriber.probe[immutable.Seq[Int]]()
val sink = MergeHub.source[Int](16).grouped(100).toMat(Sink.fromSubscriber(downstream))(Keep.left).run()
Source(1 to 100).runWith(sink)
downstream.requestNext() should ===(1 to 100)
Source(101 to 200).runWith(sink)
downstream.requestNext() should ===(101 to 200)
downstream.cancel()
}
"keep working even if one of the producers fail" in assertAllStagesStopped {
val (sink, result) = MergeHub.source[Int](16).take(10).toMat(Sink.seq)(Keep.both).run()
EventFilter.error("Upstream producer failed with exception").intercept {
Source.failed(TE("faling")).runWith(sink)
Source(1 to 10).runWith(sink)
}
result.futureValue.sorted should ===(1 to 10)
}
}
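Outside the test harness the hub is wired up the same way; a minimal usage sketch (assuming the implicit materializer from this spec; names are illustrative):
import akka.NotUsed
val toConsole: Sink[Int, NotUsed] =
  MergeHub.source[Int](perProducerBufferSize = 16).to(Sink.foreach(println)).run()
// any number of producers can now attach dynamically to the single consumer
Source(1 to 3).runWith(toConsole)
Source(4 to 6).runWith(toConsole)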
"BroadcastHub" must {
"work in the happy case" in assertAllStagesStopped {
val source = Source(1 to 10).runWith(BroadcastHub.sink(8))
source.runWith(Sink.seq).futureValue should ===(1 to 10)
}
"send the same elements to consumers attaching around the same time" in assertAllStagesStopped {
val (firstElem, source) = Source.maybe[Int].concat(Source(2 to 10)).toMat(BroadcastHub.sink(8))(Keep.both).run()
val f1 = source.runWith(Sink.seq)
val f2 = source.runWith(Sink.seq)
// Ensure subscription of Sinks. This is racy but there is no event we can hook into here.
Thread.sleep(100)
firstElem.success(Some(1))
f1.futureValue should ===(1 to 10)
f2.futureValue should ===(1 to 10)
}
"send the same prefix to consumers attaching around the same time if one cancels earlier" in assertAllStagesStopped {
val (firstElem, source) = Source.maybe[Int].concat(Source(2 to 20)).toMat(BroadcastHub.sink(8))(Keep.both).run()
val f1 = source.runWith(Sink.seq)
val f2 = source.take(10).runWith(Sink.seq)
// Ensure subscription of Sinks. This is racy but there is no event we can hook into here.
Thread.sleep(100)
firstElem.success(Some(1))
f1.futureValue should ===(1 to 20)
f2.futureValue should ===(1 to 10)
}
"ensure that subsequent consumers see subsequent elements without gap" in assertAllStagesStopped {
val source = Source(1 to 20).runWith(BroadcastHub.sink(8))
source.take(10).runWith(Sink.seq).futureValue should ===(1 to 10)
source.take(10).runWith(Sink.seq).futureValue should ===(11 to 20)
}
"send the same elements to consumers of different speed attaching around the same time" in assertAllStagesStopped {
val (firstElem, source) = Source.maybe[Int].concat(Source(2 to 10)).toMat(BroadcastHub.sink(8))(Keep.both).run()
val f1 = source.throttle(1, 10.millis, 3, ThrottleMode.shaping).runWith(Sink.seq)
val f2 = source.runWith(Sink.seq)
// Ensure subscription of Sinks. This is racy but there is no event we can hook into here.
Thread.sleep(100)
firstElem.success(Some(1))
f1.futureValue should ===(1 to 10)
f2.futureValue should ===(1 to 10)
}
"send the same elements to consumers of attaching around the same time if the producer is slow" in assertAllStagesStopped {
val (firstElem, source) = Source.maybe[Int].concat(Source(2 to 10))
.throttle(1, 10.millis, 3, ThrottleMode.shaping)
.toMat(BroadcastHub.sink(8))(Keep.both).run()
val f1 = source.runWith(Sink.seq)
val f2 = source.runWith(Sink.seq)
// Ensure subscription of Sinks. This is racy but there is no event we can hook into here.
Thread.sleep(100)
firstElem.success(Some(1))
f1.futureValue should ===(1 to 10)
f2.futureValue should ===(1 to 10)
}
"ensure that from two different speed consumers the slower controls the rate" in assertAllStagesStopped {
val (firstElem, source) = Source.maybe[Int].concat(Source(2 to 20)).toMat(BroadcastHub.sink(1))(Keep.both).run()
val f1 = source.throttle(1, 10.millis, 1, ThrottleMode.shaping).runWith(Sink.seq)
// Second cannot be overwhelmed since the first one throttles the overall rate, and second allows a higher rate
val f2 = source.throttle(10, 10.millis, 8, ThrottleMode.enforcing).runWith(Sink.seq)
// Ensure subscription of Sinks. This is racy but there is no event we can hook into here.
Thread.sleep(100)
firstElem.success(Some(1))
f1.futureValue should ===(1 to 20)
f2.futureValue should ===(1 to 20)
}
"send the same elements to consumers attaching around the same time with a buffer size of one" in assertAllStagesStopped {
val (firstElem, source) = Source.maybe[Int].concat(Source(2 to 10)).toMat(BroadcastHub.sink(1))(Keep.both).run()
val f1 = source.runWith(Sink.seq)
val f2 = source.runWith(Sink.seq)
// Ensure subscription of Sinks. This is racy but there is no event we can hook into here.
Thread.sleep(100)
firstElem.success(Some(1))
f1.futureValue should ===(1 to 10)
f2.futureValue should ===(1 to 10)
}
"be able to implement a keep-dropping-if-unsubscribed policy with a simple Sink.ignore" in assertAllStagesStopped {
val killSwitch = KillSwitches.shared("test-switch")
val source = Source.fromIterator(() ⇒ Iterator.from(0)).via(killSwitch.flow).runWith(BroadcastHub.sink(8))
// Now the Hub "drops" elements until we attach a new consumer (Source.ignore consumes as fast as possible)
source.runWith(Sink.ignore)
// Now we attach a subscriber, which keeps Sink.ignore from "taking away" and dropping elements,
// turning the BroadcastHub back into its normal non-dropping mode
val downstream = TestSubscriber.probe[Int]()
source.runWith(Sink.fromSubscriber(downstream))
downstream.request(1)
val first = downstream.expectNext()
for (i ← (first + 1) to (first + 10)) {
downstream.request(1)
downstream.expectNext(i)
}
downstream.cancel()
killSwitch.shutdown()
}
"properly signal error to consumers" in assertAllStagesStopped {
val upstream = TestPublisher.probe[Int]()
val source = Source.fromPublisher(upstream).runWith(BroadcastHub.sink(8))
val downstream1 = TestSubscriber.probe[Int]()
val downstream2 = TestSubscriber.probe[Int]()
source.runWith(Sink.fromSubscriber(downstream1))
source.runWith(Sink.fromSubscriber(downstream2))
downstream1.request(4)
downstream2.request(8)
(1 to 8) foreach (upstream.sendNext(_))
downstream1.expectNext(1, 2, 3, 4)
downstream2.expectNext(1, 2, 3, 4, 5, 6, 7, 8)
downstream1.expectNoMsg(100.millis)
downstream2.expectNoMsg(100.millis)
upstream.sendError(TE("Failed"))
downstream1.expectError(TE("Failed"))
downstream2.expectError(TE("Failed"))
}
"properly singal completion to consumers arriving after producer finished" in assertAllStagesStopped {
val source = Source.empty[Int].runWith(BroadcastHub.sink(8))
// Wait enough so the Hub gets the completion. This is racy, but this is fine because both
// cases should work in the end
Thread.sleep(10)
source.runWith(Sink.seq).futureValue should ===(Nil)
}
"properly singal error to consumers arriving after producer finished" in assertAllStagesStopped {
val source = Source.failed(TE("Fail!")).runWith(BroadcastHub.sink(8))
// Wait enough so the Hub gets the completion. This is racy, but this is fine because both
// cases should work in the end
Thread.sleep(10)
a[TE] shouldBe thrownBy {
Await.result(source.runWith(Sink.seq), 3.seconds)
}
}
}
}
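The mirror-image BroadcastHub usage, for comparison (again a sketch under the same implicit materializer; names are illustrative):
import akka.NotUsed
val ints: Source[Int, NotUsed] = Source(1 to 100).runWith(BroadcastHub.sink(bufferSize = 8))
// every additional run attaches another consumer receiving the broadcast elements
ints.runWith(Sink.foreach(i ⇒ println(s"A: $i")))
ints.runWith(Sink.foreach(i ⇒ println(s"B: $i")))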

View file

@ -11,7 +11,6 @@ import java.util.stream.{ Collector, Collectors }
import akka.stream._
import akka.stream.testkit.Utils._
import akka.stream.testkit._
import org.scalactic.ConversionCheckedTripleEquals
import akka.testkit.DefaultTimeout
import org.scalatest.concurrent.ScalaFutures
import scala.concurrent.{ Await, Future }
@ -139,7 +138,6 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures {
}
"Java collector Sink" must {
import scala.compat.java8.FunctionConverters._
class TestCollector(
_supplier: () ⇒ Supplier[Array[Int]],

View file

@ -24,7 +24,7 @@ private[akka] class ActorRefBackpressureSinkStage[In](ref: ActorRef, onInitMessa
override val shape: SinkShape[In] = SinkShape(in)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) {
new GraphStageLogic(shape) with InHandler {
implicit def self: ActorRef = stageActor.ref
val maxBuffer = inheritedAttributes.getAttribute(classOf[InputBuffer], InputBuffer(16, 16)).max
@ -67,24 +67,26 @@ private[akka] class ActorRefBackpressureSinkStage[In](ref: ActorRef, onInitMessa
completeStage()
}
setHandler(in, new InHandler {
override def onPush(): Unit = {
buffer offer grab(in)
if (acknowledgementReceived) {
dequeueAndSend()
acknowledgementReceived = false
}
if (buffer.size() < maxBuffer) pull(in)
def onPush(): Unit = {
buffer offer grab(in)
if (acknowledgementReceived) {
dequeueAndSend()
acknowledgementReceived = false
}
override def onUpstreamFinish(): Unit = {
if (buffer.isEmpty) finish()
else completeReceived = true
}
override def onUpstreamFailure(ex: Throwable): Unit = {
ref ! onFailureMessage(ex)
failStage(ex)
}
})
if (buffer.size() < maxBuffer) pull(in)
}
override def onUpstreamFinish(): Unit = {
if (buffer.isEmpty) finish()
else completeReceived = true
}
override def onUpstreamFailure(ex: Throwable): Unit = {
ref ! onFailureMessage(ex)
failStage(ex)
}
setHandler(in, this)
}
override def toString = "ActorRefBackpressureSink"

View file

@ -205,29 +205,30 @@ final class LastOptionStage[T] extends GraphStageWithMaterializedValue[SinkShape
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
val p: Promise[Option[T]] = Promise()
(new GraphStageLogic(shape) {
(new GraphStageLogic(shape) with InHandler {
private[this] var prev: T = null.asInstanceOf[T]
override def preStart(): Unit = pull(in)
setHandler(in, new InHandler {
private[this] var prev: T = null.asInstanceOf[T]
override def onPush(): Unit = {
prev = grab(in)
pull(in)
}
def onPush(): Unit = {
prev = grab(in)
pull(in)
}
override def onUpstreamFinish(): Unit = {
val head = prev
prev = null.asInstanceOf[T]
p.trySuccess(Option(head))
completeStage()
}
override def onUpstreamFinish(): Unit = {
val head = prev
prev = null.asInstanceOf[T]
p.trySuccess(Option(head))
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
prev = null.asInstanceOf[T]
p.tryFailure(ex)
failStage(ex)
}
})
override def onUpstreamFailure(ex: Throwable): Unit = {
prev = null.asInstanceOf[T]
p.tryFailure(ex)
failStage(ex)
}
setHandler(in, this)
}, p.future)
}
@ -242,24 +243,25 @@ final class HeadOptionStage[T] extends GraphStageWithMaterializedValue[SinkShape
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
val p: Promise[Option[T]] = Promise()
(new GraphStageLogic(shape) {
(new GraphStageLogic(shape) with InHandler {
override def preStart(): Unit = pull(in)
setHandler(in, new InHandler {
override def onPush(): Unit = {
p.trySuccess(Option(grab(in)))
completeStage()
}
override def onUpstreamFinish(): Unit = {
p.trySuccess(None)
completeStage()
}
def onPush(): Unit = {
p.trySuccess(Option(grab(in)))
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
p.tryFailure(ex)
failStage(ex)
}
})
override def onUpstreamFinish(): Unit = {
p.trySuccess(None)
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
p.tryFailure(ex)
failStage(ex)
}
setHandler(in, this)
}, p.future)
}
@ -277,29 +279,28 @@ final class SeqStage[T] extends GraphStageWithMaterializedValue[SinkShape[T], Fu
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
val p: Promise[immutable.Seq[T]] = Promise()
val logic = new GraphStageLogic(shape) {
val logic = new GraphStageLogic(shape) with InHandler {
val buf = Vector.newBuilder[T]
override def preStart(): Unit = pull(in)
setHandler(in, new InHandler {
def onPush(): Unit = {
buf += grab(in)
pull(in)
}
override def onPush(): Unit = {
buf += grab(in)
pull(in)
}
override def onUpstreamFinish(): Unit = {
val result = buf.result()
p.trySuccess(result)
completeStage()
}
override def onUpstreamFinish(): Unit = {
val result = buf.result()
p.trySuccess(result)
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
p.tryFailure(ex)
failStage(ex)
}
override def onUpstreamFailure(ex: Throwable): Unit = {
p.tryFailure(ex)
failStage(ex)
}
})
setHandler(in, this)
}
(logic, p.future)
@ -325,7 +326,7 @@ final class QueueSink[T]() extends GraphStageWithMaterializedValue[SinkShape[T],
override def toString: String = "QueueSink"
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
val stageLogic = new GraphStageLogic(shape) with CallbackWrapper[Output[T]] {
val stageLogic = new GraphStageLogic(shape) with CallbackWrapper[Output[T]] with InHandler {
type Received[E] = Try[Option[E]]
val maxBuffer = inheritedAttributes.getAttribute(classOf[InputBuffer], InputBuffer(16, 16)).max
@ -383,14 +384,15 @@ final class QueueSink[T]() extends GraphStageWithMaterializedValue[SinkShape[T],
}
}
setHandler(in, new InHandler {
override def onPush(): Unit = {
enqueueAndNotify(Success(Some(grab(in))))
if (buffer.used < maxBuffer) pull(in)
}
override def onUpstreamFinish(): Unit = enqueueAndNotify(Success(None))
override def onUpstreamFailure(ex: Throwable): Unit = enqueueAndNotify(Failure(ex))
})
def onPush(): Unit = {
enqueueAndNotify(Success(Some(grab(in))))
if (buffer.used < maxBuffer) pull(in)
}
override def onUpstreamFinish(): Unit = enqueueAndNotify(Success(None))
override def onUpstreamFailure(ex: Throwable): Unit = enqueueAndNotify(Failure(ex))
setHandler(in, this)
}
(stageLogic, new SinkQueueWithCancel[T] {

View file

@ -45,6 +45,7 @@ object Stages {
val dropWhile = name("dropWhile")
val scan = name("scan")
val fold = name("fold")
val foldAsync = name("foldAsync")
val reduce = name("reduce")
val intersperse = name("intersperse")
val buffer = name("buffer")
@ -82,6 +83,7 @@ object Stages {
val zipWithN = name("zipWithN")
val unzip = name("unzip")
val concat = name("concat")
val orElse = name("orElse")
val repeat = name("repeat")
val unfold = name("unfold")
val unfoldAsync = name("unfoldAsync")

View file

@ -18,18 +18,18 @@ final class Unfold[S, E](s: S, f: S ⇒ Option[(S, E)]) extends GraphStage[Sourc
override val shape: SourceShape[E] = SourceShape(out)
override def initialAttributes: Attributes = DefaultAttributes.unfold
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) {
new GraphStageLogic(shape) with OutHandler {
private[this] var state = s
setHandler(out, new OutHandler {
override def onPull(): Unit = f(state) match {
case None ⇒ complete(out)
case Some((newState, v)) ⇒ {
push(out, v)
state = newState
}
def onPull(): Unit = f(state) match {
case None ⇒ complete(out)
case Some((newState, v)) ⇒ {
push(out, v)
state = newState
}
})
}
setHandler(out, this)
}
}
@ -41,7 +41,7 @@ final class UnfoldAsync[S, E](s: S, f: S ⇒ Future[Option[(S, E)]]) extends Gra
override val shape: SourceShape[E] = SourceShape(out)
override def initialAttributes: Attributes = DefaultAttributes.unfoldAsync
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) {
new GraphStageLogic(shape) with OutHandler {
private[this] var state = s
private[this] var asyncHandler: Function1[Try[Option[(S, E)]], Unit] = _
@ -56,9 +56,9 @@ final class UnfoldAsync[S, E](s: S, f: S ⇒ Future[Option[(S, E)]]) extends Gra
asyncHandler = ac.invoke
}
setHandler(out, new OutHandler {
override def onPull(): Unit =
f(state).onComplete(asyncHandler)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)
})
def onPull(): Unit = f(state).onComplete(asyncHandler)(
akka.dispatch.ExecutionContexts.sameThreadExecutionContext)
setHandler(out, this)
}
}

View file

@ -126,6 +126,7 @@ object ActorGraphInterpreter {
nextInputElementCursor = (nextInputElementCursor + 1) & IndexMask
elem
}
private def clear(): Unit = {
java.util.Arrays.fill(inputBuffer, 0, inputBuffer.length, null)
inputBufferElements = 0

View file

@ -59,14 +59,12 @@ object GraphStages {
object Identity extends SimpleLinearGraphStage[Any] {
override def initialAttributes = DefaultAttributes.identityOp
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
setHandler(in, new InHandler {
override def onPush(): Unit = push(out, grab(in))
})
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
def onPush(): Unit = push(out, grab(in))
def onPull(): Unit = pull(in)
setHandler(out, new OutHandler {
override def onPull(): Unit = pull(in)
})
setHandler(in, this)
setHandler(out, this)
}
override def toString = "Identity"
@ -83,29 +81,28 @@ object GraphStages {
override def initialAttributes = DefaultAttributes.detacher
override val shape = FlowShape(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
setHandler(in, new InHandler {
override def onPush(): Unit = {
if (isAvailable(out)) {
push(out, grab(in))
tryPull(in)
}
def onPush(): Unit = {
if (isAvailable(out)) {
push(out, grab(in))
tryPull(in)
}
override def onUpstreamFinish(): Unit = {
if (!isAvailable(in)) completeStage()
}
})
}
setHandler(out, new OutHandler {
override def onPull(): Unit = {
if (isAvailable(in)) {
push(out, grab(in))
if (isClosed(in)) completeStage()
else pull(in)
}
override def onUpstreamFinish(): Unit = {
if (!isAvailable(in)) completeStage()
}
def onPull(): Unit = {
if (isAvailable(in)) {
push(out, grab(in))
if (isClosed(in)) completeStage()
else pull(in)
}
})
}
setHandlers(in, out, this)
override def preStart(): Unit = tryPull(in)
}
@ -125,27 +122,27 @@ object GraphStages {
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {
val finishPromise = Promise[Done]()
(new GraphStageLogic(shape) {
setHandler(in, new InHandler {
override def onPush(): Unit = push(out, grab(in))
(new GraphStageLogic(shape) with InHandler with OutHandler {
def onPush(): Unit = push(out, grab(in))
override def onUpstreamFinish(): Unit = {
finishPromise.success(Done)
completeStage()
}
override def onUpstreamFinish(): Unit = {
finishPromise.success(Done)
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
finishPromise.failure(ex)
failStage(ex)
}
})
setHandler(out, new OutHandler {
override def onPull(): Unit = pull(in)
override def onDownstreamFinish(): Unit = {
finishPromise.success(Done)
completeStage()
}
})
override def onUpstreamFailure(ex: Throwable): Unit = {
finishPromise.failure(ex)
failStage(ex)
}
def onPull(): Unit = pull(in)
override def onDownstreamFinish(): Unit = {
finishPromise.success(Done)
completeStage()
}
setHandlers(in, out, this)
}, finishPromise.future)
}
@ -170,29 +167,33 @@ object GraphStages {
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, FlowMonitor[T]) = {
val monitor: FlowMonitorImpl[T] = new FlowMonitorImpl[T]
val logic: GraphStageLogic = new GraphStageLogic(shape) {
setHandler(in, new InHandler {
override def onPush(): Unit = {
val msg = grab(in)
push(out, msg)
monitor.set(if (msg.isInstanceOf[StreamState[_]]) Received(msg) else msg)
}
override def onUpstreamFinish(): Unit = {
super.onUpstreamFinish()
monitor.set(Finished)
}
override def onUpstreamFailure(ex: Throwable): Unit = {
super.onUpstreamFailure(ex)
monitor.set(Failed(ex))
}
})
setHandler(out, new OutHandler {
override def onPull(): Unit = pull(in)
override def onDownstreamFinish(): Unit = {
super.onDownstreamFinish()
monitor.set(Finished)
}
})
val logic: GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
def onPush(): Unit = {
val msg = grab(in)
push(out, msg)
monitor.set(if (msg.isInstanceOf[StreamState[_]]) Received(msg) else msg)
}
override def onUpstreamFinish(): Unit = {
super.onUpstreamFinish()
monitor.set(Finished)
}
override def onUpstreamFailure(ex: Throwable): Unit = {
super.onUpstreamFailure(ex)
monitor.set(Failed(ex))
}
def onPull(): Unit = pull(in)
override def onDownstreamFinish(): Unit = {
super.onDownstreamFinish()
monitor.set(Finished)
}
setHandler(in, this)
setHandler(out, this)
override def toString = "MonitorFlowLogic"
}
@ -293,14 +294,15 @@ object GraphStages {
ReactiveStreamsCompliance.requireNonNullElement(elem)
val out = Outlet[T]("single.out")
val shape = SourceShape(out)
override def createLogic(attr: Attributes) = new GraphStageLogic(shape) {
setHandler(out, new OutHandler {
override def onPull(): Unit = {
def createLogic(attr: Attributes) =
new GraphStageLogic(shape) with OutHandler {
def onPull(): Unit = {
push(out, elem)
completeStage()
}
})
}
setHandler(out, this)
}
override def toString: String = s"SingleSource($elem)"
}
@ -309,9 +311,9 @@ object GraphStages {
val shape = SourceShape(Outlet[T]("future.out"))
val out = shape.out
override def initialAttributes: Attributes = DefaultAttributes.futureSource
override def createLogic(attr: Attributes) = new GraphStageLogic(shape) {
setHandler(out, new OutHandler {
override def onPull(): Unit = {
override def createLogic(attr: Attributes) =
new GraphStageLogic(shape) with OutHandler {
def onPull(): Unit = {
val cb = getAsyncCallback[Try[T]] {
case scala.util.Success(v) ⇒ emit(out, v, () ⇒ completeStage())
case scala.util.Failure(t) ⇒ failStage(t)
@ -319,8 +321,10 @@ object GraphStages {
future.onComplete(cb)(ExecutionContexts.sameThreadExecutionContext)
setHandler(out, eagerTerminateOutput) // After first pull we won't produce anything more
}
})
}
setHandler(out, this)
}
override def toString: String = "FutureSource"
}

View file

@ -15,32 +15,32 @@ import java.{ util ⇒ ju }
*/
private[akka] object IteratorInterpreter {
final case class IteratorUpstream[T](input: Iterator[T]) extends UpstreamBoundaryStageLogic[T] {
final case class IteratorUpstream[T](input: Iterator[T]) extends UpstreamBoundaryStageLogic[T] with OutHandler {
val out: Outlet[T] = Outlet[T]("IteratorUpstream.out")
out.id = 0
private var hasNext = input.hasNext
setHandler(out, new OutHandler {
override def onPull(): Unit = {
if (!hasNext) complete(out)
else {
val elem = input.next()
hasNext = input.hasNext
if (!hasNext) {
push(out, elem)
complete(out)
} else push(out, elem)
}
def onPull(): Unit = {
if (!hasNext) complete(out)
else {
val elem = input.next()
hasNext = input.hasNext
if (!hasNext) {
push(out, elem)
complete(out)
} else push(out, elem)
}
}
override def onDownstreamFinish(): Unit = ()
})
override def onDownstreamFinish(): Unit = ()
setHandler(out, this)
override def toString = "IteratorUpstream"
}
final case class IteratorDownstream[T]() extends DownstreamBoundaryStageLogic[T] with Iterator[T] {
final case class IteratorDownstream[T]() extends DownstreamBoundaryStageLogic[T] with Iterator[T] with InHandler {
val in: Inlet[T] = Inlet[T]("IteratorDownstream.in")
in.id = 0
@ -49,21 +49,21 @@ private[akka] object IteratorInterpreter {
private var needsPull = true
private var lastFailure: Throwable = null
setHandler(in, new InHandler {
override def onPush(): Unit = {
nextElem = grab(in)
needsPull = false
}
def onPush(): Unit = {
nextElem = grab(in)
needsPull = false
}
override def onUpstreamFinish(): Unit = {
done = true
}
override def onUpstreamFinish(): Unit = {
done = true
}
override def onUpstreamFailure(cause: Throwable): Unit = {
done = true
lastFailure = cause
}
})
override def onUpstreamFailure(cause: Throwable): Unit = {
done = true
lastFailure = cause
}
setHandler(in, this)
private def pullIfNeeded(): Unit = {
if (needsPull) {
@ -93,7 +93,6 @@ private[akka] object IteratorInterpreter {
// don't let toString consume the iterator
override def toString: String = "IteratorDownstream"
}
}
/**

View file

@ -161,6 +161,7 @@ final case class DropWhile[T](p: T ⇒ Boolean) extends GraphStage[FlowShape[T,
*/
abstract class SupervisedGraphStageLogic(inheritedAttributes: Attributes, shape: Shape) extends GraphStageLogic(shape) {
private lazy val decider = inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)
def withSupervision[T](f: () ⇒ T): Option[T] =
try { Some(f()) } catch {
case NonFatal(ex) ⇒
@ -376,6 +377,8 @@ final case class Fold[In, Out](zero: Out, f: (Out, In) ⇒ Out) extends GraphSta
val out = Outlet[Out]("Fold.out")
override val shape: FlowShape[In, Out] = FlowShape(in, out)
override def toString: String = "Fold"
override val initialAttributes = DefaultAttributes.fold
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
@ -419,6 +422,98 @@ final case class Fold[In, Out](zero: Out, f: (Out, In) ⇒ Out) extends GraphSta
}
}
/**
* INTERNAL API
*/
final class FoldAsync[In, Out](zero: Out, f: (Out, In) ⇒ Future[Out]) extends GraphStage[FlowShape[In, Out]] {
import akka.dispatch.ExecutionContexts
val in = Inlet[In]("FoldAsync.in")
val out = Outlet[Out]("FoldAsync.out")
val shape = FlowShape.of(in, out)
override def toString: String = "FoldAsync"
override val initialAttributes = DefaultAttributes.foldAsync
def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
val decider = inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)
private var aggregator: Out = zero
private var aggregating: Future[Out] = Future.successful(aggregator)
private def onRestart(t: Throwable): Unit = {
aggregator = zero
}
private def ec = ExecutionContexts.sameThreadExecutionContext
private val futureCB = getAsyncCallback[Try[Out]]((result: Try[Out]) ⇒ {
result match {
case Success(update) if update != null ⇒ {
aggregator = update
if (isClosed(in)) {
push(out, update)
completeStage()
} else if (isAvailable(out) && !hasBeenPulled(in)) tryPull(in)
}
case other ⇒ {
val ex = other match {
case Failure(t) ⇒ t
case Success(s) if s == null ⇒
ReactiveStreamsCompliance.elementMustNotBeNullException
}
val supervision = decider(ex)
if (supervision == Supervision.Stop) failStage(ex)
else {
if (supervision == Supervision.Restart) onRestart(ex)
if (isClosed(in)) {
push(out, aggregator)
completeStage()
} else if (isAvailable(out) && !hasBeenPulled(in)) tryPull(in)
}
}
}
}).invoke _
def onPush(): Unit = {
try {
aggregating = f(aggregator, grab(in))
aggregating.value match {
case Some(result) ⇒ futureCB(result) // already completed
case _ ⇒ aggregating.onComplete(futureCB)(ec)
}
} catch {
case NonFatal(ex) ⇒ decider(ex) match {
case Supervision.Stop ⇒ failStage(ex)
case supervision ⇒ {
supervision match {
case Supervision.Restart ⇒ onRestart(ex)
case _ ⇒ () // just ignore on Resume
}
tryPull(in)
}
}
}
}
override def onUpstreamFinish(): Unit = {}
def onPull(): Unit = if (!hasBeenPulled(in)) tryPull(in)
setHandlers(in, out, this)
override def toString =
s"FoldAsync.Logic(completed=${aggregating.isCompleted})"
}
}
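From user code the new stage reads like `fold` with a Future-returning function; a minimal sketch (assuming an implicit materializer; the operator is exposed on the user-facing API, see the javadsl delegate later in this diff):
import scala.concurrent.Future
val sum = Source(1 to 10)
  .foldAsync(0)((acc, n) ⇒ Future.successful(acc + n))
  .runWith(Sink.head)
// completes with 55 once upstream finishes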
/**
* INTERNAL API
*/
@ -432,7 +527,7 @@ final case class Intersperse[T](start: Option[T], inject: T, end: Option[T]) ext
override val shape = FlowShape(in, out)
override def createLogic(attr: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
override def createLogic(attr: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler {
val startInHandler = new InHandler {
override def onPush(): Unit = {
// if else (to avoid using Iterator[T].flatten in hot code)
@ -456,12 +551,10 @@ final case class Intersperse[T](start: Option[T], inject: T, end: Option[T]) ext
}
}
val outHandler = new OutHandler {
override def onPull(): Unit = pull(in)
}
def onPull(): Unit = pull(in)
setHandler(in, startInHandler)
setHandler(out, outHandler)
setHandler(out, this)
}
}
@ -680,7 +773,7 @@ final case class Batch[In, Out](val max: Long, val costFn: In ⇒ Long, val seed
override val shape: FlowShape[In, Out] = FlowShape.of(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
lazy val decider = inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)
@ -713,85 +806,81 @@ final case class Batch[In, Out](val max: Long, val costFn: In ⇒ Long, val seed
override def preStart() = pull(in)
setHandler(in, new InHandler {
def onPush(): Unit = {
val elem = grab(in)
val cost = costFn(elem)
override def onPush(): Unit = {
val elem = grab(in)
val cost = costFn(elem)
if (agg == null) {
try {
agg = seed(elem)
left -= cost
} catch {
case NonFatal(ex) ⇒ decider(ex) match {
case Supervision.Stop ⇒ failStage(ex)
case Supervision.Restart ⇒
restartState()
case Supervision.Resume ⇒
}
}
} else if (left < cost) {
pending = elem
} else {
try {
agg = aggregate(agg, elem)
left -= cost
} catch {
case NonFatal(ex) ⇒ decider(ex) match {
case Supervision.Stop ⇒ failStage(ex)
case Supervision.Restart ⇒
restartState()
case Supervision.Resume ⇒
}
if (agg == null) {
try {
agg = seed(elem)
left -= cost
} catch {
case NonFatal(ex) ⇒ decider(ex) match {
case Supervision.Stop ⇒ failStage(ex)
case Supervision.Restart ⇒
restartState()
case Supervision.Resume ⇒
}
}
if (isAvailable(out)) flush()
if (pending == null) pull(in)
}
override def onUpstreamFinish(): Unit = {
if (agg == null) completeStage()
}
})
setHandler(out, new OutHandler {
override def onPull(): Unit = {
if (agg == null) {
if (isClosed(in)) completeStage()
else if (!hasBeenPulled(in)) pull(in)
} else if (isClosed(in)) {
push(out, agg)
if (pending == null) completeStage()
else {
try {
agg = seed(pending)
} catch {
case NonFatal(ex) ⇒ decider(ex) match {
case Supervision.Stop ⇒ failStage(ex)
case Supervision.Resume ⇒
case Supervision.Restart ⇒
restartState()
if (!hasBeenPulled(in)) pull(in)
}
}
pending = null.asInstanceOf[In]
} else if (left < cost) {
pending = elem
} else {
try {
agg = aggregate(agg, elem)
left -= cost
} catch {
case NonFatal(ex) ⇒ decider(ex) match {
case Supervision.Stop ⇒ failStage(ex)
case Supervision.Restart ⇒
restartState()
case Supervision.Resume ⇒
}
} else {
flush()
if (!hasBeenPulled(in)) pull(in)
}
}
})
if (isAvailable(out)) flush()
if (pending == null) pull(in)
}
override def onUpstreamFinish(): Unit = {
if (agg == null) completeStage()
}
def onPull(): Unit = {
if (agg == null) {
if (isClosed(in)) completeStage()
else if (!hasBeenPulled(in)) pull(in)
} else if (isClosed(in)) {
push(out, agg)
if (pending == null) completeStage()
else {
try {
agg = seed(pending)
} catch {
case NonFatal(ex) ⇒ decider(ex) match {
case Supervision.Stop ⇒ failStage(ex)
case Supervision.Resume ⇒
case Supervision.Restart ⇒
restartState()
if (!hasBeenPulled(in)) pull(in)
}
}
pending = null.asInstanceOf[In]
}
} else {
flush()
if (!hasBeenPulled(in)) pull(in)
}
}
private def restartState(): Unit = {
agg = null.asInstanceOf[Out]
left = max
pending = null.asInstanceOf[In]
}
setHandlers(in, out, this)
}
}
@ -805,46 +894,46 @@ final class Expand[In, Out](val extrapolate: In ⇒ Iterator[Out]) extends Graph
override def initialAttributes = DefaultAttributes.expand
override val shape = FlowShape(in, out)
override def createLogic(attr: Attributes) = new GraphStageLogic(shape) {
override def createLogic(attr: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler {
private var iterator: Iterator[Out] = Iterator.empty
private var expanded = false
override def preStart(): Unit = pull(in)
setHandler(in, new InHandler {
override def onPush(): Unit = {
iterator = extrapolate(grab(in))
if (iterator.hasNext) {
if (isAvailable(out)) {
expanded = true
def onPush(): Unit = {
iterator = extrapolate(grab(in))
if (iterator.hasNext) {
if (isAvailable(out)) {
expanded = true
pull(in)
push(out, iterator.next())
} else expanded = false
} else pull(in)
}
override def onUpstreamFinish(): Unit = {
if (iterator.hasNext && !expanded) () // need to wait
else completeStage()
}
def onPull(): Unit = {
if (iterator.hasNext) {
if (!expanded) {
expanded = true
if (isClosed(in)) {
push(out, iterator.next())
completeStage()
} else {
// expand needs to pull first to be fair when upstream is not actually slow
pull(in)
push(out, iterator.next())
} else expanded = false
} else pull(in)
}
} else push(out, iterator.next())
}
override def onUpstreamFinish(): Unit = {
if (iterator.hasNext && !expanded) () // need to wait
else completeStage()
}
})
}
setHandler(out, new OutHandler {
override def onPull(): Unit = {
if (iterator.hasNext) {
if (!expanded) {
expanded = true
if (isClosed(in)) {
push(out, iterator.next())
completeStage()
} else {
// expand needs to pull first to be fair when upstream is not actually slow
pull(in)
push(out, iterator.next())
}
} else push(out, iterator.next())
}
}
})
setHandler(in, this)
setHandler(out, this)
}
}
@ -853,11 +942,14 @@ final class Expand[In, Out](val extrapolate: In ⇒ Iterator[Out]) extends Graph
*/
private[akka] object MapAsync {
final class Holder[T](var elem: Try[T], val cb: AsyncCallback[Holder[T]]) extends (Try[T] ⇒ Unit) {
override def apply(t: Try[T]): Unit = {
def setElem(t: Try[T]): Unit =
elem = t match {
case Success(null) ⇒ Failure[T](ReactiveStreamsCompliance.elementMustNotBeNullException)
case other ⇒ other
}
override def apply(t: Try[T]): Unit = {
setElem(t)
cb.invoke(this)
}
}
@ -885,12 +977,14 @@ final case class MapAsync[In, Out](parallelism: Int, f: In ⇒ Future[Out])
//FIXME Put Supervision.stoppingDecider as a SupervisionStrategy on DefaultAttributes.mapAsync?
lazy val decider = inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)
var buffer: BufferImpl[Holder[Out]] = _
val futureCB =
getAsyncCallback[Holder[Out]](
_.elem match {
case Failure(e) if decider(e) == Supervision.Stop ⇒ failStage(e)
case _ ⇒ if (isAvailable(out)) pushOne()
})
def holderCompleted(h: Holder[Out]): Unit = {
h.elem match {
case Failure(e) if decider(e) == Supervision.Stop ⇒ failStage(e)
case _ ⇒ if (isAvailable(out)) pushOne()
}
}
val futureCB = getAsyncCallback[Holder[Out]](holderCompleted)
private[this] def todo = buffer.used
@ -918,14 +1012,16 @@ final case class MapAsync[In, Out](parallelism: Int, f: In ⇒ Future[Out])
// #20217 We dispatch the future if it's ready to optimize away
// scheduling it to an execution context
future.value match {
case None ⇒ future.onComplete(holder)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)
case Some(f) ⇒ holder.apply(f)
case None ⇒ future.onComplete(holder)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)
case Some(v) ⇒
holder.setElem(v)
holderCompleted(holder)
}
} catch {
case NonFatal(ex) ⇒ if (decider(ex) == Supervision.Stop) failStage(ex)
}
if (todo < parallelism) tryPull(in)
if (todo < parallelism && !hasBeenPulled(in)) tryPull(in)
}
override def onUpstreamFinish(): Unit = if (todo == 0) completeStage()
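The `future.value` check above (see #20217) avoids scheduling a callback for futures that are already completed. The same fast-path pattern in isolation (a standalone sketch with illustrative names):
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
def dispatchFast[T](f: Future[T])(cb: Try[T] ⇒ Unit)(implicit ec: ExecutionContext): Unit =
  f.value match {
    case Some(result) ⇒ cb(result)       // already completed: invoke directly, no scheduler hop
    case None         ⇒ f.onComplete(cb) // not yet completed: register the callback as usual
  }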
@ -954,45 +1050,47 @@ final case class MapAsyncUnordered[In, Out](parallelism: Int, f: In ⇒ Future[O
val decider =
inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)
var inFlight = 0
var buffer: BufferImpl[Out] = _
private var inFlight = 0
private var buffer: BufferImpl[Out] = _
private[this] def todo = inFlight + buffer.used
override def preStart(): Unit = buffer = BufferImpl(parallelism, materializer)
private val futureCB =
getAsyncCallback((result: Try[Out]) ⇒ {
inFlight -= 1
result match {
case Success(elem) if elem != null ⇒
if (isAvailable(out)) {
if (!hasBeenPulled(in)) tryPull(in)
push(out, elem)
} else buffer.enqueue(elem)
case other ⇒
val ex = other match {
case Failure(t) ⇒ t
case Success(s) if s == null ⇒ ReactiveStreamsCompliance.elementMustNotBeNullException
}
if (decider(ex) == Supervision.Stop) failStage(ex)
else if (isClosed(in) && todo == 0) completeStage()
else if (!hasBeenPulled(in)) tryPull(in)
}
}).invoke _
def futureCompleted(result: Try[Out]): Unit = {
inFlight -= 1
result match {
case Success(elem) if elem != null ⇒
if (isAvailable(out)) {
if (!hasBeenPulled(in)) tryPull(in)
push(out, elem)
} else buffer.enqueue(elem)
case other ⇒
val ex = other match {
case Failure(t) ⇒ t
case Success(s) if s == null ⇒ ReactiveStreamsCompliance.elementMustNotBeNullException
}
if (decider(ex) == Supervision.Stop) failStage(ex)
else if (isClosed(in) && todo == 0) completeStage()
else if (!hasBeenPulled(in)) tryPull(in)
}
}
private val futureCB = getAsyncCallback(futureCompleted)
private val invokeFutureCB: Try[Out] ⇒ Unit = futureCB.invoke
override def onPush(): Unit = {
try {
val future = f(grab(in))
inFlight += 1
future.value match {
case None ⇒ future.onComplete(futureCB)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)
case Some(f) ⇒ futureCB.apply(f)
case None ⇒ future.onComplete(invokeFutureCB)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)
case Some(v) ⇒ futureCompleted(v)
}
} catch {
case NonFatal(ex) ⇒ if (decider(ex) == Supervision.Stop) failStage(ex)
}
if (todo < parallelism) tryPull(in)
if (todo < parallelism && !hasBeenPulled(in)) tryPull(in)
}
override def onUpstreamFinish(): Unit = {
if (todo == 0) completeStage()
}
@ -1000,6 +1098,7 @@ final case class MapAsyncUnordered[In, Out](parallelism: Int, f: In ⇒ Future[O
override def onPull(): Unit = {
if (!buffer.isEmpty) push(out, buffer.dequeue())
else if (isClosed(in) && todo == 0) completeStage()
if (todo < parallelism && !hasBeenPulled(in)) tryPull(in)
}
@ -1206,7 +1305,7 @@ final class GroupedWithin[T](val n: Int, val d: FiniteDuration) extends GraphSta
final class Delay[T](val d: FiniteDuration, val strategy: DelayOverflowStrategy) extends SimpleLinearGraphStage[T] {
private[this] def timerName = "DelayedTimer"
override def initialAttributes: Attributes = DefaultAttributes.delay
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with InHandler with OutHandler {
val size =
inheritedAttributes.get[InputBuffer] match {
case None throw new IllegalStateException(s"Couldn't find InputBuffer Attribute for $this")
@ -1218,59 +1317,58 @@ final class Delay[T](val d: FiniteDuration, val strategy: DelayOverflowStrategy)
override def preStart(): Unit = buffer = BufferImpl(size, materializer)
setHandler(in, handler = new InHandler {
//FIXME rewrite into distinct strategy functions to avoid matching on strategy for every input when full
override def onPush(): Unit = {
if (buffer.isFull) strategy match {
case EmitEarly ⇒
if (!isTimerActive(timerName))
push(out, buffer.dequeue()._2)
else {
cancelTimer(timerName)
onTimer(timerName)
}
case DropHead ⇒
buffer.dropHead()
grabAndPull(true)
case DropTail ⇒
buffer.dropTail()
grabAndPull(true)
case DropNew ⇒
grab(in)
if (!isTimerActive(timerName)) scheduleOnce(timerName, d)
case DropBuffer ⇒
buffer.clear()
grabAndPull(true)
case Fail ⇒
failStage(new BufferOverflowException(s"Buffer overflow for delay combinator (max capacity was: $size)!"))
case Backpressure ⇒ throw new IllegalStateException("Delay buffer must never overflow in Backpressure mode")
}
else {
grabAndPull(strategy != Backpressure || buffer.used < size - 1)
//FIXME rewrite into distinct strategy functions to avoid matching on strategy for every input when full
def onPush(): Unit = {
if (buffer.isFull) strategy match {
case EmitEarly ⇒
if (!isTimerActive(timerName))
push(out, buffer.dequeue()._2)
else {
cancelTimer(timerName)
onTimer(timerName)
}
case DropHead ⇒
buffer.dropHead()
grabAndPull(true)
case DropTail ⇒
buffer.dropTail()
grabAndPull(true)
case DropNew ⇒
grab(in)
if (!isTimerActive(timerName)) scheduleOnce(timerName, d)
}
case DropBuffer ⇒
buffer.clear()
grabAndPull(true)
case Fail ⇒
failStage(new BufferOverflowException(s"Buffer overflow for delay combinator (max capacity was: $size)!"))
case Backpressure ⇒ throw new IllegalStateException("Delay buffer must never overflow in Backpressure mode")
}
def grabAndPull(pullCondition: Boolean): Unit = {
buffer.enqueue((System.nanoTime(), grab(in)))
if (pullCondition) pull(in)
else {
grabAndPull(strategy != Backpressure || buffer.used < size - 1)
if (!isTimerActive(timerName)) scheduleOnce(timerName, d)
}
}
override def onUpstreamFinish(): Unit = {
if (isAvailable(out) && isTimerActive(timerName)) willStop = true
else completeStage()
}
})
def grabAndPull(pullCondition: Boolean): Unit = {
buffer.enqueue((System.nanoTime(), grab(in)))
if (pullCondition) pull(in)
}
setHandler(out, new OutHandler {
override def onPull(): Unit = {
if (!isTimerActive(timerName) && !buffer.isEmpty && nextElementWaitTime() < 0)
push(out, buffer.dequeue()._2)
override def onUpstreamFinish(): Unit = {
if (isAvailable(out) && isTimerActive(timerName)) willStop = true
else completeStage()
}
if (!willStop && !hasBeenPulled(in)) pull(in)
completeIfReady()
}
})
def onPull(): Unit = {
if (!isTimerActive(timerName) && !buffer.isEmpty && nextElementWaitTime() < 0)
push(out, buffer.dequeue()._2)
if (!willStop && !hasBeenPulled(in)) pull(in)
completeIfReady()
}
setHandler(in, this)
setHandler(out, this)
def completeIfReady(): Unit = if (willStop && buffer.isEmpty) completeStage()
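For orientation, this logic backs the user-facing `delay` operator; a minimal use of it with one of the overflow strategies matched above (a sketch, assuming an implicit materializer):
import akka.stream.DelayOverflowStrategy
import scala.concurrent.duration._
Source(1 to 100)
  .delay(1.second, DelayOverflowStrategy.dropHead)
  .runWith(Sink.ignore)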
@ -1291,14 +1389,12 @@ final class Delay[T](val d: FiniteDuration, val strategy: DelayOverflowStrategy)
final class TakeWithin[T](val timeout: FiniteDuration) extends SimpleLinearGraphStage[T] {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) {
setHandler(in, new InHandler {
override def onPush(): Unit = push(out, grab(in))
})
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with InHandler with OutHandler {
def onPush(): Unit = push(out, grab(in))
def onPull(): Unit = pull(in)
setHandler(out, new OutHandler {
override def onPull(): Unit = pull(in)
})
setHandler(in, this)
setHandler(out, this)
final override protected def onTimer(key: Any): Unit =
completeStage()
@ -1310,19 +1406,19 @@ final class TakeWithin[T](val timeout: FiniteDuration) extends SimpleLinearGraph
}
final class DropWithin[T](val timeout: FiniteDuration) extends SimpleLinearGraphStage[T] {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with InHandler with OutHandler {
private var allow = false
setHandler(in, new InHandler {
override def onPush(): Unit =
if (allow) push(out, grab(in))
else pull(in)
})
def onPush(): Unit = {
if (allow) push(out, grab(in))
else pull(in)
}
setHandler(out, new OutHandler {
override def onPull(): Unit = pull(in)
})
def onPull(): Unit = pull(in)
setHandler(in, this)
setHandler(out, this)
final override protected def onTimer(key: Any): Unit = allow = true
@ -1369,6 +1465,7 @@ final class Reduce[T](val f: (T, T) ⇒ T) extends SimpleLinearGraphStage[T] {
setHandler(out, self)
}
override def toString = "Reduce"
}
@ -1404,30 +1501,21 @@ final class RecoverWith[T, M](val maximumRetries: Int, val pf: PartialFunction[T
def switchTo(source: Graph[SourceShape[T], M]): Unit = {
val sinkIn = new SubSinkInlet[T]("RecoverWithSink")
sinkIn.setHandler(new InHandler {
override def onPush(): Unit =
if (isAvailable(out)) {
push(out, sinkIn.grab())
sinkIn.pull()
}
override def onUpstreamFinish(): Unit = if (!sinkIn.isAvailable) completeStage()
override def onPush(): Unit = push(out, sinkIn.grab())
override def onUpstreamFinish(): Unit = completeStage()
override def onUpstreamFailure(ex: Throwable) = onFailure(ex)
})
def pushOut(): Unit = {
push(out, sinkIn.grab())
if (!sinkIn.isClosed) sinkIn.pull()
else completeStage()
}
val outHandler = new OutHandler {
override def onPull(): Unit = if (sinkIn.isAvailable) pushOut()
override def onPull(): Unit = sinkIn.pull()
override def onDownstreamFinish(): Unit = sinkIn.cancel()
}
Source.fromGraph(source).runWith(sinkIn.sink)(interpreter.subFusingMaterializer)
setHandler(out, outHandler)
sinkIn.pull()
if (isAvailable(out)) sinkIn.pull()
}
}

View file

@ -32,7 +32,6 @@ final class FlattenMerge[T, M](val breadth: Int) extends GraphStage[FlowShape[Gr
override val shape = FlowShape(in, out)
override def createLogic(attr: Attributes) = new GraphStageLogic(shape) {
var sources = Set.empty[SubSinkInlet[T]]
def activeSources = sources.size

View file

@ -22,7 +22,7 @@ private[akka] abstract class ByteStringParser[T] extends GraphStage[FlowShape[By
override def initialAttributes = Attributes.name("ByteStringParser")
final override val shape = FlowShape(bytesIn, objOut)
class ParsingLogic extends GraphStageLogic(shape) {
class ParsingLogic extends GraphStageLogic(shape) with InHandler {
var pullOnParserRequest = false
override def preStart(): Unit = pull(bytesIn)
setHandler(objOut, eagerTerminateOutput)
@ -58,16 +58,18 @@ private[akka] abstract class ByteStringParser[T] extends GraphStage[FlowShape[By
if (cont) doParse()
} else pull(bytesIn)
setHandler(bytesIn, new InHandler {
override def onPush(): Unit = {
pullOnParserRequest = false
buffer ++= grab(bytesIn)
doParse()
}
override def onUpstreamFinish(): Unit =
if (buffer.isEmpty && acceptUpstreamFinish) completeStage()
else current.onTruncation()
})
def onPush(): Unit = {
pullOnParserRequest = false
buffer ++= grab(bytesIn)
doParse()
}
override def onUpstreamFinish(): Unit = {
if (buffer.isEmpty && acceptUpstreamFinish) completeStage()
else current.onTruncation()
}
setHandler(bytesIn, this)
}
}

View file

@ -46,7 +46,7 @@ final private[stream] class InputStreamSinkStage(readTimeout: FiniteDuration) ex
val dataQueue = new LinkedBlockingDeque[StreamToAdapterMessage](maxBuffer + 2)
val logic = new GraphStageLogic(shape) with StageWithCallback {
val logic = new GraphStageLogic(shape) with StageWithCallback with InHandler {
private val callback: AsyncCallback[AdapterToStageMessage] =
getAsyncCallback {
@ -65,23 +65,26 @@ final private[stream] class InputStreamSinkStage(readTimeout: FiniteDuration) ex
pull(in)
}
setHandler(in, new InHandler {
override def onPush(): Unit = {
//1 is buffer for Finished or Failed callback
require(dataQueue.remainingCapacity() > 1)
dataQueue.add(Data(grab(in)))
if (dataQueue.remainingCapacity() > 1) sendPullIfAllowed()
}
override def onUpstreamFinish(): Unit = {
dataQueue.add(Finished)
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
dataQueue.add(Failed(ex))
failStage(ex)
}
})
def onPush(): Unit = {
//1 is buffer for Finished or Failed callback
require(dataQueue.remainingCapacity() > 1)
dataQueue.add(Data(grab(in)))
if (dataQueue.remainingCapacity() > 1) sendPullIfAllowed()
}
override def onUpstreamFinish(): Unit = {
dataQueue.add(Finished)
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
dataQueue.add(Failed(ex))
failStage(ex)
}
setHandler(in, this)
}
(logic, new InputStreamAdapter(dataQueue, logic.wakeUp, readTimeout))
}
}

View file

@ -568,6 +568,25 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
def fold[T](zero: T)(f: function.Function2[T, Out, T]): javadsl.Flow[In, T, Mat] =
new Flow(delegate.fold(zero)(f.apply))
/**
* Similar to `fold` but with an asynchronous function.
* Applies the given function towards its current and next value,
* yielding the next current value.
*
* If the function `f` returns a failure and the supervision decision is
* [[akka.stream.Supervision.Restart]], the current value starts at `zero` again
* and the stream will continue.
*
* '''Emits when''' upstream completes
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' upstream completes
*
* '''Cancels when''' downstream cancels
*/
def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Flow[In, T, Mat] = new Flow(delegate.foldAsync(zero) { (out, in) ⇒ f(out, in).toScala })
/**
* Similar to `fold` but uses first element as zero element.
* Applies the given function towards its current and next value,
@ -1340,6 +1359,46 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
def prependMat[T >: Out, M, M2](that: Graph[SourceShape[T], M], matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, T, M2] =
new Flow(delegate.prependMat(that)(combinerToScala(matF)))
/**
* Provides a secondary source that will be consumed if this source completes without any
* elements passing by. As soon as the first element comes through this stream, the alternative
* will be cancelled.
*
* Note that this Flow will be materialized together with the [[Source]] and just kept
* from producing elements by asserting back-pressure until its time comes or it gets
* cancelled.
*
* On errors the stage is failed regardless of the source of the error.
*
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
* is available from the second stream
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' the primary stream completes after emitting at least one element, when the primary stream completes
* without emitting and the secondary stream already has completed or when the secondary stream completes
*
* '''Cancels when''' downstream cancels and additionally the alternative is cancelled as soon as an element passes
* by from this stream.
*/
def orElse[T >: Out, M](secondary: Graph[SourceShape[T], M]): javadsl.Flow[In, T, Mat] =
new Flow(delegate.orElse(secondary))
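For illustration, the same operator on the scaladsl side that this method delegates to (a minimal sketch):
val primary = Source.empty[Int]
val fallback = Source(List(1, 2, 3))
primary.orElse(fallback).runWith(Sink.seq) // yields Seq(1, 2, 3)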
/**
* Provides a secondary source that will be consumed if this source completes without any
* elements passing by. As soon as the first element comes through this stream, the alternative
* will be cancelled.
*
* It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
* where appropriate instead of manually writing functions that pass through one of the values.
*
* @see [[#orElse]]
*/
def orElseMat[T >: Out, M2, M3](
secondary: Graph[SourceShape[T], M2],
matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, T, M3] =
new Flow(delegate.orElseMat(secondary)(combinerToScala(matF)))
/**
* Attaches the given [[Sink]] to this [[Flow]], meaning that elements that passes
* through will also be sent to the [[Sink]].

View file

@ -0,0 +1,93 @@
/**
* Copyright (C) 2015-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.javadsl
import akka.NotUsed
/**
* A MergeHub is a special streaming hub that is able to collect streamed elements from a dynamic set of
* producers. It consists of two parts, a [[Source]] and a [[Sink]]. The [[Source]] streams the elements to a consumer from
* its merged inputs. Once the consumer has been materialized, the [[Source]] returns a materialized value which is
* the corresponding [[Sink]]. This [[Sink]] can then be materialized arbitrarily many times, where each of the new
* materializations will feed its consumed elements to the original [[Source]].
*/
object MergeHub {
/**
* Creates a [[Source]] that emits elements merged from a dynamic set of producers. After the [[Source]] returned
* by this method is materialized, it returns a [[Sink]] as a materialized value. This [[Sink]] can be materialized
* arbitrarily many times and each of the materializations will feed the elements into the original [[Source]].
*
* Every new materialization of the [[Source]] results in a new, independent hub, which materializes to its own
* [[Sink]] for feeding that materialization.
*
* If one of the inputs fails the [[Sink]], the [[Source]] is failed in turn (possibly jumping over already buffered
* elements). Completed [[Sink]]s are simply removed. Once the [[Source]] is cancelled, the Hub is considered closed
* and any new producers using the [[Sink]] will be cancelled.
*
* @param clazz Type of elements this hub emits and consumes
* @param perProducerBufferSize Buffer space used per producer.
*/
def of[T](clazz: Class[T], perProducerBufferSize: Int): Source[T, Sink[T, NotUsed]] = {
akka.stream.scaladsl.MergeHub.source[T](perProducerBufferSize)
.mapMaterializedValue(_.asJava)
.asJava
}
/**
* Creates a [[Source]] that emits elements merged from a dynamic set of producers. After the [[Source]] returned
* by this method is materialized, it returns a [[Sink]] as a materialized value. This [[Sink]] can be materialized
* arbitrarily many times and each of the materializations will feed the elements into the original [[Source]].
*
* Every new materialization of the [[Source]] results in a new, independent hub, which materializes to its own
* [[Sink]] for feeding that materialization.
*
* If one of the inputs fails the [[Sink]], the [[Source]] is failed in turn (possibly jumping over already buffered
* elements). Completed [[Sink]]s are simply removed. Once the [[Source]] is cancelled, the Hub is considered closed
* and any new producers using the [[Sink]] will be cancelled.
*
* @param clazz Type of elements this hub emits and consumes
*/
def of[T](clazz: Class[T]): Source[T, Sink[T, NotUsed]] = of(clazz, 16)
}
/**
* A BroadcastHub is a special streaming hub that is able to broadcast streamed elements to a dynamic set of consumers.
* It consists of two parts, a [[Sink]] and a [[Source]]. The [[Sink]] broadcasts elements from a producer to the
* actually live consumers it has. Once the producer has been materialized, the [[Sink]] it feeds into returns a
* materialized value which is the corresponding [[Source]]. This [[Source]] can be materialized arbitrarily many times,
* where each of the new materializations will receive their elements from the original [[Sink]].
*/
object BroadcastHub {
/**
* Creates a [[Sink]] that receives elements from its upstream producer and broadcasts them to a dynamic set
* of consumers. After the [[Sink]] returned by this method is materialized, it returns a [[Source]] as materialized
* value. This [[Source]] can be materialized arbitrarily many times and each materialization will receive the
* broadcast elements from the original [[Sink]].
*
* Every new materialization of the [[Sink]] results in a new, independent hub, which materializes to its own
* [[Source]] for consuming the [[Sink]] of that materialization.
*
* If the original [[Sink]] is failed, then the failure is immediately propagated to all of its materialized
* [[Source]]s (possibly jumping over already buffered elements). If the original [[Sink]] is completed, then
* all corresponding [[Source]]s are completed. Both failure and normal completion are "remembered" and later
* materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are
* cancelled are simply removed from the dynamic set of consumers.
*
* @param clazz Type of elements this hub emits and consumes
* @param bufferSize Buffer size used by the producer. Gives an upper bound on how "far" from each other two
* concurrent consumers can be in terms of elements. If the buffer is full, the producer
* is backpressured. Must be a power of two and less than 4096.
*/
def of[T](clazz: Class[T], bufferSize: Int): Sink[T, Source[T, NotUsed]] = {
akka.stream.scaladsl.BroadcastHub.sink[T](bufferSize)
.mapMaterializedValue(_.asJava)
.asJava
}
def of[T](clazz: Class[T]): Sink[T, Source[T, NotUsed]] = of(clazz, 256)
}
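/*
 * Illustrative sketch, not part of this commit: how the two javadsl hubs
 * above could be wired from Scala. The object name, element type and
 * buffer sizes are assumptions; an ActorSystem/Materializer would be
 * needed to actually run anything.
 */
object HubsUsageSketch {
  // A Source that, once run, materializes into a Sink accepting dynamic producers:
  val merged: Source[String, Sink[String, NotUsed]] =
    MergeHub.of(classOf[String], 16)

  // A Sink that, once run, materializes into a Source serving dynamic consumers:
  val broadcast: Sink[String, Source[String, NotUsed]] =
    BroadcastHub.of(classOf[String], 256)
}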

View file

@@ -29,6 +29,15 @@ object Sink {
def fold[U, In](zero: U, f: function.Function2[U, In, U]): javadsl.Sink[In, CompletionStage[U]] =
new Sink(scaladsl.Sink.fold[U, In](zero)(f.apply).toCompletionStage())
/**
* A `Sink` that will invoke the given asynchronous function for every received element, giving it its previous
* output (or the given `zero` value) and the element as input.
* The returned [[java.util.concurrent.CompletionStage]] will be completed with the value of the final
* function evaluation when the input stream ends, or completed with `Failure`
* if a failure is signaled in the stream.
*/
def foldAsync[U, In](zero: U, f: function.Function2[U, In, CompletionStage[U]]): javadsl.Sink[In, CompletionStage[U]] = new Sink(scaladsl.Sink.foldAsync[U, In](zero)(f(_, _).toScala).toCompletionStage())
/**
* A `Sink` that will invoke the given function for every received element, giving it its previous
* output (from the second element) and the element as input.

View file

@@ -531,6 +531,16 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
def runFold[U](zero: U, f: function.Function2[U, Out, U], materializer: Materializer): CompletionStage[U] =
runWith(Sink.fold(zero, f), materializer)
/**
* Shortcut for running this `Source` with an asynchronous fold function.
* The given function is invoked for every received element, giving it its previous
* output (or the given `zero` value) and the element as input.
* The returned [[java.util.concurrent.CompletionStage]] will be completed with the value of the final
* function evaluation when the input stream ends, or completed with `Failure`
* if a failure is signaled in the stream.
*/
def runFoldAsync[U](zero: U, f: function.Function2[U, Out, CompletionStage[U]], materializer: Materializer): CompletionStage[U] = runWith(Sink.foldAsync(zero, f), materializer)
/**
* Shortcut for running this `Source` with a reduce function.
* The given function is invoked for every received element, giving it its previous
@@ -629,6 +639,44 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
new Source(delegate.prependMat(that)(combinerToScala(matF)))
/**
* Provides a secondary source that will be consumed if this source completes without any
* elements passing by. As soon as the first element comes through this stream, the alternative
* will be cancelled.
*
* Note that this Flow will be materialized together with the [[Source]] and just kept
* from producing elements by asserting back-pressure until its time comes or it gets
* cancelled.
*
* On errors the stage is failed regardless of the source of the error.
*
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
* is available from the second stream
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' the primary stream completes after emitting at least one element, when the primary stream completes
* without emitting and the secondary stream already has completed or when the secondary stream completes
*
* '''Cancels when''' downstream cancels and additionally the alternative is cancelled as soon as an element passes
* by from this stream.
*/
def orElse[T >: Out, M](secondary: Graph[SourceShape[T], M]): javadsl.Source[T, Mat] =
new Source(delegate.orElse(secondary))
/**
* Provides a secondary source that will be consumed if this source completes without any
* elements passing by. As soon as the first element comes through this stream, the alternative
* will be cancelled.
*
* It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
* where appropriate instead of manually writing functions that pass through one of the values.
*
* @see [[#orElse]]
*/
def orElseMat[T >: Out, M, M2](secondary: Graph[SourceShape[T], M], matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
new Source(delegate.orElseMat(secondary)(combinerToScala(matF)))
/**
* Attaches the given [[Sink]] to this [[Flow]], meaning that elements that passes
* through will also be sent to the [[Sink]].
@@ -1201,6 +1249,25 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
def fold[T](zero: T)(f: function.Function2[T, Out, T]): javadsl.Source[T, Mat] =
new Source(delegate.fold(zero)(f.apply))
/**
* Similar to `fold` but with an asynchronous function.
* Applies the given function towards its current and next value,
* yielding the next current value.
*
* If the function `f` returns a failure and the supervision decision is
* [[akka.stream.Supervision.Restart]], the current value starts at `zero` again
* and the stream will continue.
*
* '''Emits when''' upstream completes
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' upstream completes
*
* '''Cancels when''' downstream cancels
*/
def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Source[T, Mat] = new Source(delegate.foldAsync(zero) { (out, in) ⇒ f(out, in).toScala })
/**
* Similar to `fold` but uses the first element as the zero element.
* Applies the given function towards its current and next value,

View file

@@ -406,6 +406,25 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo
def fold[T](zero: T)(f: function.Function2[T, Out, T]): SubFlow[In, T, Mat] =
new SubFlow(delegate.fold(zero)(f.apply))
/**
* Similar to `fold` but with an asynchronous function.
* Applies the given function towards its current and next value,
* yielding the next current value.
*
* If the function `f` returns a failure and the supervision decision is
* [[akka.stream.Supervision.Restart]], the current value starts at `zero` again
* and the stream will continue.
*
* '''Emits when''' upstream completes
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' upstream completes
*
* '''Cancels when''' downstream cancels
*/
def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): SubFlow[In, T, Mat] = new SubFlow(delegate.foldAsync(zero) { (out, in) ⇒ f(out, in).toScala })
/**
* Similar to `fold` but uses the first element as the zero element.
* Applies the given function towards its current and next value,
@@ -963,6 +982,31 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo
def prepend[T >: Out, M](that: Graph[SourceShape[T], M]): SubFlow[In, T, Mat] =
new SubFlow(delegate.prepend(that))
/**
* Provides a secondary source that will be consumed if this source completes without any
* elements passing by. As soon as the first element comes through this stream, the alternative
* will be cancelled.
*
* Note that this Flow will be materialized together with the [[Source]] and just kept
* from producing elements by asserting back-pressure until its time comes or it gets
* cancelled.
*
* On errors the stage is failed regardless of the source of the error.
*
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
* is available from the second stream
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' the primary stream completes after emitting at least one element, when the primary stream completes
* without emitting and the secondary stream already has completed or when the secondary stream completes
*
* '''Cancels when''' downstream cancels and additionally the alternative is cancelled as soon as an element passes
* by from this stream.
*/
def orElse[T >: Out, M](secondary: Graph[SourceShape[T], M]): javadsl.SubFlow[In, T, Mat] =
new SubFlow(delegate.orElse(secondary))
/**
* Attaches the given [[Sink]] to this [[Flow]], meaning that elements that passes
* through will also be sent to the [[Sink]].

View file

@@ -404,6 +404,25 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source
def fold[T](zero: T)(f: function.Function2[T, Out, T]): SubSource[T, Mat] =
new SubSource(delegate.fold(zero)(f.apply))
/**
* Similar to `fold` but with an asynchronous function.
* Applies the given function towards its current and next value,
* yielding the next current value.
*
* If the function `f` returns a failure and the supervision decision is
* [[akka.stream.Supervision.Restart]], the current value starts at `zero` again
* and the stream will continue.
*
* '''Emits when''' upstream completes
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' upstream completes
*
* '''Cancels when''' downstream cancels
*/
def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): SubSource[T, Mat] = new SubSource(delegate.foldAsync(zero) { (out, in) ⇒ f(out, in).toScala })
/**
* Similar to `fold` but uses the first element as the zero element.
* Applies the given function towards its current and next value,
@@ -961,6 +980,31 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source
def prepend[T >: Out, M](that: Graph[SourceShape[T], M]): SubSource[T, Mat] =
new SubSource(delegate.prepend(that))
/**
* Provides a secondary source that will be consumed if this source completes without any
* elements passing by. As soon as the first element comes through this stream, the alternative
* will be cancelled.
*
* Note that this Flow will be materialized together with the [[Source]] and just kept
* from producing elements by asserting back-pressure until its time comes or it gets
* cancelled.
*
* On errors the stage is failed regardless of the source of the error.
*
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
* is available from the second stream
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' the primary stream completes after emitting at least one element, when the primary stream completes
* without emitting and the secondary stream already has completed or when the secondary stream completes
*
* '''Cancels when''' downstream cancels and additionally the alternative is cancelled as soon as an element passes
* by from this stream.
*/
def orElse[T >: Out, M](secondary: Graph[SourceShape[T], M]): javadsl.SubSource[T, Mat] =
new SubSource(delegate.orElse(secondary))
/**
* Attaches the given [[Sink]] to this [[Flow]], meaning that elements that passes
* through will also be sent to the [[Sink]].

View file

@@ -3,10 +3,9 @@
*/
package akka.stream.scaladsl
import akka.event.{ Logging, LoggingAdapter }
import akka.event.LoggingAdapter
import akka.stream._
import akka.Done
import akka.stream.impl.Stages.DefaultAttributes
import akka.stream.impl.StreamLayout.Module
import akka.stream.impl._
import akka.stream.impl.fusing._
@@ -786,6 +785,27 @@ trait FlowOps[+Out, +Mat] {
*/
def fold[T](zero: T)(f: (T, Out) ⇒ T): Repr[T] = via(Fold(zero, f))
/**
* Similar to `fold` but with an asynchronous function.
* Applies the given function towards its current and next value,
* yielding the next current value.
*
* If the function `f` returns a failure and the supervision decision is
* [[akka.stream.Supervision.Restart]], the current value starts at `zero` again
* and the stream will continue.
*
* '''Emits when''' upstream completes
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' upstream completes
*
* '''Cancels when''' downstream cancels
*
* See also [[FlowOps.fold]]
*/
def foldAsync[T](zero: T)(f: (T, Out) ⇒ Future[T]): Repr[T] = via(new FoldAsync(zero, f))
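// Illustrative sketch, not part of this commit (the names, the implicit
// materializer and the ExecutionContext are assumptions): summing with a
// Future-returning step.
//
//   val sum: Future[Int] =
//     Source(1 to 100)
//       .foldAsync(0)((acc, n) ⇒ Future(acc + n))
//       .runWith(Sink.head)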
/**
* Similar to `fold` but uses the first element as the zero element.
* Applies the given function towards its current and next value,
@@ -1773,6 +1793,40 @@ trait FlowOps[+Out, +Mat] {
FlowShape(merge.in(1), merge.out)
}
/**
* Provides a secondary source that will be consumed if this stream completes without any
* elements passing by. As soon as the first element comes through this stream, the alternative
* will be cancelled.
*
* Note that this Flow will be materialized together with the [[Source]] and just kept
* from producing elements by asserting back-pressure until its time comes or it gets
* cancelled.
*
* On errors the stage is failed regardless of the source of the error.
*
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
* is available from the second stream
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' the primary stream completes after emitting at least one element, when the primary stream completes
* without emitting and the secondary stream already has completed or when the secondary stream completes
*
* '''Cancels when''' downstream cancels and additionally the alternative is cancelled as soon as an element passes
* by from this stream.
*/
def orElse[U >: Out, Mat2](secondary: Graph[SourceShape[U], Mat2]): Repr[U] =
via(orElseGraph(secondary))
protected def orElseGraph[U >: Out, Mat2](secondary: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] =
GraphDSL.create(secondary) { implicit b ⇒ secondary ⇒
val orElse = b.add(OrElse[U]())
secondary ~> orElse.in(1)
FlowShape(orElse.in(0), orElse.out)
}
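// Illustrative sketch, not part of this commit (implicit materializer
// assumed): fall back to a default when the primary completes empty.
//
//   val primary  = Source.empty[String]
//   val fallback = Source.single("default")
//   primary.orElse(fallback).runWith(Sink.head) // completes with "default"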
/**
* Concatenates this [[Flow]] with the given [[Source]] so the first element
* emitted by that source is emitted after the last element of this
@@ -2011,6 +2065,31 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] {
def prependMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2])(matF: (Mat, Mat2) ⇒ Mat3): ReprMat[U, Mat3] =
viaMat(prependGraph(that))(matF)
/**
* Provides a secondary source that will be consumed if this stream completes without any
* elements passing by. As soon as the first element comes through this stream, the alternative
* will be cancelled.
*
* Note that this Flow will be materialized together with the [[Source]] and just kept
* from producing elements by asserting back-pressure until its time comes or it gets
* cancelled.
*
* On errors the stage is failed regardless of the source of the error.
*
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
* is available from the second stream
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' the primary stream completes after emitting at least one element, when the primary stream completes
* without emitting and the secondary stream already has completed or when the secondary stream completes
*
* '''Cancels when''' downstream cancels and additionally the alternative is cancelled as soon as an element passes
* by from this stream.
*/
def orElseMat[U >: Out, Mat2, Mat3](secondary: Graph[SourceShape[U], Mat2])(matF: (Mat, Mat2) ⇒ Mat3): ReprMat[U, Mat3] =
viaMat(orElseGraph(secondary))(matF)
/**
* Attaches the given [[Sink]] to this [[Flow]], meaning that elements that passes
* through will also be sent to the [[Sink]].

View file

@@ -11,7 +11,8 @@ import akka.stream.impl.fusing.GraphStages.MaterializedValueSource
import akka.stream.impl.Stages.DefaultAttributes
import akka.stream.impl.StreamLayout._
import akka.stream.scaladsl.Partition.PartitionOutOfBoundsException
import akka.stream.stage.{ OutHandler, InHandler, GraphStageLogic, GraphStage }
import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
import scala.annotation.unchecked.uncheckedVariance
import scala.annotation.tailrec
import scala.collection.immutable
@@ -266,7 +267,7 @@ final class Interleave[T](val inputPorts: Int, val segmentSize: Int, val eagerCl
val out: Outlet[T] = Outlet[T]("Interleave.out")
override val shape: UniformFanInShape[T, T] = UniformFanInShape(out, in: _*)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler {
private var counter = 0
private var currentUpstreamIndex = 0
private var runningUpstreams = inputPorts
@@ -315,9 +316,10 @@ final class Interleave[T](val inputPorts: Int, val segmentSize: Int, val eagerCl
})
}
setHandler(out, new OutHandler {
override def onPull(): Unit = if (!hasBeenPulled(currentUpstream)) tryPull(currentUpstream)
})
def onPull(): Unit =
if (!hasBeenPulled(currentUpstream)) tryPull(currentUpstream)
setHandler(out, this)
}
override def toString = "Interleave"
@@ -405,30 +407,30 @@ final class Broadcast[T](val outputPorts: Int, val eagerCancel: Boolean) extends
override def initialAttributes = DefaultAttributes.broadcast
override val shape: UniformFanOutShape[T, T] = UniformFanOutShape(in, out: _*)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler {
private var pendingCount = outputPorts
private val pending = Array.fill[Boolean](outputPorts)(true)
private var downstreamsRunning = outputPorts
setHandler(in, new InHandler {
override def onPush(): Unit = {
pendingCount = downstreamsRunning
val elem = grab(in)
def onPush(): Unit = {
pendingCount = downstreamsRunning
val elem = grab(in)
var idx = 0
val itr = out.iterator
var idx = 0
val itr = out.iterator
while (itr.hasNext) {
val o = itr.next()
val i = idx
if (!isClosed(o)) {
push(o, elem)
pending(i) = true
}
idx += 1
while (itr.hasNext) {
val o = itr.next()
val i = idx
if (!isClosed(o)) {
push(o, elem)
pending(i) = true
}
idx += 1
}
})
}
setHandler(in, this)
private def tryPull(): Unit =
if (pendingCount == 0 && !hasBeenPulled(in)) pull(in)
@@ -502,36 +504,35 @@ final class Partition[T](val outputPorts: Int, val partitioner: T ⇒ Int) exten
val out: Seq[Outlet[T]] = Seq.tabulate(outputPorts)(i ⇒ Outlet[T]("Partition.out" + i))
override val shape: UniformFanOutShape[T, T] = UniformFanOutShape[T, T](in, out: _*)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler {
private var outPendingElem: Any = null
private var outPendingIdx: Int = _
private var downstreamRunning = outputPorts
setHandler(in, new InHandler {
override def onPush() = {
val elem = grab(in)
val idx = partitioner(elem)
if (idx < 0 || idx >= outputPorts)
failStage(PartitionOutOfBoundsException(s"partitioner must return an index in the range [0,${outputPorts - 1}]. returned: [$idx] for input [${elem.getClass.getName}]."))
else if (!isClosed(out(idx))) {
if (isAvailable(out(idx))) {
push(out(idx), elem)
if (out.exists(isAvailable(_)))
pull(in)
} else {
outPendingElem = elem
outPendingIdx = idx
}
def onPush() = {
val elem = grab(in)
val idx = partitioner(elem)
if (idx < 0 || idx >= outputPorts) {
failStage(PartitionOutOfBoundsException(s"partitioner must return an index in the range [0,${outputPorts - 1}]. returned: [$idx] for input [${elem.getClass.getName}]."))
} else if (!isClosed(out(idx))) {
if (isAvailable(out(idx))) {
push(out(idx), elem)
if (out.exists(isAvailable(_)))
pull(in)
} else {
outPendingElem = elem
outPendingIdx = idx
}
} else if (out.exists(isAvailable(_)))
pull(in)
}
} else if (out.exists(isAvailable(_)))
pull(in)
}
override def onUpstreamFinish(): Unit = {
if (outPendingElem == null)
completeStage()
}
})
override def onUpstreamFinish(): Unit = {
if (outPendingElem == null) completeStage()
}
setHandler(in, this)
out.zipWithIndex.foreach {
case (o, idx) ⇒
@@ -610,7 +611,7 @@ final class Balance[T](val outputPorts: Int, val waitForAllDownstreams: Boolean)
override def initialAttributes = DefaultAttributes.balance
override val shape: UniformFanOutShape[T, T] = UniformFanOutShape[T, T](in, out: _*)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler {
private val pendingQueue = FixedSizeBuffer[Outlet[T]](outputPorts)
private def noPending: Boolean = pendingQueue.isEmpty
@@ -633,9 +634,8 @@ final class Balance[T](val outputPorts: Int, val waitForAllDownstreams: Boolean)
}
}
setHandler(in, new InHandler {
override def onPush(): Unit = dequeueAndDispatch()
})
def onPush(): Unit = dequeueAndDispatch()
setHandler(in, this)
out.foreach { o ⇒
setHandler(o, new OutHandler {
@@ -802,7 +802,7 @@ class ZipWithN[A, O](zipper: immutable.Seq[A] ⇒ O)(n: Int) extends GraphStage[
def out = shape.out
val inSeq = shape.inSeq
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler {
var pending = 0
// Without this field the completion signalling would take one extra pull
var willShutDown = false
@@ -835,16 +835,15 @@ class ZipWithN[A, O](zipper: immutable.Seq[A] ⇒ O)(n: Int) extends GraphStage[
})
})
setHandler(out, new OutHandler {
override def onPull(): Unit = {
pending += n
if (pending == 0) pushAll()
}
})
def onPull(): Unit = {
pending += n
if (pending == 0) pushAll()
}
setHandler(out, this)
}
override def toString = "ZipWithN"
}
object Concat {
@@ -877,7 +876,7 @@ final class Concat[T](val inputPorts: Int) extends GraphStage[UniformFanInShape[
override def initialAttributes = DefaultAttributes.concat
override val shape: UniformFanInShape[T, T] = UniformFanInShape(out, in: _*)
override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) {
override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with OutHandler {
var activeStream: Int = 0
{
@@ -905,14 +904,92 @@ final class Concat[T](val inputPorts: Int) extends GraphStage[UniformFanInShape[
}
}
setHandler(out, new OutHandler {
override def onPull() = pull(in(activeStream))
})
def onPull() = pull(in(activeStream))
setHandler(out, this)
}
override def toString: String = s"Concat($inputPorts)"
}
object OrElse {
private val singleton = new OrElse[Nothing]
def apply[T]() = singleton.asInstanceOf[OrElse[T]]
}
/**
* Takes two streams and passes the first through; the secondary stream is only passed
* through if the primary stream completes without passing any elements through. When
* the first element is passed through from the primary the secondary is cancelled.
* Both incoming streams are materialized when the stage is materialized.
*
* On errors the stage is failed regardless of the source of the error.
*
* '''Emits when''' element is available from primary stream or the primary stream closed without emitting any elements and an element
* is available from the secondary stream
*
* '''Backpressures when''' downstream backpressures
*
* '''Completes when''' the primary stream completes after emitting at least one element, when the primary stream completes
* without emitting and the secondary stream already has completed or when the secondary stream completes
*
* '''Cancels when''' downstream cancels
*/
private[stream] final class OrElse[T] extends GraphStage[UniformFanInShape[T, T]] {
val primary = Inlet[T]("OrElse.primary")
val secondary = Inlet[T]("OrElse.secondary")
val out = Outlet[T]("OrElse.out")
override val shape: UniformFanInShape[T, T] = UniformFanInShape(out, primary, secondary)
override protected def initialAttributes: Attributes = DefaultAttributes.orElse
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler with InHandler {
private[this] var currentIn = primary
private[this] var primaryPushed = false
override def onPull(): Unit = {
pull(currentIn)
}
// for the primary inHandler
override def onPush(): Unit = {
if (!primaryPushed) {
primaryPushed = true
cancel(secondary)
}
val elem = grab(primary)
push(out, elem)
}
// for the primary inHandler
override def onUpstreamFinish(): Unit = {
if (!primaryPushed && !isClosed(secondary)) {
currentIn = secondary
if (isAvailable(out)) pull(secondary)
} else {
completeStage()
}
}
setHandler(secondary, new InHandler {
override def onPush(): Unit = {
push(out, grab(secondary))
}
override def onUpstreamFinish(): Unit = {
if (isClosed(primary)) completeStage()
}
})
setHandlers(primary, out, this)
}
override def toString: String = s"OrElse"
}
object GraphDSL extends GraphApply {
class Builder[+M] private[stream] () {

View file

@@ -0,0 +1,676 @@
/**
* Copyright (C) 2015-2016 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.scaladsl
import java.util.concurrent.atomic.{ AtomicLong, AtomicReference }
import akka.NotUsed
import akka.dispatch.AbstractNodeQueue
import akka.stream._
import akka.stream.stage._
import scala.annotation.tailrec
import scala.concurrent.{ Future, Promise }
import scala.util.{ Failure, Success, Try }
/**
* A MergeHub is a special streaming hub that is able to collect streamed elements from a dynamic set of
* producers. It consists of two parts, a [[Source]] and a [[Sink]]. The [[Source]] streams the elements to a consumer from
* its merged inputs. Once the consumer has been materialized, the [[Source]] returns a materialized value which is
* the corresponding [[Sink]]. This [[Sink]] can then be materialized arbitrarily many times, where each of the new
* materializations will feed its consumed elements to the original [[Source]].
*/
object MergeHub {
private val Cancel = -1
/**
* Creates a [[Source]] that emits elements merged from a dynamic set of producers. After the [[Source]] returned
* by this method is materialized, it returns a [[Sink]] as a materialized value. This [[Sink]] can be materialized
* arbitrarily many times and each of the materializations will feed the elements into the original [[Source]].
*
* Every new materialization of the [[Source]] results in a new, independent hub, which materializes to its own
* [[Sink]] for feeding that materialization.
*
* If one of the inputs fails the [[Sink]], the [[Source]] is failed in turn (possibly jumping over already buffered
* elements). Completed [[Sink]]s are simply removed. Once the [[Source]] is cancelled, the Hub is considered closed
* and any new producers using the [[Sink]] will be cancelled.
*
* @param perProducerBufferSize Buffer space used per producer. Default value is 16.
*/
def source[T](perProducerBufferSize: Int): Source[T, Sink[T, NotUsed]] =
Source.fromGraph(new MergeHub[T](perProducerBufferSize))
/**
* Creates a [[Source]] that emits elements merged from a dynamic set of producers. After the [[Source]] returned
* by this method is materialized, it returns a [[Sink]] as a materialized value. This [[Sink]] can be materialized
* arbitrarily many times and each of the materializations will feed the elements into the original [[Source]].
*
* Every new materialization of the [[Source]] results in a new, independent hub, which materializes to its own
* [[Sink]] for feeding that materialization.
*
* If one of the inputs fails the [[Sink]], the [[Source]] is failed in turn (possibly jumping over already buffered
* elements). Completed [[Sink]]s are simply removed. Once the [[Source]] is cancelled, the Hub is considered closed
* and any new producers using the [[Sink]] will be cancelled.
*/
def source[T]: Source[T, Sink[T, NotUsed]] = source(perProducerBufferSize = 16)
final class ProducerFailed(msg: String, cause: Throwable) extends RuntimeException(msg, cause)
}
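/*
 * Illustrative usage sketch, not part of this commit. The object name,
 * system name and the println consumer are assumptions.
 */
object MergeHubUsageSketch {
  import akka.actor.ActorSystem
  import akka.stream.ActorMaterializer

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem("merge-hub-sketch")
    implicit val materializer = ActorMaterializer()

    // Running the hub once yields a Sink that may be materialized many times:
    val toConsumer: Sink[String, NotUsed] =
      MergeHub.source[String](perProducerBufferSize = 16)
        .to(Sink.foreach(println))
        .run()

    // Producers can now attach (and detach) dynamically:
    Source(List("a", "b", "c")).runWith(toConsumer)
    Source.single("d").runWith(toConsumer)
  }
}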
/**
* INTERNAL API
*/
private[akka] class MergeHub[T](perProducerBufferSize: Int) extends GraphStageWithMaterializedValue[SourceShape[T], Sink[T, NotUsed]] {
require(perProducerBufferSize > 0, "Buffer size must be positive")
val out: Outlet[T] = Outlet("MergeHub.out")
override val shape: SourceShape[T] = SourceShape(out)
// Half of buffer size, rounded up
private[this] val DemandThreshold = (perProducerBufferSize / 2) + (perProducerBufferSize % 2)
private sealed trait Event {
def id: Long
}
private final case class Element(id: Long, elem: T) extends Event
private final case class Register(id: Long, demandCallback: AsyncCallback[Long]) extends Event
private final case class Deregister(id: Long) extends Event
final class InputState(signalDemand: AsyncCallback[Long]) {
private var untilNextDemandSignal = DemandThreshold
def onElement(): Unit = {
untilNextDemandSignal -= 1
if (untilNextDemandSignal == 0) {
untilNextDemandSignal = DemandThreshold
signalDemand.invoke(DemandThreshold)
}
}
def close(): Unit = signalDemand.invoke(MergeHub.Cancel)
}
final class MergedSourceLogic(_shape: Shape, producerCount: AtomicLong) extends GraphStageLogic(_shape) with OutHandler {
/*
* Basically all merged messages are shared in this queue. Individual buffer sizes are enforced by tracking
* demand per producer in the 'demands' Map. One twist here is that the same queue contains control messages,
* too. Since the queue is read only if the output port has been pulled, downstream backpressure can delay
* processing of control messages. This causes no issues though, see the explanation in 'tryProcessNext'.
*/
private val queue = new AbstractNodeQueue[Event] {}
@volatile private[this] var needWakeup = false
@volatile private[this] var shuttingDown = false
private[this] val demands = scala.collection.mutable.LongMap.empty[InputState]
private[this] val wakeupCallback = getAsyncCallback[NotUsed]((_) ⇒
// We are only allowed to dequeue if we are not backpressured. See comment in tryProcessNext() for details.
if (isAvailable(out)) tryProcessNext(firstAttempt = true)
)
setHandler(out, this)
// Returns true when we have not consumed demand, false otherwise
private def onEvent(ev: Event): Boolean = ev match {
case Element(id, elem) ⇒
demands(id).onElement()
push(out, elem)
false
case Register(id, callback) ⇒
demands.put(id, new InputState(callback))
true
case Deregister(id) ⇒
demands.remove(id)
true
}
override def onPull(): Unit = tryProcessNext(firstAttempt = true)
@tailrec private def tryProcessNext(firstAttempt: Boolean): Unit = {
val nextElem = queue.poll()
// That we dequeue elements from the queue when there is demand means that Register and Deregister messages
// might be delayed for arbitrarily long. This is not a problem as Register is only interesting if it is followed
// by actual elements, which would be delayed anyway by the backpressure.
// Deregister is only used to keep the map from growing too large, but otherwise it is not critical to process it
// in a timely manner. In fact, the only way the map could keep growing would be if we dequeued Registers from the
// queue, but then we will eventually reach the Deregister message, too.
if (nextElem ne null) {
needWakeup = false
if (onEvent(nextElem)) tryProcessNext(firstAttempt = true)
} else {
needWakeup = true
// additional poll() to grab any elements that might have missed the needWakeup
// and have been enqueued just after it
if (firstAttempt)
tryProcessNext(firstAttempt = false)
}
}
def isShuttingDown: Boolean = shuttingDown
// External API
def enqueue(ev: Event): Unit = {
queue.add(ev)
/*
* Simple volatile var is enough, there is no need for a CAS here. The first important thing to note is
* that we don't care about double-wakeups. Since the "wakeup" is actually handled by an actor message
* (AsyncCallback) we don't need to handle this case, a double-wakeup will be idempotent (only wasting some cycles).
*
* The only case that we care about is a missed wakeup. The characteristics of a missed wakeup are the following:
* (1) there is at least one message in the queue
* (2) the consumer is not running right now
* (3) no wakeupCallbacks are pending
* (4) all producers exited this method
*
* From the above we can deduce that
* (5) needWakeup = true at some point in time. This is implied by (1) and (2) and the
* 'tryProcessNext' method
* (6) There must have been one producer that observed needWakeup = false. This follows from (4) and (3)
* and the implementation of this method. In addition, this producer arrived after needWakeup = true,
* since before that, every queued element had been consumed.
* (7) There have been at least one producer that observed needWakeup = true and enqueued an element and
* a wakeup signal. This follows from (5) and (6), and the fact that either this method sets
* needWakeup = false, or the 'tryProcessNext' method, i.e. a wakeup must have happened since (5)
* (8) If there were multiple producers satisfying (6) take the last one. Due to (6), (3) and (4) we know
* there cannot be a wakeup pending, and we just enqueued an element, so (1) holds. Since we are the last
* one, (2) must be true or there is no lost wakeup. However, due to (7) we know there was at least one
* wakeup (otherwise needWakeup = true). Now, if the consumer is still running (2) is violated,
* if not running then needWakeup = false is violated (which comes from (6)). No matter what,
* contradiction. QED.
*
*/
if (needWakeup) {
needWakeup = false
wakeupCallback.invoke(NotUsed)
}
}
override def postStop(): Unit = {
// First announce that we are shutting down. This will notify late-comers to not even put anything in the queue
shuttingDown = true
// Anybody that missed the announcement needs to be notified.
var event = queue.poll()
while (event ne null) {
event match {
case Register(_, demandCallback) ⇒ demandCallback.invoke(MergeHub.Cancel)
case _ ⇒
}
event = queue.poll()
}
// Kill everyone else
val states = demands.valuesIterator
while (states.hasNext) {
states.next().close()
}
}
}
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Sink[T, NotUsed]) = {
val idCounter = new AtomicLong()
val logic: MergedSourceLogic = new MergedSourceLogic(shape, idCounter)
val sink = new GraphStage[SinkShape[T]] {
val in: Inlet[T] = Inlet("MergeHub.in")
override val shape: SinkShape[T] = SinkShape(in)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler {
// Start from non-zero demand to avoid initial delays.
// The HUB will expect this behavior.
private[this] var demand: Long = perProducerBufferSize
private[this] val id = idCounter.getAndIncrement()
override def preStart(): Unit = {
if (!logic.isShuttingDown) {
logic.enqueue(Register(id, getAsyncCallback(onDemand)))
// At this point, we could be in the unfortunate situation that:
// - we missed the shutdown announcement and entered this arm of the if statement
// - *before* we enqueued our Register event, the Hub already finished looking at the queue
// and is now dead, so we are never notified again.
// To safeguard against this, we MUST check the announcement again. This is enough:
// if the Hub is no longer looking at the queue, then isShuttingDown must already be true.
if (!logic.isShuttingDown) pullWithDemand()
else completeStage()
} else {
completeStage()
}
}
override def postStop(): Unit = {
// Unlike in the case of preStart, we don't care about the Hub no longer looking at the queue.
if (!logic.isShuttingDown) logic.enqueue(Deregister(id))
}
override def onPush(): Unit = {
logic.enqueue(Element(id, grab(in)))
if (demand > 0) pullWithDemand()
}
private def pullWithDemand(): Unit = {
demand -= 1
pull(in)
}
// Make some noise
override def onUpstreamFailure(ex: Throwable): Unit = {
throw new MergeHub.ProducerFailed("Upstream producer failed with exception, " +
"removing from MergeHub now", ex)
}
private def onDemand(moreDemand: Long): Unit = {
if (moreDemand == MergeHub.Cancel) completeStage()
else {
demand += moreDemand
if (!hasBeenPulled(in)) pullWithDemand()
}
}
setHandler(in, this)
}
}
(logic, Sink.fromGraph(sink))
}
}
/**
* A BroadcastHub is a special streaming hub that is able to broadcast streamed elements to a dynamic set of consumers.
* It consists of two parts, a [[Sink]] and a [[Source]]. The [[Sink]] broadcasts elements from a producer to the
* actually live consumers it has. Once the producer has been materialized, the [[Sink]] it feeds into returns a
* materialized value which is the corresponding [[Source]]. This [[Source]] can be materialized arbitrarily many times,
* where each of the new materializations will receive their elements from the original [[Sink]].
*/
object BroadcastHub {
/**
* Creates a [[Sink]] that receives elements from its upstream producer and broadcasts them to a dynamic set
* of consumers. After the [[Sink]] returned by this method is materialized, it returns a [[Source]] as materialized
* value. This [[Source]] can be materialized arbitrarily many times and each materialization will receive the
* broadcast elements from the original [[Sink]].
*
* Every new materialization of the [[Sink]] results in a new, independent hub, which materializes to its own
* [[Source]] for consuming the [[Sink]] of that materialization.
*
* If the original [[Sink]] is failed, then the failure is immediately propagated to all of its materialized
* [[Source]]s (possibly jumping over already buffered elements). If the original [[Sink]] is completed, then
* all corresponding [[Source]]s are completed. Both failure and normal completion are "remembered" and later
* materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are
* cancelled are simply removed from the dynamic set of consumers.
*
* @param bufferSize Buffer size used by the producer. Gives an upper bound on how "far" from each other two
* concurrent consumers can be in terms of elements. If this buffer is full, the producer
* is backpressured. Must be a power of two and less than 4096.
*/
def sink[T](bufferSize: Int): Sink[T, Source[T, NotUsed]] = Sink.fromGraph(new BroadcastHub[T](bufferSize))
/**
* Creates a [[Sink]] that receives elements from its upstream producer and broadcasts them to a dynamic set
* of consumers. After the [[Sink]] returned by this method is materialized, it returns a [[Source]] as materialized
* value. This [[Source]] can be materialized arbitrarily many times and each materialization will receive the
* broadcast elements from the original [[Sink]].
*
* Every new materialization of the [[Sink]] results in a new, independent hub, which materializes to its own
* [[Source]] for consuming the [[Sink]] of that materialization.
*
* If the original [[Sink]] is failed, then the failure is immediately propagated to all of its materialized
* [[Source]]s (possibly jumping over already buffered elements). If the original [[Sink]] is completed, then
* all corresponding [[Source]]s are completed. Both failure and normal completion are "remembered" and later
* materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are
* cancelled are simply removed from the dynamic set of consumers.
*
*/
def sink[T]: Sink[T, Source[T, NotUsed]] = sink(bufferSize = 256)
}
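/*
 * Illustrative usage sketch, not part of this commit. The object name,
 * system name and the println consumers are assumptions.
 */
object BroadcastHubUsageSketch {
  import akka.actor.ActorSystem
  import akka.stream.ActorMaterializer

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem("broadcast-hub-sketch")
    implicit val materializer = ActorMaterializer()

    // Running the producer once yields a Source that may be materialized many times:
    val fromProducer: Source[Int, NotUsed] =
      Source(1 to 100).runWith(BroadcastHub.sink(bufferSize = 256))

    // Each materialization below is an independent consumer of the same elements:
    fromProducer.runWith(Sink.foreach(i ⇒ println(s"consumer A: $i")))
    fromProducer.runWith(Sink.foreach(i ⇒ println(s"consumer B: $i")))
  }
}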
/**
* INTERNAL API
*/
private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMaterializedValue[SinkShape[T], Source[T, NotUsed]] {
require(bufferSize > 0, "Buffer size must be positive")
require(bufferSize < 4096, "Buffer size larger than 4095 is not allowed")
require((bufferSize & bufferSize - 1) == 0, "Buffer size must be a power of two")
private val Mask = bufferSize - 1
private val WheelMask = (bufferSize * 2) - 1
val in: Inlet[T] = Inlet("BroadcastHub.in")
override val shape: SinkShape[T] = SinkShape(in)
// Half of buffer size, rounded up
private[this] val DemandThreshold = (bufferSize / 2) + (bufferSize % 2)
private sealed trait HubEvent
private object RegistrationPending extends HubEvent
private final case class UnRegister(id: Long, previousOffset: Int, finalOffset: Int) extends HubEvent
private final case class Advance(id: Long, previousOffset: Int) extends HubEvent
private final case class NeedWakeup(id: Long, previousOffset: Int, currentOffset: Int) extends HubEvent
private final case class Consumer(id: Long, callback: AsyncCallback[ConsumerEvent])
private object Completed
private sealed trait HubState
private case class Open(callbackFuture: Future[AsyncCallback[HubEvent]], registrations: List[Consumer]) extends HubState
private case class Closed(failure: Option[Throwable]) extends HubState
private class BroadcastSinkLogic(_shape: Shape)
extends GraphStageLogic(_shape) with InHandler {
private[this] val callbackPromise: Promise[AsyncCallback[HubEvent]] = Promise()
private[this] val noRegistrationsState = Open(callbackPromise.future, Nil)
val state = new AtomicReference[HubState](noRegistrationsState)
// Start from values that will almost immediately overflow. This has no effect on performance, any starting
// number will do; however, this protects against regressions, as these values *almost surely* overflow and fail
// tests if someone makes a mistake.
@volatile private[this] var tail = Int.MaxValue
private[this] var head = Int.MaxValue
/*
* An Array with a published tail ("latest message") and a privately maintained head ("earliest buffered message").
* Elements are published by simply putting them into the array and bumping the tail. If necessary, certain
* consumers are sent a wakeup message through an AsyncCallback.
*/
private[this] val queue = Array.ofDim[AnyRef](bufferSize)
/* This is basically a classic Bucket Queue: https://en.wikipedia.org/wiki/Bucket_queue
* (in fact, this is the variant described in the Optimizations section, where the given set
* of priorities always falls into a range).
*
* This wheel tracks the position of Consumers relative to the slowest ones. Every slot
* contains a list of Consumers known to be at that location (this might be out of date!).
* Consumers from time to time send Advance messages to indicate that they have progressed
* by reading from the broadcast queue. Consumers that are blocked (due to reaching tail) request
* a wakeup and update their position at the same time.
*
*/
private[this] val consumerWheel = Array.fill[List[Consumer]](bufferSize * 2)(Nil)
private[this] var activeConsumers = 0
override def preStart(): Unit = {
setKeepGoing(true)
callbackPromise.success(getAsyncCallback[HubEvent](onEvent))
pull(in)
}
// Cannot complete immediately if there is no space in the queue to put the completion marker
override def onUpstreamFinish(): Unit = if (!isFull) complete()
override def onPush(): Unit = {
publish(grab(in))
if (!isFull) pull(in)
}
private def onEvent(ev: HubEvent): Unit = {
ev match {
case RegistrationPending ⇒
state.getAndSet(noRegistrationsState).asInstanceOf[Open].registrations foreach { consumer ⇒
val startFrom = head
activeConsumers += 1
addConsumer(consumer, startFrom)
consumer.callback.invoke(Initialize(startFrom))
}
case UnRegister(id, previousOffset, finalOffset) ⇒
activeConsumers -= 1
val consumer = findAndRemoveConsumer(id, previousOffset)
if (activeConsumers == 0) {
if (isClosed(in)) completeStage()
else if (head != finalOffset) {
// If our final consumer goes away, we roll forward the buffer so a subsequent consumer does not
// see the already consumed elements. This feature is quite handy.
while (head != finalOffset) {
queue(head & Mask) = null
head += 1
}
head = finalOffset
if (!hasBeenPulled(in)) pull(in)
}
} else checkUnblock(previousOffset)
case Advance(id, previousOffset) ⇒
val newOffset = previousOffset + DemandThreshold
// Move the consumer from its last known offset to its new one. Check if we are unblocked.
val consumer = findAndRemoveConsumer(id, previousOffset)
addConsumer(consumer, newOffset)
checkUnblock(previousOffset)
case NeedWakeup(id, previousOffset, currentOffset) ⇒
// Move the consumer from its last known offset to its new one. Check if we are unblocked.
val consumer = findAndRemoveConsumer(id, previousOffset)
addConsumer(consumer, currentOffset)
// Also check if the consumer is now unblocked, since we may have published an element after it went to sleep.
if (currentOffset != tail) consumer.callback.invoke(Wakeup)
checkUnblock(previousOffset)
}
}
// Producer API
// We are full if the distance between the slowest (known) consumer and the fastest (known) consumer is
// the buffer size. We must wait until the slowest either advances, or cancels.
private def isFull: Boolean = tail - head == bufferSize
override def onUpstreamFailure(ex: Throwable): Unit = {
val failMessage = HubCompleted(Some(ex))
// Notify pending consumers and set tombstone
state.getAndSet(Closed(Some(ex))).asInstanceOf[Open].registrations foreach { consumer ⇒
consumer.callback.invoke(failMessage)
}
// Notify registered consumers
consumerWheel.iterator.flatMap(_.iterator) foreach { consumer ⇒
consumer.callback.invoke(failMessage)
}
failStage(ex)
}
/*
* This method removes a consumer with a given ID from the known offset and returns it.
*
* NB: You cannot remove a consumer without knowing its last offset! Consumers on the Source side must always
* track this so this can be a fast operation.
*/
private def findAndRemoveConsumer(id: Long, offset: Int): Consumer = {
// TODO: Try to eliminate modulo division somehow...
val wheelSlot = offset & WheelMask
var consumersInSlot = consumerWheel(wheelSlot)
//debug(s"consumers before removal $consumersInSlot")
var remainingConsumersInSlot: List[Consumer] = Nil
var removedConsumer: Consumer = null
while (consumersInSlot.nonEmpty) {
val consumer = consumersInSlot.head
if (consumer.id != id) remainingConsumersInSlot = consumer :: remainingConsumersInSlot
else removedConsumer = consumer
consumersInSlot = consumersInSlot.tail
}
consumerWheel(wheelSlot) = remainingConsumersInSlot
removedConsumer
}
/*
* After removing a Consumer from a wheel slot (because it cancelled, or we moved it because it advanced)
* we need to check if it was blocking us from advancing (being the slowest).
*/
private def checkUnblock(offsetOfConsumerRemoved: Int): Unit = {
if (unblockIfPossible(offsetOfConsumerRemoved)) {
if (isClosed(in)) complete()
else if (!hasBeenPulled(in)) pull(in)
}
}
private def unblockIfPossible(offsetOfConsumerRemoved: Int): Boolean = {
var unblocked = false
if (offsetOfConsumerRemoved == head) {
// Try to advance along the wheel. We can skip any wheel slots which have no waiting Consumers, until
// we either find a nonempty one, or we reach the end of the buffer.
while (consumerWheel(head & WheelMask).isEmpty && head != tail) {
queue(head & Mask) = null
head += 1
unblocked = true
}
}
unblocked
}
private def addConsumer(consumer: Consumer, offset: Int): Unit = {
val slot = offset & WheelMask
consumerWheel(slot) = consumer :: consumerWheel(slot)
}
/*
* Send a wakeup signal to all the Consumers at a certain wheel index. Note, this needs the actual index,
* which is offset modulo (bufferSize * 2).
*/
private def wakeupIdx(idx: Int): Unit = {
val itr = consumerWheel(idx).iterator
while (itr.hasNext) itr.next().callback.invoke(Wakeup)
}
private def complete(): Unit = {
val idx = tail & Mask
val wheelSlot = tail & WheelMask
queue(idx) = Completed
wakeupIdx(wheelSlot)
tail = tail + 1
if (activeConsumers == 0) {
val completedMessage = HubCompleted(None)
// Notify pending consumers and set tombstone
state.getAndSet(Closed(None)).asInstanceOf[Open].registrations foreach { consumer ⇒
consumer.callback.invoke(completedMessage)
}
// Existing consumers have already consumed all elements and will see completion status in the queue
completeStage()
}
}
private def publish(elem: T): Unit = {
val idx = tail & Mask
val wheelSlot = tail & WheelMask
queue(idx) = elem.asInstanceOf[AnyRef]
// Publish the new tail before calling the wakeup
tail = tail + 1
wakeupIdx(wheelSlot)
}
// Consumer API
def poll(offset: Int): AnyRef = {
if (offset == tail) null
else queue(offset & Mask)
}
setHandler(in, this)
}
private sealed trait ConsumerEvent
private object Wakeup extends ConsumerEvent
private final case class HubCompleted(failure: Option[Throwable]) extends ConsumerEvent
private final case class Initialize(offset: Int) extends ConsumerEvent
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Source[T, NotUsed]) = {
val idCounter = new AtomicLong()
val logic = new BroadcastSinkLogic(shape)
val source = new GraphStage[SourceShape[T]] {
val out: Outlet[T] = Outlet("BroadcastHub.out")
override val shape: SourceShape[T] = SourceShape(out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler {
private[this] var untilNextAdvanceSignal = DemandThreshold
private[this] val id = idCounter.getAndIncrement()
private[this] var initialized = false
private[this] var hubCallback: AsyncCallback[HubEvent] = _
/*
* We need to track the last offset that we published to the Hub. The reason is that, for efficiency,
* the Hub can only look up and move/remove Consumers with known wheel slots. This means that no extra hash-map
* is needed, but it also means that we need to keep track of both our current offset, and the last one that
* we published.
*/
private[this] var previousPublishedOffset = 0
private[this] var offset = 0
override def preStart(): Unit = {
val callback = getAsyncCallback(onCommand)
val onHubReady: Try[AsyncCallback[HubEvent]] ⇒ Unit = {
case Success(callback) ⇒
hubCallback = callback
callback.invoke(RegistrationPending)
case Failure(ex) ⇒
failStage(ex)
}
@tailrec def register(): Unit = {
logic.state.get() match {
case Closed(Some(ex)) ⇒ failStage(ex)
case Closed(None) ⇒ completeStage()
case previousState @ Open(callbackFuture, registrations) ⇒
val newRegistrations = Consumer(id, callback) :: registrations
if (logic.state.compareAndSet(previousState, Open(callbackFuture, newRegistrations))) {
callbackFuture.onComplete(getAsyncCallback(onHubReady).invoke)(materializer.executionContext)
} else register()
}
}
register()
}
override def onPull(): Unit = {
if (initialized) {
val elem = logic.poll(offset)
elem match {
case null ⇒
hubCallback.invoke(NeedWakeup(id, previousPublishedOffset, offset))
previousPublishedOffset = offset
untilNextAdvanceSignal = DemandThreshold
case Completed ⇒
completeStage()
case _ ⇒
push(out, elem.asInstanceOf[T])
offset += 1
untilNextAdvanceSignal -= 1
if (untilNextAdvanceSignal == 0) {
untilNextAdvanceSignal = DemandThreshold
val previousOffset = previousPublishedOffset
previousPublishedOffset += DemandThreshold
hubCallback.invoke(Advance(id, previousOffset))
}
}
}
}
override def postStop(): Unit = {
if (hubCallback ne null)
hubCallback.invoke(UnRegister(id, previousPublishedOffset, offset))
}
private def onCommand(cmd: ConsumerEvent): Unit = cmd match {
case HubCompleted(Some(ex)) ⇒ failStage(ex)
case HubCompleted(None) ⇒ completeStage()
case Wakeup ⇒
if (isAvailable(out)) onPull()
case Initialize(initialOffset) ⇒
initialized = true
previousPublishedOffset = initialOffset
offset = initialOffset
if (isAvailable(out)) onPull()
}
setHandler(out, this)
}
}
(logic, Source.fromGraph(source))
}
}

View file

@@ -3,6 +3,8 @@
*/
package akka.stream.scaladsl
import scala.concurrent.Future
import akka.NotUsed
/**

View file

@@ -3,23 +3,20 @@
*/
package akka.stream.scaladsl
import java.util.{ Spliterators, Spliterator }
import java.util.stream.StreamSupport
import akka.{ Done, NotUsed }
import akka.dispatch.ExecutionContexts
import akka.actor.{ Status, ActorRef, Props }
import akka.actor.{ ActorRef, Props, Status }
import akka.stream.actor.ActorSubscriber
import akka.stream.impl.Stages.DefaultAttributes
import akka.stream.impl.StreamLayout.Module
import akka.stream.impl._
import akka.stream.stage.{ Context, PushStage, SyncDirective, TerminationDirective }
import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, InHandler }
import akka.stream.{ javadsl, _ }
import org.reactivestreams.{ Publisher, Subscriber }
import scala.annotation.tailrec
import scala.collection.immutable
import scala.concurrent.duration.Duration.Inf
import scala.concurrent.{ Await, ExecutionContext, Future }
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }
/**
@@ -244,10 +241,23 @@ object Sink {
* The returned [[scala.concurrent.Future]] will be completed with the value of the final
* function evaluation when the input stream ends, or completed with `Failure`
* if there is a failure signaled in the stream.
*
* @see [[#foldAsync]]
*/
def fold[U, T](zero: U)(f: (U, T) ⇒ U): Sink[T, Future[U]] =
Flow[T].fold(zero)(f).toMat(Sink.head)(Keep.right).named("foldSink")
/**
* A `Sink` that will invoke the given asynchronous function for every received element, giving it its previous
* output (or the given `zero` value) and the element as input.
* The returned [[scala.concurrent.Future]] will be completed with the value of the final
* function evaluation when the input stream ends, or completed with `Failure`
* if there is a failure signaled in the stream.
*
* @see [[#fold]]
*/
def foldAsync[U, T](zero: U)(f: (U, T) ⇒ Future[U]): Sink[T, Future[U]] = Flow[T].foldAsync(zero)(f).toMat(Sink.head)(Keep.right).named("foldAsyncSink")
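// Illustrative sketch, not part of this commit (`lookupAndAdd` is an assumed
// asynchronous helper; implicit materializer required):
//
//   def lookupAndAdd(acc: Long, elem: Int): Future[Long] = Future.successful(acc + elem)
//
//   val total: Future[Long] = Source(1 to 10).runWith(Sink.foldAsync(0L)(lookupAndAdd))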
/**
* A `Sink` that will invoke the given function for every received element, giving it its previous
* output (from the second element) and the element as input.
@@ -270,23 +280,35 @@
*/
def onComplete[T](callback: Try[Done] ⇒ Unit): Sink[T, NotUsed] = {
def newOnCompleteStage(): PushStage[T, NotUsed] = {
new PushStage[T, NotUsed] {
override def onPush(elem: T, ctx: Context[NotUsed]): SyncDirective = ctx.pull()
def newOnCompleteStage(): GraphStage[FlowShape[T, NotUsed]] = {
new GraphStage[FlowShape[T, NotUsed]] {
override def onUpstreamFailure(cause: Throwable, ctx: Context[NotUsed]): TerminationDirective = {
callback(Failure(cause))
ctx.fail(cause)
}
val in = Inlet[T]("in")
val out = Outlet[NotUsed]("out")
override val shape = FlowShape.of(in, out)
override def onUpstreamFinish(ctx: Context[NotUsed]): TerminationDirective = {
callback(Success(Done))
ctx.finish()
}
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) with InHandler with OutHandler {
override def onPush(): Unit = pull(in)
override def onPull(): Unit = pull(in)
override def onUpstreamFailure(cause: Throwable): Unit = {
callback(Failure(cause))
failStage(cause)
}
override def onUpstreamFinish(): Unit = {
callback(Success(Done))
completeStage()
}
setHandlers(in, out, this)
}
}
}
Flow[T].transform(newOnCompleteStage).to(Sink.ignore).named("onCompleteSink")
Flow[T].via(newOnCompleteStage()).to(Sink.ignore).named("onCompleteSink")
}
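A usage sketch of the (unchanged) public surface of this sink, with illustrative system and materializer names:

import scala.util.{ Failure, Success }
import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }

object OnCompleteSketch extends App {
  implicit val system = ActorSystem("oncomplete-sketch")
  implicit val mat = ActorMaterializer()

  // the callback fires exactly once, when the stream terminates
  Source(1 to 3).runWith(Sink.onComplete {
    case Success(Done) ⇒ println("stream completed")
    case Failure(ex)   ⇒ println(s"stream failed: ${ex.getMessage}")
  })
}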
/**


@@ -16,7 +16,7 @@ import org.reactivestreams.{ Publisher, Subscriber }
import scala.annotation.tailrec
import scala.annotation.unchecked.uncheckedVariance
import scala.collection.immutable
import scala.concurrent.duration.{ FiniteDuration }
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ Future, Promise }
import java.util.concurrent.CompletionStage
import scala.compat.java8.FutureConverters._
@@ -88,8 +88,17 @@ final class Source[+Out, +Mat](override val module: Module)
* function evaluation when the input stream ends, or completed with `Failure`
* if there is a failure signaled in the stream.
*/
def runFold[U](zero: U)(f: (U, Out) ⇒ U)(implicit materializer: Materializer): Future[U] =
runWith(Sink.fold(zero)(f))
def runFold[U](zero: U)(f: (U, Out) ⇒ U)(implicit materializer: Materializer): Future[U] = runWith(Sink.fold(zero)(f))
/**
* Shortcut for running this `Source` with a foldAsync function.
* The given function is invoked for every received element, giving it its previous
* output (or the given `zero` value) and the element as input.
* The returned [[scala.concurrent.Future]] will be completed with the value of the final
* function evaluation when the input stream ends, or completed with `Failure`
* if there is a failure signaled in the stream.
*/
def runFoldAsync[U](zero: U)(f: (U, Out) ⇒ Future[U])(implicit materializer: Materializer): Future[U] = runWith(Sink.foldAsync(zero)(f))
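A minimal sketch of the new shortcut (an implicit system, materializer and dispatcher are assumed in scope, as in the earlier sketches):

// assuming: implicit ActorSystem, ActorMaterializer and an ExecutionContext in scope
val total: Future[Int] =
  Source(1 to 5).runFoldAsync(0)((acc, n) ⇒ Future.successful(acc + n))
// total eventually completes with 15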
/**
* Shortcut for running this `Source` with a reduce function.


@@ -49,7 +49,7 @@ object AbstractStage {
private var currentStage: AbstractStage[In, Out, Directive, Directive, Context[Out], LifecycleContext] = stage
{
// No need to refer to the handle in a private val
// No need to refer to the handler in a private val
val handler = new InHandler with OutHandler {
override def onPush(): Unit =
try { currentStage.onPush(grab(shape.in), ctx) } catch { case NonFatal(ex) ⇒ onSupervision(ex) }


@@ -100,6 +100,9 @@ object Dependencies {
val junitIntf = "com.novocode" % "junit-interface" % "0.11" % "test" // MIT
val scalaXml = "org.scala-lang.modules" %% "scala-xml" % "1.0.4" % "test"
// in-memory filesystem for file related tests
val jimfs = "com.google.jimfs" % "jimfs" % "1.1" % "test" // ApacheV2
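For context, a sketch of what the new jimfs test dependency enables (API per com.google.jimfs; the path is illustrative): file-based stream tests can run against a purely in-memory filesystem instead of the real disk.

import java.nio.file.Files
import com.google.common.jimfs.{ Configuration, Jimfs }

val fs = Jimfs.newFileSystem(Configuration.unix()) // POSIX-like, purely in-memory
val path = fs.getPath("/data/out.txt")
Files.createDirectories(path.getParent)
Files.write(path, "hello".getBytes("UTF-8")) // no real disk I/O involved
// akka.stream.scaladsl.FileIO.toPath(path) can target such a Path in tests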
// metrics, measurements, perf testing
val metrics = "com.codahale.metrics" % "metrics-core" % "3.0.2" % "test" // ApacheV2
val metricsJvm = "com.codahale.metrics" % "metrics-jvm" % "3.0.2" % "test" // ApacheV2
@@ -212,7 +215,7 @@ object Dependencies {
lazy val streamTestkit = l ++= Seq(Test.scalatest.value, Test.scalacheck.value, Test.junit)
lazy val streamTests = l ++= Seq(Test.scalatest.value, Test.scalacheck.value, Test.junit, Test.commonsIo)
lazy val streamTests = l ++= Seq(Test.scalatest.value, Test.scalacheck.value, Test.junit, Test.commonsIo, Test.jimfs)
lazy val streamTestsTck = l ++= Seq(Test.scalatest.value, Test.scalacheck.value, Test.junit, Test.reactiveStreamsTck)


@@ -865,6 +865,9 @@ object MiMa extends AutoPlugin {
// internal api
FilterAnyProblemStartingWith("akka.stream.impl"),
// #20888 new FoldAsync op for Flow
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.foldAsync"),
// #20214 SNI disabling for single connections (AkkaSSLConfig being passed around)
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.ConnectionContext.sslConfig"), // class meant only for internal extension
@@ -921,17 +924,17 @@ object MiMa extends AutoPlugin {
// Interpreter internals change
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.stage.GraphStageLogic.portToConn"),
// #20994 adding new decode method, since we're on JDK7+ now
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.util.ByteString.decodeString"),
// #20508 HTTP: Document how to be able to support custom request methods
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.HttpMethod.getRequestEntityAcceptance"),
// #20976 provide different options to deal with the illegal response header value
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.settings.ParserSettings.getIllegalResponseHeaderValueProcessingMode"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.settings.ParserSettings.illegalResponseHeaderValueProcessingMode"),
ProblemFilters.exclude[DirectAbstractMethodProblem]("akka.stream.ActorMaterializer.actorOf"),
// #20628 migrate Masker to GraphStage
@@ -940,7 +943,7 @@ object MiMa extends AutoPlugin {
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.engine.ws.Masking#Masker.initial"),
ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.ws.Masking$Masker$Running"),
ProblemFilters.exclude[MissingTypesProblem]("akka.http.impl.engine.ws.Masking$Unmasking"),
// #
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.HttpEntity.discardBytes"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.HttpEntity.discardBytes"),
@@ -953,12 +956,16 @@ object MiMa extends AutoPlugin {
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.actor.Deployer.lookup"),
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.WildcardTree.apply"),
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.WildcardTree.find"),
// #20942 ClusterSingleton
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.singleton.ClusterSingletonManager.addRemoved"),
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.singleton.ClusterSingletonManager.selfAddressOption")
),
"2.4.9" -> Seq(
// #21025 new orElse flow op
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.orElseGraph"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.orElse"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.orElseMat")
)
)
}
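For context, a hedged sketch of how one of these exclusions would be declared with sbt-mima-plugin in a standalone build (the `mimaBinaryIssueFilters` key and the core import path are assumed from the plugin's public API; the filter shown mirrors the foldAsync entry above):

// build.sbt (sketch)
import com.typesafe.tools.mima.core._

mimaBinaryIssueFilters += ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.foldAsync")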