From 75355b4549a22262aeda4fe874b7630486d8afd8 Mon Sep 17 00:00:00 2001 From: Ian Clegg Date: Tue, 28 Jun 2016 19:36:48 +0100 Subject: [PATCH 001/155] 19615 set redelivery delay on camel unit test to speed it up and support future versions of camel --- .../src/test/scala/akka/camel/ConsumerIntegrationTest.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala index b673fa9057..1c4f03a16a 100644 --- a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala @@ -123,13 +123,13 @@ class ConsumerIntegrationTest extends WordSpec with Matchers with NonSharedCamel } "Error passing consumer supports redelivery through route modification" in { - val ref = start(new FailingOnceConsumer("direct:failing-once-concumer") { + val ref = start(new FailingOnceConsumer("direct:failing-once-consumer") { override def onRouteDefinition = (rd: RouteDefinition) ⇒ { - rd.onException(classOf[TestException]).maximumRedeliveries(1).end + rd.onException(classOf[TestException]).redeliveryDelay(0L).maximumRedeliveries(1).end } }, name = "direct-failing-once-consumer") filterEvents(EventFilter[TestException](occurrences = 1)) { - camel.sendTo("direct:failing-once-concumer", msg = "hello") should ===("accepted: hello") + camel.sendTo("direct:failing-once-consumer", msg = "hello") should ===("accepted: hello") } stop(ref) } From c81ea4f36ba4677edda5ac3a91f6025628ede7aa Mon Sep 17 00:00:00 2001 From: Morton Fox Date: Fri, 8 Jul 2016 12:36:33 -0400 Subject: [PATCH 002/155] Fix the StackOverflow link (#20923) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 339887d464..16cd31d8d4 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ In addition to that, you may enjoy following: - The [Akka Team Blog](http://blog.akka.io) - [@akkateam](https://twitter.com/akkateam) on Twitter -- Questions tagged [#akka on StackOverflow](stackoverflow.com/questions/tagged/akka) +- Questions tagged [#akka on StackOverflow](http://stackoverflow.com/questions/tagged/akka) Contributing ------------ From 32810e1f1da45fff8dd79f41b466e6735de4b356 Mon Sep 17 00:00:00 2001 From: Nafer Sanabria Date: Sun, 10 Jul 2016 10:41:57 -0500 Subject: [PATCH 003/155] =htc, doc replace usages of deprecated methods of FileIO (#20928) --- .../scala/akka/cluster/ddata/Replicator.scala | 8 ++++---- .../stream/migration-guide-2.0-2.4-java.rst | 4 ++-- akka-docs/rst/java/stream/stream-io.rst | 2 +- akka-docs/rst/java/stream/stream-quickstart.rst | 2 +- .../server/FileUploadExamplesSpec.scala | 2 +- .../stream/migration-guide-2.0-2.4-scala.rst | 4 ++-- akka-docs/rst/scala/stream/stream-io.rst | 2 +- .../rst/scala/stream/stream-quickstart.rst | 2 +- .../directives/FileAndResourceDirectives.scala | 5 ++--- .../directives/FileUploadDirectives.scala | 6 +++--- .../main/scala/akka/stream/javadsl/FileIO.scala | 17 +++++++++++++---- .../scala/akka/stream/scaladsl/FileIO.scala | 6 +++--- 12 files changed, 34 insertions(+), 26 deletions(-) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index 8ccb288c0e..74a4d43553 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ 
b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -276,14 +276,14 @@ object Replicator { final case class Subscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef) extends ReplicatorMessage /** * Unregister a subscriber. - * - * @see [[Replicator.Subscribe]] + * + * @see [[Replicator.Subscribe]] */ final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef) extends ReplicatorMessage /** * The data value is retrieved with [[#get]] using the typed key. - * - * @see [[Replicator.Subscribe]] + * + * @see [[Replicator.Subscribe]] */ final case class Changed[A <: ReplicatedData](key: Key[A])(data: A) extends ReplicatorMessage { /** diff --git a/akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst b/akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst index eeabc82333..b00fe25d92 100644 --- a/akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst +++ b/akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst @@ -136,8 +136,8 @@ IO Sources / Sinks materialize IOResult Materialized values of the following sources and sinks: - * ``FileIO.fromFile`` - * ``FileIO.toFile`` + * ``FileIO.fromPath`` + * ``FileIO.toPath`` * ``StreamConverters.fromInputStream`` * ``StreamConverters.fromOutputStream`` diff --git a/akka-docs/rst/java/stream/stream-io.rst b/akka-docs/rst/java/stream/stream-io.rst index 71f2786ae5..598b4ee04f 100644 --- a/akka-docs/rst/java/stream/stream-io.rst +++ b/akka-docs/rst/java/stream/stream-io.rst @@ -100,7 +100,7 @@ Akka Streams provide simple Sources and Sinks that can work with :class:`ByteStr on files. -Streaming data from a file is as easy as creating a `FileIO.fromFile` given a target file, and an optional +Streaming data from a file is as easy as creating a `FileIO.fromPath` given a target path, and an optional ``chunkSize`` which determines the buffer size determined as one "element" in such stream: .. includecode:: ../code/docs/stream/io/StreamFileDocTest.java#file-source diff --git a/akka-docs/rst/java/stream/stream-quickstart.rst b/akka-docs/rst/java/stream/stream-quickstart.rst index 12d9016502..c3c7e73ee9 100644 --- a/akka-docs/rst/java/stream/stream-quickstart.rst +++ b/akka-docs/rst/java/stream/stream-quickstart.rst @@ -90,7 +90,7 @@ accepts strings as its input and when materialized it will create auxiliary information of type ``CompletionStage`` (when chaining operations on a :class:`Source` or :class:`Flow` the type of the auxiliary information—called the “materialized value”—is given by the leftmost starting point; since we want -to retain what the ``FileIO.toFile`` sink has to offer, we need to say +to retain what the ``FileIO.toPath`` sink has to offer, we need to say ``Keep.right()``). 
We can use the new and shiny :class:`Sink` we just created by diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/FileUploadExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/FileUploadExamplesSpec.scala index 1a52983dcf..5bdf363a14 100644 --- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/FileUploadExamplesSpec.scala +++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/FileUploadExamplesSpec.scala @@ -35,7 +35,7 @@ class FileUploadExamplesSpec extends RoutingSpec { // stream into a file as the chunks of it arrives and return a future // file to where it got stored val file = File.createTempFile("upload", "tmp") - b.entity.dataBytes.runWith(FileIO.toFile(file)).map(_ => + b.entity.dataBytes.runWith(FileIO.toPath(file.toPath)).map(_ => (b.name -> file)) case b: BodyPart => diff --git a/akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst b/akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst index 4f90f6162f..7d0bb64d8d 100644 --- a/akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst +++ b/akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst @@ -138,8 +138,8 @@ IO Sources / Sinks materialize IOResult Materialized values of the following sources and sinks: - * ``FileIO.fromFile`` - * ``FileIO.toFile`` + * ``FileIO.fromPath`` + * ``FileIO.toPath`` * ``StreamConverters.fromInputStream`` * ``StreamConverters.fromOutputStream`` diff --git a/akka-docs/rst/scala/stream/stream-io.rst b/akka-docs/rst/scala/stream/stream-io.rst index 760bff8725..178b861b11 100644 --- a/akka-docs/rst/scala/stream/stream-io.rst +++ b/akka-docs/rst/scala/stream/stream-io.rst @@ -100,7 +100,7 @@ Akka Streams provide simple Sources and Sinks that can work with :class:`ByteStr on files. -Streaming data from a file is as easy as creating a `FileIO.fromFile` given a target file, and an optional +Streaming data from a file is as easy as creating a `FileIO.fromPath` given a target path, and an optional ``chunkSize`` which determines the buffer size determined as one "element" in such stream: .. includecode:: ../code/docs/stream/io/StreamFileDocSpec.scala#file-source diff --git a/akka-docs/rst/scala/stream/stream-quickstart.rst b/akka-docs/rst/scala/stream/stream-quickstart.rst index cdc37c1da2..cc599e73f7 100644 --- a/akka-docs/rst/scala/stream/stream-quickstart.rst +++ b/akka-docs/rst/scala/stream/stream-quickstart.rst @@ -91,7 +91,7 @@ accepts strings as its input and when materialized it will create auxiliary information of type ``Future[IOResult]`` (when chaining operations on a :class:`Source` or :class:`Flow` the type of the auxiliary information—called the “materialized value”—is given by the leftmost starting point; since we want -to retain what the ``FileIO.toFile`` sink has to offer, we need to say +to retain what the ``FileIO.toPath`` sink has to offer, we need to say ``Keep.right``). 
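For illustration, a minimal sketch of the ``Path``-based API these docs are migrated to (assuming an implicit materializer is in scope; the file names are placeholders)::

   import java.nio.file.Paths

   import akka.stream.scaladsl.{ FileIO, Source }
   import akka.util.ByteString

   // reads the file in 8192-byte chunks; materializes a Future[IOResult]
   val fileSource = FileIO.fromPath(Paths.get("in.txt"), chunkSize = 8192)
   // overwrites existing files by default; materializes a Future[IOResult]
   val fileSink = FileIO.toPath(Paths.get("out.txt"))
   // runWith keeps the sink's materialized value, as Keep.right would
   val result = Source.single(ByteString("hello")).runWith(fileSink)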
We can use the new and shiny :class:`Sink` we just created by diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala index c8edaa8bc0..a7e802aeb0 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala @@ -9,14 +9,13 @@ import java.io.File import java.net.{ URI, URL } import akka.http.javadsl.model -import akka.http.javadsl.model.RequestEntity import akka.stream.ActorAttributes import akka.stream.scaladsl.{ FileIO, StreamConverters } import scala.annotation.tailrec import akka.actor.ActorSystem import akka.event.LoggingAdapter -import akka.http.scaladsl.marshalling.{ Marshaller, Marshalling, ToEntityMarshaller } +import akka.http.scaladsl.marshalling.{ Marshaller, ToEntityMarshaller } import akka.http.scaladsl.model._ import akka.http.scaladsl.model.headers._ import akka.http.impl.util._ @@ -70,7 +69,7 @@ trait FileAndResourceDirectives { withRangeSupportAndPrecompressedMediaTypeSupportAndExtractSettings { settings ⇒ complete { HttpEntity.Default(contentType, file.length, - FileIO.fromFile(file).withAttributes(ActorAttributes.dispatcher(settings.fileIODispatcher))) + FileIO.fromPath(file.toPath).withAttributes(ActorAttributes.dispatcher(settings.fileIODispatcher))) } } } else complete(HttpEntity.Empty) diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileUploadDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileUploadDirectives.scala index 36045a88f7..3f497c4f8e 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileUploadDirectives.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileUploadDirectives.scala @@ -4,10 +4,10 @@ package akka.http.scaladsl.server.directives import java.io.File - -import akka.http.scaladsl.server.{ MissingFormFieldRejection, Directive1 } +import akka.http.scaladsl.server.{ Directive1, MissingFormFieldRejection } import akka.http.scaladsl.model.{ ContentType, Multipart } import akka.util.ByteString + import scala.concurrent.Future import scala.util.{ Failure, Success } import akka.stream.scaladsl._ @@ -41,7 +41,7 @@ trait FileUploadDirectives { case (fileInfo, bytes) ⇒ val destination = File.createTempFile("akka-http-upload", ".tmp") - val uploadedF: Future[(FileInfo, File)] = bytes.runWith(FileIO.toFile(destination)) + val uploadedF: Future[(FileInfo, File)] = bytes.runWith(FileIO.toPath(destination.toPath)) .map(_ ⇒ (fileInfo, destination)) onComplete[(FileInfo, File)](uploadedF).flatMap { diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala b/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala index 0b452c4bfc..9fde2002e2 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala @@ -34,7 +34,7 @@ object FileIO { def toFile(f: File): javadsl.Sink[ByteString, CompletionStage[IOResult]] = toPath(f.toPath) /** - * Creates a Sink that writes incoming [[ByteString]] elements to the given file. + * Creates a Sink that writes incoming [[ByteString]] elements to the given file path. * Overwrites existing files, if you want to append to an existing file use [[#file(Path, util.Set[StandardOpenOption])]]. 
* * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, @@ -43,7 +43,7 @@ object FileIO { * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * - * @param f The file to write to + * @param f The file path to write to */ def toPath(f: Path): javadsl.Sink[ByteString, CompletionStage[IOResult]] = new Sink(scaladsl.FileIO.toPath(f).toCompletionStage()) @@ -65,7 +65,7 @@ object FileIO { toPath(f.toPath) /** - * Creates a Sink that writes incoming [[ByteString]] elements to the given file. + * Creates a Sink that writes incoming [[ByteString]] elements to the given file path. * * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. @@ -73,7 +73,7 @@ object FileIO { * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * - * @param f The file to write to + * @param f The file path to write to * @param options File open options */ def toPath(f: Path, options: util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[IOResult]] = @@ -89,6 +89,8 @@ object FileIO { * * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, * and a possible exception if IO operation was not completed successfully. + * + * @param f the file to read from */ @deprecated("Use `fromPath` instead.", "2.4.5") def fromFile(f: File): javadsl.Source[ByteString, CompletionStage[IOResult]] = fromPath(f.toPath) @@ -103,6 +105,8 @@ object FileIO { * * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, * and a possible exception if IO operation was not completed successfully. + * + * @param f the file path to read from */ def fromPath(f: Path): javadsl.Source[ByteString, CompletionStage[IOResult]] = fromPath(f, 8192) @@ -116,6 +120,8 @@ object FileIO { * * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, * and a possible exception if IO operation was not completed successfully. + * @param f the file to read from + * @param chunkSize the size of each read operation */ @deprecated("Use `fromPath` instead.", "2.4.5") def fromFile(f: File, chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] = @@ -131,6 +137,9 @@ object FileIO { * * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, * and a possible exception if IO operation was not completed successfully. 
+ * + * @param f the file path to read from + * @param chunkSize the size of each read operation */ def fromPath(f: Path, chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] = new Source(scaladsl.FileIO.fromPath(f, chunkSize).toCompletionStage()) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala index 69942f543d..00ebfe4a6d 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala @@ -51,7 +51,7 @@ object FileIO { * It materializes a [[Future]] of [[IOResult]] containing the number of bytes read from the source file upon completion, * and a possible exception if IO operation was not completed successfully. * - * @param f the file to read from + * @param f the file path to read from * @param chunkSize the size of each read operation, defaults to 8192 */ def fromPath(f: Path, chunkSize: Int = 8192): Source[ByteString, Future[IOResult]] = @@ -74,7 +74,7 @@ object FileIO { toPath(f.toPath, options) /** - * Creates a Sink which writes incoming [[ByteString]] elements to the given file. Overwrites existing files by default. + * Creates a Sink which writes incoming [[ByteString]] elements to the given file path. Overwrites existing files by default. * * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. @@ -82,7 +82,7 @@ object FileIO { * This source is backed by an Actor which will use the dedicated `akka.stream.blocking-io-dispatcher`, * unless configured otherwise by using [[ActorAttributes]]. * - * @param f the file to write to + * @param f the file path to write to * @param options File open options, defaults to Set(WRITE, CREATE) */ def toPath(f: Path, options: Set[StandardOpenOption] = Set(WRITE, CREATE)): Sink[ByteString, Future[IOResult]] = From 9232f51a146a81059a18cff6dde2d8cff020a4f6 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Mon, 11 Jul 2016 11:59:39 +0200 Subject: [PATCH 004/155] =doc #20931 fix java docs link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 16cd31d8d4..5ac03a826f 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ Reference Documentation ----------------------- The reference documentation is available at [doc.akka.io](http://doc.akka.io), -for [Scala](http://doc.akka.io/docs/akka/current/scala.html) and [Java](http://doc.akka.io/docs/akka/current/scala.html). +for [Scala](http://doc.akka.io/docs/akka/current/scala.html) and [Java](http://doc.akka.io/docs/akka/current/java.html). 
Community From 9e6f346a192d0918f2615f566ce972b5750b4b56 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Mon, 11 Jul 2016 12:00:42 +0200 Subject: [PATCH 005/155] Add news link --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5ac03a826f..0e8fee2420 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ You can join these groups and chats to discuss and ask Akka related questions: In addition to that, you may enjoy following: +- The [news](http://akka.io/news) section of the page, which is updated whenever a new version is released - The [Akka Team Blog](http://blog.akka.io) - [@akkateam](https://twitter.com/akkateam) on Twitter - Questions tagged [#akka on StackOverflow](http://stackoverflow.com/questions/tagged/akka) From 1009f8e2358cbb2e7d42a52f3c38a27577b59518 Mon Sep 17 00:00:00 2001 From: Nafer Sanabria Date: Mon, 11 Jul 2016 07:13:40 -0500 Subject: [PATCH 006/155] =doc Fix indefinite articles typos in docs (#20924) --- akka-docs/rst/intro/getting-started.rst | 4 ++-- akka-docs/rst/java/camel.rst | 2 +- akka-docs/rst/java/http/routing-dsl/marshalling.rst | 4 ++-- akka-docs/rst/java/stream/stream-customize.rst | 2 +- akka-docs/rst/java/stream/stream-integrations.rst | 2 +- akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst | 4 ++-- akka-docs/rst/scala/camel.rst | 2 +- .../directives/future-directives/completeOrRecoverWith.rst | 2 +- akka-docs/rst/scala/stream/stream-customize.rst | 2 +- akka-docs/rst/scala/stream/stream-integrations.rst | 2 +- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/akka-docs/rst/intro/getting-started.rst b/akka-docs/rst/intro/getting-started.rst index 85d34100bd..9fcee093ce 100644 --- a/akka-docs/rst/intro/getting-started.rst +++ b/akka-docs/rst/intro/getting-started.rst @@ -212,12 +212,12 @@ For snapshot versions, the snapshot repository needs to be added as well: Using Akka with Eclipse ----------------------- -Setup SBT project and then use `sbteclipse `_ to generate a Eclipse project. +Setup SBT project and then use `sbteclipse `_ to generate an Eclipse project. Using Akka with IntelliJ IDEA ----------------------------- -Setup SBT project and then use `sbt-idea `_ to generate a IntelliJ IDEA project. +Setup SBT project and then use `sbt-idea `_ to generate an IntelliJ IDEA project. Using Akka with NetBeans ------------------------ diff --git a/akka-docs/rst/java/camel.rst b/akka-docs/rst/java/camel.rst index 96ff9e468c..919fbc133e 100644 --- a/akka-docs/rst/java/camel.rst +++ b/akka-docs/rst/java/camel.rst @@ -281,7 +281,7 @@ Actors may also use a Camel `ProducerTemplate`_ for producing messages to endpoi .. includecode:: code/docs/camel/MyActor.java#ProducerTemplate -For initiating a a two-way message exchange, one of the +For initiating a two-way message exchange, one of the ``ProducerTemplate.request*`` methods must be used. .. includecode:: code/docs/camel/RequestBodyActor.java#RequestProducerTemplate diff --git a/akka-docs/rst/java/http/routing-dsl/marshalling.rst b/akka-docs/rst/java/http/routing-dsl/marshalling.rst index 899fc11abb..d0376b9caa 100644 --- a/akka-docs/rst/java/http/routing-dsl/marshalling.rst +++ b/akka-docs/rst/java/http/routing-dsl/marshalling.rst @@ -13,7 +13,7 @@ of an HTTP request or response (depending on whether used on the client or serve Marshalling ----------- -On the server-side marshalling is used to convert a application-domain object to a response (entity). 
Requests can +On the server-side marshalling is used to convert an application-domain object to a response (entity). Requests can contain an ``Accept`` header that lists acceptable content types for the client. A marshaller contains the logic to negotiate the result content types based on the ``Accept`` and the ``AcceptCharset`` headers. @@ -30,7 +30,7 @@ These marshallers are provided by akka-http: Unmarshalling ------------- -On the server-side unmarshalling is used to convert a request (entity) to a application-domain object. This is done +On the server-side unmarshalling is used to convert a request (entity) to an application-domain object. This is done in the ``MarshallingDirectives.request`` or ``MarshallingDirectives.entity`` directive. There are several unmarshallers provided by akka-http: diff --git a/akka-docs/rst/java/stream/stream-customize.rst b/akka-docs/rst/java/stream/stream-customize.rst index e3922b5b0b..fa6bf82473 100644 --- a/akka-docs/rst/java/stream/stream-customize.rst +++ b/akka-docs/rst/java/stream/stream-customize.rst @@ -138,7 +138,7 @@ Finally, there are two methods available for convenience to complete the stage a In some cases it is inconvenient and error prone to react on the regular state machine events with the -signal based API described above. For those cases there is a API which allows for a more declarative sequencing +signal based API described above. For those cases there is an API which allows for a more declarative sequencing of actions which will greatly simplify some use cases at the cost of some extra allocations. The difference between the two APIs could be described as that the first one is signal driven from the outside, while this API is more active and drives its surroundings. diff --git a/akka-docs/rst/java/stream/stream-integrations.rst b/akka-docs/rst/java/stream/stream-integrations.rst index c4225dc15c..6f47436fe1 100644 --- a/akka-docs/rst/java/stream/stream-integrations.rst +++ b/akka-docs/rst/java/stream/stream-integrations.rst @@ -15,7 +15,7 @@ For more advanced use cases the :class:`ActorPublisher` and :class:`ActorSubscri provided to support implementing Reactive Streams :class:`Publisher` and :class:`Subscriber` with an :class:`Actor`. -These can be consumed by other Reactive Stream libraries or used as a +These can be consumed by other Reactive Stream libraries or used as an Akka Streams :class:`Source` or :class:`Sink`. .. warning:: diff --git a/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst b/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst index f3e239b101..afc46c3176 100644 --- a/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst +++ b/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst @@ -125,10 +125,10 @@ FSM notifies on same state transitions When changing states in an Finite-State-Machine Actor (``FSM``), state transition events are emitted and can be handled by the user either by registering ``onTransition`` handlers or by subscribing to these events by sending it an ``SubscribeTransitionCallBack`` message. -Previously in ``2.3.x`` when an ``FSM`` was in state ``A`` and performed an ``goto(A)`` transition, no state transition notification would be sent. +Previously in ``2.3.x`` when an ``FSM`` was in state ``A`` and performed a ``goto(A)`` transition, no state transition notification would be sent. This is because it would effectively stay in the same state, and was deemed to be semantically equivalent to calling ``stay()``. 
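A minimal hypothetical ``FSM`` sketching such a same-state transition (the comments reflect the ``2.4.x`` behaviour described just below)::

   import akka.actor.{ ActorLogging, FSM }

   sealed trait ExampleState
   case object A extends ExampleState

   class SameStateFsm extends FSM[ExampleState, Unit] with ActorLogging {
     startWith(A, ())
     when(A) {
       case Event("loop", _) ⇒ goto(A) // 2.4.x: emits a Transition(A, A) event; 2.3.x did not
       case Event("noop", _) ⇒ stay()  // never emits a transition event
     }
     onTransition {
       case A -> A ⇒ log.info("same-state transition observed")
     }
     initialize()
   }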
-In ``2.4.x`` when an ``FSM`` performs a any ``goto(X)`` transition, it will always trigger state transition events. +In ``2.4.x`` when an ``FSM`` performs an any ``goto(X)`` transition, it will always trigger state transition events. Which turns out to be useful in many systems where same-state transitions actually should have an effect. In case you do *not* want to trigger a state transition event when effectively performing an ``X->X`` transition, use ``stay()`` instead. diff --git a/akka-docs/rst/scala/camel.rst b/akka-docs/rst/scala/camel.rst index 7cfeec715a..bed43d22c9 100644 --- a/akka-docs/rst/scala/camel.rst +++ b/akka-docs/rst/scala/camel.rst @@ -277,7 +277,7 @@ messages to endpoints. .. includecode:: code/docs/camel/Producers.scala#ProducerTemplate -For initiating a a two-way message exchange, one of the +For initiating a two-way message exchange, one of the ``ProducerTemplate.request*`` methods must be used. .. includecode:: code/docs/camel/Producers.scala#RequestProducerTemplate diff --git a/akka-docs/rst/scala/http/routing-dsl/directives/future-directives/completeOrRecoverWith.rst b/akka-docs/rst/scala/http/routing-dsl/directives/future-directives/completeOrRecoverWith.rst index b616abff09..6ad762b2a4 100644 --- a/akka-docs/rst/scala/http/routing-dsl/directives/future-directives/completeOrRecoverWith.rst +++ b/akka-docs/rst/scala/http/routing-dsl/directives/future-directives/completeOrRecoverWith.rst @@ -14,7 +14,7 @@ Description If the ``Future[T]`` succeeds the request is completed using the value's marshaller (this directive therefore requires a marshaller for the future's parameter type to be implicitly available). The execution of the inner route passed to this directive is only executed if the given future completed with a failure, -exposing the reason of failure as a extraction of type ``Throwable``. +exposing the reason of failure as an extraction of type ``Throwable``. To handle the successful case manually as well, use the :ref:`-onComplete-` directive, instead. diff --git a/akka-docs/rst/scala/stream/stream-customize.rst b/akka-docs/rst/scala/stream/stream-customize.rst index b21978c932..fbefb77dc1 100644 --- a/akka-docs/rst/scala/stream/stream-customize.rst +++ b/akka-docs/rst/scala/stream/stream-customize.rst @@ -141,7 +141,7 @@ Finally, there are two methods available for convenience to complete the stage a In some cases it is inconvenient and error prone to react on the regular state machine events with the -signal based API described above. For those cases there is a API which allows for a more declarative sequencing +signal based API described above. For those cases there is an API which allows for a more declarative sequencing of actions which will greatly simplify some use cases at the cost of some extra allocations. The difference between the two APIs could be described as that the first one is signal driven from the outside, while this API is more active and drives its surroundings. diff --git a/akka-docs/rst/scala/stream/stream-integrations.rst b/akka-docs/rst/scala/stream/stream-integrations.rst index 18e552da6a..8669460c05 100644 --- a/akka-docs/rst/scala/stream/stream-integrations.rst +++ b/akka-docs/rst/scala/stream/stream-integrations.rst @@ -15,7 +15,7 @@ For more advanced use cases the :class:`ActorPublisher` and :class:`ActorSubscri provided to support implementing Reactive Streams :class:`Publisher` and :class:`Subscriber` with an :class:`Actor`. 
-These can be consumed by other Reactive Stream libraries or used as a +These can be consumed by other Reactive Stream libraries or used as an Akka Streams :class:`Source` or :class:`Sink`. .. warning:: From 07ec6b7f4cd90427e9555ccbb708a66bc90f2bec Mon Sep 17 00:00:00 2001 From: Harit Himanshu Date: Mon, 11 Jul 2016 06:48:07 -0700 Subject: [PATCH 007/155] Adding note on usage of Cluster Dispatcher #20775 (#20927) * Adding note on usage of Cluster Dispatcher #20775 * Remove `[` and `]` to from Cluster Usage node #20775 --- akka-docs/rst/java/cluster-usage.rst | 10 ++++++++++ akka-docs/rst/scala/cluster-usage.rst | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/akka-docs/rst/java/cluster-usage.rst b/akka-docs/rst/java/cluster-usage.rst index 1ea6133920..0c88ea0ce7 100644 --- a/akka-docs/rst/java/cluster-usage.rst +++ b/akka-docs/rst/java/cluster-usage.rst @@ -733,3 +733,13 @@ For this purpose you can define a separate dispatcher to be used for the cluster parallelism-max = 4 } } + +.. note:: + Normally it should not be necessary to configure a separate dispatcher for the Cluster. + The default-dispatcher should be sufficient for performing the Cluster tasks, i.e. ``akka.cluster.use-dispatcher`` + should not be changed. If you have Cluster related problems when using the default-dispatcher that is typically an + indication that you are running blocking or CPU intensive actors/tasks on the default-dispatcher. + Use dedicated dispatchers for such actors/tasks instead of running them on the default-dispatcher, + because that may starve system internal tasks. + Related config properties: ``akka.cluster.use-dispatcher = akka.cluster.cluster-dispatcher``. + Corresponding default values: ``akka.cluster.use-dispatcher =``. \ No newline at end of file diff --git a/akka-docs/rst/scala/cluster-usage.rst b/akka-docs/rst/scala/cluster-usage.rst index a9add41748..f4243805b3 100644 --- a/akka-docs/rst/scala/cluster-usage.rst +++ b/akka-docs/rst/scala/cluster-usage.rst @@ -785,3 +785,13 @@ For this purpose you can define a separate dispatcher to be used for the cluster parallelism-max = 4 } } + +.. note:: + Normally it should not be necessary to configure a separate dispatcher for the Cluster. + The default-dispatcher should be sufficient for performing the Cluster tasks, i.e. ``akka.cluster.use-dispatcher`` + should not be changed. If you have Cluster related problems when using the default-dispatcher that is typically an + indication that you are running blocking or CPU intensive actors/tasks on the default-dispatcher. + Use dedicated dispatchers for such actors/tasks instead of running them on the default-dispatcher, + because that may starve system internal tasks. + Related config properties: ``akka.cluster.use-dispatcher = akka.cluster.cluster-dispatcher``. + Corresponding default values: ``akka.cluster.use-dispatcher =``. 
\ No newline at end of file From 841be3e172083f2d3f34e8e66360d3772633a1e6 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Mon, 11 Jul 2016 16:12:59 +0200 Subject: [PATCH 008/155] =build versions of dependencies for 2.12.0-M5 (#20939) * =build versions of dependencies for 2.12.0-M5 * Update Dependencies.scala --- project/Dependencies.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 72834ced55..2b27eb821b 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -26,12 +26,14 @@ object Dependencies { case "2.12.0-M2" => "2.2.5-M2" case "2.12.0-M3" => "2.2.5-M3" case "2.12.0-M4" => "2.2.6" + case "2.12.0-M5" => "3.0.0-RC4" case _ => "2.2.4" } }, java8CompatVersion := { scalaVersion.value match { case "2.12.0-M4" => "0.8.0-RC1" + case "2.12.0-M5" => "0.8.0-RC3" case _ => "0.7.0" } } From 55e3e123b1b9b05346e93b3a89b8faf67e976147 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Mon, 11 Jul 2016 18:23:52 +0200 Subject: [PATCH 009/155] =act #20910 optimize ByteString#copyToBuffer (#20911) --- .../main/scala/akka/util/ByteIterator.scala | 3 +- .../src/main/scala/akka/util/ByteString.scala | 44 ++++++++- .../scala/akka/util/ByteStringBenchmark.scala | 98 +++++++++++++++++++ 3 files changed, 142 insertions(+), 3 deletions(-) create mode 100644 akka-bench-jmh/src/main/scala/akka/util/ByteStringBenchmark.scala diff --git a/akka-actor/src/main/scala/akka/util/ByteIterator.scala b/akka-actor/src/main/scala/akka/util/ByteIterator.scala index b763ef93fe..3ca0092d49 100644 --- a/akka-actor/src/main/scala/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala/akka/util/ByteIterator.scala @@ -341,7 +341,8 @@ object ByteIterator { def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = getToArray(xs, offset, n, 8) { getDouble(byteOrder) } { current.getDoubles(_, _, _)(byteOrder) } - def copyToBuffer(buffer: ByteBuffer): Int = { + override def copyToBuffer(buffer: ByteBuffer): Int = { + // the fold here is better than indexing into the LinearSeq val n = iterators.foldLeft(0) { _ + _.copyToBuffer(buffer) } normalize() n diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala index 8ccfa2518b..89204c7169 100644 --- a/akka-actor/src/main/scala/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala/akka/util/ByteString.scala @@ -7,7 +7,8 @@ package akka.util import java.io.{ ObjectInputStream, ObjectOutputStream } import java.nio.{ ByteBuffer, ByteOrder } import java.lang.{ Iterable ⇒ JIterable } -import scala.annotation.varargs + +import scala.annotation.{ tailrec, varargs } import scala.collection.IndexedSeqOptimized import scala.collection.mutable.{ Builder, WrappedArray } import scala.collection.immutable @@ -147,6 +148,20 @@ object ByteString { private[akka] def writeToOutputStream(os: ObjectOutputStream): Unit = toByteString1.writeToOutputStream(os) + + override def copyToBuffer(buffer: ByteBuffer): Int = + writeToBuffer(buffer, offset = 0) + + /** INTERNAL API: Specialized for internal use, writing multiple ByteString1C into the same ByteBuffer. 
*/ + private[akka] def writeToBuffer(buffer: ByteBuffer, offset: Int): Int = { + val copyLength = math.min(buffer.remaining, offset + length) + if (copyLength > 0) { + buffer.put(bytes, offset, copyLength) + drop(copyLength) + } + copyLength + } + } private[akka] object ByteString1 extends Companion { @@ -189,6 +204,19 @@ object ByteString { private[akka] def byteStringCompanion = ByteString1 + override def copyToBuffer(buffer: ByteBuffer): Int = + writeToBuffer(buffer) + + /** INTERNAL API: Specialized for internal use, writing multiple ByteString1C into the same ByteBuffer. */ + private[akka] def writeToBuffer(buffer: ByteBuffer): Int = { + val copyLength = math.min(buffer.remaining, length) + if (copyLength > 0) { + buffer.put(bytes, startIndex, copyLength) + drop(copyLength) + } + copyLength + } + def compact: CompactByteString = if (isCompact) ByteString1C(bytes) else ByteString1C(toArray) @@ -312,6 +340,14 @@ object ByteString { def isCompact: Boolean = if (bytestrings.length == 1) bytestrings.head.isCompact else false + override def copyToBuffer(buffer: ByteBuffer): Int = { + @tailrec def copyItToTheBuffer(buffer: ByteBuffer, i: Int, written: Int): Int = + if (i < bytestrings.length) copyItToTheBuffer(buffer, i + 1, written + bytestrings(i).writeToBuffer(buffer)) + else written + + copyItToTheBuffer(buffer, 0, 0) + } + def compact: CompactByteString = { if (isCompact) bytestrings.head.compact else { @@ -452,7 +488,11 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz * @param buffer a ByteBuffer to copy bytes to * @return the number of bytes actually copied */ - def copyToBuffer(buffer: ByteBuffer): Int = iterator.copyToBuffer(buffer) + def copyToBuffer(buffer: ByteBuffer): Int = { + // TODO: remove this impl, make it an abstract method when possible + // specialized versions of this method exist in sub-classes, we keep this impl for binary compatibility, it never is actually invoked + iterator.copyToBuffer(buffer) + } /** * Create a new ByteString with all contents compacted into a single, diff --git a/akka-bench-jmh/src/main/scala/akka/util/ByteStringBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/util/ByteStringBenchmark.scala new file mode 100644 index 0000000000..ec62572d8c --- /dev/null +++ b/akka-bench-jmh/src/main/scala/akka/util/ByteStringBenchmark.scala @@ -0,0 +1,98 @@ +/** + * Copyright (C) 2014-2016 Lightbend Inc. 
+ */ +package akka.util + +import java.nio.ByteBuffer +import java.util.concurrent.TimeUnit + +import akka.util.ByteString.{ ByteString1, ByteString1C, ByteStrings } +import org.openjdk.jmh.annotations._ +import org.openjdk.jmh.infra.Blackhole + +@State(Scope.Benchmark) +@Measurement(timeUnit = TimeUnit.MILLISECONDS) +class ByteStringBenchmark { + + val _bs_mini = ByteString(Array.ofDim[Byte](128 * 4)) + val _bs_small = ByteString(Array.ofDim[Byte](1024 * 1)) + val _bs_large = ByteString(Array.ofDim[Byte](1024 * 4)) + + val bs_mini = ByteString(Array.ofDim[Byte](128 * 4 * 4)) + val bs_small = ByteString(Array.ofDim[Byte](1024 * 1 * 4)) + val bs_large = ByteString(Array.ofDim[Byte](1024 * 4 * 4)) + + val bss_mini = ByteStrings(Vector.fill(4)(bs_mini.asInstanceOf[ByteString1C].toByteString1), 4 * bs_mini.length) + val bss_small = ByteStrings(Vector.fill(4)(bs_small.asInstanceOf[ByteString1C].toByteString1), 4 * bs_small.length) + val bss_large = ByteStrings(Vector.fill(4)(bs_large.asInstanceOf[ByteString1C].toByteString1), 4 * bs_large.length) + val bss_pc_large = bss_large.compact + + val buf = ByteBuffer.allocate(1024 * 4 * 4) + + /* + BEFORE + + [info] Benchmark Mode Cnt Score Error Units + [info] ByteStringBenchmark.bs_large_copyToBuffer thrpt 40 142 163 289.866 ± 21751578.294 ops/s + [info] ByteStringBenchmark.bss_large_copyToBuffer thrpt 40 1 489 195.631 ± 209165.487 ops/s << that's the interesting case, we needlessly fold and allocate tons of Stream etc + [info] ByteStringBenchmark.bss_large_pc_copyToBuffer thrpt 40 184 466 756.364 ± 9169108.378 ops/s // "can't beat that" + + + [info] ....[Thread state: RUNNABLE]........................................................................ + [info] 35.9% 35.9% scala.collection.Iterator$class.toStream + [info] 20.2% 20.2% scala.collection.immutable.Stream.foldLeft + [info] 11.6% 11.6% scala.collection.immutable.Stream$StreamBuilder. + [info] 10.9% 10.9% akka.util.ByteIterator. + [info] 6.1% 6.1% scala.collection.mutable.ListBuffer. + [info] 5.2% 5.2% akka.util.ByteString.copyToBuffer + [info] 5.2% 5.2% scala.collection.AbstractTraversable. + [info] 2.2% 2.2% scala.collection.immutable.VectorIterator.initFrom + [info] 1.2% 1.2% akka.util.generated.ByteStringBenchmark_bss_large_copyToBuffer.bss_large_copyToBuffer_thrpt_jmhStub + [info] 0.3% 0.3% akka.util.ByteIterator$MultiByteArrayIterator.copyToBuffer + [info] 1.2% 1.2% + + + AFTER specializing impls + + [info] ....[Thread state: RUNNABLE]........................................................................ 
+ [info] 99.5% 99.6% akka.util.generated.ByteStringBenchmark_bss_large_copyToBuffer_jmhTest.bss_large_copyToBuffer_thrpt_jmhStub + [info] 0.1% 0.1% java.util.concurrent.CountDownLatch.countDown + [info] 0.1% 0.1% sun.reflect.NativeMethodAccessorImpl.invoke0 + [info] 0.1% 0.1% sun.misc.Unsafe.putObject + [info] 0.1% 0.1% org.openjdk.jmh.infra.IterationParamsL2.getBatchSize + [info] 0.1% 0.1% java.lang.Thread.currentThread + [info] 0.1% 0.1% sun.misc.Unsafe.compareAndSwapInt + [info] 0.1% 0.1% sun.reflect.AccessorGenerator.internalize + + [info] Benchmark Mode Cnt Score Error Units + [info] ByteStringBenchmark.bs_large_copyToBuffer thrpt 40 177 328 585.473 ± 7742067.648 ops/s + [info] ByteStringBenchmark.bss_large_copyToBuffer thrpt 40 113 535 003.488 ± 3899763.124 ops/s // previous bad case now very good (was 2M/s) + [info] ByteStringBenchmark.bss_large_pc_copyToBuffer thrpt 40 203 590 896.493 ± 7582752.024 ops/s // "can't beat that" + + */ + + @Benchmark + def bs_large_copyToBuffer(): Int = { + buf.flip() + bs_large.copyToBuffer(buf) + } + + @Benchmark + def bss_large_copyToBuffer(): Int = { + buf.flip() + bss_large.copyToBuffer(buf) + } + + // /** compact + copy */ + // @Benchmark + // def bss_large_c_copyToBuffer: Int = + // bss_large.compact.copyToBuffer(buf) + + /** Pre-compacted */ + @Benchmark + def bss_large_pc_copyToBuffer(): Int = { + buf.flip() + bss_pc_large.copyToBuffer(buf) + } + +} From 65fae200b741c0feb044eca5abe259b2fce7bb77 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Wed, 13 Jul 2016 14:53:56 +0200 Subject: [PATCH 010/155] =htp #20884 fix header name rendering issue when $minus present (#20940) --- .../akka/http/scaladsl/model/headers/headers.scala | 10 +++++++++- .../server/directives/HeaderDirectivesSpec.scala | 12 +++++++++++- .../server/directives/HeaderDirectives.scala | 5 +++-- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala index bf13d2a98e..e19385dafd 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala @@ -21,7 +21,7 @@ import akka.http.javadsl.{ model ⇒ jm } import akka.http.scaladsl.model._ sealed abstract class ModeledCompanion[T: ClassTag] extends Renderable { - val name = getClass.getSimpleName.replace("$minus", "-").dropRight(1) // trailing $ + val name = ModeledCompanion.nameFromClass(getClass) val lowercaseName = name.toRootLowerCase private[this] val nameBytes = name.asciiBytes final def render[R <: Rendering](r: R): r.type = r ~~ nameBytes ~~ ':' ~~ ' ' @@ -36,6 +36,14 @@ sealed abstract class ModeledCompanion[T: ClassTag] extends Renderable { case res ⇒ Left(res.errors) } } +/** INTERNAL API */ +private[akka] object ModeledCompanion { + def nameFromClass[T](clazz: Class[T]): String = { + val name = clazz.getSimpleName.replace("$minus", "-") + if (name.last == '$') name.dropRight(1) // trailing $ + else name + } +} sealed trait ModeledHeader extends HttpHeader with Serializable { def renderInRequests: Boolean = false // default implementation diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/HeaderDirectivesSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/HeaderDirectivesSpec.scala index 4fe55462cc..1c0b893b64 100644 --- 
a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/HeaderDirectivesSpec.scala +++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/HeaderDirectivesSpec.scala @@ -34,7 +34,7 @@ class HeaderDirectivesSpec extends RoutingSpec with Inside { } "The headerValueByType directive" should { - lazy val route = + val route = headerValueByType[Origin]() { origin ⇒ complete(s"The first origin was ${origin.origins.head}") } @@ -51,6 +51,16 @@ class HeaderDirectivesSpec extends RoutingSpec with Inside { } } } + "reject a request for missing header, and format it properly when header included special characters (e.g. `-`)" in { + val route = headerValueByType[`User-Agent`]() { agent ⇒ + complete(s"Agent: ${agent}") + } + Get("abc") ~> route ~> check { + inside(rejection) { + case MissingHeaderRejection("User-Agent") ⇒ + } + } + } } "The headerValueByName directive" should { diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala index 87ae5a1910..0243e58990 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala @@ -7,7 +7,7 @@ package directives import akka.http.impl.util._ import akka.http.scaladsl.model._ -import akka.http.scaladsl.model.headers.{ HttpOriginRange, ModeledCustomHeader, ModeledCustomHeaderCompanion, Origin } +import akka.http.scaladsl.model.headers._ import scala.reflect.ClassTag import scala.util.control.NonFatal @@ -91,7 +91,7 @@ trait HeaderDirectives { * @group header */ def headerValueByType[T](magnet: HeaderMagnet[T]): Directive1[T] = - headerValuePF(magnet.extractPF) | reject(MissingHeaderRejection(magnet.runtimeClass.getSimpleName)) + headerValuePF(magnet.extractPF) | reject(MissingHeaderRejection(magnet.headerName)) //#optional-header /** @@ -159,6 +159,7 @@ object HeaderDirectives extends HeaderDirectives trait HeaderMagnet[T] { def classTag: ClassTag[T] def runtimeClass: Class[T] + def headerName = ModeledCompanion.nameFromClass(runtimeClass) /** * Returns a partial function that checks if the input value is of runtime type From 08e4ee0e6f5fd99ec341c5a22ca59c5f7902ae6a Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Wed, 13 Jul 2016 15:55:22 +0200 Subject: [PATCH 011/155] =build update to latest sbt-jmh (#20949) Which also includes the JFR profiler mode. 
Use with: `jmh: run -prof jmh.extras.JFR`, details here: https://github.com/ktoso/sbt-jmh#using-oracle-flight-recorder --- project/plugins.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/plugins.sbt b/project/plugins.sbt index e863d3bbdb..c88bcf1660 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -25,7 +25,7 @@ addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.3.3") addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "0.2.2") -addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.2.3") +addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.2.10") addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.0.0-RC1") From 400402f76cccbb82bb6d0006e4a22a51f6470d20 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Thu, 14 Jul 2016 14:03:04 +0200 Subject: [PATCH 012/155] +act #20936 add CompletionStage API to CircuitBreaker (#20937) --- .../java/akka/pattern/CircuitBreakerTest.java | 42 +++++++++++++++++++ .../akka/pattern/CircuitBreakerSpec.scala | 2 +- .../scala/akka/pattern/CircuitBreaker.scala | 24 +++++++++-- .../circuitbreaker/DangerousJavaActor.java | 23 +++------- 4 files changed, 70 insertions(+), 21 deletions(-) create mode 100644 akka-actor-tests/src/test/java/akka/pattern/CircuitBreakerTest.java diff --git a/akka-actor-tests/src/test/java/akka/pattern/CircuitBreakerTest.java b/akka-actor-tests/src/test/java/akka/pattern/CircuitBreakerTest.java new file mode 100644 index 0000000000..c3e5da1606 --- /dev/null +++ b/akka-actor-tests/src/test/java/akka/pattern/CircuitBreakerTest.java @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2009-2016 Lightbend Inc. + */ +package akka.pattern; + +import akka.actor.*; +import akka.testkit.AkkaJUnitActorSystemResource; +import akka.testkit.AkkaSpec; +import org.junit.ClassRule; +import org.junit.Test; +import org.scalatest.junit.JUnitSuite; +import scala.compat.java8.FutureConverters; +import scala.concurrent.Await; +import scala.concurrent.duration.FiniteDuration; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; + +public class CircuitBreakerTest extends JUnitSuite { + + @ClassRule + public static AkkaJUnitActorSystemResource actorSystemResource = + new AkkaJUnitActorSystemResource("JavaAPI", AkkaSpec.testConf()); + + private final ActorSystem system = actorSystemResource.getSystem(); + + @Test + public void useCircuitBreakerWithCompletableFuture() throws Exception { + final FiniteDuration fiveSeconds = FiniteDuration.create(5, TimeUnit.SECONDS); + final FiniteDuration fiveHundredMillis = FiniteDuration.create(500, TimeUnit.MILLISECONDS); + final CircuitBreaker breaker = new CircuitBreaker(system.dispatcher(), system.scheduler(), 1, fiveSeconds, fiveHundredMillis); + + final CompletableFuture f = new CompletableFuture<>(); + f.complete("hello"); + final CompletionStage res = breaker.callWithCircuitBreakerCS(() -> f); + assertEquals("hello", Await.result(FutureConverters.toScala(res), fiveSeconds)); + } + +} diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala index a0df91c16c..2c1e7adb1e 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala @@ -219,7 +219,7 @@ class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter { val breaker = 
CircuitBreakerSpec.shortCallTimeoutCb() val fut = breaker().withCircuitBreaker(Future { - Thread.sleep(150.millis.dilated.toMillis); + Thread.sleep(150.millis.dilated.toMillis) throwException }) checkLatch(breaker.openLatch) diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index 5f34ac2e5a..6460ee039c 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -3,18 +3,24 @@ */ package akka.pattern -import java.util.concurrent.atomic.{ AtomicInteger, AtomicLong, AtomicBoolean } +import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger, AtomicLong } + import akka.AkkaException import akka.actor.Scheduler import akka.util.Unsafe + import scala.util.control.NoStackTrace -import java.util.concurrent.{ Callable, CopyOnWriteArrayList } -import scala.concurrent.{ ExecutionContext, Future, Promise, Await } +import java.util.concurrent.{ Callable, CompletionStage, CopyOnWriteArrayList } + +import scala.concurrent.{ Await, ExecutionContext, Future, Promise } import scala.concurrent.duration._ import scala.concurrent.TimeoutException import scala.util.control.NonFatal import scala.util.Success import akka.dispatch.ExecutionContexts.sameThreadExecutionContext +import akka.japi.function.Creator + +import scala.compat.java8.FutureConverters /** * Companion object providing factory methods for Circuit Breaker which runs callbacks in caller's thread @@ -123,6 +129,18 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite */ def callWithCircuitBreaker[T](body: Callable[Future[T]]): Future[T] = withCircuitBreaker(body.call) + /** + * Java API (8) for [[#withCircuitBreaker]] + * + * @param body Call needing protected + * @return [[java.util.concurrent.CompletionStage]] containing the call result or a + * `scala.concurrent.TimeoutException` if the call timed out + */ + def callWithCircuitBreakerCS[T](body: Callable[CompletionStage[T]]): CompletionStage[T] = + FutureConverters.toJava[T](callWithCircuitBreaker(new Callable[Future[T]] { + override def call(): Future[T] = FutureConverters.toScala(body.call()) + })) + /** * Wraps invocations of synchronous calls that need to be protected * diff --git a/akka-docs/rst/common/code/docs/circuitbreaker/DangerousJavaActor.java b/akka-docs/rst/common/code/docs/circuitbreaker/DangerousJavaActor.java index f7eba2708f..92cfaba185 100644 --- a/akka-docs/rst/common/code/docs/circuitbreaker/DangerousJavaActor.java +++ b/akka-docs/rst/common/code/docs/circuitbreaker/DangerousJavaActor.java @@ -51,27 +51,16 @@ public class DangerousJavaActor extends UntypedActor { if (message instanceof String) { String m = (String) message; if ("is my middle name".equals(m)) { - pipe(breaker.callWithCircuitBreaker( - new Callable>() { - public Future call() throws Exception { - return future( - new Callable() { - public String call() { - return dangerousCall(); - } - }, getContext().dispatcher()); - } - }), getContext().dispatcher()).to(getSender()); + pipe( + breaker.callWithCircuitBreaker(() -> + future(() -> dangerousCall(), getContext().dispatcher()) + ), getContext().dispatcher() + ).to(getSender()); } if ("block for me".equals(m)) { getSender().tell(breaker .callWithSyncCircuitBreaker( - new Callable() { - @Override - public String call() throws Exception { - return dangerousCall(); - } - }), getSelf()); + () -> dangerousCall()), getSelf()); } } } From 
e08958322a1ad74cd958340d95a69a646fb27898 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Thu, 14 Jul 2016 16:48:51 +0200 Subject: [PATCH 013/155] +htp akka http Throughput benchmark, for jenkins (#20956) --- .../server/AkkaHttpServerThroughputSpec.scala | 224 ++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala new file mode 100644 index 0000000000..10cac90649 --- /dev/null +++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2016 Lightbend Inc. + */ + +package akka.http.scaladsl.server + +import java.util.concurrent.TimeUnit + +import akka.NotUsed +import akka.http.scaladsl.model.{ ContentTypes, HttpEntity } +import akka.http.scaladsl.{ Http, TestUtils } +import akka.stream.ActorMaterializer +import akka.stream.scaladsl.Source +import akka.testkit.AkkaSpec +import akka.util.ByteString +import org.scalatest.exceptions.TestPendingException +import org.scalatest.concurrent.ScalaFutures + +import scala.annotation.tailrec +import scala.concurrent.Await +import scala.concurrent.duration._ +import scala.util.Try + +class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) + with ScalaFutures { + + override def expectedTestDuration = 1.hour + + def this() = this( + """ + akka { + test.AkkaHttpServerThroughputSpec { + rate = 10000 + duration = 30s + + totalRequestsFactor = 1.0 + } + } + """.stripMargin + ) + + implicit val dispatcher = system.dispatcher + implicit val mat = ActorMaterializer() + + val MediumByteString = ByteString(Vector.fill(1024)(0.toByte): _*) + + val array_10x: Array[Byte] = Array(Vector.fill(10)(MediumByteString).flatten: _*) + val array_100x: Array[Byte] = Array(Vector.fill(100)(MediumByteString).flatten: _*) + val source_10x: Source[ByteString, NotUsed] = Source.repeat(MediumByteString).take(10) + val source_100x: Source[ByteString, NotUsed] = Source.repeat(MediumByteString).take(100) + val tenXResponseLength = array_10x.length + val hundredXResponseLength = array_100x.length + + // format: OFF + val routes = { + import Directives._ + + path("ping") { + complete("PONG!") + } ~ + path("long-response-stream" / IntNumber) { n => + if (n == 10) complete(HttpEntity(ContentTypes.`text/plain(UTF-8)`, array_10x)) + else if (n == 100) complete(HttpEntity(ContentTypes.`text/plain(UTF-8)`, array_10x)) + else throw new RuntimeException(s"Not implemented for ${n}") + } ~ + path("long-response-array" / IntNumber) { n => + if (n == 10) complete(HttpEntity(ContentTypes.`text/plain(UTF-8)`, source_100x)) + else if (n == 100) complete(HttpEntity(ContentTypes.`text/plain(UTF-8)`, source_100x)) + else throw new RuntimeException(s"Not implemented for ${n}") + } + } + // format: ON + + val (_, hostname, port) = TestUtils.temporaryServerHostnameAndPort() + val binding = Http().bindAndHandle(routes, hostname, port) + + val totalRequestsFactor = system.settings.config.getDouble("akka.test.AkkaHttpServerThroughputSpec.totalRequestsFactor") + val requests = Math.round(10000 * totalRequestsFactor) + val rate = system.settings.config.getInt("akka.test.AkkaHttpServerThroughputSpec.rate") + val testDuration = system.settings.config.getDuration("akka.test.AkkaHttpServerThroughputSpec.duration", 
TimeUnit.SECONDS) + val connections: Long = 10 + + // --- urls + val url_ping = s"http://127.0.0.1:$port/ping" + def url_longResponseStream(int: Int) = s"http://127.0.0.1:$port/long-response-stream/$int" + def url_longResponseArray(int: Int) = s"http://127.0.0.1:$port/long-response-array/$int" + // --- + + "HttpServer" should { + import scala.sys.process._ + + Await.ready(binding, 3.seconds) + + "a warmup" in ifWrk2Available { + val wrkOptions = s"""-d 30s -R $rate -c $connections -t $connections""" + s"""wrk $wrkOptions $url_ping""".!!.split("\n") + info("warmup complete.") + } + + "have good throughput on PONG response (keep-alive)" in ifWrk2Available { + val wrkOptions = s"""-d ${testDuration}s -R $rate -c $connections -t $connections --u_latency""" + val output = s"""wrk $wrkOptions $url_ping""".!!.split("\n") + infoThe(output) + printWrkPercentiles(s"Throughput_pong_R:${rate}_C:${connections}_p:", output) + } + "have good throughput (ab) (short-lived connections)" in ifAbAvailable { + val id = s"Throughput_AB-short-lived_pong_R:${rate}_C:${connections}_p:" + val abOptions = s"-c $connections -n $requests -g $id.tsv -e $id.csv" + val output = s"""ab $abOptions $url_ping""".!!.split("\n") + infoThe(output) + printAbPercentiles(id, output) + } + "have good throughput (ab) (long-lived connections)" in ifAbAvailable { + val id = s"Throughput_AB_pong_shortLived_R:${rate}_C:${connections}_p:" + val abOptions = s"-c $connections -n $requests -g $id.tsv -e $id.csv" + info(s"""ab $abOptions $url_ping""") + val output = s"""ab $abOptions $url_ping""".!!.split("\n") + infoThe(output) + printAbPercentiles(s"Throughput_ab_pong_R:${rate}_C:${connections}_p:", output) + } + + List( + 10 → tenXResponseLength, + 100 → hundredXResponseLength + ) foreach { + case (n, lenght) ⇒ + s"have good throughput (streaming-response($lenght), keep-alive)" in { + val wrkOptions = s"""-d ${testDuration}s -R $rate -c $connections -t $connections --u_latency""" + val output = s"""wrk $wrkOptions ${url_longResponseStream(n)}""".!!.split("\n") + infoThe(output) + printWrkPercentiles(s"Throughput_stream($lenght)_R:${rate}_C:${connections}_p:", output) + } + s"have good throughput (array-response($lenght), keep-alive)" in { + val wrkOptions = s"""-d ${testDuration}s -R $rate -c $connections -t $connections --u_latency""" + val output = s"""wrk $wrkOptions ${url_longResponseArray(n)}""".!!.split("\n") + infoThe(output) + printWrkPercentiles(s"Throughput_array($lenght)_R:${rate}_C:${connections}_p:", output) + } + } + } + + def infoThe(lines: Array[String]): Unit = + lines.foreach(l ⇒ info(" " + l)) + + def printWrkPercentiles(prefix: String, lines: Array[String]): Unit = { + val percentilesToPrint = 8 + + def durationAsMs(d: String): Long = { + val dd = d.replace("us", "µs") // Scala Duration does not parse "us" + Duration(dd).toMillis + } + + var i = 0 + val correctedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Latency Distribution").map(_._2).get + + i = correctedDistributionStartsHere + 1 // skip header + while (i < correctedDistributionStartsHere + 1 + percentilesToPrint) { + val line = lines(i).trim + val percentile = line.takeWhile(_ != '%') + println(prefix + percentile + "_corrected," + durationAsMs(line.drop(percentile.length + 1).trim)) + i += 1 + } + + val uncorrectedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Uncorrected Latency").map(_._2).get + + i = uncorrectedDistributionStartsHere + 1 // skip header + while (i < uncorrectedDistributionStartsHere + 1 + 
percentilesToPrint) { + val line = lines(i).trim + val percentile = line.takeWhile(_ != '%') + println(prefix + percentile + "_uncorrected," + durationAsMs(line.drop(percentile.length + 1).trim)) + i += 1 + } + } + + def printAbPercentiles(prefix: String, lines: Array[String]): Unit = { + val percentilesToPrint = 9 + + def durationAsMs(d: String): Long = + Duration(d).toMillis + + var i = 0 + val correctedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Percentage of the requests").map(_._2).get + + i = correctedDistributionStartsHere + 1 // skip header + while (i < correctedDistributionStartsHere + 1 + percentilesToPrint) { + val line = lines(i).trim + val percentile = line.takeWhile(_ != '%') + println(prefix + percentile + "," + durationAsMs(line.drop(percentile.length + 1).replace("(longest request)", "").trim + "ms")) + i += 1 + } + } + + var _ifWrk2Available: Option[Boolean] = None + @tailrec final def ifWrk2Available(test: ⇒ Unit): Unit = { + _ifWrk2Available match { + case Some(false) ⇒ throw new TestPendingException() + case Some(true) ⇒ test + case None ⇒ + import scala.sys.process._ + + val wrk = Try("""wrk""".!).getOrElse(-1) + _ifWrk2Available = Some(wrk == 1) // app found, help displayed + ifWrk2Available(test) + } + } + + var _ifAbAvailable: Option[Boolean] = None + @tailrec final def ifAbAvailable(test: ⇒ Unit): Unit = { + _ifAbAvailable match { + case Some(false) ⇒ throw new TestPendingException() + case Some(true) ⇒ test + case None ⇒ + import scala.sys.process._ + + val wrk = Try("""ab -h""".!).getOrElse(-1) + _ifAbAvailable = Some(wrk == 22) // app found, help displayed (22 return code is when -h runs in ab, weird but true) + ifAbAvailable(test) + } + } + + override protected def beforeTermination(): Unit = { + binding.futureValue.unbind().futureValue + } +} From 6078fe44eecd203f4954c296605cb3b9c8fa7708 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Thu, 14 Jul 2016 16:53:41 +0200 Subject: [PATCH 014/155] disable fuzzer in Http Throughput spec (#20957) --- .../akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala index 10cac90649..fec13d2f99 100644 --- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala +++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala @@ -29,6 +29,7 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) def this() = this( """ akka { + stream.materializer.debug.fuzzing-mode = off test.AkkaHttpServerThroughputSpec { rate = 10000 duration = 30s From 7ef1b78ae75ede84d2c05f9bf5bb9258c8e0929e Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Thu, 14 Jul 2016 17:47:56 +0200 Subject: [PATCH 015/155] +write http throughput results if asked to (#20958) --- .../server/AkkaHttpServerThroughputSpec.scala | 67 ++++++++++++++++--- 1 file changed, 56 insertions(+), 11 deletions(-) diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala index fec13d2f99..46908d6cb4 100644 --- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala +++ 
b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala @@ -4,6 +4,7 @@ package akka.http.scaladsl.server +import java.io.{ BufferedWriter, FileWriter } import java.util.concurrent.TimeUnit import akka.NotUsed @@ -30,7 +31,9 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) """ akka { stream.materializer.debug.fuzzing-mode = off + test.AkkaHttpServerThroughputSpec { + writeCsv = off rate = 10000 duration = 30s @@ -75,6 +78,8 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) val (_, hostname, port) = TestUtils.temporaryServerHostnameAndPort() val binding = Http().bindAndHandle(routes, hostname, port) + val writeCsv = system.settings.config.getBoolean("akka.test.AkkaHttpServerThroughputSpec.writeCsv") + val totalRequestsFactor = system.settings.config.getDouble("akka.test.AkkaHttpServerThroughputSpec.totalRequestsFactor") val requests = Math.round(10000 * totalRequestsFactor) val rate = system.settings.config.getInt("akka.test.AkkaHttpServerThroughputSpec.rate") @@ -106,14 +111,14 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) } "have good throughput (ab) (short-lived connections)" in ifAbAvailable { val id = s"Throughput_AB-short-lived_pong_R:${rate}_C:${connections}_p:" - val abOptions = s"-c $connections -n $requests -g $id.tsv -e $id.csv" + val abOptions = s"-c $connections -n $requests" val output = s"""ab $abOptions $url_ping""".!!.split("\n") infoThe(output) printAbPercentiles(id, output) } "have good throughput (ab) (long-lived connections)" in ifAbAvailable { val id = s"Throughput_AB_pong_shortLived_R:${rate}_C:${connections}_p:" - val abOptions = s"-c $connections -n $requests -g $id.tsv -e $id.csv" + val abOptions = s"-c $connections -n $requests" info(s"""ab $abOptions $url_ping""") val output = s"""ab $abOptions $url_ping""".!!.split("\n") infoThe(output) @@ -140,10 +145,21 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) } } - def infoThe(lines: Array[String]): Unit = + private def infoThe(lines: Array[String]): Unit = lines.foreach(l ⇒ info(" " + l)) - def printWrkPercentiles(prefix: String, lines: Array[String]): Unit = { + private def dumpToCsv(prefix: String, titles: Seq[String], values: Seq[String]): Unit = + if (writeCsv) { + val w = new BufferedWriter(new FileWriter(prefix + ".csv")) + w.write(titles.reverse.map(it ⇒ "\"" + it + "\"").mkString(",")) + w.write("\n") + w.write(values.reverse.map(it ⇒ "\"" + it + "\"").mkString(",")) + w.write("\n") + w.flush() + w.close() + } + + private def printWrkPercentiles(prefix: String, lines: Array[String]): Unit = { val percentilesToPrint = 8 def durationAsMs(d: String): Long = { @@ -154,26 +170,46 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) var i = 0 val correctedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Latency Distribution").map(_._2).get + var titles = List.empty[String] + var metrics = List.empty[String] i = correctedDistributionStartsHere + 1 // skip header while (i < correctedDistributionStartsHere + 1 + percentilesToPrint) { val line = lines(i).trim val percentile = line.takeWhile(_ != '%') - println(prefix + percentile + "_corrected," + durationAsMs(line.drop(percentile.length + 1).trim)) + + val title = prefix + percentile + "_corrected" + val duration = durationAsMs(line.drop(percentile.length + 1).trim) + + titles ::= title + metrics ::= duration.toString + println(title + "," + duration) + i += 1 } + 
dumpToCsv(prefix + "_corrected", titles, metrics) val uncorrectedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Uncorrected Latency").map(_._2).get + titles = List.empty + metrics = List.empty i = uncorrectedDistributionStartsHere + 1 // skip header while (i < uncorrectedDistributionStartsHere + 1 + percentilesToPrint) { val line = lines(i).trim val percentile = line.takeWhile(_ != '%') - println(prefix + percentile + "_uncorrected," + durationAsMs(line.drop(percentile.length + 1).trim)) + + val title = prefix + percentile + "_uncorrected" + val duration = durationAsMs(line.drop(percentile.length + 1).trim) + + titles ::= title + metrics ::= duration.toString + println(title + "," + duration) + i += 1 } + dumpToCsv(prefix + "_uncorrected", titles, metrics) } - def printAbPercentiles(prefix: String, lines: Array[String]): Unit = { + private def printAbPercentiles(prefix: String, lines: Array[String]): Unit = { val percentilesToPrint = 9 def durationAsMs(d: String): Long = @@ -182,17 +218,26 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) var i = 0 val correctedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Percentage of the requests").map(_._2).get + var titles = List.empty[String] + var metrics = List.empty[String] i = correctedDistributionStartsHere + 1 // skip header while (i < correctedDistributionStartsHere + 1 + percentilesToPrint) { val line = lines(i).trim val percentile = line.takeWhile(_ != '%') - println(prefix + percentile + "," + durationAsMs(line.drop(percentile.length + 1).replace("(longest request)", "").trim + "ms")) + val title = prefix + percentile + val duration = durationAsMs(line.drop(percentile.length + 1).replace("(longest request)", "").trim + "ms") + + titles ::= title + metrics ::= duration.toString + println(title + "," + duration) + i += 1 } + dumpToCsv(prefix, titles, metrics) } - var _ifWrk2Available: Option[Boolean] = None - @tailrec final def ifWrk2Available(test: ⇒ Unit): Unit = { + private var _ifWrk2Available: Option[Boolean] = None + @tailrec private final def ifWrk2Available(test: ⇒ Unit): Unit = { _ifWrk2Available match { case Some(false) ⇒ throw new TestPendingException() case Some(true) ⇒ test @@ -206,7 +251,7 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) } var _ifAbAvailable: Option[Boolean] = None - @tailrec final def ifAbAvailable(test: ⇒ Unit): Unit = { + @tailrec private final def ifAbAvailable(test: ⇒ Unit): Unit = { _ifAbAvailable match { case Some(false) ⇒ throw new TestPendingException() case Some(true) ⇒ test From 6e0a2caf4b89674f8cd55108e540c032883091e9 Mon Sep 17 00:00:00 2001 From: Jacek Kunicki Date: Thu, 14 Jul 2016 21:54:33 +0200 Subject: [PATCH 016/155] Corrected onPush description in Custom stream processing docs #20959 (#20960) --- akka-docs/rst/java/stream/stream-customize.rst | 2 +- akka-docs/rst/scala/stream/stream-customize.rst | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/akka-docs/rst/java/stream/stream-customize.rst b/akka-docs/rst/java/stream/stream-customize.rst index fa6bf82473..691a38c628 100644 --- a/akka-docs/rst/java/stream/stream-customize.rst +++ b/akka-docs/rst/java/stream/stream-customize.rst @@ -105,7 +105,7 @@ The following operations are available for *input* ports: The events corresponding to an *input* port can be received in an :class:`AbstractInHandler` instance registered to the input port using ``setHandler(in, handler)``. 
This handler has three callbacks: -* ``onPush()`` is called when the output port has now a new element. Now it is possible to acquire this element using +* ``onPush()`` is called when the input port now has a new element. Now it is possible to acquire this element using ``grab(in)`` and/or call ``pull(in)`` on the port to request the next element. It is not mandatory to grab the element, but if it is pulled while the element has not been grabbed it will drop the buffered element. * ``onUpstreamFinish()`` is called once the upstream has completed and no longer can be pulled for new elements. diff --git a/akka-docs/rst/scala/stream/stream-customize.rst b/akka-docs/rst/scala/stream/stream-customize.rst index fbefb77dc1..0aaa7c7efd 100644 --- a/akka-docs/rst/scala/stream/stream-customize.rst +++ b/akka-docs/rst/scala/stream/stream-customize.rst @@ -108,7 +108,7 @@ The following operations are available for *input* ports: The events corresponding to an *input* port can be received in an :class:`InHandler` instance registered to the input port using ``setHandler(in, handler)``. This handler has three callbacks: -* ``onPush()`` is called when the output port has now a new element. Now it is possible to acquire this element using +* ``onPush()`` is called when the input port now has a new element. Now it is possible to acquire this element using ``grab(in)`` and/or call ``pull(in)`` on the port to request the next element. It is not mandatory to grab the element, but if it is pulled while the element has not been grabbed it will drop the buffered element. * ``onUpstreamFinish()`` is called once the upstream has completed and no longer can be pulled for new elements. @@ -481,4 +481,3 @@ that he gave up). It is interesting to note that a simplified form of this problem has found its way into the `dotty test suite `_. Dotty is the development version of Scala on its way to Scala 3.
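To make the corrected callback semantics concrete, a minimal, illustrative sketch (not part of the patch) of a pass-through stage wiring up the ``InHandler``/``OutHandler`` callbacks described above, using the akka-stream 2.4 ``GraphStage`` API:

  import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
  import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }

  // Forwards each element unchanged; onPush fires when the *input* port has a
  // new element, which is then acquired with grab(in) and pushed downstream.
  class PassThrough[A] extends GraphStage[FlowShape[A, A]] {
    val in = Inlet[A]("PassThrough.in")
    val out = Outlet[A]("PassThrough.out")
    override val shape = FlowShape(in, out)

    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) {
        setHandler(in, new InHandler {
          override def onPush(): Unit = push(out, grab(in)) // input port has a new element
        })
        setHandler(out, new OutHandler {
          override def onPull(): Unit = pull(in) // downstream is ready for the next element
        })
      }
  }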
- From 03fcf871a9e94333a0f40e5aed28513c744a84c2 Mon Sep 17 00:00:00 2001 From: abesanderson Date: Fri, 15 Jul 2016 02:02:37 -0600 Subject: [PATCH 017/155] additional debug logging for fsm #20952 * additional debug logging for fsm * don't construct the string if debug logging disabled --- akka-actor/src/main/scala/akka/actor/FSM.scala | 2 +- .../src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 242083a730..bc1d0929cb 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -785,7 +785,7 @@ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ case a: ActorRef ⇒ a.toString case _ ⇒ "unknown" } - log.debug("processing " + event + " from " + srcstr) + log.debug("processing {} from {} in state {}", event, srcstr, stateName) } if (logDepth > 0) { diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala index 9994e95154..787c9f8171 100644 --- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala +++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala @@ -575,7 +575,7 @@ trait LoggingPersistentFSM[S, D, E] extends PersistentFSMBase[S, D, E] { this: A case a: ActorRef ⇒ a.toString case _ ⇒ "unknown" } - log.debug("processing " + event + " from " + srcstr) + log.debug("processing {} from {} in state {}", event, srcstr, stateName) } if (logDepth > 0) { From f0a1ba84679c78dd258230536af4e601cfa83a7a Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Fri, 15 Jul 2016 10:52:40 +0200 Subject: [PATCH 018/155] =htc replace getSimpleName with Logging.simpleName (#20951) As getSimpleName sometimes has issues on some Scala classes (blows up).
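The failure mode alluded to here: for some nested Scala classes, ``Class.getSimpleName`` can throw (for example ``java.lang.InternalError: Malformed class name`` on several JVM versions), while ``akka.event.Logging.simpleName`` derives the name from ``getName`` via plain string manipulation. A hedged, illustrative sketch of the kind of shape that can trip it up (not part of the patch):

  object Outer {
    object Inner {
      class Leaf // nesting through objects yields binary names like Outer$Inner$Leaf
    }
  }

  // May blow up with java.lang.InternalError: Malformed class name on some JVMs:
  //   (new Outer.Inner.Leaf).getClass.getSimpleName
  // The safe alternative this patch switches to:
  //   akka.event.Logging.simpleName((new Outer.Inner.Leaf).getClass)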
--- .../akka/http/scaladsl/model/headers/headers.scala | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala index e19385dafd..44c43f8857 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala @@ -16,6 +16,7 @@ import scala.util.{ Failure, Success, Try } import scala.annotation.tailrec import scala.collection.immutable import akka.parboiled2.util.Base64 +import akka.event.Logging import akka.http.impl.util._ import akka.http.javadsl.{ model ⇒ jm } import akka.http.scaladsl.model._ @@ -39,8 +40,14 @@ sealed abstract class ModeledCompanion[T: ClassTag] extends Renderable { /** INTERNAL API */ private[akka] object ModeledCompanion { def nameFromClass[T](clazz: Class[T]): String = { - val name = clazz.getSimpleName.replace("$minus", "-") - if (name.last == '$') name.dropRight(1) // trailing $ + val name = { + val n = Logging.simpleName(clazz).replace("$minus", "-") + if (n.last == '$') n.dropRight(1) // drop trailing $ + else n + } + + val dollarIndex = name.indexOf('$') + if (dollarIndex != -1) name.drop(dollarIndex + 1) else name } } From 3871e18acddcad1bce80570cd7f9294ec926b138 Mon Sep 17 00:00:00 2001 From: Thomas Szymanski Date: Fri, 15 Jul 2016 12:38:11 +0200 Subject: [PATCH 019/155] Fix typos in HTTP streaming doc (#20963) --- .../rst/java/http/implications-of-streaming-http-entity.rst | 6 +++--- .../scala/http/implications-of-streaming-http-entity.rst | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/akka-docs/rst/java/http/implications-of-streaming-http-entity.rst b/akka-docs/rst/java/http/implications-of-streaming-http-entity.rst index b66f66da53..efa76d8ef6 100644 --- a/akka-docs/rst/java/http/implications-of-streaming-http-entity.rst +++ b/akka-docs/rst/java/http/implications-of-streaming-http-entity.rst @@ -23,12 +23,12 @@ Client-Side handling of streaming HTTP Entities Consuming the HTTP Response Entity (Client) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The most commong use-case of course is consuming the response entity, which can be done via +The most common use-case of course is consuming the response entity, which can be done via running the underlying ``dataBytes`` Source. This is as simple as running the dataBytes source, (or on the server-side using directives such as It is encouraged to use various streaming techniques to utilise the underlying infrastructure to its fullest, -for example by framing the incoming chunks, parsing them line-by-line and the connecting the flow into another +for example by framing the incoming chunks, parsing them line-by-line and then connecting the flow into another destination Sink, such as a File or other Akka Streams connector: .. 
includecode:: ../code/docs/http/javadsl/HttpClientExampleDocTest.java#manual-entity-consume-example-1 @@ -108,7 +108,7 @@ Closing connections is also explained in depth in the :ref:`http-closing-connect Pending: Automatic discarding of not used entities ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Under certin conditions is is possible to detect an entity is very unlikely to be used by the user for a given request, +Under certain conditions it is possible to detect an entity is very unlikely to be used by the user for a given request, and issue warnings or discard the entity automatically. This advanced feature has not been implemented yet, see the below note and issues for further discussion and ideas. diff --git a/akka-docs/rst/scala/http/implications-of-streaming-http-entity.rst b/akka-docs/rst/scala/http/implications-of-streaming-http-entity.rst index d6a0403eef..c53d68859e 100644 --- a/akka-docs/rst/scala/http/implications-of-streaming-http-entity.rst +++ b/akka-docs/rst/scala/http/implications-of-streaming-http-entity.rst @@ -23,12 +23,12 @@ Client-Side handling of streaming HTTP Entities Consuming the HTTP Response Entity (Client) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The most commong use-case of course is consuming the response entity, which can be done via +The most common use-case of course is consuming the response entity, which can be done via running the underlying ``dataBytes`` Source. This is as simple as running the dataBytes source, (or on the server-side using directives such as It is encouraged to use various streaming techniques to utilise the underlying infrastructure to its fullest, -for example by framing the incoming chunks, parsing them line-by-line and the connecting the flow into another +for example by framing the incoming chunks, parsing them line-by-line and then connecting the flow into another destination Sink, such as a File or other Akka Streams connector: .. includecode:: ../code/docs/http/scaladsl/HttpClientExampleSpec.scala @@ -116,7 +116,7 @@ Closing connections is also explained in depth in the :ref:`http-closing-connect Pending: Automatic discarding of not used entities ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Under certin conditions is is possible to detect an entity is very unlikely to be used by the user for a given request, +Under certain conditions it is possible to detect an entity is very unlikely to be used by the user for a given request, and issue warnings or discard the entity automatically. This advanced feature has not been implemented yet, see the below note and issues for further discussion and ideas. 
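As a companion to the wording fixed above, a minimal, illustrative sketch of framing an incoming entity line-by-line into a file sink, in the spirit of the snippets the ``includecode`` directives pull in (assumptions: an ``HttpResponse`` at hand, an implicit materializer in scope, and a made-up target path):

  import java.nio.file.Paths
  import akka.http.scaladsl.model.HttpResponse
  import akka.stream.Materializer
  import akka.stream.scaladsl.{ FileIO, Framing }
  import akka.util.ByteString

  // Consume the streamed entity without buffering it all in memory:
  // frame the chunks into lines, then stream the lines into a file.
  def linesToFile(response: HttpResponse)(implicit mat: Materializer) =
    response.entity.dataBytes
      .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 8192, allowTruncation = true))
      .map(line => line ++ ByteString("\n"))
      .runWith(FileIO.toPath(Paths.get("/tmp/response-lines.txt"))) // hypothetical target path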
From b7567a5c5568d7752854c9d38f568aff712c2da1 Mon Sep 17 00:00:00 2001 From: Nikolay Donets Date: Fri, 15 Jul 2016 13:38:47 +0200 Subject: [PATCH 020/155] Nikdon 20535 check same origin (#20962) * =htp checkSameOrigin shows allowed origins add HttpOriginRangeDefault into the javadsl and refactor resolving binary compatibility + add copyright return back public static final in the HttpOriginRange * =htp #20535 address bin compat issues in checkSameOrigin PR --- .../HeaderDirectivesExamplesSpec.scala | 6 ++--- .../model/headers/HttpOriginRange.java | 24 ++++++++--------- .../scaladsl/model/headers/HttpOrigin.scala | 5 ++-- .../directives/HeaderDirectivesTest.java | 27 +++++++++++++++++++ .../directives/HeaderDirectivesSpec.scala | 4 +-- .../akka/http/javadsl/server/Rejections.scala | 2 +- .../server/directives/HeaderDirectives.scala | 19 ++++++++----- .../akka/http/scaladsl/server/Rejection.scala | 4 +-- .../scaladsl/server/RejectionHandler.scala | 4 +-- .../server/directives/HeaderDirectives.scala | 4 +-- 10 files changed, 67 insertions(+), 32 deletions(-) diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/HeaderDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/HeaderDirectivesExamplesSpec.scala index 695f23dcf8..e417ca35a4 100644 --- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/HeaderDirectivesExamplesSpec.scala +++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/HeaderDirectivesExamplesSpec.scala @@ -209,13 +209,13 @@ class HeaderDirectivesExamplesSpec extends RoutingSpec with Inside { val invalidOriginHeader = Origin(invalidHttpOrigin) Get("abc") ~> invalidOriginHeader ~> route ~> check { inside(rejection) { - case InvalidOriginRejection(invalidOrigins) ⇒ - invalidOrigins shouldEqual Seq(invalidHttpOrigin) + case InvalidOriginRejection(allowedOrigins) ⇒ + allowedOrigins shouldEqual Seq(correctOrigin) } } Get("abc") ~> invalidOriginHeader ~> Route.seal(route) ~> check { status shouldEqual StatusCodes.Forbidden - responseAs[String] should include(s"${invalidHttpOrigin.value}") + responseAs[String] should include(s"${correctOrigin.value}") } } } diff --git a/akka-http-core/src/main/java/akka/http/javadsl/model/headers/HttpOriginRange.java b/akka-http-core/src/main/java/akka/http/javadsl/model/headers/HttpOriginRange.java index 0c0ae05863..70a949883f 100644 --- a/akka-http-core/src/main/java/akka/http/javadsl/model/headers/HttpOriginRange.java +++ b/akka-http-core/src/main/java/akka/http/javadsl/model/headers/HttpOriginRange.java @@ -11,18 +11,18 @@ import akka.http.impl.util.Util; * @see HttpOriginRanges for convenience access to often used values. */ public abstract class HttpOriginRange { - public abstract boolean matches(HttpOrigin origin); + public abstract boolean matches(HttpOrigin origin); - public static HttpOriginRange create(HttpOrigin... origins) { - return HttpOriginRange$.MODULE$.apply(Util.convertArray(origins)); - } + public static HttpOriginRange create(HttpOrigin... origins) { + return HttpOriginRange$.MODULE$.apply(Util.convertArray(origins)); + } - /** - * @deprecated because of troublesome initialisation order (with regards to scaladsl class implementing this class). - * In some edge cases this field could end up containing a null value. - * Will be removed in Akka 3.x, use {@link HttpEncodingRanges#ALL} instead. 
- */ - @Deprecated - // FIXME: Remove in Akka 3.0 - public static final HttpOriginRange ALL = HttpOriginRanges.ALL; + /** + * @deprecated because of troublesome initialisation order (with regards to scaladsl class implementing this class). + * In some edge cases this field could end up containing a null value. + * Will be removed in Akka 3.x, use {@link HttpOriginRanges#ALL} instead. + */ + @Deprecated + // FIXME: Remove in Akka 3.0 + public static final HttpOriginRange ALL = HttpOriginRanges.ALL; } diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpOrigin.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpOrigin.scala index 27545e3848..bd32d48471 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpOrigin.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpOrigin.scala @@ -5,7 +5,6 @@ package akka.http.scaladsl.model.headers import akka.http.impl.model.JavaInitialization -import akka.util.Unsafe import language.implicitConversions import scala.collection.immutable @@ -22,6 +21,7 @@ abstract class HttpOriginRange extends jm.headers.HttpOriginRange with ValueRend /** Java API */ def matches(origin: jm.headers.HttpOrigin): Boolean = matches(origin.asScala) } + object HttpOriginRange { case object `*` extends HttpOriginRange { def matches(origin: HttpOrigin) = true @@ -43,6 +43,7 @@ object HttpOriginRange { final case class HttpOrigin(scheme: String, host: Host) extends jm.headers.HttpOrigin with ValueRenderable { def render[R <: Rendering](r: R): r.type = host.renderValue(r ~~ scheme ~~ "://") } + object HttpOrigin { implicit val originsRenderer: Renderer[immutable.Seq[HttpOrigin]] = Renderer.seqRenderer(" ", "null") @@ -50,4 +51,4 @@ object HttpOrigin { val parser = new UriParser(str, UTF8, Uri.ParsingMode.Relaxed) parser.parseOrigin() } -} \ No newline at end of file +} diff --git a/akka-http-tests/src/test/java/akka/http/javadsl/server/directives/HeaderDirectivesTest.java b/akka-http-tests/src/test/java/akka/http/javadsl/server/directives/HeaderDirectivesTest.java index 8fe7fac43f..fe9ecad53f 100644 --- a/akka-http-tests/src/test/java/akka/http/javadsl/server/directives/HeaderDirectivesTest.java +++ b/akka-http-tests/src/test/java/akka/http/javadsl/server/directives/HeaderDirectivesTest.java @@ -205,5 +205,32 @@ public class HeaderDirectivesTest extends JUnitRouteTest { .run(HttpRequest.create().addHeader(Origin.create(invalidOriginHeader))) .assertStatusCode(StatusCodes.FORBIDDEN); } + + @Test + public void testCheckSameOriginGivenALL() { + final HttpOrigin validOriginHeader = HttpOrigin.create("http://localhost", Host.create("8080")); + + // not very interesting case, however here we check that the directive simply avoids performing the check + final HttpOriginRange everythingGoes = HttpOriginRanges.ALL; + + final TestRoute route = testRoute(checkSameOrigin(everythingGoes, () -> complete("Result"))); + + route + .run(HttpRequest.create().addHeader(Origin.create(validOriginHeader))) + .assertStatusCode(StatusCodes.OK) + .assertEntity("Result"); + + route + .run(HttpRequest.create()) + .assertStatusCode(StatusCodes.OK) + .assertEntity("Result"); + + final HttpOrigin otherOriginHeader = HttpOrigin.create("http://invalid.com", Host.create("8080")); + + route + .run(HttpRequest.create().addHeader(Origin.create(otherOriginHeader))) + .assertStatusCode(StatusCodes.OK) + .assertEntity("Result"); + } } diff --git 
a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/HeaderDirectivesSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/HeaderDirectivesSpec.scala index 1c0b893b64..6b636192d8 100644 --- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/HeaderDirectivesSpec.scala +++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/HeaderDirectivesSpec.scala @@ -193,12 +193,12 @@ class HeaderDirectivesSpec extends RoutingSpec with Inside { val invalidOriginHeader = Origin(invalidHttpOrigin) Get("abc") ~> invalidOriginHeader ~> route ~> check { inside(rejection) { - case InvalidOriginRejection(invalidOrigins) ⇒ invalidOrigins shouldEqual Seq(invalidHttpOrigin) + case InvalidOriginRejection(allowedOrigins) ⇒ allowedOrigins shouldEqual Seq(correctOrigin) } } Get("abc") ~> invalidOriginHeader ~> Route.seal(route) ~> check { status shouldEqual StatusCodes.Forbidden - responseAs[String] should include(s"${invalidHttpOrigin.value}") + responseAs[String] should include(s"${correctOrigin.value}") } } } diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/Rejections.scala b/akka-http/src/main/scala/akka/http/javadsl/server/Rejections.scala index 95d2e3b011..5dc4b11c6a 100644 --- a/akka-http/src/main/scala/akka/http/javadsl/server/Rejections.scala +++ b/akka-http/src/main/scala/akka/http/javadsl/server/Rejections.scala @@ -110,7 +110,7 @@ trait MalformedHeaderRejection extends Rejection { * Signals that the request was rejected because `Origin` header value is invalid. */ trait InvalidOriginRejection extends Rejection { - def getInvalidOrigins: java.util.List[akka.http.javadsl.model.headers.HttpOrigin] + def getAllowedOrigins: java.util.List[akka.http.javadsl.model.headers.HttpOrigin] } /** diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/directives/HeaderDirectives.scala b/akka-http/src/main/scala/akka/http/javadsl/server/directives/HeaderDirectives.scala index ebc94285bb..5324b77a5e 100644 --- a/akka-http/src/main/scala/akka/http/javadsl/server/directives/HeaderDirectives.scala +++ b/akka-http/src/main/scala/akka/http/javadsl/server/directives/HeaderDirectives.scala @@ -11,12 +11,12 @@ import akka.actor.ReflectiveDynamicAccess import scala.compat.java8.OptionConverters import scala.compat.java8.OptionConverters._ import akka.http.impl.util.JavaMapping.Implicits._ +import akka.http.javadsl.model.headers.{ HttpOriginRange, HttpOriginRanges } import akka.http.javadsl.model.{ HttpHeader, StatusCodes } -import akka.http.javadsl.model.headers.HttpOriginRange import akka.http.javadsl.server.{ InvalidOriginRejection, MissingHeaderRejection, Route } +import akka.http.scaladsl.model.headers.HttpOriginRange.Default import akka.http.scaladsl.model.headers.{ ModeledCustomHeader, ModeledCustomHeaderCompanion, Origin } -import akka.http.scaladsl.server.directives.{ HeaderMagnet, BasicDirectives ⇒ B, HeaderDirectives ⇒ D } -import akka.stream.ActorMaterializer +import akka.http.scaladsl.server.directives.{ HeaderMagnet, HeaderDirectives ⇒ D } import scala.reflect.ClassTag import scala.util.{ Failure, Success } @@ -33,9 +33,16 @@ abstract class HeaderDirectives extends FutureDirectives { * * @group header */ - def checkSameOrigin(allowed: HttpOriginRange, inner: jf.Supplier[Route]): Route = RouteAdapter { - D.checkSameOrigin(allowed.asScala) { inner.get().delegate } - } + // TODO When breaking binary compatibility this should become HttpOriginRange.Default, see https://github.com/akka/akka/pull/20776/files#r70049845 + def 
checkSameOrigin(allowed: HttpOriginRange, inner: jf.Supplier[Route]): Route = + allowed match { + case HttpOriginRanges.ALL | HttpOriginRange.ALL | akka.http.scaladsl.model.headers.HttpOriginRange.`*` ⇒ pass(inner) + case _ ⇒ RouteAdapter { + // safe, we know it's not the `*` header + val default = allowed.asInstanceOf[akka.http.scaladsl.model.headers.HttpOriginRange.Default] + D.checkSameOrigin(default) { inner.get().delegate } + } + } /** * Extracts an HTTP header value using the given function. If the function result is undefined for all headers the diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/Rejection.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/Rejection.scala index 7a2a266dca..eb912fc931 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/Rejection.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/Rejection.scala @@ -98,9 +98,9 @@ final case class MalformedHeaderRejection(headerName: String, errorMsg: String, * Rejection created by [[akka.http.scaladsl.server.directives.HeaderDirectives.checkSameOrigin]]. * Signals that the request was rejected because `Origin` header value is invalid. */ -final case class InvalidOriginRejection(invalidOrigins: immutable.Seq[SHttpOrigin]) +final case class InvalidOriginRejection(allowedOrigins: immutable.Seq[SHttpOrigin]) extends jserver.InvalidOriginRejection with Rejection { - override def getInvalidOrigins: java.util.List[JHttpOrigin] = invalidOrigins.map(_.asJava).asJava + override def getAllowedOrigins: java.util.List[JHttpOrigin] = allowedOrigins.map(_.asJava).asJava } /** diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala index acdb5715af..e076c73a52 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala @@ -167,8 +167,8 @@ object RejectionHandler { complete((BadRequest, "Request is missing required HTTP header '" + headerName + '\'')) } .handle { - case InvalidOriginRejection(invalidOrigin) ⇒ - complete((Forbidden, s"Invalid `Origin` header values: ${invalidOrigin.mkString(", ")}")) + case InvalidOriginRejection(allowedOrigins) ⇒ + complete((Forbidden, s"Allowed `Origin` header values: ${allowedOrigins.mkString(", ")}")) } .handle { case MissingQueryParamRejection(paramName) ⇒ diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala index 0243e58990..1fa50f54bd 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala @@ -28,10 +28,10 @@ trait HeaderDirectives { * * @group header */ - def checkSameOrigin(allowed: HttpOriginRange): Directive0 = { + def checkSameOrigin(allowed: HttpOriginRange.Default): Directive0 = { headerValueByType[Origin]().flatMap { origin ⇒ if (origin.origins.exists(allowed.matches)) pass - else reject(InvalidOriginRejection(origin.origins)) + else reject(InvalidOriginRejection(allowed.origins)) } } From 218f81196c4d9006e532794001daf2345e07a88e Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Fri, 15 Jul 2016 18:53:13 +0200 Subject: [PATCH 021/155] =htp multinode latency spec for HTTP (#20964) --- akka-http-tests/build.sbt | 3 + .../AkkaHttpServerLatencyMultiNodeSpec.scala} | 223 
+++++++++++++----- .../scala/akka/http/STMultiNodeSpec.scala | 24 ++ .../scalatest/extra}/QuietReporter.scala | 0 project/AkkaBuild.scala | 6 +- 5 files changed, 189 insertions(+), 67 deletions(-) rename akka-http-tests/src/{test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala => multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala} (52%) create mode 100644 akka-http-tests/src/multi-jvm/scala/akka/http/STMultiNodeSpec.scala rename akka-remote-tests/src/test/scala/{akka/remote => org/scalatest/extra}/QuietReporter.scala (100%) diff --git a/akka-http-tests/build.sbt b/akka-http-tests/build.sbt index 489f69bc8e..cf5b82307f 100644 --- a/akka-http-tests/build.sbt +++ b/akka-http-tests/build.sbt @@ -1,4 +1,5 @@ import akka._ +import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys._ AkkaBuild.defaultSettings AkkaBuild.dontPublishSettings @@ -12,4 +13,6 @@ scalacOptions in Compile += "-language:_" mainClass in run in Test := Some("akka.http.javadsl.SimpleServerApp") enablePlugins(ScaladocNoVerificationOfDiagrams) +enablePlugins(MultiNodeScalaTest) + disablePlugins(MimaPlugin) diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala similarity index 52% rename from akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala rename to akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala index 46908d6cb4..d4bf9e5e27 100644 --- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/AkkaHttpServerThroughputSpec.scala +++ b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala @@ -1,50 +1,88 @@ -/* - * Copyright (C) 2016 Lightbend Inc. +/** + * Copyright (C) 2009-2016 Lightbend Inc. 
*/ - -package akka.http.scaladsl.server +package akka.http import java.io.{ BufferedWriter, FileWriter } import java.util.concurrent.TimeUnit import akka.NotUsed +import akka.actor.{ Actor, ActorIdentity, ActorRef, Identify, Props } +import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.model.{ ContentTypes, HttpEntity } +import akka.http.scaladsl.server.{ Directives, Route } import akka.http.scaladsl.{ Http, TestUtils } +import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } import akka.stream.ActorMaterializer import akka.stream.scaladsl.Source -import akka.testkit.AkkaSpec -import akka.util.ByteString -import org.scalatest.exceptions.TestPendingException +import akka.testkit.{ ImplicitSender, LongRunningTest } +import akka.util.{ ByteString, Timeout } +import com.typesafe.config.ConfigFactory import org.scalatest.concurrent.ScalaFutures +import org.scalatest.exceptions.TestPendingException import scala.annotation.tailrec -import scala.concurrent.Await import scala.concurrent.duration._ +import scala.concurrent.{ Await, Promise } import scala.util.Try -class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) - with ScalaFutures { +object AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeConfig { - override def expectedTestDuration = 1.hour - - def this() = this( + commonConfig(ConfigFactory.parseString( """ akka { + actor.provider = "akka.remote.RemoteActorRefProvider" stream.materializer.debug.fuzzing-mode = off + + testconductor.barrier-timeout = 20m - test.AkkaHttpServerThroughputSpec { - writeCsv = off + test.AkkaHttpServerLatencySpec { + writeCsv = on # TODO SWITCH BACK rate = 10000 duration = 30s totalRequestsFactor = 1.0 } } - """.stripMargin - ) + """)) - implicit val dispatcher = system.dispatcher - implicit val mat = ActorMaterializer() + val server = role("server") + val loadGenerator = role("loadGenerator") + + final case class LoadGenCommand(cmd: String) + final case class LoadGenResults(results: String) { + def lines = results.split("\n") + } + final case class SetServerPort(port: Int) + class HttpLoadGeneratorActor(serverPort: Promise[Int]) extends Actor { + override def receive: Receive = { + case SetServerPort(port) ⇒ + serverPort.success(port) + context become ready(port) + case other ⇒ + throw new RuntimeException("No server port known! Initialize with SetServerPort() first! Got: " + other) + } + + def ready(port: Int): Receive = { + case LoadGenCommand(cmd) ⇒ + import scala.sys.process._ + val res = cmd.!! // blocking. DON'T DO THIS AT HOME, KIDS! + sender() ! 
LoadGenResults(res) + } + } +} + +class AkkaHttpServerLatencyMultiNodeSpecMultiJvmNode1 extends MultiNodeSpecSpec +class AkkaHttpServerLatencyMultiNodeSpecMultiJvmNode2 extends MultiNodeSpecSpec + +class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec) with STMultiNodeSpec + with ScalaFutures with ImplicitSender { + + import AkkaHttpServerLatencyMultiNodeSpec._ + + override implicit def patienceConfig: PatienceConfig = PatienceConfig(10.seconds, interval = 300.millis) + + def initialParticipants = 2 val MediumByteString = ByteString(Vector.fill(1024)(0.toByte): _*) @@ -54,9 +92,9 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) val source_100x: Source[ByteString, NotUsed] = Source.repeat(MediumByteString).take(100) val tenXResponseLength = array_10x.length val hundredXResponseLength = array_100x.length - + // format: OFF - val routes = { + val routes: Route = { import Directives._ path("ping") { @@ -75,54 +113,77 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) } // format: ON - val (_, hostname, port) = TestUtils.temporaryServerHostnameAndPort() - val binding = Http().bindAndHandle(routes, hostname, port) + val writeCsv = system.settings.config.getBoolean("akka.test.AkkaHttpServerLatencySpec.writeCsv") - val writeCsv = system.settings.config.getBoolean("akka.test.AkkaHttpServerThroughputSpec.writeCsv") - - val totalRequestsFactor = system.settings.config.getDouble("akka.test.AkkaHttpServerThroughputSpec.totalRequestsFactor") + val totalRequestsFactor = system.settings.config.getDouble("akka.test.AkkaHttpServerLatencySpec.totalRequestsFactor") val requests = Math.round(10000 * totalRequestsFactor) - val rate = system.settings.config.getInt("akka.test.AkkaHttpServerThroughputSpec.rate") - val testDuration = system.settings.config.getDuration("akka.test.AkkaHttpServerThroughputSpec.duration", TimeUnit.SECONDS) + val rate = system.settings.config.getInt("akka.test.AkkaHttpServerLatencySpec.rate") + val testDuration = system.settings.config.getDuration("akka.test.AkkaHttpServerLatencySpec.duration", TimeUnit.SECONDS) val connections: Long = 10 + override def binding = _binding + var _binding: Option[ServerBinding] = None + + val serverPortPromise: Promise[Int] = Promise() + def serverPort: Int = serverPortPromise.future.futureValue + def serverHost: String = node(server).address.host.get + // --- urls - val url_ping = s"http://127.0.0.1:$port/ping" - def url_longResponseStream(int: Int) = s"http://127.0.0.1:$port/long-response-stream/$int" - def url_longResponseArray(int: Int) = s"http://127.0.0.1:$port/long-response-array/$int" + def url_ping = s"http://$serverHost:$serverPort/ping" + def url_longResponseStream(int: Int) = s"http://$serverHost:$serverPort/long-response-stream/$int" + def url_longResponseArray(int: Int) = s"http://$serverHost:$serverPort/long-response-array/$int" // --- - "HttpServer" should { - import scala.sys.process._ + "Akka HTTP" must { + implicit val dispatcher = system.dispatcher + implicit val mat = ActorMaterializer() - Await.ready(binding, 3.seconds) + "start Akka HTTP" taggedAs LongRunningTest in { + enterBarrier("startup") + + runOn(loadGenerator) { + system.actorOf(Props(classOf[HttpLoadGeneratorActor], serverPortPromise), "load-gen") + } + enterBarrier("load-gen-ready") + + runOn(server) { + val (_, _, port) = TestUtils.temporaryServerHostnameAndPort() + info(s"Binding Akka HTTP Server to port: $port @ ${myself}") + val futureBinding = Http().bindAndHandle(routes, "0.0.0.0", 
port) + + _binding = Some(futureBinding.futureValue) + setServerPort(port) + } + + enterBarrier("http-server-running") + } + + "warmup" taggedAs LongRunningTest in { + val id = "warmup" - "a warmup" in ifWrk2Available { val wrkOptions = s"""-d 30s -R $rate -c $connections -t $connections""" - s"""wrk $wrkOptions $url_ping""".!!.split("\n") - info("warmup complete.") + runLoadTest(id)(s"""wrk $wrkOptions $url_ping""") } - "have good throughput on PONG response (keep-alive)" in ifWrk2Available { + "have good Latency on PONG response (keep-alive)" taggedAs LongRunningTest in ifWrk2Available { + val id = s"Latency_pong_R:${rate}_C:${connections}_p:" + val wrkOptions = s"""-d ${testDuration}s -R $rate -c $connections -t $connections --u_latency""" - val output = s"""wrk $wrkOptions $url_ping""".!!.split("\n") - infoThe(output) - printWrkPercentiles(s"Throughput_pong_R:${rate}_C:${connections}_p:", output) + runLoadTest(id)(s"""wrk $wrkOptions $url_ping""") } - "have good throughput (ab) (short-lived connections)" in ifAbAvailable { - val id = s"Throughput_AB-short-lived_pong_R:${rate}_C:${connections}_p:" + + "have good Latency (ab) (short-lived connections)" taggedAs LongRunningTest in ifAbAvailable { + val id = s"Latency_AB-short-lived_pong_R:${rate}_C:${connections}_p:" + val abOptions = s"-c $connections -n $requests" - val output = s"""ab $abOptions $url_ping""".!!.split("\n") - infoThe(output) - printAbPercentiles(id, output) + runLoadTest(id)(s"""ab $abOptions $url_ping""") } - "have good throughput (ab) (long-lived connections)" in ifAbAvailable { - val id = s"Throughput_AB_pong_shortLived_R:${rate}_C:${connections}_p:" + + "have good Latency (ab) (long-lived connections)" taggedAs LongRunningTest in ifAbAvailable { + val id = s"Latency_AB_pong_shortLived_R:${rate}_C:${connections}_p:" + val abOptions = s"-c $connections -n $requests" - info(s"""ab $abOptions $url_ping""") - val output = s"""ab $abOptions $url_ping""".!!.split("\n") - infoThe(output) - printAbPercentiles(s"Throughput_ab_pong_R:${rate}_C:${connections}_p:", output) + runLoadTest(id)(s"""ab $abOptions $url_ping""") } List( @@ -130,23 +191,53 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) 100 → hundredXResponseLength ) foreach { case (n, lenght) ⇒ - s"have good throughput (streaming-response($lenght), keep-alive)" in { + s"have good Latency (streaming-response($lenght), keep-alive)" taggedAs LongRunningTest in { + val id = s"Latency_stream($lenght)_R:${rate}_C:${connections}_p:" + val wrkOptions = s"""-d ${testDuration}s -R $rate -c $connections -t $connections --u_latency""" - val output = s"""wrk $wrkOptions ${url_longResponseStream(n)}""".!!.split("\n") - infoThe(output) - printWrkPercentiles(s"Throughput_stream($lenght)_R:${rate}_C:${connections}_p:", output) + runLoadTest(id)(s"""wrk $wrkOptions ${url_longResponseStream(n)}""") } - s"have good throughput (array-response($lenght), keep-alive)" in { + s"have good Latency (array-response($lenght), keep-alive)" taggedAs LongRunningTest in { + val id = s"Latency_array($lenght)_R:${rate}_C:${connections}_p:" + val wrkOptions = s"""-d ${testDuration}s -R $rate -c $connections -t $connections --u_latency""" - val output = s"""wrk $wrkOptions ${url_longResponseArray(n)}""".!!.split("\n") - infoThe(output) - printWrkPercentiles(s"Throughput_array($lenght)_R:${rate}_C:${connections}_p:", output) + runLoadTest(id)(s"""wrk $wrkOptions ${url_longResponseArray(n)}""") } } } - private def infoThe(lines: Array[String]): Unit = - lines.foreach(l ⇒ info(" 
" + l)) + def runLoadTest(id: String)(cmd: String) = { + runOn(loadGenerator) { + info(s"${id} => running: $cmd") + import akka.pattern.ask + implicit val timeout = Timeout(30.minutes) // we don't want to timeout here + + val res = (loadGeneratorActor ? LoadGenCommand(cmd)).mapTo[LoadGenResults] + val results = Await.result(res, timeout.duration) + + if (id contains "warmup") () + else if (cmd startsWith "wrk") printWrkPercentiles(id, results.lines) + else if (cmd startsWith "ab") printAbPercentiles(id, results.lines) + else throw new NotImplementedError(s"Unable to handle [$cmd] results!") + } + + enterBarrier(s"load-test-complete-id:${id}") + } + + def setServerPort(p: Int): Unit = { + serverPortPromise.success(p) + loadGeneratorActor ! SetServerPort(p) + } + + lazy val loadGeneratorActor: ActorRef = { + if (isNode(loadGenerator)) { + system.actorSelection("/user/load-gen") ! Identify(None) + expectMsgType[ActorIdentity].ref.get + } else { + system.actorSelection(node(loadGenerator) / "user" / "load-gen") ! Identify(None) + expectMsgType[ActorIdentity].ref.get + } + } private def dumpToCsv(prefix: String, titles: Seq[String], values: Seq[String]): Unit = if (writeCsv) { @@ -157,6 +248,9 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) w.write("\n") w.flush() w.close() + + println("====:" + titles.reverse.map(it ⇒ "\"" + it + "\"").mkString(",") + "\n") + println("====:" + values.reverse.map(it ⇒ "\"" + it + "\"").mkString(",") + "\n") } private def printWrkPercentiles(prefix: String, lines: Array[String]): Unit = { @@ -167,6 +261,8 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) Duration(dd).toMillis } + println("lines.mkString() = " + lines.mkString("\n")) + var i = 0 val correctedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Latency Distribution").map(_._2).get @@ -264,7 +360,4 @@ class AkkaHttpServerThroughputSpec(config: String) extends AkkaSpec(config) } } - override protected def beforeTermination(): Unit = { - binding.futureValue.unbind().futureValue - } } diff --git a/akka-http-tests/src/multi-jvm/scala/akka/http/STMultiNodeSpec.scala b/akka-http-tests/src/multi-jvm/scala/akka/http/STMultiNodeSpec.scala new file mode 100644 index 0000000000..86d04fac6b --- /dev/null +++ b/akka-http-tests/src/multi-jvm/scala/akka/http/STMultiNodeSpec.scala @@ -0,0 +1,24 @@ +/** + * Copyright (C) 2009-2016 Lightbend Inc. 
+ */ +package akka.http + +import akka.http.scaladsl.Http.ServerBinding +import akka.remote.testkit.MultiNodeSpecCallbacks +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } + +trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with Matchers with BeforeAndAfterAll + with ScalaFutures { + + def binding: Option[ServerBinding] + + override def beforeAll() = + multiNodeSpecBeforeAll() + + override def afterAll() = { + binding foreach { _.unbind().futureValue } + multiNodeSpecAfterAll() + } + +} diff --git a/akka-remote-tests/src/test/scala/akka/remote/QuietReporter.scala b/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala similarity index 100% rename from akka-remote-tests/src/test/scala/akka/remote/QuietReporter.scala rename to akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 3318e62d05..5d417986a9 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -259,8 +259,10 @@ object AkkaBuild extends Build { lazy val httpTests = Project( id = "akka-http-tests", base = file("akka-http-tests"), - dependencies = Seq(httpTestkit % "test", streamTestkit % "test->test", testkit % "test->test", httpSprayJson, httpXml, httpJackson) - ) + dependencies = Seq( + httpTestkit % "test", streamTestkit % "test->test", testkit % "test->test", httpSprayJson, httpXml, httpJackson, + multiNodeTestkit, cluster %"test->test", remoteTests % "test->test") // required for multi-node latency/throughput Spec + ).configs(MultiJvm) lazy val httpMarshallersScala = Project( id = "akka-http-marshallers-scala-experimental", From e986989bfa210a79e8c07c15876d9709ecd35a2a Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Fri, 15 Jul 2016 19:19:53 +0200 Subject: [PATCH 022/155] =htp fix typo in latency spec using AB (#20965) --- .../scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala index d4bf9e5e27..7a99126a53 100644 --- a/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala +++ b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala @@ -180,9 +180,9 @@ class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec } "have good Latency (ab) (long-lived connections)" taggedAs LongRunningTest in ifAbAvailable { - val id = s"Latency_AB_pong_shortLived_R:${rate}_C:${connections}_p:" + val id = s"Latency_AB_pong_long-lived_R:${rate}_C:${connections}_p:" - val abOptions = s"-c $connections -n $requests" + val abOptions = s"-c $connections -n $requests -k" runLoadTest(id)(s"""ab $abOptions $url_ping""") } From 3f12509f276493b109bcb6e89b9e922cae89730c Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Sun, 17 Jul 2016 20:06:22 +0200 Subject: [PATCH 023/155] =htp attempt to fix PR validation issue, unable to reproduce (#20975) --- .../AkkaHttpServerLatencyMultiNodeSpec.scala | 103 ++++++++---------- project/AkkaBuild.scala | 2 +- 2 files changed, 49 insertions(+), 56 deletions(-) diff --git a/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala index 7a99126a53..507e43dacc 
100644 --- a/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala +++ b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala @@ -31,6 +31,7 @@ object AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString( """ akka { + actor.default-mailbox.mailbox-type = "akka.dispatch.UnboundedMailbox" actor.provider = "akka.remote.RemoteActorRefProvider" stream.materializer.debug.fuzzing-mode = off @@ -49,6 +50,31 @@ object AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeConfig { val server = role("server") val loadGenerator = role("loadGenerator") + private var _ifWrk2Available: Option[Boolean] = None + final def ifWrk2Available(test: ⇒ Unit): Unit = + if (isWrk2Available) test else throw new TestPendingException() + final def isWrk2Available: Boolean = + _ifWrk2Available getOrElse { + import scala.sys.process._ + val wrkExitCode = Try("""wrk""".!).getOrElse(-1) + + _ifWrk2Available = Some(wrkExitCode == 1) // app found, help displayed + isWrk2Available + } + + private var _abAvailable: Option[Boolean] = None + final def ifAbAvailable(test: ⇒ Unit): Unit = + if (isAbAvailable) test else throw new TestPendingException() + + final def isAbAvailable: Boolean = + _abAvailable getOrElse { + import scala.sys.process._ + val abExitCode = Try("""ab -h""".!).getOrElse(-1) + _abAvailable = Some(abExitCode == 22) // app found, help displayed (22 return code is when -h runs in ab, weird but true) + isAbAvailable + } + + final case class LoadGenCommand(cmd: String) final case class LoadGenResults(results: String) { def lines = results.split("\n") @@ -63,19 +89,27 @@ object AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeConfig { throw new RuntimeException("No server port known! Initialize with SetServerPort() first! Got: " + other) } + import scala.sys.process._ def ready(port: Int): Receive = { - case LoadGenCommand(cmd) ⇒ - import scala.sys.process._ - val res = cmd.!! // blocking. DON'T DO THIS AT HOME, KIDS! + case LoadGenCommand(cmd) if cmd startsWith "wrk" ⇒ + val res = + if (isWrk2Available) cmd.!! // blocking. DON'T DO THIS AT HOME, KIDS! + else "=== WRK NOT AVAILABLE ===" + sender() ! LoadGenResults(res) + + case LoadGenCommand(cmd) if cmd startsWith "ab" ⇒ + val res = + if (isAbAvailable) cmd.!! // blocking. DON'T DO THIS AT HOME, KIDS! + else "=== AB NOT AVAILABLE ===" sender() ! 
LoadGenResults(res) } } } -class AkkaHttpServerLatencyMultiNodeSpecMultiJvmNode1 extends MultiNodeSpecSpec -class AkkaHttpServerLatencyMultiNodeSpecMultiJvmNode2 extends MultiNodeSpecSpec +class AkkaHttpServerLatencyMultiNodeSpecMultiJvmNode1 extends AkkaHttpServerLatencyMultiNodeSpec +class AkkaHttpServerLatencyMultiNodeSpecMultiJvmNode2 extends AkkaHttpServerLatencyMultiNodeSpec -class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec) with STMultiNodeSpec +class AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec) with STMultiNodeSpec with ScalaFutures with ImplicitSender { import AkkaHttpServerLatencyMultiNodeSpec._ @@ -113,8 +147,6 @@ class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec } // format: ON - val writeCsv = system.settings.config.getBoolean("akka.test.AkkaHttpServerLatencySpec.writeCsv") - val totalRequestsFactor = system.settings.config.getDouble("akka.test.AkkaHttpServerLatencySpec.totalRequestsFactor") val requests = Math.round(10000 * totalRequestsFactor) val rate = system.settings.config.getInt("akka.test.AkkaHttpServerLatencySpec.rate") @@ -158,7 +190,7 @@ class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec enterBarrier("http-server-running") } - "warmup" taggedAs LongRunningTest in { + "warmup" taggedAs LongRunningTest in ifWrk2Available { val id = "warmup" val wrkOptions = s"""-d 30s -R $rate -c $connections -t $connections""" @@ -239,19 +271,10 @@ class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec } } - private def dumpToCsv(prefix: String, titles: Seq[String], values: Seq[String]): Unit = - if (writeCsv) { - val w = new BufferedWriter(new FileWriter(prefix + ".csv")) - w.write(titles.reverse.map(it ⇒ "\"" + it + "\"").mkString(",")) - w.write("\n") - w.write(values.reverse.map(it ⇒ "\"" + it + "\"").mkString(",")) - w.write("\n") - w.flush() - w.close() - - println("====:" + titles.reverse.map(it ⇒ "\"" + it + "\"").mkString(",") + "\n") - println("====:" + values.reverse.map(it ⇒ "\"" + it + "\"").mkString(",") + "\n") - } + private def renderResults(prefix: String, titles: Seq[String], values: Seq[String]): Unit = { + println("====:" + titles.reverse.map(it ⇒ "\"" + it + "\"").mkString(",") + "\n") + println("====:" + values.reverse.map(it ⇒ "\"" + it + "\"").mkString(",") + "\n") + } private def printWrkPercentiles(prefix: String, lines: Array[String]): Unit = { val percentilesToPrint = 8 @@ -261,8 +284,6 @@ class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec Duration(dd).toMillis } - println("lines.mkString() = " + lines.mkString("\n")) - var i = 0 val correctedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Latency Distribution").map(_._2).get @@ -282,7 +303,7 @@ class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec i += 1 } - dumpToCsv(prefix + "_corrected", titles, metrics) + renderResults(prefix + "_corrected", titles, metrics) val uncorrectedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Uncorrected Latency").map(_._2).get @@ -302,7 +323,7 @@ class MultiNodeSpecSpec extends MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec i += 1 } - dumpToCsv(prefix + "_uncorrected", titles, metrics) + renderResults(prefix + "_uncorrected", titles, metrics) } private def printAbPercentiles(prefix: String, lines: Array[String]): Unit = { @@ -329,35 +350,7 @@ class MultiNodeSpecSpec extends 
MultiNodeSpec(AkkaHttpServerLatencyMultiNodeSpec i += 1 } - dumpToCsv(prefix, titles, metrics) - } - - private var _ifWrk2Available: Option[Boolean] = None - @tailrec private final def ifWrk2Available(test: ⇒ Unit): Unit = { - _ifWrk2Available match { - case Some(false) ⇒ throw new TestPendingException() - case Some(true) ⇒ test - case None ⇒ - import scala.sys.process._ - - val wrk = Try("""wrk""".!).getOrElse(-1) - _ifWrk2Available = Some(wrk == 1) // app found, help displayed - ifWrk2Available(test) - } - } - - var _ifAbAvailable: Option[Boolean] = None - @tailrec private final def ifAbAvailable(test: ⇒ Unit): Unit = { - _ifAbAvailable match { - case Some(false) ⇒ throw new TestPendingException() - case Some(true) ⇒ test - case None ⇒ - import scala.sys.process._ - - val wrk = Try("""ab -h""".!).getOrElse(-1) - _ifAbAvailable = Some(wrk == 22) // app found, help displayed (22 return code is when -h runs in ab, weird but true) - ifAbAvailable(test) - } + renderResults(prefix, titles, metrics) } } diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 5d417986a9..67ded6ba7b 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -261,7 +261,7 @@ object AkkaBuild extends Build { base = file("akka-http-tests"), dependencies = Seq( httpTestkit % "test", streamTestkit % "test->test", testkit % "test->test", httpSprayJson, httpXml, httpJackson, - multiNodeTestkit, cluster %"test->test", remoteTests % "test->test") // required for multi-node latency/throughput Spec + multiNodeTestkit, remoteTests % "test->test") // required for multi-node latency/throughput Spec ).configs(MultiJvm) lazy val httpMarshallersScala = Project( From 12fadfe8e57df1d8231a7e4f9d3b34b301bd3080 Mon Sep 17 00:00:00 2001 From: Richard Imaoka Date: Mon, 18 Jul 2016 17:33:09 +0900 Subject: [PATCH 024/155] +doc Indicate in doc BalancingPool should not support Broadcast #15030 (#20979) --- akka-docs/rst/java/routing.rst | 13 ++++++++++++- akka-docs/rst/scala/routing.rst | 16 +++++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/akka-docs/rst/java/routing.rst b/akka-docs/rst/java/routing.rst index f47fe99d01..3ebfe3ee40 100644 --- a/akka-docs/rst/java/routing.rst +++ b/akka-docs/rst/java/routing.rst @@ -46,7 +46,9 @@ outside of actors. .. note:: In general, any message sent to a router will be sent onwards to its routees, but there is one exception. - The special :ref:`broadcast-messages-java` will send to *all* of a router's routees + The special :ref:`broadcast-messages-java` will send to *all* of a router's routees. + However, do not use :ref:`broadcast-messages-java` when you use :ref:`balancing-pool-java` for routees + as described in :ref:`router-special-messages-java`. A Router Actor ^^^^^^^^^^^^^^ @@ -276,6 +278,10 @@ All routees share the same mailbox. replying to the original client. The other advantage is that it does not place a restriction on the message queue implementation as BalancingPool does. +.. note:: + Do not use :ref:`broadcast-messages-java` when you use :ref:`balancing-pool-java` for routers, + as described in :ref:`router-special-messages-java`. + BalancingPool defined in configuration: .. includecode:: ../scala/code/docs/routing/RouterDocSpec.scala#config-balancing-pool @@ -522,6 +528,11 @@ In this example the router receives the ``Broadcast`` message, extracts its payl (``"Watch out for Davy Jones' locker"``), and then sends the payload on to all of the router's routees. It is up to each routee actor to handle the received payload message. +.. 
note:: + Do not use :ref:`broadcast-messages-java` when you use :ref:`balancing-pool-java` for routers. + Routees on :ref:`balancing-pool-java` share the same mailbox instance, thus some routees can + possibly get the broadcast message multiple times, while other routees get no broadcast message. + + PoisonPill Messages ------------------- diff --git a/akka-docs/rst/scala/routing.rst b/akka-docs/rst/scala/routing.rst index bba212da76..bb04f40870 100644 --- a/akka-docs/rst/scala/routing.rst +++ b/akka-docs/rst/scala/routing.rst @@ -46,7 +46,10 @@ outside of actors. .. note:: In general, any message sent to a router will be sent onwards to its routees, but there is one exception. - The special :ref:`broadcast-messages-scala` will send to *all* of a router's routees + The special :ref:`broadcast-messages-scala` will send to *all* of a router's routees. + However, do not use :ref:`broadcast-messages-scala` when you use :ref:`balancing-pool-scala` for routees + as described in :ref:`router-special-messages-scala`. + A Router Actor ^^^^^^^^^^^^^^ @@ -275,6 +278,11 @@ All routees share the same mailbox. replying to the original client. The other advantage is that it does not place a restriction on the message queue implementation as BalancingPool does. +.. note:: + Do not use :ref:`broadcast-messages-scala` when you use :ref:`balancing-pool-scala` for routers, + as described in :ref:`router-special-messages-scala`. + + BalancingPool defined in configuration: .. includecode:: code/docs/routing/RouterDocSpec.scala#config-balancing-pool @@ -521,6 +529,12 @@ In this example the router receives the ``Broadcast`` message, extracts its payl (``"Watch out for Davy Jones' locker"``), and then sends the payload on to all of the router's routees. It is up to each routee actor to handle the received payload message. +.. note:: + Do not use :ref:`broadcast-messages-scala` when you use :ref:`balancing-pool-scala` for routers. + Routees on :ref:`balancing-pool-scala` share the same mailbox instance, thus some routees can + possibly get the broadcast message multiple times, while other routees get no broadcast message.
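(Editor's aside: to make the notes above concrete, here is a minimal sketch of ours — it is not part of the patch, and the pool size, actor and system names are assumptions)::

    import akka.actor.{ Actor, ActorSystem, Props }
    import akka.routing.{ BalancingPool, Broadcast }

    class Worker extends Actor {
      def receive = { case msg ⇒ println(s"${self.path.name} got: $msg") }
    }

    object BalancingBroadcastPitfall extends App {
      val system = ActorSystem("demo")
      // BalancingPool routees pull from one shared mailbox, so the per-routee
      // copies of a Broadcast message all land in the same queue: a fast routee
      // may dequeue several of them while a slow routee sees none.
      val router = system.actorOf(BalancingPool(3).props(Props[Worker]), "balancer")
      router ! Broadcast("ping") // delivery to each individual routee is NOT guaranteed
    }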
+ + PoisonPill Messages ------------------- From 2078396197a2fd1bc1834afed24dd5467c474d8b Mon Sep 17 00:00:00 2001 From: Nafer Sanabria Date: Mon, 18 Jul 2016 03:33:44 -0500 Subject: [PATCH 025/155] =doc Akka & GitHub appearances in docs (#20968) * =doc capitalize akka word * Capitalize GitHub appearances in docs --- CONTRIBUTING.md | 2 +- README.md | 2 +- akka-docs/rst/intro/why-akka.rst | 2 +- akka-docs/rst/java/cluster-metrics.rst | 2 +- akka-docs/rst/java/io-tcp.rst | 2 +- akka-docs/rst/java/lambda-fsm.rst | 2 +- akka-docs/rst/java/remoting.rst | 2 +- akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst | 2 +- akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst | 4 ++-- .../migration-guide-persistence-experimental-2.3.x-2.4.x.rst | 2 +- akka-docs/rst/scala/camel.rst | 2 +- akka-docs/rst/scala/cluster-metrics.rst | 2 +- akka-docs/rst/scala/fsm.rst | 2 +- akka-docs/rst/scala/io-tcp.rst | 2 +- akka-docs/rst/scala/remoting.rst | 2 +- akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst | 2 +- 16 files changed, 17 insertions(+), 17 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4d41c6a848..d66cc596e5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -224,7 +224,7 @@ Example: Akka uses [Jenkins GitHub pull request builder plugin](https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin) that automatically merges the code, builds it, runs the tests and comments on the Pull Request in GitHub. -Upon a submission of a Pull Request the Github pull request builder plugin will post a following comment: +Upon submission of a Pull Request the GitHub pull request builder plugin will post the following comment: Can one of the repo owners verify this patch? diff --git a/README.md b/README.md index 0e8fee2420..4b6c42c083 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ Contributions are *very* welcome! If you see an issue that you'd like to see fixed, the best way to make it happen is to help out by submitting a PullRequest implementing it. Refer to the [CONTRIBUTING.md](https://github.com/akka/akka/blob/master/CONTRIBUTING.md) file for more details about the workflow, -and general hints how to prepare your pull request. You can also chat ask for clarifications or guidance in github issues directly, +and general hints on how to prepare your pull request. You can also ask for clarifications or guidance in GitHub issues directly, or in the akka/dev chat if a more real time communication would be of benefit. A chat room is available for all questions related to *developing and contributing* to Akka: diff --git a/akka-docs/rst/intro/why-akka.rst b/akka-docs/rst/intro/why-akka.rst index f5556bbf1c..c25e982ef2 100644 --- a/akka-docs/rst/intro/why-akka.rst +++ b/akka-docs/rst/intro/why-akka.rst @@ -19,7 +19,7 @@ but also in the size of applications it is useful for. The core of Akka, akka-ac is very small and easily dropped into an existing project where you need asynchronicity and lockless concurrency without hassle. -You can choose to include only the parts of akka you need in your application. +You can choose to include only the parts of Akka you need in your application. With CPUs growing more and more cores every cycle, Akka is the alternative that provides outstanding performance even if you're only running it on one machine.
Akka also supplies a wide array of concurrency-paradigms, allowing users to choose diff --git a/akka-docs/rst/java/cluster-metrics.rst b/akka-docs/rst/java/cluster-metrics.rst index 68e70effbb..3f8dfeaa13 100644 --- a/akka-docs/rst/java/cluster-metrics.rst +++ b/akka-docs/rst/java/cluster-metrics.rst @@ -14,7 +14,7 @@ Cluster metrics information is primarily used for load-balancing routers, and can also be used to implement advanced metrics-based node life cycles, such as "Node Let-it-crash" when CPU steal time becomes excessive. -Cluster Metrics Extension is a separate akka module delivered in ``akka-cluster-metrics`` jar. +Cluster Metrics Extension is a separate Akka module delivered in ``akka-cluster-metrics`` jar. To enable usage of the extension you need to add the following dependency to your project: :: diff --git a/akka-docs/rst/java/io-tcp.rst b/akka-docs/rst/java/io-tcp.rst index 5ea30571f5..1ad4b7b51d 100644 --- a/akka-docs/rst/java/io-tcp.rst +++ b/akka-docs/rst/java/io-tcp.rst @@ -189,7 +189,7 @@ For back-pressuring writes there are three modes of operation These write models (with the exception of the second which is rather specialised) are demonstrated in complete examples below. The full and contiguous source is -available `on github <@github@/akka-docs/rst/java/code/docs/io/japi>`_. +available `on GitHub <@github@/akka-docs/rst/java/code/docs/io/japi>`_. For back-pressuring reads there are two modes of operation diff --git a/akka-docs/rst/java/lambda-fsm.rst b/akka-docs/rst/java/lambda-fsm.rst index 63b4ef6e9e..18a2dc15f0 100644 --- a/akka-docs/rst/java/lambda-fsm.rst +++ b/akka-docs/rst/java/lambda-fsm.rst @@ -9,7 +9,7 @@ Overview ======== The FSM (Finite State Machine) is available as an abstract base class that implements -an akka Actor and is best described in the `Erlang design principles +an Akka Actor and is best described in the `Erlang design principles `_ A FSM can be described as a set of relations of the form: diff --git a/akka-docs/rst/java/remoting.rst b/akka-docs/rst/java/remoting.rst index 7fb1cbc8e4..8423d5007c 100644 --- a/akka-docs/rst/java/remoting.rst +++ b/akka-docs/rst/java/remoting.rst @@ -490,7 +490,7 @@ Akka behind NAT or in a Docker container ---------------------------------------- In setups involving Network Address Translation (NAT), Load Balancers or Docker -containers the hostname and port pair that akka binds to will be different than the "logical" +containers the hostname and port pair that Akka binds to will be different than the "logical" host name and port pair that is used to connect to the system from the outside. This requires special configuration that sets both the logical and the bind pairs for remoting. diff --git a/akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst b/akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst index b00fe25d92..4d483116c7 100644 --- a/akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst +++ b/akka-docs/rst/java/stream/migration-guide-2.0-2.4-java.rst @@ -42,7 +42,7 @@ completion but there is no actual value attached to the completion. It is used t occurrences of ``Future`` with ``Future`` in Java and ``Future[Unit]`` with ``Future[Done]`` in Scala. -All previous usage of ``Unit`` and ``BoxedUnit`` for these two cases in the akka streams APIs +All previous usage of ``Unit`` and ``BoxedUnit`` for these two cases in the Akka Streams APIs has been updated. 
This means that Java code like this:: diff --git a/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst b/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst index afc46c3176..c19b5a6854 100644 --- a/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst +++ b/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst @@ -191,7 +191,7 @@ i.e. if defined it takes precedence over ``max-total-nr-of-instances``. Logger names use full class name ================================ -Previously, few places in akka used "simple" logger names, such as ``Cluster`` or ``Remoting``. +Previously, a few places in Akka used "simple" logger names, such as ``Cluster`` or ``Remoting``. Now they use full class names, such as ``akka.cluster.Cluster`` or ``akka.remote.Remoting``, in order to allow package level log level definitions and ease source code lookup. In case you used specific "simple" logger name based rules in your ``logback.xml`` configurations, @@ -226,7 +226,7 @@ Please see :ref:`deployment-scenarios` for more information. New Cluster Metrics Extension ============================= Previously, cluster metrics functionality was located in the ``akka-cluster`` jar. -Now it is split out and moved into a separate akka module: ``akka-cluster-metrics`` jar. +Now it is split out and moved into a separate Akka module: ``akka-cluster-metrics`` jar. The module comes with few enhancements, such as use of Kamon sigar-loader for native library provisioning as well as use of statistical averaging of metrics data. Note that both old and new metrics configuration entries in the ``reference.conf`` diff --git a/akka-docs/rst/project/migration-guide-persistence-experimental-2.3.x-2.4.x.rst b/akka-docs/rst/project/migration-guide-persistence-experimental-2.3.x-2.4.x.rst index f8060314a1..051f7158d3 100644 --- a/akka-docs/rst/project/migration-guide-persistence-experimental-2.3.x-2.4.x.rst +++ b/akka-docs/rst/project/migration-guide-persistence-experimental-2.3.x-2.4.x.rst @@ -201,5 +201,5 @@ Persistence extension uses LevelDB based plugins for own development and keeps r However previously LevelDB was a ``compile`` scope dependency, and now it is an ``optional;provided`` dependency. To continue using LevelDB based persistence plugins it is now required for related user projects to include an additional explicit dependency declaration for the LevelDB artifacts. -This change allows production akka deployments to avoid need for the LevelDB provisioning. +This change allows production Akka deployments to avoid the need for LevelDB provisioning. Please see persistence extension ``reference.conf`` for details. diff --git a/akka-docs/rst/scala/camel.rst b/akka-docs/rst/scala/camel.rst index bed43d22c9..84cc196554 100644 --- a/akka-docs/rst/scala/camel.rst +++ b/akka-docs/rst/scala/camel.rst @@ -415,7 +415,7 @@ Here's an actor endpoint URI example containing an actor path:: akka://some-system/user/myconsumer?autoAck=false&replyTimeout=100+millis In the following example, a custom route to an actor is created, using the -actor's path. the akka camel package contains an implicit ``toActorRouteDefinition`` that allows for a route to +actor's path. The Akka camel package contains an implicit ``toActorRouteDefinition`` that allows for a route to reference an ``ActorRef`` directly as shown in the below example, The route starts from a `Jetty`_ endpoint and ends at the target actor.
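(Editor's aside: as a rough sketch of the camel passage above — our own example, not from the docs; the actor name ``responder``, the port, the Jetty component being on the classpath, and the route wiring are all assumptions — a custom Jetty-to-actor route using the actor-path endpoint URI syntax shown earlier could look like this)::

    import akka.actor.{ Actor, ActorSystem, Props }
    import akka.camel.{ Camel, CamelExtension, CamelMessage }
    import org.apache.camel.builder.RouteBuilder

    class Responder extends Actor {
      def receive = { case msg: CamelMessage ⇒ sender() ! msg.copy(body = "received") }
    }

    object JettyToActorSketch extends App {
      implicit val system = ActorSystem("some-system")
      val camel: Camel = CamelExtension(system)
      val responder = system.actorOf(Props[Responder], "responder")
      // route a Jetty endpoint to the actor via its actor-path endpoint URI
      camel.context.addRoutes(new RouteBuilder {
        def configure(): Unit =
          from("jetty:http://0.0.0.0:8877/camel/test")
            .to("akka://some-system/user/responder?replyTimeout=100+millis")
      })
    }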
diff --git a/akka-docs/rst/scala/cluster-metrics.rst b/akka-docs/rst/scala/cluster-metrics.rst index e0e3d67a3d..e894b5e234 100644 --- a/akka-docs/rst/scala/cluster-metrics.rst +++ b/akka-docs/rst/scala/cluster-metrics.rst @@ -14,7 +14,7 @@ Cluster metrics information is primarily used for load-balancing routers, and can also be used to implement advanced metrics-based node life cycles, such as "Node Let-it-crash" when CPU steal time becomes excessive. -Cluster Metrics Extension is a separate akka module delivered in ``akka-cluster-metrics`` jar. +Cluster Metrics Extension is a separate Akka module delivered in ``akka-cluster-metrics`` jar. To enable usage of the extension you need to add the following dependency to your project: :: diff --git a/akka-docs/rst/scala/fsm.rst b/akka-docs/rst/scala/fsm.rst index 1b8636eee0..ea20f39540 100644 --- a/akka-docs/rst/scala/fsm.rst +++ b/akka-docs/rst/scala/fsm.rst @@ -8,7 +8,7 @@ FSM Overview ======== -The FSM (Finite State Machine) is available as a mixin for the akka Actor and +The FSM (Finite State Machine) is available as a mixin for the Akka Actor and is best described in the `Erlang design principles `_ diff --git a/akka-docs/rst/scala/io-tcp.rst b/akka-docs/rst/scala/io-tcp.rst index 87acb50dd5..c8772f4bce 100644 --- a/akka-docs/rst/scala/io-tcp.rst +++ b/akka-docs/rst/scala/io-tcp.rst @@ -190,7 +190,7 @@ For back-pressuring writes there are three modes of operation These write back-pressure models (with the exception of the second which is rather specialised) are demonstrated in complete examples below. The full and contiguous source is -available `on github <@github@/akka-docs/rst/scala/code/docs/io/EchoServer.scala>`_. +available `on GitHub <@github@/akka-docs/rst/scala/code/docs/io/EchoServer.scala>`_. For back-pressuring reads there are two modes of operation diff --git a/akka-docs/rst/scala/remoting.rst b/akka-docs/rst/scala/remoting.rst index ea9e449dc9..7849ebf879 100644 --- a/akka-docs/rst/scala/remoting.rst +++ b/akka-docs/rst/scala/remoting.rst @@ -495,7 +495,7 @@ Akka behind NAT or in a Docker container ---------------------------------------- In setups involving Network Address Translation (NAT), Load Balancers or Docker -containers the hostname and port pair that akka binds to will be different than the "logical" +containers the hostname and port pair that Akka binds to will be different than the "logical" host name and port pair that is used to connect to the system from the outside. This requires special configuration that sets both the logical and the bind pairs for remoting. diff --git a/akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst b/akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst index 7d0bb64d8d..01bdef1380 100644 --- a/akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst +++ b/akka-docs/rst/scala/stream/migration-guide-2.0-2.4-scala.rst @@ -23,7 +23,7 @@ completion but there is no actual value attached to the completion. It is used t occurrences of ``Future`` with ``Future`` in Java and ``Future[Unit]`` with ``Future[Done]`` in Scala. -All previous usage of ``Unit`` and ``BoxedUnit`` for these two cases in the akka streams APIs +All previous usage of ``Unit`` and ``BoxedUnit`` for these two cases in the Akka Streams APIs has been updated. 
This means that Scala code like this:: From 08230ae2235be836745db030d297b41f9e21f49f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Mon, 18 Jul 2016 10:35:14 +0200 Subject: [PATCH 026/155] =str Avoid building unused strings on every wire call (#20948) * Avoid building unused strings on every wire call Up to twice as many materializations in the same time * Specialized null check added to avoid allocations --- .../akka/stream/impl/fusing/Fusing.scala | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala index 4930267cb0..7ace7f49dd 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala @@ -436,12 +436,6 @@ private[stream] object Fusing { case Ignore ⇒ Ignore } - private implicit class NonNull[T](val x: T) extends AnyVal { - def nonNull(msg: String): T = - if (x != null) x - else throw new IllegalArgumentException("null encountered: " + msg) - } - /** * INTERNAL API * @@ -671,8 +665,8 @@ private[stream] object Fusing { */ def wire(out: OutPort, in: InPort, indent: Int): Unit = { if (Debug) println(" " * indent + s"wiring $out (${hash(out)}) -> $in (${hash(in)})") - val newOut = removeMapping(out, newOuts) nonNull s"$out (${hash(out)})" - val newIn = removeMapping(in, newIns) nonNull s"$in (${hash(in)})" + val newOut = nonNullForPort(removeMapping(out, newOuts), out) + val newIn = nonNullForPort(removeMapping(in, newIns), in) downstreams.put(newOut, newIn) upstreams.put(newIn, newOut) } @@ -683,10 +677,10 @@ private[stream] object Fusing { def rewire(oldShape: Shape, newShape: Shape, indent: Int): Unit = { if (Debug) println(" " * indent + s"rewiring ${printShape(oldShape)} -> ${printShape(newShape)}") oldShape.inlets.iterator.zip(newShape.inlets.iterator).foreach { - case (oldIn, newIn) ⇒ addMapping(newIn, removeMapping(oldIn, newIns) nonNull s"$oldIn (${hash(oldIn)})", newIns) + case (oldIn, newIn) ⇒ addMapping(newIn, nonNullForPort(removeMapping(oldIn, newIns), oldIn), newIns) } oldShape.outlets.iterator.zip(newShape.outlets.iterator).foreach { - case (oldOut, newOut) ⇒ addMapping(newOut, removeMapping(oldOut, newOuts) nonNull s"$oldOut (${hash(oldOut)})", newOuts) + case (oldOut, newOut) ⇒ addMapping(newOut, nonNullForPort(removeMapping(oldOut, newOuts), oldOut), newOuts) } } @@ -701,6 +695,13 @@ private[stream] object Fusing { */ def newOutlets(old: immutable.Seq[Outlet[_]]): immutable.Seq[Outlet[_]] = old.map(o ⇒ newOuts.get(o).head.outlet) + + // optimization - specialized null check avoiding allocation or creation of unused strings + private def nonNullForPort[T](t: T, port: AnyRef): T = { + if (t != null) t + else throw new IllegalArgumentException(s"null encountered: $port (${hash(port)})") + } + } /** From a75c3f7bf11799c7f297e4463f120a56920c1476 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Mon, 18 Jul 2016 10:53:53 +0200 Subject: [PATCH 027/155] Update AkkaHttpServerLatencyMultiNodeSpec.scala (#20980) --- .../http/AkkaHttpServerLatencyMultiNodeSpec.scala | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala index 507e43dacc..3640a4d0c9 100644 --- 
a/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala +++ b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala @@ -276,14 +276,15 @@ class AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeSpec(AkkaHttpServerLat println("====:" + values.reverse.map(it ⇒ "\"" + it + "\"").mkString(",") + "\n") } + private def durationAsMs(d: String): Long = { + val dd = d.replace("us", "µs") // Scala Duration does not parse "us" + val ddd = if (dd endsWith "m") dd.replace("m", " minutes") else dd + Duration(ddd).toMillis + } + private def printWrkPercentiles(prefix: String, lines: Array[String]): Unit = { val percentilesToPrint = 8 - def durationAsMs(d: String): Long = { - val dd = d.replace("us", "µs") // Scala Duration does not parse "us" - Duration(dd).toMillis - } - var i = 0 val correctedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Latency Distribution").map(_._2).get @@ -329,9 +330,6 @@ class AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeSpec(AkkaHttpServerLat private def printAbPercentiles(prefix: String, lines: Array[String]): Unit = { val percentilesToPrint = 9 - def durationAsMs(d: String): Long = - Duration(d).toMillis - var i = 0 val correctedDistributionStartsHere = lines.zipWithIndex.find(p ⇒ p._1 contains "Percentage of the requests").map(_._2).get From 8047f3359f3252c83ae8c661a6ee20ed09c5a86c Mon Sep 17 00:00:00 2001 From: Nafer Sanabria Date: Mon, 18 Jul 2016 03:55:09 -0500 Subject: [PATCH 028/155] =htc #20920 create ContentLength HTTP header in JavaDSL (#20926) remove constructor --- .../http/javadsl/model/headers/ContentLength.java | 13 +++++++++++++ .../akka/http/scaladsl/model/headers/headers.scala | 3 ++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 akka-http-core/src/main/java/akka/http/javadsl/model/headers/ContentLength.java diff --git a/akka-http-core/src/main/java/akka/http/javadsl/model/headers/ContentLength.java b/akka-http-core/src/main/java/akka/http/javadsl/model/headers/ContentLength.java new file mode 100644 index 0000000000..959f007cfa --- /dev/null +++ b/akka-http-core/src/main/java/akka/http/javadsl/model/headers/ContentLength.java @@ -0,0 +1,13 @@ +/** + * Copyright (C) 2016 Lightbend Inc. + */ + +package akka.http.javadsl.model.headers; + +/** + * Model for the `Content-Length` header. + * Specification: https://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-26#section-3.3.2 + */ +public abstract class ContentLength extends akka.http.scaladsl.model.HttpHeader { + public abstract long length(); +} diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala index 44c43f8857..917243c58c 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala @@ -377,7 +377,8 @@ object `Content-Length` extends ModeledCompanion[`Content-Length`] * Instances of this class will only be created transiently during header parsing and will never appear * in HttpMessage.header. To access the Content-Length, see subclasses of HttpEntity. 
*/ -final case class `Content-Length` private[http] (length: Long) extends RequestResponseHeader { +final case class `Content-Length` private[http] (length: Long) extends jm.headers.ContentLength + with RequestResponseHeader { def renderValue[R <: Rendering](r: R): r.type = r ~~ length protected def companion = `Content-Length` } From 8f19dfcece062e502ac9f0fcd43ea5ef79d658e9 Mon Sep 17 00:00:00 2001 From: Lev Khomich Date: Mon, 18 Jul 2016 15:14:06 +0600 Subject: [PATCH 029/155] =htc do not mention source in HttpEntity.toString (#20739) (#20971) --- .../akka/http/scaladsl/model/HttpEntity.scala | 16 ++++++++++++++++ .../http/scaladsl/model/HttpEntitySpec.scala | 9 +++++++++ 2 files changed, 25 insertions(+) diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala index c07ccd8db5..4be0451de0 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala @@ -369,6 +369,10 @@ object HttpEntity { override def productPrefix = "HttpEntity.Default" + override def toString: String = { + s"$productPrefix($contentType,$contentLength bytes total)" + } + /** Java API */ override def getContentLength = contentLength } @@ -414,6 +418,10 @@ object HttpEntity { override def withData(data: Source[ByteString, Any]): HttpEntity.CloseDelimited = copy(data = data) override def productPrefix = "HttpEntity.CloseDelimited" + + override def toString: String = { + s"$productPrefix($contentType)" + } } /** @@ -431,6 +439,10 @@ object HttpEntity { override def withData(data: Source[ByteString, Any]): HttpEntity.IndefiniteLength = copy(data = data) override def productPrefix = "HttpEntity.IndefiniteLength" + + override def toString: String = { + s"$productPrefix($contentType)" + } } /** @@ -469,6 +481,10 @@ object HttpEntity { override def productPrefix = "HttpEntity.Chunked" + override def toString: String = { + s"$productPrefix($contentType)" + } + /** Java API */ def getChunks: stream.javadsl.Source[jm.HttpEntity.ChunkStreamPart, AnyRef] = stream.javadsl.Source.fromGraph(chunks.asInstanceOf[Source[jm.HttpEntity.ChunkStreamPart, AnyRef]]) diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/model/HttpEntitySpec.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/model/HttpEntitySpec.scala index 7a533830a7..ddef91fa85 100755 --- a/akka-http-core/src/test/scala/akka/http/scaladsl/model/HttpEntitySpec.scala +++ b/akka-http-core/src/test/scala/akka/http/scaladsl/model/HttpEntitySpec.scala @@ -141,14 +141,23 @@ class HttpEntitySpec extends FreeSpec with MustMatchers with BeforeAndAfterAll { "Default" in { val entity = Default(tpe, 11, source(abc, de, fgh, ijk)) entity.toString must include(entity.productPrefix) + entity.toString must include("11") + entity.toString mustNot include("Source") } "CloseDelimited" in { val entity = CloseDelimited(tpe, source(abc, de, fgh, ijk)) entity.toString must include(entity.productPrefix) + entity.toString mustNot include("Source") } "Chunked" in { val entity = Chunked(tpe, source(Chunk(abc))) entity.toString must include(entity.productPrefix) + entity.toString mustNot include("Source") + } + "IndefiniteLength" in { + val entity = IndefiniteLength(tpe, source(abc, de, fgh, ijk)) + entity.toString must include(entity.productPrefix) + entity.toString mustNot include("Source") } } "support withoutSizeLimit" - { From df46b203dc3204956bc0ab5df9b6be84188a4371 Mon Sep 17 00:00:00 
2001 From: Konrad Malawski Date: Mon, 18 Jul 2016 13:11:08 +0200 Subject: [PATCH 030/155] Bump sbt to 0.13.12 (#20978) --- project/build.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/build.properties b/project/build.properties index 43b8278c68..35c88bab7d 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=0.13.11 +sbt.version=0.13.12 From 6c82176c3059c55740f0437311070e7657d73de8 Mon Sep 17 00:00:00 2001 From: Richard Imaoka Date: Mon, 18 Jul 2016 20:16:36 +0900 Subject: [PATCH 031/155] +act Clarify usage of FromConfig in doc and API doc #18771 (#20981) --- .../src/main/scala/akka/routing/RouterConfig.scala | 8 ++++++-- akka-docs/rst/java/routing.rst | 10 +++++----- akka-docs/rst/scala/routing.rst | 8 ++++---- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala index 27f4df9085..1385ca9ba5 100644 --- a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala +++ b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala @@ -273,7 +273,9 @@ abstract class CustomRouterConfig extends RouterConfig { } /** - * Router configuration which has no default, i.e. external configuration is required. + * Wraps a [[akka.actor.Props]] to mark the actor as externally configurable for use with a router. + * If a [[akka.actor.Props]] is not wrapped with [[FromConfig]] then the actor will ignore the router part of the deployment section + * in the configuration. */ case object FromConfig extends FromConfig { /** @@ -290,7 +292,9 @@ case object FromConfig extends FromConfig { } /** - * Java API: Router configuration which has no default, i.e. external configuration is required. + * Java API: Wraps a [[akka.actor.Props]] to mark the actor as externally configurable for use with a router. + * If a [[akka.actor.Props]] is not wrapped with [[FromConfig]] then the actor will ignore the router part of the deployment section + * in the configuration. * * This can be used when the dispatcher to be used for the head Router needs to be configured * (defaults to default-dispatcher). diff --git a/akka-docs/rst/java/routing.rst b/akka-docs/rst/java/routing.rst index 3ebfe3ee40..6ecde18c1b 100644 --- a/akka-docs/rst/java/routing.rst +++ b/akka-docs/rst/java/routing.rst @@ -64,11 +64,11 @@ This type of router actor comes in two distinct flavors: * Group - The routee actors are created externally to the router and the router sends messages to the specified path using actor selection, without watching for termination. -The settings for a router actor can be defined in configuration or programmatically. -Although router actors can be defined in the configuration file, they must still be created -programmatically, i.e. you cannot make a router through external configuration alone. -If you define the router actor in the configuration file then these settings will be used -instead of any programmatically provided parameters. +The settings for a router actor can be defined in configuration or programmatically. +In order to make an actor make use of an externally configurable router, the ``FromConfig`` props wrapper must be used +to denote that the actor accepts routing settings from configuration. +This is in contrast with Remote Deployment, where such a marker props is not necessary. +If the props of an actor is NOT wrapped in ``FromConfig``, it will ignore the router section of the deployment configuration.
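(Editor's aside: a minimal sketch of the ``FromConfig`` marker described above — ours, not the patch's; the name ``router1`` and the pool settings are assumptions, and Scala is used for brevity even though this hunk is the Java documentation)::

    // application.conf (assumed):
    //   akka.actor.deployment {
    //     /router1 {
    //       router = round-robin-pool
    //       nr-of-instances = 5
    //     }
    //   }
    import akka.actor.{ Actor, ActorSystem, Props }
    import akka.routing.FromConfig

    class Worker extends Actor {
      def receive = { case msg ⇒ sender() ! msg }
    }

    object FromConfigSketch extends App {
      val system = ActorSystem("demo")
      // FromConfig marks the Props as expecting router settings from the
      // deployment section above; plain Props[Worker] would ignore them.
      val router = system.actorOf(FromConfig.props(Props[Worker]), "router1")
    }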
You send messages to the routees via the router actor in the same way as for ordinary actors, i.e. via its ``ActorRef``. The router actor forwards messages onto its routees without changing diff --git a/akka-docs/rst/scala/routing.rst b/akka-docs/rst/scala/routing.rst index bb04f40870..6456209388 100644 --- a/akka-docs/rst/scala/routing.rst +++ b/akka-docs/rst/scala/routing.rst @@ -66,10 +66,10 @@ This type of router actor comes in two distinct flavors: messages to the specified path using actor selection, without watching for termination. The settings for a router actor can be defined in configuration or programmatically. -Although router actors can be defined in the configuration file, they must still be created -programmatically, i.e. you cannot make a router through external configuration alone. -If you define the router actor in the configuration file then these settings will be used -instead of any programmatically provided parameters. +In order to make an actor make use of an externally configurable router, the ``FromConfig`` props wrapper must be used +to denote that the actor accepts routing settings from configuration. +This is in contrast with Remote Deployment, where such a marker props is not necessary. +If the props of an actor is NOT wrapped in ``FromConfig``, it will ignore the router section of the deployment configuration. You send messages to the routees via the router actor in the same way as for ordinary actors, i.e. via its ``ActorRef``. The router actor forwards messages onto its routees without changing From 20960d9fd591741833c6e03be45fe02bb7b74c15 Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Wed, 20 Jul 2016 10:13:39 +0200 Subject: [PATCH 032/155] +doc explain how to generate JavaDoc in CONTRIBUTING.md (#20995) * +doc explain how to generate JavaDoc in CONTRIBUTING.md * Update CONTRIBUTING.md --- CONTRIBUTING.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d66cc596e5..abcc9848d2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -179,6 +179,19 @@ For more info, or for a starting point for new projects, look at the [Lightbend For larger projects that have invested a lot of time and resources into their current documentation and samples scheme (like for example Play), it is understandable that it will take some time to migrate to this new model. In these cases someone from the project needs to take the responsibility of manual QA and verifier for the documentation and samples. +### JavaDoc + +Akka generates JavaDoc-style API documentation using the [genjavadoc](https://github.com/typesafehub/genjavadoc) sbt plugin, since the sources are written mostly in Scala. + +Generating JavaDoc is not enabled by default, as it's not needed in day-to-day development and is expected to just work. +If you'd like to check that your links and formatting look good in JavaDoc (and not only in ScalaDoc), you can generate it by running: + +``` +sbt -Dakka.genjavadoc.enabled=true javaunidoc:doc +``` + +This will generate JavaDoc-style docs in `./target/javaunidoc/index.html` + ## External Dependencies All the external runtime dependencies for the project, including transitive dependencies, must have an open source license that is equal to, or compatible with, [Apache 2](http://www.apache.org/licenses/LICENSE-2.0).
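(Editor's aside on the JavaDoc workflow added above: if passing the flag on every invocation gets tedious, a common sbt idiom is to set the system property from the build definition itself. This is only a sketch — the ``initialize`` trick is a general sbt idiom, not Akka's actual build code, and whether this particular flag is read late enough for it to take effect is something we have not verified.)

```
// hypothetical build.sbt fragment, roughly equivalent to passing
// -Dakka.genjavadoc.enabled=true on the sbt command line
initialize := {
  val _ = initialize.value // keep any previous initialization
  System.setProperty("akka.genjavadoc.enabled", "true")
}
```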
From d3ea9e49dbf1ded613d9eb082115c1a9daaed9eb Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Wed, 20 Jul 2016 12:18:52 +0200 Subject: [PATCH 033/155] =htp cache default RejectionHandler instance, it's safe to share (#20996) --- .../src/test/scala/akka/util/ByteStringSpec.scala | 2 +- .../scala/akka/http/scaladsl/server/RejectionHandler.scala | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala index 57dac8c482..55751aa816 100644 --- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala @@ -322,7 +322,7 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { check { (a: ByteString) ⇒ a.asByteBuffers.foldLeft(ByteString.empty) { (bs, bb) ⇒ bs ++ ByteString(bb) } == a } check { (a: ByteString) ⇒ a.asByteBuffers.forall(_.isReadOnly) } check { (a: ByteString) ⇒ - import scala.collection.JavaConverters.iterableAsScalaIterableConverter; + import scala.collection.JavaConverters.iterableAsScalaIterableConverter a.asByteBuffers.zip(a.getByteBuffers().asScala).forall(x ⇒ x._1 == x._2) } } diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala index e076c73a52..bc6c6a451f 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala @@ -122,9 +122,9 @@ object RejectionHandler { import Directives._ /** - * Creates a new default [[RejectionHandler]] instance. + * Default [[RejectionHandler]] instance. */ - def default = + final val default = newBuilder() .handleAll[SchemeRejection] { rejections ⇒ val schemes = rejections.map(_.supported).mkString(", ") From fde9d86879ad66b501f4ff3af939baee6a323fcf Mon Sep 17 00:00:00 2001 From: Konrad Malawski Date: Wed, 20 Jul 2016 14:01:51 +0200 Subject: [PATCH 034/155] ByteString optimisations of methods in HTTP parsing hot-path (#20994) * =act #20992 prepare benchmarks for ByteString optimisations * =act #20992 optimise common ByteString operations: drop,take,slice... 
* =act,htc #15965 add ByteString.decodeString(java.nio.charsets.Charset) --- .gitignore | 1 + .../test/scala/akka/util/ByteStringSpec.scala | 125 +++++++++++ .../main/scala/akka/util/ByteIterator.scala | 6 +- .../src/main/scala/akka/util/ByteString.scala | 211 +++++++++++++++--- ...> ByteString_copyToBuffer_Benchmark.scala} | 8 +- .../util/ByteString_decode_Benchmark.scala | 64 ++++++ .../ByteString_dropSliceTake_Benchmark.scala | 156 +++++++++++++ .../engine/parsing/HttpHeaderParser.scala | 4 +- .../engine/parsing/HttpRequestParser.scala | 4 +- .../AkkaHttpServerLatencyMultiNodeSpec.scala | 25 +-- .../PredefinedFromEntityUnmarshallers.scala | 4 +- project/MiMa.scala | 4 + 12 files changed, 552 insertions(+), 60 deletions(-) rename akka-bench-jmh/src/main/scala/akka/util/{ByteStringBenchmark.scala => ByteString_copyToBuffer_Benchmark.scala} (96%) create mode 100644 akka-bench-jmh/src/main/scala/akka/util/ByteString_decode_Benchmark.scala create mode 100644 akka-bench-jmh/src/main/scala/akka/util/ByteString_dropSliceTake_Benchmark.scala diff --git a/.gitignore b/.gitignore index 8e716eb2d4..0d810187b9 100755 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ *# +*.jfr *.iml *.ipr *.iws diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala index 55751aa816..0a2894ccc0 100644 --- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala @@ -10,6 +10,7 @@ import java.lang.Float.floatToRawIntBits import java.nio.{ ByteBuffer, ByteOrder } import java.nio.ByteOrder.{ BIG_ENDIAN, LITTLE_ENDIAN } +import akka.util.ByteString.{ ByteString1, ByteString1C, ByteStrings } import org.apache.commons.codec.binary.Hex.encodeHex import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.{ Arbitrary, Gen } @@ -20,6 +21,12 @@ import scala.collection.mutable.Builder class ByteStringSpec extends WordSpec with Matchers with Checkers { + // // uncomment when developing locally to get better coverage + // implicit override val generatorDrivenConfig = + // PropertyCheckConfig( + // minSuccessful = 1000, + // minSize = 0, maxSize = 100) + def genSimpleByteString(min: Int, max: Int) = for { n ← Gen.choose(min, max) b ← Gen.containerOfN[Array, Byte](n, arbitrary[Byte]) @@ -281,10 +288,113 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { reference.toSeq == builder.result } + "ByteString1" must { + "drop(0)" in { + ByteString1.fromString("").drop(0) should ===(ByteString.empty) + ByteString1.fromString("a").drop(0) should ===(ByteString("a")) + } + "drop(1)" in { + ByteString1.fromString("").drop(1) should ===(ByteString("")) + ByteString1.fromString("a").drop(1) should ===(ByteString("")) + ByteString1.fromString("ab").drop(1) should ===(ByteString("b")) + ByteString1.fromString("xaaa").drop(1) should ===(ByteString("aaa")) + ByteString1.fromString("xaab").drop(1).take(2) should ===(ByteString("aa")) + ByteString1.fromString("0123456789").drop(5).take(4).drop(1).take(2) should ===(ByteString("67")) + } + "drop(n)" in { + ByteString1.fromString("ab").drop(2) should ===(ByteString("")) + ByteString1.fromString("ab").drop(3) should ===(ByteString("")) + } + } + "ByteString1C" must { + "drop(0)" in { + ByteString1C.fromString("").drop(0) should ===(ByteString.empty) + ByteString1C.fromString("a").drop(0) should ===(ByteString("a")) + } + "drop(1)" in { + ByteString1C.fromString("").drop(1) should ===(ByteString("")) + 
ByteString1C.fromString("a").drop(1) should ===(ByteString("")) + ByteString1C.fromString("ab").drop(1) should ===(ByteString("b")) + } + "drop(n)" in { + ByteString1C.fromString("ab").drop(2) should ===(ByteString("")) + ByteString1C.fromString("ab").drop(3) should ===(ByteString("")) + } + "take" in { + ByteString1.fromString("abcdefg").drop(1).take(0) should ===(ByteString("")) + ByteString1.fromString("abcdefg").drop(1).take(-1) should ===(ByteString("")) + ByteString1.fromString("abcdefg").drop(1).take(-2) should ===(ByteString("")) + ByteString1.fromString("abcdefg").drop(2) should ===(ByteString("cdefg")) + ByteString1.fromString("abcdefg").drop(2).take(1) should ===(ByteString("c")) + } + } + "ByteStrings" must { + "drop(0)" in { + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).drop(0) should ===(ByteString.empty) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(0) should ===(ByteString("a")) + (ByteString1C.fromString("") ++ ByteString1.fromString("a")).drop(0) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(0) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("a")).drop(0) should ===(ByteString("aa")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).drop(0) should ===(ByteString("")) + } + "drop(1)" in { + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).drop(1) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(1) should ===(ByteString("")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(1) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).drop(1) should ===(ByteString("bcd")) + ByteStrings(Vector(ByteString1.fromString("xaaa"))).drop(1) should ===(ByteString("aaa")) + } + "drop(n)" in { + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(1) should ===(ByteString("")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(1) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).drop(3) should ===(ByteString("d")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).drop(4) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).drop(5) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).drop(10) should ===(ByteString("")) + + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).drop(-2) should ===(ByteString("abcd")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).drop(-2) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("")).drop(Int.MinValue) should ===(ByteString("ab")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("ab")).dropRight(Int.MinValue) should ===(ByteString("ab")) + } + "slice" in { + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(0, 1) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).slice(1, 1) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(2, 2) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(2, 3) should ===(ByteString("c")) + 
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(2, 4) should ===(ByteString("cd")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(3, 4) should ===(ByteString("d")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(10, 100) should ===(ByteString("")) + } + "dropRight" in { + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(0) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(-1) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(Int.MinValue) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(1) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(Int.MaxValue) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).dropRight(1) should ===(ByteString("ab")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).dropRight(2) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).dropRight(3) should ===(ByteString("")) + } + "take" in { + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(0) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(-1) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(-2) should ===(ByteString("")) + (ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")) ++ ByteString1.fromString("defg")).drop(2) should ===(ByteString("cdefg")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(2).take(1) should ===(ByteString("c")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).take(100) should ===(ByteString("abc")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(100) should ===(ByteString("bc")) + } + } + "A ByteString" must { "have correct size" when { "concatenating" in { check((a: ByteString, b: ByteString) ⇒ (a ++ b).size == a.size + b.size) } "dropping" in { check((a: ByteString, b: ByteString) ⇒ (a ++ b).drop(b.size).size == a.size) } + "taking" in { check((a: ByteString, b: ByteString) ⇒ (a ++ b).take(a.size) == a) } + "takingRight" in { check((a: ByteString, b: ByteString) ⇒ (a ++ b).takeRight(b.size) == b) } + "droppnig then taking" in { check((a: ByteString, b: ByteString) ⇒ (b ++ a ++ b).drop(b.size).take(a.size) == a) } + "droppingRight" in { check((a: ByteString, b: ByteString) ⇒ (b ++ a ++ b).drop(b.size).dropRight(b.size) == a) } } "be sequential" when { @@ -301,6 +411,21 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { (a ++ b ++ c) == xs } } + def excerciseRecombining(xs: ByteString, from: Int, until: Int) = { + val (tmp, c) = xs.splitAt(until) + val (a, b) = tmp.splitAt(from) + (a ++ b ++ c) should ===(xs) + } + "recombining - edge cases" in { + excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](1)), ByteString1(Array[Byte](2)))), -2147483648, 112121212) + excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](100)))), 0, 2) + excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](100)))), -2147483648, 2) + excerciseRecombining(ByteStrings(Vector(ByteString1.fromString("ab"), 
ByteString1.fromString("cd"))), 0, 1) + excerciseRecombining(ByteString1.fromString("abc").drop(1).take(1), -324234, 234232) + excerciseRecombining(ByteString("a"), 0, 2147483647) + excerciseRecombining(ByteStrings(Vector(ByteString1.fromString("ab"), ByteString1.fromString("cd"))).drop(2), 2147483647, 1) + excerciseRecombining(ByteString1.fromString("ab").drop1(1), Int.MaxValue, Int.MaxValue) + } } "behave as expected" when { diff --git a/akka-actor/src/main/scala/akka/util/ByteIterator.scala b/akka-actor/src/main/scala/akka/util/ByteIterator.scala index 3ca0092d49..e3a3ffdbab 100644 --- a/akka-actor/src/main/scala/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala/akka/util/ByteIterator.scala @@ -234,6 +234,7 @@ object ByteIterator { new MultiByteArrayIterator(clonedIterators) } + /** For performance sensitive code, call take() directly on ByteString (it's optimised there) */ final override def take(n: Int): this.type = { var rest = n val builder = new ListBuffer[ByteArrayIterator] @@ -249,7 +250,8 @@ object ByteIterator { normalize() } - @tailrec final override def drop(n: Int): this.type = + /** For performance sensitive code, call drop() directly on ByteString (it's optimised there) */ + final override def drop(n: Int): this.type = if ((n > 0) && !isEmpty) { val nCurrent = math.min(n, current.len) current.drop(n) @@ -341,6 +343,7 @@ object ByteIterator { def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = getToArray(xs, offset, n, 8) { getDouble(byteOrder) } { current.getDoubles(_, _, _)(byteOrder) } + /** For performance sensitive code, call copyToBuffer() directly on ByteString (it's optimised there) */ override def copyToBuffer(buffer: ByteBuffer): Int = { // the fold here is better than indexing into the LinearSeq val n = iterators.foldLeft(0) { _ + _.copyToBuffer(buffer) } @@ -636,6 +639,7 @@ abstract class ByteIterator extends BufferedIterator[Byte] { * @param buffer a ByteBuffer to copy bytes to * @return the number of bytes actually copied */ + /** For performance sensitive code, call take() directly on ByteString (it's optimised there) */ def copyToBuffer(buffer: ByteBuffer): Int /** diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala index 89204c7169..1e782f3c09 100644 --- a/akka-actor/src/main/scala/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala/akka/util/ByteString.scala @@ -12,10 +12,10 @@ import scala.annotation.{ tailrec, varargs } import scala.collection.IndexedSeqOptimized import scala.collection.mutable.{ Builder, WrappedArray } import scala.collection.immutable -import scala.collection.immutable.{ IndexedSeq, VectorBuilder } +import scala.collection.immutable.{ IndexedSeq, VectorBuilder, VectorIterator } import scala.collection.generic.CanBuildFrom import scala.reflect.ClassTag -import java.nio.charset.StandardCharsets +import java.nio.charset.{ Charset, StandardCharsets } object ByteString { @@ -104,6 +104,7 @@ object ByteString { } private[akka] object ByteString1C extends Companion { + def fromString(s: String): ByteString1C = new ByteString1C(s.getBytes) def apply(bytes: Array[Byte]): ByteString1C = new ByteString1C(bytes) val SerializationIdentity = 1.toByte @@ -124,29 +125,49 @@ object ByteString { override def length: Int = bytes.length + // Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead override def iterator: ByteIterator.ByteArrayIterator = ByteIterator.ByteArrayIterator(bytes, 0, 
 bytes.length)

-  private[akka] def toByteString1: ByteString1 = ByteString1(bytes)
+  /** INTERNAL API */
+  private[akka] def toByteString1: ByteString1 = ByteString1(bytes, 0, bytes.length)

+  /** INTERNAL API */
   private[akka] def byteStringCompanion = ByteString1C

-  def asByteBuffer: ByteBuffer = toByteString1.asByteBuffer
+  override def asByteBuffer: ByteBuffer = toByteString1.asByteBuffer

-  def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = List(asByteBuffer)
+  override def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = List(asByteBuffer)

-  def decodeString(charset: String): String =
+  override def decodeString(charset: String): String =
     if (isEmpty) "" else new String(bytes, charset)

-  def ++(that: ByteString): ByteString =
+  override def decodeString(charset: Charset): String =
+    if (isEmpty) "" else new String(bytes, charset)
+
+  override def ++(that: ByteString): ByteString = {
     if (that.isEmpty) this
     else if (this.isEmpty) that
     else toByteString1 ++ that
+  }
+
+  override def take(n: Int): ByteString =
+    if (n <= 0) ByteString.empty
+    else toByteString1.take(n)
+
+  override def dropRight(n: Int): ByteString =
+    if (n <= 0) this
+    else toByteString1.dropRight(n)
+
+  override def drop(n: Int): ByteString =
+    if (n <= 0) this
+    else toByteString1.drop(n)

   override def slice(from: Int, until: Int): ByteString =
-    if ((from != 0) || (until != length)) toByteString1.slice(from, until)
-    else this
+    if ((from == 0) && (until == length)) this
+    else if (from > length) ByteString.empty
+    else toByteString1.slice(from, until)

-  private[akka] def writeToOutputStream(os: ObjectOutputStream): Unit =
+  private[akka] override def writeToOutputStream(os: ObjectOutputStream): Unit =
     toByteString1.writeToOutputStream(os)

   override def copyToBuffer(buffer: ByteBuffer): Int =
@@ -154,7 +175,7 @@ object ByteString {

   /** INTERNAL API: Specialized for internal use, writing multiple ByteString1C into the same ByteBuffer.
    */
   private[akka] def writeToBuffer(buffer: ByteBuffer, offset: Int): Int = {
-    val copyLength = math.min(buffer.remaining, offset + length)
+    val copyLength = Math.min(buffer.remaining, offset + length)
     if (copyLength > 0) {
       buffer.put(bytes, offset, copyLength)
       drop(copyLength)
@@ -164,11 +185,14 @@ object ByteString {
     }
   }

+  /** INTERNAL API: ByteString backed by exactly one array, with start / end markers */
   private[akka] object ByteString1 extends Companion {
     val empty: ByteString1 = new ByteString1(Array.empty[Byte])
-    def apply(bytes: Array[Byte]): ByteString1 = ByteString1(bytes, 0, bytes.length)
+    def fromString(s: String): ByteString1 = apply(s.getBytes)
+    def apply(bytes: Array[Byte]): ByteString1 = apply(bytes, 0, bytes.length)
     def apply(bytes: Array[Byte], startIndex: Int, length: Int): ByteString1 =
-      if (length == 0) empty else new ByteString1(bytes, startIndex, length)
+      if (length == 0) empty
+      else new ByteString1(bytes, Math.max(0, startIndex), Math.max(0, length))

     val SerializationIdentity = 0.toByte
@@ -185,6 +209,7 @@ object ByteString {

     def apply(idx: Int): Byte = bytes(checkRangeConvert(idx))

+    // Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead
     override def iterator: ByteIterator.ByteArrayIterator =
       ByteIterator.ByteArrayIterator(bytes, startIndex, startIndex + length)
@@ -204,12 +229,41 @@ object ByteString {

     private[akka] def byteStringCompanion = ByteString1

+    override def dropRight(n: Int): ByteString =
+      dropRight1(n)
+
+    /** INTERNAL API */
+    private[akka] def dropRight1(n: Int): ByteString1 =
+      if (n <= 0) this
+      else if (length - n <= 0) ByteString1.empty
+      else new ByteString1(bytes, startIndex, length - n)
+
+    override def drop(n: Int): ByteString =
+      if (n <= 0) this else drop1(n)
+
+    /** INTERNAL API */
+    private[akka] def drop1(n: Int): ByteString1 = {
+      val nextStartIndex = startIndex + n
+      if (nextStartIndex >= bytes.length) ByteString1.empty
+      else ByteString1(bytes, nextStartIndex, length - n)
+    }
+
+    override def take(n: Int): ByteString =
+      if (n <= 0) ByteString.empty
+      else ByteString1(bytes, startIndex, Math.min(n, length))
+
+    override def slice(from: Int, until: Int): ByteString = {
+      if (from <= 0 && until >= length) this // we can do < / > since we're Compact
+      else if (until <= from) ByteString1.empty
+      else ByteString1(bytes, startIndex + from, until - from)
+    }
+
     override def copyToBuffer(buffer: ByteBuffer): Int = writeToBuffer(buffer)

     /** INTERNAL API: Specialized for internal use, writing multiple ByteString1C into the same ByteBuffer.
      */
     private[akka] def writeToBuffer(buffer: ByteBuffer): Int = {
-      val copyLength = math.min(buffer.remaining, length)
+      val copyLength = Math.min(buffer.remaining, length)
       if (copyLength > 0) {
         buffer.put(bytes, startIndex, copyLength)
         drop(copyLength)
@@ -228,7 +282,10 @@ object ByteString {

     def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = List(asByteBuffer)

-    def decodeString(charset: String): String =
+    override def decodeString(charset: String): String =
+      new String(if (length == bytes.length) bytes else toArray, charset)
+
+    override def decodeString(charset: Charset): String = // avoids Charset.forName lookup in String internals
       new String(if (length == bytes.length) bytes else toArray, charset)

     def ++(that: ByteString): ByteString = {
@@ -311,8 +368,9 @@ object ByteString {
    */
   final class ByteStrings private (private[akka] val bytestrings: Vector[ByteString1], val length: Int) extends ByteString with Serializable {
     if (bytestrings.isEmpty) throw new IllegalArgumentException("bytestrings must not be empty")
+    if (bytestrings.head.isEmpty) throw new IllegalArgumentException("bytestrings.head must not be empty")

-    def apply(idx: Int): Byte =
+    def apply(idx: Int): Byte = {
       if (0 <= idx && idx < length) {
         var pos = 0
         var seen = 0
@@ -322,7 +380,9 @@ object ByteString {
         }
         bytestrings(pos)(idx - seen)
       } else throw new IndexOutOfBoundsException(idx.toString)
+    }

+    // Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead
     override def iterator: ByteIterator.MultiByteArrayIterator =
       ByteIterator.MultiByteArrayIterator(bytestrings.toStream map { _.iterator })
@@ -367,11 +427,83 @@ object ByteString {

     def decodeString(charset: String): String = compact.decodeString(charset)

+    def decodeString(charset: Charset): String =
+      compact.decodeString(charset)
+
     private[akka] def writeToOutputStream(os: ObjectOutputStream): Unit = {
       os.writeInt(bytestrings.length)
       bytestrings.foreach(_.writeToOutputStream(os))
     }

+    override def take(n: Int): ByteString = {
+      @tailrec def take0(n: Int, b: ByteStringBuilder, bs: Vector[ByteString1]): ByteString =
+        if (bs.isEmpty || n <= 0) b.result
+        else {
+          val head = bs.head
+          if (n <= head.length) b.append(head.take(n)).result
+          else take0(n - head.length, b.append(head), bs.tail)
+        }
+
+      if (n <= 0) ByteString.empty
+      else if (n >= length) this
+      else take0(n, ByteString.newBuilder, bytestrings)
+    }
+
+    override def dropRight(n: Int): ByteString =
+      if (n <= 0) this
+      else {
+        val last = bytestrings.last
+        if (n < last.length) new ByteStrings(bytestrings.init :+ last.dropRight1(n), length - n)
+        else {
+          val remaining = bytestrings.init
+          if (remaining.isEmpty) ByteString.empty
+          else {
+            val s = new ByteStrings(remaining, length - last.length)
+            val remainingToBeDropped = n - last.length
+            s.dropRight(remainingToBeDropped)
+          }
+        }
+      }
+
+    override def slice(from: Int, until: Int): ByteString =
+      if ((from == 0) && (until == length)) this
+      else if (from > length || until <= from) ByteString.empty
+      else drop(from).dropRight(length - until)
+
+    override def drop(n: Int): ByteString =
+      if (n <= 0) this
+      else if (n > length) ByteString.empty
+      else drop0(n)
+
+    private def drop0(n: Int): ByteString = {
+      var continue = true
+      var fullDrops = 0
+      var remainingToDrop = n
+      do {
+        // impl note: could be optimised a bit by using VectorIterator instead,
+        // however then we're forced to call .toVector which halves performance
+        // We can work around that, as there's a Scala private method "remainingVector" which is fast,
+        // but let's not go into calling private APIs here just yet.
+        val currentLength = bytestrings(fullDrops).length
+        if (remainingToDrop >= currentLength) {
+          fullDrops += 1
+          remainingToDrop -= currentLength
+        } else continue = false
+      } while (remainingToDrop > 0 && continue)

+      val remainingByteStrings = bytestrings.drop(fullDrops)
+      if (remainingByteStrings.isEmpty) ByteString.empty
+      else if (remainingToDrop > 0) {
+        val h: ByteString1 = remainingByteStrings.head.drop1(remainingToDrop)
+        val bs = remainingByteStrings.tail
+
+        if (h.isEmpty)
+          if (bs.isEmpty) ByteString.empty
+          else new ByteStrings(bs, length - n)
+        else new ByteStrings(h +: bs, length - n)
+      } else ByteStrings(remainingByteStrings, length - n)
+    }
+
     protected def writeReplace(): AnyRef = new SerializationProxy(this)
   }
@@ -422,6 +554,8 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz
   // *must* be overridden by derived classes. This construction is necessary
   // to specialize the return type, as the method is already implemented in
   // a parent trait.
+  //
+  // Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead
   override def iterator: ByteIterator =
     throw new UnsupportedOperationException("Method iterator is not implemented in ByteString")

   override def head: Byte = apply(0)
@@ -429,14 +563,19 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz
   override def last: Byte = apply(length - 1)
   override def init: ByteString = dropRight(1)

-  override def slice(from: Int, until: Int): ByteString =
-    if ((from == 0) && (until == length)) this
-    else iterator.slice(from, until).toByteString
-
-  override def take(n: Int): ByteString = slice(0, n)
+  // *must* be overridden by derived classes.
+  override def take(n: Int): ByteString = throw new UnsupportedOperationException("Method take is not implemented in ByteString")
   override def takeRight(n: Int): ByteString = slice(length - n, length)
-  override def drop(n: Int): ByteString = slice(n, length)
-  override def dropRight(n: Int): ByteString = slice(0, length - n)
+
+  // these methods are optimized in derived classes utilising the maximum knowledge about data layout available to them:
+  // *must* be overridden by derived classes.
+  override def slice(from: Int, until: Int): ByteString = throw new UnsupportedOperationException("Method slice is not implemented in ByteString")
+
+  // *must* be overridden by derived classes.
+  override def drop(n: Int): ByteString = throw new UnsupportedOperationException("Method drop is not implemented in ByteString")
+
+  // *must* be overridden by derived classes.
+  override def dropRight(n: Int): ByteString = throw new UnsupportedOperationException("Method dropRight is not implemented in ByteString")

   override def takeWhile(p: Byte ⇒ Boolean): ByteString = iterator.takeWhile(p).toByteString
   override def dropWhile(p: Byte ⇒ Boolean): ByteString = iterator.dropWhile(p).toByteString
@@ -461,7 +600,7 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz
    *
    * @return this ByteString copied into a byte array
    */
-  protected[ByteString] def toArray: Array[Byte] = toArray[Byte] // protected[ByteString] == public to Java but hidden to Scala * fnizz *
+  protected[ByteString] def toArray: Array[Byte] = toArray[Byte]

   override def toArray[B >: Byte](implicit arg0: ClassTag[B]): Array[B] = iterator.toArray
   override def copyToArray[B >: Byte](xs: Array[B], start: Int, len: Int): Unit =
@@ -488,11 +627,8 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz
    * @param buffer a ByteBuffer to copy bytes to
    * @return the number of bytes actually copied
    */
-  def copyToBuffer(buffer: ByteBuffer): Int = {
-    // TODO: remove this impl, make it an abstract method when possible
-    // specialized versions of this method exist in sub-classes, we keep this impl for binary compatibility, it never is actually invoked
-    iterator.copyToBuffer(buffer)
-  }
+  // *must* be overridden by derived classes.
+  def copyToBuffer(buffer: ByteBuffer): Int = throw new UnsupportedOperationException("Method copyToBuffer is not implemented in ByteString")

   /**
    * Create a new ByteString with all contents compacted into a single,
@@ -544,9 +680,16 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz

   /**
    * Decodes this ByteString using a charset to produce a String.
+   * If you have a [[Charset]] instance available, use `decodeString(charset: java.nio.charset.Charset)` instead.
    */
   def decodeString(charset: String): String

+  /**
+   * Decodes this ByteString using a charset to produce a String.
+   * Avoids Charset.forName lookup in String internals, thus is preferable to `decodeString(charset: String)`.
+   */
+  def decodeString(charset: Charset): String
+
   /**
    * map method that will automatically cast Int back into Byte.
    */
@@ -608,8 +751,8 @@ object CompactByteString {
    * an Array.
    */
   def fromArray(array: Array[Byte], offset: Int, length: Int): CompactByteString = {
-    val copyOffset = math.max(offset, 0)
-    val copyLength = math.max(math.min(array.length - copyOffset, length), 0)
+    val copyOffset = Math.max(offset, 0)
+    val copyLength = Math.max(Math.min(array.length - copyOffset, length), 0)
     if (copyLength == 0) empty
     else {
       val copyArray = new Array[Byte](copyLength)
@@ -706,6 +849,8 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] {

   override def ++=(xs: TraversableOnce[Byte]): this.type = {
     xs match {
+      case b: ByteString if b.isEmpty ⇒
+      // do nothing
       case b: ByteString1C ⇒
         clearTemp()
         _builder += b.toByteString1
@@ -748,7 +893,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] {
   /**
    * Java API: append a ByteString to this builder.
    */
-  def append(bs: ByteString): this.type = this ++= bs
+  def append(bs: ByteString): this.type = if (bs.isEmpty) this else this ++= bs

   /**
    * Add a single Byte to this builder.
@@ -915,7 +1060,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] {
     fillByteBuffer(len * 8, byteOrder) { _.asDoubleBuffer.put(array, start, len) }

   def clear(): Unit = {
-    _builder.clear
+    _builder.clear()
     _length = 0
     _tempLength = 0
   }
diff --git a/akka-bench-jmh/src/main/scala/akka/util/ByteStringBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/util/ByteString_copyToBuffer_Benchmark.scala
similarity index 96%
rename from akka-bench-jmh/src/main/scala/akka/util/ByteStringBenchmark.scala
rename to akka-bench-jmh/src/main/scala/akka/util/ByteString_copyToBuffer_Benchmark.scala
index ec62572d8c..5868897ec7 100644
--- a/akka-bench-jmh/src/main/scala/akka/util/ByteStringBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/util/ByteString_copyToBuffer_Benchmark.scala
@@ -12,7 +12,7 @@ import org.openjdk.jmh.infra.Blackhole

 @State(Scope.Benchmark)
 @Measurement(timeUnit = TimeUnit.MILLISECONDS)
-class ByteStringBenchmark {
+class ByteString_copyToBuffer_Benchmark {

   val _bs_mini = ByteString(Array.ofDim[Byte](128 * 4))
   val _bs_small = ByteString(Array.ofDim[Byte](1024 * 1))
@@ -83,16 +83,10 @@ class ByteStringBenchmark {
       bss_large.copyToBuffer(buf)
     }

-  //  /** compact + copy */
-  //  @Benchmark
-  //  def bss_large_c_copyToBuffer: Int =
-  //    bss_large.compact.copyToBuffer(buf)
-
   /** Pre-compacted */
   @Benchmark
   def bss_large_pc_copyToBuffer(): Int = {
     buf.flip()
     bss_pc_large.copyToBuffer(buf)
   }
-
 }
diff --git a/akka-bench-jmh/src/main/scala/akka/util/ByteString_decode_Benchmark.scala b/akka-bench-jmh/src/main/scala/akka/util/ByteString_decode_Benchmark.scala
new file mode 100644
index 0000000000..b606f251fe
--- /dev/null
+++ b/akka-bench-jmh/src/main/scala/akka/util/ByteString_decode_Benchmark.scala
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2014-2016 Lightbend Inc.
+ */
+package akka.util
+
+import java.nio.charset.Charset
+import java.util.concurrent.TimeUnit
+
+import akka.util.ByteString.{ ByteString1C, ByteStrings }
+import org.openjdk.jmh.annotations._
+
+@State(Scope.Benchmark)
+@Measurement(timeUnit = TimeUnit.MILLISECONDS)
+class ByteString_decode_Benchmark {
+
+  val _bs_large = ByteString(Array.ofDim[Byte](1024 * 4))
+
+  val bs_large = ByteString(Array.ofDim[Byte](1024 * 4 * 4))
+
+  val bss_large = ByteStrings(Vector.fill(4)(bs_large.asInstanceOf[ByteString1C].toByteString1), 4 * bs_large.length)
+  val bc_large = bss_large.compact // compacted
+
+  val utf8String = "utf-8"
+  val utf8 = Charset.forName(utf8String)
+
+  /*
+   Using Charset helps a bit, but nothing impressive:
+
+   [info] ByteString_decode_Benchmark.bc_large_decodeString_stringCharset_utf8    thrpt   20  21 612.293 ±  825.099  ops/s
+   =>
+   [info] ByteString_decode_Benchmark.bc_large_decodeString_charsetCharset_utf8   thrpt   20  22 473.372 ±  851.597  ops/s
+
+   [info] ByteString_decode_Benchmark.bs_large_decodeString_stringCharset_utf8    thrpt   20  84 443.674 ± 3723.987  ops/s
+   =>
+   [info] ByteString_decode_Benchmark.bs_large_decodeString_charsetCharset_utf8   thrpt   20  93 865.033 ± 2052.476  ops/s
+
+   [info] ByteString_decode_Benchmark.bss_large_decodeString_stringCharset_utf8   thrpt   20  14 886.553 ±  326.752  ops/s
+   =>
+   [info] ByteString_decode_Benchmark.bss_large_decodeString_charsetCharset_utf8  thrpt   20  16 031.670 ±  474.565  ops/s
+   */
+
+  @Benchmark
+  def bc_large_decodeString_stringCharset_utf8: String =
+    bc_large.decodeString(utf8String)
+  @Benchmark
+  def bs_large_decodeString_stringCharset_utf8: String =
+    bs_large.decodeString(utf8String)
+  @Benchmark
+  def bss_large_decodeString_stringCharset_utf8: String =
+    bss_large.decodeString(utf8String)
+
+  @Benchmark
+  def bc_large_decodeString_charsetCharset_utf8: String =
+    bc_large.decodeString(utf8)
+  @Benchmark
+  def bs_large_decodeString_charsetCharset_utf8: String =
+    bs_large.decodeString(utf8)
+  @Benchmark
+  def bss_large_decodeString_charsetCharset_utf8: String =
+    bss_large.decodeString(utf8)
+
+}
diff --git a/akka-bench-jmh/src/main/scala/akka/util/ByteString_dropSliceTake_Benchmark.scala b/akka-bench-jmh/src/main/scala/akka/util/ByteString_dropSliceTake_Benchmark.scala
new file mode 100644
index 0000000000..90ff47a807
--- /dev/null
+++ b/akka-bench-jmh/src/main/scala/akka/util/ByteString_dropSliceTake_Benchmark.scala
@@ -0,0 +1,156 @@
+/**
+ * Copyright (C) 2014-2016 Lightbend Inc.
+ */
+package akka.util
+
+import java.nio.ByteBuffer
+import java.util.concurrent.TimeUnit
+
+import akka.util.ByteString.{ ByteString1C, ByteStrings }
+import org.openjdk.jmh.annotations._
+
+@State(Scope.Benchmark)
+@Measurement(timeUnit = TimeUnit.MILLISECONDS)
+class ByteString_dropSliceTake_Benchmark {
+
+  val _bs_mini = ByteString(Array.ofDim[Byte](128 * 4))
+  val _bs_small = ByteString(Array.ofDim[Byte](1024 * 1))
+  val _bs_large = ByteString(Array.ofDim[Byte](1024 * 4))
+
+  val bs_mini = ByteString(Array.ofDim[Byte](128 * 4 * 4))
+  val bs_small = ByteString(Array.ofDim[Byte](1024 * 1 * 4))
+  val bs_large = ByteString(Array.ofDim[Byte](1024 * 4 * 4))
+
+  val bss_mini = ByteStrings(Vector.fill(4)(bs_mini.asInstanceOf[ByteString1C].toByteString1), 4 * bs_mini.length)
+  val bss_small = ByteStrings(Vector.fill(4)(bs_small.asInstanceOf[ByteString1C].toByteString1), 4 * bs_small.length)
+  val bss_large = ByteStrings(Vector.fill(4)(bs_large.asInstanceOf[ByteString1C].toByteString1), 4 * bs_large.length)
+  val bss_pc_large = bss_large.compact
+
+  /*
+   --------------------------------- BASELINE --------------------------------------------------------------------
+   [info] Benchmark                                                     Mode  Cnt            Score        Error  Units
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_100    thrpt   20  111 122 621.983 ± 6172679.160  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_256    thrpt   20  110 238 003.870 ± 4042572.908  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_2000   thrpt   20  106 435 449.123 ± 2972282.531  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_100   thrpt   20    1 155 292.430 ±   23096.219  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_256   thrpt   20    1 191 713.229 ±   15910.426  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_2000  thrpt   20    1 201 342.579 ±   21119.392  ops/s
+
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_drop_100         thrpt   20  108 252 561.824 ± 3841392.346  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_drop_256         thrpt   20  112 515 936.237 ± 5651549.124  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_drop_2000        thrpt   20  110 851 553.706 ± 3327510.108  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_drop_18         thrpt   20      983 544.541 ±   46299.808  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_drop_100        thrpt   20      875 345.433 ±   44760.533  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_drop_256        thrpt   20      864 182.258 ±  111172.303  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_drop_2000       thrpt   20      997 459.151 ±   33627.993  ops/s
+
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_slice_80_80      thrpt   20  112 299 538.691 ± 7259114.294  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_slice_129_129    thrpt   20  105 640 836.625 ± 9112709.942  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_slice_80_80     thrpt   20   10 868 202.262 ±  526537.133  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_slice_129_129   thrpt   20    9 429 199.802 ± 1321542.453  ops/s
+
+   --------------------------------- AFTER -----------------------------------------------------------------------
+
+   ------ TODAY –––––––
+   [info] Benchmark                                                     Mode  Cnt            Score        Error  Units
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_100    thrpt   20  126 091 961.654 ± 2813125.268  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_256    thrpt   20  118 393 394.350 ± 2934782.759  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_2000   thrpt   20  119 183 386.004 ± 4445324.298  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_100   thrpt   20    8 813 065.392 ±  234570.880  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_256   thrpt   20    9 039 585.934 ±  297168.301  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_2000  thrpt   20    9 629 458.168 ±  124846.904  ops/s
+
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_drop_100         thrpt   20  111 666 137.955 ± 4846727.674  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_drop_256         thrpt   20  114 405 514.622 ± 4985750.805  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_drop_2000        thrpt   20  114 364 716.297 ± 2512280.603  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_drop_18         thrpt   20   10 040 457.962 ±  527850.116  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_drop_100        thrpt   20    9 184 934.769 ±  549140.840  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_drop_256        thrpt   20   10 887 437.121 ±  195606.240  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_drop_2000       thrpt   20   10 725 300.292 ±  403470.413  ops/s
+
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_slice_80_80      thrpt   20  233 017 314.148 ± 7070246.826  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bs_large_slice_129_129    thrpt   20  275 245 086.247 ± 4969752.048  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_slice_80_80     thrpt   20  264 963 420.976 ± 4259289.143  ops/s
+   [info] ByteString_dropSliceTake_Benchmark.bss_large_slice_129_129   thrpt   20  265 477 577.022 ± 4623974.283  ops/s
+   */
+
+  // 18 == "http://example.com", a typical url length
+
+  @Benchmark
+  def bs_large_drop_0: ByteString =
+    bs_large.drop(0)
+  @Benchmark
+  def bss_large_drop_0: ByteString =
+    bss_large.drop(0)
+
+  @Benchmark
+  def bs_large_drop_18: ByteString =
+    bs_large.drop(18)
+  @Benchmark
+  def bss_large_drop_18: ByteString =
+    bss_large.drop(18)
+
+  @Benchmark
+  def bs_large_drop_100: ByteString =
+    bs_large.drop(100)
+  @Benchmark
+  def bss_large_drop_100: ByteString =
+    bss_large.drop(100)
+
+  @Benchmark
+  def bs_large_drop_256: ByteString =
+    bs_large.drop(256)
+  @Benchmark
+  def bss_large_drop_256: ByteString =
+    bss_large.drop(256)
+
+  @Benchmark
+  def bs_large_drop_2000: ByteString =
+    bs_large.drop(2000)
+  @Benchmark
+  def bss_large_drop_2000: ByteString =
+    bss_large.drop(2000)
+
+  /* these force 2 array drops, and 1 element drop inside the 2nd to first/last; can be considered as "bad case" */
+
+  @Benchmark
+  def bs_large_slice_129_129: ByteString =
+    bs_large.slice(129, 129)
+  @Benchmark
+  def bss_large_slice_129_129: ByteString =
+    bss_large.slice(129, 129)
+
+  /* these only move the indexes, don't drop any arrays "happy case" */
+
+  @Benchmark
+  def bs_large_slice_80_80: ByteString =
+    bs_large.slice(80, 80)
+  @Benchmark
+  def bss_large_slice_80_80: ByteString =
+    bss_large.slice(80, 80)
+
+  // drop right ---
+
+  @Benchmark
+  def bs_large_dropRight_100: ByteString =
+    bs_large.dropRight(100)
+  @Benchmark
+  def bss_large_dropRight_100: ByteString =
+    bss_large.dropRight(100)
+
+  @Benchmark
+  def bs_large_dropRight_256: ByteString =
+    bs_large.dropRight(256)
+  @Benchmark
+  def bss_large_dropRight_256: ByteString =
+    bss_large.dropRight(256)
+
+  @Benchmark
+  def bs_large_dropRight_2000: ByteString =
+    bs_large.dropRight(2000)
+  @Benchmark
+  def bss_large_dropRight_2000: ByteString =
+    bss_large.dropRight(2000)
+
+}
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
index fa4b23637a..63588995e8 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
@@ -473,7 +473,7 @@ private[http] object HttpHeaderParser {
   private[parsing] class ModeledHeaderValueParser(headerName: String, maxHeaderValueLength: Int, maxValueCount: Int, settings: HeaderParser.Settings)
     extends HeaderValueParser(headerName, maxValueCount) {
     def apply(hhp: HttpHeaderParser, input: ByteString, valueStart: Int, onIllegalHeader: ErrorInfo ⇒ Unit): (HttpHeader, Int) = {
-      // TODO: optimize by running the header value parser directly on the input ByteString (rather than an extracted String)
+      // TODO: optimize by running the header value parser directly on the input ByteString (rather than an extracted String); seems done?
       val (headerValue, endIx) = scanHeaderValue(hhp, input, valueStart, valueStart + maxHeaderValueLength + 2)()
       val trimmedHeaderValue = headerValue.trim
       val header = HeaderParser.parseFull(headerName, trimmedHeaderValue, settings) match {
@@ -569,4 +569,4 @@ private[http] object HttpHeaderParser {
     def withValueCountIncreased = copy(valueCount = valueCount + 1)
     def spaceLeft = valueCount < parser.maxValueCount
   }
-}
\ No newline at end of file
+}
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
index 3fe26250ac..cfb40519b5 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
@@ -101,8 +101,8 @@ private[http] class HttpRequestParser(
       val uriEnd = findUriEnd()
       try {
-        uriBytes = input.iterator.slice(uriStart, uriEnd).toArray[Byte] // TODO: can we reduce allocations here?
-        uri = Uri.parseHttpRequestTarget(uriBytes, mode = uriParsingMode)
+        uriBytes = input.slice(uriStart, uriEnd).toArray[Byte] // TODO: can we reduce allocations here?
+        uri = Uri.parseHttpRequestTarget(uriBytes, mode = uriParsingMode) // TODO ByteStringParserInput?
       } catch {
         case IllegalUriException(info) ⇒ throw new ParsingException(BadRequest, info)
       }
diff --git a/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala
index 3640a4d0c9..ef5eac35c5 100644
--- a/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala
+++ b/akka-http-tests/src/multi-jvm/scala/akka/http/AkkaHttpServerLatencyMultiNodeSpec.scala
@@ -52,29 +52,28 @@ object AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeConfig {

   private var _ifWrk2Available: Option[Boolean] = None
   final def ifWrk2Available(test: ⇒ Unit): Unit =
-    if (isWrk2Available) test else throw new TestPendingException()
-  final def isWrk2Available: Boolean =
+    if (isWrk2Available) test else throw new TestPendingException()
+  final def isWrk2Available: Boolean =
     _ifWrk2Available getOrElse {
-      import scala.sys.process._
-      val wrkExitCode = Try("""wrk""".!).getOrElse(-1)
+      import scala.sys.process._
+      val wrkExitCode = Try("""wrk""".!).getOrElse(-1)

-      _ifWrk2Available = Some(wrkExitCode == 1) // app found, help displayed
-      isWrk2Available
+      _ifWrk2Available = Some(wrkExitCode == 1) // app found, help displayed
+      isWrk2Available
     }

   private var _abAvailable: Option[Boolean] = None
   final def ifAbAvailable(test: ⇒ Unit): Unit =
     if (isAbAvailable) test else throw new TestPendingException()
-
-  final def isAbAvailable: Boolean =
+
+  final def isAbAvailable: Boolean =
     _abAvailable getOrElse {
       import scala.sys.process._
       val abExitCode = Try("""ab -h""".!).getOrElse(-1)
       _abAvailable = Some(abExitCode == 22) // app found, help displayed (22 return code is when -h runs in ab, weird but true)
       isAbAvailable
     }
-
-
+
   final case class LoadGenCommand(cmd: String)
   final case class LoadGenResults(results: String) {
     def lines = results.split("\n")
@@ -92,13 +91,13 @@ object AkkaHttpServerLatencyMultiNodeSpec extends MultiNodeConfig {
     import scala.sys.process._
     def ready(port: Int): Receive = {
       case LoadGenCommand(cmd) if cmd startsWith "wrk" ⇒
-        val res =
+        val res =
           if (isWrk2Available) cmd.!! // blocking. DON'T DO THIS AT HOME, KIDS!
           else "=== WRK NOT AVAILABLE ==="
         sender() ! LoadGenResults(res)
-
+
       case LoadGenCommand(cmd) if cmd startsWith "ab" ⇒
-        val res =
+        val res =
           if (isAbAvailable) cmd.!! // blocking. DON'T DO THIS AT HOME, KIDS!
           else "=== AB NOT AVAILABLE ==="
         sender() ! LoadGenResults(res)
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromEntityUnmarshallers.scala b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromEntityUnmarshallers.scala
index 21f629c050..43f4cbd420 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromEntityUnmarshallers.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromEntityUnmarshallers.scala
@@ -35,7 +35,7 @@ trait PredefinedFromEntityUnmarshallers extends MultipartUnmarshallers {
   implicit def stringUnmarshaller: FromEntityUnmarshaller[String] =
     byteStringUnmarshaller mapWithInput { (entity, bytes) ⇒
       if (entity.isKnownEmpty) ""
-      else bytes.decodeString(Unmarshaller.bestUnmarshallingCharsetFor(entity).nioCharset.name)
+      else bytes.decodeString(Unmarshaller.bestUnmarshallingCharsetFor(entity).nioCharset)
     }

   implicit def defaultUrlEncodedFormDataUnmarshaller: FromEntityUnmarshaller[FormData] =
@@ -53,4 +53,4 @@ trait PredefinedFromEntityUnmarshallers extends MultipartUnmarshallers {
     }
   }

-object PredefinedFromEntityUnmarshallers extends PredefinedFromEntityUnmarshallers
\ No newline at end of file
+object PredefinedFromEntityUnmarshallers extends PredefinedFromEntityUnmarshallers
diff --git a/project/MiMa.scala b/project/MiMa.scala
index 43bd291a6f..1081eed72a 100644
--- a/project/MiMa.scala
+++ b/project/MiMa.scala
@@ -904,6 +904,10 @@ object MiMa extends AutoPlugin {

         // #20543 GraphStage subtypes should not be private to akka
         ProblemFilters.exclude[DirectAbstractMethodProblem]("akka.stream.ActorMaterializer.actorOf")
+      ),
+      "2.4.9" -> Seq(
+        // #20994 adding new decode method, since we're on JDK7+ now
+        ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.util.ByteString.decodeString")
       )
     )
   }

From 210c11e85e51d7d08296cd8eeec60cbf54c6a493 Mon Sep 17 00:00:00 2001
From: Konrad Malawski
Date: Thu, 21 Jul 2016 15:48:32 +0200
Subject: [PATCH 035/155] +htp,ben HttpBlueprintBenchmark (#21005)

---
 .../akka/http/HttpBlueprintBenchmark.scala    | 144 ++++++++++++++++++
 1 file changed, 144 insertions(+)
 create mode 100644 akka-bench-jmh/src/main/scala/akka/http/HttpBlueprintBenchmark.scala

diff --git a/akka-bench-jmh/src/main/scala/akka/http/HttpBlueprintBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/http/HttpBlueprintBenchmark.scala
new file mode 100644
index 0000000000..24f61916c9
--- /dev/null
+++ b/akka-bench-jmh/src/main/scala/akka/http/HttpBlueprintBenchmark.scala
@@ -0,0 +1,144 @@
+/**
+ * Copyright (C) 2015-2016 Lightbend Inc.
+ */
+
+package akka.http
+
+import java.util.concurrent.{ CountDownLatch, TimeUnit }
+
+import akka.NotUsed
+import akka.actor.ActorSystem
+import akka.http.impl.util.ByteStringRendering
+import akka.http.scaladsl.{ Http, HttpExt }
+import akka.http.scaladsl.Http.ServerBinding
+import akka.http.scaladsl.model._
+import akka.http.scaladsl.server.Directives._
+import akka.http.scaladsl.unmarshalling._
+import akka.stream._
+import akka.stream.TLSProtocol.{ SslTlsInbound, SslTlsOutbound }
+import akka.stream.scaladsl._
+import akka.stream.stage.{ GraphStage, GraphStageLogic }
+import akka.util.ByteString
+import com.typesafe.config.ConfigFactory
+import org.openjdk.jmh.annotations._
+import org.openjdk.jmh.infra.Blackhole
+
+import scala.concurrent.{ Await, Future }
+import scala.concurrent.duration._
+import scala.util.Try
+
+/*
+ Baseline:
+
+ [info] Benchmark                              Mode  Cnt       Score       Error  Units
+ [info] HttpBlueprintBenchmark.run_10000_reqs  thrpt  20  197972.659 ± 14512.694  ops/s
+ */
+@State(Scope.Benchmark)
+@OutputTimeUnit(TimeUnit.SECONDS)
+@BenchmarkMode(Array(Mode.Throughput))
+class HttpBlueprintBenchmark {
+
+  val config = ConfigFactory.parseString(
+    """
+      akka {
+        loglevel = "WARNING"
+
+        stream.materializer {
+
+          # default: sync-processing-limit = 1000
+          sync-processing-limit = 1000
+
+          # default: output-burst-limit = 10000
+          output-burst-limit = 1000
+
+          # default: initial-input-buffer-size = 4
+          initial-input-buffer-size = 4
+
+          # default: max-input-buffer-size = 16
+          max-input-buffer-size = 16
+
+        }
+
+        http {
+          # default: request-timeout = 20s
+          request-timeout = infinite # disabled
+          # request-timeout = 20s
+        }
+      }""".stripMargin
+  ).withFallback(ConfigFactory.load())
+
+  implicit val system: ActorSystem = ActorSystem("HttpBenchmark", config)
+
+  val materializer: ActorMaterializer = ActorMaterializer()
+  val notFusingMaterializer = ActorMaterializer(materializer.settings.withAutoFusing(false))
+
+  val request: HttpRequest = HttpRequest()
+  val requestRendered = ByteString(
+    "GET / HTTP/1.1\r\n" +
+      "Accept: */*\r\n" +
+      "Accept-Encoding: gzip, deflate\r\n" +
+      "Connection: keep-alive\r\n" +
+      "Host: example.com\r\n" +
+      "User-Agent: HTTPie/0.9.3\r\n" +
+      "\r\n"
+  )
+
+  val response: HttpResponse = HttpResponse()
+  val responseRendered: ByteString = ByteString(
+    s"HTTP/1.1 200 OK\r\n" +
+      s"Content-Length: 0\r\n" +
+      s"\r\n"
+  )
+
+  def TCPPlacebo(requests: Int): Flow[ByteString, ByteString, NotUsed] =
+    Flow.fromSinkAndSource(
+      Flow[ByteString].takeWhile(it => !(it.utf8String contains "Connection: close")) to Sink.ignore,
+      Source.repeat(requestRendered).take(requests)
+    )
+
+  def layer: BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] = Http().serverLayer()(materializer)
+  def server(requests: Int): Flow[HttpResponse, HttpRequest, _] = layer atop TLSPlacebo() join TCPPlacebo(requests)
+
+  val reply = Flow[HttpRequest].map { _ => response }
+
+  @TearDown
+  def shutdown(): Unit = {
+    Await.result(system.terminate(), 5.seconds)
+  }
+
+  val nothingHere: Flow[HttpRequest, HttpResponse, NotUsed] =
+    Flow.fromSinkAndSource(Sink.cancelled, Source.empty)
+
+  @Benchmark
+  @OperationsPerInvocation(100)
+  def run_100_reqs(blackhole: Blackhole) = {
+    val n = 100
+    val latch = new CountDownLatch(n)
+
+    val replyCountdown = reply map { x =>
+      latch.countDown()
+      blackhole.consume(x)
+      x
+    }
+    server(n).joinMat(replyCountdown)(Keep.right).run()(materializer)
+
+    latch.await()
+  }
+
+  @Benchmark
+  @OperationsPerInvocation(100 * 1000)
+  def run_10000_reqs(blackhole: Blackhole) = {
+    val n = 100 * 1000
+    val latch = new CountDownLatch(n)
+
+    val replyCountdown = reply map { x =>
+      latch.countDown()
+      blackhole.consume(x)
+      x
+    }
+    server(n).joinMat(replyCountdown)(Keep.right).run()(materializer)
+
+    latch.await()
+  }
+
+}

From c8dfa2458d0220fd6d837a0e943f800d436f565c Mon Sep 17 00:00:00 2001
From: Alexandre Tamborrino
Date: Thu, 21 Jul 2016 17:35:13 +0200
Subject: [PATCH 036/155] Fix snippet example in stream-rate doc #21006

Fix #section-buffer by replacing .withAttributes by .addAttributes to avoid
overwriting the .async option.

* Update StreamBuffersRateDocTest.java

Fix #section-buffer by replacing .withAttributes by .addAttributes to avoid
overwriting the .async option.
---
 .../rst/java/code/docs/stream/StreamBuffersRateDocTest.java | 2 +-
 .../rst/scala/code/docs/stream/StreamBuffersRateSpec.scala  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/akka-docs/rst/java/code/docs/stream/StreamBuffersRateDocTest.java b/akka-docs/rst/java/code/docs/stream/StreamBuffersRateDocTest.java
index a1383e6626..0adc78c863 100644
--- a/akka-docs/rst/java/code/docs/stream/StreamBuffersRateDocTest.java
+++ b/akka-docs/rst/java/code/docs/stream/StreamBuffersRateDocTest.java
@@ -65,7 +65,7 @@ public class StreamBuffersRateDocTest extends AbstractJavaTest {

     final Flow<Integer, Integer, NotUsed> flow1 = Flow.of(Integer.class)
       .map(elem -> elem * 2).async()
-      .withAttributes(Attributes.inputBuffer(1, 1)); // the buffer size of this map is 1
+      .addAttributes(Attributes.inputBuffer(1, 1)); // the buffer size of this map is 1
     final Flow<Integer, Integer, NotUsed> flow2 = flow1.via(
       Flow.of(Integer.class)
diff --git a/akka-docs/rst/scala/code/docs/stream/StreamBuffersRateSpec.scala b/akka-docs/rst/scala/code/docs/stream/StreamBuffersRateSpec.scala
index dc29301e75..eee8237169 100644
--- a/akka-docs/rst/scala/code/docs/stream/StreamBuffersRateSpec.scala
+++ b/akka-docs/rst/scala/code/docs/stream/StreamBuffersRateSpec.scala
@@ -30,7 +30,7 @@ class StreamBuffersRateSpec extends AkkaSpec {
     //#section-buffer
     val section = Flow[Int].map(_ * 2).async
-      .withAttributes(Attributes.inputBuffer(initial = 1, max = 1)) // the buffer size of this map is 1
+      .addAttributes(Attributes.inputBuffer(initial = 1, max = 1)) // the buffer size of this map is 1
     val flow = section.via(Flow[Int].map(_ / 2)).async // the buffer size of this map is the default
     //#section-buffer
   }

From 14bfc353ba57d73bfcf971311552aa3b4022b59a Mon Sep 17 00:00:00 2001
From: Vadim Semenov
Date: Thu, 21 Jul 2016 18:36:48 +0300
Subject: [PATCH 037/155] Fixed scaladoc-typo in FSM.scala

---
 akka-actor/src/main/scala/akka/actor/FSM.scala | 1 +
 1 file changed, 1 insertion(+)

diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala
index bc1d0929cb..6d4d0f11d7 100644
--- a/akka-actor/src/main/scala/akka/actor/FSM.scala
+++ b/akka-actor/src/main/scala/akka/actor/FSM.scala
@@ -224,6 +224,7 @@ object FSM {
  * Finite State Machine actor trait. Use as follows:
  *
  * <pre>
+ *   object A {
  *     trait State
  *     case class One extends State
  *     case class Two extends State

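For reference, a minimal compilable version of the scaladoc usage example this patch touches might look as follows. This is an illustrative sketch only: the actor name `SomeActor`, the `Data` type and the `"tick"` message are invented, and `case object` is used for the states since a parameterless `case class` (as in the scaladoc) is deprecated in current Scala:

    import akka.actor.{ Actor, FSM }

    object A {
      trait State
      case object One extends State
      case object Two extends State

      final case class Data(count: Int)
    }

    class SomeActor extends Actor with FSM[A.State, A.Data] {
      import A._

      // start in state One with an initial counter of 0
      startWith(One, Data(0))

      when(One) {
        case Event("tick", d) ⇒ goto(Two) using d.copy(count = d.count + 1)
      }

      when(Two) {
        case Event("tick", d) ⇒ goto(One) using d.copy(count = d.count + 1)
      }

      initialize()
    }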
From e0d73187bdd2dd8a92518c95f1226f30bc425f3b Mon Sep 17 00:00:00 2001
From: Alexander Golubev 
Date: Fri, 22 Jul 2016 04:03:26 -0400
Subject: [PATCH 038/155] =str 20967 print stream state on test failure (#21003)

---
 .../akka/stream/testkit/StreamSpec.scala      | 43 +++++++++++++++++++
 .../scaladsl/FlowLimitWeightedSpec.scala      |  5 +--
 .../stream/scaladsl/FlowMapAsyncSpec.scala    |  4 +-
 .../scaladsl/FlowMapAsyncUnorderedSpec.scala  |  5 +--
 .../stream/scaladsl/FlowMapConcatSpec.scala   |  3 +-
 .../akka/stream/scaladsl/FlowMapSpec.scala    |  9 ++--
 .../stream/impl/ActorMaterializerImpl.scala   |  4 +-
 7 files changed, 54 insertions(+), 19 deletions(-)
 create mode 100644 akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamSpec.scala

diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamSpec.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamSpec.scala
new file mode 100644
index 0000000000..7ab2d0fd80
--- /dev/null
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamSpec.scala
@@ -0,0 +1,43 @@
+/**
+ * Copyright (C) 2015-2016 Lightbend Inc. 
+ */
+package akka.stream.testkit
+
+import akka.actor.{ ActorSystem, ActorRef }
+import akka.stream.impl.StreamSupervisor
+import akka.testkit.{ AkkaSpec, TestProbe }
+import com.typesafe.config.{ ConfigFactory, Config }
+import org.scalatest.Failed
+import scala.concurrent.duration._
+
+class StreamSpec(_system: ActorSystem) extends AkkaSpec(_system) {
+  def this(config: Config) =
+    this(ActorSystem(
+      AkkaSpec.getCallerName(getClass),
+      ConfigFactory.load(config.withFallback(AkkaSpec.testConf))))
+
+  def this(s: String) = this(ConfigFactory.parseString(s))
+
+  def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap))
+
+  def this() = this(ActorSystem(AkkaSpec.getCallerName(getClass), AkkaSpec.testConf))
+
+  override def withFixture(test: NoArgTest) = {
+    super.withFixture(test) match {
+      case failed: Failed ⇒
+        val probe = TestProbe()(system)
+        system.actorSelection("/user/" + StreamSupervisor.baseName + "*").tell(StreamSupervisor.GetChildren, probe.ref)
+        val children: Seq[ActorRef] = probe.receiveWhile(2.seconds) {
+          case StreamSupervisor.Children(children) ⇒ children
+        }.flatten
+        println("--- Stream actors debug dump ---")
+        if (children.isEmpty) println("Stream is completed. No debug information is available")
+        else {
+          println("Stream actors alive: " + children)
+          children.foreach(_ ! StreamSupervisor.PrintDebugDump)
+        }
+        failed
+      case other ⇒ other
+    }
+  }
+}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala
index 367ff1a217..37e39290c3 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala
@@ -3,12 +3,11 @@
  */
 package akka.stream.scaladsl
 
+import akka.stream.testkit.StreamSpec
 import akka.stream.{ StreamLimitReachedException, ActorMaterializer, ActorMaterializerSettings }
-import akka.testkit.AkkaSpec
 import scala.concurrent.Await
-import scala.concurrent.duration._
 
-class FlowLimitWeightedSpec extends AkkaSpec {
+class FlowLimitWeightedSpec extends StreamSpec {
 
   val settings = ActorMaterializerSettings(system)
     .withInputBuffer(initialSize = 2, maxSize = 16)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
index 2bf6fc708d..1e5dc84e76 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
@@ -20,10 +20,8 @@ import scala.annotation.tailrec
 import scala.concurrent.Promise
 import java.util.concurrent.atomic.AtomicInteger
 import java.util.concurrent.LinkedBlockingQueue
-import org.scalatest.concurrent.ScalaFutures
-import akka.testkit.AkkaSpec
 
-class FlowMapAsyncSpec extends AkkaSpec {
+class FlowMapAsyncSpec extends StreamSpec {
 
   implicit val materializer = ActorMaterializer()
 
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
index ef741d59bd..3d1d8955c5 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
@@ -20,11 +20,8 @@ import java.util.concurrent.atomic.AtomicInteger
 import scala.concurrent.Promise
 import java.util.concurrent.LinkedBlockingQueue
 import scala.annotation.tailrec
-import org.scalatest.concurrent.ScalaFutures
-import org.scalactic.ConversionCheckedTripleEquals
-import akka.testkit.AkkaSpec
 
-class FlowMapAsyncUnorderedSpec extends AkkaSpec {
+class FlowMapAsyncUnorderedSpec extends StreamSpec {
 
   implicit val materializer = ActorMaterializer()
 
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
index 5a7523a0c2..e05b651132 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
@@ -8,9 +8,8 @@ import akka.stream.{ Supervision, ActorAttributes, ActorMaterializer, ActorMater
 import akka.stream.testkit.Utils._
 import akka.stream.testkit._
 import scala.util.control.NoStackTrace
-import akka.testkit.AkkaSpec
 
-class FlowMapConcatSpec extends AkkaSpec with ScriptedTest {
+class FlowMapConcatSpec extends StreamSpec with ScriptedTest {
 
   val settings = ActorMaterializerSettings(system)
     .withInputBuffer(initialSize = 2, maxSize = 16)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala
index 0f2ab8f7f3..c9e6e908e3 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala
@@ -4,12 +4,11 @@
 package akka.stream.scaladsl
 
 import java.util.concurrent.ThreadLocalRandom.{ current ⇒ random }
-import akka.stream.ActorMaterializer
-import akka.stream.ActorMaterializerSettings
-import akka.stream.testkit._
-import akka.testkit.AkkaSpec
 
-class FlowMapSpec extends AkkaSpec with ScriptedTest {
+import akka.stream.{ ActorMaterializer, ActorMaterializerSettings }
+import akka.stream.testkit._
+
+class FlowMapSpec extends StreamSpec with ScriptedTest {
 
   val settings = ActorMaterializerSettings(system)
     .withInputBuffer(initialSize = 2, maxSize = 16)
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
index 8bcda5503c..8c2b997042 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
@@ -283,8 +283,8 @@ class FlowNames extends Extension {
 object StreamSupervisor {
   def props(settings: ActorMaterializerSettings, haveShutDown: AtomicBoolean): Props =
     Props(new StreamSupervisor(settings, haveShutDown)).withDeploy(Deploy.local)
-
-  private val actorName = SeqActorName("StreamSupervisor")
+  private[stream] val baseName = "StreamSupervisor"
+  private val actorName = SeqActorName(baseName)
   def nextName(): String = actorName.next()
 
   final case class Materialize(props: Props, name: String)

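For reference, a test suite opts into this failure-time dump simply by extending the new `StreamSpec` base class instead of `AkkaSpec`; a minimal sketch (the suite name, flow and assertion are illustrative only):

    import akka.stream.ActorMaterializer
    import akka.stream.scaladsl.{ Sink, Source }
    import akka.stream.testkit.StreamSpec

    import scala.concurrent.Await
    import scala.concurrent.duration._

    class MyFlowSpec extends StreamSpec {
      implicit val materializer = ActorMaterializer()

      "A summing flow" must {
        "fold all elements" in {
          val sum = Source(1 to 4).runWith(Sink.fold(0)(_ + _))
          // if this assertion fails, the withFixture override above prints the
          // "Stream actors debug dump" for every stream actor that is still alive
          Await.result(sum, 3.seconds) should ===(10)
        }
      }
    }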
From 6fb2d176a1f566133d9eb42f531ade8b9d344c86 Mon Sep 17 00:00:00 2001
From: Hawstein 
Date: Fri, 22 Jul 2016 17:33:30 +0800
Subject: [PATCH 039/155] +htp #20881 add toStrictEntity and
 extractStrictEntity directive (#20953)

---
 .../BasicDirectivesExamplesTest.java          | 53 +++++++++++++++++++
 .../basic-directives/extractStrictEntity.rst  | 23 ++++++++
 .../directives/basic-directives/index.rst     |  4 ++
 .../basic-directives/toStrictEntity.rst       | 23 ++++++++
 .../BasicDirectivesExamplesSpec.scala         | 35 ++++++++++++
 .../basic-directives/extractStrictEntity.rst  | 30 +++++++++++
 .../directives/basic-directives/index.rst     |  4 ++
 .../basic-directives/toStrictEntity.rst       | 30 +++++++++++
 .../server/directives/BasicDirectives.scala   | 27 ++++++++++
 .../server/directives/BasicDirectives.scala   | 51 +++++++++++++++++-
 10 files changed, 279 insertions(+), 1 deletion(-)
 create mode 100644 akka-docs/rst/java/http/routing-dsl/directives/basic-directives/extractStrictEntity.rst
 create mode 100644 akka-docs/rst/java/http/routing-dsl/directives/basic-directives/toStrictEntity.rst
 create mode 100644 akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/extractStrictEntity.rst
 create mode 100644 akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/toStrictEntity.rst

diff --git a/akka-docs/rst/java/code/docs/http/javadsl/server/directives/BasicDirectivesExamplesTest.java b/akka-docs/rst/java/code/docs/http/javadsl/server/directives/BasicDirectivesExamplesTest.java
index 21d7fcc471..32790670a2 100644
--- a/akka-docs/rst/java/code/docs/http/javadsl/server/directives/BasicDirectivesExamplesTest.java
+++ b/akka-docs/rst/java/code/docs/http/javadsl/server/directives/BasicDirectivesExamplesTest.java
@@ -3,6 +3,7 @@
  */
 package docs.http.javadsl.server.directives;
 
+import akka.NotUsed;
 import akka.actor.ActorSystem;
 import akka.dispatch.ExecutionContexts;
 import akka.event.Logging;
@@ -31,14 +32,17 @@ import akka.util.ByteString;
 import org.junit.Ignore;
 import org.junit.Test;
 import scala.concurrent.ExecutionContextExecutor;
+import scala.concurrent.duration.FiniteDuration;
 
 import java.nio.file.Paths;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionStage;
 import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.function.Supplier;
@@ -785,4 +789,53 @@ public class BasicDirectivesExamplesTest extends JUnitRouteTest {
     //#extractUnmatchedPath
   }
 
+  @Test
+  public void testExtractStrictEntity() {
+    //#extractStrictEntity
+    final FiniteDuration timeout = FiniteDuration.create(3, TimeUnit.SECONDS);
+    final Route route = extractStrictEntity(timeout, strict ->
+      complete(strict.getData().utf8String())
+    );
+
+    // tests:
+    final Iterator<ByteString> iterator = Arrays.asList(
+      ByteString.fromString("1"),
+      ByteString.fromString("2"),
+      ByteString.fromString("3")).iterator();
+    final Source<ByteString, NotUsed> dataBytes = Source.fromIterator(() -> iterator);
+    testRoute(route).run(
+      HttpRequest.POST("/")
+        .withEntity(HttpEntities.create(ContentTypes.TEXT_PLAIN_UTF8, dataBytes))
+    ).assertEntity("123");
+    //#extractStrictEntity
+  }
+
+  @Test
+  public void testToStrictEntity() {
+    //#toStrictEntity
+    final FiniteDuration timeout = FiniteDuration.create(3, TimeUnit.SECONDS);
+    final Route route = toStrictEntity(timeout, () ->
+      extractRequest(req -> {
+        if (req.entity() instanceof HttpEntity.Strict) {
+          final HttpEntity.Strict strict = (HttpEntity.Strict)req.entity();
+          return complete("Request entity is strict, data=" + strict.getData().utf8String());
+        } else {
+          return complete("Ooops, request entity is not strict!");
+        }
+      })
+    );
+
+    // tests:
+    final Iterator<ByteString> iterator = Arrays.asList(
+      ByteString.fromString("1"),
+      ByteString.fromString("2"),
+      ByteString.fromString("3")).iterator();
+    final Source<ByteString, NotUsed> dataBytes = Source.fromIterator(() -> iterator);
+    testRoute(route).run(
+      HttpRequest.POST("/")
+        .withEntity(HttpEntities.create(ContentTypes.TEXT_PLAIN_UTF8, dataBytes))
+    ).assertEntity("Request entity is strict, data=123");
+    //#toStrictEntity
+  }
+
 }
diff --git a/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/extractStrictEntity.rst b/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/extractStrictEntity.rst
new file mode 100644
index 0000000000..3b2279c1aa
--- /dev/null
+++ b/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/extractStrictEntity.rst
@@ -0,0 +1,23 @@
+.. _-extractStrictEntity-java-:
+
+extractStrictEntity
+===================
+
+Description
+-----------
+
+Extracts the strict HTTP entity as ``HttpEntity.Strict`` from the :class:`RequestContext`.
+
+A timeout parameter is given; if the stream isn't completed within the timeout, the directive fails.
+
+.. warning::
+
+  The directive will read the request entity into memory within the size limit (8M by default) and effectively disable streaming.
+  The size limit can be configured globally with ``akka.http.parsing.max-content-length`` or
+  overridden by wrapping with :ref:`-withSizeLimit-java-` or :ref:`-withoutSizeLimit-java-` directive.
+
+
+Example
+-------
+
+.. includecode:: ../../../../code/docs/http/javadsl/server/directives/BasicDirectivesExamplesTest.java#extractStrictEntity
diff --git a/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/index.rst b/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/index.rst
index 39238f6d99..a212015c69 100644
--- a/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/index.rst
+++ b/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/index.rst
@@ -19,6 +19,7 @@ a single value or a tuple of values.
   * :ref:`-extract-java-`
   * :ref:`-extractExecutionContext-java-`
   * :ref:`-extractMaterializer-java-`
+  * :ref:`-extractStrictEntity-java-`
   * :ref:`-extractLog-java-`
   * :ref:`-extractRequest-java-`
   * :ref:`-extractRequestContext-java-`
@@ -41,6 +42,7 @@ Transforming the Request(Context)
   * :ref:`-withMaterializer-java-`
   * :ref:`-withLog-java-`
   * :ref:`-withSettings-java-`
+  * :ref:`-toStrictEntity-java-`
 
 
 .. _Response Transforming Directives-java:
@@ -93,6 +95,7 @@ Alphabetically
    extract
    extractExecutionContext
    extractMaterializer
+   extractStrictEntity
    extractLog
    extractRequest
    extractRequestContext
@@ -117,6 +120,7 @@ Alphabetically
    provide
    recoverRejections
    recoverRejectionsWith
+   toStrictEntity
    withExecutionContext
    withMaterializer
    withLog
diff --git a/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/toStrictEntity.rst b/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/toStrictEntity.rst
new file mode 100644
index 0000000000..a1950e6b42
--- /dev/null
+++ b/akka-docs/rst/java/http/routing-dsl/directives/basic-directives/toStrictEntity.rst
@@ -0,0 +1,23 @@
+.. _-toStrictEntity-java-:
+
+toStrictEntity
+==============
+
+Description
+-----------
+
+Transforms the request entity to a strict entity before it is handled by the inner route.
+
+A timeout parameter is given; if the stream isn't completed within the timeout, the directive fails.
+
+.. warning::
+
+  The directive will read the request entity into memory within the size limit (8M by default) and effectively disable streaming.
+  The size limit can be configured globally with ``akka.http.parsing.max-content-length`` or
+  overridden by wrapping with :ref:`-withSizeLimit-java-` or :ref:`-withoutSizeLimit-java-` directive.
+
+
+Example
+-------
+
+.. includecode:: ../../../../code/docs/http/javadsl/server/directives/BasicDirectivesExamplesTest.java#toStrictEntity
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala
index 0b5ff6247e..39d3969998 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala
@@ -826,5 +826,40 @@ class BasicDirectivesExamplesSpec extends RoutingSpec {
     }
     //#
   }
+  "extractStrictEntity-example" in {
+    //#extractStrictEntity-example
+    import scala.concurrent.duration._
+    val route = extractStrictEntity(3.seconds) { entity =>
+      complete(entity.data.utf8String)
+    }
+
+    // tests:
+    val dataBytes = Source.fromIterator(() ⇒ Iterator.range(1, 10).map(x ⇒ ByteString(x.toString)))
+    Post("/", HttpEntity(ContentTypes.`text/plain(UTF-8)`, data = dataBytes)) ~> route ~> check {
+      responseAs[String] shouldEqual "123456789"
+    }
+    //#
+  }
+  "toStrictEntity-example" in {
+    //#toStrictEntity-example
+    import scala.concurrent.duration._
+    val route = toStrictEntity(3.seconds) {
+      extractRequest { req =>
+        req.entity match {
+          case strict: HttpEntity.Strict =>
+            complete(s"Request entity is strict, data=${strict.data.utf8String}")
+          case _ =>
+            complete("Ooops, request entity is not strict!")
+        }
+      }
+    }
+
+    // tests:
+    val dataBytes = Source.fromIterator(() ⇒ Iterator.range(1, 10).map(x ⇒ ByteString(x.toString)))
+    Post("/", HttpEntity(ContentTypes.`text/plain(UTF-8)`, data = dataBytes)) ~> route ~> check {
+      responseAs[String] shouldEqual "Request entity is strict, data=123456789"
+    }
+    //#
+  }
 
 }
diff --git a/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/extractStrictEntity.rst b/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/extractStrictEntity.rst
new file mode 100644
index 0000000000..29ca0aa174
--- /dev/null
+++ b/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/extractStrictEntity.rst
@@ -0,0 +1,30 @@
+.. _-extractStrictEntity-:
+
+extractStrictEntity
+===================
+
+Signature
+---------
+
+.. includecode2:: /../../akka-http/src/main/scala/akka/http/scaladsl/server/directives/BasicDirectives.scala
+   :snippet: extractStrictEntity
+
+Description
+-----------
+
+Extracts the strict HTTP entity as ``HttpEntity.Strict`` from the :class:`RequestContext`.
+
+A timeout parameter is given; if the stream isn't completed within the timeout, the directive fails.
+
+.. warning::
+
+  The directive will read the request entity into memory within the size limit (8M by default) and effectively disable streaming.
+  The size limit can be configured globally with ``akka.http.parsing.max-content-length`` or
+  overridden by wrapping with :ref:`-withSizeLimit-` or :ref:`-withoutSizeLimit-` directive.
+
+
+Example
+-------
+
+.. includecode2:: ../../../../code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala
+   :snippet: extractStrictEntity-example
diff --git a/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/index.rst b/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/index.rst
index 709f7d7b29..012bc536ca 100644
--- a/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/index.rst
+++ b/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/index.rst
@@ -20,6 +20,7 @@ a single value or a tuple of values.
   * :ref:`-extractDataBytes-`
   * :ref:`-extractExecutionContext-`
   * :ref:`-extractMaterializer-`
+  * :ref:`-extractStrictEntity-`
   * :ref:`-extractLog-`
   * :ref:`-extractRequest-`
   * :ref:`-extractRequestContext-`
@@ -45,6 +46,7 @@ Transforming the Request(Context)
   * :ref:`-withMaterializer-`
   * :ref:`-withLog-`
   * :ref:`-withSettings-`
+  * :ref:`-toStrictEntity-`
 
 
 .. _Response Transforming Directives:
@@ -98,6 +100,7 @@ Alphabetically
    extractExecutionContext
    extractDataBytes
    extractMaterializer
+   extractStrictEntity
    extractLog
    extractRequest
    extractRequestContext
@@ -124,6 +127,7 @@ Alphabetically
    recoverRejections
    recoverRejectionsWith
    textract
+   toStrictEntity
    tprovide
    withExecutionContext
    withMaterializer
diff --git a/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/toStrictEntity.rst b/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/toStrictEntity.rst
new file mode 100644
index 0000000000..f45a9ac048
--- /dev/null
+++ b/akka-docs/rst/scala/http/routing-dsl/directives/basic-directives/toStrictEntity.rst
@@ -0,0 +1,30 @@
+.. _-toStrictEntity-:
+
+toStrictEntity
+==============
+
+Signature
+---------
+
+.. includecode2:: /../../akka-http/src/main/scala/akka/http/scaladsl/server/directives/BasicDirectives.scala
+   :snippet: toStrictEntity
+
+Description
+-----------
+
+Transforms the request entity to a strict entity before it is handled by the inner route.
+
+A timeout parameter is given; if the stream isn't completed within the timeout, the directive fails.
+
+.. warning::
+
+  The directive will read the request entity into memory within the size limit (8M by default) and effectively disable streaming.
+  The size limit can be configured globally with ``akka.http.parsing.max-content-length`` or
+  overridden by wrapping with :ref:`-withSizeLimit-` or :ref:`-withoutSizeLimit-` directive.
+
+
+Example
+-------
+
+.. includecode2:: ../../../../code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala
+   :snippet: toStrictEntity-example
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/directives/BasicDirectives.scala b/akka-http/src/main/scala/akka/http/javadsl/server/directives/BasicDirectives.scala
index 886223d662..f87a9b55f3 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/directives/BasicDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/directives/BasicDirectives.scala
@@ -16,6 +16,7 @@ import akka.util.ByteString
 import scala.concurrent.ExecutionContextExecutor
 import akka.http.impl.model.JavaUri
 import akka.http.javadsl.model.HttpRequest
+import akka.http.javadsl.model.HttpEntity
 import akka.http.javadsl.model.RequestEntity
 import akka.http.javadsl.model.Uri
 import akka.http.javadsl.server._
@@ -38,6 +39,7 @@ import akka.event.LoggingAdapter
 import akka.http.javadsl.server
 
 import scala.compat.java8.FutureConverters._
+import scala.concurrent.duration.FiniteDuration
 
 abstract class BasicDirectives {
   import akka.http.impl.util.JavaMapping.Implicits._
@@ -283,4 +285,29 @@ abstract class BasicDirectives {
    */
   def extractRequestEntity(inner: JFunction[RequestEntity, Route]): Route = extractEntity(inner)
 
+  /**
+   * WARNING: This will read the entire request entity into memory regardless of size and effectively disable streaming.
+   *
+   * Converts the HttpEntity from the [[akka.http.javadsl.server.RequestContext]] into an
+   * [[akka.http.javadsl.model.HttpEntity.Strict]] and extracts it, or fails the route if unable to drain the
+   * entire request body within the timeout.
+   *
+   * @param timeout The directive is failed if the stream isn't completed within the given timeout.
+   */
+  def extractStrictEntity(timeout: FiniteDuration, inner: JFunction[HttpEntity.Strict, Route]): Route = RouteAdapter {
+    D.extractStrictEntity(timeout) { strict ⇒ inner.apply(strict).delegate }
+  }
+
+  /**
+   * WARNING: This will read the entire request entity into memory regardless of size and effectively disable streaming.
+   *
+   * Extracts the [[akka.http.javadsl.server.RequestContext]] itself with the strict HTTP entity,
+   * or fails the route if unable to drain the entire request body within the timeout.
+   *
+   * @param timeout The directive is failed if the stream isn't completed within the given timeout.
+   */
+  def toStrictEntity(timeout: FiniteDuration, inner: Supplier[Route]): Route = RouteAdapter {
+    D.toStrictEntity(timeout) { inner.get.delegate }
+  }
+
 }
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/BasicDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/BasicDirectives.scala
index 1b5d4fcecc..0b0fd6a99c 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/BasicDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/BasicDirectives.scala
@@ -8,9 +8,11 @@ package directives
 import akka.stream.scaladsl.Source
 import akka.util.ByteString
 
+import scala.concurrent.duration.FiniteDuration
 import scala.concurrent.{ Future, ExecutionContextExecutor }
 import scala.collection.immutable
 import akka.event.LoggingAdapter
+import akka.stream.impl.ConstantFun.scalaIdentityFunction
 import akka.stream.Materializer
 import akka.http.scaladsl.settings.{ RoutingSettings, ParserSettings }
 import akka.http.scaladsl.server.util.Tuple
@@ -18,6 +20,8 @@ import akka.http.scaladsl.util.FastFuture
 import akka.http.scaladsl.model._
 import akka.http.scaladsl.util.FastFuture._
 
+import scala.util.{ Failure, Success }
+
 /**
  * @groupname basic Basic directives
  * @groupprio basic 10
@@ -301,6 +305,51 @@ trait BasicDirectives {
    * @group basic
    */
   def extractDataBytes: Directive1[Source[ByteString, Any]] = BasicDirectives._extractDataBytes
+
+  /**
+   * WARNING: This will read the entire request entity into memory regardless of size and effectively disable streaming.
+   *
+   * Converts the HttpEntity from the [[akka.http.scaladsl.server.RequestContext]] into an
+   * [[akka.http.scaladsl.model.HttpEntity.Strict]] and extracts it, or fails the route if unable to drain the
+   * entire request body within the timeout.
+   *
+   * @param timeout The directive is failed if the stream isn't completed within the given timeout.
+   * @group basic
+   */
+  def extractStrictEntity(timeout: FiniteDuration): Directive1[HttpEntity.Strict] =
+    extract { ctx ⇒
+      import ctx.materializer
+
+      ctx.request.entity.toStrict(timeout)
+
+    }.flatMap { entity ⇒
+      import FutureDirectives._
+
+      onComplete(entity).flatMap {
+        case Success(x) ⇒ provide(x)
+        case Failure(t) ⇒ StandardRoute(_.fail(t))
+      }
+    }
+
+  /**
+   * WARNING: This will read the entire request entity into memory regardless of size and effectively disable streaming.
+   *
+   * Extracts the [[akka.http.scaladsl.server.RequestContext]] itself with the strict HTTP entity,
+   * or fails the route if unable to drain the entire request body within the timeout.
+   *
+   * @param timeout The directive is failed if the stream isn't completed within the given timeout.
+   * @group basic
+   */
+  def toStrictEntity(timeout: FiniteDuration): Directive0 =
+    Directive { inner ⇒ ctx ⇒
+      import ctx.{ executionContext, materializer }
+
+      ctx.request.entity.toStrict(timeout).flatMap { strictEntity ⇒
+        val newCtx = ctx.mapRequest(_.copy(entity = strictEntity))
+        inner(())(newCtx)
+      }
+    }
+
 }
 
 object BasicDirectives extends BasicDirectives {
@@ -312,7 +361,7 @@ object BasicDirectives extends BasicDirectives {
   private val _extractLog: Directive1[LoggingAdapter] = extract(_.log)
   private val _extractSettings: Directive1[RoutingSettings] = extract(_.settings)
   private val _extractParserSettings: Directive1[ParserSettings] = extract(_.parserSettings)
-  private val _extractRequestContext: Directive1[RequestContext] = extract(conforms)
+  private val _extractRequestContext: Directive1[RequestContext] = extract(scalaIdentityFunction)
   private val _extractRequestEntity: Directive1[RequestEntity] = extract(_.request.entity)
   private val _extractDataBytes: Directive1[Source[ByteString, Any]] = extract(_.request.entity.dataBytes)
 }
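
The ``extractStrictEntity`` implementation above composes three primitives:
``extract`` to pull a ``Future`` off the context, ``onComplete`` to wait for
it, and ``provide``/``StandardRoute`` to surface the value or the failure.
The same shape generalizes to any context-derived ``Future``; a minimal
sketch under that assumption (the ``extractFuture`` name is hypothetical)::

    import scala.concurrent.Future
    import scala.util.{ Failure, Success }
    import akka.http.scaladsl.server._
    import akka.http.scaladsl.server.Directives._

    // hypothetical helper: turn a context-derived Future into a Directive1
    def extractFuture[T](f: RequestContext => Future[T]): Directive1[T] =
      extract(f).flatMap { future =>
        onComplete(future).flatMap {
          case Success(value) => provide(value)
          case Failure(error) => StandardRoute(_.fail(error))
        }
      }
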

From 27efafecfc17879bd1db71d78a9aabe818019c91 Mon Sep 17 00:00:00 2001
From: Johan Andrén
Date: Fri, 22 Jul 2016 13:12:33 +0200
Subject: [PATCH 040/155] =htp #21009 Correct type of response in logging
 directive samples  (#21010)

* Correct type of response in logging directive samples #21009

* Updates to signatures in the docs as well
---
 .../DebuggingDirectivesExamplesTest.java      |  8 +++---
 .../DebuggingDirectivesExamplesSpec.scala     | 25 ++++++++++---------
 .../debugging-directives/logRequestResult.rst |  4 +--
 .../debugging-directives/logResult.rst        |  8 +++---
 4 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/akka-docs/rst/java/code/docs/http/javadsl/server/directives/DebuggingDirectivesExamplesTest.java b/akka-docs/rst/java/code/docs/http/javadsl/server/directives/DebuggingDirectivesExamplesTest.java
index dee1e78d3d..3910d1f8fe 100644
--- a/akka-docs/rst/java/code/docs/http/javadsl/server/directives/DebuggingDirectivesExamplesTest.java
+++ b/akka-docs/rst/java/code/docs/http/javadsl/server/directives/DebuggingDirectivesExamplesTest.java
@@ -48,7 +48,7 @@ public class DebuggingDirectivesExamplesTest extends JUnitRouteTest {
 
     // logs just the request method at info level
     Function<HttpRequest, LogEntry> requestMethodAsInfo = (request) ->
-      LogEntry.create(request.method().toString(), InfoLevel());
+      LogEntry.create(request.method().name(), InfoLevel());
 
     final Route routeUsingFunction = get(() -> 
       logRequest(requestMethodAsInfo, () -> complete("logged")));
@@ -70,7 +70,7 @@ public class DebuggingDirectivesExamplesTest extends JUnitRouteTest {
         (response.status().isSuccess())  ? 
             Optional.of(
               LogEntry.create(
-                request.method().toString() + ":" + response.status().intValue(), 
+                request.method().name() + ":" + response.status().intValue(),
                 InfoLevel()))
           : Optional.empty(); // not a successful response
 
@@ -80,7 +80,7 @@ public class DebuggingDirectivesExamplesTest extends JUnitRouteTest {
         (!rejections.isEmpty())  ? 
           Optional.of(
             LogEntry.create(
-                rejections
+              rejections
                 .stream()
                 .map(Rejection::toString)
                 .collect(Collectors.joining(", ")), 
@@ -116,7 +116,7 @@ public class DebuggingDirectivesExamplesTest extends JUnitRouteTest {
       LogEntry.create(
         rejections
         .stream()
-        .map(rejection->rejection.toString())
+        .map(rejection -> rejection.toString())
         .collect(Collectors.joining(", ")), 
       InfoLevel());
 
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/DebuggingDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/DebuggingDirectivesExamplesSpec.scala
index 4ff330a9eb..37239609a5 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/DebuggingDirectivesExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/DebuggingDirectivesExamplesSpec.scala
@@ -6,6 +6,7 @@ package docs.http.scaladsl.server.directives
 
 import akka.event.Logging
 import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
+import akka.http.scaladsl.server.RouteResult
 import akka.http.scaladsl.server.directives.{ DebuggingDirectives, LogEntry, LoggingMagnet }
 import docs.http.scaladsl.server.RoutingSpec
 
@@ -21,15 +22,15 @@ class DebuggingDirectivesExamplesSpec extends RoutingSpec {
     DebuggingDirectives.logRequest(("get-user", Logging.InfoLevel))
 
     // logs just the request method at debug level
-    def requestMethod(req: HttpRequest): String = req.method.toString
+    def requestMethod(req: HttpRequest): String = req.method.name
     DebuggingDirectives.logRequest(requestMethod _)
 
     // logs just the request method at info level
-    def requestMethodAsInfo(req: HttpRequest): LogEntry = LogEntry(req.method.toString, Logging.InfoLevel)
+    def requestMethodAsInfo(req: HttpRequest): LogEntry = LogEntry(req.method.name, Logging.InfoLevel)
     DebuggingDirectives.logRequest(requestMethodAsInfo _)
 
     // This one doesn't use the implicit LoggingContext but uses `println` for logging
-    def printRequestMethod(req: HttpRequest): Unit = println(req.method)
+    def printRequestMethod(req: HttpRequest): Unit = println(req.method.name)
     val logRequestPrintln = DebuggingDirectives.logRequest(LoggingMagnet(_ => printRequestMethod))
 
     // tests:
@@ -48,14 +49,14 @@ class DebuggingDirectivesExamplesSpec extends RoutingSpec {
     DebuggingDirectives.logRequestResult(("get-user", Logging.InfoLevel))
 
     // logs just the request method and response status at info level
-    def requestMethodAndResponseStatusAsInfo(req: HttpRequest): Any => Option[LogEntry] = {
-      case res: HttpResponse => Some(LogEntry(req.method + ":" + res.status, Logging.InfoLevel))
-      case _                 => None // other kind of responses
+    def requestMethodAndResponseStatusAsInfo(req: HttpRequest): RouteResult => Option[LogEntry] = {
+      case RouteResult.Complete(res) => Some(LogEntry(req.method.name + ": " + res.status, Logging.InfoLevel))
+      case _                         => None // no log entries for rejections
     }
     DebuggingDirectives.logRequestResult(requestMethodAndResponseStatusAsInfo _)
 
     // This one doesn't use the implicit LoggingContext but uses `println` for logging
-    def printRequestMethodAndResponseStatus(req: HttpRequest)(res: Any): Unit =
+    def printRequestMethodAndResponseStatus(req: HttpRequest)(res: RouteResult): Unit =
       println(requestMethodAndResponseStatusAsInfo(req)(res).map(_.obj.toString).getOrElse(""))
     val logRequestResultPrintln = DebuggingDirectives.logRequestResult(LoggingMagnet(_ => printRequestMethodAndResponseStatus))
 
@@ -75,18 +76,18 @@ class DebuggingDirectivesExamplesSpec extends RoutingSpec {
     DebuggingDirectives.logResult(("get-user", Logging.InfoLevel))
 
     // logs just the response status at debug level
-    def responseStatus(res: Any): String = res match {
-      case x: HttpResponse => x.status.toString
-      case _               => "unknown response part"
+    def responseStatus(res: RouteResult): String = res match {
+      case RouteResult.Complete(x)          => x.status.toString
+      case RouteResult.Rejected(rejections) => "Rejected: " + rejections.mkString(", ")
     }
     DebuggingDirectives.logResult(responseStatus _)
 
     // logs just the response status at info level
-    def responseStatusAsInfo(res: Any): LogEntry = LogEntry(responseStatus(res), Logging.InfoLevel)
+    def responseStatusAsInfo(res: RouteResult): LogEntry = LogEntry(responseStatus(res), Logging.InfoLevel)
     DebuggingDirectives.logResult(responseStatusAsInfo _)
 
     // This one doesn't use the implicit LoggingContext but uses `println` for logging
-    def printResponseStatus(res: Any): Unit = println(responseStatus(res))
+    def printResponseStatus(res: RouteResult): Unit = println(responseStatus(res))
     val logResultPrintln = DebuggingDirectives.logResult(LoggingMagnet(_ => printResponseStatus))
 
     // tests:
diff --git a/akka-docs/rst/scala/http/routing-dsl/directives/debugging-directives/logRequestResult.rst b/akka-docs/rst/scala/http/routing-dsl/directives/debugging-directives/logRequestResult.rst
index 801bd8fc65..23c2f42e04 100644
--- a/akka-docs/rst/scala/http/routing-dsl/directives/debugging-directives/logRequestResult.rst
+++ b/akka-docs/rst/scala/http/routing-dsl/directives/debugging-directives/logRequestResult.rst
@@ -10,9 +10,7 @@ Signature
 
     def logRequestResult(marker: String)(implicit log: LoggingContext): Directive0
     def logRequestResult(marker: String, level: LogLevel)(implicit log: LoggingContext): Directive0
-    def logRequestResult(show: HttpRequest => HttpResponsePart => Option[LogEntry])
-                          (implicit log: LoggingContext): Directive0
-    def logRequestResult(show: HttpRequest => Any => Option[LogEntry])(implicit log: LoggingContext): Directive0
+    def logRequestResult(show: HttpRequest => RouteResult => Option[LogEntry])(implicit log: LoggingContext): Directive0
 
 The signature shown is simplified, the real signature uses magnets. [1]_
 
diff --git a/akka-docs/rst/scala/http/routing-dsl/directives/debugging-directives/logResult.rst b/akka-docs/rst/scala/http/routing-dsl/directives/debugging-directives/logResult.rst
index bdbbe4e2de..17403bb018 100644
--- a/akka-docs/rst/scala/http/routing-dsl/directives/debugging-directives/logResult.rst
+++ b/akka-docs/rst/scala/http/routing-dsl/directives/debugging-directives/logResult.rst
@@ -10,9 +10,9 @@ Signature
 
     def logResult(marker: String)(implicit log: LoggingContext): Directive0
     def logResult(marker: String, level: LogLevel)(implicit log: LoggingContext): Directive0
-    def logResult(show: Any => String)(implicit log: LoggingContext): Directive0
-    def logResult(show: Any => LogEntry)(implicit log: LoggingContext): Directive0
-    def logResult(magnet: LoggingMagnet[Any => Unit])(implicit log: LoggingContext): Directive0
+    def logResult(show: RouteResult => String)(implicit log: LoggingContext): Directive0
+    def logResult(show: RouteResult => LogEntry)(implicit log: LoggingContext): Directive0
+    def logResult(magnet: LoggingMagnet[RouteResult => Unit])(implicit log: LoggingContext): Directive0
 
 The signature shown is simplified, the real signature uses magnets. [1]_
 
@@ -24,7 +24,7 @@ Description
 Logs the response.
 
 See :ref:`-logRequest-` for the general description how these directives work. This directive is different
-as it requires a ``LoggingMagnet[Any => Unit]``. Instead of just logging ``HttpResponses``, ``logResult`` is able to
+as it requires a ``LoggingMagnet[RouteResult => Unit]``. Instead of just logging ``HttpResponses``, ``logResult`` is able to
 log any :ref:`RouteResult` coming back from the inner route.
 
 Use ``logRequest`` for logging the request, or ``logRequestResult`` for logging both.
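
To make the new shape concrete, a small sketch against the updated signature
(illustrative; it mirrors the spec changes above)::

    import akka.event.Logging
    import akka.http.scaladsl.server.RouteResult
    import akka.http.scaladsl.server.directives.{ DebuggingDirectives, LogEntry }

    // completed responses are logged at info level, rejections at warning level
    def resultAsLogEntry(res: RouteResult): LogEntry = res match {
      case RouteResult.Complete(response)   => LogEntry(response.status.toString, Logging.InfoLevel)
      case RouteResult.Rejected(rejections) => LogEntry(rejections.mkString(", "), Logging.WarningLevel)
    }
    val loggedResults = DebuggingDirectives.logResult(resultAsLogEntry _)
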

From 9dc474a10ac1887c9d280904974e91f4f9208299 Mon Sep 17 00:00:00 2001
From: Johan Andrén
Date: Fri, 22 Jul 2016 14:07:41 +0200
Subject: [PATCH 041/155] Pre-fuse http server layer (#20990)

* Ported the first pre-fuse part Endre did in PR #1972

* Allow the same HttpServerBluePrint to materialize multiple times

HttpRequestParser now behaves like a proper GraphStage (with regard to materialization).
HttpResponseParser is kept "weird" to limit the scope of the commit.

* TestClient method to dump a URL with the HTTP client and curl in parallel for comparison

* Cleanup

* tightening down what can be overridden
* tightening down access modifiers
* updates according to review

* Better defaults for the test server

* Oops. Don't listen on public interfaces in the test server by default.
---
 .../test/scala/akka/util/ByteStringSpec.scala |   2 +-
 .../engine/parsing/HttpMessageParser.scala    | 155 ++++-----
 .../engine/parsing/HttpRequestParser.scala    | 297 ++++++++++--------
 .../engine/parsing/HttpResponseParser.scala   |  80 ++++-
 .../engine/server/HttpServerBluePrint.scala   |   6 +-
 .../main/scala/akka/http/scaladsl/Http.scala  |  86 +++--
 .../engine/parsing/RequestParserSpec.scala    |   4 +-
 .../scala/akka/http/scaladsl/TestClient.scala |  57 +++-
 .../scala/akka/http/scaladsl/TestServer.scala |  11 +-
 9 files changed, 423 insertions(+), 275 deletions(-)
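
The idea behind "pre-fuse": fuse the composite server graph once at
construction time so that each incoming connection's materialization skips
the fusing pass. A minimal sketch of that idea with the 2.4-era ``Fusing``
API (illustrative; not the blueprint code itself)::

    import akka.stream.Fusing
    import akka.stream.scaladsl.Flow

    // fuse once, up front; materialize the pre-fused graph many times
    val fusedDoubler = Flow.fromGraph(Fusing.aggressive(Flow[Int].map(_ * 2)))
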

diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
index 0a2894ccc0..b87de033f2 100644
--- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
@@ -414,7 +414,7 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers {
       def excerciseRecombining(xs: ByteString, from: Int, until: Int) = {
         val (tmp, c) = xs.splitAt(until)
         val (a, b) = tmp.splitAt(from)
-        (a ++ b ++ c) should ===(xs) 
+        (a ++ b ++ c) should ===(xs)
       }
       "recombining - edge cases" in {
         excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](1)), ByteString1(Array[Byte](2)))), -2147483648, 112121212)
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala
index 46c997d238..bb6cd112ef 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala
@@ -23,52 +23,35 @@ import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
 
 /**
  * INTERNAL API
+ *
+ * Common logic for HTTP request and response message parsing
  */
-private[http] abstract class HttpMessageParser[Output >: MessageOutput <: ParserOutput](
-  val settings:     ParserSettings,
-  val headerParser: HttpHeaderParser) { self ⇒
-  import HttpMessageParser._
-  import settings._
+private[http] trait HttpMessageParser[Output >: MessageOutput <: ParserOutput] {
 
-  private[this] val result = new ListBuffer[Output]
+  import HttpMessageParser._
+
+  protected final val result = new ListBuffer[Output]
   private[this] var state: ByteString ⇒ StateResult = startNewMessage(_, 0)
   private[this] var protocol: HttpProtocol = `HTTP/1.1`
-  private[this] var completionHandling: CompletionHandling = CompletionOk
-  private[this] var terminated = false
+  protected var completionHandling: CompletionHandling = CompletionOk
+  protected var terminated = false
 
   private[this] var lastSession: SSLSession = null // used to prevent having to recreate header on each message
   private[this] var tlsSessionInfoHeader: `Tls-Session-Info` = null
-  def initialHeaderBuffer: ListBuffer[HttpHeader] =
+
+  protected def settings: ParserSettings
+  protected def headerParser: HttpHeaderParser
+  /** invoked if the specified protocol is unknown */
+  protected def onBadProtocol(): Nothing
+  protected def parseMessage(input: ByteString, offset: Int): HttpMessageParser.StateResult
+  protected def parseEntity(headers: List[HttpHeader], protocol: HttpProtocol, input: ByteString, bodyStart: Int,
+                            clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
+                            expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): HttpMessageParser.StateResult
+
+  protected final def initialHeaderBuffer: ListBuffer[HttpHeader] =
     if (settings.includeTlsSessionInfoHeader && tlsSessionInfoHeader != null) ListBuffer(tlsSessionInfoHeader)
     else ListBuffer()
 
-  // Note that this GraphStage mutates the HttpMessageParser instance, use with caution.
-  val stage = new GraphStage[FlowShape[SessionBytes, Output]] {
-    val in: Inlet[SessionBytes] = Inlet("HttpMessageParser.in")
-    val out: Outlet[Output] = Outlet("HttpMessageParser.out")
-    override val shape: FlowShape[SessionBytes, Output] = FlowShape(in, out)
-
-    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
-      new GraphStageLogic(shape) with InHandler with OutHandler {
-        override def onPush(): Unit = handleParserOutput(self.parseSessionBytes(grab(in)))
-        override def onPull(): Unit = handleParserOutput(self.onPull())
-
-        override def onUpstreamFinish(): Unit =
-          if (self.onUpstreamFinish()) completeStage()
-          else if (isAvailable(out)) handleParserOutput(self.onPull())
-
-        private def handleParserOutput(output: Output): Unit = {
-          output match {
-            case StreamEnd    ⇒ completeStage()
-            case NeedMoreData ⇒ pull(in)
-            case x            ⇒ push(out, x)
-          }
-        }
-
-        setHandlers(in, out, this)
-      }
-  }
-
   final def parseSessionBytes(input: SessionBytes): Output = {
     if (input.session ne lastSession) {
       lastSession = input.session
@@ -93,17 +76,17 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
 
     if (result.nonEmpty) throw new IllegalStateException("Unexpected `onPush`")
     run(state)
-    onPull()
+    doPull()
   }
 
-  final def onPull(): Output =
+  protected final def doPull(): Output =
     if (result.nonEmpty) {
       val head = result.head
       result.remove(0) // faster than `ListBuffer::drop`
       head
     } else if (terminated) StreamEnd else NeedMoreData
 
-  final def onUpstreamFinish(): Boolean = {
+  protected final def shouldComplete(): Boolean = {
     completionHandling() match {
       case Some(x) ⇒ emit(x)
       case None    ⇒ // nothing to do
@@ -118,28 +101,24 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
     catch { case NotEnoughDataException ⇒ continue(input, offset)(startNewMessage) }
   }
 
-  protected def parseMessage(input: ByteString, offset: Int): StateResult
-
-  def parseProtocol(input: ByteString, cursor: Int): Int = {
+  protected final def parseProtocol(input: ByteString, cursor: Int): Int = {
     def c(ix: Int) = byteChar(input, cursor + ix)
     if (c(0) == 'H' && c(1) == 'T' && c(2) == 'T' && c(3) == 'P' && c(4) == '/' && c(5) == '1' && c(6) == '.') {
       protocol = c(7) match {
         case '0' ⇒ `HTTP/1.0`
         case '1' ⇒ `HTTP/1.1`
-        case _   ⇒ badProtocol
+        case _   ⇒ onBadProtocol
       }
       cursor + 8
-    } else badProtocol
+    } else onBadProtocol
   }
 
-  def badProtocol: Nothing
-
-  @tailrec final def parseHeaderLines(input: ByteString, lineStart: Int, headers: ListBuffer[HttpHeader] = initialHeaderBuffer,
-                                      headerCount: Int = 0, ch: Option[Connection] = None,
-                                      clh: Option[`Content-Length`] = None, cth: Option[`Content-Type`] = None,
-                                      teh: Option[`Transfer-Encoding`] = None, e100c: Boolean = false,
-                                      hh: Boolean = false): StateResult =
-    if (headerCount < maxHeaderCount) {
+  @tailrec protected final def parseHeaderLines(input: ByteString, lineStart: Int, headers: ListBuffer[HttpHeader] = initialHeaderBuffer,
+                                                headerCount: Int = 0, ch: Option[Connection] = None,
+                                                clh: Option[`Content-Length`] = None, cth: Option[`Content-Type`] = None,
+                                                teh: Option[`Transfer-Encoding`] = None, e100c: Boolean = false,
+                                                hh: Boolean = false): StateResult =
+    if (headerCount < settings.maxHeaderCount) {
       var lineEnd = 0
       val resultHeader =
         try {
@@ -182,19 +161,15 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
 
         case h         ⇒ parseHeaderLines(input, lineEnd, headers += h, headerCount + 1, ch, clh, cth, teh, e100c, hh)
       }
-    } else failMessageStart(s"HTTP message contains more than the configured limit of $maxHeaderCount headers")
+    } else failMessageStart(s"HTTP message contains more than the configured limit of ${settings.maxHeaderCount} headers")
 
   // work-around for compiler complaining about non-tail-recursion if we inline this method
-  def parseHeaderLinesAux(headers: ListBuffer[HttpHeader], headerCount: Int, ch: Option[Connection],
-                          clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
-                          e100c: Boolean, hh: Boolean)(input: ByteString, lineStart: Int): StateResult =
+  private def parseHeaderLinesAux(headers: ListBuffer[HttpHeader], headerCount: Int, ch: Option[Connection],
+                                  clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
+                                  e100c: Boolean, hh: Boolean)(input: ByteString, lineStart: Int): StateResult =
     parseHeaderLines(input, lineStart, headers, headerCount, ch, clh, cth, teh, e100c, hh)
 
-  def parseEntity(headers: List[HttpHeader], protocol: HttpProtocol, input: ByteString, bodyStart: Int,
-                  clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
-                  expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult
-
-  def parseFixedLengthBody(
+  protected final def parseFixedLengthBody(
     remainingBodyBytes: Long,
     isLastMessage:      Boolean)(input: ByteString, bodyStart: Int): StateResult = {
     val remainingInputBytes = input.length - bodyStart
@@ -213,7 +188,7 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
     } else continue(input, bodyStart)(parseFixedLengthBody(remainingBodyBytes, isLastMessage))
   }
 
-  def parseChunk(input: ByteString, offset: Int, isLastMessage: Boolean, totalBytesRead: Long): StateResult = {
+  protected final def parseChunk(input: ByteString, offset: Int, isLastMessage: Boolean, totalBytesRead: Long): StateResult = {
     @tailrec def parseTrailer(extension: String, lineStart: Int, headers: List[HttpHeader] = Nil,
                               headerCount: Int = 0): StateResult = {
       var errorInfo: ErrorInfo = null
@@ -230,9 +205,9 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
             setCompletionHandling(CompletionOk)
             if (isLastMessage) terminate()
             else startNewMessage(input, lineEnd)
-          case header if headerCount < maxHeaderCount ⇒
+          case header if headerCount < settings.maxHeaderCount ⇒
             parseTrailer(extension, lineEnd, header :: headers, headerCount + 1)
-          case _ ⇒ failEntityStream(s"Chunk trailer contains more than the configured limit of $maxHeaderCount headers")
+          case _ ⇒ failEntityStream(s"Chunk trailer contains more than the configured limit of ${settings.maxHeaderCount} headers")
         }
       } else failEntityStream(errorInfo)
     }
@@ -252,24 +227,24 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
       } else parseTrailer(extension, cursor)
 
     @tailrec def parseChunkExtensions(chunkSize: Int, cursor: Int)(startIx: Int = cursor): StateResult =
-      if (cursor - startIx <= maxChunkExtLength) {
+      if (cursor - startIx <= settings.maxChunkExtLength) {
         def extension = asciiString(input, startIx, cursor)
         byteChar(input, cursor) match {
           case '\r' if byteChar(input, cursor + 1) == '\n' ⇒ parseChunkBody(chunkSize, extension, cursor + 2)
           case '\n' ⇒ parseChunkBody(chunkSize, extension, cursor + 1)
           case _ ⇒ parseChunkExtensions(chunkSize, cursor + 1)(startIx)
         }
-      } else failEntityStream(s"HTTP chunk extension length exceeds configured limit of $maxChunkExtLength characters")
+      } else failEntityStream(s"HTTP chunk extension length exceeds configured limit of ${settings.maxChunkExtLength} characters")
 
     @tailrec def parseSize(cursor: Int, size: Long): StateResult =
-      if (size <= maxChunkSize) {
+      if (size <= settings.maxChunkSize) {
         byteChar(input, cursor) match {
           case c if CharacterClasses.HEXDIG(c) ⇒ parseSize(cursor + 1, size * 16 + CharUtils.hexValue(c))
           case ';' if cursor > offset ⇒ parseChunkExtensions(size.toInt, cursor + 1)()
           case '\r' if cursor > offset && byteChar(input, cursor + 1) == '\n' ⇒ parseChunkBody(size.toInt, "", cursor + 2)
           case c ⇒ failEntityStream(s"Illegal character '${escape(c)}' in chunk start")
         }
-      } else failEntityStream(s"HTTP chunk size exceeds the configured limit of $maxChunkSize bytes")
+      } else failEntityStream(s"HTTP chunk size exceeds the configured limit of ${settings.maxChunkSize} bytes")
 
     try parseSize(offset, 0)
     catch {
@@ -277,9 +252,9 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
     }
   }
 
-  def emit(output: Output): Unit = result += output
+  protected def emit(output: Output): Unit = result += output
 
-  def continue(input: ByteString, offset: Int)(next: (ByteString, Int) ⇒ StateResult): StateResult = {
+  protected final def continue(input: ByteString, offset: Int)(next: (ByteString, Int) ⇒ StateResult): StateResult = {
     state =
       math.signum(offset - input.length) match {
         case -1 ⇒
@@ -291,30 +266,30 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
     done()
   }
 
-  def continue(next: (ByteString, Int) ⇒ StateResult): StateResult = {
+  protected final def continue(next: (ByteString, Int) ⇒ StateResult): StateResult = {
     state = next(_, 0)
     done()
   }
 
-  def failMessageStart(summary: String): StateResult = failMessageStart(summary, "")
-  def failMessageStart(summary: String, detail: String): StateResult = failMessageStart(StatusCodes.BadRequest, summary, detail)
-  def failMessageStart(status: StatusCode): StateResult = failMessageStart(status, status.defaultMessage)
-  def failMessageStart(status: StatusCode, summary: String, detail: String = ""): StateResult = failMessageStart(status, ErrorInfo(summary, detail))
-  def failMessageStart(status: StatusCode, info: ErrorInfo): StateResult = {
+  protected final def failMessageStart(summary: String): StateResult = failMessageStart(summary, "")
+  protected final def failMessageStart(summary: String, detail: String): StateResult = failMessageStart(StatusCodes.BadRequest, summary, detail)
+  protected final def failMessageStart(status: StatusCode): StateResult = failMessageStart(status, status.defaultMessage)
+  protected final def failMessageStart(status: StatusCode, summary: String, detail: String = ""): StateResult = failMessageStart(status, ErrorInfo(summary, detail))
+  protected final def failMessageStart(status: StatusCode, info: ErrorInfo): StateResult = {
     emit(MessageStartError(status, info))
     setCompletionHandling(CompletionOk)
     terminate()
   }
 
-  def failEntityStream(summary: String): StateResult = failEntityStream(summary, "")
-  def failEntityStream(summary: String, detail: String): StateResult = failEntityStream(ErrorInfo(summary, detail))
-  def failEntityStream(info: ErrorInfo): StateResult = {
+  protected final def failEntityStream(summary: String): StateResult = failEntityStream(summary, "")
+  protected final def failEntityStream(summary: String, detail: String): StateResult = failEntityStream(ErrorInfo(summary, detail))
+  protected final def failEntityStream(info: ErrorInfo): StateResult = {
     emit(EntityStreamError(info))
     setCompletionHandling(CompletionOk)
     terminate()
   }
 
-  def terminate(): StateResult = {
+  protected final def terminate(): StateResult = {
     terminated = true
     done()
   }
@@ -325,19 +300,19 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
    */
   private def done(): StateResult = null // StateResult is a phantom type
 
-  def contentType(cth: Option[`Content-Type`]) = cth match {
+  protected final def contentType(cth: Option[`Content-Type`]) = cth match {
     case Some(x) ⇒ x.contentType
     case None    ⇒ ContentTypes.`application/octet-stream`
   }
 
-  def emptyEntity(cth: Option[`Content-Type`]) =
+  protected final def emptyEntity(cth: Option[`Content-Type`]) =
     StrictEntityCreator(if (cth.isDefined) HttpEntity.empty(cth.get.contentType) else HttpEntity.Empty)
 
-  def strictEntity(cth: Option[`Content-Type`], input: ByteString, bodyStart: Int,
-                   contentLength: Int) =
+  protected final def strictEntity(cth: Option[`Content-Type`], input: ByteString, bodyStart: Int,
+                                   contentLength: Int) =
     StrictEntityCreator(HttpEntity.Strict(contentType(cth), input.slice(bodyStart, bodyStart + contentLength)))
 
-  def defaultEntity[A <: ParserOutput](cth: Option[`Content-Type`], contentLength: Long) =
+  protected final def defaultEntity[A <: ParserOutput](cth: Option[`Content-Type`], contentLength: Long) =
     StreamedEntityCreator[A, UniversalEntity] { entityParts ⇒
       val data = entityParts.collect {
         case EntityPart(bytes)       ⇒ bytes
@@ -346,7 +321,7 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
       HttpEntity.Default(contentType(cth), contentLength, HttpEntity.limitableByteSource(data))
     }
 
-  def chunkedEntity[A <: ParserOutput](cth: Option[`Content-Type`]) =
+  protected final def chunkedEntity[A <: ParserOutput](cth: Option[`Content-Type`]) =
     StreamedEntityCreator[A, RequestEntity] { entityChunks ⇒
       val chunks = entityChunks.collect {
         case EntityChunk(chunk)      ⇒ chunk
@@ -355,16 +330,20 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
       HttpEntity.Chunked(contentType(cth), HttpEntity.limitableChunkSource(chunks))
     }
 
-  def addTransferEncodingWithChunkedPeeled(headers: List[HttpHeader], teh: `Transfer-Encoding`): List[HttpHeader] =
+  protected final def addTransferEncodingWithChunkedPeeled(headers: List[HttpHeader], teh: `Transfer-Encoding`): List[HttpHeader] =
     teh.withChunkedPeeled match {
       case Some(x) ⇒ x :: headers
       case None    ⇒ headers
     }
 
-  def setCompletionHandling(completionHandling: CompletionHandling): Unit =
+  protected final def setCompletionHandling(completionHandling: CompletionHandling): Unit =
     this.completionHandling = completionHandling
+
 }
 
+/**
+ * INTERNAL API
+ */
 private[http] object HttpMessageParser {
   sealed trait StateResult // phantom type for ensuring soundness of our parsing method setup
   final case class Trampoline(f: ByteString ⇒ StateResult) extends StateResult
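
The ``StateResult`` phantom type is worth a note: every state-transition
method must return it, so the compiler forces each branch to either install
a continuation or terminate, while the value itself is never inspected. A
standalone sketch of the trick (names are illustrative)::

    object PhantomStateMachine {
      sealed trait StateResult // compile-time marker only; never inspected

      private var state: Int => StateResult = parseHeader

      // every branch must end in continue(...) or done(), or it won't compile
      private def parseHeader(byte: Int): StateResult =
        if (byte == 0) done() else continue(parseBody)

      private def parseBody(byte: Int): StateResult = done()

      private def continue(next: Int => StateResult): StateResult = {
        state = next
        done()
      }
      private def done(): StateResult = null // safe: callers never use the value

      def feed(byte: Int): Unit = state(byte)
    }
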
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
index cfb40519b5..227bc1bb53 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
@@ -5,7 +5,8 @@
 package akka.http.impl.engine.parsing
 
 import java.lang.{ StringBuilder ⇒ JStringBuilder }
-import scala.annotation.tailrec
+
+import scala.annotation.{ switch, tailrec }
 import akka.http.scaladsl.settings.ParserSettings
 import akka.util.ByteString
 import akka.http.impl.engine.ws.Handshake
@@ -14,160 +15,196 @@ import akka.http.scaladsl.model._
 import headers._
 import StatusCodes._
 import ParserOutput._
+import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
+import akka.stream.TLSProtocol.SessionBytes
+import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
 
 /**
  * INTERNAL API
  */
-private[http] class HttpRequestParser(
-  _settings:           ParserSettings,
+private[http] final class HttpRequestParser(
+  settings:            ParserSettings,
   rawRequestUriHeader: Boolean,
-  _headerParser:       HttpHeaderParser)
-  extends HttpMessageParser[RequestOutput](_settings, _headerParser) {
+  headerParser:        HttpHeaderParser)
+  extends GraphStage[FlowShape[SessionBytes, RequestOutput]] { self ⇒
+
   import HttpMessageParser._
   import settings._
 
-  private[this] var method: HttpMethod = _
-  private[this] var uri: Uri = _
-  private[this] var uriBytes: Array[Byte] = _
+  val in = Inlet[SessionBytes]("HttpRequestParser.in")
+  val out = Outlet[RequestOutput]("HttpRequestParser.out")
 
-  def createShallowCopy(): HttpRequestParser =
-    new HttpRequestParser(settings, rawRequestUriHeader, headerParser.createShallowCopy())
+  val shape = FlowShape.of(in, out)
 
-  def parseMessage(input: ByteString, offset: Int): StateResult = {
-    var cursor = parseMethod(input, offset)
-    cursor = parseRequestTarget(input, cursor)
-    cursor = parseProtocol(input, cursor)
-    if (byteChar(input, cursor) == '\r' && byteChar(input, cursor + 1) == '\n')
-      parseHeaderLines(input, cursor + 2)
-    else badProtocol
-  }
+  override protected def initialAttributes: Attributes = Attributes.name("HttpRequestParser")
 
-  def parseMethod(input: ByteString, cursor: Int): Int = {
-    @tailrec def parseCustomMethod(ix: Int = 0, sb: JStringBuilder = new JStringBuilder(16)): Int =
-      if (ix < maxMethodLength) {
-        byteChar(input, cursor + ix) match {
-          case ' ' ⇒
-            customMethods(sb.toString) match {
-              case Some(m) ⇒
-                method = m
-                cursor + ix + 1
-              case None ⇒ throw new ParsingException(NotImplemented, ErrorInfo("Unsupported HTTP method", sb.toString))
-            }
-          case c ⇒ parseCustomMethod(ix + 1, sb.append(c))
+  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with HttpMessageParser[RequestOutput] with InHandler with OutHandler {
+
+    import HttpMessageParser._
+
+    override val settings = self.settings
+    override val headerParser = self.headerParser.createShallowCopy()
+
+    private[this] var method: HttpMethod = _
+    private[this] var uri: Uri = _
+    private[this] var uriBytes: Array[Byte] = _
+
+    override def onPush(): Unit = handleParserOutput(parseSessionBytes(grab(in)))
+    override def onPull(): Unit = handleParserOutput(doPull())
+
+    override def onUpstreamFinish(): Unit =
+      if (super.shouldComplete()) completeStage()
+      else if (isAvailable(out)) handleParserOutput(doPull())
+
+    setHandlers(in, out, this)
+
+    private def handleParserOutput(output: RequestOutput): Unit = {
+      output match {
+        case StreamEnd    ⇒ completeStage()
+        case NeedMoreData ⇒ pull(in)
+        case x            ⇒ push(out, x)
+      }
+    }
+
+    override def parseMessage(input: ByteString, offset: Int): StateResult = {
+      var cursor = parseMethod(input, offset)
+      cursor = parseRequestTarget(input, cursor)
+      cursor = parseProtocol(input, cursor)
+      if (byteChar(input, cursor) == '\r' && byteChar(input, cursor + 1) == '\n')
+        parseHeaderLines(input, cursor + 2)
+      else onBadProtocol
+    }
+
+    def parseMethod(input: ByteString, cursor: Int): Int = {
+      @tailrec def parseCustomMethod(ix: Int = 0, sb: JStringBuilder = new JStringBuilder(16)): Int =
+        if (ix < maxMethodLength) {
+          byteChar(input, cursor + ix) match {
+            case ' ' ⇒
+              customMethods(sb.toString) match {
+                case Some(m) ⇒
+                  method = m
+                  cursor + ix + 1
+                case None ⇒ throw new ParsingException(NotImplemented, ErrorInfo("Unsupported HTTP method", sb.toString))
+              }
+            case c ⇒ parseCustomMethod(ix + 1, sb.append(c))
+          }
+        } else throw new ParsingException(
+          BadRequest,
+          ErrorInfo("Unsupported HTTP method", s"HTTP method too long (started with '${sb.toString}'). " +
+            "Increase `akka.http.server.parsing.max-method-length` to support HTTP methods with more characters."))
+
+      @tailrec def parseMethod(meth: HttpMethod, ix: Int = 1): Int =
+        if (ix == meth.value.length)
+          if (byteChar(input, cursor + ix) == ' ') {
+            method = meth
+            cursor + ix + 1
+          } else parseCustomMethod()
+        else if (byteChar(input, cursor + ix) == meth.value.charAt(ix)) parseMethod(meth, ix + 1)
+        else parseCustomMethod()
+
+      import HttpMethods._
+      (byteChar(input, cursor): @switch) match {
+        case 'G' ⇒ parseMethod(GET)
+        case 'P' ⇒ byteChar(input, cursor + 1) match {
+          case 'O' ⇒ parseMethod(POST, 2)
+          case 'U' ⇒ parseMethod(PUT, 2)
+          case 'A' ⇒ parseMethod(PATCH, 2)
+          case _   ⇒ parseCustomMethod()
         }
-      } else throw new ParsingException(
-        BadRequest,
-        ErrorInfo("Unsupported HTTP method", s"HTTP method too long (started with '${sb.toString}'). " +
-          "Increase `akka.http.server.parsing.max-method-length` to support HTTP methods with more characters."))
-
-    @tailrec def parseMethod(meth: HttpMethod, ix: Int = 1): Int =
-      if (ix == meth.value.length)
-        if (byteChar(input, cursor + ix) == ' ') {
-          method = meth
-          cursor + ix + 1
-        } else parseCustomMethod()
-      else if (byteChar(input, cursor + ix) == meth.value.charAt(ix)) parseMethod(meth, ix + 1)
-      else parseCustomMethod()
-
-    import HttpMethods._
-    byteChar(input, cursor) match {
-      case 'G' ⇒ parseMethod(GET)
-      case 'P' ⇒ byteChar(input, cursor + 1) match {
-        case 'O' ⇒ parseMethod(POST, 2)
-        case 'U' ⇒ parseMethod(PUT, 2)
-        case 'A' ⇒ parseMethod(PATCH, 2)
+        case 'D' ⇒ parseMethod(DELETE)
+        case 'H' ⇒ parseMethod(HEAD)
+        case 'O' ⇒ parseMethod(OPTIONS)
+        case 'T' ⇒ parseMethod(TRACE)
+        case 'C' ⇒ parseMethod(CONNECT)
         case _   ⇒ parseCustomMethod()
       }
-      case 'D' ⇒ parseMethod(DELETE)
-      case 'H' ⇒ parseMethod(HEAD)
-      case 'O' ⇒ parseMethod(OPTIONS)
-      case 'T' ⇒ parseMethod(TRACE)
-      case 'C' ⇒ parseMethod(CONNECT)
-      case _   ⇒ parseCustomMethod()
     }
-  }
 
-  def parseRequestTarget(input: ByteString, cursor: Int): Int = {
-    val uriStart = cursor
-    val uriEndLimit = cursor + maxUriLength
+    def parseRequestTarget(input: ByteString, cursor: Int): Int = {
+      val uriStart = cursor
+      val uriEndLimit = cursor + maxUriLength
 
-    @tailrec def findUriEnd(ix: Int = cursor): Int =
-      if (ix == input.length) throw NotEnoughDataException
-      else if (CharacterClasses.WSPCRLF(input(ix).toChar)) ix
-      else if (ix < uriEndLimit) findUriEnd(ix + 1)
-      else throw new ParsingException(
-        RequestUriTooLong,
-        s"URI length exceeds the configured limit of $maxUriLength characters")
+      @tailrec def findUriEnd(ix: Int = cursor): Int =
+        if (ix == input.length) throw NotEnoughDataException
+        else if (CharacterClasses.WSPCRLF(input(ix).toChar)) ix
+        else if (ix < uriEndLimit) findUriEnd(ix + 1)
+        else throw new ParsingException(
+          RequestUriTooLong,
+          s"URI length exceeds the configured limit of $maxUriLength characters")
 
-    val uriEnd = findUriEnd()
-    try {
-      uriBytes = input.slice(uriStart, uriEnd).toArray[Byte] // TODO: can we reduce allocations here?
-      uri = Uri.parseHttpRequestTarget(uriBytes, mode = uriParsingMode) // TODO ByteStringParserInput?
-    } catch {
-      case IllegalUriException(info) ⇒ throw new ParsingException(BadRequest, info)
+      val uriEnd = findUriEnd()
+      try {
+        uriBytes = input.slice(uriStart, uriEnd).toArray[Byte] // TODO: can we reduce allocations here?
+        uri = Uri.parseHttpRequestTarget(uriBytes, mode = uriParsingMode) // TODO ByteStringParserInput?
+      } catch {
+        case IllegalUriException(info) ⇒ throw new ParsingException(BadRequest, info)
+      }
+      uriEnd + 1
     }
-    uriEnd + 1
-  }
 
-  def badProtocol = throw new ParsingException(HTTPVersionNotSupported)
+    override def onBadProtocol() = throw new ParsingException(HTTPVersionNotSupported)
 
-  // http://tools.ietf.org/html/rfc7230#section-3.3
-  def parseEntity(headers: List[HttpHeader], protocol: HttpProtocol, input: ByteString, bodyStart: Int,
-                  clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
-                  expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult =
-    if (hostHeaderPresent || protocol == HttpProtocols.`HTTP/1.0`) {
-      def emitRequestStart(
-        createEntity: EntityCreator[RequestOutput, RequestEntity],
-        headers:      List[HttpHeader]                            = headers) = {
-        val allHeaders0 =
-          if (rawRequestUriHeader) `Raw-Request-URI`(new String(uriBytes, HttpCharsets.`US-ASCII`.nioCharset)) :: headers
-          else headers
+    // http://tools.ietf.org/html/rfc7230#section-3.3
+    override def parseEntity(headers: List[HttpHeader], protocol: HttpProtocol, input: ByteString, bodyStart: Int,
+                             clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
+                             expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult =
+      if (hostHeaderPresent || protocol == HttpProtocols.`HTTP/1.0`) {
+        def emitRequestStart(
+          createEntity: EntityCreator[RequestOutput, RequestEntity],
+          headers:      List[HttpHeader]                            = headers) = {
+          val allHeaders0 =
+            if (rawRequestUriHeader) `Raw-Request-URI`(new String(uriBytes, HttpCharsets.`US-ASCII`.nioCharset)) :: headers
+            else headers
 
-        val allHeaders =
-          if (method == HttpMethods.GET) {
-            Handshake.Server.websocketUpgrade(headers, hostHeaderPresent) match {
-              case Some(upgrade) ⇒ upgrade :: allHeaders0
-              case None          ⇒ allHeaders0
+          val allHeaders =
+            if (method == HttpMethods.GET) {
+              Handshake.Server.websocketUpgrade(headers, hostHeaderPresent) match {
+                case Some(upgrade) ⇒ upgrade :: allHeaders0
+                case None          ⇒ allHeaders0
+              }
+            } else allHeaders0
+
+          emit(RequestStart(method, uri, protocol, allHeaders, createEntity, expect100continue, closeAfterResponseCompletion))
+        }
+
+        teh match {
+          case None ⇒
+            val contentLength = clh match {
+              case Some(`Content-Length`(len)) ⇒ len
+              case None                        ⇒ 0
+            }
+            if (contentLength == 0) {
+              emitRequestStart(emptyEntity(cth))
+              setCompletionHandling(HttpMessageParser.CompletionOk)
+              startNewMessage(input, bodyStart)
+            } else if (!method.isEntityAccepted) {
+              failMessageStart(UnprocessableEntity, s"${method.name} requests must not have an entity")
+            } else if (contentLength <= input.size - bodyStart) {
+              val cl = contentLength.toInt
+              emitRequestStart(strictEntity(cth, input, bodyStart, cl))
+              setCompletionHandling(HttpMessageParser.CompletionOk)
+              startNewMessage(input, bodyStart + cl)
+            } else {
+              emitRequestStart(defaultEntity(cth, contentLength))
+              parseFixedLengthBody(contentLength, closeAfterResponseCompletion)(input, bodyStart)
             }
-          } else allHeaders0
 
-        emit(RequestStart(method, uri, protocol, allHeaders, createEntity, expect100continue, closeAfterResponseCompletion))
-      }
-
-      teh match {
-        case None ⇒
-          val contentLength = clh match {
-            case Some(`Content-Length`(len)) ⇒ len
-            case None                        ⇒ 0
-          }
-          if (contentLength == 0) {
-            emitRequestStart(emptyEntity(cth))
-            setCompletionHandling(HttpMessageParser.CompletionOk)
-            startNewMessage(input, bodyStart)
-          } else if (!method.isEntityAccepted) {
+          case Some(_) if !method.isEntityAccepted ⇒
             failMessageStart(UnprocessableEntity, s"${method.name} requests must not have an entity")
-          } else if (contentLength <= input.size - bodyStart) {
-            val cl = contentLength.toInt
-            emitRequestStart(strictEntity(cth, input, bodyStart, cl))
-            setCompletionHandling(HttpMessageParser.CompletionOk)
-            startNewMessage(input, bodyStart + cl)
-          } else {
-            emitRequestStart(defaultEntity(cth, contentLength))
-            parseFixedLengthBody(contentLength, closeAfterResponseCompletion)(input, bodyStart)
-          }
 
-        case Some(_) if !method.isEntityAccepted ⇒
-          failMessageStart(UnprocessableEntity, s"${method.name} requests must not have an entity")
+          case Some(te) ⇒
+            val completedHeaders = addTransferEncodingWithChunkedPeeled(headers, te)
+            if (te.isChunked) {
+              if (clh.isEmpty) {
+                emitRequestStart(chunkedEntity(cth), completedHeaders)
+                parseChunk(input, bodyStart, closeAfterResponseCompletion, totalBytesRead = 0L)
+              } else failMessageStart("A chunked request must not contain a Content-Length header.")
+            } else parseEntity(completedHeaders, protocol, input, bodyStart, clh, cth, teh = None,
+              expect100continue, hostHeaderPresent, closeAfterResponseCompletion)
+        }
+      } else failMessageStart("Request is missing required `Host` header")
 
-        case Some(te) ⇒
-          val completedHeaders = addTransferEncodingWithChunkedPeeled(headers, te)
-          if (te.isChunked) {
-            if (clh.isEmpty) {
-              emitRequestStart(chunkedEntity(cth), completedHeaders)
-              parseChunk(input, bodyStart, closeAfterResponseCompletion, totalBytesRead = 0L)
-            } else failMessageStart("A chunked request must not contain a Content-Length header.")
-          } else parseEntity(completedHeaders, protocol, input, bodyStart, clh, cth, teh = None,
-            expect100continue, hostHeaderPresent, closeAfterResponseCompletion)
-      }
-    } else failMessageStart("Request is missing required `Host` header")
+  }
+
+  override def toString: String = "HttpRequestParser"
 }
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala
index 2d65fe6c2d..ca8683f2b2 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala
@@ -13,12 +13,15 @@ import akka.util.ByteString
 import akka.http.scaladsl.model._
 import headers._
 import ParserOutput._
+import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
+import akka.stream.TLSProtocol.SessionBytes
+import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
 
 /**
  * INTERNAL API
  */
-private[http] class HttpResponseParser(_settings: ParserSettings, _headerParser: HttpHeaderParser)
-  extends HttpMessageParser[ResponseOutput](_settings, _headerParser) {
+private[http] class HttpResponseParser(protected val settings: ParserSettings, protected val headerParser: HttpHeaderParser)
+  extends HttpMessageParser[ResponseOutput] { self ⇒
   import HttpResponseParser._
   import HttpMessageParser._
   import settings._
@@ -26,31 +29,74 @@ private[http] class HttpResponseParser(_settings: ParserSettings, _headerParser:
   private[this] var contextForCurrentResponse: Option[ResponseContext] = None
   private[this] var statusCode: StatusCode = StatusCodes.OK
 
-  def createShallowCopy(): HttpResponseParser = new HttpResponseParser(settings, headerParser.createShallowCopy())
+  // Note that this GraphStage mutates the HttpMessageParser instance, use with caution.
+  final val stage = new GraphStage[FlowShape[SessionBytes, ResponseOutput]] {
+    val in: Inlet[SessionBytes] = Inlet("HttpResponseParser.in")
+    val out: Outlet[ResponseOutput] = Outlet("HttpResponseParser.out")
+    override val shape: FlowShape[SessionBytes, ResponseOutput] = FlowShape(in, out)
 
-  def setContextForNextResponse(responseContext: ResponseContext): Unit =
+    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
+      new GraphStageLogic(shape) with InHandler with OutHandler {
+        override def onPush(): Unit = handleParserOutput(self.parseSessionBytes(grab(in)))
+        override def onPull(): Unit = handleParserOutput(self.onPull())
+
+        override def onUpstreamFinish(): Unit =
+          if (self.onUpstreamFinish()) completeStage()
+          else if (isAvailable(out)) handleParserOutput(self.onPull())
+
+        private def handleParserOutput(output: ResponseOutput): Unit = {
+          output match {
+            case StreamEnd    ⇒ completeStage()
+            case NeedMoreData ⇒ pull(in)
+            case x            ⇒ push(out, x)
+          }
+        }
+
+        setHandlers(in, out, this)
+      }
+  }
+
+  final def createShallowCopy(): HttpResponseParser = new HttpResponseParser(settings, headerParser.createShallowCopy())
+
+  final def setContextForNextResponse(responseContext: ResponseContext): Unit =
     if (contextForCurrentResponse.isEmpty) contextForCurrentResponse = Some(responseContext)
 
-  protected def parseMessage(input: ByteString, offset: Int): StateResult =
+  final def onPull(): ResponseOutput =
+    if (result.nonEmpty) {
+      val head = result.head
+      result.remove(0) // faster than `ListBuffer::drop`
+      head
+    } else if (terminated) StreamEnd else NeedMoreData
+
+  final def onUpstreamFinish(): Boolean = {
+    completionHandling() match {
+      case Some(x) ⇒ emit(x)
+      case None    ⇒ // nothing to do
+    }
+    terminated = true
+    result.isEmpty
+  }
+
+  override final def emit(output: ResponseOutput): Unit = {
+    if (output == MessageEnd) contextForCurrentResponse = None
+    super.emit(output)
+  }
+
+  override protected def parseMessage(input: ByteString, offset: Int): StateResult =
     if (contextForCurrentResponse.isDefined) {
       var cursor = parseProtocol(input, offset)
       if (byteChar(input, cursor) == ' ') {
         cursor = parseStatus(input, cursor + 1)
         parseHeaderLines(input, cursor)
-      } else badProtocol
+      } else onBadProtocol()
     } else {
       emit(NeedNextRequestMethod)
       continue(input, offset)(startNewMessage)
     }
 
-  override def emit(output: ResponseOutput): Unit = {
-    if (output == MessageEnd) contextForCurrentResponse = None
-    super.emit(output)
-  }
+  override final def onBadProtocol() = throw new ParsingException("The server-side HTTP version is not supported")
 
-  def badProtocol = throw new ParsingException("The server-side HTTP version is not supported")
-
-  def parseStatus(input: ByteString, cursor: Int): Int = {
+  private def parseStatus(input: ByteString, cursor: Int): Int = {
     def badStatusCode = throw new ParsingException("Illegal response status code")
     def parseStatusCode() = {
       def intValue(offset: Int): Int = {
@@ -84,9 +130,9 @@ private[http] class HttpResponseParser(_settings: ParserSettings, _headerParser:
   def handleInformationalResponses: Boolean = true
 
   // http://tools.ietf.org/html/rfc7230#section-3.3
-  def parseEntity(headers: List[HttpHeader], protocol: HttpProtocol, input: ByteString, bodyStart: Int,
-                  clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
-                  expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult = {
+  protected final def parseEntity(headers: List[HttpHeader], protocol: HttpProtocol, input: ByteString, bodyStart: Int,
+                                  clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
+                                  expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult = {
 
     def emitResponseStart(
       createEntity: EntityCreator[ResponseOutput, ResponseEntity],
@@ -161,7 +207,7 @@ private[http] class HttpResponseParser(_settings: ParserSettings, _headerParser:
     } else finishEmptyResponse()
   }
 
-  def parseToCloseBody(input: ByteString, bodyStart: Int, totalBytesRead: Long): StateResult = {
+  private def parseToCloseBody(input: ByteString, bodyStart: Int, totalBytesRead: Long): StateResult = {
     val newTotalBytes = totalBytesRead + math.max(0, input.length - bodyStart)
     if (input.length > bodyStart)
       emit(EntityPart(input.drop(bodyStart).compact))
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
index 9e6f78fd4a..bcc28bc828 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
@@ -228,11 +228,7 @@ private[http] object HttpServerBluePrint {
       case x ⇒ x
     }
 
-    Flow[SessionBytes].via(
-      // each connection uses a single (private) request parser instance for all its requests
-      // which builds a cache of all header instances seen on that connection
-      rootParser.createShallowCopy().stage).named("rootParser")
-      .map(establishAbsoluteUri)
+    Flow[SessionBytes].via(rootParser).map(establishAbsoluteUri)
   }
 
   def rendering(settings: ServerSettings, log: LoggingAdapter): Flow[ResponseRenderingContext, ResponseRenderingOutput, NotUsed] = {
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala
index 248f70eb2d..f3b014a2dd 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala
@@ -9,6 +9,7 @@ import java.util.concurrent.CompletionStage
 import javax.net.ssl._
 
 import akka.actor._
+import akka.dispatch.ExecutionContexts
 import akka.event.{ Logging, LoggingAdapter }
 import akka.http.impl.engine.HttpConnectionTimeoutException
 import akka.http.impl.engine.client.PoolMasterActor.{ PoolSize, ShutdownAll }
@@ -26,6 +27,7 @@ import akka.{ Done, NotUsed }
 import akka.stream._
 import akka.stream.TLSProtocol._
 import akka.stream.scaladsl._
+import akka.util.ByteString
 import com.typesafe.config.Config
 import com.typesafe.sslconfig.akka._
 import com.typesafe.sslconfig.akka.util.AkkaLoggerFactory
@@ -55,6 +57,27 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
 
   private[this] final val DefaultPortForProtocol = -1 // any negative value
 
+  private def fuseServerLayer(settings: ServerSettings, connectionContext: ConnectionContext, log: LoggingAdapter)(implicit mat: Materializer): BidiFlow[HttpResponse, ByteString, ByteString, HttpRequest, NotUsed] = {
+    val httpLayer = serverLayer(settings, None, log)
+    val tlsStage = sslTlsStage(connectionContext, Server)
+    BidiFlow.fromGraph(Fusing.aggressive(GraphDSL.create() { implicit b ⇒
+      import GraphDSL.Implicits._
+      val http = b.add(httpLayer)
+      val tls = b.add(tlsStage)
+
+      val timeouts = b.add(Flow[ByteString].recover {
+        case t: TimeoutException ⇒ throw new HttpConnectionTimeoutException(t.getMessage)
+      })
+
+      tls.out2 ~> http.in2
+      tls.in1 <~ http.out1
+
+      tls.out1 ~> timeouts.in
+
+      BidiShape(http.in1, timeouts.out, tls.in2, http.out2)
+    }))
+  }
+
   /**
    * Creates a [[akka.stream.scaladsl.Source]] of [[akka.http.scaladsl.Http.IncomingConnection]] instances which represents a prospective HTTP server binding
    * on the given `endpoint`.
@@ -81,14 +104,14 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
            settings:          ServerSettings    = ServerSettings(system),
            log:               LoggingAdapter    = system.log)(implicit fm: Materializer): Source[IncomingConnection, Future[ServerBinding]] = {
     val effectivePort = if (port >= 0) port else connectionContext.defaultPort
-    val tlsStage = sslTlsStage(connectionContext, Server)
+
+    val fullLayer = fuseServerLayer(settings, connectionContext, log)
+
     val connections: Source[Tcp.IncomingConnection, Future[Tcp.ServerBinding]] =
       Tcp().bind(interface, effectivePort, settings.backlog, settings.socketOptions, halfClose = false, settings.timeouts.idleTimeout)
     connections.map {
       case Tcp.IncomingConnection(localAddress, remoteAddress, flow) ⇒
-        val layer = serverLayer(settings, Some(remoteAddress), log)
-        val flowWithTimeoutRecovered = flow.via(MapError { case t: TimeoutException ⇒ new HttpConnectionTimeoutException(t.getMessage) })
-        IncomingConnection(localAddress, remoteAddress, layer atop tlsStage join flowWithTimeoutRecovered)
+        IncomingConnection(localAddress, remoteAddress, fullLayer join flow)
     }.mapMaterializedValue {
       _.map(tcpBinding ⇒ ServerBinding(tcpBinding.localAddress)(() ⇒ tcpBinding.unbind()))(fm.executionContext)
     }
@@ -110,30 +133,39 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
     connectionContext: ConnectionContext = defaultServerHttpContext,
     settings:          ServerSettings    = ServerSettings(system),
     log:               LoggingAdapter    = system.log)(implicit fm: Materializer): Future[ServerBinding] = {
-    def handleOneConnection(incomingConnection: IncomingConnection): Future[Done] =
-      try
-        incomingConnection.flow
-          .watchTermination()(Keep.right)
-          .joinMat(handler)(Keep.left)
-          .run()
-      catch {
-        case NonFatal(e) ⇒
-          log.error(e, "Could not materialize handling flow for {}", incomingConnection)
-          throw e
-      }
+    val effectivePort = if (port >= 0) port else connectionContext.defaultPort
+
+    val fullLayer: Flow[ByteString, ByteString, Future[Done]] = Flow.fromGraph(Fusing.aggressive(
+      Flow[HttpRequest]
+        .watchTermination()(Keep.right)
+        .viaMat(handler)(Keep.left)
+        .joinMat(fuseServerLayer(settings, connectionContext, log))(Keep.left)))
+
+    val connections: Source[Tcp.IncomingConnection, Future[Tcp.ServerBinding]] =
+      Tcp().bind(interface, effectivePort, settings.backlog, settings.socketOptions, halfClose = false, settings.timeouts.idleTimeout)
+
+    connections.mapAsyncUnordered(settings.maxConnections) {
+      case incoming: Tcp.IncomingConnection ⇒
+        try {
+          fullLayer.addAttributes(HttpAttributes.remoteAddress(Some(incoming.remoteAddress)))
+            .joinMat(incoming.flow)(Keep.left)
+            .run().recover {
+              // Ignore incoming errors from the connection as they will cancel the binding.
+              // As far as it is known currently, these errors can only happen if a TCP error bubbles up
+              // from the TCP layer through the HTTP layer to the Http.IncomingConnection.flow.
+              // See https://github.com/akka/akka/issues/17992
+              case NonFatal(ex) ⇒
+                Done
+            }(ExecutionContexts.sameThreadExecutionContext)
+        } catch {
+          case NonFatal(e) ⇒
+            log.error(e, "Could not materialize handling flow for {}", incoming)
+            throw e
+        }
+    }.mapMaterializedValue {
+      _.map(tcpBinding ⇒ ServerBinding(tcpBinding.localAddress)(() ⇒ tcpBinding.unbind()))(fm.executionContext)
+    }.to(Sink.ignore).run()
 
-    bind(interface, port, connectionContext, settings, log)
-      .mapAsyncUnordered(settings.maxConnections) { connection ⇒
-        handleOneConnection(connection).recoverWith {
-          // Ignore incoming errors from the connection as they will cancel the binding.
-          // As far as it is known currently, these errors can only happen if a TCP error bubbles up
-          // from the TCP layer through the HTTP layer to the Http.IncomingConnection.flow.
-          // See https://github.com/akka/akka/issues/17992
-          case NonFatal(_) ⇒ Future.successful(())
-        }(fm.executionContext)
-      }
-      .to(Sink.ignore)
-      .run()
   }
 
   /**
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
index 17dc6ae6bc..c53f6cbdbf 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
@@ -300,7 +300,7 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
 
     "support `rawRequestUriHeader` setting" in new Test {
       override protected def newParser: HttpRequestParser =
-        new HttpRequestParser(parserSettings, rawRequestUriHeader = true, _headerParser = HttpHeaderParser(parserSettings)())
+        new HttpRequestParser(parserSettings, rawRequestUriHeader = true, headerParser = HttpHeaderParser(parserSettings)())
 
       """GET /f%6f%6fbar?q=b%61z HTTP/1.1
         |Host: ping
@@ -557,7 +557,7 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
     def multiParse(parser: HttpRequestParser)(input: Seq[String]): Seq[Either[RequestOutput, StrictEqualHttpRequest]] =
       Source(input.toList)
         .map(bytes ⇒ SessionBytes(TLSPlacebo.dummySession, ByteString(bytes)))
-        .via(parser.stage).named("parser")
+        .via(parser).named("parser")
         .splitWhen(x ⇒ x.isInstanceOf[MessageStart] || x.isInstanceOf[EntityStreamError])
         .prefixAndTail(1)
         .collect {
diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/TestClient.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/TestClient.scala
index 771280ae72..7f593ef718 100644
--- a/akka-http-core/src/test/scala/akka/http/scaladsl/TestClient.scala
+++ b/akka-http-core/src/test/scala/akka/http/scaladsl/TestClient.scala
@@ -4,13 +4,22 @@
 
 package akka.http.scaladsl
 
+import java.io.File
+import java.nio.file.spi.FileSystemProvider
+import java.nio.file.{ FileSystem, Path }
+
 import com.typesafe.config.{ Config, ConfigFactory }
+
 import scala.util.{ Failure, Success }
-import akka.actor.{ UnhandledMessage, ActorSystem }
-import akka.stream.ActorMaterializer
-import akka.stream.scaladsl.{ Sink, Source }
+import akka.actor.{ ActorSystem, UnhandledMessage }
+import akka.stream.{ ActorMaterializer, IOResult }
+import akka.stream.scaladsl.{ FileIO, Sink, Source }
 import akka.http.scaladsl.model._
 import akka.http.impl.util._
+import akka.util.ByteString
+
+import scala.concurrent.{ Await, Future }
+import scala.concurrent.duration._
 
 object TestClient extends App {
   val testConf: Config = ConfigFactory.parseString("""
@@ -62,5 +71,47 @@ object TestClient extends App {
     }
   }
 
+  // for gathering dumps of entity and headers from akka http client
+  // and curl in parallel to compare
+  def fetchAndStoreABunchOfUrlsWithHttpAndCurl(urls: Seq[String]): Unit = {
+    assert(urls.nonEmpty)
+    assert(new File("/tmp/client-dumps/").exists(), "you need to create /tmp/client-dumps/ before running")
+
+    val testConf: Config = ConfigFactory.parseString("""
+    akka.loglevel = DEBUG
+    akka.log-dead-letters = off
+    akka.io.tcp.trace-logging = off""")
+    implicit val system = ActorSystem("ServerTest", testConf)
+    implicit val fm = ActorMaterializer()
+    import system.dispatcher
+
+    try {
+      val done = Future.traverse(urls.zipWithIndex) {
+        case (url, index) ⇒
+          Http().singleRequest(HttpRequest(uri = url)).map { response ⇒
+
+            val path = new File(s"/tmp/client-dumps/akka-body-$index.dump").toPath
+            val headersPath = new File(s"/tmp/client-dumps/akka-headers-$index.dump").toPath
+
+            import scala.sys.process._
+            (s"""curl -D /tmp/client-dumps/curl-headers-$index.dump $url""" #> new File(s"/tmp/client-dumps/curl-body-$index.dump")).!
+
+            val headers = Source(response.headers).map(header ⇒ ByteString(header.name + ": " + header.value + "\n"))
+              .runWith(FileIO.toPath(headersPath))
+
+            val body = response.entity.dataBytes
+              .runWith(FileIO.toPath(path))
+              .map(res ⇒ (url, path, res)): Future[(String, Path, IOResult)]
+
+            headers.flatMap(_ ⇒ body)
+          }
+      }
+
+      println("Fetched urls: " + Await.result(done, 10.minutes))
+    } finally {
+      Http().shutdownAllConnectionPools().flatMap(_ ⇒ system.terminate())
+    }
+  }
+
   def shutdown(): Unit = system.terminate()
 }
\ No newline at end of file
diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/TestServer.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/TestServer.scala
index 0176913e0d..071b46cd5b 100644
--- a/akka-http-core/src/test/scala/akka/http/scaladsl/TestServer.scala
+++ b/akka-http-core/src/test/scala/akka/http/scaladsl/TestServer.scala
@@ -13,7 +13,7 @@ import scala.concurrent.Await
 import akka.actor.ActorSystem
 import akka.http.scaladsl.model._
 import akka.http.scaladsl.model.ws._
-import akka.stream.ActorMaterializer
+import akka.stream._
 import akka.stream.scaladsl.{ Source, Flow }
 import com.typesafe.config.{ ConfigFactory, Config }
 import HttpMethods._
@@ -23,10 +23,17 @@ object TestServer extends App {
     akka.loglevel = INFO
     akka.log-dead-letters = off
     akka.stream.materializer.debug.fuzzing-mode = off
+    akka.actor.serialize-creators = off
+    akka.actor.serialize-messages = off
+    akka.actor.default-dispatcher.throughput = 1000
     """)
   implicit val system = ActorSystem("ServerTest", testConf)
-  implicit val fm = ActorMaterializer()
 
+  val settings = ActorMaterializerSettings(system)
+    .withFuzzing(false)
+    //    .withSyncProcessingLimit(Int.MaxValue)
+    .withInputBuffer(128, 128)
+  implicit val fm = ActorMaterializer(settings)
   try {
     val binding = Http().bindAndHandleSync({
       case req @ HttpRequest(GET, Uri.Path("/"), _, _, _) if req.header[UpgradeToWebSocket].isDefined ⇒

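The server-side change above builds the complete HTTP/TLS server layer once per `bind`/`bindAndHandle` call and pre-fuses it with `Fusing.aggressive`, rather than assembling and fusing a fresh graph for every incoming connection. A minimal sketch of that pattern with the Akka 2.4 `Fusing` API (the flow itself is illustrative, not part of the patch):

    import akka.stream.Fusing
    import akka.stream.scaladsl.Flow

    // Fuse the graph once, up front ...
    val flow = Flow[Int].map(_ * 2).filter(_ % 4 == 0)
    val preFused = Flow.fromGraph(Fusing.aggressive(flow))
    // ... then materialize `preFused` repeatedly (e.g. once per connection)
    // without paying the fusing cost each time.
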
From 8fbb3e37c0f4149db470362f6498492647914893 Mon Sep 17 00:00:00 2001
From: Alexei 
Date: Fri, 22 Jul 2016 15:09:16 +0300
Subject: [PATCH 042/155] =act Identify does not reset idle state of actors
 under ReceiveTimeout. fixes #20998 (#20999)

* Identify does not reset idle state of actors under ReceiveTimeout #20998

* unit test verifying that Identify has no impact on ReceiveTimeout #20998
---
 .../src/test/scala/akka/actor/ReceiveTimeoutSpec.scala     | 7 ++++++-
 akka-actor/src/main/scala/akka/actor/Actor.scala           | 2 +-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala
index 445d52bf67..2ada2abda8 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala
@@ -102,7 +102,12 @@ class ReceiveTimeoutSpec extends AkkaSpec {
         }
       }))
 
-      val ticks = system.scheduler.schedule(100.millis, 100.millis, timeoutActor, TransperentTick)(system.dispatcher)
+      val ticks = system.scheduler.schedule(100.millis, 100.millis, new Runnable {
+        override def run() = {
+          timeoutActor ! TransperentTick
+          timeoutActor ! Identify(None)
+        }
+      })(system.dispatcher)
 
       Await.ready(timeoutLatch, TestLatch.DefaultTimeout)
       ticks.cancel()
diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index 7be9612952..f0e766e410 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -61,7 +61,7 @@ case object Kill extends Kill {
  * is returned in the `ActorIdentity` message as `correlationId`.
  */
 @SerialVersionUID(1L)
-final case class Identify(messageId: Any) extends AutoReceivedMessage
+final case class Identify(messageId: Any) extends AutoReceivedMessage with NotInfluenceReceiveTimeout
 
 /**
  * Reply to [[akka.actor.Identify]]. Contains

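The fix works because the receive-timeout timer is only reset by messages that do not extend the `NotInfluenceReceiveTimeout` marker trait, which `Identify` now mixes in. A minimal sketch of the mechanism (actor and message names are illustrative):

    import akka.actor.{ Actor, NotInfluenceReceiveTimeout, ReceiveTimeout }
    import scala.concurrent.duration._

    case object Tick extends NotInfluenceReceiveTimeout // does not reset the idle timer

    class Watcher extends Actor {
      context.setReceiveTimeout(5.seconds)
      def receive = {
        case Tick           ⇒ // handled, but the actor still counts as idle
        case ReceiveTimeout ⇒ context.stop(self)
      }
    }
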
From 6efec50c940a16de10ec64cc2f311dc74ab91851 Mon Sep 17 00:00:00 2001
From: Johan Andrén 
Date: Fri, 22 Jul 2016 16:02:44 +0200
Subject: [PATCH 043/155] Limitable in HTTP Core rewritten as GraphStage #21019

---
 .../akka/http/scaladsl/model/HttpEntity.scala | 65 +++++++++++--------
 1 file changed, 38 insertions(+), 27 deletions(-)

diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala
index 4be0451de0..6945c35dd4 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala
@@ -549,14 +549,14 @@ object HttpEntity {
    * to entity constructors.
    */
   def limitableByteSource[Mat](source: Source[ByteString, Mat]): Source[ByteString, Mat] =
-    limitable(source, sizeOfByteString)
+    source.via(new Limitable(sizeOfByteString))
 
   /**
    * Turns the given source into one that respects the `withSizeLimit` calls when used as a parameter
    * to entity constructors.
    */
   def limitableChunkSource[Mat](source: Source[ChunkStreamPart, Mat]): Source[ChunkStreamPart, Mat] =
-    limitable(source, sizeOfChunkStreamPart)
+    source.via(new Limitable(sizeOfChunkStreamPart))
 
   /**
    * INTERNAL API
@@ -564,35 +564,46 @@ object HttpEntity {
   private val sizeOfByteString: ByteString ⇒ Int = _.size
   private val sizeOfChunkStreamPart: ChunkStreamPart ⇒ Int = _.data.size
 
-  /**
-   * INTERNAL API
-   */
-  private def limitable[Out, Mat](source: Source[Out, Mat], sizeOf: Out ⇒ Int): Source[Out, Mat] =
-    source.via(Flow[Out].transform { () ⇒
-      new PushStage[Out, Out] {
-        var maxBytes = -1L
-        var bytesLeft = Long.MaxValue
+  private val limitableDefaults = Attributes.name("limitable")
 
-        override def preStart(ctx: LifecycleContext) =
-          ctx.attributes.getFirst[SizeLimit] match {
-            case Some(limit: SizeLimit) if limit.isDisabled ⇒
-            // "no limit"
-            case Some(SizeLimit(bytes, cl @ Some(contentLength))) ⇒
-              if (contentLength > bytes) throw EntityStreamSizeException(bytes, cl)
-            // else we still count but never throw an error
-            case Some(SizeLimit(bytes, None)) ⇒
-              maxBytes = bytes
-              bytesLeft = bytes
-            case None ⇒
-          }
+  private final class Limitable[T](sizeOf: T ⇒ Int) extends GraphStage[FlowShape[T, T]] {
+    val in = Inlet[T]("Limitable.in")
+    val out = Outlet[T]("Limitable.out")
+    override val shape = FlowShape.of(in, out)
+    override protected val initialAttributes: Attributes = limitableDefaults
 
-        def onPush(elem: Out, ctx: stage.Context[Out]): stage.SyncDirective = {
-          bytesLeft -= sizeOf(elem)
-          if (bytesLeft >= 0) ctx.push(elem)
-          else ctx.fail(EntityStreamSizeException(maxBytes))
+    override def createLogic(attributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
+      private var maxBytes = -1L
+      private var bytesLeft = Long.MaxValue
+
+      override def preStart(): Unit = {
+        attributes.getFirst[SizeLimit] match {
+          case Some(limit: SizeLimit) if limit.isDisabled ⇒
+          // "no limit"
+          case Some(SizeLimit(bytes, cl @ Some(contentLength))) ⇒
+            if (contentLength > bytes) throw EntityStreamSizeException(bytes, cl)
+          // else we still count but never throw an error
+          case Some(SizeLimit(bytes, None)) ⇒
+            maxBytes = bytes
+            bytesLeft = bytes
+          case None ⇒
         }
       }
-    }.named("limitable"))
+
+      override def onPush(): Unit = {
+        val elem = grab(in)
+        bytesLeft -= sizeOf(elem)
+        if (bytesLeft >= 0) push(out, elem)
+        else failStage(EntityStreamSizeException(maxBytes))
+      }
+
+      override def onPull(): Unit = {
+        pull(in)
+      }
+
+      setHandlers(in, out, this)
+    }
+  }
 
   /**
    * INTERNAL API

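The rewrite follows the standard migration from the older `PushStage` API to a `GraphStage`: `preStart(ctx)` becomes `preStart()` with attributes available via `createLogic`, `ctx.push`/`ctx.fail` become `push`/`failStage`, and downstream demand is handled explicitly in `onPull`. A generic sketch of the resulting skeleton (the stage name is illustrative):

    import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
    import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }

    final class PassThrough[T] extends GraphStage[FlowShape[T, T]] {
      val in = Inlet[T]("PassThrough.in")
      val out = Outlet[T]("PassThrough.out")
      override val shape = FlowShape.of(in, out)

      override def createLogic(attributes: Attributes): GraphStageLogic =
        new GraphStageLogic(shape) with InHandler with OutHandler {
          override def onPush(): Unit = push(out, grab(in)) // was ctx.push(elem)
          override def onPull(): Unit = pull(in)            // demand is now explicit
          setHandlers(in, out, this)
        }
    }
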
From 1d0837856a4c1e47422330050f167f72022f3c2c Mon Sep 17 00:00:00 2001
From: ivan-lorenz 
Date: Fri, 22 Jul 2016 16:04:11 +0200
Subject: [PATCH 044/155] =doc set up mailbox for BalancingPool documented
 #13961

---
 akka-docs/rst/java/routing.rst                     | 14 ++++++++++++++
 .../scala/code/docs/routing/RouterDocSpec.scala    | 12 ++++++++++++
 akka-docs/rst/scala/routing.rst                    | 14 ++++++++++++++
 3 files changed, 40 insertions(+)

diff --git a/akka-docs/rst/java/routing.rst b/akka-docs/rst/java/routing.rst
index 6ecde18c1b..665b94847d 100644
--- a/akka-docs/rst/java/routing.rst
+++ b/akka-docs/rst/java/routing.rst
@@ -311,6 +311,20 @@ with a ``thread-pool-executor`` hinting the number of allocated threads explicit
 
 .. includecode:: ../scala/code/docs/routing/RouterDocSpec.scala#config-balancing-pool3
 
+It is also possible to change the ``mailbox`` used by the balancing dispatcher for
+scenarios where the default unbounded mailbox is not well suited. Such a scenario
+arises, for example, when messages need to be processed according to their priority.
+You can then implement a priority mailbox and configure your dispatcher:
+
+.. includecode:: ../scala/code/docs/routing/RouterDocSpec.scala#config-balancing-pool4
+
+.. note::
+
+   Bear in mind that ``BalancingDispatcher`` requires a message queue that must be thread-safe for
+   multiple concurrent consumers. So it is mandatory for the message queue backing a custom mailbox
+   for this kind of dispatcher to implement ``akka.dispatch.MultipleConsumerSemantics``. See details
+   on how to implement your custom mailbox in :ref:`mailboxes-java`.
+
 There is no Group variant of the BalancingPool.
 
 SmallestMailboxPool
diff --git a/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala b/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala
index 29b13a85ba..40de0a8645 100644
--- a/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala
@@ -101,6 +101,18 @@ akka.actor.deployment {
   }
 }
 #//#config-balancing-pool3
+
+#//#config-balancing-pool4
+akka.actor.deployment {
+  /parent/router10c {
+    router = balancing-pool
+    nr-of-instances = 5
+    pool-dispatcher {
+      mailbox = myapp.myprioritymailbox
+    }
+  }
+}
+#//#config-balancing-pool4
     
 #//#config-smallest-mailbox-pool
 akka.actor.deployment {
diff --git a/akka-docs/rst/scala/routing.rst b/akka-docs/rst/scala/routing.rst
index 6456209388..5e8b5bb023 100644
--- a/akka-docs/rst/scala/routing.rst
+++ b/akka-docs/rst/scala/routing.rst
@@ -312,6 +312,20 @@ with a ``thread-pool-executor`` hinting the number of allocated threads explicit
 
 .. includecode:: code/docs/routing/RouterDocSpec.scala#config-balancing-pool3
 
+It is also possible to change the ``mailbox`` used by the balancing dispatcher for
+scenarios where the default unbounded mailbox is not well suited. Such a scenario
+arises, for example, when messages need to be processed according to their priority.
+You can then implement a priority mailbox and configure your dispatcher:
+
+.. includecode:: code/docs/routing/RouterDocSpec.scala#config-balancing-pool4
+
+.. note::
+
+   Bear in mind that ``BalancingDispatcher`` requires a message queue that must be thread-safe for
+   multiple concurrent consumers. So it is mandatory for the message queue backing a custom mailbox
+   for this kind of dispatcher to implement ``akka.dispatch.MultipleConsumerSemantics``. See details
+   on how to implement your custom mailbox in :ref:`mailboxes-scala`.
+
 There is no Group variant of the BalancingPool.
 
 SmallestMailboxPool

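For reference, `mailbox = myapp.myprioritymailbox` in the config example above points at a mailbox configuration section whose `mailbox-type` names the implementing class. A minimal sketch of such a priority mailbox, following the standard `UnboundedPriorityMailbox` pattern (class name, config path, and message values are illustrative):

    import akka.actor.ActorSystem
    import akka.dispatch.{ PriorityGenerator, UnboundedPriorityMailbox }
    import com.typesafe.config.Config

    // Lower value = higher priority; the backing PriorityBlockingQueue is
    // thread-safe for multiple consumers, satisfying MultipleConsumerSemantics.
    class MyPriorityMailbox(settings: ActorSystem.Settings, config: Config)
      extends UnboundedPriorityMailbox(PriorityGenerator {
        case "urgent" ⇒ 0 // processed first
        case _        ⇒ 1 // everything else
      })

    // and in configuration:
    //   myapp.myprioritymailbox {
    //     mailbox-type = "myapp.MyPriorityMailbox"
    //   }
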
From cf46ab887ff17d16c6da559d4dacfe0ca99dd79c Mon Sep 17 00:00:00 2001
From: Hawstein 
Date: Sun, 24 Jul 2016 23:56:39 +0800
Subject: [PATCH 045/155] +htc #20771 provide different options to deal with
 the illegal response header value (#20976)

---
 .../src/main/resources/reference.conf         |  9 +++
 .../client/OutgoingConnectionBlueprint.scala  |  2 +-
 .../impl/engine/parsing/BodyPartParser.scala  |  2 +-
 .../engine/parsing/HttpHeaderParser.scala     | 60 +++++++++++-----
 .../engine/server/HttpServerBluePrint.scala   |  2 +-
 .../engine/ws/WebSocketClientBlueprint.scala  |  2 +-
 .../http/impl/model/parser/HeaderParser.scala | 15 ++--
 .../impl/settings/ParserSettingsImpl.scala    | 40 ++++++-----
 .../javadsl/settings/ParserSettings.scala     |  2 +
 .../scaladsl/settings/ParserSettings.scala    | 23 ++++++-
 .../LowLevelOutgoingConnectionSpec.scala      | 69 ++++++++++++++++++-
 .../engine/parsing/HttpHeaderParserSpec.scala |  1 +
 .../parsing/HttpHeaderParserTestBed.scala     |  2 +-
 .../engine/parsing/RequestParserSpec.scala    |  4 +-
 .../engine/parsing/ResponseParserSpec.scala   |  2 +-
 project/MiMa.scala                            |  6 +-
 16 files changed, 185 insertions(+), 56 deletions(-)

diff --git a/akka-http-core/src/main/resources/reference.conf b/akka-http-core/src/main/resources/reference.conf
index 28012b6ba1..bdc03585ac 100644
--- a/akka-http-core/src/main/resources/reference.conf
+++ b/akka-http-core/src/main/resources/reference.conf
@@ -352,6 +352,15 @@ akka.http {
     # `full`  : the full error details (potentially spanning several lines) are logged
     error-logging-verbosity = full
 
+    # Configures the processing mode when encountering illegal characters in
+    # the header value of a response.
+    #
+    # Supported modes:
+    # `error`  : default mode, throw a ParsingException and terminate the processing
+    # `warn`   : ignore the illegal characters in the response header value and log a warning message
+    # `ignore` : silently ignore the illegal characters in the response header value
+    illegal-response-header-value-processing-mode = error
+
     # limits for the number of different values per header type that the
     # header cache will hold
     header-cache {
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala
index 3b61836874..a49a6e191b 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala
@@ -83,7 +83,7 @@ private[http] object OutgoingConnectionBlueprint {
       val responseParsingMerge = b.add {
         // the initial header parser we initially use for every connection,
         // will not be mutated, all "shared copy" parsers copy on first-write into the header cache
-        val rootParser = new HttpResponseParser(parserSettings, HttpHeaderParser(parserSettings) { info ⇒
+        val rootParser = new HttpResponseParser(parserSettings, HttpHeaderParser(parserSettings, log) { info ⇒
           if (parserSettings.illegalHeaderWarnings)
             logParsingError(info withSummaryPrepended "Illegal response header", log, parserSettings.errorLoggingVerbosity)
         })
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala
index 19ac33da07..56eeb9ea98 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala
@@ -58,7 +58,7 @@ private[http] final class BodyPartParser(
   private[this] val boyerMoore = new BoyerMoore(needle)
 
   // TODO: prevent re-priming header parser from scratch
-  private[this] val headerParser = HttpHeaderParser(settings) { errorInfo ⇒
+  private[this] val headerParser = HttpHeaderParser(settings, log) { errorInfo ⇒
     if (illegalHeaderWarnings) log.warning(errorInfo.withSummaryPrepended("Illegal multipart header").formatPretty)
   }
 
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
index 63588995e8..867e39680b 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
@@ -7,6 +7,10 @@ package akka.http.impl.engine.parsing
 import java.nio.{ CharBuffer, ByteBuffer }
 import java.util.Arrays.copyOf
 import java.lang.{ StringBuilder ⇒ JStringBuilder }
+import akka.event.LoggingAdapter
+import akka.http.scaladsl.settings.ParserSettings.IllegalResponseHeaderValueProcessingMode
+import akka.http.scaladsl.settings.ParserSettings
+
 import scala.annotation.tailrec
 import akka.parboiled2.CharUtils
 import akka.util.ByteString
@@ -60,6 +64,7 @@ import akka.http.impl.model.parser.CharacterClasses._
  */
 private[engine] final class HttpHeaderParser private (
   val settings:                      HttpHeaderParser.Settings,
+  val log:                           LoggingAdapter,
   onIllegalHeader:                   ErrorInfo ⇒ Unit,
   private[this] var nodes:           Array[Char]               = new Array(512), // initial size, can grow as needed
   private[this] var nodeCount:       Int                       = 0,
@@ -85,7 +90,7 @@ private[engine] final class HttpHeaderParser private (
    * Returns a copy of this parser that shares the trie data with this instance.
    */
   def createShallowCopy(): HttpHeaderParser =
-    new HttpHeaderParser(settings, onIllegalHeader, nodes, nodeCount, branchData, branchDataCount, values, valueCount)
+    new HttpHeaderParser(settings, log, onIllegalHeader, nodes, nodeCount, branchData, branchDataCount, values, valueCount)
 
   /**
    * Parses a header line and returns the line start index of the subsequent line.
@@ -145,12 +150,14 @@ private[engine] final class HttpHeaderParser private (
     val colonIx = scanHeaderNameAndReturnIndexOfColon(input, lineStart, lineStart + 1 + maxHeaderNameLength)(cursor)
     val headerName = asciiString(input, lineStart, colonIx)
     try {
-      val valueParser = new RawHeaderValueParser(headerName, maxHeaderValueLength, headerValueCacheLimit(headerName))
+      val valueParser = new RawHeaderValueParser(headerName, maxHeaderValueLength,
+        headerValueCacheLimit(headerName), log, illegalResponseHeaderValueProcessingMode)
       insert(input, valueParser)(cursor, colonIx + 1, nodeIx, colonIx)
       parseHeaderLine(input, lineStart)(cursor, nodeIx)
     } catch {
       case OutOfTrieSpaceException ⇒ // if we cannot insert we drop back to simply creating new header instances
-        val (headerValue, endIx) = scanHeaderValue(this, input, colonIx + 1, colonIx + maxHeaderValueLength + 3)()
+        val (headerValue, endIx) = scanHeaderValue(this, input, colonIx + 1, colonIx + maxHeaderValueLength + 3,
+          log, settings.illegalResponseHeaderValueProcessingMode)()
         resultHeader = RawHeader(headerName, headerValue.trim)
         endIx
     }
@@ -413,6 +420,7 @@ private[http] object HttpHeaderParser {
     def maxHeaderValueLength: Int
     def headerValueCacheLimit(headerName: String): Int
     def customMediaTypes: MediaTypes.FindCustom
+    def illegalResponseHeaderValueProcessingMode: IllegalResponseHeaderValueProcessingMode
   }
 
   private def predefinedHeaders = Seq(
@@ -426,16 +434,16 @@ private[http] object HttpHeaderParser {
     "Cache-Control: no-cache",
     "Expect: 100-continue")
 
-  def apply(settings: HttpHeaderParser.Settings)(onIllegalHeader: ErrorInfo ⇒ Unit = info ⇒ throw IllegalHeaderException(info)) =
-    prime(unprimed(settings, onIllegalHeader))
+  def apply(settings: HttpHeaderParser.Settings, log: LoggingAdapter)(onIllegalHeader: ErrorInfo ⇒ Unit = info ⇒ throw IllegalHeaderException(info)) =
+    prime(unprimed(settings, log, onIllegalHeader))
 
-  def unprimed(settings: HttpHeaderParser.Settings, warnOnIllegalHeader: ErrorInfo ⇒ Unit) =
-    new HttpHeaderParser(settings, warnOnIllegalHeader)
+  def unprimed(settings: HttpHeaderParser.Settings, log: LoggingAdapter, warnOnIllegalHeader: ErrorInfo ⇒ Unit) =
+    new HttpHeaderParser(settings, log, warnOnIllegalHeader)
 
   def prime(parser: HttpHeaderParser): HttpHeaderParser = {
     val valueParsers: Seq[HeaderValueParser] =
       HeaderParser.ruleNames.map { name ⇒
-        new ModeledHeaderValueParser(name, parser.settings.maxHeaderValueLength, parser.settings.headerValueCacheLimit(name), parser.settings)
+        new ModeledHeaderValueParser(name, parser.settings.maxHeaderValueLength, parser.settings.headerValueCacheLimit(name), parser.log, parser.settings)
       }(collection.breakOut)
     def insertInGoodOrder(items: Seq[Any])(startIx: Int = 0, endIx: Int = items.size): Unit =
       if (endIx - startIx > 0) {
@@ -470,11 +478,11 @@ private[http] object HttpHeaderParser {
     def cachingEnabled = maxValueCount > 0
   }
 
-  private[parsing] class ModeledHeaderValueParser(headerName: String, maxHeaderValueLength: Int, maxValueCount: Int, settings: HeaderParser.Settings)
+  private[parsing] class ModeledHeaderValueParser(headerName: String, maxHeaderValueLength: Int, maxValueCount: Int, log: LoggingAdapter, settings: HeaderParser.Settings)
     extends HeaderValueParser(headerName, maxValueCount) {
     def apply(hhp: HttpHeaderParser, input: ByteString, valueStart: Int, onIllegalHeader: ErrorInfo ⇒ Unit): (HttpHeader, Int) = {
       // TODO: optimize by running the header value parser directly on the input ByteString (rather than an extracted String); seems done?
-      val (headerValue, endIx) = scanHeaderValue(hhp, input, valueStart, valueStart + maxHeaderValueLength + 2)()
+      val (headerValue, endIx) = scanHeaderValue(hhp, input, valueStart, valueStart + maxHeaderValueLength + 2, log, settings.illegalResponseHeaderValueProcessingMode)()
       val trimmedHeaderValue = headerValue.trim
       val header = HeaderParser.parseFull(headerName, trimmedHeaderValue, settings) match {
         case Right(h) ⇒ h
@@ -486,10 +494,10 @@ private[http] object HttpHeaderParser {
     }
   }
 
-  private[parsing] class RawHeaderValueParser(headerName: String, maxHeaderValueLength: Int, maxValueCount: Int)
-    extends HeaderValueParser(headerName, maxValueCount) {
+  private[parsing] class RawHeaderValueParser(headerName: String, maxHeaderValueLength: Int, maxValueCount: Int,
+                                              log: LoggingAdapter, mode: IllegalResponseHeaderValueProcessingMode) extends HeaderValueParser(headerName, maxValueCount) {
     def apply(hhp: HttpHeaderParser, input: ByteString, valueStart: Int, onIllegalHeader: ErrorInfo ⇒ Unit): (HttpHeader, Int) = {
-      val (headerValue, endIx) = scanHeaderValue(hhp, input, valueStart, valueStart + maxHeaderValueLength + 2)()
+      val (headerValue, endIx) = scanHeaderValue(hhp, input, valueStart, valueStart + maxHeaderValueLength + 2, log, mode)()
       RawHeader(headerName, headerValue.trim) → endIx
     }
   }
@@ -503,15 +511,16 @@ private[http] object HttpHeaderParser {
       }
     else fail(s"HTTP header name exceeds the configured limit of ${limit - start - 1} characters")
 
-  @tailrec private def scanHeaderValue(hhp: HttpHeaderParser, input: ByteString, start: Int,
-                                       limit: Int)(sb: JStringBuilder = null, ix: Int = start): (String, Int) = {
+  @tailrec private def scanHeaderValue(hhp: HttpHeaderParser, input: ByteString, start: Int, limit: Int, log: LoggingAdapter,
+                                       mode: IllegalResponseHeaderValueProcessingMode)(sb: JStringBuilder = null, ix: Int = start): (String, Int) = {
+
     def appended(c: Char) = (if (sb != null) sb else new JStringBuilder(asciiString(input, start, ix))).append(c)
     def appended2(c: Int) = if ((c >> 16) != 0) appended(c.toChar).append((c >> 16).toChar) else appended(c.toChar)
     if (ix < limit)
       byteChar(input, ix) match {
-        case '\t' ⇒ scanHeaderValue(hhp, input, start, limit)(appended(' '), ix + 1)
+        case '\t' ⇒ scanHeaderValue(hhp, input, start, limit, log, mode)(appended(' '), ix + 1)
         case '\r' if byteChar(input, ix + 1) == '\n' ⇒
-          if (WSP(byteChar(input, ix + 2))) scanHeaderValue(hhp, input, start, limit)(appended(' '), ix + 3)
+          if (WSP(byteChar(input, ix + 2))) scanHeaderValue(hhp, input, start, limit, log, mode)(appended(' '), ix + 3)
           else (if (sb != null) sb.toString else asciiString(input, start, ix), ix + 2)
         case c ⇒
           var nix = ix + 1
@@ -544,8 +553,21 @@ private[http] object HttpHeaderParser {
                 case -1 ⇒ if (sb != null) sb.append(c).append(byteChar(input, ix + 1)).append(byteChar(input, ix + 2)).append(byteChar(input, ix + 3)) else null
                 case cc ⇒ appended2(cc)
               }
-            } else fail(s"Illegal character '${escape(c)}' in header value")
-          scanHeaderValue(hhp, input, start, limit)(nsb, nix)
+            } else {
+              mode match {
+                case ParserSettings.IllegalResponseHeaderValueProcessingMode.Error ⇒
+                  fail(s"Illegal character '${escape(c)}' in header value")
+                case ParserSettings.IllegalResponseHeaderValueProcessingMode.Warn ⇒
+                  // ignore the illegal character and log a warning message
+                  log.warning(s"Illegal character '${escape(c)}' in header value")
+                  sb
+                case ParserSettings.IllegalResponseHeaderValueProcessingMode.Ignore ⇒
+                  // just ignore the illegal character
+                  sb
+              }
+
+            }
+          scanHeaderValue(hhp, input, start, limit, log, mode)(nsb, nix)
       }
     else fail(s"HTTP header value exceeds the configured limit of ${limit - start - 2} characters")
   }
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
index bcc28bc828..301d542369 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
@@ -211,7 +211,7 @@ private[http] object HttpServerBluePrint {
     // the initial header parser we initially use for every connection,
     // will not be mutated, all "shared copy" parsers copy on first-write into the header cache
     val rootParser = new HttpRequestParser(parserSettings, rawRequestUriHeader,
-      HttpHeaderParser(parserSettings) { info ⇒
+      HttpHeaderParser(parserSettings, log) { info ⇒
         if (parserSettings.illegalHeaderWarnings)
           logParsingError(info withSummaryPrepended "Illegal request header", log, parserSettings.errorLoggingVerbosity)
       })
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala
index e193877496..c95ef042eb 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala
@@ -62,7 +62,7 @@ object WebSocketClientBlueprint {
         new GraphStageLogic(shape) with InHandler with OutHandler {
           // a special version of the parser which only parses one message and then reports the remaining data
           // if some is available
-          val parser = new HttpResponseParser(settings.parserSettings, HttpHeaderParser(settings.parserSettings)()) {
+          val parser = new HttpResponseParser(settings.parserSettings, HttpHeaderParser(settings.parserSettings, log)()) {
             var first = true
             override def handleInformationalResponses = false
             override protected def parseMessage(input: ByteString, offset: Int): StateResult = {
diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala
index 1d95e2f0b6..04c6e3a157 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala
@@ -6,6 +6,7 @@ package akka.http.impl.model.parser
 
 import akka.http.scaladsl.settings.ParserSettings
 import akka.http.scaladsl.settings.ParserSettings.CookieParsingMode
+import akka.http.scaladsl.settings.ParserSettings.IllegalResponseHeaderValueProcessingMode
 import akka.http.scaladsl.model.headers.HttpCookiePair
 import akka.stream.impl.ConstantFun
 import scala.util.control.NonFatal
@@ -169,20 +170,26 @@ private[http] object HeaderParser {
     def uriParsingMode: Uri.ParsingMode
     def cookieParsingMode: ParserSettings.CookieParsingMode
     def customMediaTypes: MediaTypes.FindCustom
+    def illegalResponseHeaderValueProcessingMode: IllegalResponseHeaderValueProcessingMode
   }
   def Settings(
-    uriParsingMode:    Uri.ParsingMode                  = Uri.ParsingMode.Relaxed,
-    cookieParsingMode: ParserSettings.CookieParsingMode = ParserSettings.CookieParsingMode.RFC6265,
-    customMediaTypes:  MediaTypes.FindCustom            = ConstantFun.scalaAnyTwoToNone): Settings = {
+    uriParsingMode:    Uri.ParsingMode                          = Uri.ParsingMode.Relaxed,
+    cookieParsingMode: ParserSettings.CookieParsingMode         = ParserSettings.CookieParsingMode.RFC6265,
+    customMediaTypes:  MediaTypes.FindCustom                    = ConstantFun.scalaAnyTwoToNone,
+    mode:              IllegalResponseHeaderValueProcessingMode = ParserSettings.IllegalResponseHeaderValueProcessingMode.Error): Settings = {
+
     val _uriParsingMode = uriParsingMode
     val _cookieParsingMode = cookieParsingMode
     val _customMediaTypes = customMediaTypes
+    val _illegalResponseHeaderValueProcessingMode = mode
 
     new Settings {
       def uriParsingMode: Uri.ParsingMode = _uriParsingMode
       def cookieParsingMode: CookieParsingMode = _cookieParsingMode
       def customMediaTypes: MediaTypes.FindCustom = _customMediaTypes
+      def illegalResponseHeaderValueProcessingMode: IllegalResponseHeaderValueProcessingMode =
+        _illegalResponseHeaderValueProcessingMode
     }
   }
   val DefaultSettings: Settings = Settings()
-}
\ No newline at end of file
+}
diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala
index 4214065af4..09362ba630 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala
@@ -5,7 +5,7 @@
 package akka.http.impl.settings
 
 import akka.http.scaladsl.settings.ParserSettings
-import akka.http.scaladsl.settings.ParserSettings.{ ErrorLoggingVerbosity, CookieParsingMode }
+import akka.http.scaladsl.settings.ParserSettings.{ IllegalResponseHeaderValueProcessingMode, ErrorLoggingVerbosity, CookieParsingMode }
 import akka.stream.impl.ConstantFun
 import com.typesafe.config.Config
 import scala.collection.JavaConverters._
@@ -14,24 +14,25 @@ import akka.http.impl.util._
 
 /** INTERNAL API */
 private[akka] final case class ParserSettingsImpl(
-  maxUriLength:                Int,
-  maxMethodLength:             Int,
-  maxResponseReasonLength:     Int,
-  maxHeaderNameLength:         Int,
-  maxHeaderValueLength:        Int,
-  maxHeaderCount:              Int,
-  maxContentLength:            Long,
-  maxChunkExtLength:           Int,
-  maxChunkSize:                Int,
-  uriParsingMode:              Uri.ParsingMode,
-  cookieParsingMode:           CookieParsingMode,
-  illegalHeaderWarnings:       Boolean,
-  errorLoggingVerbosity:       ParserSettings.ErrorLoggingVerbosity,
-  headerValueCacheLimits:      Map[String, Int],
-  includeTlsSessionInfoHeader: Boolean,
-  customMethods:               String ⇒ Option[HttpMethod],
-  customStatusCodes:           Int ⇒ Option[StatusCode],
-  customMediaTypes:            MediaTypes.FindCustom)
+  maxUriLength:                             Int,
+  maxMethodLength:                          Int,
+  maxResponseReasonLength:                  Int,
+  maxHeaderNameLength:                      Int,
+  maxHeaderValueLength:                     Int,
+  maxHeaderCount:                           Int,
+  maxContentLength:                         Long,
+  maxChunkExtLength:                        Int,
+  maxChunkSize:                             Int,
+  uriParsingMode:                           Uri.ParsingMode,
+  cookieParsingMode:                        CookieParsingMode,
+  illegalHeaderWarnings:                    Boolean,
+  errorLoggingVerbosity:                    ErrorLoggingVerbosity,
+  illegalResponseHeaderValueProcessingMode: IllegalResponseHeaderValueProcessingMode,
+  headerValueCacheLimits:                   Map[String, Int],
+  includeTlsSessionInfoHeader:              Boolean,
+  customMethods:                            String ⇒ Option[HttpMethod],
+  customStatusCodes:                        Int ⇒ Option[StatusCode],
+  customMediaTypes:                         MediaTypes.FindCustom)
   extends akka.http.scaladsl.settings.ParserSettings {
 
   require(maxUriLength > 0, "max-uri-length must be > 0")
@@ -76,6 +77,7 @@ object ParserSettingsImpl extends SettingsCompanion[ParserSettingsImpl]("akka.ht
       CookieParsingMode(c getString "cookie-parsing-mode"),
       c getBoolean "illegal-header-warnings",
       ErrorLoggingVerbosity(c getString "error-logging-verbosity"),
+      IllegalResponseHeaderValueProcessingMode(c getString "illegal-response-header-value-processing-mode"),
       cacheConfig.entrySet.asScala.map(kvp ⇒ kvp.getKey → cacheConfig.getInt(kvp.getKey))(collection.breakOut),
       c getBoolean "tls-session-info-header",
       noCustomMethods,
diff --git a/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala b/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala
index c977b4ead7..21279b3bfb 100644
--- a/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala
+++ b/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala
@@ -33,6 +33,7 @@ abstract class ParserSettings private[akka] () extends BodyPartParser.Settings {
   def getCookieParsingMode: ParserSettings.CookieParsingMode
   def getIllegalHeaderWarnings: Boolean
   def getErrorLoggingVerbosity: ParserSettings.ErrorLoggingVerbosity
+  def getIllegalResponseHeaderValueProcessingMode: ParserSettings.IllegalResponseHeaderValueProcessingMode
   def getHeaderValueCacheLimits: ju.Map[String, Int]
   def getIncludeTlsSessionInfoHeader: Boolean
   def headerValueCacheLimits: Map[String, Int]
@@ -81,6 +82,7 @@ abstract class ParserSettings private[akka] () extends BodyPartParser.Settings {
 object ParserSettings extends SettingsCompanion[ParserSettings] {
   trait CookieParsingMode
   trait ErrorLoggingVerbosity
+  trait IllegalResponseHeaderValueProcessingMode
 
   override def create(config: Config): ParserSettings = ParserSettingsImpl(config)
   override def create(configOverrides: String): ParserSettings = ParserSettingsImpl(configOverrides)
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala
index fa4ae90d73..97dd767d49 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala
@@ -34,6 +34,7 @@ abstract class ParserSettings private[akka] () extends akka.http.javadsl.setting
   def cookieParsingMode: ParserSettings.CookieParsingMode
   def illegalHeaderWarnings: Boolean
   def errorLoggingVerbosity: ParserSettings.ErrorLoggingVerbosity
+  def illegalResponseHeaderValueProcessingMode: ParserSettings.IllegalResponseHeaderValueProcessingMode
   def headerValueCacheLimits: Map[String, Int]
   def includeTlsSessionInfoHeader: Boolean
   def customMethods: String ⇒ Option[HttpMethod]
@@ -56,6 +57,7 @@ abstract class ParserSettings private[akka] () extends akka.http.javadsl.setting
   override def getMaxUriLength = maxUriLength
   override def getMaxMethodLength = maxMethodLength
   override def getErrorLoggingVerbosity: js.ParserSettings.ErrorLoggingVerbosity = errorLoggingVerbosity
+  override def getIllegalResponseHeaderValueProcessingMode = illegalResponseHeaderValueProcessingMode
 
   override def getCustomMethods = new Function[String, Optional[akka.http.javadsl.model.HttpMethod]] {
     override def apply(t: String) = OptionConverters.toJava(customMethods(t))
@@ -100,10 +102,12 @@ abstract class ParserSettings private[akka] () extends akka.http.javadsl.setting
     val map = types.map(c ⇒ (c.mainType, c.subType) → c).toMap
     self.copy(customMediaTypes = (main, sub) ⇒ map.get((main, sub)))
   }
+  def withIllegalResponseHeaderValueProcessingMode(newValue: ParserSettings.IllegalResponseHeaderValueProcessingMode): ParserSettings =
+    self.copy(illegalResponseHeaderValueProcessingMode = newValue)
 }
 
 object ParserSettings extends SettingsCompanion[ParserSettings] {
-  trait CookieParsingMode extends akka.http.javadsl.settings.ParserSettings.CookieParsingMode
+  sealed trait CookieParsingMode extends akka.http.javadsl.settings.ParserSettings.CookieParsingMode
   object CookieParsingMode {
     case object RFC6265 extends CookieParsingMode
     case object Raw extends CookieParsingMode
@@ -114,7 +118,7 @@ object ParserSettings extends SettingsCompanion[ParserSettings] {
     }
   }
 
-  trait ErrorLoggingVerbosity extends akka.http.javadsl.settings.ParserSettings.ErrorLoggingVerbosity
+  sealed trait ErrorLoggingVerbosity extends akka.http.javadsl.settings.ParserSettings.ErrorLoggingVerbosity
   object ErrorLoggingVerbosity {
     case object Off extends ErrorLoggingVerbosity
     case object Simple extends ErrorLoggingVerbosity
@@ -129,6 +133,21 @@ object ParserSettings extends SettingsCompanion[ParserSettings] {
       }
   }
 
+  sealed trait IllegalResponseHeaderValueProcessingMode extends akka.http.javadsl.settings.ParserSettings.IllegalResponseHeaderValueProcessingMode
+  object IllegalResponseHeaderValueProcessingMode {
+    case object Error extends IllegalResponseHeaderValueProcessingMode
+    case object Warn extends IllegalResponseHeaderValueProcessingMode
+    case object Ignore extends IllegalResponseHeaderValueProcessingMode
+
+    def apply(string: String): IllegalResponseHeaderValueProcessingMode =
+      string.toRootLowerCase match {
+        case "error"  ⇒ Error
+        case "warn"   ⇒ Warn
+        case "ignore" ⇒ Ignore
+        case x        ⇒ throw new IllegalArgumentException(s"[$x] is not a legal `illegal-response-header-value-processing-mode` setting")
+      }
+  }
+
   override def apply(config: Config): ParserSettings = ParserSettingsImpl(config)
   override def apply(configOverrides: String): ParserSettings = ParserSettingsImpl(configOverrides)
 }
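
Besides the reference.conf route shown above, the new mode can also be set programmatically through the `withIllegalResponseHeaderValueProcessingMode` method added to the scaladsl `ParserSettings`. A sketch of client-side usage (the setup code is illustrative):

    import akka.actor.ActorSystem
    import akka.http.scaladsl.settings.{ ClientConnectionSettings, ParserSettings }
    import akka.http.scaladsl.settings.ParserSettings.IllegalResponseHeaderValueProcessingMode

    implicit val system = ActorSystem()

    // Log a warning for illegal response header characters instead of failing the stream.
    val parserSettings = ParserSettings(system)
      .withIllegalResponseHeaderValueProcessingMode(IllegalResponseHeaderValueProcessingMode.Warn)
    val clientSettings = ClientConnectionSettings(system).withParserSettings(parserSettings)
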
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala
index e2cea50f60..14a359351d 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala
@@ -4,6 +4,8 @@
 
 package akka.http.impl.engine.client
 
+import com.typesafe.config.ConfigFactory
+
 import scala.concurrent.duration._
 import scala.reflect.ClassTag
 import org.scalatest.Inside
@@ -326,6 +328,65 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka.
       }
     }
 
+    "process the illegal response header value properly" which {
+
+      val illegalChar = '\u0001'
+      val escapeChar = "\\u%04x" format illegalChar.toInt
+
+      "catch illegal response header value by default" in new TestSetup {
+        sendStandardRequest()
+        sendWireData(
+          s"""HTTP/1.1 200 OK
+              |Some-Header: value1$illegalChar
+              |Other-Header: value2
+              |
+              |""")
+
+        responsesSub.request(1)
+        val error @ IllegalResponseException(info) = responses.expectError()
+        info.summary shouldEqual s"""Illegal character '$escapeChar' in header value"""
+        netOut.expectError(error)
+        requestsSub.expectCancellation()
+        netInSub.expectCancellation()
+      }
+
+      val ignoreConfig =
+        """
+          akka.http.parsing.illegal-response-header-value-processing-mode = ignore
+        """
+      "ignore illegal response header value if setting the config to ignore" in new TestSetup(config = ignoreConfig) {
+        sendStandardRequest()
+        sendWireData(
+          s"""HTTP/1.1 200 OK
+              |Some-Header: value1$illegalChar
+              |Other-Header: value2
+              |
+              |""")
+
+        val HttpResponse(_, headers, _, _) = expectResponse()
+        val headerStr = headers.map(h ⇒ s"${h.name}: ${h.value}").mkString(",")
+        headerStr shouldEqual "Some-Header: value1,Other-Header: value2"
+      }
+
+      val warnConfig =
+        """
+          akka.http.parsing.illegal-response-header-value-processing-mode = warn
+        """
+      "ignore illegal response header value and log a warning message if setting the config to warn" in new TestSetup(config = warnConfig) {
+        sendStandardRequest()
+        sendWireData(
+          s"""HTTP/1.1 200 OK
+              |Some-Header: value1$illegalChar
+              |Other-Header: value2
+              |
+              |""")
+
+        val HttpResponse(_, headers, _, _) = expectResponse()
+        val headerStr = headers.map(h ⇒ s"${h.name}: ${h.value}").mkString(",")
+        headerStr shouldEqual "Some-Header: value1,Other-Header: value2"
+      }
+    }
+
     "produce proper errors" which {
 
       "catch the request entity stream being shorter than the Content-Length" in new TestSetup {
@@ -808,13 +869,14 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka.
     }
   }
 
-  class TestSetup(maxResponseContentLength: Int = -1) {
+  class TestSetup(maxResponseContentLength: Int = -1, config: String = "") {
     val requests = TestPublisher.manualProbe[HttpRequest]()
     val responses = TestSubscriber.manualProbe[HttpResponse]()
 
     def settings = {
-      val s = ClientConnectionSettings(system)
-        .withUserAgentHeader(Some(`User-Agent`(List(ProductVersion("akka-http", "test")))))
+      val s = ClientConnectionSettings(
+        ConfigFactory.parseString(config).withFallback(system.settings.config)
+      ).withUserAgentHeader(Some(`User-Agent`(List(ProductVersion("akka-http", "test")))))
       if (maxResponseContentLength < 0) s
       else s.withParserSettings(s.parserSettings.withMaxContentLength(maxResponseContentLength))
     }
@@ -873,5 +935,6 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka.
       responsesSub.request(1)
       responses.expectNext()
     }
+
   }
 }
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala
index 4282538f0b..c632c74f3c 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala
@@ -246,6 +246,7 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll
     val parser = {
       val p = HttpHeaderParser.unprimed(
         settings = ParserSettings(system),
+        system.log,
         warnOnIllegalHeader = info ⇒ system.log.warning(info.formatPretty))
       if (primed) HttpHeaderParser.prime(p) else p
     }
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserTestBed.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserTestBed.scala
index b8dcb95b9a..89c554d2fb 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserTestBed.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserTestBed.scala
@@ -15,7 +15,7 @@ object HttpHeaderParserTestBed extends App {
   val system = ActorSystem("HttpHeaderParserTestBed", testConf)
 
   val parser = HttpHeaderParser.prime {
-    HttpHeaderParser.unprimed(ParserSettings(system), warnOnIllegalHeader = info ⇒ system.log.warning(info.formatPretty))
+    HttpHeaderParser.unprimed(ParserSettings(system), system.log, warnOnIllegalHeader = info ⇒ system.log.warning(info.formatPretty))
   }
 
   println {
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
index c53f6cbdbf..ccf1b3cdc3 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
@@ -300,7 +300,7 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
 
     "support `rawRequestUriHeader` setting" in new Test {
       override protected def newParser: HttpRequestParser =
-        new HttpRequestParser(parserSettings, rawRequestUriHeader = true, headerParser = HttpHeaderParser(parserSettings)())
+        new HttpRequestParser(parserSettings, rawRequestUriHeader = true, headerParser = HttpHeaderParser(parserSettings, system.log)())
 
       """GET /f%6f%6fbar?q=b%61z HTTP/1.1
         |Host: ping
@@ -582,7 +582,7 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
         .awaitResult(awaitAtMost)
 
     protected def parserSettings: ParserSettings = ParserSettings(system)
-    protected def newParser = new HttpRequestParser(parserSettings, false, HttpHeaderParser(parserSettings)())
+    protected def newParser = new HttpRequestParser(parserSettings, false, HttpHeaderParser(parserSettings, system.log)())
 
     private def compactEntity(entity: RequestEntity): Future[RequestEntity] =
       entity match {
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/ResponseParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/ResponseParserSpec.scala
index 5ad34b5d38..470d03c3e4 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/ResponseParserSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/ResponseParserSpec.scala
@@ -320,7 +320,7 @@ class ResponseParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
     protected def parserSettings: ParserSettings = ParserSettings(system)
 
     def newParserStage(requestMethod: HttpMethod = GET) = {
-      val parser = new HttpResponseParser(parserSettings, HttpHeaderParser(parserSettings)())
+      val parser = new HttpResponseParser(parserSettings, HttpHeaderParser(parserSettings, system.log)())
       parser.setContextForNextResponse(HttpResponseParser.ResponseContext(requestMethod, None))
       parser.stage
     }
diff --git a/project/MiMa.scala b/project/MiMa.scala
index 1081eed72a..106a7c14bc 100644
--- a/project/MiMa.scala
+++ b/project/MiMa.scala
@@ -907,7 +907,11 @@ object MiMa extends AutoPlugin {
       ),
       "2.4.9" -> Seq(
         // #20994 adding new decode method, since we're on JDK7+ now
-        ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.util.ByteString.decodeString")
+        ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.util.ByteString.decodeString"),
+
+        // #20976 provide different options to deal with the illegal response header value
+        ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.settings.ParserSettings.getIllegalResponseHeaderValueProcessingMode"),
+        ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.settings.ParserSettings.illegalResponseHeaderValueProcessingMode")
       )
     )
   }

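[Editor's note] The tests above exercise the new setting
akka.http.parsing.illegal-response-header-value-processing-mode, which accepts
the modes error, ignore and warn. Per the test expectations, ignore and warn
drop the offending character and keep the remaining headers, while error (the
default) fails the response with an IllegalResponseException. Below is a
minimal sketch, not part of the patch, of how a client might opt into the
lenient mode; it mirrors the TestSetup change above, and the ActorSystem name
and object wrapper are illustrative assumptions.

    import akka.actor.ActorSystem
    import akka.http.scaladsl.settings.ClientConnectionSettings
    import com.typesafe.config.ConfigFactory

    object LenientHeaderSketch extends App {
      // Hypothetical system name; any ActorSystem works here.
      val system = ActorSystem("doc-sketch")
      // Same pattern the TestSetup uses: per-use config layered over the
      // system's own configuration.
      val lenient = ConfigFactory
        .parseString("akka.http.parsing.illegal-response-header-value-processing-mode = warn")
        .withFallback(system.settings.config)
      // Settings built from this config tolerate illegal header values.
      val settings = ClientConnectionSettings(lenient)
      system.terminate()
    }
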
From 3a2e918ccfc72b06564dbf41aecff25eb9725da0 Mon Sep 17 00:00:00 2001
From: kenji yoshida <6b656e6a69@gmail.com>
Date: Mon, 25 Jul 2016 16:26:38 +0900
Subject: [PATCH 046/155] =doc fix .rst syntax (#21032)

`[text](http://example.com)` is markdown syntax, not rst
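
For reference, the two link syntaxes side by side (reST wraps the URL in
angle brackets and ends with an underscore):

    Markdown: [text](http://example.com)
    reST:     `text <http://example.com>`_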
---
 akka-docs/rst/project/issue-tracking.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/akka-docs/rst/project/issue-tracking.rst b/akka-docs/rst/project/issue-tracking.rst
index 7a68465e63..98406eaa68 100644
--- a/akka-docs/rst/project/issue-tracking.rst
+++ b/akka-docs/rst/project/issue-tracking.rst
@@ -19,7 +19,7 @@ have reproducible test cases that you can share.
 Roadmaps
 ^^^^^^^^
 
-Short and long-term plans are published in the [akka/akka-meta](https://github.com/akka/akka-meta/issues) repository.
+Short and long-term plans are published in the `akka/akka-meta <https://github.com/akka/akka-meta/issues>`_ repository.
 
 Creating tickets
 ----------------
@@ -37,7 +37,7 @@ Submitting Pull Requests
 .. note:: *A pull request is worth a thousand +1's.* -- Old Klangian Proverb
 
 Pull Requests fixing issues or adding functionality are very welcome.
-Please read [CONTRIBUTING.md](https://github.com/akka/akka/blob/master/CONTRIBUTING.md) for
+Please read `CONTRIBUTING.md <https://github.com/akka/akka/blob/master/CONTRIBUTING.md>`_ for
 more information about contributing to Akka.
 
 

From 584f383a6d584e33cde1a364822097f2e32d388f Mon Sep 17 00:00:00 2001
From: kenji yoshida <6b656e6a69@gmail.com>
Date: Mon, 25 Jul 2016 17:41:51 +0900
Subject: [PATCH 047/155] =doc fix FAQ url (#21031)

---
 akka-docs/_sphinx/themes/akka/layout.html | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/akka-docs/_sphinx/themes/akka/layout.html b/akka-docs/_sphinx/themes/akka/layout.html
index 0fdb57b588..22e0dd9c44 100644
--- a/akka-docs/_sphinx/themes/akka/layout.html
+++ b/akka-docs/_sphinx/themes/akka/layout.html
@@ -86,7 +86,7 @@