2014-12-08 17:29:40 +01:00
|
|
|
package docs.stream.cookbook
|
|
|
|
|
|
2016-01-20 10:00:37 +02:00
|
|
|
import akka.NotUsed
|
2016-02-11 16:39:25 +01:00
|
|
|
import akka.stream.{ Attributes, Outlet, Inlet, FlowShape }
|
2014-12-08 17:29:40 +01:00
|
|
|
import akka.stream.scaladsl.{ Flow, Sink, Source }
|
|
|
|
|
import akka.util.ByteString
|
|
|
|
|
|
|
|
|
|
import scala.concurrent.Await
|
|
|
|
|
import scala.concurrent.duration._
|
|
|
|
|
|
|
|
|
|
// Cookbook recipes demonstrating common ByteString stream manipulations:
// re-chunking, enforcing a total byte limit, and compacting.
class RecipeByteStrings extends RecipeSpec {

  "Recipes for bytestring streams" must {

    "have a working chunker" in {
      val rawBytes = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9)))
      val ChunkLimit = 2

      //#bytestring-chunker
      import akka.stream.stage._

      // Re-chunks an incoming stream of ByteStrings so that no emitted element
      // is larger than chunkSize. Elements are buffered and split as needed;
      // byte order and content are preserved.
      class Chunker(val chunkSize: Int) extends GraphStage[FlowShape[ByteString, ByteString]] {
        val in = Inlet[ByteString]("Chunker.in")
        val out = Outlet[ByteString]("Chunker.out")
        override val shape = FlowShape.of(in, out)

        override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
          // Bytes received from upstream but not yet emitted downstream.
          private var buffer = ByteString.empty

          setHandler(out, new OutHandler {
            override def onPull(): Unit = {
              // After upstream completion we can only drain the buffer;
              // otherwise ask upstream for more input.
              if (isClosed(in)) emitChunk()
              else pull(in)
            }
          })

          setHandler(in, new InHandler {
            override def onPush(): Unit = {
              val elem = grab(in)
              buffer ++= elem
              emitChunk()
            }

            override def onUpstreamFinish(): Unit = {
              if (buffer.isEmpty) completeStage()
              else {
                // There are elements left in buffer, so
                // we keep accepting downstream pulls and push from buffer until emptied.
                //
                // It might be though, that the upstream finished while it was pulled, in which
                // case we will not get an onPull from the downstream, because we already had one.
                // In that case we need to emit from the buffer.
                if (isAvailable(out)) emitChunk()
              }
            }
          })

          // Emits at most chunkSize bytes from the buffer, or completes/pulls
          // when the buffer is empty depending on whether upstream finished.
          private def emitChunk(): Unit = {
            if (buffer.isEmpty) {
              if (isClosed(in)) completeStage()
              else pull(in)
            } else {
              val (chunk, nextBuffer) = buffer.splitAt(chunkSize)
              buffer = nextBuffer
              push(out, chunk)
            }
          }

        }
      }

      val chunksStream = rawBytes.via(new Chunker(ChunkLimit))
      //#bytestring-chunker

      val chunksFuture = chunksStream.limit(10).runWith(Sink.seq)

      val chunks = Await.result(chunksFuture, 3.seconds)

      // Every emitted chunk obeys the limit, and concatenating the chunks
      // reproduces the original byte sequence.
      chunks.forall(_.size <= 2) should be(true)
      chunks.fold(ByteString.empty)(_ ++ _) should be(ByteString(1, 2, 3, 4, 5, 6, 7, 8, 9))
    }

    "have a working bytes limiter" in {
      val SizeLimit = 9

      //#bytes-limiter
      import akka.stream.stage._

      // Passes ByteStrings through unchanged but fails the stream with an
      // IllegalStateException once the cumulative byte count exceeds maximumBytes.
      class ByteLimiter(val maximumBytes: Long) extends GraphStage[FlowShape[ByteString, ByteString]] {
        val in = Inlet[ByteString]("ByteLimiter.in")
        val out = Outlet[ByteString]("ByteLimiter.out")
        override val shape = FlowShape.of(in, out)

        override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
          // Total number of bytes seen so far.
          private var count = 0

          setHandlers(in, out, new InHandler with OutHandler {

            override def onPull(): Unit = {
              pull(in)
            }

            override def onPush(): Unit = {
              val chunk = grab(in)
              count += chunk.size
              if (count > maximumBytes) failStage(new IllegalStateException("Too many bytes"))
              else push(out, chunk)
            }

          })
        }
      }

      val limiter = Flow[ByteString].via(new ByteLimiter(SizeLimit))
      //#bytes-limiter

      // bytes1 totals exactly 9 bytes (at the limit), bytes2 totals 10 (over it).
      val bytes1 = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9)))
      val bytes2 = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9, 10)))

      Await.result(bytes1.via(limiter).limit(10).runWith(Sink.seq), 3.seconds)
        .fold(ByteString.empty)(_ ++ _) should be(ByteString(1, 2, 3, 4, 5, 6, 7, 8, 9))

      an[IllegalStateException] must be thrownBy {
        Await.result(bytes2.via(limiter).limit(10).runWith(Sink.seq), 3.seconds)
      }
    }

    "demonstrate compacting" in {
      val data = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9)))

      //#compacting-bytestrings
      val compacted: Source[ByteString, NotUsed] = data.map(_.compact)
      //#compacting-bytestrings

      Await.result(compacted.limit(10).runWith(Sink.seq), 3.seconds).forall(_.isCompact) should be(true)
    }

  }

}
|