format source with scalafmt, #26511

Auto Format authored 2019-03-13 10:56:20 +01:00; committed by Patrik Nordwall
parent 2ba9b988df
commit 75579bed17
779 changed files with 15729 additions and 13096 deletions
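The project's actual .scalafmt.conf is not part of this diff. For readers who want to approximate the style visible in the hunks below (overflowing argument lists broken after the opening parenthesis and indented, no vertical alignment, closing parenthesis kept on the last argument line), a minimal sketch of such a configuration might look like this; every key and value here is an assumption, not the committed file:

    # Hypothetical .scalafmt.conf sketch -- not the file from this commit.
    style = default
    maxColumn = 120
    # no vertical alignment of arguments with the opening parenthesis
    align = none
    # keep the closing parenthesis on the last argument line
    danglingParentheses = false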


@@ -117,8 +117,9 @@ class GraphDSLDocSpec extends AkkaSpec {
     //#graph-dsl-components-create
     object PriorityWorkerPool {
-      def apply[In, Out](worker: Flow[In, Out, Any],
-                         workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = {
+      def apply[In, Out](
+          worker: Flow[In, Out, Any],
+          workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = {
         GraphDSL.create() { implicit b =>
           import GraphDSL.Implicits._
@@ -138,9 +139,10 @@ class GraphDSLDocSpec extends AkkaSpec {
           // We now expose the input ports of the priorityMerge and the output
           // of the resultsMerge as our PriorityWorkerPool ports
           // -- all neatly wrapped in our domain specific Shape
-          PriorityWorkerPoolShape(jobsIn = priorityMerge.in(0),
-                                  priorityJobsIn = priorityMerge.preferred,
-                                  resultsOut = resultsMerge.out)
+          PriorityWorkerPoolShape(
+            jobsIn = priorityMerge.in(0),
+            priorityJobsIn = priorityMerge.preferred,
+            resultsOut = resultsMerge.out)
         }
       }
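For context: the reformatted apply above is the GraphDSL documentation's factory for wrapping a worker pool in a domain-specific shape. A usage sketch, assuming the PriorityWorkerPoolShape definition from the same doc spec and Akka 2.5-era APIs:

    import akka.actor.ActorSystem
    import akka.stream.{ ActorMaterializer, ClosedShape }
    import akka.stream.scaladsl.{ Flow, GraphDSL, RunnableGraph, Sink, Source }

    implicit val system = ActorSystem("pool-demo")
    implicit val materializer = ActorMaterializer()

    val worker = Flow[String].map("done: " + _)

    RunnableGraph
      .fromGraph(GraphDSL.create() { implicit b =>
        import GraphDSL.Implicits._
        // b.add embeds the pool as a single node exposing the shape's three ports
        val pool = b.add(PriorityWorkerPool(worker, workerCount = 4))
        Source(1 to 100).map("job: " + _) ~> pool.jobsIn
        Source(1 to 10).map("priority job: " + _) ~> pool.priorityJobsIn
        pool.resultsOut ~> Sink.foreach(println)
        ClosedShape
      })
      .run()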


@@ -416,21 +416,20 @@ class GraphStageDocSpec extends AkkaSpec {
       val promise = Promise[A]()
       val logic = new GraphStageLogic(shape) {
-        setHandler(in,
-                   new InHandler {
-                     override def onPush(): Unit = {
-                       val elem = grab(in)
-                       promise.success(elem)
-                       push(out, elem)
-                       // replace handler with one that only forwards elements
-                       setHandler(in, new InHandler {
-                         override def onPush(): Unit = {
-                           push(out, grab(in))
-                         }
-                       })
-                     }
-                   })
+        setHandler(in, new InHandler {
+          override def onPush(): Unit = {
+            val elem = grab(in)
+            promise.success(elem)
+            push(out, elem)
+            // replace handler with one that only forwards elements
+            setHandler(in, new InHandler {
+              override def onPush(): Unit = {
+                push(out, grab(in))
+              }
+            })
+          }
+        })
         setHandler(out, new OutHandler {
           override def onPull(): Unit = {
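For context: this hunk reformats the documentation's example of replacing handlers at runtime, a stage that completes a Promise with the first element and thereafter acts as a plain pass-through. A usage sketch; the class name FirstValue follows the surrounding doc spec and is an assumption here, as is reusing the implicit system/materializer setup from the sketch above:

    import akka.stream.scaladsl.{ Keep, Sink, Source }
    import scala.concurrent.Future
    import system.dispatcher

    // Keep.right retains the stage's materialized Future[Int]
    val firstF: Future[Int] =
      Source(1 to 5).viaMat(new FirstValue[Int])(Keep.right).to(Sink.ignore).run()

    firstF.foreach(first => println(s"first element: $first"))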
@@ -477,44 +476,46 @@ class GraphStageDocSpec extends AkkaSpec {
           pull(in)
         }
-        setHandler(in,
-                   new InHandler {
-                     override def onPush(): Unit = {
-                       val elem = grab(in)
-                       buffer.enqueue(elem)
-                       if (downstreamWaiting) {
-                         downstreamWaiting = false
-                         val bufferedElem = buffer.dequeue()
-                         push(out, bufferedElem)
-                       }
-                       if (!bufferFull) {
-                         pull(in)
-                       }
-                     }
-                     override def onUpstreamFinish(): Unit = {
-                       if (buffer.nonEmpty) {
-                         // emit the rest if possible
-                         emitMultiple(out, buffer.toIterator)
-                       }
-                       completeStage()
-                     }
-                   })
+        setHandler(
+          in,
+          new InHandler {
+            override def onPush(): Unit = {
+              val elem = grab(in)
+              buffer.enqueue(elem)
+              if (downstreamWaiting) {
+                downstreamWaiting = false
+                val bufferedElem = buffer.dequeue()
+                push(out, bufferedElem)
+              }
+              if (!bufferFull) {
+                pull(in)
+              }
+            }
+            override def onUpstreamFinish(): Unit = {
+              if (buffer.nonEmpty) {
+                // emit the rest if possible
+                emitMultiple(out, buffer.toIterator)
+              }
+              completeStage()
+            }
+          })
-        setHandler(out,
-                   new OutHandler {
-                     override def onPull(): Unit = {
-                       if (buffer.isEmpty) {
-                         downstreamWaiting = true
-                       } else {
-                         val elem = buffer.dequeue
-                         push(out, elem)
-                       }
-                       if (!bufferFull && !hasBeenPulled(in)) {
-                         pull(in)
-                       }
-                     }
-                   })
+        setHandler(
+          out,
+          new OutHandler {
+            override def onPull(): Unit = {
+              if (buffer.isEmpty) {
+                downstreamWaiting = true
+              } else {
+                val elem = buffer.dequeue
+                push(out, elem)
+              }
+              if (!bufferFull && !hasBeenPulled(in)) {
+                pull(in)
+              }
+            }
+          })
       }
     }
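For context: the logic above is the doc spec's small buffering stage, which keeps pulling while it has capacity and serves downstream from its internal queue. A driving sketch; the class name TwoBuffer and its String element type are assumptions based on the doc spec's naming:

    import akka.stream.scaladsl.{ Sink, Source }

    // The buffer decouples the two sides: upstream is pulled while there
    // is capacity, downstream is served from the internal queue.
    Source(List("a", "b", "c", "d"))
      .via(new TwoBuffer)
      .runWith(Sink.foreach(println))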


@@ -113,9 +113,10 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec {
       // value to the left is used)
       val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
         producer.toMat(
-          PartitionHub.sink((size, elem) => math.abs(elem.hashCode % size),
-                            startAfterNrOfConsumers = 2,
-                            bufferSize = 256))(Keep.right)
+          PartitionHub.sink(
+            (size, elem) => math.abs(elem.hashCode % size),
+            startAfterNrOfConsumers = 2,
+            bufferSize = 256))(Keep.right)
       // By running/materializing the producer, we get back a Source, which
       // gives us access to the elements published by the producer.
@@ -169,9 +170,10 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec {
       // Note that this is a moving target since the elements are consumed concurrently.
       val runnableGraph: RunnableGraph[Source[Int, NotUsed]] =
         producer.toMat(
-          PartitionHub.statefulSink(() => (info, elem) => info.consumerIds.minBy(id => info.queueSize(id)),
-                                    startAfterNrOfConsumers = 2,
-                                    bufferSize = 16))(Keep.right)
+          PartitionHub.statefulSink(
+            () => (info, elem) => info.consumerIds.minBy(id => info.queueSize(id)),
+            startAfterNrOfConsumers = 2,
+            bufferSize = 16))(Keep.right)
       val fromProducer: Source[Int, NotUsed] = runnableGraph.run()
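For context: every materialization of fromProducer attaches one more consumer to the hub, and the stateful partitioner above routes each element to the consumer with the shortest queue. Consumption, as in the surrounding doc spec, is simply:

    // routing begins once startAfterNrOfConsumers (2) consumers are attached
    fromProducer.runForeach(msg => println("consumer1: " + msg))
    fromProducer.runForeach(msg => println("consumer2: " + msg))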


@@ -206,11 +206,12 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
     val probe = TestProbe()
     val receiver = system.actorOf(Props(new AckingReceiver(probe.ref, ackWith = AckMessage)))
-    val sink = Sink.actorRefWithAck(receiver,
-                                    onInitMessage = InitMessage,
-                                    ackMessage = AckMessage,
-                                    onCompleteMessage = OnCompleteMessage,
-                                    onFailureMessage = onErrorMessage)
+    val sink = Sink.actorRefWithAck(
+      receiver,
+      onInitMessage = InitMessage,
+      ackMessage = AckMessage,
+      onCompleteMessage = OnCompleteMessage,
+      onFailureMessage = onErrorMessage)
     words.map(_.toLowerCase).runWith(sink)
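For context: Sink.actorRefWithAck drives a handshake with the target actor: onInitMessage first, then one stream element per ackMessage the actor replies with, and finally onCompleteMessage (or onFailureMessage on error). A sketch of a compatible receiver; the real AckingReceiver is defined elsewhere in this spec, so everything below is an illustration reusing the message names from the call above:

    import akka.actor.{ Actor, ActorRef }

    class AckingReceiver(probe: ActorRef, ackWith: Any) extends Actor {
      def receive: Receive = {
        case InitMessage =>
          sender() ! ackWith // allow the stream to start emitting
        case OnCompleteMessage =>
          probe ! "stream completed"
        case msg: String =>
          probe ! msg
          sender() ! ackWith // signal demand for the next element
      }
    }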
@@ -295,13 +296,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
     //#external-service-mapAsyncUnordered
     probe.receiveN(7).toSet should be(
-      Set("rolandkuhn@somewhere.com",
-          "patriknw@somewhere.com",
-          "bantonsson@somewhere.com",
-          "drewhk@somewhere.com",
-          "ktosopl@somewhere.com",
-          "mmartynas@somewhere.com",
-          "akkateam@somewhere.com"))
+      Set(
+        "rolandkuhn@somewhere.com",
+        "patriknw@somewhere.com",
+        "bantonsson@somewhere.com",
+        "drewhk@somewhere.com",
+        "ktosopl@somewhere.com",
+        "mmartynas@somewhere.com",
+        "akkateam@somewhere.com"))
   }
   "careful managed blocking with mapAsync" in {
@@ -332,13 +334,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
     //#blocking-mapAsync
     probe.receiveN(7).toSet should be(
-      Set("rolandkuhn".hashCode.toString,
-          "patriknw".hashCode.toString,
-          "bantonsson".hashCode.toString,
-          "drewhk".hashCode.toString,
-          "ktosopl".hashCode.toString,
-          "mmartynas".hashCode.toString,
-          "akkateam".hashCode.toString))
+      Set(
+        "rolandkuhn".hashCode.toString,
+        "patriknw".hashCode.toString,
+        "bantonsson".hashCode.toString,
+        "drewhk".hashCode.toString,
+        "ktosopl".hashCode.toString,
+        "mmartynas".hashCode.toString,
+        "akkateam".hashCode.toString))
   }
   "careful managed blocking with map" in {
@@ -452,16 +455,17 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
     //#sometimes-slow-mapAsyncUnordered
     probe.receiveN(10).toSet should be(
-      Set("after: A",
-          "after: B",
-          "after: C",
-          "after: D",
-          "after: E",
-          "after: F",
-          "after: G",
-          "after: H",
-          "after: I",
-          "after: J"))
+      Set(
+        "after: A",
+        "after: B",
+        "after: C",
+        "after: D",
+        "after: E",
+        "after: F",
+        "after: G",
+        "after: H",
+        "after: I",
+        "after: J"))
   }
   "illustrate use of source queue" in {


@@ -35,10 +35,11 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec {
   "demonstrate a restart with backoff source" in compileOnlySpec {
     //#restart-with-backoff-source
-    val restartSource = RestartSource.withBackoff(minBackoff = 3.seconds,
-                                                  maxBackoff = 30.seconds,
-                                                  randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
-                                                  maxRestarts = 20 // limits the amount of restarts to 20
+    val restartSource = RestartSource.withBackoff(
+      minBackoff = 3.seconds,
+      maxBackoff = 30.seconds,
+      randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
+      maxRestarts = 20 // limits the amount of restarts to 20
     ) { () =>
       // Create a source from a future of a source
       Source.fromFutureSource {
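For context: the () => Source factory passed to withBackoff is invoked afresh on every restart, with the jittered delays configured above. Attaching the restartable source to a sink is then ordinary stream wiring, sketched here assuming a materializer in scope:

    import akka.stream.scaladsl.Sink

    // Failures inside the wrapped source trigger a restart after the
    // backoff delay, at most maxRestarts (20) times.
    restartSource.runWith(Sink.foreach(event => println(s"got: $event")))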


@@ -35,27 +35,28 @@ class RecipeByteStrings extends RecipeSpec {
             emitChunk()
           }
         })
-        setHandler(in,
-                   new InHandler {
-                     override def onPush(): Unit = {
-                       val elem = grab(in)
-                       buffer ++= elem
-                       emitChunk()
-                     }
-                     override def onUpstreamFinish(): Unit = {
-                       if (buffer.isEmpty) completeStage()
-                       else {
-                         // There are elements left in buffer, so
-                         // we keep accepting downstream pulls and push from buffer until emptied.
-                         //
-                         // It might be though, that the upstream finished while it was pulled, in which
-                         // case we will not get an onPull from the downstream, because we already had one.
-                         // In that case we need to emit from the buffer.
-                         if (isAvailable(out)) emitChunk()
-                       }
-                     }
-                   })
+        setHandler(
+          in,
+          new InHandler {
+            override def onPush(): Unit = {
+              val elem = grab(in)
+              buffer ++= elem
+              emitChunk()
+            }
+            override def onUpstreamFinish(): Unit = {
+              if (buffer.isEmpty) completeStage()
+              else {
+                // There are elements left in buffer, so
+                // we keep accepting downstream pulls and push from buffer until emptied.
+                //
+                // It might be though, that the upstream finished while it was pulled, in which
+                // case we will not get an onPull from the downstream, because we already had one.
+                // In that case we need to emit from the buffer.
+                if (isAvailable(out)) emitChunk()
+              }
+            }
+          })
       private def emitChunk(): Unit = {
         if (buffer.isEmpty) {
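For context: this is the ByteString chunker recipe, which re-slices an arbitrarily fragmented stream of ByteStrings into fixed-size chunks. A usage sketch; the class name Chunker follows the recipe and should be treated as an assumption:

    import akka.stream.scaladsl.{ Sink, Source }
    import akka.util.ByteString

    val rawBytes = Source(List(ByteString("abcdefg"), ByteString("hijklmno")))

    // Emits at-most-4-byte chunks regardless of how the input was sliced.
    rawBytes
      .via(new Chunker(4))
      .runWith(Sink.foreach(chunk => println(chunk.utf8String)))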
@@ -93,21 +94,19 @@ class RecipeByteStrings extends RecipeSpec {
       override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
         private var count = 0
-        setHandlers(in,
-                    out,
-                    new InHandler with OutHandler {
-                      override def onPull(): Unit = {
-                        pull(in)
-                      }
-                      override def onPush(): Unit = {
-                        val chunk = grab(in)
-                        count += chunk.size
-                        if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes"))
-                        else push(out, chunk)
-                      }
-                    })
+        setHandlers(in, out, new InHandler with OutHandler {
+          override def onPull(): Unit = {
+            pull(in)
+          }
+          override def onPush(): Unit = {
+            val chunk = grab(in)
+            count += chunk.size
+            if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes"))
+            else push(out, chunk)
+          }
+        })
       }
     }
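For context: this stage fails the stream as soon as the running byte count exceeds maximumBytes, and forwards chunks untouched otherwise. A sketch, with ByteLimiter as the assumed name of the enclosing class:

    import akka.stream.scaladsl.{ Sink, Source }
    import akka.util.ByteString

    // Fails with IllegalStateException once more than 100 bytes have passed.
    Source(List.fill(100)(ByteString("payload")))
      .via(new ByteLimiter(100))
      .runWith(Sink.ignore)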


@@ -37,10 +37,11 @@ class RecipeGlobalRateLimit extends RecipeSpec {
     private var waitQueue = immutable.Queue.empty[ActorRef]
     private var permitTokens = maxAvailableTokens
-    private val replenishTimer = system.scheduler.schedule(initialDelay = tokenRefreshPeriod,
-                                                           interval = tokenRefreshPeriod,
-                                                           receiver = self,
-                                                           ReplenishTokens)
+    private val replenishTimer = system.scheduler.schedule(
+      initialDelay = tokenRefreshPeriod,
+      interval = tokenRefreshPeriod,
+      receiver = self,
+      ReplenishTokens)
     override def receive: Receive = open
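For context: schedule returns a Cancellable, so the limiter actor should stop the replenish tick when it stops. A fragment sketching that cleanup (the recipe presumably does this in postStop; an assumption here):

    // Stop replenishing tokens when the limiter actor is stopped.
    override def postStop(): Unit = {
      replenishTimer.cancel()
    }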


@@ -55,22 +55,23 @@ object HoldOps {
       private var currentValue: T = _
       private var waitingFirstValue = true
-      setHandlers(in,
-                  out,
-                  new InHandler with OutHandler {
-                    override def onPush(): Unit = {
-                      currentValue = grab(in)
-                      if (waitingFirstValue) {
-                        waitingFirstValue = false
-                        if (isAvailable(out)) push(out, currentValue)
-                      }
-                      pull(in)
-                    }
-                    override def onPull(): Unit = {
-                      if (!waitingFirstValue) push(out, currentValue)
-                    }
-                  })
+      setHandlers(
+        in,
+        out,
+        new InHandler with OutHandler {
+          override def onPush(): Unit = {
+            currentValue = grab(in)
+            if (waitingFirstValue) {
+              waitingFirstValue = false
+              if (isAvailable(out)) push(out, currentValue)
+            }
+            pull(in)
+          }
+          override def onPull(): Unit = {
+            if (!waitingFirstValue) push(out, currentValue)
+          }
+        })
       override def preStart(): Unit = {
         pull(in)
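For context: this hold operator answers every downstream pull with the most recent upstream value, waiting for the first element before emitting anything. A usage sketch, with HoldWithWait as the recipe's class name and a materializer assumed in scope:

    import akka.stream.scaladsl.{ Sink, Source }

    // Downstream demand is answered with the latest value seen so far;
    // nothing is emitted until the first element arrives.
    Source(1 to 1000)
      .via(new HoldWithWait[Int])
      .take(5)
      .runWith(Sink.foreach(println))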


@@ -17,11 +17,12 @@ class RecipeParseLines extends RecipeSpec {
   "work" in {
     val rawData = Source(
-      List(ByteString("Hello World"),
-           ByteString("\r"),
-           ByteString("!\r"),
-           ByteString("\nHello Akka!\r\nHello Streams!"),
-           ByteString("\r\n\r\n")))
+      List(
+        ByteString("Hello World"),
+        ByteString("\r"),
+        ByteString("!\r"),
+        ByteString("\nHello Akka!\r\nHello Streams!"),
+        ByteString("\r\n\r\n")))
     //#parse-lines
     import akka.stream.scaladsl.Framing
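For context: the hunk ends just before the recipe's framing call. The standard parse-lines pattern that follows this import uses Framing.delimiter, sketched here with parameter values given as an assumption rather than quoted from the file:

    // Split on CRLF, cap the frame length, tolerate a trailing
    // unterminated line, then decode each frame to a String.
    val linesStream = rawData
      .via(Framing.delimiter(ByteString("\r\n"), maximumFrameLength = 100, allowTruncation = true))
      .map(_.utf8String)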


@@ -40,9 +40,8 @@ class RecipeReduceByKey extends RecipeSpec {
     def words = Source(List("hello", "world", "and", "hello", "universe", "akka") ++ List.fill(1000)("rocks!"))
     //#reduce-by-key-general
-    def reduceByKey[In, K, Out](maximumGroupSize: Int,
-                                groupKey: (In) => K,
-                                map: (In) => Out)(reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = {
+    def reduceByKey[In, K, Out](maximumGroupSize: Int, groupKey: (In) => K, map: (In) => Out)(
+        reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = {
       Flow[In]
         .groupBy[K](maximumGroupSize, groupKey)
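For context: the hunk truncates after groupBy. A self-contained sketch of the whole recipe body, following the documented pattern (map each element to a key/value pair inside its substream, reduce per key, then merge the substreams back); the continuation lines are an assumption, not quoted from the file:

    import akka.NotUsed
    import akka.stream.scaladsl.Flow

    def reduceByKeySketch[In, K, Out](maximumGroupSize: Int, groupKey: In => K, map: In => Out)(
        reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] =
      Flow[In]
        .groupBy[K](maximumGroupSize, groupKey)
        .map(e => groupKey(e) -> map(e))
        .reduce((l, r) => l._1 -> reduce(l._2, r._2))
        .mergeSubstreams

For example, reduceByKeySketch(1000, identity[String], _ => 1)(_ + _) turns a stream of words into a stream of (word, count) pairs.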


@@ -20,9 +20,10 @@ object SourceOrFlow {
       //#log
       .log(name = "myStream")
       .addAttributes(
-        Attributes.logLevels(onElement = Attributes.LogLevels.Off,
-                             onFailure = Attributes.LogLevels.Error,
-                             onFinish = Attributes.LogLevels.Info))
+        Attributes.logLevels(
+          onElement = Attributes.LogLevels.Off,
+          onFailure = Attributes.LogLevels.Error,
+          onFinish = Attributes.LogLevels.Info))
     //#log
   }
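For context: the attributes above silence per-element logging while keeping stream completion at info and failures at error. The same operator in a runnable line, assuming a materializer in scope:

    import akka.stream.Attributes
    import akka.stream.scaladsl.{ Sink, Source }

    // Logs completion at info and failures at error, but not each element.
    Source(1 to 5)
      .log(name = "myStream")
      .addAttributes(
        Attributes.logLevels(
          onElement = Attributes.LogLevels.Off,
          onFailure = Attributes.LogLevels.Error,
          onFinish = Attributes.LogLevels.Info))
      .runWith(Sink.ignore)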