=htc, doc replace usages of deprecated methods of FileIO (#20928)

Nafer Sanabria 2016-07-10 10:41:57 -05:00 committed by Konrad Malawski
parent c81ea4f36b
commit 32810e1f1d
12 changed files with 34 additions and 26 deletions

View file

@@ -136,8 +136,8 @@ IO Sources / Sinks materialize IOResult
 Materialized values of the following sources and sinks:
-* ``FileIO.fromFile``
-* ``FileIO.toFile``
+* ``FileIO.fromPath``
+* ``FileIO.toPath``
 * ``StreamConverters.fromInputStream``
 * ``StreamConverters.fromOutputStream``
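The two StreamConverters factories in this list are untouched by the commit but share the new materialized type. A minimal Scala sketch of that contract, assuming Akka Streams 2.4.x and using throwaway in-memory streams (the object and value names are illustrative):

import java.io.{ ByteArrayInputStream, ByteArrayOutputStream }
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl.StreamConverters
import scala.concurrent.Future

object StreamConvertersIOResultSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val in  = new ByteArrayInputStream("hello".getBytes("UTF-8"))
  val out = new ByteArrayOutputStream()

  // Both ends materialize an IOResult; keeping the sink's value tells us how many bytes were written.
  val written: Future[IOResult] =
    StreamConverters.fromInputStream(() => in)
      .runWith(StreamConverters.fromOutputStream(() => out))

  written.foreach(res => println(s"wrote ${res.count} bytes, status: ${res.status}"))
}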

View file

@@ -100,7 +100,7 @@ Akka Streams provide simple Sources and Sinks that can work with :class:`ByteStr
 on files.
-Streaming data from a file is as easy as creating a `FileIO.fromFile` given a target file, and an optional
+Streaming data from a file is as easy as creating a `FileIO.fromPath` given a target path, and an optional
 ``chunkSize`` which determines the buffer size determined as one "element" in such stream:
 .. includecode:: ../code/docs/stream/io/StreamFileDocTest.java#file-source

View file

@@ -90,7 +90,7 @@ accepts strings as its input and when materialized it will create auxiliary
 information of type ``CompletionStage<IOResult>`` (when chaining operations on
 a :class:`Source` or :class:`Flow` the type of the auxiliary information—called
 the “materialized value”—is given by the leftmost starting point; since we want
-to retain what the ``FileIO.toFile`` sink has to offer, we need to say
+to retain what the ``FileIO.toPath`` sink has to offer, we need to say
 ``Keep.right()``).
 We can use the new and shiny :class:`Sink` we just created by

View file

@@ -35,7 +35,7 @@ class FileUploadExamplesSpec extends RoutingSpec {
 // stream into a file as the chunks of it arrives and return a future
 // file to where it got stored
 val file = File.createTempFile("upload", "tmp")
-b.entity.dataBytes.runWith(FileIO.toFile(file)).map(_ =>
+b.entity.dataBytes.runWith(FileIO.toPath(file.toPath)).map(_ =>
 (b.name -> file))
 case b: BodyPart =>
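For context, a self-contained sketch of the pattern the updated example uses: draining a part's dataBytes into a temporary file with FileIO.toPath. The byte source is faked from a string and the field name is a placeholder, since this sketch does not pull in akka-http:

import java.io.File
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ FileIO, Source }
import akka.util.ByteString
import scala.concurrent.Future

object UploadToTempFileSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Stand-in for the uploaded body part's dataBytes.
  val dataBytes: Source[ByteString, Any] = Source.single(ByteString("uploaded content"))

  // Same shape as the example above: create a temp file and drain the chunks into it.
  val file = File.createTempFile("upload", "tmp")
  val stored: Future[(String, File)] =
    dataBytes.runWith(FileIO.toPath(file.toPath)).map(_ => "fieldName" -> file)

  stored.foreach { case (name, f) => println(s"$name stored at ${f.getAbsolutePath}") }
}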

View file

@@ -138,8 +138,8 @@ IO Sources / Sinks materialize IOResult
 Materialized values of the following sources and sinks:
-* ``FileIO.fromFile``
-* ``FileIO.toFile``
+* ``FileIO.fromPath``
+* ``FileIO.toPath``
 * ``StreamConverters.fromInputStream``
 * ``StreamConverters.fromOutputStream``
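A minimal Scala sketch of what the renamed file methods materialize, assuming Akka Streams 2.4.x; the file names are placeholders:

import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl.FileIO
import scala.concurrent.Future

object FileCopySketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Copy in.txt to out.txt; keep the sink's materialized value to see what was written.
  val copied: Future[IOResult] =
    FileIO.fromPath(Paths.get("in.txt"))
      .runWith(FileIO.toPath(Paths.get("out.txt")))

  copied.foreach(res => println(s"copied ${res.count} bytes, successful: ${res.wasSuccessful}"))
}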

View file

@@ -100,7 +100,7 @@ Akka Streams provide simple Sources and Sinks that can work with :class:`ByteStr
 on files.
-Streaming data from a file is as easy as creating a `FileIO.fromFile` given a target file, and an optional
+Streaming data from a file is as easy as creating a `FileIO.fromPath` given a target path, and an optional
 ``chunkSize`` which determines the buffer size determined as one "element" in such stream:
 .. includecode:: ../code/docs/stream/io/StreamFileDocSpec.scala#file-source
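A short Scala sketch of the sentence above, assuming Akka Streams 2.4.x; the file name and chunk size are arbitrary:

import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ FileIO, Sink }
import akka.util.ByteString

object ChunkedFileReadSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  // chunkSize is the buffer size, i.e. how large each emitted ByteString element is (defaults to 8192).
  FileIO.fromPath(Paths.get("example.csv"), chunkSize = 4096)
    .runWith(Sink.foreach((chunk: ByteString) => println(s"read ${chunk.size} bytes")))
}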

View file

@@ -91,7 +91,7 @@ accepts strings as its input and when materialized it will create auxiliary
 information of type ``Future[IOResult]`` (when chaining operations on
 a :class:`Source` or :class:`Flow` the type of the auxiliary information—called
 the “materialized value”—is given by the leftmost starting point; since we want
-to retain what the ``FileIO.toFile`` sink has to offer, we need to say
+to retain what the ``FileIO.toPath`` sink has to offer, we need to say
 ``Keep.right``).
 We can use the new and shiny :class:`Sink` we just created by
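A minimal sketch along the lines of the quickstart text above: a Sink that accepts strings and, via Keep.right, retains the Future[IOResult] offered by the FileIO.toPath sink (object, method, and file names are illustrative):

import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl.{ FileIO, Flow, Keep, Sink, Source }
import akka.util.ByteString
import scala.concurrent.Future

object LineSinkSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  def lineSink(filename: String): Sink[String, Future[IOResult]] =
    Flow[String]
      .map(s => ByteString(s + "\n"))                         // strings in, ByteStrings out
      .toMat(FileIO.toPath(Paths.get(filename)))(Keep.right)  // keep the file sink's IOResult

  val done: Future[IOResult] =
    Source(List("alpha", "beta", "gamma")).runWith(lineSink("lines.txt"))
}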

View file

@@ -9,14 +9,13 @@ import java.io.File
 import java.net.{ URI, URL }
 import akka.http.javadsl.model
-import akka.http.javadsl.model.RequestEntity
 import akka.stream.ActorAttributes
 import akka.stream.scaladsl.{ FileIO, StreamConverters }
 import scala.annotation.tailrec
 import akka.actor.ActorSystem
 import akka.event.LoggingAdapter
-import akka.http.scaladsl.marshalling.{ Marshaller, Marshalling, ToEntityMarshaller }
+import akka.http.scaladsl.marshalling.{ Marshaller, ToEntityMarshaller }
 import akka.http.scaladsl.model._
 import akka.http.scaladsl.model.headers._
 import akka.http.impl.util._
@@ -70,7 +69,7 @@ trait FileAndResourceDirectives {
 withRangeSupportAndPrecompressedMediaTypeSupportAndExtractSettings { settings
 complete {
 HttpEntity.Default(contentType, file.length,
-FileIO.fromFile(file).withAttributes(ActorAttributes.dispatcher(settings.fileIODispatcher)))
+FileIO.fromPath(file.toPath).withAttributes(ActorAttributes.dispatcher(settings.fileIODispatcher)))
 }
 }
 } else complete(HttpEntity.Empty)
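The directive above pins file reads to the settings' file-IO dispatcher via ActorAttributes. A stripped-down sketch of that attribute outside akka-http, assuming Akka Streams 2.4.x and using the stock akka.stream.blocking-io-dispatcher (the file name is a placeholder):

import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.stream.{ ActorAttributes, ActorMaterializer }
import akka.stream.scaladsl.{ FileIO, Sink }

object DispatcherAttributeSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  // Run the blocking file reads on a dedicated dispatcher instead of the default one.
  FileIO.fromPath(Paths.get("big-file.bin"))
    .withAttributes(ActorAttributes.dispatcher("akka.stream.blocking-io-dispatcher"))
    .runWith(Sink.ignore)
}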

View file

@@ -4,10 +4,10 @@
 package akka.http.scaladsl.server.directives
 import java.io.File
-import akka.http.scaladsl.server.{ Directive1, MissingFormFieldRejection }
+import akka.http.scaladsl.server.{ MissingFormFieldRejection, Directive1 }
 import akka.http.scaladsl.model.{ ContentType, Multipart }
 import akka.util.ByteString
 import scala.concurrent.Future
 import scala.util.{ Failure, Success }
 import akka.stream.scaladsl._
@@ -41,7 +41,7 @@ trait FileUploadDirectives {
 case (fileInfo, bytes)
 val destination = File.createTempFile("akka-http-upload", ".tmp")
-val uploadedF: Future[(FileInfo, File)] = bytes.runWith(FileIO.toFile(destination))
+val uploadedF: Future[(FileInfo, File)] = bytes.runWith(FileIO.toPath(destination.toPath))
 .map(_ (fileInfo, destination))
 onComplete[(FileInfo, File)](uploadedF).flatMap {

View file

@@ -34,7 +34,7 @@ object FileIO {
 def toFile(f: File): javadsl.Sink[ByteString, CompletionStage[IOResult]] = toPath(f.toPath)
 /**
-* Creates a Sink that writes incoming [[ByteString]] elements to the given file.
+* Creates a Sink that writes incoming [[ByteString]] elements to the given file path.
 * Overwrites existing files, if you want to append to an existing file use [[#file(Path, util.Set[StandardOpenOption])]].
 *
 * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
@@ -43,7 +43,7 @@ object FileIO {
 * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or
 * set it for a given Source by using [[ActorAttributes]].
 *
-* @param f The file to write to
+* @param f The file path to write to
 */
 def toPath(f: Path): javadsl.Sink[ByteString, CompletionStage[IOResult]] =
 new Sink(scaladsl.FileIO.toPath(f).toCompletionStage())
@@ -65,7 +65,7 @@ object FileIO {
 toPath(f.toPath)
 /**
-* Creates a Sink that writes incoming [[ByteString]] elements to the given file.
+* Creates a Sink that writes incoming [[ByteString]] elements to the given file path.
 *
 * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
 * and a possible exception if IO operation was not completed successfully.
@@ -73,7 +73,7 @@ object FileIO {
 * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or
 * set it for a given Source by using [[ActorAttributes]].
 *
-* @param f The file to write to
+* @param f The file path to write to
 * @param options File open options
 */
 def toPath(f: Path, options: util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[IOResult]] =
@@ -89,6 +89,8 @@ object FileIO {
 *
 * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion,
 * and a possible exception if IO operation was not completed successfully.
+*
+* @param f the file to read from
 */
 @deprecated("Use `fromPath` instead.", "2.4.5")
 def fromFile(f: File): javadsl.Source[ByteString, CompletionStage[IOResult]] = fromPath(f.toPath)
@@ -103,6 +105,8 @@ object FileIO {
 *
 * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion,
 * and a possible exception if IO operation was not completed successfully.
+*
+* @param f the file path to read from
 */
 def fromPath(f: Path): javadsl.Source[ByteString, CompletionStage[IOResult]] = fromPath(f, 8192)
@@ -116,6 +120,8 @@ object FileIO {
 *
 * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion,
 * and a possible exception if IO operation was not completed successfully.
+* @param f the file to read from
+* @param chunkSize the size of each read operation
 */
 @deprecated("Use `fromPath` instead.", "2.4.5")
 def fromFile(f: File, chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] =
@@ -131,6 +137,9 @@ object FileIO {
 *
 * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion,
 * and a possible exception if IO operation was not completed successfully.
+*
+* @param f the file path to read from
+* @param chunkSize the size of each read operation
 */
 def fromPath(f: Path, chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] =
 new Source(scaladsl.FileIO.fromPath(f, chunkSize).toCompletionStage())
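Since both deprecated overloads above simply delegate through File#toPath, call sites can migrate with the same one-line conversion. A hedged sketch in the Scala DSL (the file name is a placeholder; the Java DSL change is analogous):

import java.io.File
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ FileIO, Sink }

object FromFileMigrationSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  val file = new File("data.bin")

  // Before (deprecated since 2.4.5):
  // FileIO.fromFile(file).runWith(Sink.ignore)

  // After: convert the File to a java.nio.file.Path at the call site.
  FileIO.fromPath(file.toPath).runWith(Sink.ignore)
}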

View file

@@ -51,7 +51,7 @@ object FileIO {
 * It materializes a [[Future]] of [[IOResult]] containing the number of bytes read from the source file upon completion,
 * and a possible exception if IO operation was not completed successfully.
 *
-* @param f the file to read from
+* @param f the file path to read from
 * @param chunkSize the size of each read operation, defaults to 8192
 */
 def fromPath(f: Path, chunkSize: Int = 8192): Source[ByteString, Future[IOResult]] =
@@ -74,7 +74,7 @@ object FileIO {
 toPath(f.toPath, options)
 /**
-* Creates a Sink which writes incoming [[ByteString]] elements to the given file. Overwrites existing files by default.
+* Creates a Sink which writes incoming [[ByteString]] elements to the given file path. Overwrites existing files by default.
 *
 * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion,
 * and a possible exception if IO operation was not completed successfully.
@@ -82,7 +82,7 @@ object FileIO {
 * This source is backed by an Actor which will use the dedicated `akka.stream.blocking-io-dispatcher`,
 * unless configured otherwise by using [[ActorAttributes]].
 *
-* @param f the file to write to
+* @param f the file path to write to
 * @param options File open options, defaults to Set(WRITE, CREATE)
 */
 def toPath(f: Path, options: Set[StandardOpenOption] = Set(WRITE, CREATE)): Sink[ByteString, Future[IOResult]] =
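Because toPath exposes the open options explicitly, appending instead of overwriting is just a different option set. A minimal sketch, assuming Akka Streams 2.4.x; the file name is a placeholder and CREATE is included so the sketch also works when the file does not exist yet:

import java.nio.file.Paths
import java.nio.file.StandardOpenOption.{ APPEND, CREATE, WRITE }
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ FileIO, Source }
import akka.util.ByteString

object AppendToFileSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  // Pass WRITE and APPEND instead of the default Set(WRITE, CREATE) to add to the end of the file.
  Source.single(ByteString("another log line\n"))
    .runWith(FileIO.toPath(Paths.get("app.log"), options = Set(CREATE, WRITE, APPEND)))
}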