Merge pull request #1098 from drewhk/wip-IO-UDP-fire-and-forget-drewhk
UDP implementation and generalized Selector
Commit f334b1cfe5: 29 changed files with 1855 additions and 423 deletions
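For orientation, a minimal usage sketch of the fire-and-forget UDP API this commit introduces, distilled from the UdpFFIntegrationSpec further down in this diff. The actor system name, the port, and the assumption that Received messages arrive from the bound socket actor (so it can be replied to via sender) are this sketch's own, not part of the commit.

import java.net.InetSocketAddress
import akka.actor.{ Actor, ActorSystem, Props }
import akka.io.{ IO, UdpFF }

// Binds a datagram socket and echoes every received datagram back to its origin.
class EchoService(local: InetSocketAddress) extends Actor {
  IO(UdpFF)(context.system) ! UdpFF.Bind(self, local)

  def receive = {
    case UdpFF.Bound ⇒ () // bound and ready; the sender of Bound manages the socket
    case UdpFF.Received(data, remote) ⇒
      sender ! UdpFF.Send(data, remote) // assumes the socket actor is the sender of Received
  }
}

object EchoSketch extends App {
  val system = ActorSystem("udp-ff-sketch")
  system.actorOf(Props(new EchoService(new InetSocketAddress("localhost", 12345))), "echo")
}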
@ -9,7 +9,7 @@ import Tcp._
|
|||
import TestUtils._
|
||||
|
||||
class CapacityLimitSpec extends AkkaSpec("akka.loglevel = ERROR\nakka.io.tcp.max-channels = 4")
|
||||
with IntegrationSpecSupport {
|
||||
with TcpIntegrationSpecSupport {
|
||||
|
||||
"The TCP transport implementation" should {
@ -5,7 +5,7 @@
|
|||
package akka.io
|
||||
|
||||
import java.io.IOException
|
||||
import java.net.{ ConnectException, InetSocketAddress, SocketException }
|
||||
import java.net.{ Socket, ConnectException, InetSocketAddress, SocketException }
|
||||
import java.nio.ByteBuffer
|
||||
import java.nio.channels.{ SelectionKey, Selector, ServerSocketChannel, SocketChannel }
|
||||
import java.nio.channels.spi.SelectorProvider
|
||||
|
|
@ -14,18 +14,27 @@ import scala.collection.immutable
|
|||
import scala.concurrent.duration._
|
||||
import scala.util.control.NonFatal
|
||||
import org.scalatest.matchers._
|
||||
import Tcp._
|
||||
import TcpSelector._
|
||||
import akka.io.Tcp._
|
||||
import akka.io.SelectionHandler._
|
||||
import TestUtils._
|
||||
import akka.actor.{ ActorRef, PoisonPill, Terminated }
|
||||
import akka.testkit.{ AkkaSpec, EventFilter, TestActorRef, TestProbe }
|
||||
import akka.util.ByteString
|
||||
import akka.actor.DeathPactException
|
||||
import akka.util.{ Helpers, ByteString }
|
||||
import akka.actor.DeathPactException
|
||||
import java.nio.channels.SelectionKey._
|
||||
import akka.io.Inet.SocketOption
|
||||
|
||||
class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms") {
|
||||
val serverAddress = temporaryServerAddress()
|
||||
|
||||
// Helper to avoid Windows localization-specific differences
|
||||
def ignoreIfWindows(): Unit = {
|
||||
if (Helpers.isWindows) {
|
||||
info("Detected Windows: ignoring check")
|
||||
pending
|
||||
}
|
||||
}
|
||||
|
||||
"An outgoing connection" must {
|
||||
// common behavior
|
||||
|
||||
|
|
@ -33,21 +42,22 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
val userHandler = TestProbe()
|
||||
val selector = TestProbe()
|
||||
val connectionActor =
|
||||
createConnectionActor(options = Vector(SO.ReuseAddress(true)))(selector.ref, userHandler.ref)
|
||||
createConnectionActor(options = Vector(Inet.SO.ReuseAddress(true)))(selector.ref, userHandler.ref)
|
||||
val clientChannel = connectionActor.underlyingActor.channel
|
||||
clientChannel.socket.getReuseAddress must be(true)
|
||||
}
|
||||
|
||||
"set socket options after connecting" in withLocalServer() { localServer ⇒
|
||||
"set socket options after connecting" ignore withLocalServer() { localServer ⇒
|
||||
// Workaround for systems where SO_KEEPALIVE is true by default
|
||||
val userHandler = TestProbe()
|
||||
val selector = TestProbe()
|
||||
val connectionActor =
|
||||
createConnectionActor(options = Vector(SO.KeepAlive(true)))(selector.ref, userHandler.ref)
|
||||
createConnectionActor(options = Vector(SO.KeepAlive(false)))(selector.ref, userHandler.ref)
|
||||
val clientChannel = connectionActor.underlyingActor.channel
|
||||
clientChannel.socket.getKeepAlive must be(false) // only set after connection is established
|
||||
clientChannel.socket.getKeepAlive must be(true) // only set after connection is established
|
||||
EventFilter.warning(pattern = "registration timeout", occurrences = 1) intercept {
|
||||
selector.send(connectionActor, ChannelConnectable)
|
||||
clientChannel.socket.getKeepAlive must be(true)
|
||||
clientChannel.socket.getKeepAlive must be(false)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -65,7 +75,7 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
}
|
||||
|
||||
"bundle incoming Received messages as long as more data is available" in withEstablishedConnection(
|
||||
clientSocketOptions = List(SO.ReceiveBufferSize(1000000)) // to make sure enough data gets through
|
||||
clientSocketOptions = List(Inet.SO.ReceiveBufferSize(1000000)) // to make sure enough data gets through
|
||||
) { setup ⇒
|
||||
import setup._
|
||||
|
||||
|
|
@ -145,14 +155,32 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
writer.expectMsg(Ack)
|
||||
}
|
||||
|
||||
/*
|
||||
* Disabled on Windows: http://support.microsoft.com/kb/214397
|
||||
*
|
||||
* "To optimize performance at the application layer, Winsock copies data buffers from application send calls
|
||||
* to a Winsock kernel buffer. Then, the stack uses its own heuristics (such as Nagle algorithm) to determine
|
||||
* when to actually put the packet on the wire. You can change the amount of Winsock kernel buffer allocated to
|
||||
* the socket using the SO_SNDBUF option (it is 8K by default). If necessary, Winsock can buffer significantly more
|
||||
* than the SO_SNDBUF buffer size. In most cases, the send completion in the application only indicates the data
|
||||
* buffer in an application send call is copied to the Winsock kernel buffer and does not indicate that the data
|
||||
* has hit the network medium. The only exception is when you disable the Winsock buffering by setting
|
||||
* SO_SNDBUF to 0."
|
||||
*/
|
||||
"stop writing in cases of backpressure and resume afterwards" in
|
||||
withEstablishedConnection(setSmallRcvBuffer) { setup ⇒
|
||||
withEstablishedConnection(clientSocketOptions = List(SO.ReceiveBufferSize(1000000))) { setup ⇒
|
||||
info("Currently ignored as SO_SNDBUF is usually a lower bound on the send buffer so the test fails as no real " +
|
||||
"backpressure present.")
|
||||
pending
|
||||
ignoreIfWindows()
|
||||
import setup._
|
||||
object Ack1
|
||||
object Ack2
|
||||
|
||||
clientSideChannel.socket.setSendBufferSize(1024)
|
||||
|
||||
awaitCond(clientSideChannel.socket.getSendBufferSize == 1024)
|
||||
|
||||
val writer = TestProbe()
|
||||
|
||||
// producing backpressure by sending much more than currently fits into
|
||||
|
|
@ -192,7 +220,7 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
|
||||
// the selector interprets StopReading to deregister interest
|
||||
// for reading
|
||||
selector.expectMsg(StopReading)
|
||||
selector.expectMsg(DisableReadInterest)
|
||||
connectionHandler.send(connectionActor, ResumeReading)
|
||||
selector.expectMsg(ReadInterest)
|
||||
}
|
||||
|
|
@ -231,7 +259,20 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
connectionHandler.expectNoMsg(500.millis)
|
||||
}
|
||||
|
||||
"abort the connection and reply with `Aborted` upong reception of an `Abort` command (simplified)" in withEstablishedConnection() { setup ⇒
|
||||
import setup._
|
||||
|
||||
connectionHandler.send(connectionActor, Abort)
|
||||
connectionHandler.expectMsg(Aborted)
|
||||
|
||||
assertThisConnectionActorTerminated()
|
||||
|
||||
val buffer = ByteBuffer.allocate(1)
|
||||
val thrown = evaluating { serverSideChannel.read(buffer) } must produce[IOException]
|
||||
}
|
||||
|
||||
"abort the connection and reply with `Aborted` upong reception of an `Abort` command" in withEstablishedConnection() { setup ⇒
|
||||
ignoreIfWindows()
|
||||
import setup._
|
||||
|
||||
connectionHandler.send(connectionActor, Abort)
|
||||
|
|
@ -244,7 +285,49 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
thrown.getMessage must be("Connection reset by peer")
|
||||
}
|
||||
|
||||
/*
|
||||
* Partly disabled on Windows: http://support.microsoft.com/kb/214397
|
||||
*
|
||||
* "To optimize performance at the application layer, Winsock copies data buffers from application send calls
|
||||
* to a Winsock kernel buffer. Then, the stack uses its own heuristics (such as Nagle algorithm) to determine
|
||||
* when to actually put the packet on the wire. You can change the amount of Winsock kernel buffer allocated to
|
||||
* the socket using the SO_SNDBUF option (it is 8K by default). If necessary, Winsock can buffer significantly more
|
||||
* than the SO_SNDBUF buffer size. In most cases, the send completion in the application only indicates the data
|
||||
* buffer in an application send call is copied to the Winsock kernel buffer and does not indicate that the data
|
||||
* has hit the network medium. The only exception is when you disable the Winsock buffering by setting
|
||||
* SO_SNDBUF to 0."
|
||||
*/
|
||||
"close the connection and reply with `ConfirmedClosed` upong reception of an `ConfirmedClose` command (simplified)" in withEstablishedConnection(setSmallRcvBuffer) { setup ⇒
|
||||
import setup._
|
||||
|
||||
// we should test here that a pending write command is properly finished first
|
||||
object Ack
|
||||
// set an artificially small send buffer size so that the write is queued
|
||||
// inside the connection actor
|
||||
clientSideChannel.socket.setSendBufferSize(1024)
|
||||
|
||||
// we send a write and a close command directly afterwards
|
||||
connectionHandler.send(connectionActor, writeCmd(Ack))
|
||||
connectionHandler.send(connectionActor, ConfirmedClose)
|
||||
|
||||
pullFromServerSide(TestSize)
|
||||
connectionHandler.expectMsg(Ack)
|
||||
|
||||
selector.send(connectionActor, ChannelReadable)
|
||||
|
||||
val buffer = ByteBuffer.allocate(1)
|
||||
serverSelectionKey must be(selectedAs(SelectionKey.OP_READ, 2.seconds))
|
||||
serverSideChannel.read(buffer) must be(-1)
|
||||
serverSideChannel.close()
|
||||
|
||||
selector.send(connectionActor, ChannelReadable)
|
||||
connectionHandler.expectMsg(ConfirmedClosed)
|
||||
|
||||
assertThisConnectionActorTerminated()
|
||||
}
|
||||
|
||||
"close the connection and reply with `ConfirmedClosed` upong reception of an `ConfirmedClose` command" in withEstablishedConnection(setSmallRcvBuffer) { setup ⇒
|
||||
ignoreIfWindows()
|
||||
import setup._
|
||||
|
||||
// we should test here that a pending write command is properly finished first
|
||||
|
|
@ -284,13 +367,29 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
|
||||
assertThisConnectionActorTerminated()
|
||||
}
|
||||
"report when peer aborted the connection" in withEstablishedConnection() { setup ⇒
|
||||
"report when peer aborted the connection (simplified)" in withEstablishedConnection() { setup ⇒
|
||||
import setup._
|
||||
|
||||
EventFilter[IOException](occurrences = 1) intercept {
|
||||
abortClose(serverSideChannel)
|
||||
selector.send(connectionActor, ChannelReadable)
|
||||
connectionHandler.expectMsgType[ErrorClosed].cause must be("Connection reset by peer")
|
||||
val err = connectionHandler.expectMsgType[ErrorClosed]
|
||||
}
|
||||
// wait a while
|
||||
connectionHandler.expectNoMsg(200.millis)
|
||||
|
||||
assertThisConnectionActorTerminated()
|
||||
}
|
||||
|
||||
"report when peer aborted the connection" in withEstablishedConnection() { setup ⇒
|
||||
import setup._
|
||||
ignoreIfWindows()
|
||||
|
||||
EventFilter[IOException](occurrences = 1) intercept {
|
||||
abortClose(serverSideChannel)
|
||||
selector.send(connectionActor, ChannelReadable)
|
||||
val err = connectionHandler.expectMsgType[ErrorClosed]
|
||||
err.cause must be("Connection reset by peer")
|
||||
}
|
||||
// wait a while
|
||||
connectionHandler.expectNoMsg(200.millis)
|
||||
|
|
@ -313,33 +412,57 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
assertThisConnectionActorTerminated()
|
||||
}
|
||||
|
||||
// error conditions
|
||||
// This test is disabled on Windows, as the assumption that not calling accept on a server socket means that
|
||||
// no TCP level connection has been established with the client does not hold.
|
||||
"report failed connection attempt while not accepted" in withUnacceptedConnection() { setup ⇒
|
||||
import setup._
|
||||
ignoreIfWindows()
|
||||
// close instead of accept
|
||||
localServer.close()
|
||||
|
||||
EventFilter[SocketException](occurrences = 1) intercept {
|
||||
selector.send(connectionActor, ChannelConnectable)
|
||||
userHandler.expectMsgType[ErrorClosed].cause must be("Connection reset by peer")
|
||||
val err = userHandler.expectMsgType[ErrorClosed]
|
||||
err.cause must be("Connection reset by peer")
|
||||
}
|
||||
|
||||
verifyActorTermination(connectionActor)
|
||||
}
|
||||
|
||||
val UnboundAddress = temporaryServerAddress()
|
||||
"report failed connection attempt when target is unreachable" in
|
||||
"report failed connection attempt when target is unreachable (simplified)" in
|
||||
withUnacceptedConnection(connectionActorCons = createConnectionActor(serverAddress = UnboundAddress)) { setup ⇒
|
||||
import setup._
|
||||
|
||||
val sel = SelectorProvider.provider().openSelector()
|
||||
val key = clientSideChannel.register(sel, SelectionKey.OP_CONNECT | SelectionKey.OP_READ)
|
||||
sel.select(200)
|
||||
// This timeout should be large enough to work on Windows
|
||||
sel.select(3000)
|
||||
|
||||
key.isConnectable must be(true)
|
||||
EventFilter[ConnectException](occurrences = 1) intercept {
|
||||
selector.send(connectionActor, ChannelConnectable)
|
||||
userHandler.expectMsgType[ErrorClosed].cause must be("Connection refused")
|
||||
val err = userHandler.expectMsgType[ErrorClosed]
|
||||
}
|
||||
|
||||
verifyActorTermination(connectionActor)
|
||||
}
|
||||
|
||||
"report failed connection attempt when target is unreachable" in
|
||||
withUnacceptedConnection(connectionActorCons = createConnectionActor(serverAddress = UnboundAddress)) { setup ⇒
|
||||
import setup._
|
||||
ignoreIfWindows()
|
||||
|
||||
val sel = SelectorProvider.provider().openSelector()
|
||||
val key = clientSideChannel.register(sel, SelectionKey.OP_CONNECT | SelectionKey.OP_READ)
|
||||
// This timeout should be large enough to work on Windows
|
||||
sel.select(3000)
|
||||
|
||||
key.isConnectable must be(true)
|
||||
EventFilter[ConnectException](occurrences = 1) intercept {
|
||||
selector.send(connectionActor, ChannelConnectable)
|
||||
val err = userHandler.expectMsgType[ErrorClosed]
|
||||
err.cause must be("Connection refused")
|
||||
}
|
||||
|
||||
verifyActorTermination(connectionActor)
|
||||
|
|
@ -519,7 +642,7 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
val connectionActor = connectionActorCons(selector.ref, userHandler.ref)
|
||||
val clientSideChannel = connectionActor.underlyingActor.channel
|
||||
|
||||
selector.expectMsg(RegisterOutgoingConnection(clientSideChannel))
|
||||
selector.expectMsg(RegisterChannel(clientSideChannel, OP_CONNECT))
|
||||
|
||||
body {
|
||||
UnacceptedSetup(
|
||||
|
|
@ -566,18 +689,21 @@ class TcpConnectionSpec extends AkkaSpec("akka.io.tcp.register-timeout = 500ms")
|
|||
def createConnectionActor(
|
||||
serverAddress: InetSocketAddress = serverAddress,
|
||||
localAddress: Option[InetSocketAddress] = None,
|
||||
options: immutable.Seq[Tcp.SocketOption] = Nil)(
|
||||
options: immutable.Seq[SocketOption] = Nil)(
|
||||
_selector: ActorRef,
|
||||
commander: ActorRef): TestActorRef[TcpOutgoingConnection] = {
|
||||
|
||||
TestActorRef(
|
||||
new TcpOutgoingConnection(Tcp(system), commander, serverAddress, localAddress, options) {
|
||||
val ref = TestActorRef(
|
||||
new TcpOutgoingConnection(Tcp(system), commander, Connect(serverAddress, localAddress, options)) {
|
||||
override def postRestart(reason: Throwable) {
|
||||
// ensure we never restart
|
||||
context.stop(self)
|
||||
}
|
||||
override def selector = _selector
|
||||
})
|
||||
|
||||
ref ! ChannelRegistered
|
||||
ref
|
||||
}
|
||||
|
||||
def abortClose(channel: SocketChannel): Unit = {
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ import TestUtils._
|
|||
import akka.testkit.EventFilter
|
||||
import java.io.IOException
|
||||
|
||||
class IntegrationSpec extends AkkaSpec("akka.loglevel = INFO") with IntegrationSpecSupport {
|
||||
class TcpIntegrationSpec extends AkkaSpec("akka.loglevel = INFO") with TcpIntegrationSpecSupport {
|
||||
|
||||
"The TCP transport implementation" should {
|
||||
|
||||
|
|
@ -8,10 +8,11 @@ import scala.annotation.tailrec
|
|||
import akka.testkit.{ AkkaSpec, TestProbe }
|
||||
import akka.actor.ActorRef
|
||||
import scala.collection.immutable
|
||||
import akka.io.Inet.SocketOption
|
||||
import Tcp._
|
||||
import TestUtils._
|
||||
|
||||
trait IntegrationSpecSupport { _: AkkaSpec ⇒
|
||||
trait TcpIntegrationSpecSupport { _: AkkaSpec ⇒
|
||||
|
||||
class TestSetup {
|
||||
val bindHandler = TestProbe()
|
||||
|
|
@ -8,9 +8,11 @@ import java.net.Socket
|
|||
import scala.concurrent.duration._
|
||||
import akka.actor.{ Terminated, SupervisorStrategy, Actor, Props }
|
||||
import akka.testkit.{ TestProbe, TestActorRef, AkkaSpec }
|
||||
import TcpSelector._
|
||||
import Tcp._
|
||||
import akka.testkit.EventFilter
|
||||
import akka.io.SelectionHandler._
|
||||
import java.nio.channels.SelectionKey._
|
||||
import akka.io.TcpListener.{ RegisterIncoming, FailedRegisterIncoming }
|
||||
|
||||
class TcpListenerSpec extends AkkaSpec("akka.io.tcp.batch-accept-limit = 2") {
|
||||
|
||||
|
|
@ -19,7 +21,7 @@ class TcpListenerSpec extends AkkaSpec("akka.io.tcp.batch-accept-limit = 2") {
|
|||
"register its ServerSocketChannel with its selector" in new TestSetup
|
||||
|
||||
"let the Bind commander know when binding is completed" in new TestSetup {
|
||||
listener ! Bound
|
||||
listener ! ChannelRegistered
|
||||
bindCommander.expectMsg(Bound)
|
||||
}
|
||||
|
||||
|
|
@ -30,17 +32,25 @@ class TcpListenerSpec extends AkkaSpec("akka.io.tcp.batch-accept-limit = 2") {
|
|||
attemptConnectionToEndpoint()
|
||||
attemptConnectionToEndpoint()
|
||||
|
||||
def expectWorkerForCommand: Unit = {
|
||||
selectorRouter.expectMsgPF() {
|
||||
case WorkerForCommand(RegisterIncoming(chan), commander, _) ⇒
|
||||
chan.isOpen must be(true)
|
||||
commander must be === listener
|
||||
}
|
||||
}
|
||||
|
||||
// since the batch-accept-limit is 2 we must only receive 2 accepted connections
|
||||
listener ! ChannelAcceptable
|
||||
|
||||
parent.expectMsg(AcceptInterest)
|
||||
selectorRouter.expectMsgPF() { case RegisterIncomingConnection(_, `handlerRef`, Nil) ⇒ /* ok */ }
|
||||
selectorRouter.expectMsgPF() { case RegisterIncomingConnection(_, `handlerRef`, Nil) ⇒ /* ok */ }
|
||||
expectWorkerForCommand
|
||||
expectWorkerForCommand
|
||||
selectorRouter.expectNoMsg(100.millis)
|
||||
|
||||
// and pick up the last remaining connection on the next ChannelAcceptable
|
||||
listener ! ChannelAcceptable
|
||||
selectorRouter.expectMsgPF() { case RegisterIncomingConnection(_, `handlerRef`, Nil) ⇒ /* ok */ }
|
||||
expectWorkerForCommand
|
||||
}
|
||||
|
||||
"react to Unbind commands by replying with Unbound and stopping itself" in new TestSetup {
|
||||
|
|
@ -59,11 +69,15 @@ class TcpListenerSpec extends AkkaSpec("akka.io.tcp.batch-accept-limit = 2") {
|
|||
attemptConnectionToEndpoint()
|
||||
|
||||
listener ! ChannelAcceptable
|
||||
val channel = selectorRouter.expectMsgType[RegisterIncomingConnection].channel
|
||||
channel.isOpen must be(true)
|
||||
val channel = selectorRouter.expectMsgPF() {
|
||||
case WorkerForCommand(RegisterIncoming(chan), commander, _) ⇒
|
||||
chan.isOpen must be(true)
|
||||
commander must be === listener
|
||||
chan
|
||||
}
|
||||
|
||||
EventFilter.warning(pattern = "selector capacity limit", occurrences = 1) intercept {
|
||||
listener ! CommandFailed(RegisterIncomingConnection(channel, handler.ref, Nil))
|
||||
listener ! FailedRegisterIncoming(channel)
|
||||
awaitCond(!channel.isOpen)
|
||||
}
|
||||
}
|
||||
|
|
@ -80,10 +94,10 @@ class TcpListenerSpec extends AkkaSpec("akka.io.tcp.batch-accept-limit = 2") {
|
|||
val endpoint = TestUtils.temporaryServerAddress()
|
||||
private val parentRef = TestActorRef(new ListenerParent)
|
||||
|
||||
parent.expectMsgType[RegisterServerSocketChannel]
|
||||
parent.expectMsgType[RegisterChannel]
|
||||
|
||||
def bindListener() {
|
||||
listener ! Bound
|
||||
listener ! ChannelRegistered
|
||||
bindCommander.expectMsg(Bound)
|
||||
}
|
||||
|
||||
|
|
@ -93,8 +107,7 @@ class TcpListenerSpec extends AkkaSpec("akka.io.tcp.batch-accept-limit = 2") {
|
|||
|
||||
private class ListenerParent extends Actor {
|
||||
val listener = context.actorOf(
|
||||
props = Props(new TcpListener(selectorRouter.ref, handler.ref, endpoint, 100, bindCommander.ref,
|
||||
Tcp(system).Settings, Nil)),
|
||||
props = Props(new TcpListener(selectorRouter.ref, Tcp(system), bindCommander.ref, Bind(handler.ref, endpoint, 100, Nil))),
|
||||
name = "test-listener-" + counter.next())
|
||||
parent.watch(listener)
|
||||
def receive: Receive = {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,74 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.testkit.{ TestProbe, ImplicitSender, AkkaSpec }
|
||||
import TestUtils._
|
||||
import akka.util.ByteString
|
||||
import java.net.InetSocketAddress
|
||||
import akka.actor.ActorRef
|
||||
|
||||
class UdpConnIntegrationSpec extends AkkaSpec("akka.loglevel = INFO") with ImplicitSender {
|
||||
|
||||
def bindUdp(handler: ActorRef): (InetSocketAddress, ActorRef) = {
|
||||
val address = temporaryServerAddress()
|
||||
val commander = TestProbe()
|
||||
commander.send(IO(UdpFF), UdpFF.Bind(handler, address))
|
||||
commander.expectMsg(UdpFF.Bound)
|
||||
(address, commander.sender)
|
||||
}
|
||||
|
||||
def connectUdp(localAddress: Option[InetSocketAddress], remoteAddress: InetSocketAddress, handler: ActorRef): ActorRef = {
|
||||
val commander = TestProbe()
|
||||
commander.send(IO(UdpConn), UdpConn.Connect(handler, localAddress, remoteAddress, Nil))
|
||||
commander.expectMsg(UdpConn.Connected)
|
||||
commander.sender
|
||||
}
|
||||
|
||||
"The UDP connection oriented implementation" must {
|
||||
|
||||
"be able to send and receive without binding" in {
|
||||
val (serverAddress, server) = bindUdp(testActor)
|
||||
val data1 = ByteString("To infinity and beyond!")
|
||||
val data2 = ByteString("All your datagram belong to us")
|
||||
connectUdp(localAddress = None, serverAddress, testActor) ! UdpConn.Send(data1)
|
||||
|
||||
val clientAddress = expectMsgPF() {
|
||||
case UdpFF.Received(d, a) ⇒
|
||||
d must be === data1
|
||||
a
|
||||
}
|
||||
|
||||
server ! UdpFF.Send(data2, clientAddress)
|
||||
|
||||
// FIXME: Currently this line fails
|
||||
expectMsgPF() {
|
||||
case UdpConn.Received(d) ⇒ d must be === data2
|
||||
}
|
||||
}
|
||||
|
||||
"be able to send and receive with binding" in {
|
||||
val clientAddress = temporaryServerAddress()
|
||||
val (serverAddress, server) = bindUdp(testActor)
|
||||
val data1 = ByteString("To infinity and beyond!")
|
||||
val data2 = ByteString("All your datagram belong to us")
|
||||
connectUdp(Some(clientAddress), serverAddress, testActor) ! UdpConn.Send(data1)
|
||||
|
||||
expectMsgPF() {
|
||||
case UdpFF.Received(d, a) ⇒
|
||||
d must be === data1
|
||||
a must be === clientAddress
|
||||
}
|
||||
|
||||
server ! UdpFF.Send(data2, clientAddress)
|
||||
|
||||
// FIXME: Currently this line fails
|
||||
expectMsgPF() {
|
||||
case UdpConn.Received(d) ⇒ d must be === data2
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.testkit.{ TestProbe, ImplicitSender, AkkaSpec }
|
||||
import akka.io.UdpFF._
|
||||
import TestUtils._
|
||||
import akka.util.ByteString
|
||||
import java.net.InetSocketAddress
|
||||
import akka.actor.ActorRef
|
||||
|
||||
class UdpFFIntegrationSpec extends AkkaSpec("akka.loglevel = INFO") with ImplicitSender {
|
||||
|
||||
def bindUdp(handler: ActorRef): (InetSocketAddress, ActorRef) = {
|
||||
val address = temporaryServerAddress()
|
||||
val commander = TestProbe()
|
||||
commander.send(IO(UdpFF), Bind(handler, address))
|
||||
commander.expectMsg(Bound)
|
||||
(address, commander.sender)
|
||||
}
|
||||
|
||||
val simpleSender: ActorRef = {
|
||||
val commander = TestProbe()
|
||||
commander.send(IO(UdpFF), SimpleSender)
|
||||
commander.expectMsg(SimpleSendReady)
|
||||
commander.sender
|
||||
}
|
||||
|
||||
"The UDP Fire-and-Forget implementation" must {
|
||||
|
||||
"be able to send without binding" in {
|
||||
val (serverAddress, server) = bindUdp(testActor)
|
||||
val data = ByteString("To infinity and beyond!")
|
||||
simpleSender ! Send(data, serverAddress)
|
||||
|
||||
expectMsgType[Received].data must be === data
|
||||
|
||||
}
|
||||
|
||||
"be able to send with binding" in {
|
||||
val (serverAddress, server) = bindUdp(testActor)
|
||||
val (clientAddress, client) = bindUdp(testActor)
|
||||
val data = ByteString("Fly little packet!")
|
||||
|
||||
client ! Send(data, serverAddress)
|
||||
|
||||
expectMsgPF() {
|
||||
case Received(d, a) ⇒
|
||||
d must be === data
|
||||
a must be === clientAddress
|
||||
}
|
||||
server ! Send(data, clientAddress)
|
||||
expectMsgPF() {
|
||||
case Received(d, a) ⇒
|
||||
d must be === data
|
||||
a must be === serverAddress
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -416,7 +416,7 @@ akka {
|
|||
|
||||
# The maximal number of direct buffers kept in the direct buffer pool for
|
||||
# reuse.
|
||||
max-direct-buffer-pool-size = 1000
|
||||
direct-buffer-pool-limit = 1000
|
||||
|
||||
# The duration a connection actor waits for a `Register` message from
|
||||
# its commander before aborting the connection.
|
||||
|
|
@ -425,7 +425,7 @@ akka {
|
|||
# The maximum number of bytes delivered by a `Received` message. Before
|
||||
# more data is read from the network the connection actor will try to
|
||||
# do other work.
|
||||
received-message-size-limit = unlimited
|
||||
max-received-message-size = unlimited
|
||||
|
||||
# Enable fine grained logging of what goes on inside the implementation.
|
||||
# Be aware that this may log more than once per message sent to the actors
|
||||
|
|
@ -445,6 +445,129 @@ akka {
|
|||
management-dispatcher = "akka.actor.default-dispatcher"
|
||||
}
|
||||
|
||||
udp-fire-and-forget {
|
||||
|
||||
# The number of selectors to stripe the served channels over; each of
|
||||
# these will use one select loop on the selector-dispatcher.
|
||||
nr-of-selectors = 1
|
||||
|
||||
# Maximum number of open channels supported by this UDP module. Generally
|
||||
# UDP does not require a large number of channels, therefore it is
|
||||
# recommended to keep this setting low.
|
||||
max-channels = 4096
|
||||
|
||||
# The select loop can be used in two modes:
|
||||
# - setting "infinite" will select without a timeout, hogging a thread
|
||||
# - setting a positive timeout will do a bounded select call,
|
||||
# enabling sharing of a single thread between multiple selectors
|
||||
# (in this case you will have to use a different configuration for the
|
||||
# selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
|
||||
# - setting it to zero means polling, i.e. calling selectNow()
|
||||
select-timeout = infinite
|
||||
|
||||
# When trying to assign a new connection to a selector and the chosen
|
||||
# selector is at full capacity, retry selector choosing and assignment
|
||||
# this many times before giving up
|
||||
selector-association-retries = 10
|
||||
|
||||
# The maximum number of datagrams that are read in one go,
|
||||
# higher numbers decrease latency, lower numbers increase fairness on
|
||||
# the worker-dispatcher
|
||||
receive-throughput = 3
|
||||
|
||||
# The number of bytes per direct buffer in the pool used to read or write
|
||||
# network data from the kernel.
|
||||
direct-buffer-size = 128 KiB
|
||||
|
||||
# The maximal number of direct buffers kept in the direct buffer pool for
|
||||
# reuse.
|
||||
direct-buffer-pool-limit = 1000
|
||||
|
||||
# The maximum number of bytes delivered by a `Received` message. Before
|
||||
# more data is read from the network the connection actor will try to
|
||||
# do other work.
|
||||
received-message-size-limit = unlimited
|
||||
|
||||
# Enable fine grained logging of what goes on inside the implementation.
|
||||
# Be aware that this may log more than once per message sent to the actors
|
||||
# of the UDP implementation.
|
||||
trace-logging = off
|
||||
|
||||
# Fully qualified config path which holds the dispatcher configuration
|
||||
# to be used for running the select() calls in the selectors
|
||||
selector-dispatcher = "akka.io.pinned-dispatcher"
|
||||
|
||||
# Fully qualified config path which holds the dispatcher configuration
|
||||
# for the read/write worker actors
|
||||
worker-dispatcher = "akka.actor.default-dispatcher"
|
||||
|
||||
# Fully qualified config path which holds the dispatcher configuration
|
||||
# for the selector management actors
|
||||
management-dispatcher = "akka.actor.default-dispatcher"
|
||||
}
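A minimal sketch (not part of this commit) of overriding the settings defined in the block above when creating an ActorSystem; only the keys shown in this reference configuration are used, everything else falls back to the defaults.

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

object UdpConfigSketch extends App {
  // Override a few of the new akka.io.udp-fire-and-forget settings.
  val config = ConfigFactory.parseString("""
    akka.io.udp-fire-and-forget {
      nr-of-selectors = 2
      direct-buffer-size = 64 KiB
      trace-logging = on
    }
  """).withFallback(ConfigFactory.load())

  val system = ActorSystem("udp-config-sketch", config)
}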
|
||||
|
||||
udp-connection {
|
||||
|
||||
# The number of selectors to stripe the served channels over; each of
|
||||
# these will use one select loop on the selector-dispatcher.
|
||||
nr-of-selectors = 1
|
||||
|
||||
# Maximum number of open channels supported by this UDP module. Generally
|
||||
# UDP does not require a large number of channels, therefore it is
|
||||
# recommended to keep this setting low.
|
||||
max-channels = 4096
|
||||
|
||||
# The select loop can be used in two modes:
|
||||
# - setting "infinite" will select without a timeout, hogging a thread
|
||||
# - setting a positive timeout will do a bounded select call,
|
||||
# enabling sharing of a single thread between multiple selectors
|
||||
# (in this case you will have to use a different configuration for the
|
||||
# selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
|
||||
# - setting it to zero means polling, i.e. calling selectNow()
|
||||
select-timeout = infinite
|
||||
|
||||
# When trying to assign a new connection to a selector and the chosen
|
||||
# selector is at full capacity, retry selector choosing and assignment
|
||||
# this many times before giving up
|
||||
selector-association-retries = 10
|
||||
|
||||
# The maximum number of datagrams that are read in one go,
|
||||
# higher numbers decrease latency, lower numbers increase fairness on
|
||||
# the worker-dispatcher
|
||||
receive-throughput = 3
|
||||
|
||||
# The number of bytes per direct buffer in the pool used to read or write
|
||||
# network data from the kernel.
|
||||
direct-buffer-size = 128 KiB
|
||||
|
||||
# The maximal number of direct buffers kept in the direct buffer pool for
|
||||
# reuse.
|
||||
direct-buffer-pool-limit = 1000
|
||||
|
||||
# The maximum number of bytes delivered by a `Received` message. Before
|
||||
# more data is read from the network the connection actor will try to
|
||||
# do other work.
|
||||
received-message-size-limit = unlimited
|
||||
|
||||
# Enable fine grained logging of what goes on inside the implementation.
|
||||
# Be aware that this may log more than once per message sent to the actors
|
||||
# of the UDP implementation.
|
||||
trace-logging = off
|
||||
|
||||
# Fully qualified config path which holds the dispatcher configuration
|
||||
# to be used for running the select() calls in the selectors
|
||||
selector-dispatcher = "akka.io.pinned-dispatcher"
|
||||
|
||||
# Fully qualified config path which holds the dispatcher configuration
|
||||
# for the read/write worker actors
|
||||
worker-dispatcher = "akka.actor.default-dispatcher"
|
||||
|
||||
# Fully qualified config path which holds the dispatcher configuration
|
||||
# for the selector management actors
|
||||
management-dispatcher = "akka.actor.default-dispatcher"
|
||||
}
|
||||
|
||||
|
||||
# IMPORTANT NOTICE:
|
||||
#
|
||||
# The following settings belong to the deprecated akka.actor.IO
|
||||
|
|
@ -463,4 +586,6 @@ akka {
|
|||
# 0 or negative means that the platform default will be used.
|
||||
default-backlog = 1000
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,16 +8,6 @@ import java.util.concurrent.atomic.AtomicBoolean
|
|||
import java.nio.ByteBuffer
|
||||
import annotation.tailrec
|
||||
|
||||
trait WithBufferPool {
|
||||
def tcp: TcpExt
|
||||
|
||||
def acquireBuffer(): ByteBuffer =
|
||||
tcp.bufferPool.acquire()
|
||||
|
||||
def releaseBuffer(buffer: ByteBuffer): Unit =
|
||||
tcp.bufferPool.release(buffer)
|
||||
}
|
||||
|
||||
trait BufferPool {
|
||||
def acquire(): ByteBuffer
|
||||
def release(buf: ByteBuffer)
|
||||
|
|
|
|||
|
|
@ -4,7 +4,9 @@
|
|||
|
||||
package akka.io
|
||||
|
||||
import akka.actor.{ ActorRef, ActorSystem, ExtensionKey }
|
||||
import akka.actor._
|
||||
import akka.routing.RandomRouter
|
||||
import akka.io.SelectionHandler.WorkerForCommand
|
||||
|
||||
object IO {
|
||||
|
||||
|
|
@ -14,4 +16,26 @@ object IO {
|
|||
|
||||
def apply[T <: Extension](key: ExtensionKey[T])(implicit system: ActorSystem): ActorRef = key(system).manager
|
||||
|
||||
trait HasFailureMessage {
|
||||
def failureMessage: Any
|
||||
}
|
||||
|
||||
abstract class SelectorBasedManager(selectorSettings: SelectionHandlerSettings, nrOfSelectors: Int) extends Actor {
|
||||
|
||||
val selectorPool = context.actorOf(
|
||||
props = Props(new SelectionHandler(self, selectorSettings)).withRouter(RandomRouter(nrOfSelectors)),
|
||||
name = "selectors")
|
||||
|
||||
private def createWorkerMessage(pf: PartialFunction[HasFailureMessage, Props]): PartialFunction[HasFailureMessage, WorkerForCommand] = {
|
||||
case cmd ⇒
|
||||
val props = pf(cmd)
|
||||
val commander = sender
|
||||
WorkerForCommand(cmd, commander, props)
|
||||
}
|
||||
|
||||
def workerForCommandHandler(pf: PartialFunction[Any, Props]): Receive = {
|
||||
case cmd: HasFailureMessage if pf.isDefinedAt(cmd) ⇒ selectorPool ! createWorkerMessage(pf)(cmd)
|
||||
}
|
||||
}
|
||||
|
||||
}
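A minimal sketch (hypothetical Echo* names, not part of this commit) of how a protocol manager can build on the SelectorBasedManager introduced above: the partial function given to workerForCommandHandler maps an accepted command to the Props of the worker that will serve it, and the base class hands the command to one of the pooled SelectionHandlers, which applies the capacity-protection and retry logic seen later in this diff.

import akka.actor.{ Actor, ActorRef, Props }
import akka.io.SelectionHandlerSettings
import akka.io.IO.{ HasFailureMessage, SelectorBasedManager }

// Hypothetical command; failureMessage is what the commander receives if no selector has capacity.
case class EchoBind(handler: ActorRef) extends HasFailureMessage {
  def failureMessage = EchoBindFailed(this)
}
case class EchoBindFailed(bind: EchoBind)

// Hypothetical worker; a real one would register a channel with its SelectionHandler parent.
class EchoListener(commander: ActorRef, bind: EchoBind) extends Actor {
  def receive = Actor.emptyBehavior
}

class EchoManager(settings: SelectionHandlerSettings) extends SelectorBasedManager(settings, nrOfSelectors = 1) {
  def receive = workerForCommandHandler {
    case bind: EchoBind ⇒
      val commander = sender // capture eagerly; Props creators must not close over sender()
      Props(new EchoListener(commander, bind))
  }
}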
|
||||
akka-actor/src/main/scala/akka/io/Inet.scala (new file, 89 lines added)
|
|
@ -0,0 +1,89 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import java.net.{ DatagramSocket, Socket, ServerSocket }
|
||||
|
||||
object Inet {
|
||||
|
||||
/**
|
||||
* SocketOption is a package of data (from the user) and associated
|
||||
* behavior (how to apply that to a socket).
|
||||
*/
|
||||
trait SocketOption {
|
||||
|
||||
def beforeDatagramBind(ds: DatagramSocket): Unit = ()
|
||||
|
||||
def beforeServerSocketBind(ss: ServerSocket): Unit = ()
|
||||
|
||||
/**
|
||||
* Action to be taken for this option before calling connect()
|
||||
*/
|
||||
def beforeConnect(s: Socket): Unit = ()
|
||||
/**
|
||||
* Action to be taken for this option after connect returned (i.e. on
|
||||
* the slave socket for servers).
|
||||
*/
|
||||
def afterConnect(s: Socket): Unit = ()
|
||||
}
|
||||
|
||||
object SO {
|
||||
|
||||
/**
|
||||
* [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option
|
||||
*
|
||||
* For more information see [[java.net.Socket.setReceiveBufferSize]]
|
||||
*/
|
||||
case class ReceiveBufferSize(size: Int) extends SocketOption {
|
||||
require(size > 0, "ReceiveBufferSize must be > 0")
|
||||
override def beforeServerSocketBind(s: ServerSocket): Unit = s.setReceiveBufferSize(size)
|
||||
override def beforeDatagramBind(s: DatagramSocket): Unit = s.setReceiveBufferSize(size)
|
||||
override def beforeConnect(s: Socket): Unit = s.setReceiveBufferSize(size)
|
||||
}
|
||||
|
||||
// server socket options
|
||||
|
||||
/**
|
||||
* [[akka.io.Inet.SocketOption]] to enable or disable SO_REUSEADDR
|
||||
*
|
||||
* For more information see [[java.net.Socket.setReuseAddress]]
|
||||
*/
|
||||
case class ReuseAddress(on: Boolean) extends SocketOption {
|
||||
override def beforeServerSocketBind(s: ServerSocket): Unit = s.setReuseAddress(on)
|
||||
override def beforeDatagramBind(s: DatagramSocket): Unit = s.setReuseAddress(on)
|
||||
override def beforeConnect(s: Socket): Unit = s.setReuseAddress(on)
|
||||
}
|
||||
|
||||
/**
|
||||
* [[akka.io.Inet.SocketOption]] to set the SO_SNDBUF option.
|
||||
*
|
||||
* For more information see [[java.net.Socket.setSendBufferSize]]
|
||||
*/
|
||||
case class SendBufferSize(size: Int) extends SocketOption {
|
||||
require(size > 0, "SendBufferSize must be > 0")
|
||||
override def afterConnect(s: Socket): Unit = s.setSendBufferSize(size)
|
||||
}
|
||||
|
||||
/**
|
||||
* [[akka.io.Inet.SocketOption]] to set the traffic class or
|
||||
* type-of-service octet in the IP header for packets sent from this
|
||||
* socket.
|
||||
*
|
||||
* For more information see [[java.net.Socket.setTrafficClass]]
|
||||
*/
|
||||
case class TrafficClass(tc: Int) extends SocketOption {
|
||||
require(0 <= tc && tc <= 255, "TrafficClass needs to be in the interval [0, 255]")
|
||||
override def afterConnect(s: Socket): Unit = s.setTrafficClass(tc)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
trait SoForwarders {
|
||||
val ReceiveBufferSize = SO.ReceiveBufferSize
|
||||
val ReuseAddress = SO.ReuseAddress
|
||||
val SendBufferSize = SO.SendBufferSize
|
||||
val TrafficClass = SO.TrafficClass
|
||||
}
|
||||
|
||||
}
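The hooks on Inet.SocketOption also admit user-defined options. A minimal sketch (hypothetical, not part of this commit) enabling SO_BROADCAST before a datagram socket is bound; the usage line reuses the UdpConn.Connect signature shown in the specs earlier in this diff.

import java.net.DatagramSocket
import akka.io.Inet

// Hypothetical option: allow sending to broadcast addresses on a UDP socket.
case class Broadcast(on: Boolean) extends Inet.SocketOption {
  override def beforeDatagramBind(ds: DatagramSocket): Unit = ds.setBroadcast(on)
}

// Usage sketch, passed alongside the predefined SO.* options:
//   UdpConn.Connect(handler, localAddress, remoteAddress, List(Broadcast(on = true)))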
|
||||
|
|
@ -6,58 +6,87 @@ package akka.io
|
|||
|
||||
import java.lang.Runnable
|
||||
import java.nio.channels.spi.SelectorProvider
|
||||
import java.nio.channels.{ ServerSocketChannel, SelectionKey, SocketChannel }
|
||||
import java.nio.channels.{ SelectableChannel, SelectionKey }
|
||||
import java.nio.channels.SelectionKey._
|
||||
import scala.util.control.NonFatal
|
||||
import scala.collection.immutable
|
||||
import scala.concurrent.duration._
|
||||
import akka.actor._
|
||||
import Tcp._
|
||||
import com.typesafe.config.Config
|
||||
import akka.actor.Terminated
|
||||
import akka.io.IO.HasFailureMessage
|
||||
|
||||
private[io] class TcpSelector(manager: ActorRef, tcp: TcpExt) extends Actor with ActorLogging {
|
||||
import TcpSelector._
|
||||
import tcp.Settings._
|
||||
abstract class SelectionHandlerSettings(config: Config) {
|
||||
import config._
|
||||
|
||||
val MaxChannels = getString("max-channels") match {
|
||||
case "unlimited" ⇒ -1
|
||||
case _ ⇒ getInt("max-channels")
|
||||
}
|
||||
val SelectTimeout = getString("select-timeout") match {
|
||||
case "infinite" ⇒ Duration.Inf
|
||||
case x ⇒ Duration(x)
|
||||
}
|
||||
val SelectorAssociationRetries = getInt("selector-association-retries")
|
||||
|
||||
val SelectorDispatcher = getString("selector-dispatcher")
|
||||
val WorkerDispatcher = getString("worker-dispatcher")
|
||||
val TraceLogging = getBoolean("trace-logging")
|
||||
|
||||
require(MaxChannels == -1 || MaxChannels > 0, "max-channels must be > 0 or 'unlimited'")
|
||||
require(SelectTimeout >= Duration.Zero, "select-timeout must not be negative")
|
||||
require(SelectorAssociationRetries >= 0, "selector-association-retries must be >= 0")
|
||||
|
||||
def MaxChannelsPerSelector: Int
|
||||
|
||||
}
|
||||
|
||||
private[io] object SelectionHandler {
|
||||
|
||||
case class WorkerForCommand(apiCommand: HasFailureMessage, commander: ActorRef, childProps: Props)
|
||||
|
||||
case class RegisterChannel(channel: SelectableChannel, initialOps: Int)
|
||||
case object ChannelRegistered
|
||||
case class Retry(command: WorkerForCommand, retriesLeft: Int) { require(retriesLeft >= 0) }
|
||||
|
||||
case object ChannelConnectable
|
||||
case object ChannelAcceptable
|
||||
case object ChannelReadable
|
||||
case object ChannelWritable
|
||||
case object AcceptInterest
|
||||
case object ReadInterest
|
||||
case object DisableReadInterest
|
||||
case object WriteInterest
|
||||
}
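A minimal sketch (not part of this commit; the flow is assumed from the messages above and the connection actors later in this diff) of the protocol a channel actor speaks with the generalized selector: register the channel, wait for ChannelRegistered, then toggle interest flags and react to readiness events.

package akka.io // assumed: SelectionHandler and its messages are private[io]

import java.nio.channels.{ SelectionKey, SocketChannel }
import akka.actor.{ Actor, ActorLogging }
import akka.io.SelectionHandler._

// A channel actor spawned as a child of a SelectionHandler.
class SketchedChannelActor(channel: SocketChannel) extends Actor with ActorLogging {
  context.parent ! RegisterChannel(channel, SelectionKey.OP_READ)

  def receive = {
    case ChannelRegistered ⇒
      log.debug("channel registered with the selector")
    case ChannelReadable ⇒
      // read from `channel` here, then ask the selector to re-arm read interest
      context.parent ! ReadInterest
  }
}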
|
||||
|
||||
private[io] class SelectionHandler(manager: ActorRef, settings: SelectionHandlerSettings) extends Actor with ActorLogging {
|
||||
import SelectionHandler._
|
||||
import settings._
|
||||
|
||||
@volatile var childrenKeys = immutable.HashMap.empty[String, SelectionKey]
|
||||
val sequenceNumber = Iterator.from(0)
|
||||
val selectorManagementDispatcher = context.system.dispatchers.lookup(SelectorDispatcher)
|
||||
val selector = SelectorProvider.provider.openSelector
|
||||
val OP_READ_AND_WRITE = OP_READ + OP_WRITE // compile-time constant
|
||||
val OP_READ_AND_WRITE = OP_READ | OP_WRITE // compile-time constant
|
||||
|
||||
def receive: Receive = {
|
||||
case WriteInterest ⇒ execute(enableInterest(OP_WRITE, sender))
|
||||
case ReadInterest ⇒ execute(enableInterest(OP_READ, sender))
|
||||
case AcceptInterest ⇒ execute(enableInterest(OP_ACCEPT, sender))
|
||||
case WriteInterest ⇒ execute(enableInterest(OP_WRITE, sender))
|
||||
case ReadInterest ⇒ execute(enableInterest(OP_READ, sender))
|
||||
case AcceptInterest ⇒ execute(enableInterest(OP_ACCEPT, sender))
|
||||
|
||||
case StopReading ⇒ execute(disableInterest(OP_READ, sender))
|
||||
case DisableReadInterest ⇒ execute(disableInterest(OP_READ, sender))
|
||||
|
||||
case cmd: RegisterIncomingConnection ⇒
|
||||
handleIncomingConnection(cmd, SelectorAssociationRetries)
|
||||
case cmd: WorkerForCommand ⇒
|
||||
withCapacityProtection(cmd, SelectorAssociationRetries) { spawnChild(cmd.childProps) }
|
||||
|
||||
case cmd: Connect ⇒
|
||||
handleConnect(cmd, SelectorAssociationRetries)
|
||||
case RegisterChannel(channel, initialOps) ⇒
|
||||
execute(registerChannel(channel, sender, initialOps))
|
||||
|
||||
case cmd: Bind ⇒
|
||||
handleBind(cmd, SelectorAssociationRetries)
|
||||
case Retry(WorkerForCommand(cmd, commander, _), 0) ⇒
|
||||
commander ! cmd.failureMessage
|
||||
|
||||
case RegisterOutgoingConnection(channel) ⇒
|
||||
execute(registerOutgoingConnection(channel, sender))
|
||||
|
||||
case RegisterServerSocketChannel(channel) ⇒
|
||||
execute(registerListener(channel, sender))
|
||||
|
||||
case Retry(command, 0) ⇒
|
||||
log.warning("Command '{}' failed since all selectors are at capacity", command)
|
||||
sender ! CommandFailed(command)
|
||||
|
||||
case Retry(cmd: RegisterIncomingConnection, retriesLeft) ⇒
|
||||
handleIncomingConnection(cmd, retriesLeft)
|
||||
|
||||
case Retry(cmd: Connect, retriesLeft) ⇒
|
||||
handleConnect(cmd, retriesLeft)
|
||||
|
||||
case Retry(cmd: Bind, retriesLeft) ⇒
|
||||
handleBind(cmd, retriesLeft)
|
||||
case Retry(cmd, retriesLeft) ⇒
|
||||
withCapacityProtection(cmd, retriesLeft) { spawnChild(cmd.childProps) }
|
||||
|
||||
case Terminated(child) ⇒
|
||||
execute(unregister(child))
|
||||
|
|
@ -67,51 +96,36 @@ private[io] class TcpSelector(manager: ActorRef, tcp: TcpExt) extends Actor with
|
|||
try {
|
||||
try {
|
||||
val iterator = selector.keys.iterator
|
||||
while (iterator.hasNext) iterator.next().channel.close()
|
||||
while (iterator.hasNext) {
|
||||
val key = iterator.next()
|
||||
try key.channel.close()
|
||||
catch {
|
||||
case NonFatal(e) ⇒ log.error(e, "Error closing channel")
|
||||
}
|
||||
}
|
||||
} finally selector.close()
|
||||
} catch {
|
||||
case NonFatal(e) ⇒ log.error(e, "Error closing selector or key")
|
||||
case NonFatal(e) ⇒ log.error(e, "Error closing selector")
|
||||
}
|
||||
}
|
||||
|
||||
// we can never recover from failures of a connection or listener child
|
||||
override def supervisorStrategy = SupervisorStrategy.stoppingStrategy
|
||||
|
||||
def handleIncomingConnection(cmd: RegisterIncomingConnection, retriesLeft: Int): Unit =
|
||||
withCapacityProtection(cmd, retriesLeft) {
|
||||
import cmd._
|
||||
val connection = spawnChild(() ⇒ new TcpIncomingConnection(channel, tcp, handler, options))
|
||||
execute(registerIncomingConnection(channel, connection))
|
||||
}
|
||||
|
||||
def handleConnect(cmd: Connect, retriesLeft: Int): Unit =
|
||||
withCapacityProtection(cmd, retriesLeft) {
|
||||
import cmd._
|
||||
val commander = sender
|
||||
spawnChild(() ⇒ new TcpOutgoingConnection(tcp, commander, remoteAddress, localAddress, options))
|
||||
}
|
||||
|
||||
def handleBind(cmd: Bind, retriesLeft: Int): Unit =
|
||||
withCapacityProtection(cmd, retriesLeft) {
|
||||
import cmd._
|
||||
val commander = sender
|
||||
spawnChild(() ⇒ new TcpListener(context.parent, handler, endpoint, backlog, commander, tcp.Settings, options))
|
||||
}
|
||||
|
||||
def withCapacityProtection(cmd: Command, retriesLeft: Int)(body: ⇒ Unit): Unit = {
|
||||
log.debug("Executing {}", cmd)
|
||||
def withCapacityProtection(cmd: WorkerForCommand, retriesLeft: Int)(body: ⇒ Unit): Unit = {
|
||||
log.debug("Executing [{}]", cmd)
|
||||
if (MaxChannelsPerSelector == -1 || childrenKeys.size < MaxChannelsPerSelector) {
|
||||
body
|
||||
} else {
|
||||
log.warning("Rejecting '{}' with {} retries left, retrying...", cmd, retriesLeft)
|
||||
log.warning("Rejecting [{}] with [{}] retries left, retrying...", cmd, retriesLeft)
|
||||
context.parent forward Retry(cmd, retriesLeft - 1)
|
||||
}
|
||||
}
|
||||
|
||||
def spawnChild(creator: () ⇒ Actor) =
|
||||
def spawnChild(props: Props): ActorRef =
|
||||
context.watch {
|
||||
context.actorOf(
|
||||
props = Props(creator, dispatcher = WorkerDispatcher),
|
||||
props = props.withDispatcher(WorkerDispatcher),
|
||||
name = sequenceNumber.next().toString)
|
||||
}
|
||||
|
||||
|
|
@ -125,29 +139,11 @@ private[io] class TcpSelector(manager: ActorRef, tcp: TcpExt) extends Actor with
|
|||
def updateKeyMap(child: ActorRef, key: SelectionKey): Unit =
|
||||
childrenKeys = childrenKeys.updated(child.path.name, key)
|
||||
|
||||
def registerOutgoingConnection(channel: SocketChannel, connection: ActorRef) =
|
||||
def registerChannel(channel: SelectableChannel, channelActor: ActorRef, initialOps: Int): Task =
|
||||
new Task {
|
||||
def tryRun() {
|
||||
val key = channel.register(selector, OP_CONNECT, connection)
|
||||
updateKeyMap(connection, key)
|
||||
}
|
||||
}
|
||||
|
||||
def registerListener(channel: ServerSocketChannel, listener: ActorRef) =
|
||||
new Task {
|
||||
def tryRun() {
|
||||
val key = channel.register(selector, OP_ACCEPT, listener)
|
||||
updateKeyMap(listener, key)
|
||||
listener ! Bound
|
||||
}
|
||||
}
|
||||
|
||||
def registerIncomingConnection(channel: SocketChannel, connection: ActorRef) =
|
||||
new Task {
|
||||
def tryRun() {
|
||||
// we only enable reading after the user-level connection handler has registered
|
||||
val key = channel.register(selector, 0, connection)
|
||||
updateKeyMap(connection, key)
|
||||
updateKeyMap(channelActor, channel.register(selector, initialOps, channelActor))
|
||||
channelActor ! ChannelRegistered
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -193,17 +189,19 @@ private[io] class TcpSelector(manager: ActorRef, tcp: TcpExt) extends Actor with
|
|||
while (iterator.hasNext) {
|
||||
val key = iterator.next
|
||||
if (key.isValid) {
|
||||
key.interestOps(0) // prevent immediate reselection by always clearing
|
||||
// Cache readyOps() because the performance implications of calling it on different platforms are not clear
|
||||
val readyOps = key.readyOps()
|
||||
key.interestOps(key.interestOps & ~readyOps) // prevent immediate reselection by always clearing
|
||||
val connection = key.attachment.asInstanceOf[ActorRef]
|
||||
key.readyOps match {
|
||||
readyOps match {
|
||||
case OP_READ ⇒ connection ! ChannelReadable
|
||||
case OP_WRITE ⇒ connection ! ChannelWritable
|
||||
case OP_READ_AND_WRITE ⇒ connection ! ChannelWritable; connection ! ChannelReadable
|
||||
case x if (x & OP_ACCEPT) > 0 ⇒ connection ! ChannelAcceptable
|
||||
case x if (x & OP_CONNECT) > 0 ⇒ connection ! ChannelConnectable
|
||||
case x ⇒ log.warning("Invalid readyOps: {}", x)
|
||||
case x ⇒ log.warning("Invalid readyOps: [{}]", x)
|
||||
}
|
||||
} else log.warning("Invalid selection key: {}", key)
|
||||
} else log.warning("Invalid selection key: [{}]", key)
|
||||
}
|
||||
keys.clear() // we need to remove the selected keys from the set, otherwise they remain selected
|
||||
}
|
||||
|
|
@ -213,30 +211,15 @@ private[io] class TcpSelector(manager: ActorRef, tcp: TcpExt) extends Actor with
|
|||
|
||||
selectorManagementDispatcher.execute(select) // start selection "loop"
|
||||
|
||||
// FIXME: Add possibility to signal failure of task to someone
|
||||
abstract class Task extends Runnable {
|
||||
def tryRun()
|
||||
def run() {
|
||||
try tryRun()
|
||||
catch {
|
||||
case _: java.nio.channels.ClosedSelectorException ⇒ // ok, expected during shutdown
|
||||
case NonFatal(e) ⇒ log.error(e, "Error during selector management task: {}", e)
|
||||
case NonFatal(e) ⇒ log.error(e, "Error during selector management task: [{}]", e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private[io] object TcpSelector {
|
||||
case class RegisterOutgoingConnection(channel: SocketChannel)
|
||||
case class RegisterServerSocketChannel(channel: ServerSocketChannel)
|
||||
case class RegisterIncomingConnection(channel: SocketChannel, handler: ActorRef,
|
||||
options: immutable.Traversable[SocketOption]) extends Tcp.Command
|
||||
case class Retry(command: Command, retriesLeft: Int) { require(retriesLeft >= 0) }
|
||||
|
||||
case object ChannelConnectable
|
||||
case object ChannelAcceptable
|
||||
case object ChannelReadable
|
||||
case object ChannelWritable
|
||||
case object AcceptInterest
|
||||
case object ReadInterest
|
||||
case object WriteInterest
|
||||
}
|
||||
}
|
||||
|
|
@ -6,7 +6,7 @@ package akka.io
|
|||
|
||||
import java.net.InetSocketAddress
|
||||
import java.net.Socket
|
||||
import java.net.ServerSocket
|
||||
import akka.io.Inet._
|
||||
import com.typesafe.config.Config
|
||||
import scala.concurrent.duration._
|
||||
import scala.collection.immutable
|
||||
|
|
@ -16,58 +16,15 @@ import akka.actor._
|
|||
object Tcp extends ExtensionKey[TcpExt] {
|
||||
|
||||
// Java API
|
||||
override def get(system: ActorSystem): TcpExt = system.extension(this)
|
||||
|
||||
/**
|
||||
* SocketOption is a package of data (from the user) and associated
|
||||
* behavior (how to apply that to a socket).
|
||||
*/
|
||||
sealed trait SocketOption {
|
||||
/**
|
||||
* Action to be taken for this option before calling bind()
|
||||
*/
|
||||
def beforeBind(s: ServerSocket): Unit = ()
|
||||
/**
|
||||
* Action to be taken for this option before calling connect()
|
||||
*/
|
||||
def beforeConnect(s: Socket): Unit = ()
|
||||
/**
|
||||
* Action to be taken for this option after connect returned (i.e. on
|
||||
* the slave socket for servers).
|
||||
*/
|
||||
def afterConnect(s: Socket): Unit = ()
|
||||
}
|
||||
override def get(system: ActorSystem): TcpExt = super.get(system)
|
||||
|
||||
// shared socket options
|
||||
object SO {
|
||||
|
||||
/**
|
||||
* [[akka.io.Tcp.SocketOption]] to set the SO_RCVBUF option
|
||||
*
|
||||
* For more information see [[java.net.Socket.setReceiveBufferSize]]
|
||||
*/
|
||||
case class ReceiveBufferSize(size: Int) extends SocketOption {
|
||||
require(size > 0, "ReceiveBufferSize must be > 0")
|
||||
override def beforeBind(s: ServerSocket): Unit = s.setReceiveBufferSize(size)
|
||||
override def beforeConnect(s: Socket): Unit = s.setReceiveBufferSize(size)
|
||||
}
|
||||
|
||||
// server socket options
|
||||
|
||||
/**
|
||||
* [[akka.io.Tcp.SocketOption]] to enable or disable SO_REUSEADDR
|
||||
*
|
||||
* For more information see [[java.net.Socket.setReuseAddress]]
|
||||
*/
|
||||
case class ReuseAddress(on: Boolean) extends SocketOption {
|
||||
override def beforeBind(s: ServerSocket): Unit = s.setReuseAddress(on)
|
||||
override def beforeConnect(s: Socket): Unit = s.setReuseAddress(on)
|
||||
}
|
||||
object SO extends Inet.SoForwarders {
|
||||
|
||||
// general socket options
|
||||
|
||||
/**
|
||||
* [[akka.io.Tcp.SocketOption]] to enable or disable SO_KEEPALIVE
|
||||
* [[akka.io.Inet.SocketOption]] to enable or disable SO_KEEPALIVE
|
||||
*
|
||||
* For more information see [[java.net.Socket.setKeepAlive]]
|
||||
*/
|
||||
|
|
@ -76,7 +33,7 @@ object Tcp extends ExtensionKey[TcpExt] {
|
|||
}
|
||||
|
||||
/**
|
||||
* [[akka.io.Tcp.SocketOption]] to enable or disable OOBINLINE (receipt
|
||||
* [[akka.io.Inet.SocketOption]] to enable or disable OOBINLINE (receipt
|
||||
* of TCP urgent data) By default, this option is disabled and TCP urgent
|
||||
* data is silently discarded.
|
||||
*
|
||||
|
|
@ -86,20 +43,10 @@ object Tcp extends ExtensionKey[TcpExt] {
|
|||
override def afterConnect(s: Socket): Unit = s.setOOBInline(on)
|
||||
}
|
||||
|
||||
/**
|
||||
* [[akka.io.Tcp.SocketOption]] to set the SO_SNDBUF option.
|
||||
*
|
||||
* For more information see [[java.net.Socket.setSendBufferSize]]
|
||||
*/
|
||||
case class SendBufferSize(size: Int) extends SocketOption {
|
||||
require(size > 0, "SendBufferSize must be > 0")
|
||||
override def afterConnect(s: Socket): Unit = s.setSendBufferSize(size)
|
||||
}
|
||||
|
||||
// SO_LINGER is handled by the Close code
|
||||
|
||||
/**
|
||||
* [[akka.io.Tcp.SocketOption]] to enable or disable TCP_NODELAY
|
||||
* [[akka.io.Inet.SocketOption]] to enable or disable TCP_NODELAY
|
||||
* (disable or enable Nagle's algorithm)
|
||||
*
|
||||
* For more information see [[java.net.Socket.setTcpNoDelay]]
|
||||
|
|
@ -108,21 +55,12 @@ object Tcp extends ExtensionKey[TcpExt] {
|
|||
override def afterConnect(s: Socket): Unit = s.setTcpNoDelay(on)
|
||||
}
|
||||
|
||||
/**
|
||||
* [[akka.io.Tcp.SocketOption]] to set the traffic class or
|
||||
* type-of-service octet in the IP header for packets sent from this
|
||||
* socket.
|
||||
*
|
||||
* For more information see [[java.net.Socket.setTrafficClass]]
|
||||
*/
|
||||
case class TrafficClass(tc: Int) extends SocketOption {
|
||||
require(0 <= tc && tc <= 255, "TrafficClass needs to be in the interval [0, 255]")
|
||||
override def afterConnect(s: Socket): Unit = s.setTrafficClass(tc)
|
||||
}
|
||||
}
|
||||
|
||||
/// COMMANDS
|
||||
trait Command
|
||||
trait Command extends IO.HasFailureMessage {
|
||||
def failureMessage = CommandFailed(this)
|
||||
}
|
||||
|
||||
case class Connect(remoteAddress: InetSocketAddress,
|
||||
localAddress: Option[InetSocketAddress] = None,
|
||||
|
|
@ -179,34 +117,23 @@ object Tcp extends ExtensionKey[TcpExt] {
|
|||
class TcpExt(system: ExtendedActorSystem) extends IO.Extension {
|
||||
|
||||
val Settings = new Settings(system.settings.config.getConfig("akka.io.tcp"))
|
||||
class Settings private[TcpExt] (config: Config) {
|
||||
import config._
|
||||
class Settings private[TcpExt] (_config: Config) extends SelectionHandlerSettings(_config) {
|
||||
import _config._
|
||||
|
||||
val NrOfSelectors = getInt("nr-of-selectors")
|
||||
val MaxChannels = getString("max-channels") match {
|
||||
case "unlimited" ⇒ -1
|
||||
case _ ⇒ getInt("max-channels")
|
||||
}
|
||||
val SelectTimeout = getString("select-timeout") match {
|
||||
case "infinite" ⇒ Duration.Inf
|
||||
case x ⇒ Duration(x)
|
||||
}
|
||||
val SelectorAssociationRetries = getInt("selector-association-retries")
|
||||
|
||||
val BatchAcceptLimit = getInt("batch-accept-limit")
|
||||
val DirectBufferSize = getIntBytes("direct-buffer-size")
|
||||
val MaxDirectBufferPoolSize = getInt("max-direct-buffer-pool-size")
|
||||
val MaxDirectBufferPoolSize = getInt("direct-buffer-pool-limit")
|
||||
val RegisterTimeout = getString("register-timeout") match {
|
||||
case "infinite" ⇒ Duration.Undefined
|
||||
case x ⇒ Duration(x)
|
||||
}
|
||||
val ReceivedMessageSizeLimit = getString("received-message-size-limit") match {
|
||||
val ReceivedMessageSizeLimit = getString("max-received-message-size") match {
|
||||
case "unlimited" ⇒ Int.MaxValue
|
||||
case x ⇒ getIntBytes("received-message-size-limit")
|
||||
}
|
||||
val SelectorDispatcher = getString("selector-dispatcher")
|
||||
val WorkerDispatcher = getString("worker-dispatcher")
|
||||
val ManagementDispatcher = getString("management-dispatcher")
|
||||
val TraceLogging = getBoolean("trace-logging")
|
||||
|
||||
require(NrOfSelectors > 0, "nr-of-selectors must be > 0")
|
||||
require(MaxChannels == -1 || MaxChannels > 0, "max-channels must be > 0 or 'unlimited'")
|
||||
|
|
@ -223,7 +150,7 @@ class TcpExt(system: ExtendedActorSystem) extends IO.Extension {
|
|||
}
|
||||
}
|
||||
|
||||
val manager = {
|
||||
val manager: ActorRef = {
|
||||
system.asInstanceOf[ActorSystemImpl].systemActorOf(
|
||||
props = Props(new TcpManager(this)).withDispatcher(Settings.ManagementDispatcher),
|
||||
name = "IO-TCP")
|
||||
|
|
|
|||
|
|
@ -14,15 +14,17 @@ import scala.util.control.NonFatal
|
|||
import scala.concurrent.duration._
|
||||
import akka.actor._
|
||||
import akka.util.ByteString
|
||||
import Tcp._
|
||||
import TcpSelector._
|
||||
import akka.io.Inet.SocketOption
|
||||
import akka.io.Tcp._
|
||||
import akka.io.SelectionHandler._
|
||||
|
||||
/**
|
||||
* Base class for TcpIncomingConnection and TcpOutgoingConnection.
|
||||
*/
|
||||
private[io] abstract class TcpConnection(val channel: SocketChannel,
|
||||
val tcp: TcpExt) extends Actor with ActorLogging with WithBufferPool {
|
||||
val tcp: TcpExt) extends Actor with ActorLogging {
|
||||
import tcp.Settings._
|
||||
import tcp.bufferPool
|
||||
import TcpConnection._
|
||||
var pendingWrite: PendingWrite = null
|
||||
|
||||
|
|
@ -38,7 +40,7 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
/** connection established, waiting for registration from user handler */
|
||||
def waitingForRegistration(commander: ActorRef): Receive = {
|
||||
case Register(handler) ⇒
|
||||
if (TraceLogging) log.debug("{} registered as connection handler", handler)
|
||||
if (TraceLogging) log.debug("[{}] registered as connection handler", handler)
|
||||
doRead(handler, None) // immediately try reading
|
||||
|
||||
context.setReceiveTimeout(Duration.Undefined)
|
||||
|
|
@ -52,13 +54,13 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
case ReceiveTimeout ⇒
|
||||
// after sending `Register` user should watch this actor to make sure
|
||||
// it didn't die because of the timeout
|
||||
log.warning("Configured registration timeout of {} expired, stopping", RegisterTimeout)
|
||||
log.warning("Configured registration timeout of [{}] expired, stopping", RegisterTimeout)
|
||||
context.stop(self)
|
||||
}
|
||||
|
||||
/** normal connected state */
|
||||
def connected(handler: ActorRef): Receive = {
|
||||
case StopReading ⇒ selector ! StopReading
|
||||
case StopReading ⇒ selector ! DisableReadInterest
|
||||
case ResumeReading ⇒ selector ! ReadInterest
|
||||
case ChannelReadable ⇒ doRead(handler, None)
|
||||
|
||||
|
|
@ -74,14 +76,14 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
pendingWrite = createWrite(write)
|
||||
doWrite(handler)
|
||||
|
||||
case ChannelWritable ⇒ doWrite(handler)
|
||||
case ChannelWritable ⇒ if (writePending) doWrite(handler)
|
||||
|
||||
case cmd: CloseCommand ⇒ handleClose(handler, Some(sender), closeResponse(cmd))
|
||||
}
|
||||
|
||||
/** connection is closing but a write has to be finished first */
|
||||
def closingWithPendingWrite(handler: ActorRef, closeCommander: Option[ActorRef], closedEvent: ConnectionClosed): Receive = {
|
||||
case StopReading ⇒ selector ! StopReading
|
||||
case StopReading ⇒ selector ! DisableReadInterest
|
||||
case ResumeReading ⇒ selector ! ReadInterest
|
||||
case ChannelReadable ⇒ doRead(handler, closeCommander)
|
||||
|
||||
|
|
@ -95,7 +97,7 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
|
||||
/** connection is closed on our side and we're waiting from confirmation from the other side */
|
||||
def closing(handler: ActorRef, closeCommander: Option[ActorRef]): Receive = {
|
||||
case StopReading ⇒ selector ! StopReading
|
||||
case StopReading ⇒ selector ! DisableReadInterest
|
||||
case ResumeReading ⇒ selector ! ReadInterest
|
||||
case ChannelReadable ⇒ doRead(handler, closeCommander)
|
||||
case Abort ⇒ handleClose(handler, Some(sender), Aborted)
|
||||
|
|
@ -137,18 +139,18 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
}
|
||||
} else MoreDataWaiting(receivedData)
|
||||
|
||||
val buffer = acquireBuffer()
|
||||
val buffer = bufferPool.acquire()
|
||||
try innerRead(buffer, ByteString.empty, ReceivedMessageSizeLimit) match {
|
||||
case NoData ⇒
|
||||
if (TraceLogging) log.debug("Read nothing.")
|
||||
selector ! ReadInterest
|
||||
case GotCompleteData(data) ⇒
|
||||
if (TraceLogging) log.debug("Read {} bytes.", data.length)
|
||||
if (TraceLogging) log.debug("Read [{}] bytes.", data.length)
|
||||
|
||||
handler ! Received(data)
|
||||
selector ! ReadInterest
|
||||
case MoreDataWaiting(data) ⇒
|
||||
if (TraceLogging) log.debug("Read {} bytes. More data waiting.", data.length)
|
||||
if (TraceLogging) log.debug("Read [{}] bytes. More data waiting.", data.length)
|
||||
|
||||
handler ! Received(data)
|
||||
self ! ChannelReadable
|
||||
|
|
@ -157,7 +159,7 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
doCloseConnection(handler, closeCommander, closeReason)
|
||||
} catch {
|
||||
case e: IOException ⇒ handleError(handler, e)
|
||||
} finally releaseBuffer(buffer)
|
||||
} finally bufferPool.release(buffer)
|
||||
}
|
||||
|
||||
final def doWrite(handler: ActorRef): Unit = {
|
||||
|
|
@ -165,7 +167,7 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
val toWrite = pendingWrite.buffer.remaining()
|
||||
require(toWrite != 0)
|
||||
val writtenBytes = channel.write(pendingWrite.buffer)
|
||||
if (TraceLogging) log.debug("Wrote {} bytes to channel", writtenBytes)
|
||||
if (TraceLogging) log.debug("Wrote [{}] bytes to channel", writtenBytes)
|
||||
|
||||
pendingWrite = pendingWrite.consume(writtenBytes)
|
||||
|
||||
|
|
@ -179,7 +181,7 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
val buffer = pendingWrite.buffer
|
||||
pendingWrite = null
|
||||
|
||||
releaseBuffer(buffer)
|
||||
bufferPool.release(buffer)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -246,7 +248,7 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
case NonFatal(e) ⇒
|
||||
// setSoLinger can fail due to http://bugs.sun.com/view_bug.do?bug_id=6799574
|
||||
// (also affected: OS/X Java 1.6.0_37)
|
||||
if (TraceLogging) log.debug("setSoLinger(true, 0) failed with {}", e)
|
||||
if (TraceLogging) log.debug("setSoLinger(true, 0) failed with [{}]", e)
|
||||
}
|
||||
channel.close()
|
||||
}
|
||||
|
|
@ -256,7 +258,7 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
abort()
|
||||
|
||||
if (writePending)
|
||||
releaseBuffer(pendingWrite.buffer)
|
||||
bufferPool.release(pendingWrite.buffer)
|
||||
|
||||
if (closedMessage != null) {
|
||||
val interestedInClose =
|
||||
|
|
@ -288,7 +290,7 @@ private[io] abstract class TcpConnection(val channel: SocketChannel,
|
|||
def wantsAck = ack != NoAck
|
||||
}
|
||||
def createWrite(write: Write): PendingWrite = {
|
||||
val buffer = acquireBuffer()
|
||||
val buffer = bufferPool.acquire()
|
||||
val copied = write.data.copyToBuffer(buffer)
|
||||
buffer.flip()
|
||||
|
||||
|
|
|
|||
|
|
@ -7,7 +7,8 @@ package akka.io
|
|||
import java.nio.channels.SocketChannel
|
||||
import scala.collection.immutable
|
||||
import akka.actor.ActorRef
|
||||
import Tcp.SocketOption
|
||||
import akka.io.Inet.SocketOption
|
||||
import akka.io.SelectionHandler.{ ChannelRegistered, RegisterChannel }
|
||||
|
||||
/**
|
||||
* An actor handling the connection state machine for an incoming, already connected
|
||||
|
|
@ -21,7 +22,9 @@ private[io] class TcpIncomingConnection(_channel: SocketChannel,
|
|||
|
||||
context.watch(handler) // sign death pact
|
||||
|
||||
completeConnect(handler, options)
|
||||
context.parent ! RegisterChannel(channel, 0)
|
||||
|
||||
def receive = PartialFunction.empty
|
||||
def receive = {
|
||||
case ChannelRegistered ⇒ completeConnect(handler, options)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,47 +5,65 @@
|
|||
package akka.io
|
||||
|
||||
import java.net.InetSocketAddress
|
||||
import java.nio.channels.ServerSocketChannel
|
||||
import java.nio.channels.{ SocketChannel, SelectionKey, ServerSocketChannel }
|
||||
import scala.annotation.tailrec
|
||||
import scala.collection.immutable
|
||||
import scala.util.control.NonFatal
|
||||
import akka.actor.{ ActorLogging, ActorRef, Actor }
|
||||
import TcpSelector._
|
||||
import Tcp._
|
||||
import akka.actor.{ Props, ActorLogging, ActorRef, Actor }
|
||||
import akka.io.SelectionHandler._
|
||||
import akka.io.Inet.SocketOption
|
||||
import akka.io.Tcp._
|
||||
import akka.io.IO.HasFailureMessage
|
||||
|
||||
private[io] class TcpListener(selectorRouter: ActorRef,
|
||||
handler: ActorRef,
|
||||
endpoint: InetSocketAddress,
|
||||
backlog: Int,
|
||||
bindCommander: ActorRef,
|
||||
settings: TcpExt#Settings,
|
||||
options: immutable.Traversable[SocketOption]) extends Actor with ActorLogging {
|
||||
private[io] object TcpListener {
|
||||
|
||||
case class RegisterIncoming(channel: SocketChannel) extends HasFailureMessage {
|
||||
def failureMessage = FailedRegisterIncoming(channel)
|
||||
}
|
||||
|
||||
case class FailedRegisterIncoming(channel: SocketChannel)
|
||||
|
||||
}
|
||||
|
||||
private[io] class TcpListener(val selectorRouter: ActorRef,
|
||||
val tcp: TcpExt,
|
||||
val bindCommander: ActorRef,
|
||||
val bind: Bind) extends Actor with ActorLogging {
|
||||
|
||||
def selector: ActorRef = context.parent
|
||||
import TcpListener._
|
||||
import tcp.Settings._
|
||||
import bind._
|
||||
|
||||
context.watch(handler) // sign death pact
|
||||
val channel = {
|
||||
val serverSocketChannel = ServerSocketChannel.open
|
||||
serverSocketChannel.configureBlocking(false)
|
||||
val socket = serverSocketChannel.socket
|
||||
options.foreach(_.beforeBind(socket))
|
||||
socket.bind(endpoint, backlog) // will blow up the actor constructor if the bind fails
|
||||
options.foreach(_.beforeServerSocketBind(socket))
|
||||
try socket.bind(endpoint, backlog)
|
||||
catch {
|
||||
case NonFatal(e) ⇒
|
||||
bindCommander ! CommandFailed(bind)
|
||||
log.error(e, "Bind failed for TCP channel on endpoint [{}]", endpoint)
|
||||
context.stop(self)
|
||||
}
|
||||
serverSocketChannel
|
||||
}
|
||||
context.parent ! RegisterServerSocketChannel(channel)
|
||||
context.parent ! RegisterChannel(channel, SelectionKey.OP_ACCEPT)
|
||||
log.debug("Successfully bound to {}", endpoint)
|
||||
|
||||
def receive: Receive = {
|
||||
case Bound ⇒
|
||||
case ChannelRegistered ⇒
|
||||
bindCommander ! Bound
|
||||
context.become(bound)
|
||||
}
|
||||
|
||||
def bound: Receive = {
|
||||
case ChannelAcceptable ⇒
|
||||
acceptAllPending(settings.BatchAcceptLimit)
|
||||
acceptAllPending(BatchAcceptLimit)
|
||||
|
||||
case CommandFailed(RegisterIncomingConnection(socketChannel, _, _)) ⇒
|
||||
case FailedRegisterIncoming(socketChannel) ⇒
|
||||
log.warning("Could not register incoming connection since selector capacity limit is reached, closing connection")
|
||||
try socketChannel.close()
|
||||
catch {
|
||||
|
|
@ -70,7 +88,7 @@ private[io] class TcpListener(selectorRouter: ActorRef,
|
|||
if (socketChannel != null) {
|
||||
log.debug("New connection accepted")
|
||||
socketChannel.configureBlocking(false)
|
||||
selectorRouter ! RegisterIncomingConnection(socketChannel, handler, options)
|
||||
selectorRouter ! WorkerForCommand(RegisterIncoming(socketChannel), self, Props(new TcpIncomingConnection(socketChannel, tcp, handler, options)))
|
||||
acceptAllPending(limit - 1)
|
||||
}
|
||||
} else context.parent ! AcceptInterest
|
||||
|
|
|
|||
|
|
@ -4,9 +4,9 @@
|
|||
|
||||
package akka.io
|
||||
|
||||
import akka.actor.{ ActorLogging, Actor, Props }
|
||||
import akka.routing.RandomRouter
|
||||
import Tcp._
|
||||
import akka.actor.{ ActorLogging, Props }
|
||||
import akka.io.IO.SelectorBasedManager
|
||||
|
||||
/**
|
||||
* TcpManager is a facade for accepting commands ([[akka.io.Tcp.Command]]) to open client or server TCP connections.
|
||||
|
|
@ -43,13 +43,15 @@ import Tcp._
|
|||
* with a [[akka.io.Tcp.CommandFailed]] message. This message contains the original command for reference.
|
||||
*
|
||||
*/
|
||||
private[io] class TcpManager(tcp: TcpExt) extends Actor with ActorLogging {
|
||||
private[io] class TcpManager(tcp: TcpExt) extends SelectorBasedManager(tcp.Settings, tcp.Settings.NrOfSelectors) with ActorLogging {
|
||||
|
||||
val selectorPool = context.actorOf(
|
||||
props = Props(new TcpSelector(self, tcp)).withRouter(RandomRouter(tcp.Settings.NrOfSelectors)),
|
||||
name = "selectors")
|
||||
|
||||
def receive = {
|
||||
case x @ (_: Connect | _: Bind) ⇒ selectorPool forward x
|
||||
def receive = workerForCommandHandler {
|
||||
case c: Connect ⇒
|
||||
val commander = sender
|
||||
Props(new TcpOutgoingConnection(tcp, commander, c))
|
||||
case b: Bind ⇒
|
||||
val commander = sender
|
||||
Props(new TcpListener(selectorPool, tcp, commander, b))
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,13 +4,13 @@
|
|||
|
||||
package akka.io
|
||||
|
||||
import java.net.InetSocketAddress
|
||||
import java.io.IOException
|
||||
import java.nio.channels.SocketChannel
|
||||
import scala.collection.immutable
|
||||
import akka.actor.ActorRef
|
||||
import TcpSelector._
|
||||
import Tcp._
|
||||
import akka.io.Inet.SocketOption
|
||||
import akka.io.SelectionHandler._
|
||||
import akka.io.Tcp._
|
||||
import java.io.IOException
|
||||
import java.nio.channels.{ SelectionKey, SocketChannel }
|
||||
import scala.collection.immutable
|
||||
|
||||
/**
|
||||
* An actor handling the connection state machine for an outgoing connection
|
||||
|
|
@ -18,26 +18,27 @@ import Tcp._
|
|||
*/
|
||||
private[io] class TcpOutgoingConnection(_tcp: TcpExt,
|
||||
commander: ActorRef,
|
||||
remoteAddress: InetSocketAddress,
|
||||
localAddress: Option[InetSocketAddress],
|
||||
options: immutable.Traversable[SocketOption])
|
||||
connect: Connect)
|
||||
extends TcpConnection(TcpOutgoingConnection.newSocketChannel(), _tcp) {
|
||||
|
||||
import connect._
|
||||
|
||||
context.watch(commander) // sign death pact
|
||||
|
||||
localAddress.foreach(channel.socket.bind)
|
||||
options.foreach(_.beforeConnect(channel.socket))
|
||||
selector ! RegisterChannel(channel, SelectionKey.OP_CONNECT)
|
||||
|
||||
log.debug("Attempting connection to {}", remoteAddress)
|
||||
if (channel.connect(remoteAddress))
|
||||
completeConnect(commander, options)
|
||||
else {
|
||||
selector ! RegisterOutgoingConnection(channel)
|
||||
context.become(connecting(commander, options))
|
||||
def receive: Receive = {
|
||||
case ChannelRegistered ⇒
|
||||
log.debug("Attempting connection to [{}]", remoteAddress)
|
||||
if (channel.connect(remoteAddress))
|
||||
completeConnect(commander, options)
|
||||
else {
|
||||
context.become(connecting(commander, options))
|
||||
}
|
||||
}
|
||||
|
||||
def receive: Receive = PartialFunction.empty
|
||||
|
||||
def connecting(commander: ActorRef, options: immutable.Traversable[SocketOption]): Receive = {
|
||||
case ChannelConnectable ⇒
|
||||
try {
|
||||
|
|
|
|||
48  akka-actor/src/main/scala/akka/io/Udp.scala  Normal file
@@ -0,0 +1,48 @@
/**
 * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
 */
package akka.io

import java.net.DatagramSocket
import akka.io.Inet.SocketOption
import com.typesafe.config.Config
import akka.actor.{ Props, ActorSystemImpl }

object Udp {

  object SO extends Inet.SoForwarders {

    /**
     * [[akka.io.Inet.SocketOption]] to set the SO_BROADCAST option
     *
     * For more information see [[java.net.DatagramSocket#setBroadcast]]
     */
    case class Broadcast(on: Boolean) extends SocketOption {
      override def beforeDatagramBind(s: DatagramSocket): Unit = s.setBroadcast(on)
    }

  }

  private[io] class UdpSettings(_config: Config) extends SelectionHandlerSettings(_config) {
    import _config._

    val NrOfSelectors = getInt("nr-of-selectors")
    val DirectBufferSize = getIntBytes("direct-buffer-size")
    val MaxDirectBufferPoolSize = getInt("direct-buffer-pool-limit")
    val BatchReceiveLimit = getInt("receive-throughput")

    val ManagementDispatcher = getString("management-dispatcher")

    // FIXME: Use new requiring
    require(NrOfSelectors > 0, "nr-of-selectors must be > 0")

    override val MaxChannelsPerSelector = if (MaxChannels == -1) -1 else math.max(MaxChannels / NrOfSelectors, 1)

    private[this] def getIntBytes(path: String): Int = {
      val size = getBytes(path)
      require(size < Int.MaxValue, s"$path must be < 2 GiB")
      size.toInt
    }
  }

}
64  akka-actor/src/main/scala/akka/io/UdpConn.scala  Normal file
@@ -0,0 +1,64 @@
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.actor._
|
||||
import akka.io.Inet.SocketOption
|
||||
import akka.io.Udp.UdpSettings
|
||||
import akka.util.ByteString
|
||||
import java.net.InetSocketAddress
|
||||
import scala.collection.immutable
|
||||
|
||||
object UdpConn extends ExtensionKey[UdpConnExt] {
|
||||
// Java API
|
||||
override def get(system: ActorSystem): UdpConnExt = super.get(system)
|
||||
|
||||
trait Command extends IO.HasFailureMessage {
|
||||
def failureMessage = CommandFailed(this)
|
||||
}
|
||||
|
||||
case object NoAck
|
||||
case class Send(payload: ByteString, ack: Any) extends Command {
|
||||
require(ack != null, "ack must be non-null. Use NoAck if you don't want acks.")
|
||||
|
||||
def wantsAck: Boolean = ack != NoAck
|
||||
}
|
||||
object Send {
|
||||
def apply(data: ByteString): Send = Send(data, NoAck)
|
||||
}
|
||||
|
||||
case class Connect(handler: ActorRef,
|
||||
localAddress: Option[InetSocketAddress],
|
||||
remoteAddress: InetSocketAddress,
|
||||
options: immutable.Traversable[SocketOption] = Nil) extends Command
|
||||
|
||||
case object StopReading extends Command
|
||||
case object ResumeReading extends Command
|
||||
|
||||
trait Event
|
||||
|
||||
case class Received(data: ByteString) extends Event
|
||||
case class CommandFailed(cmd: Command) extends Event
|
||||
case object Connected extends Event
|
||||
case object Disconnected extends Event
|
||||
|
||||
case object Close extends Command
|
||||
|
||||
case class SendFailed(cause: Throwable) extends Event
|
||||
|
||||
}
|
||||
|
||||
class UdpConnExt(system: ExtendedActorSystem) extends IO.Extension {
|
||||
|
||||
val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("akka.io.udp-fire-and-forget"))
|
||||
|
||||
val manager: ActorRef = {
|
||||
system.asInstanceOf[ActorSystemImpl].systemActorOf(
|
||||
props = Props(new UdpConnManager(this)),
|
||||
name = "IO-UDP-CONN")
|
||||
}
|
||||
|
||||
val bufferPool: BufferPool = new DirectByteBufferPool(settings.DirectBufferSize, settings.MaxDirectBufferPoolSize)
|
||||
|
||||
}
|
||||
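Taken together, UdpConn.scala and its manager give a connection-style UDP API that is driven purely by messages. A rough user-side sketch (actor and value names are illustrative only; the message flow is inferred from the commands and events declared in UdpConn.scala above):

    import java.net.InetSocketAddress
    import akka.actor.{ Actor, ActorLogging }
    import akka.io.{ IO, UdpConn }
    import akka.util.ByteString

    // Sketch only: connects a UDP "connection" to one remote peer and logs what it receives.
    class UdpConnClient(remote: InetSocketAddress) extends Actor with ActorLogging {
      import context.system

      // ask the UdpConn manager for a connection worker; we are the handler for inbound data
      IO(UdpConn) ! UdpConn.Connect(handler = self, localAddress = None, remoteAddress = remote)

      def receive = {
        case UdpConn.Connected ⇒
          val connection = sender // the worker actor created by the manager
          connection ! UdpConn.Send(ByteString("hello"))
          context.become {
            case UdpConn.Received(data)                    ⇒ log.info("got {} bytes", data.length)
            case UdpConn.CommandFailed(_: UdpConn.Send)    ⇒ log.warning("send was dropped")
            case UdpConn.Disconnected                      ⇒ context.stop(self)
          }
      }
    }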
18  akka-actor/src/main/scala/akka/io/UdpConnManager.scala  Normal file
@@ -0,0 +1,18 @@
/**
 * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
 */
package akka.io

import akka.actor.Props
import akka.io.IO.SelectorBasedManager
import akka.io.UdpConn.Connect

class UdpConnManager(udpConn: UdpConnExt) extends SelectorBasedManager(udpConn.settings, udpConn.settings.NrOfSelectors) {

  def receive = workerForCommandHandler {
    case c: Connect ⇒
      val commander = sender
      Props(new UdpConnection(udpConn, commander, c))
  }

}
132  akka-actor/src/main/scala/akka/io/UdpConnection.scala  Normal file
@@ -0,0 +1,132 @@
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.actor.{ Actor, ActorLogging, ActorRef }
|
||||
import akka.io.SelectionHandler._
|
||||
import akka.io.UdpConn._
|
||||
import akka.util.ByteString
|
||||
import java.nio.ByteBuffer
|
||||
import java.nio.channels.DatagramChannel
|
||||
import java.nio.channels.SelectionKey._
|
||||
import scala.annotation.tailrec
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
private[io] class UdpConnection(val udpConn: UdpConnExt,
|
||||
val commander: ActorRef,
|
||||
val connect: Connect) extends Actor with ActorLogging {
|
||||
|
||||
def selector: ActorRef = context.parent
|
||||
|
||||
import connect._
|
||||
import udpConn._
|
||||
import udpConn.settings._
|
||||
|
||||
var pendingSend: (Send, ActorRef) = null
|
||||
def writePending = pendingSend ne null
|
||||
|
||||
context.watch(handler) // sign death pact
|
||||
val channel = {
|
||||
val datagramChannel = DatagramChannel.open
|
||||
datagramChannel.configureBlocking(false)
|
||||
val socket = datagramChannel.socket
|
||||
options.foreach(_.beforeDatagramBind(socket))
|
||||
try {
|
||||
localAddress foreach socket.bind
|
||||
datagramChannel.connect(remoteAddress)
|
||||
} catch {
|
||||
case NonFatal(e) ⇒
|
||||
log.error(e, "Failure while connecting UDP channel to remote address [{}] local address [{}]",
|
||||
remoteAddress, localAddress.map { _.toString }.getOrElse("undefined"))
|
||||
commander ! CommandFailed(connect)
|
||||
context.stop(self)
|
||||
}
|
||||
datagramChannel
|
||||
}
|
||||
selector ! RegisterChannel(channel, OP_READ)
|
||||
log.debug("Successfully connected to [{}]", remoteAddress)
|
||||
|
||||
def receive = {
|
||||
case ChannelRegistered ⇒
|
||||
commander ! Connected
|
||||
context.become(connected, discardOld = true)
|
||||
}
|
||||
|
||||
def connected: Receive = {
|
||||
case StopReading ⇒ selector ! DisableReadInterest
|
||||
case ResumeReading ⇒ selector ! ReadInterest
|
||||
case ChannelReadable ⇒ doRead(handler)
|
||||
|
||||
case Close ⇒
|
||||
log.debug("Closing UDP connection to [{}]", remoteAddress)
|
||||
channel.close()
|
||||
sender ! Disconnected
|
||||
log.debug("Connection closed to [{}], stopping listener", remoteAddress)
|
||||
context.stop(self)
|
||||
|
||||
case send: Send if writePending ⇒
|
||||
if (TraceLogging) log.debug("Dropping write because queue is full")
|
||||
sender ! CommandFailed(send)
|
||||
|
||||
case send: Send if send.payload.isEmpty ⇒
|
||||
if (send.wantsAck)
|
||||
sender ! send.ack
|
||||
|
||||
case send: Send ⇒
|
||||
pendingSend = (send, sender)
|
||||
selector ! WriteInterest
|
||||
|
||||
case ChannelWritable ⇒ doWrite()
|
||||
}
|
||||
|
||||
def doRead(handler: ActorRef): Unit = {
|
||||
@tailrec def innerRead(readsLeft: Int, buffer: ByteBuffer): Unit = {
|
||||
buffer.clear()
|
||||
buffer.limit(DirectBufferSize)
|
||||
|
||||
if (channel.read(buffer) > 0) {
|
||||
buffer.flip()
|
||||
handler ! Received(ByteString(buffer))
|
||||
innerRead(readsLeft - 1, buffer)
|
||||
}
|
||||
}
|
||||
val buffer = bufferPool.acquire()
|
||||
try innerRead(BatchReceiveLimit, buffer) finally {
|
||||
selector ! ReadInterest
|
||||
bufferPool.release(buffer)
|
||||
}
|
||||
}
|
||||
|
||||
final def doWrite(): Unit = {
|
||||
|
||||
val buffer = udpConn.bufferPool.acquire()
|
||||
try {
|
||||
val (send, commander) = pendingSend
|
||||
buffer.clear()
|
||||
send.payload.copyToBuffer(buffer)
|
||||
buffer.flip()
|
||||
val writtenBytes = channel.write(buffer)
|
||||
if (TraceLogging) log.debug("Wrote [{}] bytes to channel", writtenBytes)
|
||||
|
||||
// Datagram channel either sends the whole message, or nothing
|
||||
if (writtenBytes == 0) commander ! CommandFailed(send)
|
||||
else if (send.wantsAck) commander ! send.ack
|
||||
} finally {
|
||||
udpConn.bufferPool.release(buffer)
|
||||
pendingSend = null
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
override def postStop() {
|
||||
if (channel.isOpen) {
|
||||
log.debug("Closing DatagramChannel after being stopped")
|
||||
try channel.close()
|
||||
catch {
|
||||
case NonFatal(e) ⇒ log.error(e, "Error closing DatagramChannel")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
66  akka-actor/src/main/scala/akka/io/UdpFF.scala  Normal file
@@ -0,0 +1,66 @@
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.actor._
|
||||
import akka.io.Inet.SocketOption
|
||||
import akka.io.Udp.UdpSettings
|
||||
import akka.util.ByteString
|
||||
import java.net.InetSocketAddress
|
||||
import scala.collection.immutable
|
||||
|
||||
object UdpFF extends ExtensionKey[UdpFFExt] {
|
||||
|
||||
// Java API
|
||||
override def get(system: ActorSystem): UdpFFExt = super.get(system)
|
||||
|
||||
trait Command extends IO.HasFailureMessage {
|
||||
def failureMessage = CommandFailed(this)
|
||||
}
|
||||
|
||||
case object NoAck
|
||||
case class Send(payload: ByteString, target: InetSocketAddress, ack: Any) extends Command {
|
||||
require(ack != null, "ack must be non-null. Use NoAck if you don't want acks.")
|
||||
|
||||
def wantsAck: Boolean = ack != NoAck
|
||||
}
|
||||
object Send {
|
||||
def apply(data: ByteString, target: InetSocketAddress): Send = Send(data, target, NoAck)
|
||||
}
|
||||
|
||||
case class Bind(handler: ActorRef,
|
||||
endpoint: InetSocketAddress,
|
||||
options: immutable.Traversable[SocketOption] = Nil) extends Command
|
||||
case object Unbind extends Command
|
||||
|
||||
case class SimpleSender(options: immutable.Traversable[SocketOption] = Nil) extends Command
|
||||
object SimpleSender extends SimpleSender(Nil)
|
||||
|
||||
case object StopReading extends Command
|
||||
case object ResumeReading extends Command
|
||||
|
||||
trait Event
|
||||
|
||||
case class Received(data: ByteString, sender: InetSocketAddress) extends Event
|
||||
case class CommandFailed(cmd: Command) extends Event
|
||||
case object Bound extends Event
|
||||
case object SimpleSendReady extends Event
|
||||
case object Unbound extends Event
|
||||
|
||||
case class SendFailed(cause: Throwable) extends Event
|
||||
|
||||
}
|
||||
|
||||
class UdpFFExt(system: ExtendedActorSystem) extends IO.Extension {
|
||||
|
||||
val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("akka.io.udp-fire-and-forget"))
|
||||
|
||||
val manager: ActorRef = {
|
||||
system.asInstanceOf[ActorSystemImpl].systemActorOf(
|
||||
props = Props(new UdpFFManager(this)),
|
||||
name = "IO-UDP-FF")
|
||||
}
|
||||
|
||||
val bufferPool: BufferPool = new DirectByteBufferPool(settings.DirectBufferSize, settings.MaxDirectBufferPoolSize)
|
||||
}
|
||||
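UdpFF.scala only declares the fire-and-forget protocol; the listener, manager and sender below implement it. A hedged sketch of binding a port and answering datagrams from the user side (names are illustrative and not part of the commit):

    import java.net.InetSocketAddress
    import akka.actor.{ Actor, ActorLogging }
    import akka.io.{ IO, UdpFF }
    import akka.util.ByteString

    // Sketch only: binds a UDP port and answers every datagram with a small "ack" datagram.
    class UdpFFEchoServer(local: InetSocketAddress) extends Actor with ActorLogging {
      import context.system

      IO(UdpFF) ! UdpFF.Bind(handler = self, endpoint = local)

      def receive = {
        case UdpFF.Bound ⇒
          val listener = sender // worker bound to `local`
          context.become {
            case UdpFF.Received(data, from) ⇒
              log.info("{} bytes from {}", data.length, from)
              listener ! UdpFF.Send(ByteString("ack"), from) // replies use the bound port as sender
            case UdpFF.CommandFailed(_: UdpFF.Send) ⇒
              log.warning("datagram was dropped")
          }
      }
    }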
96  akka-actor/src/main/scala/akka/io/UdpFFListener.scala  Normal file
@@ -0,0 +1,96 @@
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.actor.{ ActorLogging, Actor, ActorRef }
|
||||
import akka.io.SelectionHandler._
|
||||
import akka.io.UdpFF._
|
||||
import akka.util.ByteString
|
||||
import java.net.InetSocketAddress
|
||||
import java.nio.ByteBuffer
|
||||
import java.nio.channels.DatagramChannel
|
||||
import java.nio.channels.SelectionKey._
|
||||
import scala.annotation.tailrec
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
private[io] class UdpFFListener(val udpFF: UdpFFExt,
|
||||
val bindCommander: ActorRef,
|
||||
val bind: Bind)
|
||||
extends Actor with ActorLogging with WithUdpFFSend {
|
||||
|
||||
import bind._
|
||||
import udpFF.bufferPool
|
||||
import udpFF.settings._
|
||||
|
||||
def selector: ActorRef = context.parent
|
||||
|
||||
context.watch(handler) // sign death pact
|
||||
val channel = {
|
||||
val datagramChannel = DatagramChannel.open
|
||||
datagramChannel.configureBlocking(false)
|
||||
val socket = datagramChannel.socket
|
||||
options.foreach(_.beforeDatagramBind(socket))
|
||||
try socket.bind(endpoint)
|
||||
catch {
|
||||
case NonFatal(e) ⇒
|
||||
bindCommander ! CommandFailed(bind)
|
||||
log.error(e, "Failed to bind UDP channel to endpoint [{}]", endpoint)
|
||||
context.stop(self)
|
||||
}
|
||||
datagramChannel
|
||||
}
|
||||
context.parent ! RegisterChannel(channel, OP_READ)
|
||||
log.debug("Successfully bound to [{}]", endpoint)
|
||||
|
||||
def receive: Receive = {
|
||||
case ChannelRegistered ⇒
|
||||
bindCommander ! Bound
|
||||
context.become(readHandlers orElse sendHandlers, discardOld = true)
|
||||
}
|
||||
|
||||
def readHandlers: Receive = {
|
||||
case StopReading ⇒ selector ! DisableReadInterest
|
||||
case ResumeReading ⇒ selector ! ReadInterest
|
||||
case ChannelReadable ⇒ doReceive(handler)
|
||||
|
||||
case Unbind ⇒
|
||||
log.debug("Unbinding endpoint [{}]", endpoint)
|
||||
try {
|
||||
channel.close()
|
||||
sender ! Unbound
|
||||
log.debug("Unbound endpoint [{}], stopping listener", endpoint)
|
||||
} finally context.stop(self)
|
||||
}
|
||||
|
||||
def doReceive(handler: ActorRef): Unit = {
|
||||
@tailrec def innerReceive(readsLeft: Int, buffer: ByteBuffer) {
|
||||
buffer.clear()
|
||||
buffer.limit(DirectBufferSize)
|
||||
|
||||
channel.receive(buffer) match {
|
||||
case sender: InetSocketAddress ⇒
|
||||
buffer.flip()
|
||||
handler ! Received(ByteString(buffer), sender)
|
||||
if (readsLeft > 0) innerReceive(readsLeft - 1, buffer)
|
||||
case null ⇒ // null means no data was available
|
||||
}
|
||||
}
|
||||
|
||||
val buffer = bufferPool.acquire()
|
||||
try innerReceive(BatchReceiveLimit, buffer) finally {
|
||||
bufferPool.release(buffer)
|
||||
selector ! ReadInterest
|
||||
}
|
||||
}
|
||||
|
||||
override def postStop() {
|
||||
if (channel.isOpen) {
|
||||
log.debug("Closing DatagramChannel after being stopped")
|
||||
try channel.close()
|
||||
catch {
|
||||
case NonFatal(e) ⇒ log.error(e, "Error closing DatagramChannel")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
56  akka-actor/src/main/scala/akka/io/UdpFFManager.scala  Normal file
@@ -0,0 +1,56 @@
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.actor.Props
|
||||
import akka.io.IO.SelectorBasedManager
|
||||
import akka.io.UdpFF._
|
||||
|
||||
/**
|
||||
* UdpFFManager is a facade for simple fire-and-forget style UDP operations
|
||||
*
|
||||
* UdpFFManager is obtainable by calling {{{ IO(UdpFF) }}} (see [[akka.io.IO]] and [[akka.io.UdpFF]])
|
||||
*
|
||||
* *Warning!* UdpFF uses [[java.nio.channels.DatagramChannel#send]] to deliver datagrams, and as a consequence if a
|
||||
* security manager has been installed then for each datagram it will verify if the target address and port number are
|
||||
* permitted. If this performance overhead is undesirable use the connection style Udp extension.
|
||||
*
|
||||
* == Bind and send ==
|
||||
*
|
||||
* To bind and listen to a local address, a [[akka.io.UdpFF.Bind]] command must be sent to this actor. If the binding
|
||||
* was successful, the sender of the [[akka.io.UdpFF.Bind]] will be notified with a [[akka.io.UdpFF.Bound]]
|
||||
* message. The sender of the [[akka.io.UdpFF.Bound]] message is the Listener actor (an internal actor responsible for
|
||||
* listening to server events). To unbind the port an [[akka.io.UdpFF.Unbind]] message must be sent to the Listener actor.
|
||||
*
|
||||
* If the bind request is rejected because the Udp system is not able to register more channels (see the nr-of-selectors
|
||||
* and max-channels configuration options in the akka.io.udp-fire-and-forget section of the configuration) the sender will be notified
|
||||
* with a [[akka.io.UdpFF.CommandFailed]] message. This message contains the original command for reference.
|
||||
*
|
||||
* The handler provided in the [[akka.io.UdpFF.Bind]] message will receive inbound datagrams to the bound port
|
||||
* wrapped in [[akka.io.UdpFF.Received]] messages which contain the payload of the datagram and the sender address.
|
||||
*
|
||||
* UDP datagrams can be sent by sending [[akka.io.UdpFF.Send]] messages to the Listener actor. The sender port of the
|
||||
* outbound datagram will be the port to which the Listener is bound.
|
||||
*
|
||||
* == Simple send ==
|
||||
*
|
||||
* UdpFF provides a simple method of sending UDP datagrams if no reply is expected. To acquire the Sender actor
|
||||
* a SimpleSender message has to be sent to the manager. The sender of the command will be notified by a SimpleSendReady
|
||||
* message that the service is available. UDP datagrams can be sent by sending [[akka.io.UdpFF.Send]] messages to the
|
||||
* sender of SimpleSendReady. All the datagrams will contain an ephemeral local port as sender and answers will be
|
||||
* discarded.
|
||||
*
|
||||
*/
|
||||
private[io] class UdpFFManager(udpFF: UdpFFExt) extends SelectorBasedManager(udpFF.settings, udpFF.settings.NrOfSelectors) {
|
||||
|
||||
def receive = workerForCommandHandler {
|
||||
case b: Bind ⇒
|
||||
val commander = sender
|
||||
Props(new UdpFFListener(udpFF, commander, b))
|
||||
case SimpleSender(options) ⇒
|
||||
val commander = sender
|
||||
Props(new UdpFFSender(udpFF, options, commander))
|
||||
}
|
||||
|
||||
}
|
||||
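The simple-send path described in the scaladoc above needs no bound port: a worker is requested with SimpleSender and used once it replies SimpleSendReady. A sketch (illustrative names, not part of the commit):

    import java.net.InetSocketAddress
    import akka.actor.Actor
    import akka.io.{ IO, UdpFF }
    import akka.util.ByteString

    // Sketch only: acquires the fire-and-forget sender and pushes one datagram to a target.
    class OneShotSender(target: InetSocketAddress, payload: ByteString) extends Actor {
      import context.system

      IO(UdpFF) ! UdpFF.SimpleSender // ask the manager for a UdpFFSender worker

      def receive = {
        case UdpFF.SimpleSendReady ⇒
          sender ! UdpFF.Send(payload, target) // sent from an ephemeral port, replies are discarded
          context.stop(self)
      }
    }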
51  akka-actor/src/main/scala/akka/io/UdpFFSender.scala  Normal file
@@ -0,0 +1,51 @@
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.actor._
|
||||
import java.nio.channels.DatagramChannel
|
||||
import akka.io.UdpFF._
|
||||
import akka.io.SelectionHandler.{ ChannelRegistered, RegisterChannel }
|
||||
import scala.collection.immutable
|
||||
import akka.io.Inet.SocketOption
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
/**
|
||||
* Actor implementing the simple fire-and-forget UDP sender created for each [[akka.io.UdpFF.SimpleSender]] command.
|
||||
*/
|
||||
private[io] class UdpFFSender(val udpFF: UdpFFExt, options: immutable.Traversable[SocketOption], val commander: ActorRef)
|
||||
extends Actor with ActorLogging with WithUdpFFSend {
|
||||
|
||||
def selector: ActorRef = context.parent
|
||||
|
||||
val channel = {
|
||||
val datagramChannel = DatagramChannel.open
|
||||
datagramChannel.configureBlocking(false)
|
||||
val socket = datagramChannel.socket
|
||||
|
||||
options foreach { _.beforeDatagramBind(socket) }
|
||||
|
||||
datagramChannel
|
||||
}
|
||||
selector ! RegisterChannel(channel, 0)
|
||||
|
||||
def receive: Receive = {
|
||||
case ChannelRegistered ⇒
|
||||
context.become(sendHandlers, discardOld = true)
|
||||
commander ! SimpleSendReady
|
||||
}
|
||||
|
||||
override def postStop(): Unit = if (channel.isOpen) {
|
||||
log.debug("Closing DatagramChannel after being stopped")
|
||||
try channel.close()
|
||||
catch {
|
||||
case NonFatal(e) ⇒ log.error(e, "Error closing DatagramChannel")
|
||||
}
|
||||
}
|
||||
|
||||
override def postRestart(reason: Throwable): Unit =
|
||||
throw new IllegalStateException("Restarting not supported for connection actors.")
|
||||
|
||||
}
|
||||
|
||||
75  akka-actor/src/main/scala/akka/io/WithUdpFFSend.scala  Normal file
@@ -0,0 +1,75 @@
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package akka.io
|
||||
|
||||
import akka.actor.{ ActorRef, ActorLogging, Actor }
|
||||
import akka.io.UdpFF.{ CommandFailed, Send }
|
||||
import akka.io.SelectionHandler._
|
||||
import java.nio.channels.DatagramChannel
|
||||
|
||||
private[io] trait WithUdpFFSend {
|
||||
me: Actor with ActorLogging ⇒
|
||||
|
||||
var pendingSend: Send = null
|
||||
var pendingCommander: ActorRef = null
|
||||
// If send fails first, we allow a second go after selected writable, but no more. This flag signals that
|
||||
// pending send was already tried once.
|
||||
var retriedSend = false
|
||||
def hasWritePending = pendingSend ne null
|
||||
|
||||
def selector: ActorRef
|
||||
def channel: DatagramChannel
|
||||
def udpFF: UdpFFExt
|
||||
val settings = udpFF.settings
|
||||
|
||||
import settings._
|
||||
|
||||
def sendHandlers: Receive = {
|
||||
|
||||
case send: Send if hasWritePending ⇒
|
||||
if (TraceLogging) log.debug("Dropping write because queue is full")
|
||||
sender ! CommandFailed(send)
|
||||
|
||||
case send: Send if send.payload.isEmpty ⇒
|
||||
if (send.wantsAck)
|
||||
sender ! send.ack
|
||||
|
||||
case send: Send ⇒
|
||||
pendingSend = send
|
||||
pendingCommander = sender
|
||||
doSend()
|
||||
|
||||
case ChannelWritable ⇒ if (hasWritePending) doSend()
|
||||
|
||||
}
|
||||
|
||||
final def doSend(): Unit = {
|
||||
|
||||
val buffer = udpFF.bufferPool.acquire()
|
||||
try {
|
||||
buffer.clear()
|
||||
pendingSend.payload.copyToBuffer(buffer)
|
||||
buffer.flip()
|
||||
val writtenBytes = channel.send(buffer, pendingSend.target)
|
||||
if (TraceLogging) log.debug("Wrote [{}] bytes to channel", writtenBytes)
|
||||
|
||||
// Datagram channel either sends the whole message, or nothing
|
||||
if (writtenBytes == 0) {
|
||||
if (retriedSend) {
|
||||
pendingCommander ! CommandFailed(pendingSend)
|
||||
retriedSend = false
|
||||
pendingSend = null
|
||||
pendingCommander = null
|
||||
} else {
|
||||
selector ! WriteInterest
|
||||
retriedSend = true
|
||||
}
|
||||
} else if (pendingSend.wantsAck) pendingCommander ! pendingSend.ack
|
||||
|
||||
} finally {
|
||||
udpFF.bufferPool.release(buffer)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
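WithUdpFFSend nacks a Send either when another send is still pending or when the datagram could not be written even after one retry on ChannelWritable, so a user-side actor has to keep the payload around until its ack arrives. A hedged sketch of such a sender (illustrative only, not part of the commit):

    import java.net.InetSocketAddress
    import akka.actor.{ Actor, ActorRef }
    import akka.io.UdpFF
    import akka.util.ByteString

    // Sketch only: funnels payloads through a UdpFF worker and retries nacked sends.
    class ResendingSender(udpWorker: ActorRef, target: InetSocketAddress) extends Actor {
      case object Ack // our own ack token, attached to every Send

      def receive = {
        case payload: ByteString ⇒
          udpWorker ! UdpFF.Send(payload, target, Ack)

        case UdpFF.CommandFailed(UdpFF.Send(payload, _, _)) ⇒
          // nacked: the worker was busy or the socket buffer was full; a real
          // application would usually back off before trying again
          udpWorker ! UdpFF.Send(payload, target, Ack)

        case Ack ⇒ // previous datagram was handed to the OS, nothing to do
      }
    }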
248  akka-docs/rst/scala/io-old.rst  Normal file
@@ -0,0 +1,248 @@
.. _io-scala-old:
|
||||
|
||||
.. warning::
|
||||
This is the documentation of the old IO implementation that is considered now deprecated. Please take a look
|
||||
at new IO API: :ref:`io-scala`
|
||||
|
||||
IO (Scala)
|
||||
==========
|
||||
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
This documentation is in progress and some sections may be incomplete. More will be coming.
|
||||
|
||||
Components
|
||||
----------
|
||||
|
||||
ByteString
|
||||
^^^^^^^^^^
|
||||
|
||||
A primary goal of Akka's IO support is to only communicate between actors with immutable objects. When dealing with network IO on the jvm ``Array[Byte]`` and ``ByteBuffer`` are commonly used to represent collections of ``Byte``\s, but they are mutable. Scala's collection library also lacks a suitably efficient immutable collection for ``Byte``\s. Being able to safely and efficiently move ``Byte``\s around is very important for this IO support, so ``ByteString`` was developed.
|
||||
|
||||
``ByteString`` is a `Rope-like <http://en.wikipedia.org/wiki/Rope_(computer_science)>`_ data structure that is immutable and efficient. When 2 ``ByteString``\s are concatenated together they are both stored within the resulting ``ByteString`` instead of copying both to a new ``Array``. Operations such as ``drop`` and ``take`` return ``ByteString``\s that still reference the original ``Array``, but just change the offset and length that is visible. Great care has also been taken to make sure that the internal ``Array`` cannot be modified. Whenever a potentially unsafe ``Array`` is used to create a new ``ByteString`` a defensive copy is created. If you require a ``ByteString`` that only blocks a much memory as necessary for it's content, use the ``compact`` method to get a ``CompactByteString`` instance. If the ``ByteString`` represented only a slice of the original array, this will result in copying all bytes in that slice.
|
||||
|
||||
``ByteString`` inherits all methods from ``IndexedSeq``, and it also has some new ones. For more information, look up the ``akka.util.ByteString`` class and its companion object in the ScalaDoc.
|
||||
|
||||
``ByteString`` also comes with its own optimized builder and iterator classes, ``ByteStringBuilder`` and ``ByteIterator``, which provide special features in addition to the standard builder / iterator methods:
|
||||
|
||||
Compatibility with java.io
|
||||
..........................
|
||||
|
||||
A ``ByteStringBuilder`` can be wrapped in a ``java.io.OutputStream`` via the ``asOutputStream`` method. Likewise, ``ByteIterator`` can be wrapped in a ``java.io.InputStream`` via ``asInputStream``. Using these, ``akka.io`` applications can integrate legacy code based on ``java.io`` streams.
|
||||
|
||||
Encoding and decoding of binary data
|
||||
....................................
|
||||
|
||||
``ByteStringBuilder`` and ``ByteIterator`` support encoding and decoding of binary data. As an example, consider a stream of binary data frames with the following format:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
frameLen: Int
|
||||
n: Int
|
||||
m: Int
|
||||
n times {
|
||||
a: Short
|
||||
b: Long
|
||||
}
|
||||
data: m times Double
|
||||
|
||||
In this example, the data is to be stored in arrays of ``a``, ``b`` and ``data``.
|
||||
|
||||
Decoding of such frames can be efficiently implemented in the following fashion:
|
||||
|
||||
.. includecode:: code/docs/io/BinaryCoding.scala
|
||||
:include: decoding
|
||||
|
||||
This implementation naturally follows the example data format. In a true Scala application, one might, of course, want to use specialized immutable Short/Long/Double containers instead of mutable Arrays.
|
||||
|
||||
After extracting data from a ``ByteIterator``, the remaining content can also be turned back into a ``ByteString`` using the ``toSeq`` method
|
||||
|
||||
.. includecode:: code/docs/io/BinaryCoding.scala
|
||||
:include: rest-to-seq
|
||||
|
||||
with no copying from bytes to rest involved. In general, conversions from ByteString to ByteIterator and vice versa are O(1) for non-chunked ByteStrings and (at worst) O(nChunks) for chunked ByteStrings.
|
||||
|
||||
Encoding of data also is very natural, using ``ByteStringBuilder``
|
||||
|
||||
.. includecode:: code/docs/io/BinaryCoding.scala
|
||||
:include: encoding
|
||||
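For readers skimming this page without the bundled sample file, an encoder for the frame format above could look roughly like the following sketch (assuming little-endian byte order; the actual included example may differ in detail):

.. code-block:: scala

   import java.nio.ByteOrder
   import akka.util.ByteString

   implicit val byteOrder = ByteOrder.LITTLE_ENDIAN

   def encodeFrame(a: Array[Short], b: Array[Long], data: Array[Double]): ByteString = {
     require(a.length == b.length)
     val builder = ByteString.newBuilder
     builder.putInt(a.length)      // n
     builder.putInt(data.length)   // m
     for (i <- 0 until a.length) {
       builder.putShort(a(i))
       builder.putLong(b(i))
     }
     builder.putDoubles(data)
     val payload = builder.result()
     // frameLen counts the bytes that follow the length field
     ByteString.newBuilder.putInt(payload.length).result() ++ payload
   }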
|
||||
|
||||
The encoded data then can be sent over socket (see ``IOManager``):
|
||||
|
||||
.. includecode:: code/docs/io/BinaryCoding.scala
|
||||
:include: sending
|
||||
|
||||
|
||||
IO.Handle
|
||||
^^^^^^^^^
|
||||
|
||||
``IO.Handle`` is an immutable reference to a Java NIO ``Channel``. Passing mutable ``Channel``\s between ``Actor``\s could lead to unsafe behavior, so instead subclasses of the ``IO.Handle`` trait are used. Currently there are 2 concrete subclasses: ``IO.SocketHandle`` (representing a ``SocketChannel``) and ``IO.ServerHandle`` (representing a ``ServerSocketChannel``).
|
||||
|
||||
IOManager
|
||||
^^^^^^^^^
|
||||
|
||||
The ``IOManager`` takes care of the low level IO details. Each ``ActorSystem`` has it's own ``IOManager``, which can be accessed calling ``IOManager(system: ActorSystem)``. ``Actor``\s communicate with the ``IOManager`` with specific messages. The messages sent from an ``Actor`` to the ``IOManager`` are handled automatically when using certain methods and the messages sent from an ``IOManager`` are handled within an ``Actor``\'s ``receive`` method.
|
||||
|
||||
Connecting to a remote host:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val address = new InetSocketAddress("remotehost", 80)
|
||||
val socket = IOManager(actorSystem).connect(address)
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val socket = IOManager(actorSystem).connect("remotehost", 80)
|
||||
|
||||
Creating a server:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val address = new InetSocketAddress("localhost", 80)
|
||||
val serverSocket = IOManager(actorSystem).listen(address)
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val serverSocket = IOManager(actorSystem).listen("localhost", 80)
|
||||
|
||||
Receiving messages from the ``IOManager``:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def receive = {
|
||||
|
||||
case IO.Listening(server, address) =>
|
||||
println("The server is listening on socket " + address)
|
||||
|
||||
case IO.Connected(socket, address) =>
|
||||
println("Successfully connected to " + address)
|
||||
|
||||
case IO.NewClient(server) =>
|
||||
println("New incoming connection on server")
|
||||
val socket = server.accept()
|
||||
println("Writing to new client socket")
|
||||
socket.write(bytes)
|
||||
println("Closing socket")
|
||||
socket.close()
|
||||
|
||||
case IO.Read(socket, bytes) =>
|
||||
println("Received incoming data from socket")
|
||||
|
||||
case IO.Closed(socket: IO.SocketHandle, cause) =>
|
||||
println("Socket has closed, cause: " + cause)
|
||||
|
||||
case IO.Closed(server: IO.ServerHandle, cause) =>
|
||||
println("Server socket has closed, cause: " + cause)
|
||||
|
||||
}
|
||||
|
||||
IO.Iteratee
|
||||
^^^^^^^^^^^
|
||||
|
||||
Included with Akka's IO support is a basic implementation of ``Iteratee``\s. ``Iteratee``\s are an effective way of handling a stream of data without needing to wait for all the data to arrive. This is especially useful when dealing with non blocking IO since we will usually receive data in chunks which may not include enough information to process, or it may contain much more data than we currently need.
|
||||
|
||||
This ``Iteratee`` implementation is much more basic than what is usually found. There is only support for ``ByteString`` input, and enumerators aren't used. The reason for this limited implementation is to reduce the amount of explicit type signatures needed and to keep things simple. It is important to note that Akka's ``Iteratee``\s are completely optional, incoming data can be handled in any way, including other ``Iteratee`` libraries.
|
||||
|
||||
``Iteratee``\s work by processing the data that it is given and returning either the result (with any unused input) or a continuation if more input is needed. They are monadic, so methods like ``flatMap`` can be used to pass the result of an ``Iteratee`` to another.
|
||||
|
||||
The basic ``Iteratee``\s included in the IO support can all be found in the ScalaDoc under ``akka.actor.IO``, and some of them are covered in the example below.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Http Server
|
||||
^^^^^^^^^^^
|
||||
|
||||
This example will create a simple high performance HTTP server. We begin with our imports:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: imports
|
||||
|
||||
Some commonly used constants:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: constants
|
||||
|
||||
And case classes to hold the resulting request:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: request-class
|
||||
|
||||
Now for our first ``Iteratee``. There are 3 main sections of a HTTP request: the request line, the headers, and an optional body. The main request ``Iteratee`` handles each section separately:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: read-request
|
||||
|
||||
In the above code ``readRequest`` takes the results of 3 different ``Iteratees`` (``readRequestLine``, ``readHeaders``, ``readBody``) and combines them into a single ``Request`` object. ``readRequestLine`` actually returns a tuple, so we extract its individual components. ``readBody`` depends on values contained within the header section, so we must pass those to the method.
|
||||
|
||||
The request line has 3 parts to it: the HTTP method, the requested URI, and the HTTP version. The parts are separated by a single space, and the entire request line ends with a ``CRLF``.
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: read-request-line
|
||||
|
||||
Reading the request method is simple as it is a single string ending in a space. The simple ``Iteratee`` that performs this is ``IO.takeUntil(delimiter: ByteString): Iteratee[ByteString]``. It keeps consuming input until the specified delimiter is found. Reading the HTTP version is also a simple string that ends with a ``CRLF``.
|
||||
|
||||
The ``ascii`` method is a helper that takes a ``ByteString`` and parses it as a ``US-ASCII`` ``String``.
|
||||
|
||||
Reading the request URI is a bit more complicated because we want to parse the individual components of the URI instead of just returning a simple string:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: read-request-uri
|
||||
|
||||
For this example we are only interested in handling absolute paths. To detect whether the URI is an absolute path we use ``IO.peek(length: Int): Iteratee[ByteString]``, which returns a ``ByteString`` of the requested length but doesn't actually consume the input. We peek at the next bit of input and see if it matches our ``PATH`` constant (defined above as ``ByteString("/")``). If it doesn't match we throw an error, but for a more robust solution we would want to handle other valid URIs.
|
||||
|
||||
Next we handle the path itself:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: read-path
|
||||
|
||||
The ``step`` method is a recursive method that takes a ``List`` of the accumulated path segments. It first checks if the remaining input starts with the ``PATH`` constant, and if it does, it drops that input, and returns the ``readUriPart`` ``Iteratee`` which has its result added to the path segment accumulator and the ``step`` method is run again.
|
||||
|
||||
If after reading in a path segment the next input does not start with a path, we reverse the accumulated segments and return it (dropping the last segment if it is blank).
|
||||
|
||||
Following the path we read in the query (if it exists):
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: read-query
|
||||
|
||||
It is much simpler than reading the path since we aren't doing any parsing of the query since there is no standard format of the query string.
|
||||
|
||||
Both the path and query used the ``readUriPart`` ``Iteratee``, which is next:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: read-uri-part
|
||||
|
||||
Here we have several ``Set``\s that contain valid characters pulled from the URI spec. The ``readUriPart`` method takes a ``Set`` of valid characters (already mapped to ``Byte``\s) and will continue to match characters until it reaches one that is not part of the ``Set``. If it is a percent encoded character then that is handled as a valid character and processing continues, or else we are done collecting this part of the URI.
|
||||
|
||||
Headers are next:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: read-headers
|
||||
|
||||
And if applicable, we read in the message body:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: read-body
|
||||
|
||||
Finally we get to the actual ``Actor``:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: actor
|
||||
|
||||
And it's companion object:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: actor-companion
|
||||
|
||||
And the OKResponse:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: ok-response
|
||||
|
||||
A ``main`` method to start everything up:
|
||||
|
||||
.. includecode:: code/docs/io/HTTPServer.scala
|
||||
:include: main
|
||||
|
|
@ -1,21 +1,75 @@
|
|||
.. _io-scala:
|
||||
|
||||
IO (Scala)
|
||||
I/O (Scala)
|
||||
===========
|
||||
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
The ``akka.io`` package has been developed in collaboration between the Akka
|
||||
and `spray.io`_ teams. Its design incorporates the experiences with the
|
||||
``spray-io`` module along with improvements that were jointly developed for
|
||||
more general consumption as an actor-based service.
|
||||
|
||||
This documentation is in progress and some sections may be incomplete. More will be coming.
|
||||
|
||||
Components
|
||||
----------
|
||||
.. note::
|
||||
The old I/O implementation has been deprecated and its documentation has been moved: :ref:`io-scala-old`
|
||||
|
||||
Terminology, Concepts
|
||||
---------------------
|
||||
The I/O API is completely actor based, meaning that all operations are implemented as message passing instead of
|
||||
direct method calls. Every I/O driver (TCP, UDP) has a special actor, called the *manager*, that serves
|
||||
as the entry point for the API. The manager is accessible through an extension, for example the following code
|
||||
looks up the TCP manager and returns its ``ActorRef``:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val tcpManager = IO(Tcp)
|
||||
|
||||
For various I/O commands the manager instantiates worker actors that will expose themselves to the user of the
|
||||
API by replying to the command. For example after a ``Connect`` command sent to the TCP manager the manager creates
|
||||
an actor representing the TCP connection. All operations related to the given TCP connections can be invoked by sending
|
||||
messages to the connection actor which announces itself by sending a ``Connected`` message.
|
||||
|
||||
DeathWatch and Resource Management
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Worker actors usually need a user-side counterpart actor listening for events (such events could be inbound connections,
|
||||
incoming bytes or acknowledgements for writes). These worker actors *watch* their listener counterparts, therefore the
|
||||
resources assigned to them are automatically released when the listener stops. This design makes the API more robust
|
||||
against resource leaks.
|
||||
|
||||
Thanks to the completely actor based approach of the I/O API the opposite direction works as well: a user actor
|
||||
responsible for handling a connection might watch the connection actor to be notified if it unexpectedly terminates.
|
||||
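In practice this watching is a single call on the user side; a minimal sketch (assuming ``connectionActor`` holds the worker obtained from a ``Connected`` reply):

.. code-block:: scala

   import akka.actor.Terminated

   // inside the user-side actor driving the connection
   context.watch(connectionActor)

   def receive = {
     case Terminated(worker) if worker == connectionActor =>
       // the I/O worker stopped unexpectedly; release our own resources too
       context.stop(self)
   }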
|
||||
Write models (Ack, Nack)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Basically all of the I/O devices have a maximum throughput which limits the frequency and size of writes. When an
|
||||
application tries to push more data than a device can handle, the driver has to buffer all bytes that the device has
|
||||
not yet been able to write. With this approach it is possible to handle short bursts of intensive writes --- but no buffer is infinite.
|
||||
Therefore, the driver has to tell the writer (a user-side actor) either that no further writes are possible, or
explicitly when the next chunk can be written or buffered.
|
||||
|
||||
Both of these models are available in the TCP and UDP implementations of Akka I/O. Ack based flow control can be enabled
|
||||
by providing an ack object in the write message (``Write`` in the case of TCP and ``Send`` for UDP) that will be used by
|
||||
the worker to notify the writer about the success.
|
||||
|
||||
If a write (or any other command) fails, the driver notifies the commander with a special message (``CommandFailed`` in
|
||||
the case of UDP and TCP). This message also serves as a means to notify the writer of a failed write. Please note, that
|
||||
in a Nack based flow-control setting the writer has to buffer some of the writes as the failure notification for a
|
||||
write ``W1`` might arrive after additional write commands ``W2``, ``W3`` have been sent.
|
||||
|
||||
.. warning::
|
||||
An acknowledged write does not mean acknowledged delivery or storage. The Ack/Nack
|
||||
protocol described here is a means of flow control not error handling: receiving an Ack for a write signals that the
|
||||
I/O driver is ready to accept a new one.
|
||||
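For example, an ack-based TCP writer could be sketched as follows (assuming the ``Write`` command carries the payload plus an arbitrary ack token, as described above; ``sendNextChunk`` stands in for application logic):

.. code-block:: scala

   case object MyAck // any object can serve as the ack token

   // `connection` is the connection actor, `data` is a ByteString
   connection ! Write(data, MyAck)

   def receive = {
     case MyAck =>
       // the driver has taken over the bytes; safe to hand over the next chunk
       sendNextChunk()
     case CommandFailed(_: Write) =>
       // nack: the write was dropped, keep the data buffered and retry later
   }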
|
||||
ByteString
|
||||
^^^^^^^^^^
|
||||
|
||||
A primary goal of Akka's IO support is to only communicate between actors with immutable objects. When dealing with network IO on the jvm ``Array[Byte]`` and ``ByteBuffer`` are commonly used to represent collections of ``Byte``\s, but they are mutable. Scala's collection library also lacks a suitably efficient immutable collection for ``Byte``\s. Being able to safely and efficiently move ``Byte``\s around is very important for this IO support, so ``ByteString`` was developed.
|
||||
A primary goal of Akka's IO support is to only communicate between actors with immutable objects. When dealing with network I/O on the jvm ``Array[Byte]`` and ``ByteBuffer`` are commonly used to represent collections of ``Byte``\s, but they are mutable. Scala's collection library also lacks a suitably efficient immutable collection for ``Byte``\s. Being able to safely and efficiently move ``Byte``\s around is very important for this I/O support, so ``ByteString`` was developed.
|
||||
|
||||
``ByteString`` is a `Rope-like <http://en.wikipedia.org/wiki/Rope_(computer_science)>`_ data structure that is immutable and efficient. When 2 ``ByteString``\s are concatenated together they are both stored within the resulting ``ByteString`` instead of copying both to a new ``Array``. Operations such as ``drop`` and ``take`` return ``ByteString``\s that still reference the original ``Array``, but just change the offset and length that is visible. Great care has also been taken to make sure that the internal ``Array`` cannot be modified. Whenever a potentially unsafe ``Array`` is used to create a new ``ByteString`` a defensive copy is created. If you require a ``ByteString`` that only blocks as much memory as necessary for its content, use the ``compact`` method to get a ``CompactByteString`` instance. If the ``ByteString`` represented only a slice of the original array, this will result in copying all bytes in that slice.

@ -57,7 +111,7 @@ After extracting data from a ``ByteIterator``, the remaining content can also be

.. includecode:: code/docs/io/BinaryCoding.scala
   :include: rest-to-seq

with no copying from bytes to rest involved. In general, conversions from ``ByteString`` to ``ByteIterator`` and vice versa are O(1) for non-chunked ``ByteString``\s and (at worst) O(nChunks) for chunked ``ByteString``\s.

Encoding of data is also very natural, using ``ByteStringBuilder``:

@ -65,180 +119,263 @@ Encoding of data also is very natural, using ``ByteStringBuilder``

.. includecode:: code/docs/io/BinaryCoding.scala
   :include: encoding
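
The linked snippet is not visible here; as a rough, hand-written sketch, encoding with ``ByteStringBuilder`` and decoding with ``ByteIterator`` typically look like this (the record layout and all names are invented for illustration):

.. code-block:: scala

  import java.nio.ByteOrder
  import akka.util.ByteString

  implicit val byteOrder = ByteOrder.BIG_ENDIAN

  // encode a hypothetical record: an Int id followed by a Double value
  val builder = ByteString.newBuilder
  builder.putInt(42)
  builder.putDouble(3.14)
  val frame: ByteString = builder.result()

  // decode it again using a ByteIterator
  val it = frame.iterator
  val id = it.getInt
  val value = it.getDouble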

The encoded data then can be sent over a socket (see ``IOManager``):

.. includecode:: code/docs/io/BinaryCoding.scala
   :include: sending

Using TCP
---------

IO.Handle
^^^^^^^^^

``IO.Handle`` is an immutable reference to a Java NIO ``Channel``. Passing mutable ``Channel``\s between ``Actor``\s could lead to unsafe behavior, so instead subclasses of the ``IO.Handle`` trait are used. Currently there are two concrete subclasses: ``IO.SocketHandle`` (representing a ``SocketChannel``) and ``IO.ServerHandle`` (representing a ``ServerSocketChannel``).

IOManager
^^^^^^^^^

The ``IOManager`` takes care of the low level IO details. Each ``ActorSystem`` has its own ``IOManager``, which can be accessed by calling ``IOManager(system: ActorSystem)``. ``Actor``\s communicate with the ``IOManager`` with specific messages. The messages sent from an ``Actor`` to the ``IOManager`` are handled automatically when using certain methods and the messages sent from an ``IOManager`` are handled within an ``Actor``\'s ``receive`` method.

Connecting to a remote host:

As with all of the Akka I/O APIs, everything starts with acquiring a reference to the appropriate manager:

.. code-block:: scala

  val address = new InetSocketAddress("remotehost", 80)
  val socket = IOManager(actorSystem).connect(address)
  import akka.io.IO
  import akka.io.Tcp
  val tcpManager = IO(Tcp)

This is an actor that handles the underlying low level I/O resources (Selectors, channels) and instantiates workers for
specific tasks, like listening to incoming connections.

Connecting
^^^^^^^^^^

The first step of connecting to a remote address is sending a ``Connect`` message to the TCP manager:

.. code-block:: scala

  val socket = IOManager(actorSystem).connect("remotehost", 80)
  import akka.io.Tcp._
  IO(Tcp) ! Connect(remoteSocketAddress)
  // It is also possible to set various socket options or specify a local address:
  IO(Tcp) ! Connect(remoteSocketAddress, Some(localSocketAddress), List(SO.KeepAlive(true)))

Creating a server:

After issuing the ``Connect`` command the TCP manager spawns a worker actor that will handle commands related to the
connection. This worker actor will reveal itself by replying with a ``Connected`` message to the actor who sent the
``Connect`` command.

.. code-block:: scala

  val address = new InetSocketAddress("localhost", 80)
  val serverSocket = IOManager(actorSystem).listen(address)
  case Connected(remoteAddress, localAddress) =>
    connectionActor = sender

At this point, there is still no listener associated with the connection. To finish the connection setup a ``Register``
message has to be sent to the connection actor with the listener ``ActorRef`` as a parameter.

.. code-block:: scala

  val serverSocket = IOManager(actorSystem).listen("localhost", 80)
  connectionActor ! Register(listener)

Receiving messages from the ``IOManager``:

After registration, the listener actor provided in the ``listener`` parameter will be watched by the connection actor.
If the listener stops, the connection is closed, and all resources allocated for the connection are released. During its
lifetime the listener may receive various event notifications:

.. code-block:: scala

  def receive = {
    case Received(dataByteString) => // handle incoming chunk of data
    case CommandFailed(cmd) => // handle failure of command: cmd
    case _: ConnectionClosed => // handle closed connections

    case IO.Listening(server, address) =>
      println("The server is listening on socket " + address)

The last line handles all connection close events in the same way. It is possible to listen for more fine-grained
connection events, see the appropriate section below.

  case IO.Connected(socket, address) =>
    println("Successfully connected to " + address)

  case IO.NewClient(server) =>
    println("New incoming connection on server")
    val socket = server.accept()
    println("Writing to new client socket")
    socket.write(bytes)
    println("Closing socket")
    socket.close()
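
Putting the outbound-connection steps above together, a minimal client actor might look like the following sketch (the actor structure and names are illustrative; only the ``Connect``, ``Connected``, ``Register``, ``Received`` and ``CommandFailed`` messages come from the text above):

.. code-block:: scala

  import java.net.InetSocketAddress
  import akka.actor.Actor
  import akka.io.{ IO, Tcp }

  class SimpleClient(remote: InetSocketAddress) extends Actor {
    import Tcp._
    import context.system // IO(Tcp) needs an implicit ActorSystem

    // ask the TCP manager for a new outgoing connection
    IO(Tcp) ! Connect(remote)

    def receive = {
      case CommandFailed(_: Connect) =>
        context stop self // connecting failed

      case Connected(remoteAddress, localAddress) =>
        val connection = sender      // the connection worker actor
        connection ! Register(self)  // from now on this actor receives the events

      case Received(data) =>
        println("got " + data.length + " bytes")

      case _: ConnectionClosed =>
        context stop self
    }
  }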

Accepting connections
^^^^^^^^^^^^^^^^^^^^^

  case IO.Read(socket, bytes) =>
    println("Received incoming data from socket")

To create a TCP server and listen for inbound connections, a ``Bind`` command has to be sent to the TCP manager:

  case IO.Closed(socket: IO.SocketHandle, cause) =>
    println("Socket has closed, cause: " + cause)

.. code-block:: scala

  case IO.Closed(server: IO.ServerHandle, cause) =>
    println("Server socket has closed, cause: " + cause)

  import akka.io.IO
  import akka.io.Tcp
  IO(Tcp) ! Bind(handler, localAddress)

  }

The actor sending the ``Bind`` message will receive a ``Bound`` message signalling that the server is ready to accept
incoming connections. Accepting connections is very similar to the last two steps of opening outbound connections: when
an incoming connection is established, the actor provided in ``handler`` will receive a ``Connected`` message whose
sender is the connection actor:

IO.Iteratee
^^^^^^^^^^^

.. code-block:: scala

Included with Akka's IO support is a basic implementation of ``Iteratee``\s. ``Iteratee``\s are an effective way of handling a stream of data without needing to wait for all the data to arrive. This is especially useful when dealing with non-blocking IO since we will usually receive data in chunks which may not include enough information to process, or it may contain much more data than we currently need.

  case Connected(remoteAddress, localAddress) =>
    connectionActor = sender

This ``Iteratee`` implementation is much more basic than what is usually found. There is only support for ``ByteString`` input, and enumerators aren't used. The reason for this limited implementation is to reduce the amount of explicit type signatures needed and to keep things simple. It is important to note that Akka's ``Iteratee``\s are completely optional: incoming data can be handled in any way, including other ``Iteratee`` libraries.

At this point, there is still no listener associated with the connection. To finish the connection setup a ``Register`` message has to be sent to the connection actor with the listener ``ActorRef`` as a parameter.

``Iteratee``\s work by processing the data that they are given and returning either the result (with any unused input) or a continuation if more input is needed. They are monadic, so methods like ``flatMap`` can be used to pass the result of one ``Iteratee`` to another.

.. code-block:: scala

The basic ``Iteratee``\s included in the IO support can all be found in the ScalaDoc under ``akka.actor.IO``, and some of them are covered in the example below.

  connectionActor ! Register(listener)

Examples
--------

After registration, the listener actor provided in the ``listener`` parameter will be watched by the connection actor.
If the listener stops, the connection is closed, and all resources allocated for the connection are released. During its
lifetime the listener will receive various event notifications in the same way as we have seen in the outbound
connection case.
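
A compact sketch of the server side just described (class and handler names are invented; only the ``Bind``, ``Bound``, ``Connected``, ``Register`` and connection events come from the text):

.. code-block:: scala

  import java.net.InetSocketAddress
  import akka.actor.{ Actor, Props }
  import akka.io.{ IO, Tcp }

  class SimpleServer(local: InetSocketAddress) extends Actor {
    import Tcp._
    import context.system

    // bind to the local address; this actor is the `handler` for incoming connections
    IO(Tcp) ! Bind(self, local)

    def receive = {
      case Bound => // the server is ready to accept incoming connections

      case CommandFailed(_: Bind) =>
        context stop self

      case Connected(remoteAddress, localAddress) =>
        // spawn a listener per connection and register it with the connection actor
        val handler = context.actorOf(Props[ConnectionListener])
        sender ! Register(handler)
    }
  }

  class ConnectionListener extends Actor {
    import Tcp._
    def receive = {
      case Received(data) => // handle the incoming chunk of data
      case _: ConnectionClosed => context stop self
    }
  }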

Http Server
^^^^^^^^^^^

Closing connections
^^^^^^^^^^^^^^^^^^^

This example will create a simple high performance HTTP server. We begin with our imports:

A connection can be closed by sending one of the commands ``Close``, ``ConfirmedClose`` or ``Abort`` to the connection
actor.

.. includecode:: code/docs/io/HTTPServer.scala
   :include: imports

``Close`` will close the connection by sending a ``FIN`` message, but without waiting for confirmation from
the remote endpoint. Pending writes will be flushed. If the close is successful, the listener will be notified with
``Closed``.

Some commonly used constants:

``ConfirmedClose`` will close the sending direction of the connection by sending a ``FIN`` message, but receives
will continue until the remote endpoint closes the connection, too. Pending writes will be flushed. If the close is
successful, the listener will be notified with ``ConfirmedClosed``.

.. includecode:: code/docs/io/HTTPServer.scala
   :include: constants

``Abort`` will immediately terminate the connection by sending a ``RST`` message to the remote endpoint. Pending
writes will not be flushed. If the close is successful, the listener will be notified with ``Aborted``.

And case classes to hold the resulting request:

``PeerClosed`` will be sent to the listener if the connection has been closed by the remote endpoint.

.. includecode:: code/docs/io/HTTPServer.scala
   :include: request-class

``ErrorClosed`` will be sent to the listener whenever an error happened that forced the connection to be closed.

Now for our first ``Iteratee``. There are 3 main sections of an HTTP request: the request line, the headers, and an optional body. The main request ``Iteratee`` handles each section separately:

All close notifications are subclasses of ``ConnectionClosed`` so listeners who do not need fine-grained close events
may handle all close events in the same way.

.. includecode:: code/docs/io/HTTPServer.scala
   :include: read-request
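
As a small, hedged sketch of how a listener might drive and observe a close (all names invented; it assumes the close commands and notifications above can be matched as shown in the other snippets):

.. code-block:: scala

  import akka.actor.{ Actor, ActorRef }
  import akka.io.Tcp._

  // `connection` is the already registered connection actor from the previous sections
  class ClosingListener(connection: ActorRef) extends Actor {
    // close only the sending direction; reads continue until the peer closes too
    connection ! ConfirmedClose

    def receive = {
      case ConfirmedClosed => context stop self     // our close was completed
      case PeerClosed => context stop self          // the remote endpoint closed first
      case _: ConnectionClosed => context stop self // any other close notification (errors included)
    }
  }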

Throttling Reads and Writes
^^^^^^^^^^^^^^^^^^^^^^^^^^^

In the above code ``readRequest`` takes the results of 3 different ``Iteratee``\s (``readRequestLine``, ``readHeaders``, ``readBody``) and combines them into a single ``Request`` object. ``readRequestLine`` actually returns a tuple, so we extract its individual components. ``readBody`` depends on values contained within the header section, so we must pass those to the method.

*This section is not yet ready. More coming soon*

The request line has 3 parts to it: the HTTP method, the requested URI, and the HTTP version. The parts are separated by a single space, and the entire request line ends with a ``CRLF``.

Using UDP
---------

.. includecode:: code/docs/io/HTTPServer.scala
   :include: read-request-line

UDP support comes in two flavors: connectionless, and connection based:

Reading the request method is simple as it is a single string ending in a space. The simple ``Iteratee`` that performs this is ``IO.takeUntil(delimiter: ByteString): Iteratee[ByteString]``. It keeps consuming input until the specified delimiter is found. Reading the HTTP version is also a simple string that ends with a ``CRLF``.

.. code-block:: scala

The ``ascii`` method is a helper that takes a ``ByteString`` and parses it as a ``US-ASCII`` ``String``.

  import akka.io.IO
  import akka.io.UdpFF
  val connectionLessUdp = IO(UdpFF)
  // ... or ...
  import akka.io.UdpConn
  val connectionBasedUdp = IO(UdpConn)

Reading the request URI is a bit more complicated because we want to parse the individual components of the URI instead of just returning a simple string:

UDP servers can only be implemented with the connectionless API, but clients can use both.

.. includecode:: code/docs/io/HTTPServer.scala
   :include: read-request-uri

Connectionless UDP
^^^^^^^^^^^^^^^^^^

For this example we are only interested in handling absolute paths. To detect whether the URI is an absolute path we use ``IO.peek(length: Int): Iteratee[ByteString]``, which returns a ``ByteString`` of the requested length but doesn't actually consume the input. We peek at the next bit of input and see if it matches our ``PATH`` constant (defined above as ``ByteString("/")``). If it doesn't match we throw an error, but for a more robust solution we would want to handle other valid URIs.

Simple Send
...........

Next we handle the path itself:

To simply send a UDP datagram without listening for an answer one needs to send the ``SimpleSender`` command to the
manager:

.. includecode:: code/docs/io/HTTPServer.scala
   :include: read-path

.. code-block:: scala

The ``step`` method is a recursive method that takes a ``List`` of the accumulated path segments. It first checks if the remaining input starts with the ``PATH`` constant, and if it does, it drops that input and returns the ``readUriPart`` ``Iteratee``, whose result is added to the path segment accumulator before the ``step`` method is run again.

  IO(UdpFF) ! SimpleSender
  // or with socket options:
  import akka.io.Udp._
  IO(UdpFF) ! SimpleSender(List(SO.Broadcast(true)))

If after reading in a path segment the next input does not start with a path, we reverse the accumulated segments and return them (dropping the last segment if it is blank).

The manager will create a worker for sending, and the worker will reply with a ``SimpleSendReady`` message:

Following the path we read in the query (if it exists):

.. code-block:: scala

.. includecode:: code/docs/io/HTTPServer.scala
   :include: read-query

  case SimpleSendReady =>
    simpleSender = sender

It is much simpler than reading the path since we aren't doing any parsing of the query, as there is no standard format for the query string.

After saving the sender of the ``SimpleSendReady`` message it is possible to send out UDP datagrams with a simple
message send:

Both the path and query used the ``readUriPart`` ``Iteratee``, which is next:

.. code-block:: scala

.. includecode:: code/docs/io/HTTPServer.scala
   :include: read-uri-part

  simpleSender ! Send(data, serverAddress)

Here we have several ``Set``\s that contain valid characters pulled from the URI spec. The ``readUriPart`` method takes a ``Set`` of valid characters (already mapped to ``Byte``\s) and will continue to match characters until it reaches one that is not part of the ``Set``. If it is a percent encoded character then that is handled as a valid character and processing continues, or else we are done collecting this part of the URI.

Headers are next:
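
Returning to the UDP simple-send flow above, the pieces fit together roughly as in this sketch (actor name and payload invented; imports follow the snippets above):

.. code-block:: scala

  import java.net.InetSocketAddress
  import akka.actor.Actor
  import akka.io.{ IO, UdpFF }
  import akka.io.Udp._
  import akka.util.ByteString

  class FireAndForgetSender(target: InetSocketAddress) extends Actor {
    import context.system

    // ask the connectionless UDP manager for a simple-send worker
    IO(UdpFF) ! SimpleSender

    def receive = {
      case SimpleSendReady =>
        // the sender of this message is the worker; use it to send datagrams
        sender ! Send(ByteString("fire-and-forget"), target)
    }
  }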

Bind (and Send)
...............

.. includecode:: code/docs/io/HTTPServer.scala
   :include: read-headers

To listen for UDP datagrams arriving on a given port, the ``Bind`` command has to be sent to the connectionless UDP
manager:

And if applicable, we read in the message body:

.. code-block:: scala

.. includecode:: code/docs/io/HTTPServer.scala
   :include: read-body

  IO(UdpFF) ! Bind(handler, localAddress)

Finally we get to the actual ``Actor``:

After the bind succeeds, the sender of the ``Bind`` command will be notified with a ``Bound`` message. The sender of
this message is the worker for the UDP channel bound to the local address.

.. includecode:: code/docs/io/HTTPServer.scala
   :include: actor

.. code-block:: scala

And its companion object:

  case Bound =>
    udpWorker = sender // Save the worker ref for later use

.. includecode:: code/docs/io/HTTPServer.scala
   :include: actor-companion

The actor passed in the ``handler`` parameter will receive inbound UDP datagrams sent to the bound address:

And the OKResponse:

.. code-block:: scala

.. includecode:: code/docs/io/HTTPServer.scala
   :include: ok-response

  case Received(dataByteString, remoteAddress) => // Do something with the data

A ``main`` method to start everything up:

The ``Received`` message contains the payload of the datagram and the address of the sender.

.. includecode:: code/docs/io/HTTPServer.scala
   :include: main

It is also possible to send UDP datagrams using the ``ActorRef`` of the worker saved in ``udpWorker``:

.. code-block:: scala

  udpWorker ! Send(data, serverAddress)

.. note::
  The difference between using a bound UDP worker to send instead of a simple-send worker is that in the former case
  the sender field of the UDP datagram will be the bound local address, while in the latter it will be an undetermined
  ephemeral port.
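
Combining ``Bind``, ``Bound``, ``Received`` and ``Send`` from above, a bound listener that echoes datagrams back to their senders might look like this sketch (names invented):

.. code-block:: scala

  import java.net.InetSocketAddress
  import akka.actor.{ Actor, ActorRef }
  import akka.io.{ IO, UdpFF }
  import akka.io.Udp._

  class UdpEchoServer(local: InetSocketAddress) extends Actor {
    import context.system

    // bind the connectionless UDP manager to the local address; `self` is the handler
    IO(UdpFF) ! Bind(self, local)

    def receive = {
      case Bound =>
        // the sender is the worker for the bound channel; sends go out from the bound address
        context become ready(sender)
    }

    def ready(worker: ActorRef): Receive = {
      case Received(data, remote) =>
        worker ! Send(data, remote) // echo the datagram back to its sender
    }
  }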

Connection based UDP
^^^^^^^^^^^^^^^^^^^^

The service provided by the connection based UDP API is similar to the bind-and-send service we have seen earlier, but
the main difference is that a connection is only able to send to the ``remoteAddress`` it was connected to, and will
receive datagrams only from that address.

Connecting is similar to what we have seen in the previous section:

.. code-block:: scala

  IO(UdpConn) ! Connect(handler, remoteAddress)
  // or, with more options:
  IO(UdpConn) ! Connect(handler, Some(localAddress), remoteAddress, List(SO.Broadcast(true)))

After the connect succeeds, the sender of the ``Connect`` command will be notified with a ``Connected`` message. The sender of
this message is the worker for the UDP connection.

.. code-block:: scala

  case Connected =>
    udpConnectionActor = sender // Save the worker ref for later use

The actor passed in the ``handler`` parameter will receive the inbound UDP datagrams arriving on the connection:

.. code-block:: scala

  case Received(dataByteString) => // Do something with the data

The ``Received`` message contains the payload of the datagram but, unlike in the connectionless case, no sender address
will be provided, as a UDP connection only receives messages from the endpoint it has been connected to.

It is also possible to send UDP datagrams using the ``ActorRef`` of the worker saved in ``udpConnectionActor``:

.. code-block:: scala

  udpConnectionActor ! Send(data)

Again, the ``Send`` does not contain a remote address, as it is always the endpoint we have been connected to.

.. note::
  There is a small performance benefit in using the connection based UDP API over the connectionless one.
  If there is a ``SecurityManager`` enabled on the system, every connectionless message send has to go through a security
  check, while in the case of connection-based UDP the security check is cached after connect, so writes do
  not suffer an additional performance penalty.
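
A corresponding connection-based sketch (names invented; it assumes the ``Connect``, ``Connected``, ``Received`` and ``Send`` messages shown above are importable from ``UdpConn``):

.. code-block:: scala

  import java.net.InetSocketAddress
  import akka.actor.{ Actor, ActorRef }
  import akka.io.{ IO, UdpConn }
  import akka.util.ByteString

  class UdpConnClient(remote: InetSocketAddress) extends Actor {
    import UdpConn._
    import context.system

    // connect the UDP channel to a single remote endpoint; `self` gets the datagrams
    IO(UdpConn) ! Connect(self, remote)

    def receive = {
      case Connected =>
        context become ready(sender)
    }

    def ready(connection: ActorRef): Receive = {
      case Received(data) =>
        connection ! Send(ByteString("pong")) // no address needed: always the connected endpoint
    }
  }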

Throttling Reads and Writes
^^^^^^^^^^^^^^^^^^^^^^^^^^^

*This section is not yet ready. More coming soon*


Architecture in-depth
---------------------

For further details on the design and internal architecture see :ref:`io-layer`.

.. _spray.io: http://spray.io