integrate NetworkFailureInjector and add first test
- rework socket pipeline to transform protobuf into case classes and back
- introduce NetworkOp messages for that purpose (see the sketch below)
- make API asynchronous (because it is, really) and add Done notification
  for all server operations; enter(...) is still synchronous, because that
  is its only purpose in life (see the API sketch below)
- factor out mkPipeline in NettyRemoteTransport, enabling the very slick
TestConductorTransport (essentially a one-liner)
- switch NetworkFailureInjector from Channel{Up,Down}streamHandler to
  subclassing SimpleChannelHandler, because otherwise deadlocks occurred;
  not sure why (but SCH is the recommended way in the Netty docs, so there
  may well be a reason)
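The first two bullets describe a codec pair sitting in the Netty pipeline that translates between the wire-level protobuf messages and plain case classes. The following is only a minimal sketch of that shape: the NetworkOp name comes from the commit message, but the concrete ops, their fields, and the Wire stand-in for the generated protobuf classes are illustrative assumptions.

```scala
import org.jboss.netty.channel.{ Channel, ChannelHandlerContext }
import org.jboss.netty.handler.codec.oneone.{ OneToOneDecoder, OneToOneEncoder }

// Case-class view of the wire protocol; the concrete ops are illustrative only.
sealed trait NetworkOp
final case class EnterBarrier(name: String) extends NetworkOp
final case class ThrottleMsg(target: String, rateMBit: Float) extends NetworkOp

// Stand-in for the generated protobuf message (the real code uses protobuf classes).
final case class Wire(kind: String, args: List[String])

// Inbound: wire representation => NetworkOp case classes.
class MsgDecoder extends OneToOneDecoder {
  override def decode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match {
    case Wire("enter", name :: Nil)              => EnterBarrier(name)
    case Wire("throttle", target :: rate :: Nil) => ThrottleMsg(target, rate.toFloat)
    case other                                   => other // pass unknown frames through unchanged
  }
}

// Outbound: NetworkOp case classes => wire representation.
class MsgEncoder extends OneToOneEncoder {
  override def encode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match {
    case EnterBarrier(name)            => Wire("enter", name :: Nil)
    case ThrottleMsg(target, rateMBit) => Wire("throttle", target :: rateMBit.toString :: Nil)
    case other                         => other
  }
}
```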
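The asynchronous API shape from the third bullet can be pictured roughly as below; the trait and operation names are purely illustrative, not the actual TestConductor interface.

```scala
import scala.concurrent.Future

// Completion token the server sends back for every operation.
case object Done

// Illustrative shape only; the real interface and names may differ.
trait ConductorApi {
  // Server operations return immediately with a Future that is completed
  // with Done once the server has acknowledged them.
  def throttle(node: String, rateMBit: Float): Future[Done.type]
  def disconnect(node: String): Future[Done.type]

  // enter(...) blocks until every participant has reached the barrier;
  // being synchronous is its whole purpose.
  def enter(barrier: String): Unit
}
```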
parent 0314b9abbb
commit 9266ac451b
15 changed files with 538 additions and 269 deletions
@@ -112,8 +112,6 @@ class ActiveRemoteClient private[akka] (
   private var connection: ChannelFuture = _
   @volatile
   private[remote] var openChannels: DefaultChannelGroup = _
-  @volatile
-  private var executionHandler: ExecutionHandler = _

   @volatile
   private var reconnectionTimeWindowStart = 0L
@@ -156,9 +154,8 @@ class ActiveRemoteClient private[akka] (
     runSwitch switchOn {
       openChannels = new DefaultDisposableChannelGroup(classOf[RemoteClient].getName)

-      executionHandler = new ExecutionHandler(netty.executor)
       val b = new ClientBootstrap(netty.clientChannelFactory)
-      b.setPipelineFactory(new ActiveRemoteClientPipelineFactory(name, b, executionHandler, remoteAddress, localAddress, this))
+      b.setPipelineFactory(netty.mkPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), true))
       b.setOption("tcpNoDelay", true)
       b.setOption("keepAlive", true)
       b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis)
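The mkPipeline call introduced in the hunk above replaces the per-client pipeline factory that is deleted further down: the transport assembles the shared pipeline stages once, and only the endpoint-specific handler varies, so a subclass can splice extra handlers in. The sketch below is an assumption about that shape (the class names, the extraHandlers hook and the frame size are made up), not the actual NettyRemoteTransport code; the real mkPipeline also installs the idle-state timeout, the protobuf codecs and the execution handler.

```scala
import org.jboss.netty.channel.{ ChannelHandler, ChannelPipeline, ChannelPipelineFactory, SimpleChannelHandler, StaticChannelPipeline }
import org.jboss.netty.handler.codec.frame.{ LengthFieldBasedFrameDecoder, LengthFieldPrepender }

// Hypothetical transport base: builds the shared stages, takes the endpoint handler as a parameter.
class SketchTransport {
  // hook for subclasses to contribute additional handlers (an assumption, not the real mechanism)
  protected def extraHandlers: List[ChannelHandler] = Nil

  def mkPipeline(endpoint: ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
    new ChannelPipelineFactory {
      def getPipeline: ChannelPipeline = {
        // shared framing stages; the frame size here is a placeholder
        val shared: List[ChannelHandler] =
          List(new LengthFieldBasedFrameDecoder(1048576, 0, 4, 0, 4), new LengthFieldPrepender(4))
        val handlers: List[ChannelHandler] = shared ::: extraHandlers ::: List(endpoint)
        new StaticChannelPipeline(handlers: _*)
      }
    }
}

// With the shared stages factored out, the test transport's contribution really is
// essentially one line: splice a failure-injecting handler in front of the endpoint.
class SketchTestConductorTransport extends SketchTransport {
  override protected def extraHandlers: List[ChannelHandler] =
    List(new SimpleChannelHandler) // stand-in for the real NetworkFailureInjector
}
```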
@@ -206,7 +203,6 @@ class ActiveRemoteClient private[akka] (
         if (openChannels ne null) openChannels.close.awaitUninterruptibly()
       } finally {
         connection = null
-        executionHandler = null
       }
     }

@@ -319,31 +315,6 @@ class ActiveRemoteClientHandler(
   }
 }

-class ActiveRemoteClientPipelineFactory(
-  name: String,
-  bootstrap: ClientBootstrap,
-  executionHandler: ExecutionHandler,
-  remoteAddress: Address,
-  localAddress: Address,
-  client: ActiveRemoteClient) extends ChannelPipelineFactory {
-
-  import client.netty.settings
-
-  def getPipeline: ChannelPipeline = {
-    val timeout = new IdleStateHandler(client.netty.timer,
-      settings.ReadTimeout.toSeconds.toInt,
-      settings.WriteTimeout.toSeconds.toInt,
-      settings.AllTimeout.toSeconds.toInt)
-    val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4)
-    val lenPrep = new LengthFieldPrepender(4)
-    val messageDec = new RemoteMessageDecoder
-    val messageEnc = new RemoteMessageEncoder(client.netty)
-    val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, localAddress, client.netty.timer, client)
-
-    new StaticChannelPipeline(timeout, lenDec, messageDec, lenPrep, messageEnc, executionHandler, remoteClient)
-  }
-}
-
 class PassiveRemoteClient(val currentChannel: Channel,
   netty: NettyRemoteTransport,
   remoteAddress: Address)
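The SimpleChannelHandler switch mentioned in the last bullet of the commit message amounts to intercepting both event directions in one handler. Below is a heavily simplified, hypothetical sketch, not the actual NetworkFailureInjector (which is driven by the TestConductor's NetworkOp control messages and can throttle as well as drop).

```scala
import java.util.concurrent.atomic.AtomicBoolean
import org.jboss.netty.channel.{ ChannelHandlerContext, MessageEvent, SimpleChannelHandler }

// Hypothetical failure injector: a single flag decides whether traffic is
// silently dropped in both directions.
class DropInjector extends SimpleChannelHandler {
  private val dropping = new AtomicBoolean(false)

  def setDropping(on: Boolean): Unit = dropping.set(on)

  // inbound traffic: swallow the event while dropping, otherwise pass it upstream
  override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent): Unit =
    if (!dropping.get) super.messageReceived(ctx, e)

  // outbound traffic: same treatment on the downstream side
  override def writeRequested(ctx: ChannelHandlerContext, e: MessageEvent): Unit =
    if (!dropping.get) super.writeRequested(ctx, e)
}
```

Dropping a write this way leaves the Netty write future uncompleted; the real handler presumably has to deal with that as well.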