From 418b11d5539bfa69eabdcce5f1ad0e77550a5154 Mon Sep 17 00:00:00 2001
From: Roland
Date: Wed, 14 Sep 2011 16:09:17 +0200
Subject: [PATCH 001/106] first step towards TestConductor

- it compiles
- server side ("Conductor") functions almost there
- client side ("Player") sketched, but missing network failures
- no internal failure handling whatsoever, waiting for Project DeathWatch
- not yet possible to shutdown, need to kill VM
- next step is to hook into the NettyRemoteSupport for failure injection
---
 .../testconductor/TestConductorProtocol.java  | 2610 +++++++++++++++++
 .../main/protocol/TestConductorProtocol.proto |   48 +
 .../akka/remote/testconductor/Conductor.scala |  268 ++
 .../akka/remote/testconductor/DataTypes.scala |   13 +
 .../akka/remote/testconductor/Features.scala  |   74 +
 .../NetworkFailureInjector.scala              |  161 +
 .../akka/remote/testconductor/Player.scala    |  133 +
 .../testconductor/RemoteConnection.scala      |   52 +
 8 files changed, 3359 insertions(+)
 create mode 100644 akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
 create mode 100644 akka-remote/src/main/protocol/TestConductorProtocol.proto
 create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
 create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
 create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
 create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
 create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
 create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala

diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
new file mode 100644
index 0000000000..e9065b53e4
--- /dev/null
+++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
@@ -0,0 +1,2610 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: TestConductorProtocol.proto + +package akka.remote.testconductor; + +public final class TestConductorProtocol { + private TestConductorProtocol() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public enum FailType + implements com.google.protobuf.ProtocolMessageEnum { + Throttle(0, 1), + Disconnect(1, 2), + Abort(2, 3), + Shutdown(3, 4), + ; + + public static final int Throttle_VALUE = 1; + public static final int Disconnect_VALUE = 2; + public static final int Abort_VALUE = 3; + public static final int Shutdown_VALUE = 4; + + + public final int getNumber() { return value; } + + public static FailType valueOf(int value) { + switch (value) { + case 1: return Throttle; + case 2: return Disconnect; + case 3: return Abort; + case 4: return Shutdown; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FailType findValueByNumber(int number) { + return FailType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(0); + } + + private static final FailType[] VALUES = { + Throttle, Disconnect, Abort, Shutdown, + }; + + public static FailType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private FailType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:FailType) + } + + public enum Direction + implements com.google.protobuf.ProtocolMessageEnum { + Send(0, 1), + Receive(1, 2), + ; + + public static final int Send_VALUE = 1; + public static final int Receive_VALUE = 2; + + + public final int getNumber() { return value; } + + public static Direction valueOf(int value) { + switch (value) { + case 1: return Send; + case 2: return Receive; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Direction findValueByNumber(int number) { + return Direction.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(1); + } + + private static final Direction[] VALUES = { + Send, Receive, + }; + + public static Direction valueOf( + 
com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Direction(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:Direction) + } + + public interface WrapperOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .Hello hello = 1; + boolean hasHello(); + akka.remote.testconductor.TestConductorProtocol.Hello getHello(); + akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder getHelloOrBuilder(); + + // optional .EnterBarrier barrier = 2; + boolean hasBarrier(); + akka.remote.testconductor.TestConductorProtocol.EnterBarrier getBarrier(); + akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder getBarrierOrBuilder(); + + // optional .InjectFailure failure = 3; + boolean hasFailure(); + akka.remote.testconductor.TestConductorProtocol.InjectFailure getFailure(); + akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder getFailureOrBuilder(); + } + public static final class Wrapper extends + com.google.protobuf.GeneratedMessage + implements WrapperOrBuilder { + // Use Wrapper.newBuilder() to construct. + private Wrapper(Builder builder) { + super(builder); + } + private Wrapper(boolean noInit) {} + + private static final Wrapper defaultInstance; + public static Wrapper getDefaultInstance() { + return defaultInstance; + } + + public Wrapper getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Wrapper_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Wrapper_fieldAccessorTable; + } + + private int bitField0_; + // optional .Hello hello = 1; + public static final int HELLO_FIELD_NUMBER = 1; + private akka.remote.testconductor.TestConductorProtocol.Hello hello_; + public boolean hasHello() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public akka.remote.testconductor.TestConductorProtocol.Hello getHello() { + return hello_; + } + public akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder getHelloOrBuilder() { + return hello_; + } + + // optional .EnterBarrier barrier = 2; + public static final int BARRIER_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.EnterBarrier barrier_; + public boolean hasBarrier() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier getBarrier() { + return barrier_; + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder getBarrierOrBuilder() { + return barrier_; + } + + // optional .InjectFailure failure = 3; + public static final int FAILURE_FIELD_NUMBER = 3; + private akka.remote.testconductor.TestConductorProtocol.InjectFailure failure_; + public boolean hasFailure() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailure getFailure() { + return failure_; + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder getFailureOrBuilder() { + return 
failure_; + } + + private void initFields() { + hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasHello()) { + if (!getHello().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasBarrier()) { + if (!getBarrier().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasFailure()) { + if (!getFailure().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, hello_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, barrier_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, failure_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, hello_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, barrier_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, failure_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static 
akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.Wrapper prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.WrapperOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Wrapper_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Wrapper_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.Wrapper.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getHelloFieldBuilder(); + getBarrierFieldBuilder(); + getFailureFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (helloBuilder_ == null) { + hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + } else { + helloBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (barrierBuilder_ == null) { + barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + } else { + barrierBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if 
(failureBuilder_ == null) { + failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + } else { + failureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.Wrapper.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.Wrapper getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.Wrapper.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.Wrapper build() { + akka.remote.testconductor.TestConductorProtocol.Wrapper result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.Wrapper buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.Wrapper result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.Wrapper buildPartial() { + akka.remote.testconductor.TestConductorProtocol.Wrapper result = new akka.remote.testconductor.TestConductorProtocol.Wrapper(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (helloBuilder_ == null) { + result.hello_ = hello_; + } else { + result.hello_ = helloBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (barrierBuilder_ == null) { + result.barrier_ = barrier_; + } else { + result.barrier_ = barrierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (failureBuilder_ == null) { + result.failure_ = failure_; + } else { + result.failure_ = failureBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.Wrapper) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.Wrapper)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.Wrapper other) { + if (other == akka.remote.testconductor.TestConductorProtocol.Wrapper.getDefaultInstance()) return this; + if (other.hasHello()) { + mergeHello(other.getHello()); + } + if (other.hasBarrier()) { + mergeBarrier(other.getBarrier()); + } + if (other.hasFailure()) { + mergeFailure(other.getFailure()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasHello()) { + if (!getHello().isInitialized()) { + + return false; + } + } + if (hasBarrier()) { + if (!getBarrier().isInitialized()) { + + return false; + } + } + if (hasFailure()) { + if (!getFailure().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + akka.remote.testconductor.TestConductorProtocol.Hello.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.Hello.newBuilder(); + if (hasHello()) { + subBuilder.mergeFrom(getHello()); + } + input.readMessage(subBuilder, extensionRegistry); + setHello(subBuilder.buildPartial()); + break; + } + case 18: { + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.newBuilder(); + if (hasBarrier()) { + subBuilder.mergeFrom(getBarrier()); + } + input.readMessage(subBuilder, extensionRegistry); + setBarrier(subBuilder.buildPartial()); + break; + } + case 26: { + akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.InjectFailure.newBuilder(); + if (hasFailure()) { + subBuilder.mergeFrom(getFailure()); + } + input.readMessage(subBuilder, extensionRegistry); + setFailure(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // optional .Hello hello = 1; + private akka.remote.testconductor.TestConductorProtocol.Hello hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Hello, akka.remote.testconductor.TestConductorProtocol.Hello.Builder, akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder> helloBuilder_; + public boolean hasHello() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public akka.remote.testconductor.TestConductorProtocol.Hello getHello() { + if (helloBuilder_ == null) { + return hello_; + } else { + return helloBuilder_.getMessage(); + } + } + public Builder setHello(akka.remote.testconductor.TestConductorProtocol.Hello value) { + if (helloBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hello_ = value; + onChanged(); + } else { + helloBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setHello( + akka.remote.testconductor.TestConductorProtocol.Hello.Builder builderForValue) { + if (helloBuilder_ == null) { + hello_ = builderForValue.build(); + onChanged(); + } else { + helloBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeHello(akka.remote.testconductor.TestConductorProtocol.Hello value) { + if (helloBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + hello_ != akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance()) { + hello_ = + akka.remote.testconductor.TestConductorProtocol.Hello.newBuilder(hello_).mergeFrom(value).buildPartial(); + } else { + hello_ = value; + } + onChanged(); + } else { + helloBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearHello() { + if (helloBuilder_ == null) { + hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + onChanged(); + } else { + 
helloBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.Hello.Builder getHelloBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getHelloFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder getHelloOrBuilder() { + if (helloBuilder_ != null) { + return helloBuilder_.getMessageOrBuilder(); + } else { + return hello_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Hello, akka.remote.testconductor.TestConductorProtocol.Hello.Builder, akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder> + getHelloFieldBuilder() { + if (helloBuilder_ == null) { + helloBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Hello, akka.remote.testconductor.TestConductorProtocol.Hello.Builder, akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder>( + hello_, + getParentForChildren(), + isClean()); + hello_ = null; + } + return helloBuilder_; + } + + // optional .EnterBarrier barrier = 2; + private akka.remote.testconductor.TestConductorProtocol.EnterBarrier barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.EnterBarrier, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder, akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder> barrierBuilder_; + public boolean hasBarrier() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier getBarrier() { + if (barrierBuilder_ == null) { + return barrier_; + } else { + return barrierBuilder_.getMessage(); + } + } + public Builder setBarrier(akka.remote.testconductor.TestConductorProtocol.EnterBarrier value) { + if (barrierBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + barrier_ = value; + onChanged(); + } else { + barrierBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setBarrier( + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder builderForValue) { + if (barrierBuilder_ == null) { + barrier_ = builderForValue.build(); + onChanged(); + } else { + barrierBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeBarrier(akka.remote.testconductor.TestConductorProtocol.EnterBarrier value) { + if (barrierBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + barrier_ != akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance()) { + barrier_ = + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.newBuilder(barrier_).mergeFrom(value).buildPartial(); + } else { + barrier_ = value; + } + onChanged(); + } else { + barrierBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearBarrier() { + if (barrierBuilder_ == null) { + barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + onChanged(); + } else { + barrierBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder getBarrierBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return 
getBarrierFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder getBarrierOrBuilder() { + if (barrierBuilder_ != null) { + return barrierBuilder_.getMessageOrBuilder(); + } else { + return barrier_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.EnterBarrier, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder, akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder> + getBarrierFieldBuilder() { + if (barrierBuilder_ == null) { + barrierBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.EnterBarrier, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder, akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder>( + barrier_, + getParentForChildren(), + isClean()); + barrier_ = null; + } + return barrierBuilder_; + } + + // optional .InjectFailure failure = 3; + private akka.remote.testconductor.TestConductorProtocol.InjectFailure failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.InjectFailure, akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder, akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder> failureBuilder_; + public boolean hasFailure() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailure getFailure() { + if (failureBuilder_ == null) { + return failure_; + } else { + return failureBuilder_.getMessage(); + } + } + public Builder setFailure(akka.remote.testconductor.TestConductorProtocol.InjectFailure value) { + if (failureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + failure_ = value; + onChanged(); + } else { + failureBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder setFailure( + akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder builderForValue) { + if (failureBuilder_ == null) { + failure_ = builderForValue.build(); + onChanged(); + } else { + failureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder mergeFailure(akka.remote.testconductor.TestConductorProtocol.InjectFailure value) { + if (failureBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + failure_ != akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance()) { + failure_ = + akka.remote.testconductor.TestConductorProtocol.InjectFailure.newBuilder(failure_).mergeFrom(value).buildPartial(); + } else { + failure_ = value; + } + onChanged(); + } else { + failureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder clearFailure() { + if (failureBuilder_ == null) { + failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + onChanged(); + } else { + failureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder getFailureBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getFailureFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder getFailureOrBuilder() { + if (failureBuilder_ != 
null) { + return failureBuilder_.getMessageOrBuilder(); + } else { + return failure_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.InjectFailure, akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder, akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder> + getFailureFieldBuilder() { + if (failureBuilder_ == null) { + failureBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.InjectFailure, akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder, akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder>( + failure_, + getParentForChildren(), + isClean()); + failure_ = null; + } + return failureBuilder_; + } + + // @@protoc_insertion_point(builder_scope:Wrapper) + } + + static { + defaultInstance = new Wrapper(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Wrapper) + } + + public interface HelloOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + boolean hasName(); + String getName(); + + // required string host = 2; + boolean hasHost(); + String getHost(); + + // required int32 port = 3; + boolean hasPort(); + int getPort(); + } + public static final class Hello extends + com.google.protobuf.GeneratedMessage + implements HelloOrBuilder { + // Use Hello.newBuilder() to construct. + private Hello(Builder builder) { + super(builder); + } + private Hello(boolean noInit) {} + + private static final Hello defaultInstance; + public static Hello getDefaultInstance() { + return defaultInstance; + } + + public Hello getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Hello_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Hello_fieldAccessorTable; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + name_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string host = 2; + public static final int HOST_FIELD_NUMBER = 2; + private java.lang.Object host_; + public boolean hasHost() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getHost() { + java.lang.Object ref = host_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + host_ = s; + } + return s; + } + } + private 
com.google.protobuf.ByteString getHostBytes() { + java.lang.Object ref = host_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + host_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required int32 port = 3; + public static final int PORT_FIELD_NUMBER = 3; + private int port_; + public boolean hasPort() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getPort() { + return port_; + } + + private void initFields() { + name_ = ""; + host_ = ""; + port_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasHost()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPort()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getHostBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt32(3, port_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getHostBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, port_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom(java.io.InputStream input) 
+ throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.Hello prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Hello_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Hello_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.Hello.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + host_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
akka.remote.testconductor.TestConductorProtocol.Hello.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.Hello getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.Hello build() { + akka.remote.testconductor.TestConductorProtocol.Hello result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.Hello buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.Hello result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.Hello buildPartial() { + akka.remote.testconductor.TestConductorProtocol.Hello result = new akka.remote.testconductor.TestConductorProtocol.Hello(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.host_ = host_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.port_ = port_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.Hello) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.Hello)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.Hello other) { + if (other == akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + if (other.hasHost()) { + setHost(other.getHost()); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + if (!hasHost()) { + + return false; + } + if (!hasPort()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + host_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + port_ = input.readInt32(); + break; + } + } + } + } + + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + public boolean hasName() { + return ((bitField0_ & 
0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + name_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + void setName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + } + + // required string host = 2; + private java.lang.Object host_ = ""; + public boolean hasHost() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getHost() { + java.lang.Object ref = host_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + host_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setHost(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + host_ = value; + onChanged(); + return this; + } + public Builder clearHost() { + bitField0_ = (bitField0_ & ~0x00000002); + host_ = getDefaultInstance().getHost(); + onChanged(); + return this; + } + void setHost(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + host_ = value; + onChanged(); + } + + // required int32 port = 3; + private int port_ ; + public boolean hasPort() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getPort() { + return port_; + } + public Builder setPort(int value) { + bitField0_ |= 0x00000004; + port_ = value; + onChanged(); + return this; + } + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000004); + port_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Hello) + } + + static { + defaultInstance = new Hello(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Hello) + } + + public interface EnterBarrierOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + boolean hasName(); + String getName(); + } + public static final class EnterBarrier extends + com.google.protobuf.GeneratedMessage + implements EnterBarrierOrBuilder { + // Use EnterBarrier.newBuilder() to construct. 
+ private EnterBarrier(Builder builder) { + super(builder); + } + private EnterBarrier(boolean noInit) {} + + private static final EnterBarrier defaultInstance; + public static EnterBarrier getDefaultInstance() { + return defaultInstance; + } + + public EnterBarrier getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_EnterBarrier_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_EnterBarrier_fieldAccessorTable; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + name_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + name_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.EnterBarrier prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_EnterBarrier_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_EnterBarrier_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.EnterBarrier.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier build() { + akka.remote.testconductor.TestConductorProtocol.EnterBarrier result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.EnterBarrier buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.EnterBarrier result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier buildPartial() { + akka.remote.testconductor.TestConductorProtocol.EnterBarrier result = new akka.remote.testconductor.TestConductorProtocol.EnterBarrier(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.EnterBarrier) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.EnterBarrier)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.EnterBarrier other) { + if (other == akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + 
java.lang.Object ref = name_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + name_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + void setName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:EnterBarrier) + } + + static { + defaultInstance = new EnterBarrier(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:EnterBarrier) + } + + public interface InjectFailureOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .FailType failure = 1; + boolean hasFailure(); + akka.remote.testconductor.TestConductorProtocol.FailType getFailure(); + + // optional .Direction direction = 2; + boolean hasDirection(); + akka.remote.testconductor.TestConductorProtocol.Direction getDirection(); + + // optional string host = 3; + boolean hasHost(); + String getHost(); + + // optional int32 port = 4; + boolean hasPort(); + int getPort(); + + // optional float rateMBit = 5; + boolean hasRateMBit(); + float getRateMBit(); + + // optional int32 exitValue = 6; + boolean hasExitValue(); + int getExitValue(); + } + public static final class InjectFailure extends + com.google.protobuf.GeneratedMessage + implements InjectFailureOrBuilder { + // Use InjectFailure.newBuilder() to construct. + private InjectFailure(Builder builder) { + super(builder); + } + private InjectFailure(boolean noInit) {} + + private static final InjectFailure defaultInstance; + public static InjectFailure getDefaultInstance() { + return defaultInstance; + } + + public InjectFailure getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_InjectFailure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_InjectFailure_fieldAccessorTable; + } + + private int bitField0_; + // required .FailType failure = 1; + public static final int FAILURE_FIELD_NUMBER = 1; + private akka.remote.testconductor.TestConductorProtocol.FailType failure_; + public boolean hasFailure() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public akka.remote.testconductor.TestConductorProtocol.FailType getFailure() { + return failure_; + } + + // optional .Direction direction = 2; + public static final int DIRECTION_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.Direction direction_; + public boolean hasDirection() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.Direction getDirection() { + return direction_; + } + + // optional string host = 3; + public static final int HOST_FIELD_NUMBER = 3; + private java.lang.Object host_; + public boolean hasHost() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getHost() { + java.lang.Object ref = host_; + if (ref instanceof String) { + return (String) ref; + 
} else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + host_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getHostBytes() { + java.lang.Object ref = host_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + host_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int32 port = 4; + public static final int PORT_FIELD_NUMBER = 4; + private int port_; + public boolean hasPort() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getPort() { + return port_; + } + + // optional float rateMBit = 5; + public static final int RATEMBIT_FIELD_NUMBER = 5; + private float rateMBit_; + public boolean hasRateMBit() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public float getRateMBit() { + return rateMBit_; + } + + // optional int32 exitValue = 6; + public static final int EXITVALUE_FIELD_NUMBER = 6; + private int exitValue_; + public boolean hasExitValue() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public int getExitValue() { + return exitValue_; + } + + private void initFields() { + failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; + direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; + host_ = ""; + port_ = 0; + rateMBit_ = 0F; + exitValue_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasFailure()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, failure_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, direction_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getHostBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt32(4, port_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeFloat(5, rateMBit_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeInt32(6, exitValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, failure_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, direction_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getHostBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, port_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeFloatSize(5, rateMBit_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + 
.computeInt32Size(6, exitValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.InjectFailure prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder 
newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_InjectFailure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_InjectFailure_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.InjectFailure.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; + bitField0_ = (bitField0_ & ~0x00000001); + direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; + bitField0_ = (bitField0_ & ~0x00000002); + host_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000008); + rateMBit_ = 0F; + bitField0_ = (bitField0_ & ~0x00000010); + exitValue_ = 0; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.InjectFailure getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.InjectFailure build() { + akka.remote.testconductor.TestConductorProtocol.InjectFailure result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.InjectFailure buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.InjectFailure result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.InjectFailure buildPartial() { + akka.remote.testconductor.TestConductorProtocol.InjectFailure result = new akka.remote.testconductor.TestConductorProtocol.InjectFailure(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.failure_ = failure_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.direction_ = direction_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.host_ = host_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) 
{ + to_bitField0_ |= 0x00000008; + } + result.port_ = port_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.rateMBit_ = rateMBit_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.exitValue_ = exitValue_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.InjectFailure) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.InjectFailure)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.InjectFailure other) { + if (other == akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance()) return this; + if (other.hasFailure()) { + setFailure(other.getFailure()); + } + if (other.hasDirection()) { + setDirection(other.getDirection()); + } + if (other.hasHost()) { + setHost(other.getHost()); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + if (other.hasRateMBit()) { + setRateMBit(other.getRateMBit()); + } + if (other.hasExitValue()) { + setExitValue(other.getExitValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFailure()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + akka.remote.testconductor.TestConductorProtocol.FailType value = akka.remote.testconductor.TestConductorProtocol.FailType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + failure_ = value; + } + break; + } + case 16: { + int rawValue = input.readEnum(); + akka.remote.testconductor.TestConductorProtocol.Direction value = akka.remote.testconductor.TestConductorProtocol.Direction.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + direction_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + host_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + port_ = input.readInt32(); + break; + } + case 45: { + bitField0_ |= 0x00000010; + rateMBit_ = input.readFloat(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + exitValue_ = input.readInt32(); + break; + } + } + } + } + + private int bitField0_; + + // required .FailType failure = 1; + private akka.remote.testconductor.TestConductorProtocol.FailType failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; + public boolean hasFailure() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public akka.remote.testconductor.TestConductorProtocol.FailType getFailure() { + return failure_; + } + 
public Builder setFailure(akka.remote.testconductor.TestConductorProtocol.FailType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + failure_ = value; + onChanged(); + return this; + } + public Builder clearFailure() { + bitField0_ = (bitField0_ & ~0x00000001); + failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; + onChanged(); + return this; + } + + // optional .Direction direction = 2; + private akka.remote.testconductor.TestConductorProtocol.Direction direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; + public boolean hasDirection() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.Direction getDirection() { + return direction_; + } + public Builder setDirection(akka.remote.testconductor.TestConductorProtocol.Direction value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + direction_ = value; + onChanged(); + return this; + } + public Builder clearDirection() { + bitField0_ = (bitField0_ & ~0x00000002); + direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; + onChanged(); + return this; + } + + // optional string host = 3; + private java.lang.Object host_ = ""; + public boolean hasHost() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getHost() { + java.lang.Object ref = host_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + host_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setHost(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + host_ = value; + onChanged(); + return this; + } + public Builder clearHost() { + bitField0_ = (bitField0_ & ~0x00000004); + host_ = getDefaultInstance().getHost(); + onChanged(); + return this; + } + void setHost(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000004; + host_ = value; + onChanged(); + } + + // optional int32 port = 4; + private int port_ ; + public boolean hasPort() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getPort() { + return port_; + } + public Builder setPort(int value) { + bitField0_ |= 0x00000008; + port_ = value; + onChanged(); + return this; + } + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000008); + port_ = 0; + onChanged(); + return this; + } + + // optional float rateMBit = 5; + private float rateMBit_ ; + public boolean hasRateMBit() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public float getRateMBit() { + return rateMBit_; + } + public Builder setRateMBit(float value) { + bitField0_ |= 0x00000010; + rateMBit_ = value; + onChanged(); + return this; + } + public Builder clearRateMBit() { + bitField0_ = (bitField0_ & ~0x00000010); + rateMBit_ = 0F; + onChanged(); + return this; + } + + // optional int32 exitValue = 6; + private int exitValue_ ; + public boolean hasExitValue() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public int getExitValue() { + return exitValue_; + } + public Builder setExitValue(int value) { + bitField0_ |= 0x00000020; + exitValue_ = value; + onChanged(); + return this; + } + public Builder clearExitValue() { + bitField0_ = (bitField0_ & ~0x00000020); + exitValue_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:InjectFailure) + } + + static { + defaultInstance = new 
InjectFailure(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:InjectFailure) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Wrapper_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Wrapper_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Hello_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Hello_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EnterBarrier_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EnterBarrier_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_InjectFailure_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_InjectFailure_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\033TestConductorProtocol.proto\"a\n\007Wrapper" + + "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" + + "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" + + "tFailure\"1\n\005Hello\022\014\n\004name\030\001 \002(\t\022\014\n\004host\030" + + "\002 \002(\t\022\014\n\004port\030\003 \002(\005\"\034\n\014EnterBarrier\022\014\n\004n" + + "ame\030\001 \002(\t\"\213\001\n\rInjectFailure\022\032\n\007failure\030\001" + + " \002(\0162\t.FailType\022\035\n\tdirection\030\002 \001(\0162\n.Dir" + + "ection\022\014\n\004host\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\020\n\010ra" + + "teMBit\030\005 \001(\002\022\021\n\texitValue\030\006 \001(\005*A\n\010FailT" + + "ype\022\014\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abo", + "rt\020\003\022\014\n\010Shutdown\020\004*\"\n\tDirection\022\010\n\004Send\020" + + "\001\022\013\n\007Receive\020\002B\035\n\031akka.remote.testconduc" + + "torH\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_Wrapper_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_Wrapper_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Wrapper_descriptor, + new java.lang.String[] { "Hello", "Barrier", "Failure", }, + akka.remote.testconductor.TestConductorProtocol.Wrapper.class, + akka.remote.testconductor.TestConductorProtocol.Wrapper.Builder.class); + internal_static_Hello_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_Hello_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Hello_descriptor, + new java.lang.String[] { "Name", "Host", "Port", }, + akka.remote.testconductor.TestConductorProtocol.Hello.class, + akka.remote.testconductor.TestConductorProtocol.Hello.Builder.class); + internal_static_EnterBarrier_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_EnterBarrier_fieldAccessorTable 
= new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EnterBarrier_descriptor, + new java.lang.String[] { "Name", }, + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class, + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class); + internal_static_InjectFailure_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_InjectFailure_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_InjectFailure_descriptor, + new java.lang.String[] { "Failure", "Direction", "Host", "Port", "RateMBit", "ExitValue", }, + akka.remote.testconductor.TestConductorProtocol.InjectFailure.class, + akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote/src/main/protocol/TestConductorProtocol.proto new file mode 100644 index 0000000000..1db35a7516 --- /dev/null +++ b/akka-remote/src/main/protocol/TestConductorProtocol.proto @@ -0,0 +1,48 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ + +option java_package = "akka.remote.testconductor"; +option optimize_for = SPEED; + +/****************************************** + Compile with: + cd ./akka-remote/src/main/protocol + protoc TestConductorProtocol.proto --java_out ../java +*******************************************/ + +message Wrapper { + optional Hello hello = 1; + optional EnterBarrier barrier = 2; + optional InjectFailure failure = 3; +} + +message Hello { + required string name = 1; + required string host = 2; + required int32 port = 3; +} + +message EnterBarrier { + required string name = 1; +} + +enum FailType { + Throttle = 1; + Disconnect = 2; + Abort = 3; + Shutdown = 4; +} +enum Direction { + Send = 1; + Receive = 2; +} +message InjectFailure { + required FailType failure = 1; + optional Direction direction = 2; + optional string host = 3; + optional int32 port = 4; + optional float rateMBit = 5; + optional int32 exitValue = 6; +} + diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala new file mode 100644 index 0000000000..58a6a5f88e --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -0,0 +1,268 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.remote.testconductor + +import akka.actor.{ Actor, ActorRef, LoggingFSM, Timeout, UntypedChannel } +import akka.event.EventHandler +import RemoteConnection.getAddrString +import akka.util.duration._ +import TestConductorProtocol._ +import akka.NoStackTrace +import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } + +object Conductor extends RunControl with FailureInject with BarrierSync { + + import Controller._ + + private val controller = Actor.actorOf[Controller] + controller ! ClientConnected + + override def enter(name: String*) { + implicit val timeout = Timeout(30 seconds) + name foreach (b ⇒ (controller ? EnterBarrier(b)).get) + } + + override def throttle(node: String, target: String, direction: Direction, rateMBit: Float) { + controller ! 
Throttle(node, target, direction, rateMBit) + } + + override def blackhole(node: String, target: String, direction: Direction) { + controller ! Throttle(node, target, direction, 0f) + } + + override def disconnect(node: String, target: String) { + controller ! Disconnect(node, target, false) + } + + override def abort(node: String, target: String) { + controller ! Disconnect(node, target, true) + } + + override def shutdown(node: String, exitValue: Int) { + controller ! Terminate(node, exitValue) + } + + override def kill(node: String) { + controller ! Terminate(node, -1) + } + + override def getNodes = (controller ? GetNodes).as[List[String]].get + + override def removeNode(node: String) { + controller ! Remove(node) + } + +} + +class ConductorHandler(controller: ActorRef) extends SimpleChannelUpstreamHandler { + + var clients = Map[Channel, ActorRef]() + + override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "connection from " + getAddrString(channel)) + val fsm = Actor.actorOf(new ServerFSM(controller, channel)) + clients += channel -> fsm + } + + override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "disconnect from " + getAddrString(channel)) + val fsm = clients(channel) + fsm.stop() + clients -= channel + } + + override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "message from " + getAddrString(channel) + ": " + event.getMessage) + event.getMessage match { + case msg: Wrapper if msg.getAllFields.size == 1 ⇒ + clients(channel) ! msg + case msg ⇒ + EventHandler.info(this, "client " + getAddrString(channel) + " sent garbage '" + msg + "', disconnecting") + channel.close() + } + } + +} + +object ServerFSM { + sealed trait State + case object Initial extends State + case object Ready extends State + + case class Send(msg: Wrapper) +} + +class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Null] { + import ServerFSM._ + import akka.actor.FSM._ + import Controller._ + + startWith(Initial, null) + + when(Initial, stateTimeout = 10 seconds) { + case Ev(msg: Wrapper) ⇒ + if (msg.hasHello) { + val hello = msg.getHello + controller ! ClientConnected(hello.getName, hello.getHost, hello.getPort) + goto(Ready) + } else { + EventHandler.warning(this, "client " + getAddrString(channel) + " sent no Hello in first message, disconnecting") + channel.close() + stop() + } + case Ev(StateTimeout) ⇒ + EventHandler.info(this, "closing channel to " + getAddrString(channel) + " because of Hello timeout") + channel.close() + stop() + } + + when(Ready) { + case Ev(msg: Wrapper) ⇒ + if (msg.hasBarrier) { + val barrier = msg.getBarrier + controller ! 
EnterBarrier(barrier.getName) + } else { + EventHandler.warning(this, "client " + getAddrString(channel) + " sent unsupported message " + msg) + } + stay + case Ev(Send(msg)) ⇒ + channel.write(msg) + stay + case Ev(EnterBarrier(name)) ⇒ + val barrier = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build + channel.write(Wrapper.newBuilder.setBarrier(barrier).build) + stay + } + + initialize +} + +object Controller { + case class ClientConnected(name: String, host: String, port: Int) + case class ClientDisconnected(name: String) + case object GetNodes + + case class NodeInfo(name: String, host: String, port: Int, fsm: ActorRef) +} + +class Controller extends Actor { + import Controller._ + + val host = System.getProperty("akka.testconductor.host", "localhost") + val port = Integer.getInteger("akka.testconductor.port", 4545) + val connection = RemoteConnection(Server, host, port, new ConductorHandler(self)) + + val barrier = Actor.actorOf[BarrierCoordinator] + var nodes = Map[String, NodeInfo]() + + override def receive = Actor.loggable(this) { + case ClientConnected(name, host, port) ⇒ + self.channel match { + case ref: ActorRef ⇒ nodes += name -> NodeInfo(name, host, port, ref) + } + barrier forward ClientConnected + case ClientConnected ⇒ + barrier forward ClientConnected + case ClientDisconnected(name) ⇒ + nodes -= name + barrier forward ClientDisconnected + case e @ EnterBarrier(name) ⇒ + barrier forward e + case Throttle(node, target, direction, rateMBit) ⇒ + val t = nodes(target) + val throttle = + InjectFailure.newBuilder + .setFailure(FailType.Throttle) + .setDirection(TestConductorProtocol.Direction.valueOf(direction.toString)) + .setHost(t.host) + .setPort(t.port) + .setRateMBit(rateMBit) + .build + nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(throttle).build) + case Disconnect(node, target, abort) ⇒ + val t = nodes(target) + val disconnect = + InjectFailure.newBuilder + .setFailure(if (abort) FailType.Abort else FailType.Disconnect) + .setHost(t.host) + .setPort(t.port) + .build + nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(disconnect).build) + case Terminate(node, exitValueOrKill) ⇒ + if (exitValueOrKill < 0) { + // TODO: kill via SBT + } else { + val shutdown = InjectFailure.newBuilder.setFailure(FailType.Shutdown).setExitValue(exitValueOrKill).build + nodes(node).fsm ! 
ServerFSM.Send(Wrapper.newBuilder.setFailure(shutdown).build) + } + // TODO: properly remove node from BarrierCoordinator + // case Remove(node) => + // nodes -= node + case GetNodes ⇒ self reply nodes.keys + } +} + +object BarrierCoordinator { + sealed trait State + case object Idle extends State + case object Waiting extends State + + case class Data(clients: Int, barrier: String, arrived: List[UntypedChannel]) + class BarrierTimeoutException(msg: String) extends RuntimeException(msg) with NoStackTrace +} + +class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] { + import BarrierCoordinator._ + import akka.actor.FSM._ + import Controller._ + + startWith(Idle, Data(0, "", Nil)) + + when(Idle) { + case Event(EnterBarrier(name), Data(num, _, _)) ⇒ + if (num == 0) throw new IllegalStateException("no client expected yet") + goto(Waiting) using Data(num, name, self.channel :: Nil) + case Event(ClientConnected, d @ Data(num, _, _)) ⇒ + stay using d.copy(clients = num + 1) + case Event(ClientDisconnected, d @ Data(num, _, _)) ⇒ + if (num == 0) throw new IllegalStateException("no client to disconnect") + stay using d.copy(clients = num - 1) + } + + onTransition { + case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, 30 seconds, false) + case Waiting -> Idle ⇒ cancelTimer("Timeout") + } + + when(Waiting) { + case Event(e @ EnterBarrier(name), d @ Data(num, barrier, arrived)) ⇒ + if (name != barrier) throw new IllegalStateException("trying enter barrier '" + name + "' while barrier '" + barrier + "' is active") + val together = self.channel :: arrived + if (together.size == num) { + together foreach (_ ! e) + goto(Idle) using Data(num, "", Nil) + } else { + stay using d.copy(arrived = together) + } + case Event(ClientConnected, d @ Data(num, _, _)) ⇒ + stay using d.copy(clients = num + 1) + case Event(ClientDisconnected, d @ Data(num, barrier, arrived)) ⇒ + val expected = num - 1 + if (arrived.size == expected) { + val e = EnterBarrier(barrier) + self.channel :: arrived foreach (_ ! e) + goto(Idle) using Data(expected, "", Nil) + } else { + stay using d.copy(clients = expected) + } + case Event(StateTimeout, Data(num, barrier, arrived)) ⇒ + throw new BarrierTimeoutException("only " + arrived.size + " of " + num + " arrived at barrier " + barrier) + } + + initialize +} + diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala new file mode 100644 index 0000000000..2b54ea1018 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -0,0 +1,13 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.remote.testconductor + +sealed trait ClientOp +sealed trait ServerOp + +case class EnterBarrier(name: String) extends ClientOp with ServerOp +case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends ServerOp +case class Disconnect(node: String, target: String, abort: Boolean) extends ServerOp +case class Terminate(node: String, exitValueOrKill: Int) extends ServerOp +case class Remove(node: String) extends ServerOp diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala new file mode 100644 index 0000000000..399b58337b --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala @@ -0,0 +1,74 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. 
+ */ +package akka.remote.testconductor + +trait BarrierSync { + /** + * Enter all given barriers in the order in which they were given. + */ + def enter(name: String*): Unit +} + +sealed trait Direction +case object Send extends Direction +case object Receive extends Direction +case object Both extends Direction + +trait FailureInject { + + /** + * Make the remoting pipeline on the node throttle data sent to or received + * from the given remote peer. + */ + def throttle(node: String, target: String, direction: Direction, rateMBit: Float): Unit + + /** + * Switch the Netty pipeline of the remote support into blackhole mode for + * sending and/or receiving: it will just drop all messages right before + * submitting them to the Socket or right after receiving them from the + * Socket. + */ + def blackhole(node: String, target: String, direction: Direction): Unit + + /** + * Tell the remote support to shutdown the connection to the given remote + * peer. It works regardless of whether the recipient was initiator or + * responder. + */ + def disconnect(node: String, target: String): Unit + + /** + * Tell the remote support to TCP_RESET the connection to the given remote + * peer. It works regardless of whether the recipient was initiator or + * responder. + */ + def abort(node: String, target: String): Unit + +} + +trait RunControl { + + /** + * Tell the remote node to shut itself down using System.exit with the given + * exitValue. + */ + def shutdown(node: String, exitValue: Int): Unit + + /** + * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy. + */ + def kill(node: String): Unit + + /** + * Obtain the list of remote host names currently registered. + */ + def getNodes: List[String] + + /** + * Remove a remote host from the list, so that the remaining nodes may still + * pass subsequent barriers. + */ + def removeNode(node: String): Unit + +} diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala new file mode 100644 index 0000000000..eec6a2cbf1 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -0,0 +1,161 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. 
+ */ +package akka.remote.testconductor + +import java.net.InetSocketAddress + +import scala.collection.immutable.Queue + +import org.jboss.netty.buffer.ChannelBuffer +import org.jboss.netty.channel.ChannelState.BOUND +import org.jboss.netty.channel.ChannelState.OPEN +import org.jboss.netty.channel.Channel +import org.jboss.netty.channel.ChannelDownstreamHandler +import org.jboss.netty.channel.ChannelEvent +import org.jboss.netty.channel.ChannelHandlerContext +import org.jboss.netty.channel.ChannelStateEvent +import org.jboss.netty.channel.ChannelUpstreamHandler +import org.jboss.netty.channel.MessageEvent + +import akka.actor.FSM +import akka.actor.Actor +import akka.util.duration.doubleToDurationDouble +import akka.util.Index +import akka.util.RemoteAddress + +object NetworkFailureInjector { + + val channels = new Index[RemoteAddress, Channel]() + + def close(remote: RemoteAddress): Unit = { + val set = channels.remove(remote) + // channels will be cleaned up by the handler + set foreach (_.close()) + } +} + +class NetworkFailureInjector extends ChannelUpstreamHandler with ChannelDownstreamHandler { + + import NetworkFailureInjector._ + + // local cache of remote address + private var remote: Option[RemoteAddress] = None + + // everything goes via these Throttle actors to enable easy steering + private val sender = Actor.actorOf(new Throttle(_.sendDownstream(_))) + private val receiver = Actor.actorOf(new Throttle(_.sendUpstream(_))) + + /* + * State, Data and Messages for the internal Throttle actor + */ + sealed private trait State + private case object PassThrough extends State + private case object Throttle extends State + private case object Blackhole extends State + + private case class Data(ctx: ChannelHandlerContext, rateMBit: Float, queue: Queue[MessageEvent]) + + private case class SetRate(rateMBit: Float) + private case class Send(ctx: ChannelHandlerContext, msg: MessageEvent) + private case object Tick + + private class Throttle(send: (ChannelHandlerContext, MessageEvent) ⇒ Unit) extends Actor with FSM[State, Data] { + import FSM._ + + startWith(PassThrough, Data(null, -1, Queue())) + + when(PassThrough) { + case Event(Send(ctx, msg), d) ⇒ + send(ctx, msg) + stay + } + + when(Throttle) { + case Event(Send(ctx, msg), d) ⇒ + if (!timerActive_?("send")) { + setTimer("send", Tick, (size(msg) / d.rateMBit) microseconds, false) + } + stay using d.copy(ctx = ctx, queue = d.queue.enqueue(msg)) + case Event(Tick, d) ⇒ + val (msg, queue) = d.queue.dequeue + send(d.ctx, msg) + if (queue.nonEmpty) setTimer("send", Tick, (size(queue.head) / d.rateMBit) microseconds, false) + stay using d.copy(queue = queue) + } + + onTransition { + case Throttle -> PassThrough ⇒ + stateData.queue foreach (send(stateData.ctx, _)) + cancelTimer("send") + case Throttle -> Blackhole ⇒ + cancelTimer("send") + } + + when(Blackhole) { + case Event(Send(_, _), _) ⇒ + stay + } + + whenUnhandled { + case Event(SetRate(rate), d) ⇒ + if (rate > 0) { + goto(Throttle) using d.copy(rateMBit = rate, queue = Queue()) + } else if (rate == 0) { + goto(Blackhole) + } else { + goto(PassThrough) + } + } + + initialize + + private def size(msg: MessageEvent) = msg.getMessage() match { + case b: ChannelBuffer ⇒ b.readableBytes() * 8 + case _ ⇒ throw new UnsupportedOperationException("NetworkFailureInjector only supports ChannelBuffer messages") + } + } + + def throttleSend(rateMBit: Float) { + sender ! SetRate(rateMBit) + } + + def throttleReceive(rateMBit: Float) { + receiver ! 
SetRate(rateMBit) + } + + override def handleUpstream(ctx: ChannelHandlerContext, evt: ChannelEvent) { + evt match { + case msg: MessageEvent ⇒ + receiver ! Send(ctx, msg) + case state: ChannelStateEvent ⇒ + state.getState match { + case BOUND ⇒ + state.getValue match { + case null ⇒ + remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } + case a: InetSocketAddress ⇒ + val addr = RemoteAddress(a) + channels.put(addr, state.getChannel) + remote = Some(addr) + } + case OPEN if state.getValue == false ⇒ + remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } + } + ctx.sendUpstream(evt) + case _ ⇒ + ctx.sendUpstream(evt) + } + } + + override def handleDownstream(ctx: ChannelHandlerContext, evt: ChannelEvent) { + evt match { + case msg: MessageEvent ⇒ + sender ! Send(ctx, msg) + case _ ⇒ + ctx.sendUpstream(evt) + } + } + +} + diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala new file mode 100644 index 0000000000..16abe5bb27 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala @@ -0,0 +1,133 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.remote.testconductor + +import akka.actor.{ Actor, ActorRef, LoggingFSM, Timeout, UntypedChannel } +import akka.event.EventHandler +import RemoteConnection.getAddrString +import akka.util.duration._ +import TestConductorProtocol._ +import akka.NoStackTrace +import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } +import com.eaio.uuid.UUID + +object Player extends BarrierSync { + + private val server = Actor.actorOf[ClientFSM] + + override def enter(name: String*) { + EventHandler.debug(this, "entering barriers " + name.mkString("(", ", ", ")")) + implicit val timeout = Timeout(30 seconds) + name foreach { b ⇒ + (server ? 
EnterBarrier(b)).get + EventHandler.debug(this, "passed barrier " + b) + } + } +} + +object ClientFSM { + sealed trait State + case object Connecting extends State + case object Connected extends State + + case class Data(channel: Channel, msg: Either[List[ClientOp], (String, UntypedChannel)]) + + class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace + case object Disconnected +} + +class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { + import ClientFSM._ + import akka.actor.FSM._ + + val name = System.getProperty("akka.testconductor.name", (new UUID).toString) + val host = System.getProperty("akka.testconductor.host", "localhost") + val port = Integer.getInteger("akka.testconductor.port", 4545) + val handler = new PlayerHandler(self) + + val myself = Actor.remote.address + + startWith(Connecting, Data(RemoteConnection(Client, host, port, handler), Left(Nil))) + + when(Connecting, stateTimeout = 10 seconds) { + case Event(msg: ClientOp, Data(channel, Left(msgs))) ⇒ + stay using Data(channel, Left(msg :: msgs)) + case Event(Connected, Data(channel, Left(msgs))) ⇒ + val hello = Hello.newBuilder.setName(name).setHost(myself.getAddress.getHostAddress).setPort(myself.getPort).build + channel.write(Wrapper.newBuilder.setHello(hello).build) + msgs.reverse foreach sendMsg(channel) + goto(Connected) using Data(channel, Left(Nil)) + case Event(_: ConnectionFailure, _) ⇒ + // System.exit(1) + stop + case Event(StateTimeout, _) ⇒ + EventHandler.error(this, "connect timeout to TestConductor") + // System.exit(1) + stop + } + + when(Connected) { + case Event(Disconnected, _) ⇒ + EventHandler.info(this, "disconnected from TestConductor") + throw new ConnectionFailure("disconnect") + case Event(msg: EnterBarrier, Data(channel, _)) ⇒ + sendMsg(channel)(msg) + stay using Data(channel, Right((msg.name, self.channel))) + case Event(msg: Wrapper, Data(channel, Right((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ + if (msg.hasBarrier) { + val b = msg.getBarrier.getName + if (b != barrier) { + sender.sendException(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + } else { + sender ! b + } + } + stay using Data(channel, Left(Nil)) + } + + onTermination { + case StopEvent(_, _, Data(channel, _)) ⇒ + channel.close() + } + + private def sendMsg(channel: Channel)(msg: ClientOp) { + msg match { + case EnterBarrier(name) ⇒ + val enter = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build + channel.write(Wrapper.newBuilder.setBarrier(enter).build) + } + } + +} + +class PlayerHandler(fsm: ActorRef) extends SimpleChannelUpstreamHandler { + + import ClientFSM._ + + override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "connected to " + getAddrString(channel)) + while (!fsm.isRunning) Thread.sleep(100) + fsm ! Connected + } + + override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "disconnected from " + getAddrString(channel)) + fsm.stop() + } + + override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "message from " + getAddrString(channel) + ": " + event.getMessage) + event.getMessage match { + case msg: Wrapper if msg.getAllFields.size == 1 ⇒ + fsm ! 
msg + case msg ⇒ + EventHandler.info(this, "server " + getAddrString(channel) + " sent garbage '" + msg + "', disconnecting") + channel.close() + } + } +} + diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala new file mode 100644 index 0000000000..a92b6295e2 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -0,0 +1,52 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.remote.testconductor + +import org.jboss.netty.channel.{ Channel, ChannelPipeline, ChannelPipelineFactory, ChannelUpstreamHandler, SimpleChannelUpstreamHandler, StaticChannelPipeline } +import org.jboss.netty.channel.socket.nio.{ NioClientSocketChannelFactory, NioServerSocketChannelFactory } +import org.jboss.netty.bootstrap.{ ClientBootstrap, ServerBootstrap } +import org.jboss.netty.handler.codec.frame.{ LengthFieldBasedFrameDecoder, LengthFieldPrepender } +import org.jboss.netty.handler.codec.compression.{ ZlibDecoder, ZlibEncoder } +import org.jboss.netty.handler.codec.protobuf.{ ProtobufDecoder, ProtobufEncoder } +import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutException } +import java.net.InetSocketAddress +import java.util.concurrent.Executors + +class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends ChannelPipelineFactory { + def getPipeline: ChannelPipeline = { + val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) + val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) + new StaticChannelPipeline(encap ::: proto ::: handler :: Nil: _*) + } +} + +sealed trait Role +case object Client extends Role +case object Server extends Role + +object RemoteConnection { + def apply(role: Role, host: String, port: Int, handler: ChannelUpstreamHandler): Channel = { + val sockaddr = new InetSocketAddress(host, port) + role match { + case Client ⇒ + val socketfactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool) + val bootstrap = new ClientBootstrap(socketfactory) + bootstrap.setPipelineFactory(new TestConductorPipelineFactory(handler)) + bootstrap.setOption("tcpNoDelay", true) + bootstrap.connect(sockaddr).getChannel + case Server ⇒ + val socketfactory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool) + val bootstrap = new ServerBootstrap(socketfactory) + bootstrap.setPipelineFactory(new TestConductorPipelineFactory(handler)) + bootstrap.setOption("reuseAddress", true) + bootstrap.setOption("child.tcpNoDelay", true) + bootstrap.bind(sockaddr) + } + } + + def getAddrString(channel: Channel) = channel.getRemoteAddress match { + case i: InetSocketAddress ⇒ i.toString + case _ ⇒ "[unknown]" + } +} From 6c786d20b808b4b5989076223fcc7da0d27e71f9 Mon Sep 17 00:00:00 2001 From: Roland Date: Wed, 2 May 2012 21:56:26 +0200 Subject: [PATCH 002/106] porting to 2.0, making it compile: UNTESTED! 
--- .../remote/netty/NettyRemoteSupport.scala | 24 ++++- .../main/scala/akka/remote/netty/Server.scala | 4 +- .../akka/remote/testconductor/Conductor.scala | 93 ++++++++++++------- .../NetworkFailureInjector.scala | 24 +++-- .../akka/remote/testconductor/Player.scala | 72 ++++++++------ 5 files changed, 135 insertions(+), 82 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 8acd33c7fb..55e2d95636 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -31,9 +31,11 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor val settings = new NettySettings(remoteSettings.config.getConfig("akka.remote.netty"), remoteSettings.systemName) + // TODO replace by system.scheduler val timer: HashedWheelTimer = new HashedWheelTimer(system.threadFactory) - val executor = new OrderedMemoryAwareThreadPoolExecutor( + // TODO make configurable + lazy val executor = new OrderedMemoryAwareThreadPoolExecutor( settings.ExecutionPoolSize, settings.MaxChannelMemorySize, settings.MaxTotalMemorySize, @@ -41,6 +43,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor settings.ExecutionPoolKeepalive.unit, system.threadFactory) + // TODO make configurable/shareable with server socket factory val clientChannelFactory = new NioClientSocketChannelFactory( Executors.newCachedThreadPool(system.threadFactory), Executors.newCachedThreadPool(system.threadFactory)) @@ -50,9 +53,20 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor override protected def useUntrustedMode = remoteSettings.UntrustedMode - val server = try new NettyRemoteServer(this) catch { - case ex ⇒ shutdown(); throw ex - } + val server: NettyRemoteServer = try createServer() catch { case NonFatal(ex) ⇒ shutdown(); throw ex } + + /** + * Override this method to inject a subclass of NettyRemoteServer instead of + * the normal one, e.g. for altering the pipeline. + */ + protected def createServer(): NettyRemoteServer = new NettyRemoteServer(this) + + /** + * Override this method to inject a subclass of RemoteClient instead of + * the normal one, e.g. for altering the pipeline. Get this transport’s + * address from `this.address`. 
+ */ + protected def createClient(recipient: Address): RemoteClient = new ActiveRemoteClient(this, recipient, address) // the address is set in start() or from the RemoteServerHandler, whichever comes first private val _address = new AtomicReference[Address] @@ -121,7 +135,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor //Recheck for addition, race between upgrades case Some(client) ⇒ client //If already populated by other writer case None ⇒ //Populate map - val client = new ActiveRemoteClient(this, recipientAddress, address) + val client = createClient(recipientAddress) client.connect() remoteClients += recipientAddress -> client client diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 1f18b27c8c..97d3f194f3 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -37,13 +37,15 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { private val bootstrap = { val b = new ServerBootstrap(factory) - b.setPipelineFactory(new RemoteServerPipelineFactory(openChannels, executionHandler, netty)) + b.setPipelineFactory(makePipeline()) b.setOption("backlog", settings.Backlog) b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) b.setOption("reuseAddress", true) b } + + protected def makePipeline(): ChannelPipelineFactory = new RemoteServerPipelineFactory(openChannels, executionHandler, netty) @volatile private[akka] var channel: Channel = _ diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala index 58a6a5f88e..3265fc8808 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -3,24 +3,41 @@ */ package akka.remote.testconductor -import akka.actor.{ Actor, ActorRef, LoggingFSM, Timeout, UntypedChannel } -import akka.event.EventHandler +import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props } import RemoteConnection.getAddrString -import akka.util.duration._ import TestConductorProtocol._ -import akka.NoStackTrace import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } +import com.typesafe.config.ConfigFactory +import akka.util.Timeout +import akka.util.Duration +import akka.util.duration._ +import akka.pattern.ask +import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.dispatch.Await +import akka.event.LoggingAdapter +import akka.actor.PoisonPill +import akka.event.Logging +import scala.util.control.NoStackTrace object Conductor extends RunControl with FailureInject with BarrierSync { + val system = ActorSystem("conductor", ConfigFactory.load().getConfig("conductor")) + + object Settings { + val config = system.settings.config + + implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("barrier-timeout"), MILLISECONDS)) + implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("query-timeout"), MILLISECONDS)) + } + import Controller._ - private val controller = Actor.actorOf[Controller] + private val controller = system.actorOf(Props[Controller], "controller") controller ! ClientConnected override def enter(name: String*) { - implicit val timeout = Timeout(30 seconds) - name foreach (b ⇒ (controller ? 
EnterBarrier(b)).get) + import Settings.BarrierTimeout + name foreach (b ⇒ Await.result(controller ? EnterBarrier(b), Duration.Inf)) } override def throttle(node: String, target: String, direction: Direction, rateMBit: Float) { @@ -47,7 +64,10 @@ object Conductor extends RunControl with FailureInject with BarrierSync { controller ! Terminate(node, -1) } - override def getNodes = (controller ? GetNodes).as[List[String]].get + override def getNodes = { + import Settings.QueryTimeout + Await.result(controller ? GetNodes mapTo manifest[List[String]], Duration.Inf) + } override def removeNode(node: String) { controller ! Remove(node) @@ -55,33 +75,33 @@ object Conductor extends RunControl with FailureInject with BarrierSync { } -class ConductorHandler(controller: ActorRef) extends SimpleChannelUpstreamHandler { +class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { var clients = Map[Channel, ActorRef]() override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel - EventHandler.debug(this, "connection from " + getAddrString(channel)) - val fsm = Actor.actorOf(new ServerFSM(controller, channel)) + log.debug("connection from {}", getAddrString(channel)) + val fsm = system.actorOf(Props(new ServerFSM(controller, channel))) clients += channel -> fsm } override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel - EventHandler.debug(this, "disconnect from " + getAddrString(channel)) + log.debug("disconnect from {}", getAddrString(channel)) val fsm = clients(channel) - fsm.stop() + fsm ! PoisonPill clients -= channel } override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = { val channel = event.getChannel - EventHandler.debug(this, "message from " + getAddrString(channel) + ": " + event.getMessage) + log.debug("message from {}: {}", getAddrString(channel), event.getMessage) event.getMessage match { case msg: Wrapper if msg.getAllFields.size == 1 ⇒ clients(channel) ! msg case msg ⇒ - EventHandler.info(this, "client " + getAddrString(channel) + " sent garbage '" + msg + "', disconnecting") + log.info("client {} sent garbage '{}', disconnecting", getAddrString(channel), msg) channel.close() } } @@ -104,35 +124,35 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi startWith(Initial, null) when(Initial, stateTimeout = 10 seconds) { - case Ev(msg: Wrapper) ⇒ + case Event(msg: Wrapper, _) ⇒ if (msg.hasHello) { val hello = msg.getHello controller ! ClientConnected(hello.getName, hello.getHost, hello.getPort) goto(Ready) } else { - EventHandler.warning(this, "client " + getAddrString(channel) + " sent no Hello in first message, disconnecting") + log.warning("client {} sent no Hello in first message, disconnecting", getAddrString(channel)) channel.close() stop() } - case Ev(StateTimeout) ⇒ - EventHandler.info(this, "closing channel to " + getAddrString(channel) + " because of Hello timeout") + case Event(StateTimeout, _) ⇒ + log.info("closing channel to {} because of Hello timeout", getAddrString(channel)) channel.close() stop() } when(Ready) { - case Ev(msg: Wrapper) ⇒ + case Event(msg: Wrapper, _) ⇒ if (msg.hasBarrier) { val barrier = msg.getBarrier controller ! 
EnterBarrier(barrier.getName) } else { - EventHandler.warning(this, "client " + getAddrString(channel) + " sent unsupported message " + msg) + log.warning("client {} sent unsupported message {}", getAddrString(channel), msg) } stay - case Ev(Send(msg)) ⇒ + case Event(Send(msg), _) ⇒ channel.write(msg) stay - case Ev(EnterBarrier(name)) ⇒ + case Event(EnterBarrier(name), _) ⇒ val barrier = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build channel.write(Wrapper.newBuilder.setBarrier(barrier).build) stay @@ -152,18 +172,19 @@ object Controller { class Controller extends Actor { import Controller._ - val host = System.getProperty("akka.testconductor.host", "localhost") - val port = Integer.getInteger("akka.testconductor.port", 4545) - val connection = RemoteConnection(Server, host, port, new ConductorHandler(self)) + val config = context.system.settings.config - val barrier = Actor.actorOf[BarrierCoordinator] + val host = config.getString("akka.testconductor.host") + val port = config.getInt("akka.testconductor.port") + val connection = RemoteConnection(Server, host, port, + new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler"))) + + val barrier = context.actorOf(Props[BarrierCoordinator], "barriers") var nodes = Map[String, NodeInfo]() - override def receive = Actor.loggable(this) { + override def receive = { case ClientConnected(name, host, port) ⇒ - self.channel match { - case ref: ActorRef ⇒ nodes += name -> NodeInfo(name, host, port, ref) - } + nodes += name -> NodeInfo(name, host, port, sender) barrier forward ClientConnected case ClientConnected ⇒ barrier forward ClientConnected @@ -202,7 +223,7 @@ class Controller extends Actor { // TODO: properly remove node from BarrierCoordinator // case Remove(node) => // nodes -= node - case GetNodes ⇒ self reply nodes.keys + case GetNodes ⇒ sender ! nodes.keys } } @@ -211,7 +232,7 @@ object BarrierCoordinator { case object Idle extends State case object Waiting extends State - case class Data(clients: Int, barrier: String, arrived: List[UntypedChannel]) + case class Data(clients: Int, barrier: String, arrived: List[ActorRef]) class BarrierTimeoutException(msg: String) extends RuntimeException(msg) with NoStackTrace } @@ -225,7 +246,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, when(Idle) { case Event(EnterBarrier(name), Data(num, _, _)) ⇒ if (num == 0) throw new IllegalStateException("no client expected yet") - goto(Waiting) using Data(num, name, self.channel :: Nil) + goto(Waiting) using Data(num, name, sender :: Nil) case Event(ClientConnected, d @ Data(num, _, _)) ⇒ stay using d.copy(clients = num + 1) case Event(ClientDisconnected, d @ Data(num, _, _)) ⇒ @@ -241,7 +262,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, when(Waiting) { case Event(e @ EnterBarrier(name), d @ Data(num, barrier, arrived)) ⇒ if (name != barrier) throw new IllegalStateException("trying enter barrier '" + name + "' while barrier '" + barrier + "' is active") - val together = self.channel :: arrived + val together = sender :: arrived if (together.size == num) { together foreach (_ ! e) goto(Idle) using Data(num, "", Nil) @@ -254,7 +275,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, val expected = num - 1 if (arrived.size == expected) { val e = EnterBarrier(barrier) - self.channel :: arrived foreach (_ ! e) + sender :: arrived foreach (_ ! 
e) goto(Idle) using Data(expected, "", Nil) } else { stay using d.copy(clients = expected) diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index eec6a2cbf1..88102b5e86 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -4,9 +4,7 @@ package akka.remote.testconductor import java.net.InetSocketAddress - import scala.collection.immutable.Queue - import org.jboss.netty.buffer.ChannelBuffer import org.jboss.netty.channel.ChannelState.BOUND import org.jboss.netty.channel.ChannelState.OPEN @@ -17,34 +15,34 @@ import org.jboss.netty.channel.ChannelHandlerContext import org.jboss.netty.channel.ChannelStateEvent import org.jboss.netty.channel.ChannelUpstreamHandler import org.jboss.netty.channel.MessageEvent - import akka.actor.FSM import akka.actor.Actor import akka.util.duration.doubleToDurationDouble import akka.util.Index -import akka.util.RemoteAddress +import akka.actor.Address +import akka.actor.ActorSystem +import akka.actor.Props object NetworkFailureInjector { - val channels = new Index[RemoteAddress, Channel]() + val channels = new Index[Address, Channel](16, (c1, c2) => c1 compareTo c2) - def close(remote: RemoteAddress): Unit = { - val set = channels.remove(remote) + def close(remote: Address): Unit = { // channels will be cleaned up by the handler - set foreach (_.close()) + for (chs <- channels.remove(remote); c <- chs) c.close() } } -class NetworkFailureInjector extends ChannelUpstreamHandler with ChannelDownstreamHandler { +class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler with ChannelDownstreamHandler { import NetworkFailureInjector._ // local cache of remote address - private var remote: Option[RemoteAddress] = None + private var remote: Option[Address] = None // everything goes via these Throttle actors to enable easy steering - private val sender = Actor.actorOf(new Throttle(_.sendDownstream(_))) - private val receiver = Actor.actorOf(new Throttle(_.sendUpstream(_))) + private val sender = system.actorOf(Props(new Throttle(_.sendDownstream(_)))) + private val receiver = system.actorOf(Props(new Throttle(_.sendUpstream(_)))) /* * State, Data and Messages for the internal Throttle actor @@ -135,7 +133,7 @@ class NetworkFailureInjector extends ChannelUpstreamHandler with ChannelDownstre case null ⇒ remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } case a: InetSocketAddress ⇒ - val addr = RemoteAddress(a) + val addr = Address("akka", "XXX", a.getHostName, a.getPort) channels.put(addr, state.getChannel) remote = Some(addr) } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala index 16abe5bb27..029045394c 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala @@ -3,25 +3,42 @@ */ package akka.remote.testconductor -import akka.actor.{ Actor, ActorRef, LoggingFSM, Timeout, UntypedChannel } -import akka.event.EventHandler +import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props } import RemoteConnection.getAddrString import akka.util.duration._ import TestConductorProtocol._ -import akka.NoStackTrace import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, 
ChannelHandlerContext, ChannelStateEvent, MessageEvent } import com.eaio.uuid.UUID +import com.typesafe.config.ConfigFactory +import akka.util.Timeout +import akka.util.Duration +import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.pattern.ask +import akka.dispatch.Await +import scala.util.control.NoStackTrace +import akka.actor.Status +import akka.event.LoggingAdapter +import akka.actor.PoisonPill +import akka.event.Logging object Player extends BarrierSync { - private val server = Actor.actorOf[ClientFSM] + val system = ActorSystem("Player", ConfigFactory.load().getConfig("player")) + + object Settings { + val config = system.settings.config + + implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("barrier-timeout"), MILLISECONDS)) + } + + private val server = system.actorOf(Props[ClientFSM], "client") override def enter(name: String*) { - EventHandler.debug(this, "entering barriers " + name.mkString("(", ", ", ")")) - implicit val timeout = Timeout(30 seconds) + system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) name foreach { b ⇒ - (server ? EnterBarrier(b)).get - EventHandler.debug(this, "passed barrier " + b) + import Settings.BarrierTimeout + Await.result(server ? EnterBarrier(b), Duration.Inf) + system.log.debug("passed barrier {}", b) } } } @@ -31,7 +48,7 @@ object ClientFSM { case object Connecting extends State case object Connected extends State - case class Data(channel: Channel, msg: Either[List[ClientOp], (String, UntypedChannel)]) + case class Data(channel: Channel, msg: Either[List[ClientOp], (String, ActorRef)]) class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace case object Disconnected @@ -39,14 +56,16 @@ object ClientFSM { class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ - import akka.actor.FSM._ - val name = System.getProperty("akka.testconductor.name", (new UUID).toString) - val host = System.getProperty("akka.testconductor.host", "localhost") - val port = Integer.getInteger("akka.testconductor.port", 4545) - val handler = new PlayerHandler(self) + val config = context.system.settings.config - val myself = Actor.remote.address + val name = config.getString("akka.testconductor.name") + val host = config.getString("akka.testconductor.host") + val port = config.getInt("akka.testconductor.port") + val handler = new PlayerHandler(self, Logging(context.system, "PlayerHandler")) + + val myself = "XXX" + val myport = 12345 startWith(Connecting, Data(RemoteConnection(Client, host, port, handler), Left(Nil))) @@ -54,7 +73,7 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { case Event(msg: ClientOp, Data(channel, Left(msgs))) ⇒ stay using Data(channel, Left(msg :: msgs)) case Event(Connected, Data(channel, Left(msgs))) ⇒ - val hello = Hello.newBuilder.setName(name).setHost(myself.getAddress.getHostAddress).setPort(myself.getPort).build + val hello = Hello.newBuilder.setName(name).setHost(myself).setPort(myport).build channel.write(Wrapper.newBuilder.setHello(hello).build) msgs.reverse foreach sendMsg(channel) goto(Connected) using Data(channel, Left(Nil)) @@ -62,23 +81,23 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { // System.exit(1) stop case Event(StateTimeout, _) ⇒ - EventHandler.error(this, "connect timeout to TestConductor") + log.error("connect timeout to TestConductor") // System.exit(1) stop } when(Connected) { case Event(Disconnected, _) ⇒ - EventHandler.info(this, 
"disconnected from TestConductor") + log.info("disconnected from TestConductor") throw new ConnectionFailure("disconnect") case Event(msg: EnterBarrier, Data(channel, _)) ⇒ sendMsg(channel)(msg) - stay using Data(channel, Right((msg.name, self.channel))) + stay using Data(channel, Right((msg.name, sender))) case Event(msg: Wrapper, Data(channel, Right((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ if (msg.hasBarrier) { val b = msg.getBarrier.getName if (b != barrier) { - sender.sendException(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + sender ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) } else { sender ! b } @@ -101,31 +120,30 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { } -class PlayerHandler(fsm: ActorRef) extends SimpleChannelUpstreamHandler { +class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { import ClientFSM._ override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel - EventHandler.debug(this, "connected to " + getAddrString(channel)) - while (!fsm.isRunning) Thread.sleep(100) + log.debug("connected to {}", getAddrString(channel)) fsm ! Connected } override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel - EventHandler.debug(this, "disconnected from " + getAddrString(channel)) - fsm.stop() + log.debug("disconnected from {}", getAddrString(channel)) + fsm ! PoisonPill } override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = { val channel = event.getChannel - EventHandler.debug(this, "message from " + getAddrString(channel) + ": " + event.getMessage) + log.debug("message from {}: {}", getAddrString(channel), event.getMessage) event.getMessage match { case msg: Wrapper if msg.getAllFields.size == 1 ⇒ fsm ! 
msg case msg ⇒ - EventHandler.info(this, "server " + getAddrString(channel) + " sent garbage '" + msg + "', disconnecting") + log.info("server {} sent garbage '{}', disconnecting", getAddrString(channel), msg) channel.close() } } From 5cf0fa66f803caffee4100507263ab87a8adf71a Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 3 May 2012 20:48:27 +0200 Subject: [PATCH 003/106] TestConductor: convert to Akka Extension and add test - make start-up synchronous and explicit for client and server - server can be queried for actual port, client requires explicit port - simple multi-jvm-test for verification of TestConductor barriers --- .../testconductor/TestConductorProtocol.java | 1230 +++++++++++++---- .../main/protocol/TestConductorProtocol.proto | 17 +- akka-remote/src/main/resources/reference.conf | 21 + .../remote/netty/NettyRemoteSupport.scala | 6 +- .../main/scala/akka/remote/netty/Server.scala | 2 +- .../akka/remote/testconductor/Conductor.scala | 63 +- .../akka/remote/testconductor/Extension.scala | 31 + .../akka/remote/testconductor/Features.scala | 10 + .../NetworkFailureInjector.scala | 4 +- .../akka/remote/testconductor/Player.scala | 68 +- .../akka/remote/testconductor/package.scala | 19 + .../testconductor/TestConductorSpec.scala | 52 + 12 files changed, 1170 insertions(+), 353 deletions(-) create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/package.scala create mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index e9065b53e4..0b2950018f 100644 --- a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -905,13 +905,10 @@ public final class TestConductorProtocol { boolean hasName(); String getName(); - // required string host = 2; - boolean hasHost(); - String getHost(); - - // required int32 port = 3; - boolean hasPort(); - int getPort(); + // required .Address address = 2; + boolean hasAddress(); + akka.remote.testconductor.TestConductorProtocol.Address getAddress(); + akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder(); } public static final class Hello extends com.google.protobuf.GeneratedMessage @@ -974,52 +971,22 @@ public final class TestConductorProtocol { } } - // required string host = 2; - public static final int HOST_FIELD_NUMBER = 2; - private java.lang.Object host_; - public boolean hasHost() { + // required .Address address = 2; + public static final int ADDRESS_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.Address address_; + public boolean hasAddress() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getHost() { - java.lang.Object ref = host_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - host_ = s; - } - return s; - } + public akka.remote.testconductor.TestConductorProtocol.Address getAddress() { + return address_; } - private com.google.protobuf.ByteString getHostBytes() { - java.lang.Object ref = host_; - if (ref instanceof String) { - com.google.protobuf.ByteString 
b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - host_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required int32 port = 3; - public static final int PORT_FIELD_NUMBER = 3; - private int port_; - public boolean hasPort() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getPort() { - return port_; + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder() { + return address_; } private void initFields() { name_ = ""; - host_ = ""; - port_ = 0; + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -1030,11 +997,11 @@ public final class TestConductorProtocol { memoizedIsInitialized = 0; return false; } - if (!hasHost()) { + if (!hasAddress()) { memoizedIsInitialized = 0; return false; } - if (!hasPort()) { + if (!getAddress().isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -1049,10 +1016,7 @@ public final class TestConductorProtocol { output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getHostBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt32(3, port_); + output.writeMessage(2, address_); } getUnknownFields().writeTo(output); } @@ -1069,11 +1033,7 @@ public final class TestConductorProtocol { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getHostBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(3, port_); + .computeMessageSize(2, address_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -1191,6 +1151,7 @@ public final class TestConductorProtocol { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getAddressFieldBuilder(); } } private static Builder create() { @@ -1201,10 +1162,12 @@ public final class TestConductorProtocol { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - host_ = ""; + if (addressBuilder_ == null) { + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } else { + addressBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); - port_ = 0; - bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -1250,11 +1213,11 @@ public final class TestConductorProtocol { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.host_ = host_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; + if (addressBuilder_ == null) { + result.address_ = address_; + } else { + result.address_ = addressBuilder_.build(); } - result.port_ = port_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1274,11 +1237,8 @@ public final class TestConductorProtocol { if (other.hasName()) { setName(other.getName()); } - if (other.hasHost()) { - setHost(other.getHost()); - } - if (other.hasPort()) { - setPort(other.getPort()); + if (other.hasAddress()) { + mergeAddress(other.getAddress()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -1289,11 +1249,11 @@ public final class TestConductorProtocol { return false; } - if (!hasHost()) { + if (!hasAddress()) { return false; } - if (!hasPort()) { + if (!getAddress().isInitialized()) { 
return false; } @@ -1329,13 +1289,12 @@ public final class TestConductorProtocol { break; } case 18: { - bitField0_ |= 0x00000002; - host_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - port_ = input.readInt32(); + akka.remote.testconductor.TestConductorProtocol.Address.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(); + if (hasAddress()) { + subBuilder.mergeFrom(getAddress()); + } + input.readMessage(subBuilder, extensionRegistry); + setAddress(subBuilder.buildPartial()); break; } } @@ -1380,62 +1339,95 @@ public final class TestConductorProtocol { onChanged(); } - // required string host = 2; - private java.lang.Object host_ = ""; - public boolean hasHost() { + // required .Address address = 2; + private akka.remote.testconductor.TestConductorProtocol.Address address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> addressBuilder_; + public boolean hasAddress() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getHost() { - java.lang.Object ref = host_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - host_ = s; - return s; + public akka.remote.testconductor.TestConductorProtocol.Address getAddress() { + if (addressBuilder_ == null) { + return address_; } else { - return (String) ref; + return addressBuilder_.getMessage(); } } - public Builder setHost(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - host_ = value; - onChanged(); - return this; - } - public Builder clearHost() { - bitField0_ = (bitField0_ & ~0x00000002); - host_ = getDefaultInstance().getHost(); - onChanged(); - return this; - } - void setHost(com.google.protobuf.ByteString value) { + public Builder setAddress(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + address_ = value; + onChanged(); + } else { + addressBuilder_.setMessage(value); + } bitField0_ |= 0x00000002; - host_ = value; - onChanged(); - } - - // required int32 port = 3; - private int port_ ; - public boolean hasPort() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getPort() { - return port_; - } - public Builder setPort(int value) { - bitField0_ |= 0x00000004; - port_ = value; - onChanged(); return this; } - public Builder clearPort() { - bitField0_ = (bitField0_ & ~0x00000004); - port_ = 0; - onChanged(); + public Builder setAddress( + akka.remote.testconductor.TestConductorProtocol.Address.Builder builderForValue) { + if (addressBuilder_ == null) { + address_ = builderForValue.build(); + onChanged(); + } else { + addressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; return this; } + public Builder mergeAddress(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addressBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + address_ != akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance()) { + address_ = + akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(address_).mergeFrom(value).buildPartial(); + } else { + address_ = value; + } + onChanged(); + 
} else { + addressBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearAddress() { + if (addressBuilder_ == null) { + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + onChanged(); + } else { + addressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.Address.Builder getAddressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getAddressFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder() { + if (addressBuilder_ != null) { + return addressBuilder_.getMessageOrBuilder(); + } else { + return address_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> + getAddressFieldBuilder() { + if (addressBuilder_ == null) { + addressBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder>( + address_, + getParentForChildren(), + isClean()); + address_ = null; + } + return addressBuilder_; + } // @@protoc_insertion_point(builder_scope:Hello) } @@ -1831,6 +1823,658 @@ public final class TestConductorProtocol { // @@protoc_insertion_point(class_scope:EnterBarrier) } + public interface AddressOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string protocol = 1; + boolean hasProtocol(); + String getProtocol(); + + // required string system = 2; + boolean hasSystem(); + String getSystem(); + + // required string host = 3; + boolean hasHost(); + String getHost(); + + // required int32 port = 4; + boolean hasPort(); + int getPort(); + } + public static final class Address extends + com.google.protobuf.GeneratedMessage + implements AddressOrBuilder { + // Use Address.newBuilder() to construct. 
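+    // Editor's note (descriptive comment, not compiler output): this message carries
+    // the four components of an akka.actor.Address. Judging from the accessors and
+    // field numbers below, the underlying TestConductorProtocol.proto declaration is
+    // presumably along the lines of:
+    //   message Address {
+    //     required string protocol = 1;
+    //     required string system   = 2;
+    //     required string host     = 3;
+    //     required int32  port     = 4;
+    //   }
+    // Hello and InjectFailure now embed this message in place of the former
+    // separate host/port fields.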
+ private Address(Builder builder) { + super(builder); + } + private Address(boolean noInit) {} + + private static final Address defaultInstance; + public static Address getDefaultInstance() { + return defaultInstance; + } + + public Address getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Address_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Address_fieldAccessorTable; + } + + private int bitField0_; + // required string protocol = 1; + public static final int PROTOCOL_FIELD_NUMBER = 1; + private java.lang.Object protocol_; + public boolean hasProtocol() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getProtocol() { + java.lang.Object ref = protocol_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + protocol_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getProtocolBytes() { + java.lang.Object ref = protocol_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + protocol_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string system = 2; + public static final int SYSTEM_FIELD_NUMBER = 2; + private java.lang.Object system_; + public boolean hasSystem() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getSystem() { + java.lang.Object ref = system_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + system_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getSystemBytes() { + java.lang.Object ref = system_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + system_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string host = 3; + public static final int HOST_FIELD_NUMBER = 3; + private java.lang.Object host_; + public boolean hasHost() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getHost() { + java.lang.Object ref = host_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + host_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getHostBytes() { + java.lang.Object ref = host_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + host_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required int32 port = 4; + public static final int PORT_FIELD_NUMBER = 4; + private int port_; + public boolean hasPort() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getPort() { + return port_; + } + + private void 
initFields() { + protocol_ = ""; + system_ = ""; + host_ = ""; + port_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasProtocol()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSystem()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasHost()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPort()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getProtocolBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getSystemBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getHostBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt32(4, port_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getProtocolBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getSystemBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getHostBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, port_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + java.io.InputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.Address prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Address_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Address_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.Address.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + protocol_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + system_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + host_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.Address.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.Address getDefaultInstanceForType() { + 
return akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.Address build() { + akka.remote.testconductor.TestConductorProtocol.Address result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.Address buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.Address result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.Address buildPartial() { + akka.remote.testconductor.TestConductorProtocol.Address result = new akka.remote.testconductor.TestConductorProtocol.Address(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.protocol_ = protocol_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.system_ = system_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.host_ = host_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.port_ = port_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.Address) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.Address)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.Address other) { + if (other == akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance()) return this; + if (other.hasProtocol()) { + setProtocol(other.getProtocol()); + } + if (other.hasSystem()) { + setSystem(other.getSystem()); + } + if (other.hasHost()) { + setHost(other.getHost()); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasProtocol()) { + + return false; + } + if (!hasSystem()) { + + return false; + } + if (!hasHost()) { + + return false; + } + if (!hasPort()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + protocol_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + system_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + host_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + port_ = 
input.readInt32(); + break; + } + } + } + } + + private int bitField0_; + + // required string protocol = 1; + private java.lang.Object protocol_ = ""; + public boolean hasProtocol() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getProtocol() { + java.lang.Object ref = protocol_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + protocol_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setProtocol(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + protocol_ = value; + onChanged(); + return this; + } + public Builder clearProtocol() { + bitField0_ = (bitField0_ & ~0x00000001); + protocol_ = getDefaultInstance().getProtocol(); + onChanged(); + return this; + } + void setProtocol(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + protocol_ = value; + onChanged(); + } + + // required string system = 2; + private java.lang.Object system_ = ""; + public boolean hasSystem() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getSystem() { + java.lang.Object ref = system_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + system_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setSystem(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + system_ = value; + onChanged(); + return this; + } + public Builder clearSystem() { + bitField0_ = (bitField0_ & ~0x00000002); + system_ = getDefaultInstance().getSystem(); + onChanged(); + return this; + } + void setSystem(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + system_ = value; + onChanged(); + } + + // required string host = 3; + private java.lang.Object host_ = ""; + public boolean hasHost() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getHost() { + java.lang.Object ref = host_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + host_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setHost(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + host_ = value; + onChanged(); + return this; + } + public Builder clearHost() { + bitField0_ = (bitField0_ & ~0x00000004); + host_ = getDefaultInstance().getHost(); + onChanged(); + return this; + } + void setHost(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000004; + host_ = value; + onChanged(); + } + + // required int32 port = 4; + private int port_ ; + public boolean hasPort() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getPort() { + return port_; + } + public Builder setPort(int value) { + bitField0_ |= 0x00000008; + port_ = value; + onChanged(); + return this; + } + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000008); + port_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Address) + } + + static { + defaultInstance = new Address(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Address) + } + public interface InjectFailureOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -1842,19 +2486,16 @@ public final class TestConductorProtocol { boolean hasDirection(); akka.remote.testconductor.TestConductorProtocol.Direction getDirection(); - // 
optional string host = 3; - boolean hasHost(); - String getHost(); + // optional .Address address = 3; + boolean hasAddress(); + akka.remote.testconductor.TestConductorProtocol.Address getAddress(); + akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder(); - // optional int32 port = 4; - boolean hasPort(); - int getPort(); - - // optional float rateMBit = 5; + // optional float rateMBit = 6; boolean hasRateMBit(); float getRateMBit(); - // optional int32 exitValue = 6; + // optional int32 exitValue = 7; boolean hasExitValue(); int getExitValue(); } @@ -1907,63 +2548,34 @@ public final class TestConductorProtocol { return direction_; } - // optional string host = 3; - public static final int HOST_FIELD_NUMBER = 3; - private java.lang.Object host_; - public boolean hasHost() { + // optional .Address address = 3; + public static final int ADDRESS_FIELD_NUMBER = 3; + private akka.remote.testconductor.TestConductorProtocol.Address address_; + public boolean hasAddress() { return ((bitField0_ & 0x00000004) == 0x00000004); } - public String getHost() { - java.lang.Object ref = host_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - host_ = s; - } - return s; - } + public akka.remote.testconductor.TestConductorProtocol.Address getAddress() { + return address_; } - private com.google.protobuf.ByteString getHostBytes() { - java.lang.Object ref = host_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - host_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder() { + return address_; } - // optional int32 port = 4; - public static final int PORT_FIELD_NUMBER = 4; - private int port_; - public boolean hasPort() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getPort() { - return port_; - } - - // optional float rateMBit = 5; - public static final int RATEMBIT_FIELD_NUMBER = 5; + // optional float rateMBit = 6; + public static final int RATEMBIT_FIELD_NUMBER = 6; private float rateMBit_; public boolean hasRateMBit() { - return ((bitField0_ & 0x00000010) == 0x00000010); + return ((bitField0_ & 0x00000008) == 0x00000008); } public float getRateMBit() { return rateMBit_; } - // optional int32 exitValue = 6; - public static final int EXITVALUE_FIELD_NUMBER = 6; + // optional int32 exitValue = 7; + public static final int EXITVALUE_FIELD_NUMBER = 7; private int exitValue_; public boolean hasExitValue() { - return ((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000010) == 0x00000010); } public int getExitValue() { return exitValue_; @@ -1972,8 +2584,7 @@ public final class TestConductorProtocol { private void initFields() { failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; - host_ = ""; - port_ = 0; + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); rateMBit_ = 0F; exitValue_ = 0; } @@ -1986,6 +2597,12 @@ public final class TestConductorProtocol { memoizedIsInitialized = 0; return false; } + if (hasAddress()) { + if (!getAddress().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } 
memoizedIsInitialized = 1; return true; } @@ -2000,16 +2617,13 @@ public final class TestConductorProtocol { output.writeEnum(2, direction_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getHostBytes()); + output.writeMessage(3, address_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt32(4, port_); + output.writeFloat(6, rateMBit_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeFloat(5, rateMBit_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeInt32(6, exitValue_); + output.writeInt32(7, exitValue_); } getUnknownFields().writeTo(output); } @@ -2030,19 +2644,15 @@ public final class TestConductorProtocol { } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getHostBytes()); + .computeMessageSize(3, address_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream - .computeInt32Size(4, port_); + .computeFloatSize(6, rateMBit_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream - .computeFloatSize(5, rateMBit_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(6, exitValue_); + .computeInt32Size(7, exitValue_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -2160,6 +2770,7 @@ public final class TestConductorProtocol { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getAddressFieldBuilder(); } } private static Builder create() { @@ -2172,14 +2783,16 @@ public final class TestConductorProtocol { bitField0_ = (bitField0_ & ~0x00000001); direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; bitField0_ = (bitField0_ & ~0x00000002); - host_ = ""; + if (addressBuilder_ == null) { + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } else { + addressBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000004); - port_ = 0; - bitField0_ = (bitField0_ & ~0x00000008); rateMBit_ = 0F; - bitField0_ = (bitField0_ & ~0x00000010); + bitField0_ = (bitField0_ & ~0x00000008); exitValue_ = 0; - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -2229,18 +2842,18 @@ public final class TestConductorProtocol { if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - result.host_ = host_; + if (addressBuilder_ == null) { + result.address_ = address_; + } else { + result.address_ = addressBuilder_.build(); + } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } - result.port_ = port_; + result.rateMBit_ = rateMBit_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } - result.rateMBit_ = rateMBit_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } result.exitValue_ = exitValue_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -2264,11 +2877,8 @@ public final class TestConductorProtocol { if (other.hasDirection()) { setDirection(other.getDirection()); } - if (other.hasHost()) { - setHost(other.getHost()); - } - if (other.hasPort()) { - setPort(other.getPort()); + if (other.hasAddress()) { + mergeAddress(other.getAddress()); } if (other.hasRateMBit()) { setRateMBit(other.getRateMBit()); @@ -2285,6 +2895,12 @@ public final class 
TestConductorProtocol { return false; } + if (hasAddress()) { + if (!getAddress().isInitialized()) { + + return false; + } + } return true; } @@ -2334,22 +2950,21 @@ public final class TestConductorProtocol { break; } case 26: { - bitField0_ |= 0x00000004; - host_ = input.readBytes(); + akka.remote.testconductor.TestConductorProtocol.Address.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(); + if (hasAddress()) { + subBuilder.mergeFrom(getAddress()); + } + input.readMessage(subBuilder, extensionRegistry); + setAddress(subBuilder.buildPartial()); break; } - case 32: { + case 53: { bitField0_ |= 0x00000008; - port_ = input.readInt32(); - break; - } - case 45: { - bitField0_ |= 0x00000010; rateMBit_ = input.readFloat(); break; } - case 48: { - bitField0_ |= 0x00000020; + case 56: { + bitField0_ |= 0x00000010; exitValue_ = input.readInt32(); break; } @@ -2407,100 +3022,133 @@ public final class TestConductorProtocol { return this; } - // optional string host = 3; - private java.lang.Object host_ = ""; - public boolean hasHost() { + // optional .Address address = 3; + private akka.remote.testconductor.TestConductorProtocol.Address address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> addressBuilder_; + public boolean hasAddress() { return ((bitField0_ & 0x00000004) == 0x00000004); } - public String getHost() { - java.lang.Object ref = host_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - host_ = s; - return s; + public akka.remote.testconductor.TestConductorProtocol.Address getAddress() { + if (addressBuilder_ == null) { + return address_; } else { - return (String) ref; + return addressBuilder_.getMessage(); } } - public Builder setHost(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - host_ = value; - onChanged(); - return this; - } - public Builder clearHost() { - bitField0_ = (bitField0_ & ~0x00000004); - host_ = getDefaultInstance().getHost(); - onChanged(); - return this; - } - void setHost(com.google.protobuf.ByteString value) { + public Builder setAddress(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + address_ = value; + onChanged(); + } else { + addressBuilder_.setMessage(value); + } bitField0_ |= 0x00000004; - host_ = value; - onChanged(); - } - - // optional int32 port = 4; - private int port_ ; - public boolean hasPort() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getPort() { - return port_; - } - public Builder setPort(int value) { - bitField0_ |= 0x00000008; - port_ = value; - onChanged(); return this; } - public Builder clearPort() { - bitField0_ = (bitField0_ & ~0x00000008); - port_ = 0; - onChanged(); + public Builder setAddress( + akka.remote.testconductor.TestConductorProtocol.Address.Builder builderForValue) { + if (addressBuilder_ == null) { + address_ = builderForValue.build(); + onChanged(); + } else { + addressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; return this; } + public Builder mergeAddress(akka.remote.testconductor.TestConductorProtocol.Address value) { + if 
(addressBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + address_ != akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance()) { + address_ = + akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(address_).mergeFrom(value).buildPartial(); + } else { + address_ = value; + } + onChanged(); + } else { + addressBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder clearAddress() { + if (addressBuilder_ == null) { + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + onChanged(); + } else { + addressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.Address.Builder getAddressBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getAddressFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder() { + if (addressBuilder_ != null) { + return addressBuilder_.getMessageOrBuilder(); + } else { + return address_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> + getAddressFieldBuilder() { + if (addressBuilder_ == null) { + addressBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder>( + address_, + getParentForChildren(), + isClean()); + address_ = null; + } + return addressBuilder_; + } - // optional float rateMBit = 5; + // optional float rateMBit = 6; private float rateMBit_ ; public boolean hasRateMBit() { - return ((bitField0_ & 0x00000010) == 0x00000010); + return ((bitField0_ & 0x00000008) == 0x00000008); } public float getRateMBit() { return rateMBit_; } public Builder setRateMBit(float value) { - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000008; rateMBit_ = value; onChanged(); return this; } public Builder clearRateMBit() { - bitField0_ = (bitField0_ & ~0x00000010); + bitField0_ = (bitField0_ & ~0x00000008); rateMBit_ = 0F; onChanged(); return this; } - // optional int32 exitValue = 6; + // optional int32 exitValue = 7; private int exitValue_ ; public boolean hasExitValue() { - return ((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000010) == 0x00000010); } public int getExitValue() { return exitValue_; } public Builder setExitValue(int value) { - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000010; exitValue_ = value; onChanged(); return this; } public Builder clearExitValue() { - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000010); exitValue_ = 0; onChanged(); return this; @@ -2532,6 +3180,11 @@ public final class TestConductorProtocol { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_EnterBarrier_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Address_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Address_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_InjectFailure_descriptor; private static @@ -2549,16 +3202,17 @@ public final class TestConductorProtocol 
{ "\n\033TestConductorProtocol.proto\"a\n\007Wrapper" + "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" + "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" + - "tFailure\"1\n\005Hello\022\014\n\004name\030\001 \002(\t\022\014\n\004host\030" + - "\002 \002(\t\022\014\n\004port\030\003 \002(\005\"\034\n\014EnterBarrier\022\014\n\004n" + - "ame\030\001 \002(\t\"\213\001\n\rInjectFailure\022\032\n\007failure\030\001" + - " \002(\0162\t.FailType\022\035\n\tdirection\030\002 \001(\0162\n.Dir" + - "ection\022\014\n\004host\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\020\n\010ra" + - "teMBit\030\005 \001(\002\022\021\n\texitValue\030\006 \001(\005*A\n\010FailT" + - "ype\022\014\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abo", - "rt\020\003\022\014\n\010Shutdown\020\004*\"\n\tDirection\022\010\n\004Send\020" + - "\001\022\013\n\007Receive\020\002B\035\n\031akka.remote.testconduc" + - "torH\001" + "tFailure\"0\n\005Hello\022\014\n\004name\030\001 \002(\t\022\031\n\007addre" + + "ss\030\002 \002(\0132\010.Address\"\034\n\014EnterBarrier\022\014\n\004na" + + "me\030\001 \002(\t\"G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n" + + "\006system\030\002 \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(" + + "\005\"\212\001\n\rInjectFailure\022\032\n\007failure\030\001 \002(\0162\t.F" + + "ailType\022\035\n\tdirection\030\002 \001(\0162\n.Direction\022\031" + + "\n\007address\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 ", + "\001(\002\022\021\n\texitValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Th" + + "rottle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010S" + + "hutdown\020\004*\"\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Rece" + + "ive\020\002B\035\n\031akka.remote.testconductorH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -2578,7 +3232,7 @@ public final class TestConductorProtocol { internal_static_Hello_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Hello_descriptor, - new java.lang.String[] { "Name", "Host", "Port", }, + new java.lang.String[] { "Name", "Address", }, akka.remote.testconductor.TestConductorProtocol.Hello.class, akka.remote.testconductor.TestConductorProtocol.Hello.Builder.class); internal_static_EnterBarrier_descriptor = @@ -2589,12 +3243,20 @@ public final class TestConductorProtocol { new java.lang.String[] { "Name", }, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class); - internal_static_InjectFailure_descriptor = + internal_static_Address_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_Address_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Address_descriptor, + new java.lang.String[] { "Protocol", "System", "Host", "Port", }, + akka.remote.testconductor.TestConductorProtocol.Address.class, + akka.remote.testconductor.TestConductorProtocol.Address.Builder.class); + internal_static_InjectFailure_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_InjectFailure_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_InjectFailure_descriptor, - new java.lang.String[] { "Failure", 
"Direction", "Host", "Port", "RateMBit", "ExitValue", }, + new java.lang.String[] { "Failure", "Direction", "Address", "RateMBit", "ExitValue", }, akka.remote.testconductor.TestConductorProtocol.InjectFailure.class, akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder.class); return null; diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote/src/main/protocol/TestConductorProtocol.proto index 1db35a7516..213820e687 100644 --- a/akka-remote/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote/src/main/protocol/TestConductorProtocol.proto @@ -19,14 +19,20 @@ message Wrapper { message Hello { required string name = 1; - required string host = 2; - required int32 port = 3; + required Address address = 2; } message EnterBarrier { required string name = 1; } +message Address { + required string protocol = 1; + required string system = 2; + required string host = 3; + required int32 port = 4; +} + enum FailType { Throttle = 1; Disconnect = 2; @@ -40,9 +46,8 @@ enum Direction { message InjectFailure { required FailType failure = 1; optional Direction direction = 2; - optional string host = 3; - optional int32 port = 4; - optional float rateMBit = 5; - optional int32 exitValue = 6; + optional Address address = 3; + optional float rateMBit = 6; + optional int32 exitValue = 7; } diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 1438904fe2..384d00b55d 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -155,4 +155,25 @@ akka { type = PinnedDispatcher } } + + testconductor { + + # Timeout for joining a barrier: this is the maximum time any participants + # waits for everybody else to join a named barrier. + barrier-timeout = 30s + + # Timeout for interrogation of TestConductor’s Controller actor + query-timeout = 5s + + # Default port to start the conductor on; 0 means + port = 0 + + # Hostname of the TestConductor server, used by the server to bind to the IP + # and by the client to connect to it. + host = localhost + + # Name of the TestConductor client (for identification on the server e.g. for + # failure injection) + name = "noname" + } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 55e2d95636..c3a41f8275 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -56,13 +56,13 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor val server: NettyRemoteServer = try createServer() catch { case NonFatal(ex) ⇒ shutdown(); throw ex } /** - * Override this method to inject a subclass of NettyRemoteServer instead of + * Override this method to inject a subclass of NettyRemoteServer instead of * the normal one, e.g. for altering the pipeline. */ protected def createServer(): NettyRemoteServer = new NettyRemoteServer(this) - + /** - * Override this method to inject a subclass of RemoteClient instead of + * Override this method to inject a subclass of RemoteClient instead of * the normal one, e.g. for altering the pipeline. Get this transport’s * address from `this.address`. 
*/ diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 97d3f194f3..ac4289e8ae 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -44,7 +44,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { b.setOption("reuseAddress", true) b } - + protected def makePipeline(): ChannelPipelineFactory = new RemoteServerPipelineFactory(openChannels, executionHandler, netty) @volatile diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala index 3265fc8808..c46e22eb9f 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -18,26 +18,30 @@ import akka.event.LoggingAdapter import akka.actor.PoisonPill import akka.event.Logging import scala.util.control.NoStackTrace +import akka.event.LoggingReceive +import akka.actor.Address +import java.net.InetSocketAddress -object Conductor extends RunControl with FailureInject with BarrierSync { - - val system = ActorSystem("conductor", ConfigFactory.load().getConfig("conductor")) - - object Settings { - val config = system.settings.config - - implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("barrier-timeout"), MILLISECONDS)) - implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("query-timeout"), MILLISECONDS)) - } +trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒ import Controller._ - private val controller = system.actorOf(Props[Controller], "controller") - controller ! ClientConnected + private var _controller: ActorRef = _ + private def controller: ActorRef = _controller match { + case null ⇒ throw new RuntimeException("TestConductorServer was not started") + case x ⇒ x + } - override def enter(name: String*) { + override def startController() { + if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") + _controller = system.actorOf(Props[Controller], "controller") import Settings.BarrierTimeout - name foreach (b ⇒ Await.result(controller ? EnterBarrier(b), Duration.Inf)) + startClient(Await.result(controller ? GetPort mapTo, Duration.Inf)) + } + + override def port: Int = { + import Settings.QueryTimeout + Await.result(controller ? GetPort mapTo, Duration.Inf) } override def throttle(node: String, target: String, direction: Direction, rateMBit: Float) { @@ -127,7 +131,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi case Event(msg: Wrapper, _) ⇒ if (msg.hasHello) { val hello = msg.getHello - controller ! ClientConnected(hello.getName, hello.getHost, hello.getPort) + controller ! 
ClientConnected(hello.getName, hello.getAddress) goto(Ready) } else { log.warning("client {} sent no Hello in first message, disconnecting", getAddrString(channel)) @@ -162,29 +166,28 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } object Controller { - case class ClientConnected(name: String, host: String, port: Int) + case class ClientConnected(name: String, address: Address) case class ClientDisconnected(name: String) case object GetNodes + case object GetPort - case class NodeInfo(name: String, host: String, port: Int, fsm: ActorRef) + case class NodeInfo(name: String, addr: Address, fsm: ActorRef) } class Controller extends Actor { import Controller._ - val config = context.system.settings.config - - val host = config.getString("akka.testconductor.host") - val port = config.getInt("akka.testconductor.port") - val connection = RemoteConnection(Server, host, port, + val settings = TestConductor().Settings + val connection = RemoteConnection(Server, settings.host, settings.port, new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler"))) val barrier = context.actorOf(Props[BarrierCoordinator], "barriers") var nodes = Map[String, NodeInfo]() - override def receive = { - case ClientConnected(name, host, port) ⇒ - nodes += name -> NodeInfo(name, host, port, sender) + override def receive = LoggingReceive { + case "ready?" ⇒ sender ! "yes" + case ClientConnected(name, addr) ⇒ + nodes += name -> NodeInfo(name, addr, sender) barrier forward ClientConnected case ClientConnected ⇒ barrier forward ClientConnected @@ -199,8 +202,7 @@ class Controller extends Actor { InjectFailure.newBuilder .setFailure(FailType.Throttle) .setDirection(TestConductorProtocol.Direction.valueOf(direction.toString)) - .setHost(t.host) - .setPort(t.port) + .setAddress(t.addr) .setRateMBit(rateMBit) .build nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(throttle).build) @@ -209,8 +211,7 @@ class Controller extends Actor { val disconnect = InjectFailure.newBuilder .setFailure(if (abort) FailType.Abort else FailType.Disconnect) - .setHost(t.host) - .setPort(t.port) + .setAddress(t.addr) .build nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(disconnect).build) case Terminate(node, exitValueOrKill) ⇒ @@ -224,6 +225,10 @@ class Controller extends Actor { // case Remove(node) => // nodes -= node case GetNodes ⇒ sender ! nodes.keys + case GetPort ⇒ + sender ! 
(connection.getLocalAddress match { + case inet: InetSocketAddress ⇒ inet.getPort + }) } } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala new file mode 100644 index 0000000000..94847664c9 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala @@ -0,0 +1,31 @@ +package akka.remote.testconductor + +import akka.actor.ExtensionKey +import akka.actor.Extension +import akka.actor.ExtendedActorSystem +import akka.remote.RemoteActorRefProvider +import akka.actor.ActorContext +import akka.util.{ Duration, Timeout } +import java.util.concurrent.TimeUnit.MILLISECONDS + +object TestConductor extends ExtensionKey[TestConductorExt] { + def apply()(implicit ctx: ActorContext): TestConductorExt = apply(ctx.system) +} + +class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player { + + object Settings { + val config = system.settings.config + + implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.barrier-timeout"), MILLISECONDS)) + implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.query-timeout"), MILLISECONDS)) + + val name = config.getString("akka.testconductor.name") + val host = config.getString("akka.testconductor.host") + val port = config.getInt("akka.testconductor.port") + } + + val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport + val address = transport.address + +} \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala index 399b58337b..930be600c2 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala @@ -49,6 +49,16 @@ trait FailureInject { trait RunControl { + /** + * Start the server port. + */ + def startController(): Unit + + /** + * Get the actual port used by the server. + */ + def port: Int + /** * Tell the remote node to shut itself down using System.exit with the given * exitValue. 
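For orientation, a minimal usage sketch of the RunControl additions above; this is not part of the patch itself, it assumes the TestConductor extension introduced in this commit and a remote-enabled ActorSystem `system`, and the barrier name is purely illustrative.

    import akka.remote.testconductor._

    val tc = TestConductor(system)
    tc.startController()      // bind the conductor's server socket and connect the local client
    val boundPort = tc.port   // actual port, useful when akka.testconductor.port = 0
    tc.enter("setup")         // block until every registered node has reached the barrier
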
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index 88102b5e86..6569d81acc 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -25,11 +25,11 @@ import akka.actor.Props object NetworkFailureInjector { - val channels = new Index[Address, Channel](16, (c1, c2) => c1 compareTo c2) + val channels = new Index[Address, Channel](16, (c1, c2) ⇒ c1 compareTo c2) def close(remote: Address): Unit = { // channels will be cleaned up by the handler - for (chs <- channels.remove(remote); c <- chs) c.close() + for (chs ← channels.remove(remote); c ← chs) c.close() } } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala index 029045394c..93aa6bc33d 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala @@ -21,23 +21,40 @@ import akka.event.LoggingAdapter import akka.actor.PoisonPill import akka.event.Logging -object Player extends BarrierSync { +trait Player extends BarrierSync { this: TestConductorExt ⇒ - val system = ActorSystem("Player", ConfigFactory.load().getConfig("player")) - - object Settings { - val config = system.settings.config - - implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("barrier-timeout"), MILLISECONDS)) + private var _client: ActorRef = _ + private def client = _client match { + case null ⇒ throw new IllegalStateException("TestConductor client not yet started") + case x ⇒ x } - private val server = system.actorOf(Props[ClientFSM], "client") + def startClient(port: Int) { + import ClientFSM._ + import akka.actor.FSM._ + import Settings.BarrierTimeout + + if (_client ne null) throw new IllegalStateException("TestConductorClient already started") + _client = system.actorOf(Props(new ClientFSM(port)), "TestConductorClient") + val a = system.actorOf(Props(new Actor { + var waiting: ActorRef = _ + def receive = { + case fsm: ActorRef ⇒ waiting = sender; fsm ! SubscribeTransitionCallBack(self) + case Transition(_, Connecting, Connected) ⇒ waiting ! "okay" + case t: Transition[_] ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t)) + case CurrentState(_, Connected) ⇒ waiting ! "okay" + case _: CurrentState[_] ⇒ + } + })) + + Await.result(a ? client, Duration.Inf) + } override def enter(name: String*) { system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) name foreach { b ⇒ import Settings.BarrierTimeout - Await.result(server ? EnterBarrier(b), Duration.Inf) + Await.result(client ? 
EnterBarrier(b), Duration.Inf) system.log.debug("passed barrier {}", b) } } @@ -48,35 +65,28 @@ object ClientFSM { case object Connecting extends State case object Connected extends State - case class Data(channel: Channel, msg: Either[List[ClientOp], (String, ActorRef)]) + case class Data(channel: Channel, barrier: Option[(String, ActorRef)]) class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace case object Disconnected } -class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { +class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ - val config = context.system.settings.config + val settings = TestConductor().Settings - val name = config.getString("akka.testconductor.name") - val host = config.getString("akka.testconductor.host") - val port = config.getInt("akka.testconductor.port") val handler = new PlayerHandler(self, Logging(context.system, "PlayerHandler")) - val myself = "XXX" - val myport = 12345 - - startWith(Connecting, Data(RemoteConnection(Client, host, port, handler), Left(Nil))) + startWith(Connecting, Data(RemoteConnection(Client, settings.host, port, handler), None)) when(Connecting, stateTimeout = 10 seconds) { - case Event(msg: ClientOp, Data(channel, Left(msgs))) ⇒ - stay using Data(channel, Left(msg :: msgs)) - case Event(Connected, Data(channel, Left(msgs))) ⇒ - val hello = Hello.newBuilder.setName(name).setHost(myself).setPort(myport).build + case Event(msg: ClientOp, _) ⇒ + stay replying Status.Failure(new IllegalStateException("not connected yet")) + case Event(Connected, d @ Data(channel, _)) ⇒ + val hello = Hello.newBuilder.setName(settings.name).setAddress(TestConductor().address).build channel.write(Wrapper.newBuilder.setHello(hello).build) - msgs.reverse foreach sendMsg(channel) - goto(Connected) using Data(channel, Left(Nil)) + goto(Connected) case Event(_: ConnectionFailure, _) ⇒ // System.exit(1) stop @@ -92,8 +102,8 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { throw new ConnectionFailure("disconnect") case Event(msg: EnterBarrier, Data(channel, _)) ⇒ sendMsg(channel)(msg) - stay using Data(channel, Right((msg.name, sender))) - case Event(msg: Wrapper, Data(channel, Right((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ + stay using Data(channel, Some(msg.name, sender)) + case Event(msg: Wrapper, Data(channel, Some((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ if (msg.hasBarrier) { val b = msg.getBarrier.getName if (b != barrier) { @@ -102,7 +112,7 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { sender ! 
b } } - stay using Data(channel, Left(Nil)) + stay using Data(channel, None) } onTermination { @@ -110,6 +120,8 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { channel.close() } + initialize + private def sendMsg(channel: Channel)(msg: ClientOp) { msg match { case EnterBarrier(name) ⇒ diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/package.scala b/akka-remote/src/main/scala/akka/remote/testconductor/package.scala new file mode 100644 index 0000000000..8ebeea90a9 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/package.scala @@ -0,0 +1,19 @@ +package akka.remote + +import akka.actor.Address +import testconductor.{ TestConductorProtocol ⇒ TCP } + +package object testconductor { + + implicit def address2proto(addr: Address): TCP.Address = + TCP.Address.newBuilder + .setProtocol(addr.protocol) + .setSystem(addr.system) + .setHost(addr.host.get) + .setPort(addr.port.get) + .build + + implicit def address2scala(addr: TCP.Address): Address = + Address(addr.getProtocol, addr.getSystem, addr.getHost, addr.getPort) + +} \ No newline at end of file diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala new file mode 100644 index 0000000000..cae2917577 --- /dev/null +++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -0,0 +1,52 @@ +package akka.remote.testconductor + +import akka.remote.AkkaRemoteSpec +import com.typesafe.config.ConfigFactory +import akka.remote.AbstractRemoteActorMultiJvmSpec + +object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { + override def NrOfNodes = 2 + override def commonConfig = ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.actor.provider = akka.remote.RemoteActorRefProvider + akka.actor.debug { + receive = on + fsm = on + } + akka.testconductor { + host = localhost + port = 4712 + } + """) + def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) +} + +import TestConductorMultiJvmSpec._ + +class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(nameConfig(0)) { + + val nodes = TestConductorMultiJvmSpec.NrOfNodes + + "running a test" in { + val tc = TestConductor(system) + tc.startController() + barrier("start") + barrier("first") + tc.enter("begin") + barrier("end") + } +} + +class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(nameConfig(1)) { + + val nodes = TestConductorMultiJvmSpec.NrOfNodes + + "running a test" in { + barrier("start") + val tc = TestConductor(system) + tc.startClient(4712) + barrier("first") + tc.enter("begin") + barrier("end") + } +} From 0314b9abbbeac4a0c8f72dea6fb866eb29cb3847 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 4 May 2012 22:30:00 +0200 Subject: [PATCH 004/106] fix bug in FSM when manually rescheduling non-recurring timer, see #2043 --- akka-actor/src/main/scala/akka/actor/FSM.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index b277142e76..81126c4d8d 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -443,10 +443,10 @@ trait FSM[S, D] extends Listeners { timeoutFuture = None } generation += 1 - processMsg(msg, t) if (!repeat) { timers -= name } + processMsg(msg, t) } case SubscribeTransitionCallBack(actorRef) ⇒ // 
TODO use DeathWatch to clean up list From 9266ac451b4d938f972f943e352b9da9329ca226 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 4 May 2012 22:33:08 +0200 Subject: [PATCH 005/106] integrate NetworkFailureInjector and add first test - rework socket pipeline to transform protobuf into case classes and back - introduce NetworkOp messages for that purpose - make API asynchronous (because it is, really) and add Done notification for all server operations; enter(...) is still synchronous, because that is its only purpose in life - factor out mkPipeline in NettyRemoteTransport, enabling the very slick TestConductorTransport (essentially a one-liner) - switch NetworkFailureInjector from Channel{Up,Down}streamHandler to subclassing SimpleChannelHandler, because otherwise deadlocks occurred, not sure why (but SCH is the recommended way from the netty docs, so there may well be a reason) --- .../testconductor/TestConductorProtocol.java | 126 +++++++++++++-- .../main/protocol/TestConductorProtocol.proto | 2 + .../main/scala/akka/remote/netty/Client.scala | 31 +--- .../remote/netty/NettyRemoteSupport.scala | 65 ++++++-- .../main/scala/akka/remote/netty/Server.scala | 26 +-- .../akka/remote/testconductor/Conductor.scala | 151 +++++++++--------- .../akka/remote/testconductor/DataTypes.scala | 77 ++++++++- .../akka/remote/testconductor/Extension.scala | 7 + .../akka/remote/testconductor/Features.scala | 33 ++-- .../NetworkFailureInjector.scala | 116 ++++++++------ .../akka/remote/testconductor/Player.scala | 69 ++++---- .../testconductor/RemoteConnection.scala | 3 +- .../akka/remote/testconductor/package.scala | 12 ++ .../AbstractRemoteActorMultiJvmSpec.scala | 2 +- .../testconductor/TestConductorSpec.scala | 87 ++++++++-- 15 files changed, 538 insertions(+), 269 deletions(-) diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index 0b2950018f..f112a1b0c2 100644 --- a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -87,10 +87,12 @@ public final class TestConductorProtocol { implements com.google.protobuf.ProtocolMessageEnum { Send(0, 1), Receive(1, 2), + Both(2, 3), ; public static final int Send_VALUE = 1; public static final int Receive_VALUE = 2; + public static final int Both_VALUE = 3; public final int getNumber() { return value; } @@ -99,6 +101,7 @@ public final class TestConductorProtocol { switch (value) { case 1: return Send; case 2: return Receive; + case 3: return Both; default: return null; } } @@ -129,7 +132,7 @@ public final class TestConductorProtocol { } private static final Direction[] VALUES = { - Send, Receive, + Send, Receive, Both, }; public static Direction valueOf( @@ -169,6 +172,10 @@ public final class TestConductorProtocol { boolean hasFailure(); akka.remote.testconductor.TestConductorProtocol.InjectFailure getFailure(); akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder getFailureOrBuilder(); + + // optional string done = 4; + boolean hasDone(); + String getDone(); } public static final class Wrapper extends com.google.protobuf.GeneratedMessage @@ -238,10 +245,43 @@ public final class TestConductorProtocol { return failure_; } + // optional string done = 4; + public static final int DONE_FIELD_NUMBER = 4; + private java.lang.Object done_; + public boolean hasDone() { + return ((bitField0_ & 0x00000008) == 0x00000008); + 
} + public String getDone() { + java.lang.Object ref = done_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + done_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getDoneBytes() { + java.lang.Object ref = done_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + done_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private void initFields() { hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + done_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -282,6 +322,9 @@ public final class TestConductorProtocol { if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, failure_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getDoneBytes()); + } getUnknownFields().writeTo(output); } @@ -303,6 +346,10 @@ public final class TestConductorProtocol { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, failure_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getDoneBytes()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -448,6 +495,8 @@ public final class TestConductorProtocol { failureBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); + done_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -510,6 +559,10 @@ public final class TestConductorProtocol { } else { result.failure_ = failureBuilder_.build(); } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.done_ = done_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -535,6 +588,9 @@ public final class TestConductorProtocol { if (other.hasFailure()) { mergeFailure(other.getFailure()); } + if (other.hasDone()) { + setDone(other.getDone()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -611,6 +667,11 @@ public final class TestConductorProtocol { setFailure(subBuilder.buildPartial()); break; } + case 34: { + bitField0_ |= 0x00000008; + done_ = input.readBytes(); + break; + } } } } @@ -887,6 +948,42 @@ public final class TestConductorProtocol { return failureBuilder_; } + // optional string done = 4; + private java.lang.Object done_ = ""; + public boolean hasDone() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public String getDone() { + java.lang.Object ref = done_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + done_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setDone(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + done_ = value; + onChanged(); + return this; + } + public Builder clearDone() { + bitField0_ = (bitField0_ & ~0x00000008); + done_ = getDefaultInstance().getDone(); + onChanged(); + return this; + } + void setDone(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000008; + done_ = value; + 
onChanged(); + } + // @@protoc_insertion_point(builder_scope:Wrapper) } @@ -3199,20 +3296,21 @@ public final class TestConductorProtocol { descriptor; static { java.lang.String[] descriptorData = { - "\n\033TestConductorProtocol.proto\"a\n\007Wrapper" + + "\n\033TestConductorProtocol.proto\"o\n\007Wrapper" + "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" + "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" + - "tFailure\"0\n\005Hello\022\014\n\004name\030\001 \002(\t\022\031\n\007addre" + - "ss\030\002 \002(\0132\010.Address\"\034\n\014EnterBarrier\022\014\n\004na" + - "me\030\001 \002(\t\"G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n" + - "\006system\030\002 \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(" + - "\005\"\212\001\n\rInjectFailure\022\032\n\007failure\030\001 \002(\0162\t.F" + - "ailType\022\035\n\tdirection\030\002 \001(\0162\n.Direction\022\031" + - "\n\007address\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 ", - "\001(\002\022\021\n\texitValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Th" + - "rottle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010S" + - "hutdown\020\004*\"\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Rece" + - "ive\020\002B\035\n\031akka.remote.testconductorH\001" + "tFailure\022\014\n\004done\030\004 \001(\t\"0\n\005Hello\022\014\n\004name\030" + + "\001 \002(\t\022\031\n\007address\030\002 \002(\0132\010.Address\"\034\n\014Ente" + + "rBarrier\022\014\n\004name\030\001 \002(\t\"G\n\007Address\022\020\n\010pro" + + "tocol\030\001 \002(\t\022\016\n\006system\030\002 \002(\t\022\014\n\004host\030\003 \002(" + + "\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInjectFailure\022\032\n\007fai" + + "lure\030\001 \002(\0162\t.FailType\022\035\n\tdirection\030\002 \001(\016" + + "2\n.Direction\022\031\n\007address\030\003 \001(\0132\010.Address\022", + "\020\n\010rateMBit\030\006 \001(\002\022\021\n\texitValue\030\007 \001(\005*A\n\010" + + "FailType\022\014\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t" + + "\n\005Abort\020\003\022\014\n\010Shutdown\020\004*,\n\tDirection\022\010\n\004" + + "Send\020\001\022\013\n\007Receive\020\002\022\010\n\004Both\020\003B\035\n\031akka.re" + + "mote.testconductorH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -3224,7 +3322,7 @@ public final class TestConductorProtocol { internal_static_Wrapper_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Wrapper_descriptor, - new java.lang.String[] { "Hello", "Barrier", "Failure", }, + new java.lang.String[] { "Hello", "Barrier", "Failure", "Done", }, akka.remote.testconductor.TestConductorProtocol.Wrapper.class, akka.remote.testconductor.TestConductorProtocol.Wrapper.Builder.class); internal_static_Hello_descriptor = diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote/src/main/protocol/TestConductorProtocol.proto index 213820e687..e483bf4f01 100644 --- a/akka-remote/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote/src/main/protocol/TestConductorProtocol.proto @@ -15,6 +15,7 @@ message Wrapper { optional Hello hello = 1; optional EnterBarrier barrier = 2; optional InjectFailure failure = 3; + optional string done = 4; } message Hello { @@ -42,6 +43,7 @@ enum FailType { enum Direction { Send 
= 1; Receive = 2; + Both = 3; } message InjectFailure { required FailType failure = 1; diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index a0e91398fc..cf143650bc 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -112,8 +112,6 @@ class ActiveRemoteClient private[akka] ( private var connection: ChannelFuture = _ @volatile private[remote] var openChannels: DefaultChannelGroup = _ - @volatile - private var executionHandler: ExecutionHandler = _ @volatile private var reconnectionTimeWindowStart = 0L @@ -156,9 +154,8 @@ class ActiveRemoteClient private[akka] ( runSwitch switchOn { openChannels = new DefaultDisposableChannelGroup(classOf[RemoteClient].getName) - executionHandler = new ExecutionHandler(netty.executor) val b = new ClientBootstrap(netty.clientChannelFactory) - b.setPipelineFactory(new ActiveRemoteClientPipelineFactory(name, b, executionHandler, remoteAddress, localAddress, this)) + b.setPipelineFactory(netty.mkPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), true)) b.setOption("tcpNoDelay", true) b.setOption("keepAlive", true) b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis) @@ -206,7 +203,6 @@ class ActiveRemoteClient private[akka] ( if (openChannels ne null) openChannels.close.awaitUninterruptibly() } finally { connection = null - executionHandler = null } } @@ -319,31 +315,6 @@ class ActiveRemoteClientHandler( } } -class ActiveRemoteClientPipelineFactory( - name: String, - bootstrap: ClientBootstrap, - executionHandler: ExecutionHandler, - remoteAddress: Address, - localAddress: Address, - client: ActiveRemoteClient) extends ChannelPipelineFactory { - - import client.netty.settings - - def getPipeline: ChannelPipeline = { - val timeout = new IdleStateHandler(client.netty.timer, - settings.ReadTimeout.toSeconds.toInt, - settings.WriteTimeout.toSeconds.toInt, - settings.AllTimeout.toSeconds.toInt) - val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) - val lenPrep = new LengthFieldPrepender(4) - val messageDec = new RemoteMessageDecoder - val messageEnc = new RemoteMessageEncoder(client.netty) - val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, localAddress, client.netty.timer, client) - - new StaticChannelPipeline(timeout, lenDec, messageDec, lenPrep, messageEnc, executionHandler, remoteClient) - } -} - class PassiveRemoteClient(val currentChannel: Channel, netty: NettyRemoteTransport, remoteAddress: Address) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index c3a41f8275..35ef3bf7fd 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -22,6 +22,13 @@ import akka.event.Logging import akka.remote.RemoteProtocol.AkkaRemoteProtocol import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteSettings, RemoteMarshallingOps, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } import akka.util.NonFatal +import org.jboss.netty.channel.StaticChannelPipeline +import org.jboss.netty.channel.ChannelHandler +import org.jboss.netty.handler.codec.frame.LengthFieldPrepender +import org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder +import 
org.jboss.netty.handler.timeout.IdleStateHandler +import org.jboss.netty.channel.ChannelPipelineFactory +import org.jboss.netty.handler.execution.ExecutionHandler /** * Provides the implementation of the Netty remote support @@ -34,20 +41,54 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor // TODO replace by system.scheduler val timer: HashedWheelTimer = new HashedWheelTimer(system.threadFactory) - // TODO make configurable - lazy val executor = new OrderedMemoryAwareThreadPoolExecutor( - settings.ExecutionPoolSize, - settings.MaxChannelMemorySize, - settings.MaxTotalMemorySize, - settings.ExecutionPoolKeepalive.length, - settings.ExecutionPoolKeepalive.unit, - system.threadFactory) - // TODO make configurable/shareable with server socket factory val clientChannelFactory = new NioClientSocketChannelFactory( Executors.newCachedThreadPool(system.threadFactory), Executors.newCachedThreadPool(system.threadFactory)) + object PipelineFactory { + def apply(handlers: Seq[ChannelHandler]): StaticChannelPipeline = new StaticChannelPipeline(handlers: _*) + def apply(endpoint: ⇒ Seq[ChannelHandler], withTimeout: Boolean): ChannelPipelineFactory = + new ChannelPipelineFactory { + def getPipeline = apply(defaultStack(withTimeout) ++ endpoint) + } + + def defaultStack(withTimeout: Boolean): Seq[ChannelHandler] = + (if (withTimeout) timeout :: Nil else Nil) ::: + msgFormat ::: + authenticator ::: + executionHandler :: + Nil + + def timeout = new IdleStateHandler(timer, + settings.ReadTimeout.toSeconds.toInt, + settings.WriteTimeout.toSeconds.toInt, + settings.AllTimeout.toSeconds.toInt) + + def msgFormat = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) :: + new LengthFieldPrepender(4) :: + new RemoteMessageDecoder :: + new RemoteMessageEncoder(NettyRemoteTransport.this) :: + Nil + + val executionHandler = new ExecutionHandler(new OrderedMemoryAwareThreadPoolExecutor( + settings.ExecutionPoolSize, + settings.MaxChannelMemorySize, + settings.MaxTotalMemorySize, + settings.ExecutionPoolKeepalive.length, + settings.ExecutionPoolKeepalive.unit, + system.threadFactory)) + + def authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil + } + + /** + * This method is factored out to provide an extension point in case the + * pipeline shall be changed. 
It is recommended to use + */ + def mkPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory = + PipelineFactory(Seq(endpoint), withTimeout) + private val remoteClients = new HashMap[Address, RemoteClient] private val clientsLock = new ReentrantReadWriteLock @@ -105,11 +146,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor try { timer.stop() } finally { - try { - clientChannelFactory.releaseExternalResources() - } finally { - executor.shutdown() - } + clientChannelFactory.releaseExternalResources() } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index ac4289e8ae..f9d4ede1d8 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -30,14 +30,12 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { Executors.newCachedThreadPool(netty.system.threadFactory), Executors.newCachedThreadPool(netty.system.threadFactory)) - private val executionHandler = new ExecutionHandler(netty.executor) - // group of open channels, used for clean-up private val openChannels: ChannelGroup = new DefaultDisposableChannelGroup("akka-remote-server") private val bootstrap = { val b = new ServerBootstrap(factory) - b.setPipelineFactory(makePipeline()) + b.setPipelineFactory(netty.mkPipeline(new RemoteServerHandler(openChannels, netty), false)) b.setOption("backlog", settings.Backlog) b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) @@ -45,8 +43,6 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { b } - protected def makePipeline(): ChannelPipelineFactory = new RemoteServerPipelineFactory(openChannels, executionHandler, netty) - @volatile private[akka] var channel: Channel = _ @@ -79,26 +75,6 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { } } -class RemoteServerPipelineFactory( - val openChannels: ChannelGroup, - val executionHandler: ExecutionHandler, - val netty: NettyRemoteTransport) extends ChannelPipelineFactory { - - import netty.settings - - def getPipeline: ChannelPipeline = { - val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) - val lenPrep = new LengthFieldPrepender(4) - val messageDec = new RemoteMessageDecoder - val messageEnc = new RemoteMessageEncoder(netty) - - val authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil - val remoteServer = new RemoteServerHandler(openChannels, netty) - val stages: List[ChannelHandler] = lenDec :: messageDec :: lenPrep :: messageEnc :: executionHandler :: authenticator ::: remoteServer :: Nil - new StaticChannelPipeline(stages: _*) - } -} - @ChannelHandler.Sharable class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler { val authenticated = new AnyRef diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala index c46e22eb9f..c9cbeadf83 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -21,6 +21,7 @@ import scala.util.control.NoStackTrace import akka.event.LoggingReceive import akka.actor.Address import java.net.InetSocketAddress +import akka.dispatch.Future trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒ 
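A rough sketch, not part of the patch, of how a test might drive the Future-based conductor API introduced in the following hunk; the node names and timeouts are assumptions, and blocking with Await is done here only for brevity.

    import akka.dispatch.Await
    import akka.util.duration._
    import akka.remote.testconductor._

    val tc = TestConductor(system)
    val port = Await.result(tc.startController(), 5 seconds)   // server bound, actual port known
    // each call completes with Done once the remote client has acknowledged the injected failure
    Await.result(tc.throttle("node2", "node1", Direction.Send, 1.0), 5 seconds)
    Await.result(tc.disconnect("node2", "node1"), 5 seconds)
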
@@ -32,55 +33,63 @@ trait Conductor extends RunControl with FailureInject { this: TestConductorExt case x ⇒ x } - override def startController() { + override def startController(): Future[Int] = { if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") _controller = system.actorOf(Props[Controller], "controller") import Settings.BarrierTimeout - startClient(Await.result(controller ? GetPort mapTo, Duration.Inf)) + controller ? GetPort flatMap { case port: Int ⇒ startClient(port) map (_ ⇒ port) } } - override def port: Int = { + override def port: Future[Int] = { import Settings.QueryTimeout - Await.result(controller ? GetPort mapTo, Duration.Inf) + controller ? GetPort mapTo } - override def throttle(node: String, target: String, direction: Direction, rateMBit: Float) { - controller ! Throttle(node, target, direction, rateMBit) - } - - override def blackhole(node: String, target: String, direction: Direction) { - controller ! Throttle(node, target, direction, 0f) - } - - override def disconnect(node: String, target: String) { - controller ! Disconnect(node, target, false) - } - - override def abort(node: String, target: String) { - controller ! Disconnect(node, target, true) - } - - override def shutdown(node: String, exitValue: Int) { - controller ! Terminate(node, exitValue) - } - - override def kill(node: String) { - controller ! Terminate(node, -1) - } - - override def getNodes = { + override def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] = { import Settings.QueryTimeout - Await.result(controller ? GetNodes mapTo manifest[List[String]], Duration.Inf) + controller ? Throttle(node, target, direction, rateMBit.toFloat) mapTo } - override def removeNode(node: String) { - controller ! Remove(node) + override def blackhole(node: String, target: String, direction: Direction): Future[Done] = { + import Settings.QueryTimeout + controller ? Throttle(node, target, direction, 0f) mapTo + } + + override def disconnect(node: String, target: String): Future[Done] = { + import Settings.QueryTimeout + controller ? Disconnect(node, target, false) mapTo + } + + override def abort(node: String, target: String): Future[Done] = { + import Settings.QueryTimeout + controller ? Disconnect(node, target, true) mapTo + } + + override def shutdown(node: String, exitValue: Int): Future[Done] = { + import Settings.QueryTimeout + controller ? Terminate(node, exitValue) mapTo + } + + override def kill(node: String): Future[Done] = { + import Settings.QueryTimeout + controller ? Terminate(node, -1) mapTo + } + + override def getNodes: Future[List[String]] = { + import Settings.QueryTimeout + controller ? GetNodes mapTo + } + + override def removeNode(node: String): Future[Done] = { + import Settings.QueryTimeout + controller ? Remove(node) mapTo } } class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { + @volatile var clients = Map[Channel, ActorRef]() override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { @@ -102,7 +111,7 @@ class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAd val channel = event.getChannel log.debug("message from {}: {}", getAddrString(channel), event.getMessage) event.getMessage match { - case msg: Wrapper if msg.getAllFields.size == 1 ⇒ + case msg: NetworkOp ⇒ clients(channel) ! 
msg case msg ⇒ log.info("client {} sent garbage '{}', disconnecting", getAddrString(channel), msg) @@ -116,28 +125,26 @@ object ServerFSM { sealed trait State case object Initial extends State case object Ready extends State - - case class Send(msg: Wrapper) } -class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Null] { +class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] { import ServerFSM._ import akka.actor.FSM._ import Controller._ - startWith(Initial, null) + startWith(Initial, None) when(Initial, stateTimeout = 10 seconds) { - case Event(msg: Wrapper, _) ⇒ - if (msg.hasHello) { - val hello = msg.getHello - controller ! ClientConnected(hello.getName, hello.getAddress) - goto(Ready) - } else { - log.warning("client {} sent no Hello in first message, disconnecting", getAddrString(channel)) - channel.close() - stop() - } + case Event(Hello(name, addr), _) ⇒ + controller ! ClientConnected(name, addr) + goto(Ready) + case Event(x: NetworkOp, _) ⇒ + log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x) + channel.close() + stop() + case Event(Send(msg), _) ⇒ + log.warning("cannot send {} in state Initial", msg) + stay case Event(StateTimeout, _) ⇒ log.info("closing channel to {} because of Hello timeout", getAddrString(channel)) channel.close() @@ -145,20 +152,24 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } when(Ready) { - case Event(msg: Wrapper, _) ⇒ - if (msg.hasBarrier) { - val barrier = msg.getBarrier - controller ! EnterBarrier(barrier.getName) - } else { - log.warning("client {} sent unsupported message {}", getAddrString(channel), msg) - } + case Event(msg: EnterBarrier, _) ⇒ + controller ! msg stay - case Event(Send(msg), _) ⇒ + case Event(d: Done, Some(s)) ⇒ + s ! d + stay using None + case Event(msg: NetworkOp, _) ⇒ + log.warning("client {} sent unsupported message {}", getAddrString(channel), msg) + channel.close() + stop() + case Event(Send(msg: EnterBarrier), _) ⇒ channel.write(msg) stay - case Event(EnterBarrier(name), _) ⇒ - val barrier = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build - channel.write(Wrapper.newBuilder.setBarrier(barrier).build) + case Event(Send(msg), None) ⇒ + channel.write(msg) + stay using Some(sender) + case Event(Send(msg), _) ⇒ + log.warning("cannot send {} while waiting for previous ACK", msg) stay } @@ -185,7 +196,6 @@ class Controller extends Actor { var nodes = Map[String, NodeInfo]() override def receive = LoggingReceive { - case "ready?" ⇒ sender ! "yes" case ClientConnected(name, addr) ⇒ nodes += name -> NodeInfo(name, addr, sender) barrier forward ClientConnected @@ -198,28 +208,15 @@ class Controller extends Actor { barrier forward e case Throttle(node, target, direction, rateMBit) ⇒ val t = nodes(target) - val throttle = - InjectFailure.newBuilder - .setFailure(FailType.Throttle) - .setDirection(TestConductorProtocol.Direction.valueOf(direction.toString)) - .setAddress(t.addr) - .setRateMBit(rateMBit) - .build - nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(throttle).build) + nodes(node).fsm forward Send(ThrottleMsg(t.addr, direction, rateMBit)) case Disconnect(node, target, abort) ⇒ val t = nodes(target) - val disconnect = - InjectFailure.newBuilder - .setFailure(if (abort) FailType.Abort else FailType.Disconnect) - .setAddress(t.addr) - .build - nodes(node).fsm ! 
ServerFSM.Send(Wrapper.newBuilder.setFailure(disconnect).build) + nodes(node).fsm forward Send(DisconnectMsg(t.addr, abort)) case Terminate(node, exitValueOrKill) ⇒ if (exitValueOrKill < 0) { // TODO: kill via SBT } else { - val shutdown = InjectFailure.newBuilder.setFailure(FailType.Shutdown).setExitValue(exitValueOrKill).build - nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(shutdown).build) + nodes(node).fsm forward Send(TerminateMsg(exitValueOrKill)) } // TODO: properly remove node from BarrierCoordinator // case Remove(node) => @@ -269,7 +266,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, if (name != barrier) throw new IllegalStateException("trying enter barrier '" + name + "' while barrier '" + barrier + "' is active") val together = sender :: arrived if (together.size == num) { - together foreach (_ ! e) + together foreach (_ ! Send(e)) goto(Idle) using Data(num, "", Nil) } else { stay using d.copy(arrived = together) @@ -280,7 +277,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, val expected = num - 1 if (arrived.size == expected) { val e = EnterBarrier(barrier) - sender :: arrived foreach (_ ! e) + sender :: arrived foreach (_ ! Send(e)) goto(Idle) using Data(expected, "", Nil) } else { stay using d.copy(clients = expected) diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala index 2b54ea1018..90d7eeccd5 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -3,11 +3,82 @@ */ package akka.remote.testconductor -sealed trait ClientOp -sealed trait ServerOp +import org.jboss.netty.handler.codec.oneone.OneToOneEncoder +import org.jboss.netty.channel.ChannelHandlerContext +import org.jboss.netty.channel.Channel +import akka.remote.testconductor.{ TestConductorProtocol ⇒ TCP } +import com.google.protobuf.Message +import akka.actor.Address +import org.jboss.netty.handler.codec.oneone.OneToOneDecoder -case class EnterBarrier(name: String) extends ClientOp with ServerOp +case class Send(msg: NetworkOp) + +sealed trait ClientOp // messages sent to Player FSM +sealed trait ServerOp // messages sent to Conductor FSM +sealed trait NetworkOp // messages sent over the wire + +case class Hello(name: String, addr: Address) extends NetworkOp +case class EnterBarrier(name: String) extends ClientOp with ServerOp with NetworkOp case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends ServerOp +case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends NetworkOp case class Disconnect(node: String, target: String, abort: Boolean) extends ServerOp +case class DisconnectMsg(target: Address, abort: Boolean) extends NetworkOp case class Terminate(node: String, exitValueOrKill: Int) extends ServerOp +case class TerminateMsg(exitValue: Int) extends NetworkOp +abstract class Done extends NetworkOp +case object Done extends Done { + def getInstance: Done = this +} + case class Remove(node: String) extends ServerOp + +class MsgEncoder extends OneToOneEncoder { + def encode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { + case x: NetworkOp ⇒ + val w = TCP.Wrapper.newBuilder + x match { + case Hello(name, addr) ⇒ + w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr)) + case EnterBarrier(name) ⇒ + 
w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name)) + case ThrottleMsg(target, dir, rate) ⇒ + w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) + .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate)) + case DisconnectMsg(target, abort) ⇒ + w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) + .setFailure(if (abort) TCP.FailType.Abort else TCP.FailType.Disconnect)) + case TerminateMsg(exitValue) ⇒ + w.setFailure(TCP.InjectFailure.newBuilder.setFailure(TCP.FailType.Shutdown).setExitValue(exitValue)) + case _: Done ⇒ + w.setDone("") + } + w.build + case _ ⇒ throw new IllegalArgumentException("wrong message " + msg) + } +} + +class MsgDecoder extends OneToOneDecoder { + def decode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { + case w: TCP.Wrapper if w.getAllFields.size == 1 ⇒ + if (w.hasHello) { + val h = w.getHello + Hello(h.getName, h.getAddress) + } else if (w.hasBarrier) { + EnterBarrier(w.getBarrier.getName) + } else if (w.hasFailure) { + val f = w.getFailure + import TCP.{ FailType ⇒ FT } + f.getFailure match { + case FT.Throttle ⇒ ThrottleMsg(f.getAddress, f.getDirection, f.getRateMBit) + case FT.Abort ⇒ DisconnectMsg(f.getAddress, true) + case FT.Disconnect ⇒ DisconnectMsg(f.getAddress, false) + case FT.Shutdown ⇒ TerminateMsg(f.getExitValue) + } + } else if (w.hasDone) { + Done + } else { + throw new IllegalArgumentException("unknown message " + msg) + } + case _ ⇒ throw new IllegalArgumentException("wrong message " + msg) + } +} diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala index 94847664c9..bffa84847f 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala @@ -7,9 +7,14 @@ import akka.remote.RemoteActorRefProvider import akka.actor.ActorContext import akka.util.{ Duration, Timeout } import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.actor.ActorRef +import java.util.concurrent.ConcurrentHashMap +import akka.actor.Address object TestConductor extends ExtensionKey[TestConductorExt] { + def apply()(implicit ctx: ActorContext): TestConductorExt = apply(ctx.system) + } class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player { @@ -28,4 +33,6 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport val address = transport.address + val failureInjectors = new ConcurrentHashMap[Address, FailureInjector] + } \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala index 930be600c2..b94f205726 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala @@ -3,6 +3,8 @@ */ package akka.remote.testconductor +import akka.dispatch.Future + trait BarrierSync { /** * Enter all given barriers in the order in which they were given. 
@@ -11,9 +13,12 @@ trait BarrierSync { } sealed trait Direction -case object Send extends Direction -case object Receive extends Direction -case object Both extends Direction + +object Direction { + case object Send extends Direction + case object Receive extends Direction + case object Both extends Direction +} trait FailureInject { @@ -21,7 +26,7 @@ trait FailureInject { * Make the remoting pipeline on the node throttle data sent to or received * from the given remote peer. */ - def throttle(node: String, target: String, direction: Direction, rateMBit: Float): Unit + def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] /** * Switch the Netty pipeline of the remote support into blackhole mode for @@ -29,56 +34,56 @@ trait FailureInject { * submitting them to the Socket or right after receiving them from the * Socket. */ - def blackhole(node: String, target: String, direction: Direction): Unit + def blackhole(node: String, target: String, direction: Direction): Future[Done] /** * Tell the remote support to shutdown the connection to the given remote * peer. It works regardless of whether the recipient was initiator or * responder. */ - def disconnect(node: String, target: String): Unit + def disconnect(node: String, target: String): Future[Done] /** * Tell the remote support to TCP_RESET the connection to the given remote * peer. It works regardless of whether the recipient was initiator or * responder. */ - def abort(node: String, target: String): Unit + def abort(node: String, target: String): Future[Done] } trait RunControl { /** - * Start the server port. + * Start the server port, returns the port number. */ - def startController(): Unit + def startController(): Future[Int] /** * Get the actual port used by the server. */ - def port: Int + def port: Future[Int] /** * Tell the remote node to shut itself down using System.exit with the given * exitValue. */ - def shutdown(node: String, exitValue: Int): Unit + def shutdown(node: String, exitValue: Int): Future[Done] /** * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy. */ - def kill(node: String): Unit + def kill(node: String): Future[Done] /** * Obtain the list of remote host names currently registered. */ - def getNodes: List[String] + def getNodes: Future[List[String]] /** * Remove a remote host from the list, so that the remaining nodes may still * pass subsequent barriers. */ - def removeNode(node: String): Unit + def removeNode(node: String): Future[Done] } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index 6569d81acc..30e5308979 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.remote.testconductor @@ -9,11 +9,9 @@ import org.jboss.netty.buffer.ChannelBuffer import org.jboss.netty.channel.ChannelState.BOUND import org.jboss.netty.channel.ChannelState.OPEN import org.jboss.netty.channel.Channel -import org.jboss.netty.channel.ChannelDownstreamHandler import org.jboss.netty.channel.ChannelEvent import org.jboss.netty.channel.ChannelHandlerContext import org.jboss.netty.channel.ChannelStateEvent -import org.jboss.netty.channel.ChannelUpstreamHandler import org.jboss.netty.channel.MessageEvent import akka.actor.FSM import akka.actor.Actor @@ -22,23 +20,26 @@ import akka.util.Index import akka.actor.Address import akka.actor.ActorSystem import akka.actor.Props +import akka.actor.ActorRef +import akka.event.Logging +import org.jboss.netty.channel.SimpleChannelHandler -object NetworkFailureInjector { - - val channels = new Index[Address, Channel](16, (c1, c2) ⇒ c1 compareTo c2) - - def close(remote: Address): Unit = { - // channels will be cleaned up by the handler - for (chs ← channels.remove(remote); c ← chs) c.close() +case class FailureInjector(sender: ActorRef, receiver: ActorRef) { + def refs(dir: Direction) = dir match { + case Direction.Send ⇒ Seq(sender) + case Direction.Receive ⇒ Seq(receiver) + case Direction.Both ⇒ Seq(sender, receiver) } } -class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler with ChannelDownstreamHandler { +object NetworkFailureInjector { + case class SetRate(rateMBit: Float) + case class Disconnect(abort: Boolean) +} - import NetworkFailureInjector._ +class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { - // local cache of remote address - private var remote: Option[Address] = None + val log = Logging(system, "FailureInjector") // everything goes via these Throttle actors to enable easy steering private val sender = system.actorOf(Props(new Throttle(_.sendDownstream(_)))) @@ -54,8 +55,8 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler private case class Data(ctx: ChannelHandlerContext, rateMBit: Float, queue: Queue[MessageEvent]) - private case class SetRate(rateMBit: Float) private case class Send(ctx: ChannelHandlerContext, msg: MessageEvent) + private case class SetContext(ctx: ChannelHandlerContext) private case object Tick private class Throttle(send: (ChannelHandlerContext, MessageEvent) ⇒ Unit) extends Actor with FSM[State, Data] { @@ -65,6 +66,7 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler when(PassThrough) { case Event(Send(ctx, msg), d) ⇒ + log.debug("sending msg (PassThrough): {}", msg) send(ctx, msg) stay } @@ -77,26 +79,37 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler stay using d.copy(ctx = ctx, queue = d.queue.enqueue(msg)) case Event(Tick, d) ⇒ val (msg, queue) = d.queue.dequeue + log.debug("sending msg (Tick, {}/{} left): {}", d.queue.size, queue.size, msg) send(d.ctx, msg) - if (queue.nonEmpty) setTimer("send", Tick, (size(queue.head) / d.rateMBit) microseconds, false) + if (queue.nonEmpty) { + val time = (size(queue.head) / d.rateMBit).microseconds + log.debug("scheduling next Tick in {}", time) + setTimer("send", Tick, time, false) + } stay using d.copy(queue = queue) } onTransition { case Throttle -> PassThrough ⇒ - stateData.queue foreach (send(stateData.ctx, _)) + stateData.queue foreach { msg ⇒ + log.debug("sending msg (Transition): {}") + send(stateData.ctx, msg) + } cancelTimer("send") case Throttle -> Blackhole ⇒ 
cancelTimer("send") } when(Blackhole) { - case Event(Send(_, _), _) ⇒ + case Event(Send(_, msg), _) ⇒ + log.debug("dropping msg {}", msg) stay } whenUnhandled { - case Event(SetRate(rate), d) ⇒ + case Event(SetContext(ctx), d) ⇒ stay using d.copy(ctx = ctx) + case Event(NetworkFailureInjector.SetRate(rate), d) ⇒ + sender ! "ok" if (rate > 0) { goto(Throttle) using d.copy(rateMBit = rate, queue = Queue()) } else if (rate == 0) { @@ -104,6 +117,11 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler } else { goto(PassThrough) } + case Event(NetworkFailureInjector.Disconnect(abort), Data(ctx, _, _)) ⇒ + sender ! "ok" + // TODO implement abort + ctx.getChannel.disconnect() + stay } initialize @@ -114,46 +132,42 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler } } - def throttleSend(rateMBit: Float) { - sender ! SetRate(rateMBit) + private var remote: Option[Address] = None + + override def messageReceived(ctx: ChannelHandlerContext, msg: MessageEvent) { + log.debug("upstream(queued): {}", msg) + receiver ! Send(ctx, msg) } - def throttleReceive(rateMBit: Float) { - receiver ! SetRate(rateMBit) - } - - override def handleUpstream(ctx: ChannelHandlerContext, evt: ChannelEvent) { - evt match { - case msg: MessageEvent ⇒ - receiver ! Send(ctx, msg) - case state: ChannelStateEvent ⇒ - state.getState match { - case BOUND ⇒ - state.getValue match { - case null ⇒ - remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } - case a: InetSocketAddress ⇒ - val addr = Address("akka", "XXX", a.getHostName, a.getPort) - channels.put(addr, state.getChannel) - remote = Some(addr) - } - case OPEN if state.getValue == false ⇒ - remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } + override def channelConnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { + state.getValue match { + case a: InetSocketAddress ⇒ + val addr = Address("akka", "", a.getHostName, a.getPort) + log.debug("connected to {}", addr) + TestConductor(system).failureInjectors.put(addr, FailureInjector(sender, receiver)) match { + case null ⇒ // okay + case fi ⇒ system.log.error("{} already registered for address {}", fi, addr) } - ctx.sendUpstream(evt) - case _ ⇒ - ctx.sendUpstream(evt) + remote = Some(addr) + sender ! SetContext(ctx) + case x ⇒ throw new IllegalArgumentException("unknown address type: " + x) } } - override def handleDownstream(ctx: ChannelHandlerContext, evt: ChannelEvent) { - evt match { - case msg: MessageEvent ⇒ - sender ! Send(ctx, msg) - case _ ⇒ - ctx.sendUpstream(evt) + override def channelDisconnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { + log.debug("disconnected from {}", remote) + remote = remote flatMap { addr ⇒ + TestConductor(system).failureInjectors.remove(addr) + system.stop(sender) + system.stop(receiver) + None } } + override def writeRequested(ctx: ChannelHandlerContext, msg: MessageEvent) { + log.debug("downstream(queued): {}", msg) + sender ! 
Send(ctx, msg) + } + } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala index 93aa6bc33d..72b15922f3 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala @@ -6,20 +6,20 @@ package akka.remote.testconductor import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props } import RemoteConnection.getAddrString import akka.util.duration._ -import TestConductorProtocol._ import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } import com.eaio.uuid.UUID import com.typesafe.config.ConfigFactory import akka.util.Timeout import akka.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.pattern.ask +import akka.pattern.{ ask, pipe } import akka.dispatch.Await import scala.util.control.NoStackTrace import akka.actor.Status import akka.event.LoggingAdapter import akka.actor.PoisonPill import akka.event.Logging +import akka.dispatch.Future trait Player extends BarrierSync { this: TestConductorExt ⇒ @@ -29,7 +29,7 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒ case x ⇒ x } - def startClient(port: Int) { + def startClient(port: Int): Future[Done] = { import ClientFSM._ import akka.actor.FSM._ import Settings.BarrierTimeout @@ -40,21 +40,21 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒ var waiting: ActorRef = _ def receive = { case fsm: ActorRef ⇒ waiting = sender; fsm ! SubscribeTransitionCallBack(self) - case Transition(_, Connecting, Connected) ⇒ waiting ! "okay" + case Transition(_, Connecting, Connected) ⇒ waiting ! Done case t: Transition[_] ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t)) - case CurrentState(_, Connected) ⇒ waiting ! "okay" + case CurrentState(_, Connected) ⇒ waiting ! Done case _: CurrentState[_] ⇒ } })) - Await.result(a ? client, Duration.Inf) + a ? client mapTo } override def enter(name: String*) { system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) name foreach { b ⇒ import Settings.BarrierTimeout - Await.result(client ? EnterBarrier(b), Duration.Inf) + Await.result(client ? Send(EnterBarrier(b)), Duration.Inf) system.log.debug("passed barrier {}", b) } } @@ -84,8 +84,7 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client case Event(msg: ClientOp, _) ⇒ stay replying Status.Failure(new IllegalStateException("not connected yet")) case Event(Connected, d @ Data(channel, _)) ⇒ - val hello = Hello.newBuilder.setName(settings.name).setAddress(TestConductor().address).build - channel.write(Wrapper.newBuilder.setHello(hello).build) + channel.write(Hello(settings.name, TestConductor().address)) goto(Connected) case Event(_: ConnectionFailure, _) ⇒ // System.exit(1) @@ -100,19 +99,41 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client case Event(Disconnected, _) ⇒ log.info("disconnected from TestConductor") throw new ConnectionFailure("disconnect") - case Event(msg: EnterBarrier, Data(channel, _)) ⇒ - sendMsg(channel)(msg) + case Event(Send(msg: EnterBarrier), Data(channel, None)) ⇒ + channel.write(msg) stay using Data(channel, Some(msg.name, sender)) - case Event(msg: Wrapper, Data(channel, Some((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ - if (msg.hasBarrier) { - val b = msg.getBarrier.getName - if (b != barrier) { - sender ! 
Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) - } else { - sender ! b - } + case Event(Send(d: Done), Data(channel, _)) ⇒ + channel.write(d) + stay + case Event(Send(x), _) ⇒ + log.warning("cannot send message {}", x) + stay + case Event(EnterBarrier(b), Data(channel, Some((barrier, sender)))) ⇒ + if (b != barrier) { + sender ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + } else { + sender ! b } stay using Data(channel, None) + case Event(ThrottleMsg(target, dir, rate), _) ⇒ + import settings.QueryTimeout + import context.dispatcher + TestConductor().failureInjectors.get(target.copy(system = "")) match { + case null ⇒ log.warning("cannot throttle unknown address {}", target) + case inj ⇒ + Future.sequence(inj.refs(dir) map (_ ? NetworkFailureInjector.SetRate(rate))) map (_ ⇒ Send(Done)) pipeTo self + } + stay + case Event(DisconnectMsg(target, abort), _) ⇒ + import settings.QueryTimeout + TestConductor().failureInjectors.get(target.copy(system = "")) match { + case null ⇒ log.warning("cannot disconnect unknown address {}", target) + case inj ⇒ inj.sender ? NetworkFailureInjector.Disconnect(abort) map (_ ⇒ Send(Done)) pipeTo self + } + stay + case Event(TerminateMsg(exit), _) ⇒ + System.exit(exit) + stay // needed because Java doesn’t have Nothing } onTermination { @@ -122,14 +143,6 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client initialize - private def sendMsg(channel: Channel)(msg: ClientOp) { - msg match { - case EnterBarrier(name) ⇒ - val enter = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build - channel.write(Wrapper.newBuilder.setBarrier(enter).build) - } - } - } class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { @@ -152,7 +165,7 @@ class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUps val channel = event.getChannel log.debug("message from {}: {}", getAddrString(channel), event.getMessage) event.getMessage match { - case msg: Wrapper if msg.getAllFields.size == 1 ⇒ + case msg: NetworkOp ⇒ fsm ! 
msg case msg ⇒ log.info("server {} sent garbage '{}', disconnecting", getAddrString(channel), msg) diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index a92b6295e2..b2f4baebbb 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -17,7 +17,8 @@ class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends Chan def getPipeline: ChannelPipeline = { val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) - new StaticChannelPipeline(encap ::: proto ::: handler :: Nil: _*) + val msg = List(new MsgEncoder, new MsgDecoder) + new StaticChannelPipeline(encap ::: proto ::: msg ::: handler :: Nil: _*) } } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/package.scala b/akka-remote/src/main/scala/akka/remote/testconductor/package.scala index 8ebeea90a9..b24279dbf6 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/package.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/package.scala @@ -16,4 +16,16 @@ package object testconductor { implicit def address2scala(addr: TCP.Address): Address = Address(addr.getProtocol, addr.getSystem, addr.getHost, addr.getPort) + implicit def direction2proto(dir: Direction): TCP.Direction = dir match { + case Direction.Send ⇒ TCP.Direction.Send + case Direction.Receive ⇒ TCP.Direction.Receive + case Direction.Both ⇒ TCP.Direction.Both + } + + implicit def direction2scala(dir: TCP.Direction): Direction = dir match { + case TCP.Direction.Send ⇒ Direction.Send + case TCP.Direction.Receive ⇒ Direction.Receive + case TCP.Direction.Both ⇒ Direction.Both + } + } \ No newline at end of file diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala index ab8bdadae6..ca4313b56b 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala @@ -1,6 +1,7 @@ package akka.remote import com.typesafe.config.{Config, ConfigFactory} +import akka.actor.Address trait AbstractRemoteActorMultiJvmSpec { def NrOfNodes: Int @@ -8,7 +9,6 @@ trait AbstractRemoteActorMultiJvmSpec { def PortRangeStart = 1990 def NodeRange = 1 to NrOfNodes - def PortRange = PortRangeStart to NrOfNodes private[this] val remotes: IndexedSeq[String] = { val nodesOpt = Option(AkkaRemoteSpec.testNodes).map(_.split(",").toIndexedSeq) diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index cae2917577..096d4c5a89 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -3,12 +3,24 @@ package akka.remote.testconductor import akka.remote.AkkaRemoteSpec import com.typesafe.config.ConfigFactory import akka.remote.AbstractRemoteActorMultiJvmSpec +import akka.actor.Props +import akka.actor.Actor +import akka.dispatch.Await +import akka.dispatch.Await.Awaitable +import akka.util.Duration +import 
akka.util.duration._ +import akka.testkit.ImplicitSender object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { override def NrOfNodes = 2 override def commonConfig = ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.actor.provider = akka.remote.RemoteActorRefProvider + akka.remote { + transport = akka.remote.testconductor.TestConductorTransport + log-received-messages = on + log-sent-messages = on + } akka.actor.debug { receive = on fsm = on @@ -19,34 +31,87 @@ object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { } """) def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) + + implicit def awaitHelper[T](w: Awaitable[T]) = new AwaitHelper(w) + class AwaitHelper[T](w: Awaitable[T]) { + def await: T = Await.result(w, Duration.Inf) + } } -import TestConductorMultiJvmSpec._ +class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpec.nameConfig(0)) { -class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(nameConfig(0)) { + import TestConductorMultiJvmSpec._ - val nodes = TestConductorMultiJvmSpec.NrOfNodes + val nodes = NrOfNodes - "running a test" in { - val tc = TestConductor(system) - tc.startController() + val tc = TestConductor(system) + + val echo = system.actorOf(Props(new Actor { + def receive = { + case x ⇒ testActor ! x; sender ! x + } + }), "echo") + + "running a test with barrier" in { + tc.startController().await barrier("start") barrier("first") tc.enter("begin") barrier("end") } + + "throttling" in { + expectMsg("start") + tc.throttle("node1", "node0", Direction.Send, 0.016).await + tc.enter("throttled_send") + within(1 second, 2 seconds) { + receiveN(10) must be(0 to 9) + } + tc.enter("throttled_send2") + tc.throttle("node1", "node0", Direction.Send, -1).await + + tc.throttle("node1", "node0", Direction.Receive, 0.016).await + tc.enter("throttled_recv") + receiveN(10, 500 millis) must be(10 to 19) + tc.enter("throttled_recv2") + tc.throttle("node1", "node0", Direction.Receive, -1).await + } } -class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(nameConfig(1)) { +class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(TestConductorMultiJvmSpec.nameConfig(1)) with ImplicitSender { - val nodes = TestConductorMultiJvmSpec.NrOfNodes + import TestConductorMultiJvmSpec._ - "running a test" in { + val nodes = NrOfNodes + + val tc = TestConductor(system) + + val echo = system.actorFor("akka://" + akkaSpec(0) + "/user/echo") + + "running a test with barrier" in { barrier("start") - val tc = TestConductor(system) - tc.startClient(4712) + tc.startClient(4712).await barrier("first") tc.enter("begin") barrier("end") } + + "throttling" in { + echo ! "start" + expectMsg("start") + tc.enter("throttled_send") + for (i <- 0 to 9) echo ! i + expectMsg(500 millis, 0) + within(1 second, 2 seconds) { + receiveN(9) must be(1 to 9) + } + tc.enter("throttled_send2", "throttled_recv") + for (i <- 10 to 19) echo ! 
i + expectMsg(500 millis, 10) + within(1 second, 2 seconds) { + receiveN(9) must be(11 to 19) + } + tc.enter("throttled_recv2") + } + } From c68df0635f2213d397649533db24a0a01ffe17c5 Mon Sep 17 00:00:00 2001 From: Roland Date: Sat, 5 May 2012 15:16:21 +0200 Subject: [PATCH 006/106] add previously forgotten TestConductorTransport --- .../TestConductorTransport.scala | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala new file mode 100644 index 0000000000..d03adebe9a --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala @@ -0,0 +1,21 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.remote.testconductor + +import akka.remote.netty.NettyRemoteTransport +import akka.remote.RemoteSettings +import akka.actor.ActorSystemImpl +import akka.remote.RemoteActorRefProvider +import org.jboss.netty.channel.ChannelHandler +import org.jboss.netty.channel.ChannelPipelineFactory + +class TestConductorTransport(_remoteSettings: RemoteSettings, _system: ActorSystemImpl, _provider: RemoteActorRefProvider) + extends NettyRemoteTransport(_remoteSettings, _system, _provider) { + + override def mkPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory = + new ChannelPipelineFactory { + def getPipeline = PipelineFactory(new NetworkFailureInjector(system) +: PipelineFactory.defaultStack(withTimeout) :+ endpoint) + } + +} \ No newline at end of file From a351e6ad9fb9ca24d69a20da9f6bb2028f901a91 Mon Sep 17 00:00:00 2001 From: Roland Date: Sat, 5 May 2012 22:14:42 +0200 Subject: [PATCH 007/106] implement more precise bandwith throttling - will keep track of theoretical packet boundaries and send on timer tick or send request according to actual time - will split packets if calculated release time is >100ms into the future (configurable) to simulate proper trickling --- akka-remote/src/main/resources/reference.conf | 5 + .../akka/remote/testconductor/Extension.scala | 1 + .../NetworkFailureInjector.scala | 131 +++++++++++++----- .../testconductor/TestConductorSpec.scala | 4 +- .../src/main/scala/akka/testkit/TestKit.scala | 6 +- 5 files changed, 109 insertions(+), 38 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 384d00b55d..f14ee3d87c 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -165,6 +165,11 @@ akka { # Timeout for interrogation of TestConductor’s Controller actor query-timeout = 5s + # Threshold for packet size in time unit above which the failure injector will + # split the packet and deliver in smaller portions; do not give value smaller + # than HashedWheelTimer resolution (would not make sense) + packet-split-threshold = 100ms + # Default port to start the conductor on; 0 means port = 0 diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala index bffa84847f..97f5dd7295 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala @@ -24,6 +24,7 @@ class TestConductorExt(val system: ExtendedActorSystem) 
extends Extension with C implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.barrier-timeout"), MILLISECONDS)) implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.query-timeout"), MILLISECONDS)) + val PacketSplitThreshold = Duration(config.getMilliseconds("akka.testconductor.packet-split-threshold"), MILLISECONDS) val name = config.getString("akka.testconductor.name") val host = config.getString("akka.testconductor.host") diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index 30e5308979..5e101dea0c 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -23,6 +23,13 @@ import akka.actor.Props import akka.actor.ActorRef import akka.event.Logging import org.jboss.netty.channel.SimpleChannelHandler +import scala.annotation.tailrec +import akka.util.Duration +import akka.actor.LoggingFSM +import org.jboss.netty.channel.Channels +import org.jboss.netty.channel.ChannelFuture +import org.jboss.netty.channel.ChannelFutureListener +import org.jboss.netty.channel.ChannelFuture case class FailureInjector(sender: ActorRef, receiver: ActorRef) { def refs(dir: Direction) = dir match { @@ -42,8 +49,10 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { val log = Logging(system, "FailureInjector") // everything goes via these Throttle actors to enable easy steering - private val sender = system.actorOf(Props(new Throttle(_.sendDownstream(_)))) - private val receiver = system.actorOf(Props(new Throttle(_.sendUpstream(_)))) + private val sender = system.actorOf(Props(new Throttle(Direction.Send))) + private val receiver = system.actorOf(Props(new Throttle(Direction.Receive))) + + private val packetSplitThreshold = TestConductor(system).Settings.PacketSplitThreshold /* * State, Data and Messages for the internal Throttle actor @@ -53,47 +62,40 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { private case object Throttle extends State private case object Blackhole extends State - private case class Data(ctx: ChannelHandlerContext, rateMBit: Float, queue: Queue[MessageEvent]) + private case class Data(lastSent: Long, rateMBit: Float, queue: Queue[Send]) - private case class Send(ctx: ChannelHandlerContext, msg: MessageEvent) + private case class Send(ctx: ChannelHandlerContext, future: Option[ChannelFuture], msg: AnyRef) private case class SetContext(ctx: ChannelHandlerContext) private case object Tick - private class Throttle(send: (ChannelHandlerContext, MessageEvent) ⇒ Unit) extends Actor with FSM[State, Data] { + private class Throttle(dir: Direction) extends Actor with LoggingFSM[State, Data] { import FSM._ - startWith(PassThrough, Data(null, -1, Queue())) + var channelContext: ChannelHandlerContext = _ + + startWith(PassThrough, Data(0, -1, Queue())) when(PassThrough) { - case Event(Send(ctx, msg), d) ⇒ + case Event(s @ Send(_, _, msg), _) ⇒ log.debug("sending msg (PassThrough): {}", msg) - send(ctx, msg) + send(s) stay } when(Throttle) { - case Event(Send(ctx, msg), d) ⇒ - if (!timerActive_?("send")) { - setTimer("send", Tick, (size(msg) / d.rateMBit) microseconds, false) - } - stay using d.copy(ctx = ctx, queue = d.queue.enqueue(msg)) - case Event(Tick, d) ⇒ - val (msg, queue) = d.queue.dequeue - 
log.debug("sending msg (Tick, {}/{} left): {}", d.queue.size, queue.size, msg) - send(d.ctx, msg) - if (queue.nonEmpty) { - val time = (size(queue.head) / d.rateMBit).microseconds - log.debug("scheduling next Tick in {}", time) - setTimer("send", Tick, time, false) - } - stay using d.copy(queue = queue) + case Event(s: Send, d @ Data(_, _, Queue())) ⇒ + stay using sendThrottled(d.copy(lastSent = System.nanoTime, queue = Queue(s))) + case Event(s: Send, data) ⇒ + stay using sendThrottled(data.copy(queue = data.queue.enqueue(s))) + case Event(Tick, data) ⇒ + stay using sendThrottled(data) } onTransition { case Throttle -> PassThrough ⇒ - stateData.queue foreach { msg ⇒ - log.debug("sending msg (Transition): {}") - send(stateData.ctx, msg) + for (s ← stateData.queue) { + log.debug("sending msg (Transition): {}", s.msg) + send(s) } cancelTimer("send") case Throttle -> Blackhole ⇒ @@ -101,32 +103,95 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { } when(Blackhole) { - case Event(Send(_, msg), _) ⇒ + case Event(Send(_, _, msg), _) ⇒ log.debug("dropping msg {}", msg) stay } whenUnhandled { - case Event(SetContext(ctx), d) ⇒ stay using d.copy(ctx = ctx) case Event(NetworkFailureInjector.SetRate(rate), d) ⇒ sender ! "ok" if (rate > 0) { - goto(Throttle) using d.copy(rateMBit = rate, queue = Queue()) + goto(Throttle) using d.copy(lastSent = System.nanoTime, rateMBit = rate, queue = Queue()) } else if (rate == 0) { goto(Blackhole) } else { goto(PassThrough) } + case Event(SetContext(ctx), _) ⇒ channelContext = ctx; stay case Event(NetworkFailureInjector.Disconnect(abort), Data(ctx, _, _)) ⇒ sender ! "ok" // TODO implement abort - ctx.getChannel.disconnect() + channelContext.getChannel.disconnect() stay } initialize - private def size(msg: MessageEvent) = msg.getMessage() match { + private def sendThrottled(d: Data): Data = { + val (data, toSend, toTick) = schedule(d) + for (s ← toSend) { + log.debug("sending msg (Tick): {}", s.msg) + send(s) + } + for (time ← toTick) { + log.debug("scheduling next Tick in {}", time) + setTimer("send", Tick, time, false) + } + data + } + + private def send(s: Send): Unit = dir match { + case Direction.Send ⇒ Channels.write(s.ctx, s.future getOrElse Channels.future(s.ctx.getChannel), s.msg) + case Direction.Receive ⇒ Channels.fireMessageReceived(s.ctx, s.msg) + case _ ⇒ + } + + private def schedule(d: Data): (Data, Seq[Send], Option[Duration]) = { + val now = System.nanoTime + @tailrec def rec(d: Data, toSend: Seq[Send]): (Data, Seq[Send], Option[Duration]) = { + if (d.queue.isEmpty) (d, toSend, None) + else { + val timeForPacket = d.lastSent + (1000 * size(d.queue.head.msg) / d.rateMBit).toLong + if (timeForPacket <= now) rec(Data(timeForPacket, d.rateMBit, d.queue.tail), toSend :+ d.queue.head) + else { + val deadline = now + packetSplitThreshold.toNanos + if (timeForPacket <= deadline) (d, toSend, Some((timeForPacket - now).nanos)) + else { + val micros = (deadline - d.lastSent) / 1000 + val (s1, s2) = split(d.queue.head, (micros * d.rateMBit / 8).toInt) + (d.copy(queue = s1 +: s2 +: d.queue.tail), toSend, Some(packetSplitThreshold)) + } + } + } + } + rec(d, Seq()) + } + + private def split(s: Send, bytes: Int): (Send, Send) = { + s.msg match { + case buf: ChannelBuffer ⇒ + val f = s.future map { f ⇒ + val newF = Channels.future(s.ctx.getChannel) + newF.addListener(new ChannelFutureListener { + def operationComplete(future: ChannelFuture) { + if (future.isCancelled) f.cancel() + else future.getCause match { + case null ⇒ + case 
thr ⇒ f.setFailure(thr) + } + } + }) + newF + } + val b = buf.slice() + b.writerIndex(b.readerIndex + bytes) + buf.readerIndex(buf.readerIndex + bytes) + (Send(s.ctx, f, b), Send(s.ctx, s.future, buf)) + } + } + + private def size(msg: AnyRef) = msg match { case b: ChannelBuffer ⇒ b.readableBytes() * 8 case _ ⇒ throw new UnsupportedOperationException("NetworkFailureInjector only supports ChannelBuffer messages") } @@ -136,7 +201,7 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { override def messageReceived(ctx: ChannelHandlerContext, msg: MessageEvent) { log.debug("upstream(queued): {}", msg) - receiver ! Send(ctx, msg) + receiver ! Send(ctx, Option(msg.getFuture), msg.getMessage) } override def channelConnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { @@ -166,7 +231,7 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { override def writeRequested(ctx: ChannelHandlerContext, msg: MessageEvent) { log.debug("downstream(queued): {}", msg) - sender ! Send(ctx, msg) + sender ! Send(ctx, Option(msg.getFuture), msg.getMessage) } } diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 096d4c5a89..c7e848caf3 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -62,7 +62,7 @@ class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpe "throttling" in { expectMsg("start") - tc.throttle("node1", "node0", Direction.Send, 0.016).await + tc.throttle("node1", "node0", Direction.Send, 0.01).await tc.enter("throttled_send") within(1 second, 2 seconds) { receiveN(10) must be(0 to 9) @@ -70,7 +70,7 @@ class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpe tc.enter("throttled_send2") tc.throttle("node1", "node0", Direction.Send, -1).await - tc.throttle("node1", "node0", Direction.Receive, 0.016).await + tc.throttle("node1", "node0", Direction.Receive, 0.01).await tc.enter("throttled_recv") receiveN(10, 500 millis) must be(10 to 19) tc.enter("throttled_recv2") diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index bcac5c24cf..cbcfc2a77d 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -69,7 +69,7 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor { *
  * class Test extends TestKit(ActorSystem()) {
  *     try {
- *     
+ *
  *       val test = system.actorOf(Props[SomeActor]
  *
  *       within (1 second) {
@@ -77,7 +77,7 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor {
  *         expectMsg(Result1) // bounded to 1 second
  *         expectMsg(Result2) // bounded to the remainder of the 1 second
  *       }
- *     
+ *
  *     } finally {
  *       system.shutdown()
  *     }
@@ -86,7 +86,7 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor {
  *
  * Beware of two points:
  *
- *  - the ActorSystem passed into the constructor needs to be shutdown, 
+ *  - the ActorSystem passed into the constructor needs to be shutdown,
  *    otherwise thread pools and memory will be leaked
  *  - this trait is not thread-safe (only one actor with one queue, one stack
  *    of `within` blocks); it is expected that the code is executed from a

From 0076bddb523a358d0c00ba6fd5725deaffd926e3 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Mon, 7 May 2012 07:36:02 +0200
Subject: [PATCH 008/106] optimize partial message scheduling

- split only right before sending (if necessary); see the sketch below the
  diffstat
- do not reschedule the Tick if one is already pending, because the head
  of the queue does not change, so the old data are still correct
- make the test a bit less fickle w.r.t. timing
---
 .../NetworkFailureInjector.scala              | 23 ++++++++++---------
 .../testconductor/TestConductorSpec.scala     |  6 ++---
 2 files changed, 15 insertions(+), 14 deletions(-)
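
Not part of the patch: a minimal stand-alone sketch of the release-time and
split calculation described above. The names rateMBit, lastSent and the
packet-split threshold are taken from the patch, but the helper itself is an
illustrative simplification of the Throttle FSM's schedule() (sizes in bits as
in the real size() helper, rates in megabit/s, and scala.concurrent.duration
instead of this codebase's akka.util.Duration), not the actual code.

    import scala.concurrent.duration._

    object ThrottleMath {

      // A packet of `bits` size may leave the pipe at rateMBit megabit/s no
      // earlier than lastSent + bits / (rateMBit * 1e6) seconds, i.e.
      // lastSent + 1000 * bits / rateMBit nanoseconds.
      def releaseTime(lastSentNanos: Long, bits: Int, rateMBit: Float): Long =
        lastSentNanos + (1000 * bits / rateMBit).toLong

      // Decide what to do with the head-of-queue packet, assuming it is not
      // yet due (if it were, it would simply be sent whole):
      //   Left(delay)  - keep it whole and schedule the next Tick after `delay`
      //   Right(bytes) - the release time lies beyond the split threshold, so
      //                  send the first `bytes` bytes now and re-queue the rest
      def decide(nowNanos: Long, lastSentNanos: Long, bits: Int, rateMBit: Float,
                 splitThreshold: FiniteDuration): Either[FiniteDuration, Int] = {
        val due     = releaseTime(lastSentNanos, bits, rateMBit)
        val splitAt = lastSentNanos + splitThreshold.toNanos
        if (nowNanos < splitAt)
          Left(((due - nowNanos) min (splitAt - nowNanos)).nanos)
        else
          // elapsed microseconds * megabit/s = bits; divide by 8 for bytes
          Right((((nowNanos - lastSentNanos) / 1000) * rateMBit / 8).toInt)
      }
    }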

diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
index 5e101dea0c..b853523979 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
@@ -83,8 +83,8 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler {
     }
 
     when(Throttle) {
-      case Event(s: Send, d @ Data(_, _, Queue())) ⇒
-        stay using sendThrottled(d.copy(lastSent = System.nanoTime, queue = Queue(s)))
+      case Event(s: Send, data @ Data(_, _, Queue())) ⇒
+        stay using sendThrottled(data.copy(lastSent = System.nanoTime, queue = Queue(s)))
       case Event(s: Send, data) ⇒
         stay using sendThrottled(data.copy(queue = data.queue.enqueue(s)))
       case Event(Tick, data) ⇒
@@ -134,10 +134,11 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler {
         log.debug("sending msg (Tick): {}", s.msg)
         send(s)
       }
-      for (time ← toTick) {
-        log.debug("scheduling next Tick in {}", time)
-        setTimer("send", Tick, time, false)
-      }
+      if (!timerActive_?("send"))
+        for (time ← toTick) {
+          log.debug("scheduling next Tick in {}", time)
+          setTimer("send", Tick, time, false)
+        }
       data
     }
 
@@ -155,12 +156,12 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler {
           val timeForPacket = d.lastSent + (1000 * size(d.queue.head.msg) / d.rateMBit).toLong
           if (timeForPacket <= now) rec(Data(timeForPacket, d.rateMBit, d.queue.tail), toSend :+ d.queue.head)
           else {
-            val deadline = now + packetSplitThreshold.toNanos
-            if (timeForPacket <= deadline) (d, toSend, Some((timeForPacket - now).nanos))
+            val splitThreshold = d.lastSent + packetSplitThreshold.toNanos
+            if (now < splitThreshold) (d, toSend, Some((timeForPacket - now).nanos min (splitThreshold - now).nanos))
             else {
-              val micros = (deadline - d.lastSent) / 1000
-              val (s1, s2) = split(d.queue.head, (micros * d.rateMBit / 8).toInt)
-              (d.copy(queue = s1 +: s2 +: d.queue.tail), toSend, Some(packetSplitThreshold))
+              val microsToSend = (now - d.lastSent) / 1000
+              val (s1, s2) = split(d.queue.head, (microsToSend * d.rateMBit / 8).toInt)
+              (d.copy(queue = s2 +: d.queue.tail), toSend :+ s1, Some((timeForPacket - now).nanos min packetSplitThreshold))
             }
           }
         }
diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
index c7e848caf3..16193f7bd3 100644
--- a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
+++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
@@ -64,7 +64,7 @@ class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpe
     expectMsg("start")
     tc.throttle("node1", "node0", Direction.Send, 0.01).await
     tc.enter("throttled_send")
-    within(1 second, 2 seconds) {
+    within(0.6 seconds, 2 seconds) {
       receiveN(10) must be(0 to 9)
     }
     tc.enter("throttled_send2")
@@ -102,13 +102,13 @@ class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(TestConductorMultiJvmSpe
     tc.enter("throttled_send")
     for (i <- 0 to 9) echo ! i
     expectMsg(500 millis, 0)
-    within(1 second, 2 seconds) {
+    within(0.6 seconds, 2 seconds) {
       receiveN(9) must be(1 to 9)
     }
     tc.enter("throttled_send2", "throttled_recv")
     for (i <- 10 to 19) echo ! i
     expectMsg(500 millis, 10)
-    within(1 second, 2 seconds) {
+    within(0.6 seconds, 2 seconds) {
       receiveN(9) must be(11 to 19)
     }
     tc.enter("throttled_recv2")

From f81184236fa0748304a26ae133d791d074c95536 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Mon, 7 May 2012 08:04:15 +0200
Subject: [PATCH 009/106] wait for initial crew before starting the party

- the Controller is started with the required initial number of
  participants
- if that is >0, it holds off sending Done to the clients until that
  number has connected, and then sets it to zero (gating rule sketched
  below the diffstat)
- if that is <=0, Done is sent back immediately upon connect
---
 .../akka/remote/testconductor/Conductor.scala  | 17 +++++++++++------
 .../akka/remote/testconductor/Features.scala   |  2 +-
 .../akka/remote/testconductor/Player.scala     | 18 ++++++++++++++++--
 .../testconductor/TestConductorSpec.scala      |  8 +-------
 4 files changed, 29 insertions(+), 16 deletions(-)
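
Not part of the patch: a self-contained sketch of the gating rule described
above. The real Controller is an actor that also forwards ClientConnected to
the BarrierCoordinator; the names initialParticipants and Done come from the
patch, the class itself is only illustrative.

    // Returns, for each newly connected client, the list of clients that
    // should be sent Done at that point.
    class ParticipantGate(initialParticipants: Int) {
      private var remaining = initialParticipants
      private var waiting   = List.empty[String]  // connected but not yet released

      def clientConnected(name: String): List[String] =
        if (remaining <= 0) List(name)            // party already started: ack at once
        else {
          waiting ::= name
          if (waiting.size == remaining) {        // initial crew complete: release all
            remaining = 0
            val all = waiting
            waiting = Nil
            all
          } else Nil
        }
    }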

diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
index c9cbeadf83..7e3d315fea 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -33,9 +33,9 @@ trait Conductor extends RunControl with FailureInject { this: TestConductorExt 
     case x    ⇒ x
   }
 
-  override def startController(): Future[Int] = {
+  override def startController(participants: Int): Future[Int] = {
     if (_controller ne null) throw new RuntimeException("TestConductorServer was already started")
-    _controller = system.actorOf(Props[Controller], "controller")
+    _controller = system.actorOf(Props(new Controller(participants)), "controller")
     import Settings.BarrierTimeout
     controller ? GetPort flatMap { case port: Int ⇒ startClient(port) map (_ ⇒ port) }
   }
@@ -162,7 +162,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi
       log.warning("client {} sent unsupported message {}", getAddrString(channel), msg)
       channel.close()
       stop()
-    case Event(Send(msg: EnterBarrier), _) ⇒
+    case Event(Send(msg @ (_: EnterBarrier | _: Done)), _) ⇒
       channel.write(msg)
       stay
     case Event(Send(msg), None) ⇒
@@ -185,9 +185,11 @@ object Controller {
   case class NodeInfo(name: String, addr: Address, fsm: ActorRef)
 }
 
-class Controller extends Actor {
+class Controller(_participants: Int) extends Actor {
   import Controller._
 
+  var initialParticipants = _participants
+
   val settings = TestConductor().Settings
   val connection = RemoteConnection(Server, settings.host, settings.port,
     new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler")))
@@ -199,8 +201,11 @@ class Controller extends Actor {
     case ClientConnected(name, addr) ⇒
       nodes += name -> NodeInfo(name, addr, sender)
       barrier forward ClientConnected
-    case ClientConnected ⇒
-      barrier forward ClientConnected
+      if (initialParticipants <= 0) sender ! Done
+      else if (nodes.size == initialParticipants) {
+        for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done)
+        initialParticipants = 0
+      }
     case ClientDisconnected(name) ⇒
       nodes -= name
       barrier forward ClientDisconnected
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
index b94f205726..336d04c368 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
@@ -57,7 +57,7 @@ trait RunControl {
   /**
    * Start the server port, returns the port number.
    */
-  def startController(): Future[Int]
+  def startController(participants: Int): Future[Int]
 
   /**
    * Get the actual port used by the server.
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
index 72b15922f3..f7d2fbd532 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
@@ -40,7 +40,8 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒
       var waiting: ActorRef = _
       def receive = {
         case fsm: ActorRef                        ⇒ waiting = sender; fsm ! SubscribeTransitionCallBack(self)
-        case Transition(_, Connecting, Connected) ⇒ waiting ! Done
+        case Transition(_, Connecting, AwaitDone) ⇒ // step 1, not there yet
+        case Transition(_, AwaitDone, Connected)  ⇒ waiting ! Done
         case t: Transition[_]                     ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t))
         case CurrentState(_, Connected)           ⇒ waiting ! Done
         case _: CurrentState[_]                   ⇒
@@ -63,6 +64,7 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒
 object ClientFSM {
   sealed trait State
   case object Connecting extends State
+  case object AwaitDone extends State
   case object Connected extends State
 
   case class Data(channel: Channel, barrier: Option[(String, ActorRef)])
@@ -85,7 +87,7 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
       stay replying Status.Failure(new IllegalStateException("not connected yet"))
     case Event(Connected, d @ Data(channel, _)) ⇒
       channel.write(Hello(settings.name, TestConductor().address))
-      goto(Connected)
+      goto(AwaitDone)
     case Event(_: ConnectionFailure, _) ⇒
       // System.exit(1)
       stop
@@ -95,6 +97,18 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
       stop
   }
 
+  when(AwaitDone, stateTimeout = settings.BarrierTimeout.duration) {
+    case Event(Done, _) ⇒
+      log.debug("received Done: starting test")
+      goto(Connected)
+    case Event(msg: ClientOp, _) ⇒
+      stay replying Status.Failure(new IllegalStateException("not connected yet"))
+    case Event(StateTimeout, _) ⇒
+      log.error("connect timeout to TestConductor")
+      // System.exit(1)
+      stop
+  }
+
   when(Connected) {
     case Event(Disconnected, _) ⇒
       log.info("disconnected from TestConductor")
diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
index 16193f7bd3..512757c130 100644
--- a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
+++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
@@ -53,11 +53,8 @@ class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpe
   }), "echo")
 
   "running a test with barrier" in {
-    tc.startController().await
-    barrier("start")
-    barrier("first")
+    tc.startController(2).await
     tc.enter("begin")
-    barrier("end")
   }
 
   "throttling" in {
@@ -89,11 +86,8 @@ class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(TestConductorMultiJvmSpe
   val echo = system.actorFor("akka://" + akkaSpec(0) + "/user/echo")
 
   "running a test with barrier" in {
-    barrier("start")
     tc.startClient(4712).await
-    barrier("first")
     tc.enter("begin")
-    barrier("end")
   }
 
   "throttling" in {

From d8268f8e6fe93f3d7a428c3b171325774c896b8b Mon Sep 17 00:00:00 2001
From: Roland 
Date: Mon, 7 May 2012 18:12:53 +0200
Subject: [PATCH 010/106] fix BuilderParent in generated
 TestConductorProtocol.java

---
 .../remote/testconductor/TestConductorProtocol.java    | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
index f112a1b0c2..4b9da03059 100644
--- a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
+++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
@@ -460,7 +460,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -1242,7 +1242,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -1750,7 +1750,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -2255,7 +2255,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -2861,7 +2861,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }

From 33cea733a315bc5c8306a81f7ddbb6853360c319 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Tue, 8 May 2012 10:05:14 +0200
Subject: [PATCH 011/106] rename mkPipeline => createPipeline

---
 akka-remote/src/main/scala/akka/remote/netty/Client.scala   | 2 +-
 .../main/scala/akka/remote/netty/NettyRemoteSupport.scala   | 6 +++---
 akka-remote/src/main/scala/akka/remote/netty/Server.scala   | 2 +-
 .../akka/remote/testconductor/TestConductorTransport.scala  | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala
index cf143650bc..4735132534 100644
--- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala
+++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala
@@ -155,7 +155,7 @@ class ActiveRemoteClient private[akka] (
       openChannels = new DefaultDisposableChannelGroup(classOf[RemoteClient].getName)
 
       val b = new ClientBootstrap(netty.clientChannelFactory)
-      b.setPipelineFactory(netty.mkPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), true))
+      b.setPipelineFactory(netty.createPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), true))
       b.setOption("tcpNoDelay", true)
       b.setOption("keepAlive", true)
       b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis)
diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
index 35ef3bf7fd..60c2ac6097 100644
--- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
@@ -86,7 +86,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor
    * This method is factored out to provide an extension point in case the
    * pipeline shall be changed. It is recommended to use
    */
-  def mkPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
+  def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
     PipelineFactory(Seq(endpoint), withTimeout)
 
   private val remoteClients = new HashMap[Address, RemoteClient]
@@ -98,13 +98,13 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor
 
   /**
    * Override this method to inject a subclass of NettyRemoteServer instead of
-   * the normal one, e.g. for altering the pipeline.
+   * the normal one, e.g. for inserting security hooks.
    */
   protected def createServer(): NettyRemoteServer = new NettyRemoteServer(this)
 
   /**
    * Override this method to inject a subclass of RemoteClient instead of
-   * the normal one, e.g. for altering the pipeline. Get this transport’s
+   * the normal one, e.g. for inserting security hooks. Get this transport’s
    * address from `this.address`.
    */
   protected def createClient(recipient: Address): RemoteClient = new ActiveRemoteClient(this, recipient, address)
diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala
index f9d4ede1d8..87993f783d 100644
--- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala
+++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala
@@ -35,7 +35,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) {
 
   private val bootstrap = {
     val b = new ServerBootstrap(factory)
-    b.setPipelineFactory(netty.mkPipeline(new RemoteServerHandler(openChannels, netty), false))
+    b.setPipelineFactory(netty.createPipeline(new RemoteServerHandler(openChannels, netty), false))
     b.setOption("backlog", settings.Backlog)
     b.setOption("tcpNoDelay", true)
     b.setOption("child.keepAlive", true)
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
index d03adebe9a..2c51c2cf18 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
@@ -13,7 +13,7 @@ import org.jboss.netty.channel.ChannelPipelineFactory
 class TestConductorTransport(_remoteSettings: RemoteSettings, _system: ActorSystemImpl, _provider: RemoteActorRefProvider)
   extends NettyRemoteTransport(_remoteSettings, _system, _provider) {
 
-  override def mkPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
+  override def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
     new ChannelPipelineFactory {
       def getPipeline = PipelineFactory(new NetworkFailureInjector(system) +: PipelineFactory.defaultStack(withTimeout) :+ endpoint)
     }

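A hedged sketch of the extension point renamed above: a transport subclass can prepend its own handler to the default stack, which is exactly the shape TestConductorTransport uses for its NetworkFailureInjector. The AuditHandler class, the AuditingRemoteTransport name and the import locations are assumptions made for the sketch, not part of the patch.

    import akka.actor.ActorSystemImpl
    import akka.remote.{ RemoteActorRefProvider, RemoteSettings }
    import akka.remote.netty.{ NettyRemoteTransport, PipelineFactory }
    import org.jboss.netty.channel.{ ChannelHandler, ChannelPipelineFactory, SimpleChannelHandler }

    // hypothetical pass-through handler; stands in for whatever a subclass wants to insert
    class AuditHandler extends SimpleChannelHandler

    class AuditingRemoteTransport(s: RemoteSettings, sys: ActorSystemImpl, p: RemoteActorRefProvider)
      extends NettyRemoteTransport(s, sys, p) {

      // prepend the extra handler to the default stack, then append the endpoint handler
      override def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
        new ChannelPipelineFactory {
          def getPipeline = PipelineFactory(new AuditHandler +: PipelineFactory.defaultStack(withTimeout) :+ endpoint)
        }
    }
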
From e950045015e16659b34c9901b7daa34a0e1f185e Mon Sep 17 00:00:00 2001
From: Roland 
Date: Tue, 8 May 2012 11:08:43 +0200
Subject: [PATCH 012/106] handle barrier failures better

---
 .../testconductor/TestConductorProtocol.java  | 81 ++++++++++++++++---
 .../main/protocol/TestConductorProtocol.proto |  1 +
 .../akka/remote/testconductor/Conductor.scala | 22 +++--
 .../akka/remote/testconductor/DataTypes.scala |  7 +-
 .../akka/remote/testconductor/Player.scala    |  3 +
 5 files changed, 96 insertions(+), 18 deletions(-)

diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
index 4b9da03059..3d6c145097 100644
--- a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
+++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
@@ -1543,6 +1543,10 @@ public final class TestConductorProtocol {
     // required string name = 1;
     boolean hasName();
     String getName();
+    
+    // optional bool failed = 2;
+    boolean hasFailed();
+    boolean getFailed();
   }
   public static final class EnterBarrier extends
       com.google.protobuf.GeneratedMessage
@@ -1605,8 +1609,19 @@ public final class TestConductorProtocol {
       }
     }
     
+    // optional bool failed = 2;
+    public static final int FAILED_FIELD_NUMBER = 2;
+    private boolean failed_;
+    public boolean hasFailed() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public boolean getFailed() {
+      return failed_;
+    }
+    
     private void initFields() {
       name_ = "";
+      failed_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -1627,6 +1642,9 @@ public final class TestConductorProtocol {
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         output.writeBytes(1, getNameBytes());
       }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBool(2, failed_);
+      }
       getUnknownFields().writeTo(output);
     }
     
@@ -1640,6 +1658,10 @@ public final class TestConductorProtocol {
         size += com.google.protobuf.CodedOutputStream
           .computeBytesSize(1, getNameBytes());
       }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(2, failed_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -1766,6 +1788,8 @@ public final class TestConductorProtocol {
         super.clear();
         name_ = "";
         bitField0_ = (bitField0_ & ~0x00000001);
+        failed_ = false;
+        bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
       
@@ -1808,6 +1832,10 @@ public final class TestConductorProtocol {
           to_bitField0_ |= 0x00000001;
         }
         result.name_ = name_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.failed_ = failed_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -1827,6 +1855,9 @@ public final class TestConductorProtocol {
         if (other.hasName()) {
           setName(other.getName());
         }
+        if (other.hasFailed()) {
+          setFailed(other.getFailed());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -1867,6 +1898,11 @@ public final class TestConductorProtocol {
               name_ = input.readBytes();
               break;
             }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              failed_ = input.readBool();
+              break;
+            }
           }
         }
       }
@@ -1909,6 +1945,27 @@ public final class TestConductorProtocol {
         onChanged();
       }
       
+      // optional bool failed = 2;
+      private boolean failed_ ;
+      public boolean hasFailed() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public boolean getFailed() {
+        return failed_;
+      }
+      public Builder setFailed(boolean value) {
+        bitField0_ |= 0x00000002;
+        failed_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearFailed() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        failed_ = false;
+        onChanged();
+        return this;
+      }
+      
       // @@protoc_insertion_point(builder_scope:EnterBarrier)
     }
     
@@ -3300,17 +3357,17 @@ public final class TestConductorProtocol {
       "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" +
       "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" +
       "tFailure\022\014\n\004done\030\004 \001(\t\"0\n\005Hello\022\014\n\004name\030" +
-      "\001 \002(\t\022\031\n\007address\030\002 \002(\0132\010.Address\"\034\n\014Ente" +
-      "rBarrier\022\014\n\004name\030\001 \002(\t\"G\n\007Address\022\020\n\010pro" +
-      "tocol\030\001 \002(\t\022\016\n\006system\030\002 \002(\t\022\014\n\004host\030\003 \002(" +
-      "\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInjectFailure\022\032\n\007fai" +
-      "lure\030\001 \002(\0162\t.FailType\022\035\n\tdirection\030\002 \001(\016" +
-      "2\n.Direction\022\031\n\007address\030\003 \001(\0132\010.Address\022",
-      "\020\n\010rateMBit\030\006 \001(\002\022\021\n\texitValue\030\007 \001(\005*A\n\010" +
-      "FailType\022\014\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t" +
-      "\n\005Abort\020\003\022\014\n\010Shutdown\020\004*,\n\tDirection\022\010\n\004" +
-      "Send\020\001\022\013\n\007Receive\020\002\022\010\n\004Both\020\003B\035\n\031akka.re" +
-      "mote.testconductorH\001"
+      "\001 \002(\t\022\031\n\007address\030\002 \002(\0132\010.Address\",\n\014Ente" +
+      "rBarrier\022\014\n\004name\030\001 \002(\t\022\016\n\006failed\030\002 \001(\010\"G" +
+      "\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006system\030\002 " +
+      "\002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInje" +
+      "ctFailure\022\032\n\007failure\030\001 \002(\0162\t.FailType\022\035\n" +
+      "\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007address\030",
+      "\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(\002\022\021\n\texi" +
+      "tValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Throttle\020\001\022\016" +
+      "\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shutdown\020\004*" +
+      ",\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receive\020\002\022\010\n\004B" +
+      "oth\020\003B\035\n\031akka.remote.testconductorH\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -3338,7 +3395,7 @@ public final class TestConductorProtocol {
           internal_static_EnterBarrier_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_EnterBarrier_descriptor,
-              new java.lang.String[] { "Name", },
+              new java.lang.String[] { "Name", "Failed", },
               akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class,
               akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class);
           internal_static_Address_descriptor =
diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote/src/main/protocol/TestConductorProtocol.proto
index e483bf4f01..007965b2e8 100644
--- a/akka-remote/src/main/protocol/TestConductorProtocol.proto
+++ b/akka-remote/src/main/protocol/TestConductorProtocol.proto
@@ -25,6 +25,7 @@ message Hello {
 
 message EnterBarrier {
   required string name = 1;
+  optional bool failed = 2;
 }
 
 message Address {
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
index 7e3d315fea..2bbae6d28b 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -22,6 +22,8 @@ import akka.event.LoggingReceive
 import akka.actor.Address
 import java.net.InetSocketAddress
 import akka.dispatch.Future
+import akka.actor.OneForOneStrategy
+import akka.actor.SupervisorStrategy
 
 trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒
 
@@ -194,6 +196,15 @@ class Controller(_participants: Int) extends Actor {
   val connection = RemoteConnection(Server, settings.host, settings.port,
     new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler")))
 
+  override def supervisorStrategy = OneForOneStrategy() {
+    case e: BarrierCoordinator.BarrierTimeoutException ⇒ SupervisorStrategy.Resume
+    case e: BarrierCoordinator.WrongBarrierException ⇒
+      // I think we are lacking a means of communication here: this is not correct!
+      for (i ← 1 to e.data.clients) barrier ! ClientConnected
+      for (c ← e.data.arrived) c ! BarrierFailed(e.barrier)
+      SupervisorStrategy.Restart
+  }
+
   val barrier = context.actorOf(Props[BarrierCoordinator], "barriers")
   var nodes = Map[String, NodeInfo]()
 
@@ -240,7 +251,8 @@ object BarrierCoordinator {
   case object Waiting extends State
 
   case class Data(clients: Int, barrier: String, arrived: List[ActorRef])
-  class BarrierTimeoutException(msg: String) extends RuntimeException(msg) with NoStackTrace
+  class BarrierTimeoutException(val data: Data) extends RuntimeException(data.barrier) with NoStackTrace
+  class WrongBarrierException(val barrier: String, val client: ActorRef, val data: Data) extends RuntimeException(barrier) with NoStackTrace
 }
 
 class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] {
@@ -262,13 +274,13 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
   }
 
   onTransition {
-    case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, 30 seconds, false)
+    case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, TestConductor().Settings.BarrierTimeout.duration, false)
     case Waiting -> Idle ⇒ cancelTimer("Timeout")
   }
 
   when(Waiting) {
     case Event(e @ EnterBarrier(name), d @ Data(num, barrier, arrived)) ⇒
-      if (name != barrier) throw new IllegalStateException("trying enter barrier '" + name + "' while barrier '" + barrier + "' is active")
+      if (name != barrier) throw new WrongBarrierException(barrier, sender, d)
       val together = sender :: arrived
       if (together.size == num) {
         together foreach (_ ! Send(e))
@@ -287,8 +299,8 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
       } else {
         stay using d.copy(clients = expected)
       }
-    case Event(StateTimeout, Data(num, barrier, arrived)) ⇒
-      throw new BarrierTimeoutException("only " + arrived.size + " of " + num + " arrived at barrier " + barrier)
+    case Event(StateTimeout, data) ⇒
+      throw new BarrierTimeoutException(data)
   }
 
   initialize
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
index 90d7eeccd5..cadd69f786 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
@@ -19,6 +19,7 @@ sealed trait NetworkOp // messages sent over the wire
 
 case class Hello(name: String, addr: Address) extends NetworkOp
 case class EnterBarrier(name: String) extends ClientOp with ServerOp with NetworkOp
+case class BarrierFailed(name: String) extends NetworkOp
 case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends ServerOp
 case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends NetworkOp
 case class Disconnect(node: String, target: String, abort: Boolean) extends ServerOp
@@ -41,6 +42,8 @@ class MsgEncoder extends OneToOneEncoder {
           w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr))
         case EnterBarrier(name) ⇒
           w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name))
+        case BarrierFailed(name) ⇒
+          w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setFailed(true))
         case ThrottleMsg(target, dir, rate) ⇒
           w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target)
             .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate))
@@ -64,7 +67,9 @@ class MsgDecoder extends OneToOneDecoder {
         val h = w.getHello
         Hello(h.getName, h.getAddress)
       } else if (w.hasBarrier) {
-        EnterBarrier(w.getBarrier.getName)
+        val barrier = w.getBarrier
+        if (barrier.hasFailed && barrier.getFailed) BarrierFailed(barrier.getName)
+        else EnterBarrier(w.getBarrier.getName)
       } else if (w.hasFailure) {
         val f = w.getFailure
         import TCP.{ FailType ⇒ FT }
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
index f7d2fbd532..6e78610cfb 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
@@ -129,6 +129,9 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
         sender ! b
       }
       stay using Data(channel, None)
+    case Event(BarrierFailed(b), Data(channel, Some((_, sender)))) ⇒
+      sender ! Status.Failure(new RuntimeException("barrier failed: " + b))
+      stay using Data(channel, None)
     case Event(ThrottleMsg(target, dir, rate), _) ⇒
       import settings.QueryTimeout
       import context.dispatcher

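To make the new wire format concrete, here is a minimal sketch (not part of the patch) of how the optional failed flag distinguishes a failed barrier from a normal one, using only the generated builders visible in the diff; it assumes the generated TestConductorProtocol class and the protobuf runtime are on the classpath.

    import akka.remote.testconductor.{ TestConductorProtocol ⇒ TCP }

    object BarrierWireSketch extends App {
      // a failed barrier carries the flag, a plain barrier entry leaves it unset
      val failedMsg = TCP.EnterBarrier.newBuilder.setName("startup").setFailed(true).build
      val normalMsg = TCP.EnterBarrier.newBuilder.setName("startup").build

      // the same check MsgDecoder performs above
      println(failedMsg.hasFailed && failedMsg.getFailed) // true  -> decoded as BarrierFailed("startup")
      println(normalMsg.hasFailed)                        // false -> decoded as EnterBarrier("startup")
    }
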
From 9a33f468c0082ee6735d7f4684945ca06f531fcc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= 
Date: Tue, 8 May 2012 13:56:24 +0200
Subject: [PATCH 013/106] Adding a doc diagram for the Test Conductor Extension

---
 akka-docs/dev/multi-jvm-testing.rst            |   7 +++++++
 akka-docs/images/akka-remote-testconductor.png | Bin 0 -> 18288 bytes
 2 files changed, 7 insertions(+)
 create mode 100644 akka-docs/images/akka-remote-testconductor.png

diff --git a/akka-docs/dev/multi-jvm-testing.rst b/akka-docs/dev/multi-jvm-testing.rst
index 33c7dc7507..d19344e751 100644
--- a/akka-docs/dev/multi-jvm-testing.rst
+++ b/akka-docs/dev/multi-jvm-testing.rst
@@ -380,3 +380,10 @@ same machine at the same time.
 The machines that are used for testing (slaves) should have ssh access to the outside world and be able to talk
 to each other with the internal addresses given. On the master machine an ssh client is required. Obviously git
 and sbt should be installed on both master and slave machines.
+
+The Test Conductor Extension
+============================
+
+The Test Conductor Extension is aimed at enhancing the multi-JVM and multi-node testing facilities.
+
+.. image:: ../images/akka-remote-testconductor.png
diff --git a/akka-docs/images/akka-remote-testconductor.png b/akka-docs/images/akka-remote-testconductor.png
new file mode 100644
index 0000000000000000000000000000000000000000..b21353832670a12eb8130292416df62c6f1209a7
GIT binary patch
literal 18288
[base85-encoded PNG image data omitted]

literal 0
HcmV?d00001


From d931a6e727ee70952136f42250132e28a8aa34b4 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Thu, 10 May 2012 10:24:05 +0200
Subject: [PATCH 014/106] break out TestConductor stuff into akka-remote-tests
 project

---
 .../testconductor/TestConductorProtocol.java  |  0
 .../main/protocol/TestConductorProtocol.proto |  0
 .../src/main/resources/reference.conf         | 34 +++++++++++++++++++
 .../akka/remote/testconductor/Conductor.scala | 12 +++----
 .../akka/remote/testconductor/DataTypes.scala |  0
 .../akka/remote/testconductor/Extension.scala |  0
 .../akka/remote/testconductor/Features.scala  |  0
 .../NetworkFailureInjector.scala              |  0
 .../akka/remote/testconductor/Player.scala    |  0
 .../testconductor/RemoteConnection.scala      |  0
 .../TestConductorTransport.scala              |  0
 .../akka/remote/testconductor/package.scala   |  0
 .../testconductor/TestConductorSpec.scala     |  0
 akka-remote/src/main/resources/reference.conf | 26 --------------
 project/AkkaBuild.scala                       | 20 ++++++++++-
 15 files changed, 59 insertions(+), 33 deletions(-)
 rename {akka-remote => akka-remote-tests}/src/main/java/akka/remote/testconductor/TestConductorProtocol.java (100%)
 rename {akka-remote => akka-remote-tests}/src/main/protocol/TestConductorProtocol.proto (100%)
 create mode 100644 akka-remote-tests/src/main/resources/reference.conf
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/Conductor.scala (97%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/DataTypes.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/Extension.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/Features.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/Player.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/RemoteConnection.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/package.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala (100%)

diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
similarity index 100%
rename from akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
rename to akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto
similarity index 100%
rename from akka-remote/src/main/protocol/TestConductorProtocol.proto
rename to akka-remote-tests/src/main/protocol/TestConductorProtocol.proto
diff --git a/akka-remote-tests/src/main/resources/reference.conf b/akka-remote-tests/src/main/resources/reference.conf
new file mode 100644
index 0000000000..f0d8a9d6ae
--- /dev/null
+++ b/akka-remote-tests/src/main/resources/reference.conf
@@ -0,0 +1,34 @@
+#############################################
+# Akka Remote Testing Reference Config File #
+#############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+akka {
+  testconductor {
+
+    # Timeout for joining a barrier: this is the maximum time any participant
+    # waits for everybody else to join a named barrier.
+    barrier-timeout = 30s
+    
+    # Timeout for interrogation of TestConductor’s Controller actor
+    query-timeout = 5s
+    
+    # Threshold, expressed as the time a packet would need to send at the current
+    # rate, above which the failure injector will split the packet and deliver it
+    # in smaller portions; do not give a value smaller than the HashedWheelTimer
+    # resolution (it would not make sense)
+    packet-split-threshold = 100ms
+    
+    # Default port to start the conductor on; 0 means automatic allocation
+    port = 0
+    
+    # Hostname of the TestConductor server, used by the server to bind to the IP
+    # and by the client to connect to it.
+    host = localhost
+    
+    # Name of the TestConductor client (for identification on the server e.g. for
+    # failure injection)
+    name = "noname"
+  }
+}
\ No newline at end of file
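
As a hedged illustration of the settings moved into this reference.conf, a test could layer its own overrides on top of the defaults, for example programmatically via the Typesafe Config API; the concrete values below are illustrative only.

    import com.typesafe.config.ConfigFactory

    // illustrative overrides; withFallback keeps all other defaults from reference.conf
    val testConfig = ConfigFactory.parseString("""
      akka.testconductor {
        barrier-timeout = 60s
        query-timeout   = 10s
        host            = "127.0.0.1"
        port            = 4712
        name            = "node1"
      }
      """).withFallback(ConfigFactory.load())
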
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
similarity index 97%
rename from akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
index 2bbae6d28b..b25bd1838c 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -24,6 +24,7 @@ import java.net.InetSocketAddress
 import akka.dispatch.Future
 import akka.actor.OneForOneStrategy
 import akka.actor.SupervisorStrategy
+import java.util.concurrent.ConcurrentHashMap
 
 trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒
 
@@ -91,22 +92,21 @@ trait Conductor extends RunControl with FailureInject { this: TestConductorExt 
 
 class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler {
 
-  @volatile
-  var clients = Map[Channel, ActorRef]()
+  val clients = new ConcurrentHashMap[Channel, ActorRef]()
 
   override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = {
     val channel = event.getChannel
     log.debug("connection from {}", getAddrString(channel))
     val fsm = system.actorOf(Props(new ServerFSM(controller, channel)))
-    clients += channel -> fsm
+    clients.put(channel, fsm)
   }
 
   override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = {
     val channel = event.getChannel
     log.debug("disconnect from {}", getAddrString(channel))
-    val fsm = clients(channel)
+    val fsm = clients.get(channel)
     fsm ! PoisonPill
-    clients -= channel
+    clients.remove(channel)
   }
 
   override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = {
@@ -114,7 +114,7 @@ class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAd
     log.debug("message from {}: {}", getAddrString(channel), event.getMessage)
     event.getMessage match {
       case msg: NetworkOp ⇒
-        clients(channel) ! msg
+        clients.get(channel) ! msg
       case msg ⇒
         log.info("client {} sent garbage '{}', disconnecting", getAddrString(channel), msg)
         channel.close()
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/package.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/package.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala
diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
similarity index 100%
rename from akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
rename to akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf
index f14ee3d87c..1438904fe2 100644
--- a/akka-remote/src/main/resources/reference.conf
+++ b/akka-remote/src/main/resources/reference.conf
@@ -155,30 +155,4 @@ akka {
       type = PinnedDispatcher
     }
   }
-  
-  testconductor {
-
-    # Timeout for joining a barrier: this is the maximum time any participants
-    # waits for everybody else to join a named barrier.
-    barrier-timeout = 30s
-    
-    # Timeout for interrogation of TestConductor’s Controller actor
-    query-timeout = 5s
-    
-    # Threshold for packet size in time unit above which the failure injector will
-    # split the packet and deliver in smaller portions; do not give value smaller
-    # than HashedWheelTimer resolution (would not make sense)
-    packet-split-threshold = 100ms
-    
-    # Default port to start the conductor on; 0 means 
-    port = 0
-    
-    # Hostname of the TestConductor server, used by the server to bind to the IP
-    # and by the client to connect to it.
-    host = localhost
-    
-    # Name of the TestConductor client (for identification on the server e.g. for
-    # failure injection)
-    name = "noname"
-  }
 }
diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala
index f9fbfc6c4b..b899bdec45 100644
--- a/project/AkkaBuild.scala
+++ b/project/AkkaBuild.scala
@@ -87,6 +87,24 @@ object AkkaBuild extends Build {
     )
   ) configs (MultiJvm)
 
+  lazy val remoteTests = Project(
+    id = "akka-remote-tests",
+    base = file("akka-remote-tests"),
+    dependencies = Seq(remote % "compile;test->test;multi-jvm->multi-jvm", actorTests % "test->test", testkit % "test->test"),
+    settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq(
+      // disable parallel tests
+      parallelExecution in Test := false,
+      extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src =>
+        (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq
+      },
+      scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"),
+      jvmOptions in MultiJvm := {
+        if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil
+      },
+      test in Test <<= (test in Test) dependsOn (test in MultiJvm)
+    )
+  ) configs (MultiJvm)
+
   lazy val cluster = Project(
     id = "akka-cluster",
     base = file("akka-cluster"),
@@ -438,7 +456,7 @@ object Dependencies {
     Test.zookeeper, Test.log4j // needed for ZkBarrier in multi-jvm tests
   )
 
- val cluster = Seq(Test.junit, Test.scalatest)
+  val cluster = Seq(Test.junit, Test.scalatest)
 
   val slf4j = Seq(slf4jApi, Test.logback)
 

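The next patch adds scaladoc for the Conductor API (startController, throttle, blackhole and friends). As a rough sketch of how a conductor node might drive that API, assuming the usual extension accessor TestConductor(system) and an already running ActorSystem (everything below is illustrative, not taken from the patch):

    import akka.actor.ActorSystem
    import akka.dispatch.Future
    import akka.remote.testconductor.{ Direction, TestConductor }

    object ConductorSketch extends App {
      val system = ActorSystem("ConductorSystem")
      val tc = TestConductor(system) // assumed accessor, cf. TestConductor() used in the diffs

      // bind the controller; the future is completed with the port actually chosen (0 = automatic)
      val port: Future[Int] = tc.startController(participants = 3)

      // once the players have joined: limit traffic sent from "nodeA" to "nodeB" to about 1 MBit/s
      tc.throttle("nodeA", "nodeB", Direction.Send, rateMBit = 1.0)
      // and silently drop everything between "nodeC" and "nodeB" in both directions
      tc.blackhole("nodeC", "nodeB", Direction.Both)
    }

For the throttling documented below, the induced delay is roughly the packet size divided by the rate: at 1 MBit/s a 100 kB packet corresponds to about 0.8 s of send time, well above the default packet-split-threshold of 100 ms, so such a packet would be delivered in roughly eight smaller chunks.
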
From 160aa730667a82e4acd1eb6af5c54eb85ee6adc1 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Thu, 10 May 2012 21:08:06 +0200
Subject: [PATCH 015/106] scaladoc for TestConductor

---
 .../akka/remote/testconductor/Conductor.scala | 287 ++++++++++++++----
 .../akka/remote/testconductor/Extension.scala |  16 +
 .../akka/remote/testconductor/Features.scala  |  89 ------
 .../akka/remote/testconductor/Player.scala    |  34 ++-
 4 files changed, 283 insertions(+), 143 deletions(-)
 delete mode 100644 akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala

diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
index b25bd1838c..347973a255 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -25,8 +25,26 @@ import akka.dispatch.Future
 import akka.actor.OneForOneStrategy
 import akka.actor.SupervisorStrategy
 import java.util.concurrent.ConcurrentHashMap
+import akka.actor.Status
 
-trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒
+sealed trait Direction
+
+object Direction {
+  case object Send extends Direction
+  case object Receive extends Direction
+  case object Both extends Direction
+}
+
+/**
+ * The conductor is the one orchestrating the test: it governs the
+ * [[akka.remote.testconductor.Controller]]’s port to which all
+ * [[akka.remote.testconductor.Player]]s connect, it issues commands to their
+ * [[akka.remote.testconductor.NetworkFailureInjector]] and provides support
+ * for barriers using the [[akka.remote.testconductor.BarrierCoordinator]].
+ * All of this is bundled inside the [[akka.remote.testconductor.TestConductorExt]]
+ * extension.
+ */
+trait Conductor { this: TestConductorExt ⇒
 
   import Controller._
 
@@ -36,60 +54,154 @@ trait Conductor extends RunControl with FailureInject { this: TestConductorExt 
     case x    ⇒ x
   }
 
-  override def startController(participants: Int): Future[Int] = {
+  /**
+   * Start the [[akka.remote.testconductor.Controller]], which in turn will
+   * bind to a TCP port as specified in the `akka.testconductor.port` config
+   * property, where 0 denotes automatic allocation. Since the latter is
+   * actually preferred, a `Future[Int]` is returned which will be completed
+   * with the port number actually chosen, so that this can then be communicated
+   * to the players for their proper start-up.
+   *
+   * This method also invokes [[akka.remote.testconductor.Player]].startClient,
+   * since it is expected that the conductor participates in barriers for
+   * overall coordination. The returned Future will only be completed once the
+   * client’s start-up finishes, which in fact waits for all other players to
+   * connect.
+   *
+   * @param participants gives the number of participants which shall connect
+   * before any of their startClient() operations complete.
+   */
+  def startController(participants: Int): Future[Int] = {
     if (_controller ne null) throw new RuntimeException("TestConductorServer was already started")
     _controller = system.actorOf(Props(new Controller(participants)), "controller")
     import Settings.BarrierTimeout
     controller ? GetPort flatMap { case port: Int ⇒ startClient(port) map (_ ⇒ port) }
   }
 
-  override def port: Future[Int] = {
+  /**
+   * Obtain the port to which the controller’s socket is actually bound. This
+   * will deviate from the configuration in `akka.testconductor.port` in case
+   * that was given as zero.
+   */
+  def port: Future[Int] = {
     import Settings.QueryTimeout
     controller ? GetPort mapTo
   }
 
-  override def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] = {
+  /**
+   * Make the remoting pipeline on the node throttle data sent to or received
+   * from the given remote peer. Throttling works by delaying packet submission
+   * within the netty pipeline until the packet would have been completely sent
+   * according to the given rate, the previous packet completion and the current
+   * packet length. In case of large packets they are split up if the calculated
+   * send pause would exceed `akka.testconductor.packet-split-threshold`
+   * (roughly). All of this uses the system’s HashedWheelTimer, which is not
+   * terribly precise and will execute tasks later than they are scheduled (even
+   * on average), but that is countered by using the actual execution time for
+   * determining how much to send, leading to the correct output rate, but with
+   * increased latency.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param target is the symbolic name of the other node to which connectivity shall be throttled
+   * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both`
+   * @param rateMBit is the maximum data rate in MBit/s
+   */
+  def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Throttle(node, target, direction, rateMBit.toFloat) mapTo
   }
 
-  override def blackhole(node: String, target: String, direction: Direction): Future[Done] = {
+  /**
+   * Switch the Netty pipeline of the remote support into blackhole mode for
+   * sending and/or receiving: it will just drop all messages right before
+   * submitting them to the Socket or right after receiving them from the
+   * Socket.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param target is the symbolic name of the other node to which connectivity shall be impeded
+   * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both`
+   */
+  def blackhole(node: String, target: String, direction: Direction): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Throttle(node, target, direction, 0f) mapTo
   }
 
-  override def disconnect(node: String, target: String): Future[Done] = {
+  /**
+   * Tell the remote support to shutdown the connection to the given remote
+   * peer. It works regardless of whether the recipient was initiator or
+   * responder.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param target is the symbolic name of the other node to which connectivity shall be impeded
+   */
+  def disconnect(node: String, target: String): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Disconnect(node, target, false) mapTo
   }
 
-  override def abort(node: String, target: String): Future[Done] = {
+  /**
+   * Tell the remote support to TCP_RESET the connection to the given remote
+   * peer. It works regardless of whether the recipient was initiator or
+   * responder.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param target is the symbolic name of the other node to which connectivity shall be impeded
+   */
+  def abort(node: String, target: String): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Disconnect(node, target, true) mapTo
   }
 
-  override def shutdown(node: String, exitValue: Int): Future[Done] = {
+  /**
+   * Tell the remote node to shut itself down using System.exit with the given
+   * exitValue.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param exitValue is the return code which shall be given to System.exit
+   */
+  def shutdown(node: String, exitValue: Int): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Terminate(node, exitValue) mapTo
   }
 
-  override def kill(node: String): Future[Done] = {
+  /**
+   * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   */
+  def kill(node: String): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Terminate(node, -1) mapTo
   }
 
-  override def getNodes: Future[List[String]] = {
+  /**
+   * Obtain the list of remote host names currently registered.
+   */
+  def getNodes: Future[List[String]] = {
     import Settings.QueryTimeout
     controller ? GetNodes mapTo
   }
 
-  override def removeNode(node: String): Future[Done] = {
+  /**
+   * Remove a remote host from the list, so that the remaining nodes may still
+   * pass subsequent barriers. This must be done before the client connection
+   * breaks down in order to effect an “orderly” removal (i.e. without failing
+   * present and future barriers).
+   * 
+   * @param node is the symbolic name of the node which is to be removed
+   */
+  def removeNode(node: String): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Remove(node) mapTo
   }
 
 }
 
+/**
+ * This handler is installed at the end of the controller’s netty pipeline. Its only
+ * purpose is to dispatch incoming messages to the right ServerFSM actor. There is
+ * one shared instance of this class for all connections accepted by one Controller.
+ */
 class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler {
 
   val clients = new ConcurrentHashMap[Channel, ActorRef]()
@@ -105,7 +217,7 @@ class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAd
     val channel = event.getChannel
     log.debug("disconnect from {}", getAddrString(channel))
     val fsm = clients.get(channel)
-    fsm ! PoisonPill
+    fsm ! Controller.ClientDisconnected
     clients.remove(channel)
   }
 
@@ -129,6 +241,19 @@ object ServerFSM {
   case object Ready extends State
 }
 
+/**
+ * The server part of each client connection is represented by a ServerFSM.
+ * The Initial state handles reception of the new client’s
+ * [[akka.remote.testconductor.Hello]] message (which is needed for all subsequent
+ * node name translations).
+ *
+ * In the Ready state, messages from the client are forwarded to the controller
+ * and [[akka.remote.testconductor.Send]] requests are sent, but the latter is
+ * treated specially: all client operations are to be confirmed by a
+ * [[akka.remote.testconductor.Done]] message, and there can be only one such
+ * request outstanding at a given time (i.e. a Send fails if the previous one has
+ * not yet been acknowledged).
+ */
 class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] {
   import ServerFSM._
   import akka.actor.FSM._
@@ -136,9 +261,20 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi
 
   startWith(Initial, None)
 
+  whenUnhandled {
+    case Event(ClientDisconnected, Some(s)) ⇒
+      s ! Status.Failure(new RuntimeException("client disconnected in state " + stateName + ": " + channel))
+      stop()
+    case Event(ClientDisconnected, None) ⇒ stop()
+  }
+
+  onTermination {
+    case _ ⇒ controller ! ClientDisconnected
+  }
+
   when(Initial, stateTimeout = 10 seconds) {
     case Event(Hello(name, addr), _) ⇒
-      controller ! ClientConnected(name, addr)
+      controller ! NodeInfo(name, addr, self)
       goto(Ready)
     case Event(x: NetworkOp, _) ⇒
       log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x)
@@ -162,7 +298,6 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi
       stay using None
     case Event(msg: NetworkOp, _) ⇒
       log.warning("client {} sent unsupported message {}", getAddrString(channel), msg)
-      channel.close()
       stop()
     case Event(Send(msg @ (_: EnterBarrier | _: Done)), _) ⇒
       channel.write(msg)
@@ -176,10 +311,13 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi
   }
 
   initialize
+
+  onTermination {
+    case _ ⇒ channel.close()
+  }
 }
 
 object Controller {
-  case class ClientConnected(name: String, address: Address)
   case class ClientDisconnected(name: String)
   case object GetNodes
   case object GetPort
@@ -187,6 +325,11 @@ object Controller {
   case class NodeInfo(name: String, addr: Address, fsm: ActorRef)
 }
 
+/**
+ * This controls test execution by managing barriers (delegated to
+ * [[akka.remote.testconductor.BarrierCoordinator]], its child) and allowing
+ * network and other failures to be injected at the test nodes.
+ */
 class Controller(_participants: Int) extends Actor {
   import Controller._
 
@@ -199,8 +342,8 @@ class Controller(_participants: Int) extends Actor {
   override def supervisorStrategy = OneForOneStrategy() {
     case e: BarrierCoordinator.BarrierTimeoutException ⇒ SupervisorStrategy.Resume
     case e: BarrierCoordinator.WrongBarrierException ⇒
-      // I think we are lacking a means of communication here: this is not correct!
-      for (i ← 1 to e.data.clients) barrier ! ClientConnected
+      for (NodeInfo(c, _, _) ← e.data.clients; info ← nodes get c)
+        barrier ! NodeInfo(c, info.addr, info.fsm)
       for (c ← e.data.arrived) c ! BarrierFailed(e.barrier)
       SupervisorStrategy.Restart
   }
@@ -209,17 +352,17 @@ class Controller(_participants: Int) extends Actor {
   var nodes = Map[String, NodeInfo]()
 
   override def receive = LoggingReceive {
-    case ClientConnected(name, addr) ⇒
-      nodes += name -> NodeInfo(name, addr, sender)
-      barrier forward ClientConnected
+    case c @ NodeInfo(name, addr, fsm) ⇒
+      nodes += name -> c
+      barrier forward c
       if (initialParticipants <= 0) sender ! Done
       else if (nodes.size == initialParticipants) {
         for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done)
         initialParticipants = 0
       }
-    case ClientDisconnected(name) ⇒
+    case c @ ClientDisconnected(name) ⇒
       nodes -= name
-      barrier forward ClientDisconnected
+      barrier forward c
     case e @ EnterBarrier(name) ⇒
       barrier forward e
     case Throttle(node, target, direction, rateMBit) ⇒
@@ -234,9 +377,9 @@ class Controller(_participants: Int) extends Actor {
       } else {
         nodes(node).fsm forward Send(TerminateMsg(exitValueOrKill))
       }
-    // TODO: properly remove node from BarrierCoordinator
-    //    case Remove(node) =>
-    //      nodes -= node
+    case Remove(node) ⇒
+      nodes -= node
+      barrier ! BarrierCoordinator.RemoveClient(node)
     case GetNodes ⇒ sender ! nodes.keys
     case GetPort ⇒
       sender ! (connection.getLocalAddress match {
@@ -250,27 +393,60 @@ object BarrierCoordinator {
   case object Idle extends State
   case object Waiting extends State
 
-  case class Data(clients: Int, barrier: String, arrived: List[ActorRef])
+  case class RemoveClient(name: String)
+
+  case class Data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef])
   class BarrierTimeoutException(val data: Data) extends RuntimeException(data.barrier) with NoStackTrace
   class WrongBarrierException(val barrier: String, val client: ActorRef, val data: Data) extends RuntimeException(barrier) with NoStackTrace
 }
 
+/**
+ * This barrier coordinator is informed by the controller of players connecting
+ * (NodeInfo), being deliberately removed (RemoveClient) or failing
+ * (ClientDisconnected). It also receives EnterBarrier requests: the first one
+ * received sets the name of the current barrier, and all other known clients are
+ * then expected to join that barrier, whereupon all of them will be sent the
+ * successful EnterBarrier return message. In case of planned removals this may
+ * simply happen earlier; in case of failures the current barrier (and all
+ * subsequent ones) will be failed by sending BarrierFailed responses.
+ */
 class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] {
   import BarrierCoordinator._
   import akka.actor.FSM._
   import Controller._
 
-  startWith(Idle, Data(0, "", Nil))
+  // this shall be set to true if all subsequent barriers shall fail
+  var failed = false
+  override def preRestart(reason: Throwable, message: Option[Any]) {}
+  override def postRestart(reason: Throwable) { failed = true }
+
+  // TODO what happens with the other waiting players in case of a test failure?
+
+  startWith(Idle, Data(Set(), "", Nil))
+
+  whenUnhandled {
+    case Event(n: NodeInfo, d @ Data(clients, _, _)) ⇒
+      stay using d.copy(clients = clients + n)
+  }
 
   when(Idle) {
-    case Event(EnterBarrier(name), Data(num, _, _)) ⇒
-      if (num == 0) throw new IllegalStateException("no client expected yet")
-      goto(Waiting) using Data(num, name, sender :: Nil)
-    case Event(ClientConnected, d @ Data(num, _, _)) ⇒
-      stay using d.copy(clients = num + 1)
-    case Event(ClientDisconnected, d @ Data(num, _, _)) ⇒
-      if (num == 0) throw new IllegalStateException("no client to disconnect")
-      stay using d.copy(clients = num - 1)
+    case Event(EnterBarrier(name), d @ Data(clients, _, _)) ⇒
+      if (clients.isEmpty) throw new IllegalStateException("no client expected yet")
+      if (failed)
+        stay replying BarrierFailed(name)
+      else
+        goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil)
+    case Event(ClientDisconnected(name), d @ Data(clients, _, _)) ⇒
+      if (clients.isEmpty) throw new IllegalStateException("no client to disconnect")
+      (clients filterNot (_.name == name)) match {
+        case `clients` ⇒ stay
+        case c ⇒
+          failed = true
+          stay using d.copy(clients = c)
+      }
+    case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒
+      if (clients.isEmpty) throw new IllegalStateException("no client to remove")
+      stay using d.copy(clients = clients filterNot (_.name == name))
   }
 
   onTransition {
@@ -279,30 +455,37 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
   }
 
   when(Waiting) {
-    case Event(e @ EnterBarrier(name), d @ Data(num, barrier, arrived)) ⇒
+    case Event(e @ EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒
       if (name != barrier) throw new WrongBarrierException(barrier, sender, d)
       val together = sender :: arrived
-      if (together.size == num) {
-        together foreach (_ ! Send(e))
-        goto(Idle) using Data(num, "", Nil)
-      } else {
-        stay using d.copy(arrived = together)
-      }
-    case Event(ClientConnected, d @ Data(num, _, _)) ⇒
-      stay using d.copy(clients = num + 1)
-    case Event(ClientDisconnected, d @ Data(num, barrier, arrived)) ⇒
-      val expected = num - 1
-      if (arrived.size == expected) {
-        val e = EnterBarrier(barrier)
-        sender :: arrived foreach (_ ! Send(e))
-        goto(Idle) using Data(expected, "", Nil)
-      } else {
-        stay using d.copy(clients = expected)
+      handleBarrier(d.copy(arrived = together))
+    case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒
+      val newClients = clients filterNot (_.name == name)
+      val newArrived = arrived filterNot (_ == name)
+      handleBarrier(d.copy(clients = newClients, arrived = newArrived))
+    case Event(ClientDisconnected(name), d @ Data(clients, barrier, arrived)) ⇒
+      (clients filterNot (_.name == name)) match {
+        case `clients` ⇒ stay
+        case c ⇒
+          val f = BarrierFailed(barrier)
+          arrived foreach (_ ! Send(f))
+          failed = true
+          goto(Idle) using Data(c, "", Nil)
       }
     case Event(StateTimeout, data) ⇒
       throw new BarrierTimeoutException(data)
   }
 
   initialize
+
+  def handleBarrier(data: Data): State =
+    if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) {
+      val e = EnterBarrier(data.barrier)
+      data.arrived foreach (_ ! Send(e))
+      goto(Idle) using data.copy(barrier = "", arrived = Nil)
+    } else {
+      stay using data
+    }
+
 }
 
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
index 97f5dd7295..ff1d77fb9d 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
@@ -11,12 +11,28 @@ import akka.actor.ActorRef
 import java.util.concurrent.ConcurrentHashMap
 import akka.actor.Address
 
+/**
+ * Access to the [[akka.remote.testconductor.TestConductorExt]] extension:
+ * 
+ * {{{
+ * val tc = TestConductor(system)
+ * tc.startController(numPlayers)
+ * // OR
+ * tc.startClient(conductorPort)
+ * }}}
+ */
 object TestConductor extends ExtensionKey[TestConductorExt] {
 
   def apply()(implicit ctx: ActorContext): TestConductorExt = apply(ctx.system)
 
 }
 
+/**
+ * This binds together the [[akka.remote.testconductor.Conductor]] and
+ * [[akka.remote.testconductor.Player]] roles inside an Akka
+ * [[akka.actor.Extension]]. Please follow the aforementioned links for
+ * more information.
+ */
 class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player {
 
   object Settings {
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala
deleted file mode 100644
index 336d04c368..0000000000
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- *  Copyright (C) 2009-2011 Typesafe Inc. 
- */
-package akka.remote.testconductor
-
-import akka.dispatch.Future
-
-trait BarrierSync {
-  /**
-   * Enter all given barriers in the order in which they were given.
-   */
-  def enter(name: String*): Unit
-}
-
-sealed trait Direction
-
-object Direction {
-  case object Send extends Direction
-  case object Receive extends Direction
-  case object Both extends Direction
-}
-
-trait FailureInject {
-
-  /**
-   * Make the remoting pipeline on the node throttle data sent to or received
-   * from the given remote peer.
-   */
-  def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done]
-
-  /**
-   * Switch the Netty pipeline of the remote support into blackhole mode for
-   * sending and/or receiving: it will just drop all messages right before
-   * submitting them to the Socket or right after receiving them from the
-   * Socket.
-   */
-  def blackhole(node: String, target: String, direction: Direction): Future[Done]
-
-  /**
-   * Tell the remote support to shutdown the connection to the given remote
-   * peer. It works regardless of whether the recipient was initiator or
-   * responder.
-   */
-  def disconnect(node: String, target: String): Future[Done]
-
-  /**
-   * Tell the remote support to TCP_RESET the connection to the given remote
-   * peer. It works regardless of whether the recipient was initiator or
-   * responder.
-   */
-  def abort(node: String, target: String): Future[Done]
-
-}
-
-trait RunControl {
-
-  /**
-   * Start the server port, returns the port number.
-   */
-  def startController(participants: Int): Future[Int]
-
-  /**
-   * Get the actual port used by the server.
-   */
-  def port: Future[Int]
-
-  /**
-   * Tell the remote node to shut itself down using System.exit with the given
-   * exitValue.
-   */
-  def shutdown(node: String, exitValue: Int): Future[Done]
-
-  /**
-   * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy.
-   */
-  def kill(node: String): Future[Done]
-
-  /**
-   * Obtain the list of remote host names currently registered.
-   */
-  def getNodes: Future[List[String]]
-
-  /**
-   * Remove a remote host from the list, so that the remaining nodes may still
-   * pass subsequent barriers.
-   */
-  def removeNode(node: String): Future[Done]
-
-}
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
index 6e78610cfb..38d0f6ef34 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
@@ -21,7 +21,13 @@ import akka.actor.PoisonPill
 import akka.event.Logging
 import akka.dispatch.Future
 
-trait Player extends BarrierSync { this: TestConductorExt ⇒
+/**
+ * The Player is the client component of the
+ * [[akka.remote.testconductor.TestConductorExt]] extension. It registers with
+ * the [[akka.remote.testconductor.Conductor]]’s [[akka.remote.testconductor.Controller]]
+ * in order to participate in barriers and enable network failure injection.
+ */
+trait Player { this: TestConductorExt ⇒
 
   private var _client: ActorRef = _
   private def client = _client match {
@@ -29,6 +35,14 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒
     case x    ⇒ x
   }
 
+  /**
+   * Connect to the conductor on the given port (the host is taken from setting
+   * `akka.testconductor.host`). The connection is made asynchronously, but you
+   * should await completion of the returned Future because that implies that
+   * all expected participants of this test have successfully connected (i.e.
+   * this is a first barrier in itself). The number of expected participants is
+   * set in [[akka.remote.testconductor.Conductor]]`.startController()`.
+   */
   def startClient(port: Int): Future[Done] = {
     import ClientFSM._
     import akka.actor.FSM._
@@ -51,7 +65,11 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒
     a ? client mapTo
   }
 
-  override def enter(name: String*) {
+  /**
+   * Enter the named barriers, one after the other, in the order given. Will
+   * throw an exception in case of timeouts or other errors.
+   */
+  def enter(name: String*) {
     system.log.debug("entering barriers " + name.mkString("(", ", ", ")"))
     name foreach { b ⇒
       import Settings.BarrierTimeout
@@ -73,6 +91,15 @@ object ClientFSM {
   case object Disconnected
 }
 
+/**
+ * This is the controlling entity on the [[akka.remote.testconductor.Player]]
+ * side: in a first step it registers itself with a symbolic name and its remote
+ * address at the [[akka.remote.testconductor.Controller]], then waits for the
+ * `Done` message which signals that all other expected test participants have
+ * done the same. After that, it will pass barrier requests to and from the
+ * coordinator and react to the [[akka.remote.testconductor.Conductor]]’s
+ * requests for failure injection.
+ */
 class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] {
   import ClientFSM._
 
@@ -162,6 +189,9 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
 
 }
 
+/**
+ * This handler only forwards messages received from the conductor to the [[akka.remote.testconductor.ClientFSM]].
+ */
 class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler {
 
   import ClientFSM._
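
As a hedged usage sketch (the system name, node name and port are assumptions mirroring the MultiJvm specs later in this series, and a conductor must already be listening), a player node would typically connect and then synchronize on named barriers like this:

{{{
// Usage sketch of the Player API; not part of the patch, Akka 2.0-era APIs assumed.
import akka.actor.ActorSystem
import akka.dispatch.Await
import akka.util.Duration
import akka.remote.testconductor.TestConductor
import com.typesafe.config.ConfigFactory

object PlayerSketch extends App {
  val system = ActorSystem("player", ConfigFactory.parseString("""
    akka.actor.provider = akka.remote.RemoteActorRefProvider
    akka.remote.netty.port = 0
    akka.testconductor.name = node1
  """))
  val tc = TestConductor(system)
  // completes only once all expected participants have connected (a first barrier in itself)
  Await.result(tc.startClient(4712), Duration.Inf)
  tc.enter("begin") // blocks until every registered node has entered "begin"
  tc.enter("done")
  system.shutdown()
}
}}}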

From 439f653427d4ad26d504b2b35633b2de9d421d8b Mon Sep 17 00:00:00 2001
From: Roland 
Date: Fri, 11 May 2012 11:31:44 +0200
Subject: [PATCH 016/106] add some tests for BarrierCoordinator and Controller

---
 .../akka/remote/testconductor/Conductor.scala | 129 +++--
 .../akka/remote/testconductor/Extension.scala |   2 +-
 .../akka/remote/testconductor/Player.scala    |  27 +-
 .../remote/testconductor/BarrierSpec.scala    | 465 ++++++++++++++++++
 .../remote/testconductor/ControllerSpec.scala |  38 ++
 5 files changed, 599 insertions(+), 62 deletions(-)
 create mode 100644 akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
 create mode 100644 akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala

diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
index 347973a255..09a6faeeb0 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -1,5 +1,5 @@
 /**
- *  Copyright (C) 2009-2011 Typesafe Inc. 
+ *  Copyright (C) 2009-2012 Typesafe Inc. 
  */
 package akka.remote.testconductor
 
@@ -100,7 +100,7 @@ trait Conductor { this: TestConductorExt ⇒
    * on average), but that is countered by using the actual execution time for
    * determining how much to send, leading to the correct output rate, but with
    * increased latency.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param target is the symbolic name of the other node to which connectivity shall be throttled
    * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both`
@@ -116,7 +116,7 @@ trait Conductor { this: TestConductorExt ⇒
    * sending and/or receiving: it will just drop all messages right before
    * submitting them to the Socket or right after receiving them from the
    * Socket.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param target is the symbolic name of the other node to which connectivity shall be impeded
    * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both`
@@ -130,7 +130,7 @@ trait Conductor { this: TestConductorExt ⇒
    * Tell the remote support to shutdown the connection to the given remote
    * peer. It works regardless of whether the recipient was initiator or
    * responder.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param target is the symbolic name of the other node to which connectivity shall be impeded
    */
@@ -143,7 +143,7 @@ trait Conductor { this: TestConductorExt ⇒
    * Tell the remote support to TCP_RESET the connection to the given remote
    * peer. It works regardless of whether the recipient was initiator or
    * responder.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param target is the symbolic name of the other node to which connectivity shall be impeded
    */
@@ -155,7 +155,7 @@ trait Conductor { this: TestConductorExt ⇒
   /**
    * Tell the remote node to shut itself down using System.exit with the given
    * exitValue.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param exitValue is the return code which shall be given to System.exit
    */
@@ -166,7 +166,7 @@ trait Conductor { this: TestConductorExt ⇒
 
   /**
    * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    */
   def kill(node: String): Future[Done] = {
@@ -177,7 +177,7 @@ trait Conductor { this: TestConductorExt ⇒
   /**
    * Obtain the list of remote host names currently registered.
    */
-  def getNodes: Future[List[String]] = {
+  def getNodes: Future[Iterable[String]] = {
     import Settings.QueryTimeout
     controller ? GetNodes mapTo
   }
@@ -187,7 +187,7 @@ trait Conductor { this: TestConductorExt ⇒
    * pass subsequent barriers. This must be done before the client connection
   * breaks down in order to effect an “orderly” removal (i.e. without failing
    * present and future barriers).
-   * 
+   *
    * @param node is the symbolic name of the node which is to be removed
    */
   def removeNode(node: String): Future[Done] = {
@@ -330,22 +330,32 @@ object Controller {
  * [[akka.remote.testconductor.BarrierCoordinator]], its child) and allowing
  * network and other failures to be injected at the test nodes.
  */
-class Controller(_participants: Int) extends Actor {
+class Controller(private var initialParticipants: Int) extends Actor {
   import Controller._
-
-  var initialParticipants = _participants
+  import BarrierCoordinator._
 
   val settings = TestConductor().Settings
   val connection = RemoteConnection(Server, settings.host, settings.port,
     new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler")))
 
+  /*
+   * Supervision of the BarrierCoordinator means to catch all his bad emotions
+   * and sometimes console him (BarrierEmpty, BarrierTimeout), sometimes tell
+   * him to hate the world (WrongBarrier, DuplicateNode, ClientLost). The latter shall help
+   * terminate broken tests as quickly as possible (i.e. without awaiting
+   * BarrierTimeouts in the players).
+   */
   override def supervisorStrategy = OneForOneStrategy() {
-    case e: BarrierCoordinator.BarrierTimeoutException ⇒ SupervisorStrategy.Resume
-    case e: BarrierCoordinator.WrongBarrierException ⇒
-      for (NodeInfo(c, _, _) ← e.data.clients; info ← nodes get c)
-        barrier ! NodeInfo(c, info.addr, info.fsm)
-      for (c ← e.data.arrived) c ! BarrierFailed(e.barrier)
-      SupervisorStrategy.Restart
+    case BarrierTimeout(data)             ⇒ SupervisorStrategy.Resume
+    case BarrierEmpty(data, msg)          ⇒ SupervisorStrategy.Resume
+    case WrongBarrier(name, client, data) ⇒ client ! Send(BarrierFailed(name)); failBarrier(data)
+    case ClientLost(data, node)           ⇒ failBarrier(data)
+    case DuplicateNode(data, node)        ⇒ failBarrier(data)
+  }
+
+  def failBarrier(data: Data): SupervisorStrategy.Directive = {
+    for (c ← data.arrived) c ! Send(BarrierFailed(data.barrier))
+    SupervisorStrategy.Restart
   }
 
   val barrier = context.actorOf(Props[BarrierCoordinator], "barriers")
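
The supervision pattern in isolation, as a hedged sketch (all names invented for illustration): the parent maps each failure type thrown by its child to a directive and may notify other parties before restarting.

{{{
// Minimal sketch of a decider-based OneForOneStrategy; not the actual Controller code.
import akka.actor.{ Actor, ActorRef, OneForOneStrategy, Props, SupervisorStrategy }

class ChildSketch extends Actor {
  def receive = { case "boom" ⇒ throw new RuntimeException("boom") }
}

class ParentSketch(listener: ActorRef) extends Actor {
  override def supervisorStrategy = OneForOneStrategy() {
    case _: IllegalStateException ⇒ SupervisorStrategy.Resume                // harmless: keep child state
    case e: RuntimeException      ⇒ listener ! e; SupervisorStrategy.Restart // broken: inform and restart
  }
  val child = context.actorOf(Props[ChildSketch], "child")
  def receive = { case msg ⇒ child forward msg }
}
}}}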
@@ -353,12 +363,20 @@ class Controller(_participants: Int) extends Actor {
 
   override def receive = LoggingReceive {
     case c @ NodeInfo(name, addr, fsm) ⇒
-      nodes += name -> c
       barrier forward c
-      if (initialParticipants <= 0) sender ! Done
-      else if (nodes.size == initialParticipants) {
-        for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done)
-        initialParticipants = 0
+      if (nodes contains name) {
+        if (initialParticipants > 0) {
+          for (NodeInfo(_, _, client) ← nodes.values) client ! Send(BarrierFailed("initial startup"))
+          initialParticipants = 0
+        }
+        fsm ! Send(BarrierFailed("initial startup"))
+      } else {
+        nodes += name -> c
+        if (initialParticipants <= 0) fsm ! Send(Done)
+        else if (nodes.size == initialParticipants) {
+          for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done)
+          initialParticipants = 0
+        }
       }
     case c @ ClientDisconnected(name) ⇒
       nodes -= name
@@ -396,8 +414,16 @@ object BarrierCoordinator {
   case class RemoveClient(name: String)
 
   case class Data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef])
-  class BarrierTimeoutException(val data: Data) extends RuntimeException(data.barrier) with NoStackTrace
-  class WrongBarrierException(val barrier: String, val client: ActorRef, val data: Data) extends RuntimeException(barrier) with NoStackTrace
+
+  trait Printer { this: Product with Throwable with NoStackTrace ⇒
+    override def toString = productPrefix + productIterator.mkString("(", ", ", ")")
+  }
+
+  case class BarrierTimeout(data: Data) extends RuntimeException(data.barrier) with NoStackTrace with Printer
+  case class DuplicateNode(data: Data, node: Controller.NodeInfo) extends RuntimeException with NoStackTrace with Printer
+  case class WrongBarrier(barrier: String, client: ActorRef, data: Data) extends RuntimeException(barrier) with NoStackTrace with Printer
+  case class BarrierEmpty(data: Data, msg: String) extends RuntimeException(msg) with NoStackTrace with Printer
+  case class ClientLost(data: Data, client: String) extends RuntimeException with NoStackTrace with Printer
 }
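
A standalone sketch of the Printer mix-in (exception name made up for illustration): the overridden toString renders a failure case class in the familiar Name(field, field) form instead of the default Throwable format, which keeps log output and test expectations readable.

{{{
// Self-contained illustration of the Printer trait; not part of the patch.
import scala.util.control.NoStackTrace

object PrinterSketch extends App {
  trait Printer { this: Product with Throwable with NoStackTrace ⇒
    override def toString = productPrefix + productIterator.mkString("(", ", ", ")")
  }
  case class SketchTimeout(barrier: String, waiting: Int)
    extends RuntimeException(barrier) with NoStackTrace with Printer

  println(SketchTimeout("startup", 3)) // prints: SketchTimeout(startup, 3)
}
}}}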
 
 /**
@@ -426,26 +452,28 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
 
   whenUnhandled {
     case Event(n: NodeInfo, d @ Data(clients, _, _)) ⇒
+      if (clients.find(_.name == n.name).isDefined) throw new DuplicateNode(d, n)
       stay using d.copy(clients = clients + n)
+    case Event(ClientDisconnected(name), d @ Data(clients, _, arrived)) ⇒
+      if (clients.isEmpty) throw BarrierEmpty(d, "no client to disconnect")
+      (clients find (_.name == name)) match {
+        case None    ⇒ stay
+        case Some(c) ⇒ throw ClientLost(d.copy(clients = clients - c, arrived = arrived filterNot (_ == c.fsm)), name)
+      }
   }
 
   when(Idle) {
-    case Event(EnterBarrier(name), d @ Data(clients, _, _)) ⇒
-      if (clients.isEmpty) throw new IllegalStateException("no client expected yet")
+    case Event(e @ EnterBarrier(name), d @ Data(clients, _, _)) ⇒
       if (failed)
-        stay replying BarrierFailed(name)
+        stay replying Send(BarrierFailed(name))
+      else if (clients.map(_.fsm) == Set(sender))
+        stay replying Send(e)
+      else if (clients.find(_.fsm == sender).isEmpty)
+        stay replying Send(BarrierFailed(name))
       else
         goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil)
-    case Event(ClientDisconnected(name), d @ Data(clients, _, _)) ⇒
-      if (clients.isEmpty) throw new IllegalStateException("no client to disconnect")
-      (clients filterNot (_.name == name)) match {
-        case `clients` ⇒ stay
-        case c ⇒
-          failed = true
-          stay using d.copy(clients = c)
-      }
     case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒
-      if (clients.isEmpty) throw new IllegalStateException("no client to remove")
+      if (clients.isEmpty) throw BarrierEmpty(d, "no client to remove")
       stay using d.copy(clients = clients filterNot (_.name == name))
   }
 
@@ -456,36 +484,33 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
 
   when(Waiting) {
     case Event(e @ EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒
-      if (name != barrier) throw new WrongBarrierException(barrier, sender, d)
+      if (name != barrier || clients.find(_.fsm == sender).isEmpty) throw WrongBarrier(name, sender, d)
       val together = sender :: arrived
       handleBarrier(d.copy(arrived = together))
     case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒
-      val newClients = clients filterNot (_.name == name)
-      val newArrived = arrived filterNot (_ == name)
-      handleBarrier(d.copy(clients = newClients, arrived = newArrived))
-    case Event(ClientDisconnected(name), d @ Data(clients, barrier, arrived)) ⇒
-      (clients filterNot (_.name == name)) match {
-        case `clients` ⇒ stay
-        case c ⇒
-          val f = BarrierFailed(barrier)
-          arrived foreach (_ ! Send(f))
-          failed = true
-          goto(Idle) using Data(c, "", Nil)
+      clients find (_.name == name) match {
+        case None ⇒ stay
+        case Some(client) ⇒
+          handleBarrier(d.copy(clients = clients - client, arrived = arrived filterNot (_ == client.fsm)))
       }
     case Event(StateTimeout, data) ⇒
-      throw new BarrierTimeoutException(data)
+      throw BarrierTimeout(data)
   }
 
   initialize
 
-  def handleBarrier(data: Data): State =
-    if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) {
+  def handleBarrier(data: Data): State = {
+    log.debug("handleBarrier({})", data)
+    if (data.arrived.isEmpty) {
+      goto(Idle) using data.copy(barrier = "")
+    } else if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) {
       val e = EnterBarrier(data.barrier)
       data.arrived foreach (_ ! Send(e))
       goto(Idle) using data.copy(barrier = "", arrived = Nil)
     } else {
       stay using data
     }
+  }
 
 }
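
The restart trick used by the BarrierCoordinator (empty preRestart plus a flag set in postRestart) can be shown in isolation; the following hedged sketch (assumed names) illustrates how an actor distinguishes a fresh start from a restart after failure and marks all subsequent work as failed.

{{{
// Sketch only: keep children on restart and remember that a failure happened.
import akka.actor.Actor

class FlagOnRestartSketch extends Actor {
  var failed = false
  override def preRestart(reason: Throwable, message: Option[Any]) {} // skip default cleanup, keep children
  override def postRestart(reason: Throwable) { failed = true }       // only runs after a restart
  def receive = {
    case "status" ⇒ sender ! (if (failed) "failed" else "ok")
    case "boom"   ⇒ throw new RuntimeException("boom")
  }
}
}}}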
 
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
index ff1d77fb9d..5d7826c60c 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
@@ -13,7 +13,7 @@ import akka.actor.Address
 
 /**
  * Access to the [[akka.remote.testconductor.TestConductorExt]] extension:
- * 
+ *
  * {{{
  * val tc = TestConductor(system)
  * tc.startController(numPlayers)
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
index 38d0f6ef34..a82a090b23 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
@@ -55,9 +55,9 @@ trait Player { this: TestConductorExt ⇒
       def receive = {
         case fsm: ActorRef                        ⇒ waiting = sender; fsm ! SubscribeTransitionCallBack(self)
         case Transition(_, Connecting, AwaitDone) ⇒ // step 1, not there yet
-        case Transition(_, AwaitDone, Connected)  ⇒ waiting ! Done
-        case t: Transition[_]                     ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t))
-        case CurrentState(_, Connected)           ⇒ waiting ! Done
+        case Transition(_, AwaitDone, Connected)  ⇒ waiting ! Done; context stop self
+        case t: Transition[_]                     ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t)); context stop self
+        case CurrentState(_, Connected)           ⇒ waiting ! Done; context stop self
         case _: CurrentState[_]                   ⇒
       }
     }))
@@ -84,6 +84,7 @@ object ClientFSM {
   case object Connecting extends State
   case object AwaitDone extends State
   case object Connected extends State
+  case object Failed extends State
 
   case class Data(channel: Channel, barrier: Option[(String, ActorRef)])
 
@@ -116,24 +117,24 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
       channel.write(Hello(settings.name, TestConductor().address))
       goto(AwaitDone)
     case Event(_: ConnectionFailure, _) ⇒
-      // System.exit(1)
-      stop
+      goto(Failed)
     case Event(StateTimeout, _) ⇒
       log.error("connect timeout to TestConductor")
-      // System.exit(1)
-      stop
+      goto(Failed)
   }
 
   when(AwaitDone, stateTimeout = settings.BarrierTimeout.duration) {
     case Event(Done, _) ⇒
       log.debug("received Done: starting test")
       goto(Connected)
+    case Event(msg: NetworkOp, _) ⇒
+      log.error("received {} instead of Done", msg)
+      goto(Failed)
     case Event(msg: ClientOp, _) ⇒
       stay replying Status.Failure(new IllegalStateException("not connected yet"))
     case Event(StateTimeout, _) ⇒
       log.error("connect timeout to TestConductor")
-      // System.exit(1)
-      stop
+      goto(Failed)
   }
 
   when(Connected) {
@@ -180,6 +181,14 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
       stay // needed because Java doesn’t have Nothing
   }
 
+  when(Failed) {
+    case Event(msg: ClientOp, _) ⇒
+      stay replying Status.Failure(new RuntimeException("cannot do " + msg + " while Failed"))
+    case Event(msg: NetworkOp, _) ⇒
+      log.warning("ignoring network message {} while Failed", msg)
+      stay
+  }
+
   onTermination {
     case StopEvent(_, _, Data(channel, _)) ⇒
       channel.close()
diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
new file mode 100644
index 0000000000..f0b668d1ed
--- /dev/null
+++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
@@ -0,0 +1,465 @@
+/**
+ *  Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+package akka.remote.testconductor
+
+import akka.testkit.AkkaSpec
+import akka.actor.Props
+import akka.actor.AddressFromURIString
+import akka.actor.ActorRef
+import akka.testkit.ImplicitSender
+import akka.actor.Actor
+import akka.actor.OneForOneStrategy
+import akka.actor.SupervisorStrategy
+import akka.testkit.EventFilter
+import akka.testkit.TestProbe
+import akka.util.duration._
+import akka.event.Logging
+import org.scalatest.BeforeAndAfterEach
+
+object BarrierSpec {
+  case class Failed(ref: ActorRef, thr: Throwable)
+  val config = """
+    akka.testconductor.barrier-timeout = 5s
+    akka.actor.provider = akka.remote.RemoteActorRefProvider
+    akka.remote.netty.port = 0
+    akka.actor.debug.fsm = on
+    akka.actor.debug.lifecycle = on
+    """
+}
+
+class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with BeforeAndAfterEach {
+
+  import BarrierSpec._
+  import Controller._
+  import BarrierCoordinator._
+
+  override def afterEach {
+    system.eventStream.setLogLevel(Logging.WarningLevel)
+  }
+
+  "A BarrierCoordinator" must {
+
+    "register clients and remove them" in {
+      val b = getBarrier()
+      b ! NodeInfo("a", AddressFromURIString("akka://sys"), system.deadLetters)
+      b ! RemoveClient("b")
+      b ! RemoveClient("a")
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        b ! RemoveClient("a")
+      }
+      expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to remove")))
+    }
+
+    "register clients and disconnect them" in {
+      val b = getBarrier()
+      b ! NodeInfo("a", AddressFromURIString("akka://sys"), system.deadLetters)
+      b ! ClientDisconnected("b")
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        b ! ClientDisconnected("a")
+      }
+      expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil), "a")))
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        b ! ClientDisconnected("a")
+      }
+      expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to disconnect")))
+    }
+
+    "fail entering barrier when nobody registered" in {
+      val b = getBarrier()
+      b ! EnterBarrier("b")
+      expectMsg(Send(BarrierFailed("b")))
+    }
+
+    "enter barrier" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      noMsg(a, b)
+      within(1 second) {
+        b.send(barrier, EnterBarrier("bar"))
+        a.expectMsg(Send(EnterBarrier("bar")))
+        b.expectMsg(Send(EnterBarrier("bar")))
+      }
+    }
+
+    "enter barrier with joining node" in {
+      val barrier = getBarrier()
+      val a, b, c = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      b.send(barrier, EnterBarrier("bar"))
+      noMsg(a, b, c)
+      within(1 second) {
+        c.send(barrier, EnterBarrier("bar"))
+        a.expectMsg(Send(EnterBarrier("bar")))
+        b.expectMsg(Send(EnterBarrier("bar")))
+        c.expectMsg(Send(EnterBarrier("bar")))
+      }
+    }
+
+    "enter barrier with leaving node" in {
+      val barrier = getBarrier()
+      val a, b, c = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      b.send(barrier, EnterBarrier("bar"))
+      barrier ! RemoveClient("a")
+      barrier ! ClientDisconnected("a")
+      noMsg(a, b, c)
+      b.within(1 second) {
+        barrier ! RemoveClient("c")
+        b.expectMsg(Send(EnterBarrier("bar")))
+      }
+      barrier ! ClientDisconnected("c")
+      expectNoMsg(1 second)
+    }
+
+    "leave barrier when last “arrived” is removed" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! RemoveClient("a")
+      b.send(barrier, EnterBarrier("foo"))
+      b.expectMsg(Send(EnterBarrier("foo")))
+    }
+
+    "fail barrier with disconnecing node" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! nodeA
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        barrier ! ClientDisconnected("b")
+      }
+      expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), "b")))
+    }
+
+    "fail barrier with disconnecing node who already arrived" in {
+      val barrier = getBarrier()
+      val a, b, c = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeC = NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      barrier ! nodeA
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeC
+      a.send(barrier, EnterBarrier("bar"))
+      b.send(barrier, EnterBarrier("bar"))
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        barrier ! ClientDisconnected("b")
+      }
+      expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), "b")))
+    }
+
+    "fail when entering wrong barrier" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! nodeA
+      val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeB
+      a.send(barrier, EnterBarrier("bar"))
+      EventFilter[WrongBarrier](occurrences = 1) intercept {
+        b.send(barrier, EnterBarrier("foo"))
+      }
+      expectMsg(Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar", a.ref :: Nil))))
+    }
+
+    "fail barrier after first failure" in {
+      val barrier = getBarrier()
+      val a = TestProbe()
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        barrier ! RemoveClient("a")
+      }
+      expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "no client to remove")))
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      a.send(barrier, EnterBarrier("right"))
+      a.expectMsg(Send(BarrierFailed("right")))
+    }
+
+    "fail after barrier timeout" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeA
+      barrier ! nodeB
+      a.send(barrier, EnterBarrier("right"))
+      EventFilter[BarrierTimeout](occurrences = 1) intercept {
+        expectMsg(7 seconds, Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "right", a.ref :: Nil))))
+      }
+    }
+
+    "fail if a node registers twice" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeA
+      EventFilter[DuplicateNode](occurrences = 1) intercept {
+        barrier ! nodeB
+      }
+      expectMsg(Failed(barrier, DuplicateNode(Data(Set(nodeA), "", Nil), nodeB)))
+    }
+
+    "finally have no failure messages left" in {
+      expectNoMsg(1 second)
+    }
+
+  }
+
+  "A Controller with BarrierCoordinator" must {
+
+    "register clients and remove them" in {
+      val b = getController(1)
+      b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor)
+      expectMsg(Send(Done))
+      b ! Remove("b")
+      b ! Remove("a")
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        b ! Remove("a")
+      }
+    }
+
+    "register clients and disconnect them" in {
+      val b = getController(1)
+      b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor)
+      expectMsg(Send(Done))
+      b ! ClientDisconnected("b")
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        b ! ClientDisconnected("a")
+      }
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        b ! ClientDisconnected("a")
+      }
+    }
+
+    "fail entering barrier when nobody registered" in {
+      val b = getController(0)
+      b ! EnterBarrier("b")
+      expectMsg(Send(BarrierFailed("b")))
+    }
+
+    "enter barrier" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      noMsg(a, b)
+      within(1 second) {
+        b.send(barrier, EnterBarrier("bar"))
+        a.expectMsg(Send(EnterBarrier("bar")))
+        b.expectMsg(Send(EnterBarrier("bar")))
+      }
+    }
+
+    "enter barrier with joining node" in {
+      val barrier = getController(2)
+      val a, b, c = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      c.expectMsg(Send(Done))
+      b.send(barrier, EnterBarrier("bar"))
+      noMsg(a, b, c)
+      within(1 second) {
+        c.send(barrier, EnterBarrier("bar"))
+        a.expectMsg(Send(EnterBarrier("bar")))
+        b.expectMsg(Send(EnterBarrier("bar")))
+        c.expectMsg(Send(EnterBarrier("bar")))
+      }
+    }
+
+    "enter barrier with leaving node" in {
+      val barrier = getController(3)
+      val a, b, c = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      c.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      b.send(barrier, EnterBarrier("bar"))
+      barrier ! Remove("a")
+      barrier ! ClientDisconnected("a")
+      noMsg(a, b, c)
+      b.within(1 second) {
+        barrier ! Remove("c")
+        b.expectMsg(Send(EnterBarrier("bar")))
+      }
+      barrier ! ClientDisconnected("c")
+      expectNoMsg(1 second)
+    }
+
+    "leave barrier when last “arrived” is removed" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! Remove("a")
+      b.send(barrier, EnterBarrier("foo"))
+      b.expectMsg(Send(EnterBarrier("foo")))
+    }
+
+    "fail barrier with disconnecing node" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! nodeA
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! ClientDisconnected("unknown")
+      noMsg(a)
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        barrier ! ClientDisconnected("b")
+      }
+      a.expectMsg(Send(BarrierFailed("bar")))
+    }
+
+    "fail barrier with disconnecing node who already arrived" in {
+      val barrier = getController(3)
+      val a, b, c = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeC = NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      barrier ! nodeA
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeC
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      c.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      b.send(barrier, EnterBarrier("bar"))
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        barrier ! ClientDisconnected("b")
+      }
+      a.expectMsg(Send(BarrierFailed("bar")))
+    }
+
+    "fail when entering wrong barrier" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! nodeA
+      val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeB
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      EventFilter[WrongBarrier](occurrences = 1) intercept {
+        b.send(barrier, EnterBarrier("foo"))
+      }
+      a.expectMsg(Send(BarrierFailed("bar")))
+      b.expectMsg(Send(BarrierFailed("foo")))
+    }
+
+    "not really fail after barrier timeout" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeA
+      barrier ! nodeB
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("right"))
+      EventFilter[BarrierTimeout](occurrences = 1) intercept {
+        Thread.sleep(5000)
+      }
+      b.send(barrier, EnterBarrier("right"))
+      a.expectMsg(Send(EnterBarrier("right")))
+      b.expectMsg(Send(EnterBarrier("right")))
+    }
+
+    "fail if a node registers twice" in {
+      val controller = getController(2)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref)
+      controller ! nodeA
+      EventFilter[DuplicateNode](occurrences = 1) intercept {
+        controller ! nodeB
+      }
+      a.expectMsg(Send(BarrierFailed("initial startup")))
+      b.expectMsg(Send(BarrierFailed("initial startup")))
+    }
+
+    "fail subsequent barriers if a node registers twice" in {
+      val controller = getController(1)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref)
+      controller ! nodeA
+      a.expectMsg(Send(Done))
+      EventFilter[DuplicateNode](occurrences = 1) intercept {
+        controller ! nodeB
+        b.expectMsg(Send(BarrierFailed("initial startup")))
+      }
+      a.send(controller, EnterBarrier("x"))
+      a.expectMsg(Send(BarrierFailed("x")))
+    }
+
+    "finally have no failure messages left" in {
+      expectNoMsg(1 second)
+    }
+
+  }
+
+  private def getController(participants: Int): ActorRef = {
+    system.actorOf(Props(new Actor {
+      val controller = context.actorOf(Props(new Controller(participants)))
+      controller ! GetPort
+      override def supervisorStrategy = OneForOneStrategy() {
+        case x ⇒ testActor ! Failed(controller, x); SupervisorStrategy.Restart
+      }
+      def receive = {
+        case x: Int ⇒ testActor ! controller
+      }
+    }))
+    expectMsgType[ActorRef]
+  }
+
+  /**
+   * Produce a BarrierCoordinator which is supervised with a strategy that
+   * forwards all failures to the testActor.
+   */
+  private def getBarrier(): ActorRef = {
+    system.actorOf(Props(new Actor {
+      val barrier = context.actorOf(Props[BarrierCoordinator])
+      override def supervisorStrategy = OneForOneStrategy() {
+        case x ⇒ testActor ! Failed(barrier, x); SupervisorStrategy.Restart
+      }
+      def receive = {
+        case _ ⇒ sender ! barrier
+      }
+    })) ! ""
+    expectMsgType[ActorRef]
+  }
+
+  private def noMsg(probes: TestProbe*) {
+    expectNoMsg(1 second)
+    probes foreach (_.msgAvailable must be(false))
+  }
+
+}
\ No newline at end of file
diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala
new file mode 100644
index 0000000000..db0e3cfe69
--- /dev/null
+++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala
@@ -0,0 +1,38 @@
+/**
+ *  Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+package akka.remote.testconductor
+
+import akka.testkit.AkkaSpec
+import akka.actor.Props
+import akka.testkit.ImplicitSender
+import akka.remote.testconductor.Controller.NodeInfo
+import akka.actor.AddressFromURIString
+
+object ControllerSpec {
+  val config = """
+    akka.testconductor.barrier-timeout = 5s
+    akka.actor.provider = akka.remote.RemoteActorRefProvider
+    akka.remote.netty.port = 0
+    akka.actor.debug.fsm = on
+    akka.actor.debug.lifecycle = on
+    """
+}
+
+class ControllerSpec extends AkkaSpec(ControllerSpec.config) with ImplicitSender {
+
+  "A Controller" must {
+
+    "publish its nodes" in {
+      val c = system.actorOf(Props(new Controller(1)))
+      c ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor)
+      expectMsg(Send(Done))
+      c ! NodeInfo("b", AddressFromURIString("akka://sys"), testActor)
+      expectMsg(Send(Done))
+      c ! Controller.GetNodes
+      expectMsgType[Iterable[String]].toSet must be(Set("a", "b"))
+    }
+
+  }
+
+}
\ No newline at end of file

From 096025dc6463b11e0d7bea47384196b056cec06a Mon Sep 17 00:00:00 2001
From: Viktor Klang 
Date: Mon, 14 May 2012 11:35:29 +0200
Subject: [PATCH 017/106] Replacing MapBackedSet with newSetFromMap

---
 .../netty/akka/util/HashedWheelTimer.java     |  2 +-
 .../jboss/netty/akka/util/MapBackedSet.java   | 73 -------------------
 2 files changed, 1 insertion(+), 74 deletions(-)
 delete mode 100644 akka-actor/src/main/java/org/jboss/netty/akka/util/MapBackedSet.java

diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java b/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java
index 6e54fa2233..9eba51e53f 100644
--- a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java
@@ -155,7 +155,7 @@ public class HashedWheelTimer implements Timer {
         ticksPerWheel = normalizeTicksPerWheel(ticksPerWheel);
         Set[] wheel = new Set[ticksPerWheel];
         for (int i = 0; i < wheel.length; i ++) {
-            wheel[i] = new MapBackedSet(new ConcurrentIdentityHashMap(16, 0.95f, 4));
+            wheel[i] = Collections.newSetFromMap(new ConcurrentIdentityHashMap(16, 0.95f, 4));
         }
         return wheel;
     }
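
For reference, a hedged Scala sketch (not part of this commit) of the JDK replacement: Collections.newSetFromMap, available since Java 6, provides the same Map-backed Set behaviour that the removed MapBackedSet implemented by hand.

{{{
// Standalone sketch: a concurrent Set backed by a ConcurrentHashMap via the JDK.
import java.util.Collections
import java.util.concurrent.ConcurrentHashMap

object NewSetFromMapSketch extends App {
  val set: java.util.Set[String] =
    Collections.newSetFromMap(new ConcurrentHashMap[String, java.lang.Boolean]())
  set.add("a")
  println(set.contains("a")) // true
  println(set.contains("b")) // false
}
}}}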
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/MapBackedSet.java b/akka-actor/src/main/java/org/jboss/netty/akka/util/MapBackedSet.java
deleted file mode 100644
index 2bc1bc25e0..0000000000
--- a/akka-actor/src/main/java/org/jboss/netty/akka/util/MapBackedSet.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2009 Red Hat, Inc.
- *
- * Red Hat licenses this file to you under the Apache License, version 2.0
- * (the "License"); you may not use this file except in compliance with the
- * License.  You may obtain a copy of the License at:
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.jboss.netty.akka.util;
-
-import java.io.Serializable;
-import java.util.AbstractSet;
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * A {@link java.util.Map}-backed {@link java.util.Set}.
- * 
- * @author The Netty Project
- * @author Trustin Lee
- * 
- * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $
- */
-final class MapBackedSet extends AbstractSet implements Serializable {
-
-  private static final long serialVersionUID = -6761513279741915432L;
-
-  private final Map map;
-
-  /**
-   * Creates a new instance which wraps the specified {@code map}.
-   */
-  MapBackedSet(Map map) {
-    this.map = map;
-  }
-
-  @Override
-  public int size() {
-    return map.size();
-  }
-
-  @Override
-  public boolean contains(Object o) {
-    return map.containsKey(o);
-  }
-
-  @Override
-  public boolean add(E o) {
-    return map.put(o, Boolean.TRUE) == null;
-  }
-
-  @Override
-  public boolean remove(Object o) {
-    return map.remove(o) != null;
-  }
-
-  @Override
-  public void clear() {
-    map.clear();
-  }
-
-  @Override
-  public Iterator iterator() {
-    return map.keySet().iterator();
-  }
-}

From 14dc08b75a2c08f83b5357bba285150dcf7896c1 Mon Sep 17 00:00:00 2001
From: Patrik Nordwall 
Date: Mon, 14 May 2012 14:26:32 +0200
Subject: [PATCH 018/106] Added failing DirectRoutedRemoteActorMultiJvmSpec.
 See #2069

---
 .../remote/SimpleRemoteMultiJvmSpec.scala     | 82 +++++++++++++++++
 .../DirectRoutedRemoteActorMultiJvmSpec.scala | 90 +++++++++++++++++++
 2 files changed, 172 insertions(+)
 create mode 100644 akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala
 create mode 100644 akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala

diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala
new file mode 100644
index 0000000000..9209deb9a5
--- /dev/null
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala
@@ -0,0 +1,82 @@
+/**
+ *  Copyright (C) 2009-2011 Typesafe Inc. 
+ */
+package akka.remote
+
+import akka.actor.Actor
+import akka.actor.ActorRef
+import akka.actor.Props
+import akka.dispatch.Await
+import akka.pattern.ask
+import akka.remote.testconductor.TestConductor
+import akka.testkit.DefaultTimeout
+import akka.testkit.ImplicitSender
+import akka.util.Duration
+import com.typesafe.config.ConfigFactory
+
+object SimpleRemoteMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec {
+  override def NrOfNodes = 2
+
+  class SomeActor extends Actor with Serializable {
+    def receive = {
+      case "identify" ⇒ sender ! self
+    }
+  }
+
+  override def commonConfig = ConfigFactory.parseString("""
+      akka {
+        loglevel = INFO
+        actor {
+          provider = akka.remote.RemoteActorRefProvider
+          debug {
+            receive = on
+            fsm = on
+          }
+        }
+        remote {
+          transport = akka.remote.testconductor.TestConductorTransport
+          log-received-messages = on
+          log-sent-messages = on
+        }
+        testconductor {
+          host = localhost
+          port = 4712
+        }
+      }""")
+
+  def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n))
+}
+
+class SimpleRemoteMultiJvmNode1 extends AkkaRemoteSpec(SimpleRemoteMultiJvmSpec.nameConfig(0)) {
+  import SimpleRemoteMultiJvmSpec._
+  val nodes = NrOfNodes
+  val tc = TestConductor(system)
+
+  "lookup remote actor" in {
+    Await.result(tc.startController(2), Duration.Inf)
+    system.actorOf(Props[SomeActor], "service-hello")
+    tc.enter("begin", "done")
+  }
+
+}
+
+class SimpleRemoteMultiJvmNode2 extends AkkaRemoteSpec(SimpleRemoteMultiJvmSpec.nameConfig(1))
+  with ImplicitSender with DefaultTimeout {
+
+  import SimpleRemoteMultiJvmSpec._
+  val nodes = NrOfNodes
+  val tc = TestConductor(system)
+
+  "lookup remote actor" in {
+    Await.result(tc.startClient(4712), Duration.Inf)
+    tc.enter("begin")
+    log.info("### begin ok")
+    val actor = system.actorFor("akka://" + akkaSpec(0) + "/user/service-hello")
+    log.info("### actor lookup " + akkaSpec(0) + "/service-hello")
+    actor.isInstanceOf[RemoteActorRef] must be(true)
+    Await.result(actor ? "identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0))
+    log.info("### actor ok")
+    tc.enter("done")
+  }
+
+}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala
new file mode 100644
index 0000000000..d44beff605
--- /dev/null
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala
@@ -0,0 +1,90 @@
+/**
+ *  Copyright (C) 2009-2011 Typesafe Inc. 
+ */
+package akka.remote.router
+
+import akka.actor.{ Actor, ActorRef, Props }
+import akka.remote.AkkaRemoteSpec
+import akka.remote.AbstractRemoteActorMultiJvmSpec
+import akka.remote.RemoteActorRef
+import akka.remote.testconductor.TestConductor
+import akka.testkit._
+import akka.dispatch.Await
+import akka.pattern.ask
+import akka.util.Duration
+
+object DirectRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec {
+  override def NrOfNodes = 2
+
+  class SomeActor extends Actor with Serializable {
+    def receive = {
+      case "identify" ⇒ sender ! self
+    }
+  }
+
+  import com.typesafe.config.ConfigFactory
+  override def commonConfig = ConfigFactory.parseString("""
+      akka {
+        loglevel = INFO
+        actor {
+          provider = akka.remote.RemoteActorRefProvider
+          deployment {
+            /service-hello.remote = %s
+          }
+          debug {
+            receive = on
+            fsm = on
+          }
+        }
+        remote {
+          transport = akka.remote.testconductor.TestConductorTransport
+          log-received-messages = on
+          log-sent-messages = on
+        }
+        testconductor {
+          host = localhost
+          port = 4712
+        }
+      }""" format akkaURIs(1))
+
+  def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n))
+}
+
+class DirectRoutedRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(DirectRoutedRemoteActorMultiJvmSpec.nameConfig(0)) {
+  import DirectRoutedRemoteActorMultiJvmSpec._
+  val nodes = NrOfNodes
+  val tc = TestConductor(system)
+
+  "A new remote actor configured with a Direct router" must {
+    "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in {
+      Await.result(tc.startController(2), Duration.Inf)
+      tc.enter("begin", "done")
+    }
+  }
+
+}
+
+class DirectRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(DirectRoutedRemoteActorMultiJvmSpec.nameConfig(1))
+  with ImplicitSender with DefaultTimeout {
+
+  import DirectRoutedRemoteActorMultiJvmSpec._
+  val nodes = NrOfNodes
+  val tc = TestConductor(system)
+
+  "A new remote actor configured with a Direct router" must {
+    "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in {
+      Await.result(tc.startClient(4712), Duration.Inf)
+      tc.enter("begin")
+
+      val actor = system.actorOf(Props[SomeActor], "service-hello")
+      actor.isInstanceOf[RemoteActorRef] must be(true)
+
+      Await.result(actor ? "identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0))
+
+      // shut down the actor before we let the other node(s) shut down so we don't try to send
+      // "Terminate" to a shut down node
+      system.stop(actor)
+      tc.enter("done")
+    }
+  }
+}
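
Both specs above follow the same conductor/player choreography: one JVM starts the TestConductor controller, the other connects as a client, and the two synchronize through named barriers — enter appears to block at each listed barrier until every participating node has entered it. A condensed sketch of just that choreography (test names and comments are illustrative; imports and configuration as in the specs above):

    // node 1: conductor
    "the scenario" in {
      Await.result(tc.startController(2), Duration.Inf) // wait until both nodes have registered
      // ... create the actors this node exposes to the other node ...
      tc.enter("begin", "done") // pass "begin" together with node 2, then wait at "done"
    }

    // node 2: player
    "the scenario" in {
      Await.result(tc.startClient(4712), Duration.Inf)  // port from akka.testconductor.port
      tc.enter("begin") // released once node 1 has also entered "begin"
      // ... look up or deploy the remote actor under test and run the assertions ...
      tc.enter("done")  // lets node 1 pass its "done" barrier and finish
    }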

From 114f1c995b6df7f6a9ccfe1e0cc4b5da9e74cd86 Mon Sep 17 00:00:00 2001
From: Viktor Klang 
Date: Mon, 14 May 2012 18:09:36 +0200
Subject: [PATCH 019/106] Adding docs and privatizing parts of Actor.scala

---
 .../src/main/scala/akka/actor/Actor.scala     | 117 +++++++++++++++---
 1 file changed, 100 insertions(+), 17 deletions(-)

diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index 2499d42f10..3e233a2056 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -12,8 +12,9 @@ import java.util.regex.Pattern
 
 /**
  * Marker trait to show which Messages are automatically handled by Akka
+ * Internal use only
  */
-trait AutoReceivedMessage extends Serializable
+private[akka] trait AutoReceivedMessage extends Serializable
 
 /**
  * Marker trait to indicate that a message might be potentially harmful,
@@ -26,9 +27,16 @@ trait PossiblyHarmful
  */
 trait NoSerializationVerificationNeeded
 
-case class Failed(cause: Throwable) extends AutoReceivedMessage with PossiblyHarmful
+/**
+ * Internal use only
+ */
+private[akka] case class Failed(cause: Throwable) extends AutoReceivedMessage with PossiblyHarmful
 
 abstract class PoisonPill extends AutoReceivedMessage with PossiblyHarmful
+
+/**
+ * A message all Actors will understand that, when processed, will terminate the Actor permanently.
+ */
 case object PoisonPill extends PoisonPill {
   /**
    * Java API: get the singleton instance
@@ -37,6 +45,10 @@ case object PoisonPill extends PoisonPill {
 }
 
 abstract class Kill extends AutoReceivedMessage with PossiblyHarmful
+/**
+ * A message all Actors will understand that, when processed, will make the Actor throw an ActorKilledException,
+ * which will trigger supervision.
+ */
 case object Kill extends Kill {
   /**
    * Java API: get the singleton instance
@@ -44,9 +56,17 @@ case object Kill extends Kill {
   def getInstance = this
 }
 
+/**
+ * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated.
+ */
 case class Terminated(@BeanProperty actor: ActorRef) extends PossiblyHarmful
 
 abstract class ReceiveTimeout extends PossiblyHarmful
+
+/**
+ * When using ActorContext.setReceiveTimeout, the singleton instance of ReceiveTimeout will be sent
+ * to the Actor when no message has been received for the specified duration.
+ */
 case object ReceiveTimeout extends ReceiveTimeout {
   /**
    * Java API: get the singleton instance
@@ -60,49 +80,85 @@ case object ReceiveTimeout extends ReceiveTimeout {
  * message is delivered by active routing of the various actors involved.
  */
 sealed trait SelectionPath extends AutoReceivedMessage
-case class SelectChildName(name: String, next: Any) extends SelectionPath
-case class SelectChildPattern(pattern: Pattern, next: Any) extends SelectionPath
-case class SelectParent(next: Any) extends SelectionPath
 
-// Exceptions for Actors
+/**
+ * Internal use only
+ */
+private[akka] case class SelectChildName(name: String, next: Any) extends SelectionPath
+
+/**
+ * Internal use only
+ */
+private[akka] case class SelectChildPattern(pattern: Pattern, next: Any) extends SelectionPath
+
+/**
+ * Internal use only
+ */
+private[akka] case class SelectParent(next: Any) extends SelectionPath
+
+/**
+ * IllegalActorStateException is thrown when a core invariant in the Actor implementation has been violated.
+ * For instance, if you try to create an Actor whose class does not extend Actor.
+ */
 class IllegalActorStateException private[akka] (message: String, cause: Throwable = null)
   extends AkkaException(message, cause) {
-  def this(msg: String) = this(msg, null);
+  def this(msg: String) = this(msg, null)
 }
 
+/**
+ * ActorKilledException is thrown when an Actor receives the akka.actor.Kill message
+ */
 class ActorKilledException private[akka] (message: String, cause: Throwable)
   extends AkkaException(message, cause)
   with NoStackTrace {
-  def this(msg: String) = this(msg, null);
+  def this(msg: String) = this(msg, null)
 }
 
+/**
+ * An InvalidActorNameException is thrown when you try to convert something, usually a String, to an Actor name
+ * that does not pass validation.
+ */
 case class InvalidActorNameException(message: String) extends AkkaException(message)
 
+/**
+ * An ActorInitializationException is thrown when the initialization logic for an Actor fails.
+ */
 case class ActorInitializationException private[akka] (actor: ActorRef, message: String, cause: Throwable = null)
   extends AkkaException(message, cause)
   with NoStackTrace {
-  def this(msg: String) = this(null, msg, null);
+  def this(msg: String) = this(null, msg, null)
 }
 
+//FIXME: Only used by gracefulStop; we should remove this if possible
 class ActorTimeoutException private[akka] (message: String, cause: Throwable = null)
   extends AkkaException(message, cause) {
-  def this(msg: String) = this(msg, null);
+  def this(msg: String) = this(msg, null)
 }
 
+/**
+ * InvalidMessageException is thrown when an invalid message is sent to an Actor.
+ * Currently only "null" messages are treated as invalid, but further cases
+ * may be added in the future.
+ */
 class InvalidMessageException private[akka] (message: String, cause: Throwable = null)
   extends AkkaException(message, cause)
   with NoStackTrace {
-  def this(msg: String) = this(msg, null);
+  def this(msg: String) = this(msg, null)
 }
 
+/**
+ * A DeathPactException is thrown by an Actor that receives a Terminated(someActor) message
+ * that it doesn't handle itself, effectively crashing the Actor and escalating to the supervisor.
+ */
 case class DeathPactException private[akka] (dead: ActorRef)
   extends AkkaException("Monitored actor [" + dead + "] terminated")
   with NoStackTrace
 
-// must not pass InterruptedException to other threads
-case class ActorInterruptedException private[akka] (cause: Throwable)
-  extends AkkaException(cause.getMessage, cause)
-  with NoStackTrace
+/**
+ * When an InterruptedException is thrown inside an Actor, it is wrapped as an ActorInterruptedException so as to
+ * avoid cascading interrupts to threads other than the originally interrupted one.
+ */
+case class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaException(cause.getMessage, cause) with NoStackTrace
 
 /**
  * This message is published to the EventStream whenever an Actor receives a message it doesn't understand
@@ -115,18 +171,42 @@ case class UnhandledMessage(@BeanProperty message: Any, @BeanProperty sender: Ac
  */
 object Status {
   sealed trait Status extends Serializable
+
+  /**
+   * This class/message type is preferably used to indicate success of some operation performed.
+   */
   case class Success(status: AnyRef) extends Status
+
+  /**
+   * This class/message type is preferably used to indicate failure of some operation performed.
+   */
   case class Failure(cause: Throwable) extends Status
 }
 
+/**
+ * Mix ActorLogging into your Actor to easily obtain a reference to a logger, which is available under the name "log".
+ *
+ * {{{
+ * class MyActor extends Actor with ActorLogging {
+ *   def receive = {
+ *     case "pigdog" => log.info("We've got yet another pigdog on our hands")
+ *   }
+ * }
+ * }}}
+ */
 trait ActorLogging { this: Actor ⇒
   val log = akka.event.Logging(context.system, this)
 }
 
 object Actor {
-
+  /**
+   * Type alias representing a Receive-expression for Akka Actors.
+   */
   type Receive = PartialFunction[Any, Unit]
 
+  /**
+   * emptyBehavior is a Receive-expression that matches no messages at all, ever.
+   */
   object emptyBehavior extends Receive {
     def isDefinedAt(x: Any) = false
     def apply(x: Any) = throw new UnsupportedOperationException("Empty behavior apply()")
@@ -312,7 +392,7 @@ trait Actor {
    * For Akka internal use only.
    */
   private[akka] final def apply(msg: Any) = {
-    // TODO would it be more efficient to assume that most messages are matched and catch MatchError instead of using isDefinedAt?
+    //FIXME replace with behaviorStack.head.applyOrElse(msg, unhandled) + "-optimize"
     val head = behaviorStack.head
     if (head.isDefinedAt(msg)) head.apply(msg) else unhandled(msg)
   }
@@ -339,6 +419,9 @@ trait Actor {
   private[akka] def clearBehaviorStack(): Unit =
     behaviorStack = Stack.empty[Receive].push(behaviorStack.last)
 
+  /**
+   * For Akka internal use only.
+   */
   private var behaviorStack: Stack[Receive] = Stack.empty[Receive].push(receive)
 }
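
Taken together, the auto-received and system messages documented above are used from application code roughly like this; a minimal sketch, not part of the patch — the Watcher class, the message strings, and the 5-second timeout are illustrative:

    import akka.actor._
    import akka.util.duration._

    class Watcher(worker: ActorRef) extends Actor with ActorLogging {
      context.watch(worker)                // Death Watch: we will receive Terminated(worker) when it stops
      context.setReceiveTimeout(5 seconds) // ReceiveTimeout is sent after 5 seconds without any message

      def receive = {
        case "stop"               ⇒ worker ! PoisonPill // processed in mailbox order, then the worker terminates
        case "crash"              ⇒ worker ! Kill       // the worker throws ActorKilledException, triggering supervision
        case Terminated(`worker`) ⇒ log.info("worker has terminated")
        case ReceiveTimeout       ⇒ log.info("no message received within the timeout")
        case unknown              ⇒ sender ! Status.Failure(new IllegalArgumentException("unexpected: " + unknown))
      }
    }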
 

From 6d430122e951d30410de84e927ef0654adbe1f8a Mon Sep 17 00:00:00 2001
From: Viktor Klang 
Date: Tue, 15 May 2012 12:31:38 +0200
Subject: [PATCH 020/106] Removing com.eaio.Uuid and replacing its usage with
 java.util.UUID

---
 .../scala/akka/actor/ActorLifeCycleSpec.scala |   3 +-
 .../src/main/java/com/eaio/util/lang/Hex.java | 215 ----------
 .../java/com/eaio/uuid/MACAddressParser.java  | 116 ------
 .../src/main/java/com/eaio/uuid/UUID.java     | 308 ---------------
 .../src/main/java/com/eaio/uuid/UUIDGen.java  | 368 ------------------
 .../src/main/scala/akka/AkkaException.scala   |   6 +-
 akka-actor/src/main/scala/akka/actor/IO.scala |   7 +-
 .../src/main/scala/akka/actor/package.scala   |  14 +-
 8 files changed, 8 insertions(+), 1029 deletions(-)
 delete mode 100644 akka-actor/src/main/java/com/eaio/util/lang/Hex.java
 delete mode 100644 akka-actor/src/main/java/com/eaio/uuid/MACAddressParser.java
 delete mode 100644 akka-actor/src/main/java/com/eaio/uuid/UUID.java
 delete mode 100644 akka-actor/src/main/java/com/eaio/uuid/UUIDGen.java
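
The mechanical part of the migration is visible in the hunks below: call sites that used the newUuid helper from the akka.actor package object (backed by com.eaio.uuid.UUID) switch to java.util.UUID, typically via an import alias so the call sites keep their shape. A minimal sketch of the replacement (the UuidMigration object is illustrative):

    import java.util.UUID.{ randomUUID ⇒ newUuid }

    object UuidMigration {
      // before: val id = newUuid().toString  // com.eaio.uuid.UUID via akka.actor.newUuid
      // after:  the JDK UUID, aliased so the call site stays nearly identical
      val id: String = newUuid.toString
    }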

diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
index 16b4055d0e..d87aaaaee6 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
@@ -13,6 +13,7 @@ import akka.util.duration._
 import java.util.concurrent.atomic._
 import akka.dispatch.Await
 import akka.pattern.ask
+import java.util.UUID.{ randomUUID ⇒ newUuid }
 
 object ActorLifeCycleSpec {
 
@@ -35,7 +36,7 @@ class ActorLifeCycleSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitS
 
     "invoke preRestart, preStart, postRestart when using OneForOneStrategy" in {
       filterException[ActorKilledException] {
-        val id = newUuid().toString
+        val id = newUuid.toString
         val supervisor = system.actorOf(Props(new Supervisor(
           OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception])))))
         val gen = new AtomicInteger(0)
diff --git a/akka-actor/src/main/java/com/eaio/util/lang/Hex.java b/akka-actor/src/main/java/com/eaio/util/lang/Hex.java
deleted file mode 100644
index 7794059517..0000000000
--- a/akka-actor/src/main/java/com/eaio/util/lang/Hex.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Hex.java
- *
- * Created 04.07.2003.
- *
- * eaio: UUID - an implementation of the UUID specification Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com)
- * http://eaio.com.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
- * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
- * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
- * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-package com.eaio.util.lang;
-
-import java.io.IOException;
-
-/**
- * Number-to-hexadecimal and hexadecimal-to-number conversions.
- *
- * @see UUID
- * @author Johann Burkard
- * @version $Id: Hex.java 1888 2009-03-15 12:43:24Z johann $
- */
-public final class Hex {
-
-    /**
-     * No instances needed.
-     */
-    private Hex() {
-        super();
-    }
-
-    private static final char[] DIGITS = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e',
-            'f' };
-
-    /**
-     * Turns a short into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the integer
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, short in) {
-        return append(a, (long) in, 4);
-    }
-
-    /**
-     * Turns a short into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the integer
-     * @param length the number of octets to produce
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, short in, int length) {
-        return append(a, (long) in, length);
-    }
-
-    /**
-     * Turns an int into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the integer
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, int in) {
-        return append(a, (long) in, 8);
-    }
-
-    /**
-     * Turns an int into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the integer
-     * @param length the number of octets to produce
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, int in, int length) {
-        return append(a, (long) in, length);
-    }
-
-    /**
-     * Turns a long into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the long
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, long in) {
-        return append(a, in, 16);
-    }
-
-    /**
-     * Turns a long into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the long
-     * @param length the number of octets to produce
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, long in, int length) {
-        try {
-            int lim = (length << 2) - 4;
-            while (lim >= 0) {
-                a.append(DIGITS[(byte) (in >> lim) & 0x0f]);
-                lim -= 4;
-            }
-        }
-        catch (IOException ex) {
-            // Bla
-        }
-        return a;
-    }
-
-    /**
-     * Turns a byte array into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param bytes the byte array
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, byte[] bytes) {
-        try {
-            for (byte b : bytes) {
-                a.append(DIGITS[(byte) ((b & 0xF0) >> 4)]);
-                a.append(DIGITS[(byte) (b & 0x0F)]);
-            }
-        }
-        catch (IOException ex) {
-            // Bla
-        }
-        return a;
-    }
-
-    /**
-     * Parses a long from a hex encoded number. This method will skip all characters that are not 0-9,
-     * A-F and a-f.
-     * 

- * Returns 0 if the {@link CharSequence} does not contain any interesting characters. - * - * @param s the {@link CharSequence} to extract a long from, may not be null - * @return a long - * @throws NullPointerException if the {@link CharSequence} is null - */ - public static long parseLong(CharSequence s) { - long out = 0; - byte shifts = 0; - char c; - for (int i = 0; i < s.length() && shifts < 16; i++) { - c = s.charAt(i); - if ((c > 47) && (c < 58)) { - ++shifts; - out <<= 4; - out |= c - 48; - } - else if ((c > 64) && (c < 71)) { - ++shifts; - out <<= 4; - out |= c - 55; - } - else if ((c > 96) && (c < 103)) { - ++shifts; - out <<= 4; - out |= c - 87; - } - } - return out; - } - - /** - * Parses a short from a hex encoded number. This method will skip all characters that are not 0-9, - * A-F and a-f. - *

- * Returns 0 if the {@link CharSequence} does not contain any interesting characters. - * - * @param s the {@link CharSequence} to extract a short from, may not be null - * @return a short - * @throws NullPointerException if the {@link CharSequence} is null - */ - public static short parseShort(String s) { - short out = 0; - byte shifts = 0; - char c; - for (int i = 0; i < s.length() && shifts < 4; i++) { - c = s.charAt(i); - if ((c > 47) && (c < 58)) { - ++shifts; - out <<= 4; - out |= c - 48; - } - else if ((c > 64) && (c < 71)) { - ++shifts; - out <<= 4; - out |= c - 55; - } - else if ((c > 96) && (c < 103)) { - ++shifts; - out <<= 4; - out |= c - 87; - } - } - return out; - } - -} diff --git a/akka-actor/src/main/java/com/eaio/uuid/MACAddressParser.java b/akka-actor/src/main/java/com/eaio/uuid/MACAddressParser.java deleted file mode 100644 index c077147470..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/MACAddressParser.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * MACAddressParserTest.java - * - * Created 30.01.2006. - * - * eaio: UUID - an implementation of the UUID specification - * Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com) http://eaio.com. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN - * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ -package com.eaio.uuid; - -/** - * The MAC address parser attempts to find the following patterns: - *

    - *
  • .{1,2}:.{1,2}:.{1,2}:.{1,2}:.{1,2}:.{1,2}
  • - *
  • .{1,2}-.{1,2}-.{1,2}-.{1,2}-.{1,2}-.{1,2}
  • - *
- * - * @see UUID - * @author Johann Burkard - * @version $Id: MACAddressParser.java 1888 2009-03-15 12:43:24Z johann $ - */ -class MACAddressParser { - - /** - * No instances needed. - */ - private MACAddressParser() { - super(); - } - - /** - * Attempts to find a pattern in the given String. - * - * @param in the String, may not be null - * @return the substring that matches this pattern or null - */ - static String parse(String in) { - - String out = in; - - // lanscan - - int hexStart = out.indexOf("0x"); - if (hexStart != -1 && out.indexOf("ETHER") != -1) { - int hexEnd = out.indexOf(' ', hexStart); - if (hexEnd > hexStart + 2) { - out = out.substring(hexStart, hexEnd); - } - } - - else { - - int octets = 0; - int lastIndex, old, end; - - if (out.indexOf('-') > -1) { - out = out.replace('-', ':'); - } - - lastIndex = out.lastIndexOf(':'); - - if (lastIndex > out.length() - 2) { - out = null; - } - else { - - end = Math.min(out.length(), lastIndex + 3); - - ++octets; - old = lastIndex; - while (octets != 5 && lastIndex != -1 && lastIndex > 1) { - lastIndex = out.lastIndexOf(':', --lastIndex); - if (old - lastIndex == 3 || old - lastIndex == 2) { - ++octets; - old = lastIndex; - } - } - - if (octets == 5 && lastIndex > 1) { - out = out.substring(lastIndex - 2, end).trim(); - } - else { - out = null; - } - - } - - } - - if (out != null && out.startsWith("0x")) { - out = out.substring(2); - } - - return out; - } - -} diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUID.java b/akka-actor/src/main/java/com/eaio/uuid/UUID.java deleted file mode 100644 index a578a68c6d..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/UUID.java +++ /dev/null @@ -1,308 +0,0 @@ -/* - * UUID.java - * - * Created 07.02.2003 - * - * eaio: UUID - an implementation of the UUID specification - * Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com) http://eaio.com. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN - * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ -package com.eaio.uuid; - -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.io.Serializable; - -import com.eaio.util.lang.Hex; - -/** - * Creates UUIDs according to the DCE Universal Token Identifier specification. - *

- * All you need to know: - *

- * UUID u = new UUID();
- * 
- * - * @see - * http://www.opengroup.org/onlinepubs/9629399/apdxa.htm - * - * @see - * http://www.uddi.org/pubs/draft-leach-uuids-guids-01.txt - * - * @see UUID - * @author Johann Burkard - * @version $Id: UUID.java 1888 2009-03-15 12:43:24Z johann $ - */ -public class UUID implements Comparable, Serializable, Cloneable { - - /** - * Hasn't ever changed between versions. - */ - static final long serialVersionUID = 7435962790062944603L; - - /** - * The time field of the UUID. - * - * @serial - */ - public long time; - - /** - * The clock sequence and node field of the UUID. - * - * @serial - */ - public long clockSeqAndNode; - - /** - * Constructor for UUID. Constructs a new, unique UUID. - * - * @see UUIDGen#newTime() - * @see UUIDGen#getClockSeqAndNode() - */ - public UUID() { - this(UUIDGen.newTime(), UUIDGen.getClockSeqAndNode()); - } - - /** - * Constructor for UUID. Constructs a UUID from two long values. - * - * @param time the upper 64 bits - * @param clockSeqAndNode the lower 64 bits - */ - public UUID(long time, long clockSeqAndNode) { - this.time = time; - this.clockSeqAndNode = clockSeqAndNode; - } - - /** - * Copy constructor for UUID. Values of the given UUID are copied. - * - * @param u the UUID, may not be null - */ - public UUID(UUID u) { - this(u.time, u.clockSeqAndNode); - } - - /** - * Parses a textual representation of a UUID. - *

- * No validation is performed. If the {@link CharSequence} is shorter than 36 characters, - * {@link ArrayIndexOutOfBoundsException}s will be thrown. - * - * @param s the {@link CharSequence}, may not be null - */ - public UUID(CharSequence s) { - this(Hex.parseLong(s.subSequence(0, 18)), Hex.parseLong(s.subSequence( - 19, 36))); - } - - /** - * Compares this UUID to another Object. Throws a {@link ClassCastException} if - * the other Object is not an instance of the UUID class. Returns a value - * smaller than zero if the other UUID is "larger" than this UUID and a value - * larger than zero if the other UUID is "smaller" than this UUID. - * - * @param t the other UUID, may not be null - * @return a value < 0, 0 or a value > 0 - * @see java.lang.Comparable#compareTo(java.lang.Object) - * @throws ClassCastException - */ - public int compareTo(UUID t) { - if (this == t) { - return 0; - } - if (time > t.time) { - return 1; - } - if (time < t.time) { - return -1; - } - if (clockSeqAndNode > t.clockSeqAndNode) { - return 1; - } - if (clockSeqAndNode < t.clockSeqAndNode) { - return -1; - } - return 0; - } - - /** - * Tweaked Serialization routine. - * - * @param out the ObjectOutputStream - * @throws IOException - */ - private void writeObject(ObjectOutputStream out) throws IOException { - out.writeLong(time); - out.writeLong(clockSeqAndNode); - } - - /** - * Tweaked Serialization routine. - * - * @param in the ObjectInputStream - * @throws IOException - */ - private void readObject(ObjectInputStream in) throws IOException { - time = in.readLong(); - clockSeqAndNode = in.readLong(); - } - - /** - * Returns this UUID as a String. - * - * @return a String, never null - * @see java.lang.Object#toString() - * @see #toAppendable(Appendable) - */ - @Override - public final String toString() { - return toAppendable(null).toString(); - } - - /** - * Appends a String representation of this to the given {@link StringBuffer} or - * creates a new one if none is given. - * - * @param in the StringBuffer to append to, may be null - * @return a StringBuffer, never null - * @see #toAppendable(Appendable) - */ - public StringBuffer toStringBuffer(StringBuffer in) { - StringBuffer out = in; - if (out == null) { - out = new StringBuffer(36); - } - else { - out.ensureCapacity(out.length() + 36); - } - return (StringBuffer) toAppendable(out); - } - - /** - * Appends a String representation of this object to the given {@link Appendable} object. - *

- * For reasons I'll probably never understand, Sun has decided to have a number of I/O classes implement - * Appendable which forced them to destroy an otherwise nice and simple interface with {@link IOException}s. - *

- * I decided to ignore any possible IOExceptions in this method. - * - * @param a the Appendable object, may be null - * @return an Appendable object, defaults to a {@link StringBuilder} if a is null - */ - public Appendable toAppendable(Appendable a) { - Appendable out = a; - if (out == null) { - out = new StringBuilder(36); - } - try { - Hex.append(out, (int) (time >> 32)).append('-'); - Hex.append(out, (short) (time >> 16)).append('-'); - Hex.append(out, (short) time).append('-'); - Hex.append(out, (short) (clockSeqAndNode >> 48)).append('-'); - Hex.append(out, clockSeqAndNode, 12); - } - catch (IOException ex) { - // What were they thinking? - } - return out; - } - - /** - * Returns a hash code of this UUID. The hash code is calculated by XOR'ing the - * upper 32 bits of the time and clockSeqAndNode fields and the lower 32 bits of - * the time and clockSeqAndNode fields. - * - * @return an int representing the hash code - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - return (int) ((time >> 32) ^ time ^ (clockSeqAndNode >> 32) ^ clockSeqAndNode); - } - - /** - * Clones this UUID. - * - * @return a new UUID with identical values, never null - */ - @Override - public Object clone() { - try { - return super.clone(); - } - catch (CloneNotSupportedException ex) { - // One of Sun's most epic fails. - return null; - } - } - - /** - * Returns the time field of the UUID (upper 64 bits). - * - * @return the time field - */ - public final long getTime() { - return time; - } - - /** - * Returns the clock and node field of the UUID (lower 64 bits). - * - * @return the clockSeqAndNode field - */ - public final long getClockSeqAndNode() { - return clockSeqAndNode; - } - - /** - * Compares two Objects for equality. - * - * @see java.lang.Object#equals(Object) - * @param obj the Object to compare this UUID with, may be null - * @return true if the other Object is equal to this UUID, - * false if not - */ - @Override - public boolean equals(Object obj) { - if (!(obj instanceof UUID)) { - return false; - } - return compareTo((UUID) obj) == 0; - } - - /** - * Returns the nil UUID (a UUID whose values are both set to zero). - *

- * Starting with version 2.0, this method does return a new UUID instance every - * time it is called. Earlier versions returned one instance. This has now been - * changed because this UUID has public, non-final instance fields. Returning a - * new instance is therefore more safe. - * - * @return a nil UUID, never null - */ - public static UUID nilUUID() { - return new UUID(0, 0); - } - -} diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUIDGen.java b/akka-actor/src/main/java/com/eaio/uuid/UUIDGen.java deleted file mode 100644 index fb60e1727a..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/UUIDGen.java +++ /dev/null @@ -1,368 +0,0 @@ -/* - * UUIDGen.java - * - * Created on 09.08.2003. - * - * eaio: UUID - an implementation of the UUID specification - * Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com) http://eaio.com. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN - * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ -package com.eaio.uuid; - -import java.io.BufferedReader; -import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.net.InetAddress; -import java.net.InterfaceAddress; -import java.net.NetworkInterface; -import java.net.SocketException; -import java.net.UnknownHostException; -import java.util.Enumeration; -import java.util.concurrent.atomic.AtomicLong; - -import com.eaio.util.lang.Hex; - -/** - * This class contains methods to generate UUID fields. These methods have been - * refactored out of {@link com.eaio.uuid.UUID}. - *

- * Starting with version 2, this implementation tries to obtain the MAC address - * of the network card. Under Microsoft Windows, the ifconfig - * command is used which may pop up a command window in Java Virtual Machines - * prior to 1.4 once this class is initialized. The command window is closed - * automatically. - *

- * The MAC address code has been tested extensively in Microsoft Windows, - * Linux, Solaris 8, HP-UX 11, but should work in MacOS X and BSDs, too. - *

- * If you use JDK 6 or later, the code in {@link InterfaceAddress} will be used. - * - * @see UUID - * @author Johann Burkard - * @version $Id: UUIDGen.java 2914 2010-04-23 11:35:00Z johann $ - * @see com.eaio.uuid.UUID - */ -public final class UUIDGen { - - /** - * No instances needed. - */ - private UUIDGen() { - super(); - } - - /** - * The last time value. Used to remove duplicate UUIDs. - */ - private final static AtomicLong lastTime = new AtomicLong(Long.MIN_VALUE); - - /** - * The cached MAC address. - */ - private static String macAddress = null; - - /** - * The current clock and node value. - */ - private static long clockSeqAndNode = 0x8000000000000000L; - - static { - - try { - Class.forName("java.net.InterfaceAddress"); - macAddress = Class.forName( - "com.eaio.uuid.UUIDGen$HardwareAddressLookup").newInstance().toString(); - } - catch (ExceptionInInitializerError err) { - // Ignored. - } - catch (ClassNotFoundException ex) { - // Ignored. - } - catch (LinkageError err) { - // Ignored. - } - catch (IllegalAccessException ex) { - // Ignored. - } - catch (InstantiationException ex) { - // Ignored. - } - catch (SecurityException ex) { - // Ignored. - } - - if (macAddress == null) { - - Process p = null; - BufferedReader in = null; - - try { - String osname = System.getProperty("os.name", ""); - - if (osname.startsWith("Windows")) { - p = Runtime.getRuntime().exec( - new String[] { "ipconfig", "/all" }, null); - } - // Solaris code must appear before the generic code - else if (osname.startsWith("Solaris") - || osname.startsWith("SunOS")) { - String hostName = getFirstLineOfCommand( - "uname", "-n" ); - if (hostName != null) { - p = Runtime.getRuntime().exec( - new String[] { "/usr/sbin/arp", hostName }, - null); - } - } - else if (new File("/usr/sbin/lanscan").exists()) { - p = Runtime.getRuntime().exec( - new String[] { "/usr/sbin/lanscan" }, null); - } - else if (new File("/sbin/ifconfig").exists()) { - p = Runtime.getRuntime().exec( - new String[] { "/sbin/ifconfig", "-a" }, null); - } - - if (p != null) { - in = new BufferedReader(new InputStreamReader( - p.getInputStream()), 128); - String l = null; - while ((l = in.readLine()) != null) { - macAddress = MACAddressParser.parse(l); - if (macAddress != null - && Hex.parseShort(macAddress) != 0xff) { - break; - } - } - } - - } - catch (SecurityException ex) { - // Ignore it. - } - catch (IOException ex) { - // Ignore it. - } - finally { - if (p != null) { - if (in != null) { - try { - in.close(); - } - catch (IOException ex) { - // Ignore it. - } - } - try { - p.getErrorStream().close(); - } - catch (IOException ex) { - // Ignore it. - } - try { - p.getOutputStream().close(); - } - catch (IOException ex) { - // Ignore it. - } - p.destroy(); - } - } - - } - - if (macAddress != null) { - clockSeqAndNode |= Hex.parseLong(macAddress); - } - else { - try { - byte[] local = InetAddress.getLocalHost().getAddress(); - clockSeqAndNode |= (local[0] << 24) & 0xFF000000L; - clockSeqAndNode |= (local[1] << 16) & 0xFF0000; - clockSeqAndNode |= (local[2] << 8) & 0xFF00; - clockSeqAndNode |= local[3] & 0xFF; - } - catch (UnknownHostException ex) { - clockSeqAndNode |= (long) (Math.random() * 0x7FFFFFFF); - } - } - - // Skip the clock sequence generation process and use random instead. - - clockSeqAndNode |= (long) (Math.random() * 0x3FFF) << 48; - - } - - /** - * Returns the current clockSeqAndNode value. 
- * - * @return the clockSeqAndNode value - * @see UUID#getClockSeqAndNode() - */ - public static long getClockSeqAndNode() { - return clockSeqAndNode; - } - - /** - * Generates a new time field. Each time field is unique and larger than the - * previously generated time field. - * - * @return a new time value - * @see UUID#getTime() - */ - public static long newTime() { - return createTime(System.currentTimeMillis()); - } - - /** - * Creates a new time field from the given timestamp. Note that even identical - * values of currentTimeMillis will produce different time fields. - * - * @param currentTimeMillis the timestamp - * @return a new time value - * @see UUID#getTime() - */ - public static long createTime(long currentTimeMillis) { - - long time; - - // UTC time - - long timeMillis = (currentTimeMillis * 10000) + 0x01B21DD213814000L; - - // Make sure our time is unique - - for(;;) { - final long c = lastTime.get(); - if (timeMillis <= c) { - timeMillis = lastTime.incrementAndGet(); - break; - } else if(lastTime.compareAndSet(c, timeMillis)) break; - } - - // time low - - time = timeMillis << 32; - - // time mid - - time |= (timeMillis & 0xFFFF00000000L) >> 16; - - // time hi and version - - time |= 0x1000 | ((timeMillis >> 48) & 0x0FFF); // version 1 - - return time; - - } - - /** - * Returns the MAC address. Not guaranteed to return anything. - * - * @return the MAC address, may be null - */ - public static String getMACAddress() { - return macAddress; - } - - /** - * Returns the first line of the shell command. - * - * @param commands the commands to run - * @return the first line of the command - * @throws IOException - */ - static String getFirstLineOfCommand(String... commands) throws IOException { - - Process p = null; - BufferedReader reader = null; - - try { - p = Runtime.getRuntime().exec(commands); - reader = new BufferedReader(new InputStreamReader( - p.getInputStream()), 128); - - return reader.readLine(); - } - finally { - if (p != null) { - if (reader != null) { - try { - reader.close(); - } - catch (IOException ex) { - // Ignore it. - } - } - try { - p.getErrorStream().close(); - } - catch (IOException ex) { - // Ignore it. - } - try { - p.getOutputStream().close(); - } - catch (IOException ex) { - // Ignore it. - } - p.destroy(); - } - } - - } - - /** - * Scans MAC addresses for good ones. - */ - static class HardwareAddressLookup { - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - String out = null; - try { - Enumeration ifs = NetworkInterface.getNetworkInterfaces(); - if (ifs != null) { - while (ifs.hasMoreElements()) { - NetworkInterface iface = ifs.nextElement(); - byte[] hardware = iface.getHardwareAddress(); - if (hardware != null && hardware.length == 6 - && hardware[1] != (byte) 0xff) { - out = Hex.append(new StringBuilder(36), hardware).toString(); - break; - } - } - } - } - catch (SocketException ex) { - // Ignore it. 
- } - return out; - } - - } - -} diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 85de2504d3..002233ffe5 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -4,8 +4,6 @@ package akka -import akka.actor.newUuid - object AkkaException { def toStringWithStackTrace(throwable: Throwable): String = throwable match { @@ -34,7 +32,7 @@ object AkkaException { */ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed class AkkaException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause) with Serializable { - lazy val uuid = newUuid.toString + lazy val uuid = java.util.UUID.randomUUID().toString override lazy val toString = "%s:%s\n[%s]".format(getClass.getName, message, uuid) @@ -42,7 +40,7 @@ class AkkaException(message: String = "", cause: Throwable = null) extends Runti lazy val toLongString = "%s:%s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) - def this(msg: String) = this(msg, null); + def this(msg: String) = this(msg, null) def stackTraceToString = AkkaException.stackTraceToString(this) } diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index 50ea8f229b..60ee528e45 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -21,8 +21,7 @@ import java.nio.channels.{ import scala.collection.mutable import scala.annotation.tailrec import scala.collection.generic.CanBuildFrom -import com.eaio.uuid.UUID - +import java.util.UUID /** * IO messages and iteratees. * @@ -89,7 +88,7 @@ object IO { * created by [[akka.actor.IOManager]].connect() and * [[akka.actor.IO.ServerHandle]].accept(). */ - case class SocketHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = new UUID()) extends ReadHandle with WriteHandle { + case class SocketHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = UUID.randomUUID()) extends ReadHandle with WriteHandle { override def asSocket = this } @@ -97,7 +96,7 @@ object IO { * A [[akka.actor.IO.Handle]] to a ServerSocketChannel. Instances are * normally created by [[akka.actor.IOManager]].listen(). 
*/ - case class ServerHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = new UUID()) extends Handle { + case class ServerHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = UUID.randomUUID()) extends Handle { override def asServer = this /** diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index 9ec5348fee..617e3fee5c 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -8,19 +8,7 @@ package object actor { implicit def actorRef2Scala(ref: ActorRef): ScalaActorRef = ref.asInstanceOf[ScalaActorRef] implicit def scala2ActorRef(ref: ScalaActorRef): ActorRef = ref.asInstanceOf[ActorRef] - type Uuid = com.eaio.uuid.UUID - - def newUuid(): Uuid = new Uuid() - - def uuidFrom(time: Long, clockSeqAndNode: Long): Uuid = new Uuid(time, clockSeqAndNode) - - def uuidFrom(uuid: String): Uuid = new Uuid(uuid) - - def simpleName(obj: AnyRef): String = { - val n = obj.getClass.getName - val i = n.lastIndexOf('.') - n.substring(i + 1) - } + def simpleName(obj: AnyRef): String = simpleName(obj.getClass) def simpleName(clazz: Class[_]): String = { val n = clazz.getName From 1f7d2cef497fbfb60b0164f5e569344aec8ece60 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 15:18:21 +0200 Subject: [PATCH 021/106] Adding some commends and making SuspendReason sealed --- .../src/main/scala/akka/actor/ActorCell.scala | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 8c68ba3315..bd5342fec4 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -134,10 +134,17 @@ trait ActorContext extends ActorRefFactory { */ def unwatch(subject: ActorRef): ActorRef + /** + * ActorContexts shouldn't be Serializable + */ final protected def writeObject(o: ObjectOutputStream): Unit = throw new NotSerializableException("ActorContext is not serializable!") } +/** + * UntypedActorContext is the UntypedActor equivalent of ActorContext, + * containing the Java API + */ trait UntypedActorContext extends ActorContext { /** @@ -178,7 +185,7 @@ private[akka] object ActorCell { final val emptyReceiveTimeoutData: (Long, Cancellable) = (-1, emptyCancellable) - trait SuspendReason + sealed trait SuspendReason case object UserRequest extends SuspendReason case class Recreation(cause: Throwable) extends SuspendReason case object Termination extends SuspendReason @@ -749,13 +756,11 @@ private[akka] class ActorCell( } - final def cancelReceiveTimeout() { - //Only cancel if + final def cancelReceiveTimeout(): Unit = if (receiveTimeoutData._2 ne emptyCancellable) { receiveTimeoutData._2.cancel() receiveTimeoutData = (receiveTimeoutData._1, emptyCancellable) } - } final def clearActorFields(actorInstance: Actor): Unit = { setActorFields(actorInstance, context = null, self = system.deadLetters) From 788d735f8c074b1827b323f7c9ba76eeadb82af4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 15:18:42 +0200 Subject: [PATCH 022/106] Moving ActorPath.split into PathUtils so it's only internally used --- .../src/main/scala/akka/actor/ActorPath.scala | 11 ------- .../src/main/scala/akka/actor/Address.scala | 32 ++++++++++++------- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala 
b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index 4ebcec0dbb..15e5677775 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -6,17 +6,6 @@ import scala.annotation.tailrec import java.net.MalformedURLException object ActorPath { - def split(s: String): List[String] = { - @tailrec - def rec(pos: Int, acc: List[String]): List[String] = { - val from = s.lastIndexOf('/', pos - 1) - val sub = s.substring(from + 1, pos) - val l = sub :: acc - if (from == -1) l else rec(from, l) - } - rec(s.length, Nil) - } - /** * Parse string as actor path; throws java.net.MalformedURLException if unable to do so. */ diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index 651d970885..53f95e12d0 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -5,6 +5,7 @@ package akka.actor import java.net.URI import java.net.URISyntaxException import java.net.MalformedURLException +import annotation.tailrec /** * The address specifies the physical location under which an Actor can be @@ -44,12 +45,27 @@ object Address { def apply(protocol: String, system: String, host: String, port: Int) = new Address(protocol, system, Some(host), Some(port)) } -object RelativeActorPath { +private[akka] trait PathUtils { + def split(s: String): List[String] = { + @tailrec + def rec(pos: Int, acc: List[String]): List[String] = { + val from = s.lastIndexOf('/', pos - 1) + val sub = s.substring(from + 1, pos) + val l = sub :: acc + if (from == -1) l else rec(from, l) + } + rec(s.length, Nil) + } +} + +object RelativeActorPath extends PathUtils { def unapply(addr: String): Option[Iterable[String]] = { try { val uri = new URI(addr) if (uri.isAbsolute) None - else Some(ActorPath.split(uri.getPath)) + else Some(split(uri.getPath)) + } catch { + case _: URISyntaxException ⇒ None } } } @@ -58,13 +74,7 @@ object RelativeActorPath { * This object serves as extractor for Scala and as address parser for Java. 
*/ object AddressFromURIString { - def unapply(addr: String): Option[Address] = - try { - val uri = new URI(addr) - unapply(uri) - } catch { - case _: URISyntaxException ⇒ None - } + def unapply(addr: String): Option[Address] = try unapply(new URI(addr)) catch { case _: URISyntaxException ⇒ None } def unapply(uri: URI): Option[Address] = if (uri eq null) None @@ -93,14 +103,14 @@ object AddressFromURIString { def parse(addr: String): Address = apply(addr) } -object ActorPathExtractor { +object ActorPathExtractor extends PathUtils { def unapply(addr: String): Option[(Address, Iterable[String])] = try { val uri = new URI(addr) if (uri.getPath == null) None else AddressFromURIString.unapply(uri) match { case None ⇒ None - case Some(addr) ⇒ Some((addr, ActorPath.split(uri.getPath).drop(1))) + case Some(addr) ⇒ Some((addr, split(uri.getPath).drop(1))) } } catch { case _: URISyntaxException ⇒ None From 6c2bee0533b5a4029b1822234b1f44b95fcfcc5d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 16:01:03 +0200 Subject: [PATCH 023/106] Future-proofing ActorRef --- .../src/main/scala/akka/actor/ActorPath.scala | 27 +++--- .../src/main/scala/akka/actor/ActorRef.scala | 84 ++++++++++--------- 2 files changed, 59 insertions(+), 52 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index 15e5677775..e8c277660f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -14,6 +14,9 @@ object ActorPath { case _ ⇒ throw new MalformedURLException("cannot parse as ActorPath: " + s) } + /** + * This Regular Expression is used to validate a path element (Actor Name) + */ val ElementRegex = """[-\w:@&=+,.!~*'_;][-\w:@&=+,.!~*'$_;]*""".r } @@ -101,21 +104,21 @@ sealed trait ActorPath extends Comparable[ActorPath] with Serializable { //TODO add @SerialVersionUID(1L) when SI-4804 is fixed final case class RootActorPath(address: Address, name: String = "/") extends ActorPath { - def parent: ActorPath = this + override def parent: ActorPath = this - def root: RootActorPath = this + override def root: RootActorPath = this - def /(child: String): ActorPath = new ChildActorPath(this, child) + override def /(child: String): ActorPath = new ChildActorPath(this, child) - val elements: Iterable[String] = List("") + override val elements: Iterable[String] = List("") - override val toString = address + name + override val toString: String = address + name - def toStringWithAddress(addr: Address): String = + override def toStringWithAddress(addr: Address): String = if (address.host.isDefined) address + name else addr + name - def compareTo(other: ActorPath) = other match { + override def compareTo(other: ActorPath): Int = other match { case r: RootActorPath ⇒ toString compareTo r.toString case c: ChildActorPath ⇒ 1 } @@ -125,11 +128,11 @@ final case class RootActorPath(address: Address, name: String = "/") extends Act final class ChildActorPath(val parent: ActorPath, val name: String) extends ActorPath { if (name.indexOf('/') != -1) throw new IllegalArgumentException("/ is a path separator and is not legal in ActorPath names: [%s]" format name) - def address: Address = root.address + override def address: Address = root.address - def /(child: String): ActorPath = new ChildActorPath(this, child) + override def /(child: String): ActorPath = new ChildActorPath(this, child) - def elements: Iterable[String] = { + override def elements: Iterable[String] = { @tailrec 
def rec(p: ActorPath, acc: List[String]): Iterable[String] = p match { case r: RootActorPath ⇒ acc @@ -138,7 +141,7 @@ final class ChildActorPath(val parent: ActorPath, val name: String) extends Acto rec(this, Nil) } - def root = { + override def root: RootActorPath = { @tailrec def rec(p: ActorPath): RootActorPath = p match { case r: RootActorPath ⇒ r @@ -198,7 +201,7 @@ final class ChildActorPath(val parent: ActorPath, val name: String) extends Acto finalizeHash(rec(this, startHash(42), startMagicA, startMagicB)) } - def compareTo(other: ActorPath) = { + override def compareTo(other: ActorPath): Int = { @tailrec def rec(left: ActorPath, right: ActorPath): Int = if (left eq right) 0 diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index b3d4ad19d1..32bb674865 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -6,7 +6,6 @@ package akka.actor import akka.dispatch._ import akka.util._ -import scala.collection.immutable.Stack import java.lang.{ UnsupportedOperationException, IllegalStateException } import akka.serialization.{ Serialization, JavaSerializer } import akka.event.EventStream @@ -155,6 +154,7 @@ trait ScalaActorRef { ref: ActorRef ⇒ } +//FIXME should ActorScope be private[akka], me thinks so - √ /** * All ActorRefs have a scope which describes where they live. Since it is * often necessary to distinguish between local and non-local references, this @@ -215,18 +215,20 @@ private[akka] abstract class InternalActorRef extends ActorRef with ScalaActorRe * This is an internal look-up failure token, not useful for anything else. */ private[akka] case object Nobody extends MinimalActorRef { - val path = new RootActorPath(Address("akka", "all-systems"), "/Nobody") - def provider = throw new UnsupportedOperationException("Nobody does not provide") + override val path: RootActorPath = new RootActorPath(Address("akka", "all-systems"), "/Nobody") + override def provider = throw new UnsupportedOperationException("Nobody does not provide") } /** * Local (serializable) ActorRef that is used when referencing the Actor on its "home" node. + * + * INTERNAL API */ private[akka] class LocalActorRef private[akka] ( _system: ActorSystemImpl, _props: Props, _supervisor: InternalActorRef, - val path: ActorPath, + override val path: ActorPath, val systemService: Boolean = false, _receiveTimeout: Option[Duration] = None) extends InternalActorRef with LocalRef { @@ -268,21 +270,21 @@ private[akka] class LocalActorRef private[akka] ( * message sends done from the same thread after calling this method will not * be processed until resumed. */ - def suspend(): Unit = actorCell.suspend() + override def suspend(): Unit = actorCell.suspend() /** * Resumes a suspended actor. */ - def resume(): Unit = actorCell.resume() + override def resume(): Unit = actorCell.resume() /** * Shuts down the actor and its message queue */ - def stop(): Unit = actorCell.stop() + override def stop(): Unit = actorCell.stop() - def getParent: InternalActorRef = actorCell.parent + override def getParent: InternalActorRef = actorCell.parent - def provider = actorCell.provider + override def provider: ActorRefProvider = actorCell.provider /** * Method for looking up a single child beneath this actor. 
Override in order @@ -294,7 +296,7 @@ private[akka] class LocalActorRef private[akka] ( case None ⇒ Nobody } - def getChild(names: Iterator[String]): InternalActorRef = { + override def getChild(names: Iterator[String]): InternalActorRef = { /* * The idea is to recursively descend as far as possible with LocalActor * Refs and hand over to that “foreign” child when we encounter it. @@ -313,6 +315,7 @@ private[akka] class LocalActorRef private[akka] ( case _ ⇒ ref.getChild(name) } + if (names.isEmpty) this else rec(this, names) } @@ -321,11 +324,11 @@ private[akka] class LocalActorRef private[akka] ( protected[akka] def underlying: ActorCell = actorCell - def sendSystemMessage(message: SystemMessage) { underlying.dispatcher.systemDispatch(underlying, message) } + override def sendSystemMessage(message: SystemMessage) { underlying.dispatcher.systemDispatch(underlying, message) } - def !(message: Any)(implicit sender: ActorRef = null): Unit = actorCell.tell(message, sender) + override def !(message: Any)(implicit sender: ActorRef = null): Unit = actorCell.tell(message, sender) - def restart(cause: Throwable): Unit = actorCell.restart(cause) + override def restart(cause: Throwable): Unit = actorCell.restart(cause) @throws(classOf[java.io.ObjectStreamException]) protected def writeReplace(): AnyRef = SerializedActorRef(path) @@ -348,7 +351,7 @@ case class SerializedActorRef private (path: String) { someSystem.actorFor(path) } } - +//FIXME: Should SerializedActorRef be private[akka] ? object SerializedActorRef { def apply(path: ActorPath): SerializedActorRef = { Serialization.currentTransportAddress.value match { @@ -360,33 +363,32 @@ object SerializedActorRef { /** * Trait for ActorRef implementations where all methods contain default stubs. + * + * INTERNAL API */ private[akka] trait MinimalActorRef extends InternalActorRef with LocalRef { - def getParent: InternalActorRef = Nobody + override def getParent: InternalActorRef = Nobody + override def getChild(names: Iterator[String]): InternalActorRef = if (names.forall(_.isEmpty)) this else Nobody - def getChild(names: Iterator[String]): InternalActorRef = { - val dropped = names.dropWhile(_.isEmpty) - if (dropped.isEmpty) this - else Nobody - } + override def suspend(): Unit = () + override def resume(): Unit = () + override def stop(): Unit = () + override def isTerminated = false - def suspend(): Unit = () - def resume(): Unit = () + override def !(message: Any)(implicit sender: ActorRef = null): Unit = () - def stop(): Unit = () - - def isTerminated = false - - def !(message: Any)(implicit sender: ActorRef = null): Unit = () - - def sendSystemMessage(message: SystemMessage): Unit = () - def restart(cause: Throwable): Unit = () + override def sendSystemMessage(message: SystemMessage): Unit = () + override def restart(cause: Throwable): Unit = () @throws(classOf[java.io.ObjectStreamException]) protected def writeReplace(): AnyRef = SerializedActorRef(path) } +/** + * When a message is sent to an Actor that is terminated before receiving the message, it will be sent as a DeadLetter + * to the ActorSystem's EventStream + */ case class DeadLetter(message: Any, sender: ActorRef, recipient: ActorRef) private[akka] object DeadLetterActorRef { @@ -402,10 +404,12 @@ private[akka] object DeadLetterActorRef { /** * This special dead letter reference has a name: it is that which is returned * by a local look-up which is unsuccessful. 
+ * + * INTERNAL API */ private[akka] class EmptyLocalActorRef( - val provider: ActorRefProvider, - val path: ActorPath, + override val provider: ActorRefProvider, + override val path: ActorPath, val eventStream: EventStream) extends MinimalActorRef { override def isTerminated(): Boolean = true @@ -419,6 +423,8 @@ private[akka] class EmptyLocalActorRef( /** * Internal implementation of the dead letter destination: will publish any * received message to the eventStream, wrapped as [[akka.actor.DeadLetter]]. + * + * INTERNAL API */ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: ActorPath, _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { @@ -434,10 +440,12 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: Actor /** * Internal implementation detail used for paths like “/temp” + * + * INTERNAL API */ private[akka] class VirtualPathContainer( - val provider: ActorRefProvider, - val path: ActorPath, + override val provider: ActorRefProvider, + override val path: ActorPath, override val getParent: InternalActorRef, val log: LoggingAdapter) extends MinimalActorRef { @@ -450,12 +458,8 @@ private[akka] class VirtualPathContainer( } } - def removeChild(name: String): Unit = { - children.remove(name) match { - case null ⇒ log.warning("{} trying to remove non-child {}", path, name) - case _ ⇒ //okay - } - } + def removeChild(name: String): Unit = + if (children.remove(name) eq null) log.warning("{} trying to remove non-child {}", path, name) def getChild(name: String): InternalActorRef = children.get(name) From c6d60e1089e940e68d1bb1c0a4de0b632a17db5d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 16:26:08 +0200 Subject: [PATCH 024/106] Future proofing ActorRefProvider --- .../scala/akka/actor/ActorRefProvider.scala | 75 +++++++++++-------- .../akka/remote/RemoteActorRefProvider.scala | 10 +-- 2 files changed, 50 insertions(+), 35 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 536136934a..f1dac8e28d 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -49,8 +49,12 @@ trait ActorRefProvider { */ def rootPath: ActorPath + /** + * The Settings associated with this ActorRefProvider + */ def settings: ActorSystem.Settings + //FIXME WHY IS THIS HERE? def dispatcher: MessageDispatcher /** @@ -61,8 +65,12 @@ trait ActorRefProvider { */ def init(system: ActorSystemImpl): Unit + /** + * The Deployer associated with this ActorRefProvider + */ def deployer: Deployer + //FIXME WHY IS THIS HERE? def scheduler: Scheduler /** @@ -131,6 +139,7 @@ trait ActorRefProvider { */ def terminationFuture: Future[Unit] + //FIXME I PROPOSE TO REMOVE THIS IN 2.1 - √ /** * Obtain the address which is to be used within sender references when * sending to the given other address or none if the other address cannot be @@ -141,22 +150,33 @@ trait ActorRefProvider { } /** - * Interface implemented by ActorSystem and AkkaContext, the only two places + * Interface implemented by ActorSystem and ActorContext, the only two places * from which you can get fresh actors. 
*/ trait ActorRefFactory { - + /** + * INTERNAL USE ONLY + */ protected def systemImpl: ActorSystemImpl - + /** + * INTERNAL USE ONLY + */ protected def provider: ActorRefProvider - + /** + * INTERNAL USE ONLY + */ protected def dispatcher: MessageDispatcher /** * Father of all children created by this interface. + * + * INTERNAL USE ONLY */ protected def guardian: InternalActorRef + /** + * INTERNAL USE ONLY + */ protected def lookupRoot: InternalActorRef /** @@ -276,8 +296,6 @@ trait ActorRefFactory { def stop(actor: ActorRef): Unit } -class ActorRefProviderException(message: String) extends AkkaException(message) - /** * Internal Akka use only, used in implementation of system.actorOf. */ @@ -298,10 +316,10 @@ private[akka] case class StopChild(child: ActorRef) */ class LocalActorRefProvider( _systemName: String, - val settings: ActorSystem.Settings, + override val settings: ActorSystem.Settings, val eventStream: EventStream, - val scheduler: Scheduler, - val deployer: Deployer) extends ActorRefProvider { + override val scheduler: Scheduler, + override val deployer: Deployer) extends ActorRefProvider { // this is the constructor needed for reflectively instantiating the provider def this(_systemName: String, @@ -315,13 +333,13 @@ class LocalActorRefProvider( scheduler, new Deployer(settings, dynamicAccess)) - val rootPath: ActorPath = RootActorPath(Address("akka", _systemName)) + override val rootPath: ActorPath = RootActorPath(Address("akka", _systemName)) - val log = Logging(eventStream, "LocalActorRefProvider(" + rootPath.address + ")") + private[akka] val log: LoggingAdapter = Logging(eventStream, "LocalActorRefProvider(" + rootPath.address + ")") - val deadLetters = new DeadLetterActorRef(this, rootPath / "deadLetters", eventStream) + override val deadLetters: InternalActorRef = new DeadLetterActorRef(this, rootPath / "deadLetters", eventStream) - val deathWatch = new LocalDeathWatch(1024) //TODO make configrable + override val deathWatch: DeathWatch = new LocalDeathWatch(1024) //TODO make configrable /* * generate name for temporary actor refs @@ -332,7 +350,7 @@ class LocalActorRefProvider( private val tempNode = rootPath / "temp" - def tempPath() = tempNode / tempName() + override def tempPath() = tempNode / tempName() /** * Top-level anchor for the supervision hierarchy of this actor system. Will @@ -348,11 +366,11 @@ class LocalActorRefProvider( def provider: ActorRefProvider = LocalActorRefProvider.this - override def stop() = stopped switchOn { + override def stop(): Unit = stopped switchOn { terminationFuture.complete(causeOfTermination.toLeft(())) } - override def isTerminated = stopped.isOn + override def isTerminated: Boolean = stopped.isOn override def !(message: Any)(implicit sender: ActorRef = null): Unit = stopped.ifOff(message match { case Failed(ex) if sender ne null ⇒ causeOfTermination = Some(ex); sender.asInstanceOf[InternalActorRef].stop() @@ -371,7 +389,7 @@ class LocalActorRefProvider( /** * Overridable supervision strategy to be used by the “/user” guardian. 
*/ - protected def guardianSupervisionStrategy = { + protected def guardianSupervisionStrategy: SupervisorStrategy = { import akka.actor.SupervisorStrategy._ OneForOneStrategy() { case _: ActorKilledException ⇒ Stop @@ -387,12 +405,12 @@ class LocalActorRefProvider( */ private class Guardian extends Actor { - override val supervisorStrategy = guardianSupervisionStrategy + override val supervisorStrategy: SupervisorStrategy = guardianSupervisionStrategy def receive = { case Terminated(_) ⇒ context.stop(self) - case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! DeadLetter(m, sender, self) } @@ -404,12 +422,11 @@ class LocalActorRefProvider( /** * Overridable supervision strategy to be used by the “/system” guardian. */ - protected def systemGuardianSupervisionStrategy = { + protected def systemGuardianSupervisionStrategy: SupervisorStrategy = { import akka.actor.SupervisorStrategy._ OneForOneStrategy() { - case _: ActorKilledException ⇒ Stop - case _: ActorInitializationException ⇒ Stop - case _: Exception ⇒ Restart + case _: ActorKilledException | _: ActorInitializationException ⇒ Stop + case _: Exception ⇒ Restart } } @@ -420,14 +437,12 @@ class LocalActorRefProvider( */ private class SystemGuardian extends Actor { - override val supervisorStrategy = systemGuardianSupervisionStrategy + override val supervisorStrategy: SupervisorStrategy = systemGuardianSupervisionStrategy def receive = { - case Terminated(_) ⇒ - eventStream.stopDefaultLoggers() - context.stop(self) - case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) + case Terminated(_) ⇒ eventStream.stopDefaultLoggers(); context.stop(self) + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! 
DeadLetter(m, sender, self) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index ab53d9e99d..8f1ec6e1b7 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -256,9 +256,9 @@ private[akka] class RemoteActorRef private[akka] ( private def writeReplace(): AnyRef = SerializedActorRef(path) } -class RemoteDeathWatch(val local: LocalDeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { +class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { - def subscribe(watcher: ActorRef, watched: ActorRef): Boolean = watched match { + override def subscribe(watcher: ActorRef, watched: ActorRef): Boolean = watched match { case r: RemoteRef ⇒ val ret = local.subscribe(watcher, watched) provider.actorFor(r.path.root / "remote") ! DaemonMsgWatch(watcher, watched) @@ -270,10 +270,10 @@ class RemoteDeathWatch(val local: LocalDeathWatch, val provider: RemoteActorRefP false } - def unsubscribe(watcher: ActorRef, watched: ActorRef): Boolean = local.unsubscribe(watcher, watched) + override def unsubscribe(watcher: ActorRef, watched: ActorRef): Boolean = local.unsubscribe(watcher, watched) - def unsubscribe(watcher: ActorRef): Unit = local.unsubscribe(watcher) + override def unsubscribe(watcher: ActorRef): Unit = local.unsubscribe(watcher) - def publish(event: Terminated): Unit = local.publish(event) + override def publish(event: Terminated): Unit = local.publish(event) } From d092d17aed72bc69f698446c1147feecfc94f1ba Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 17:44:56 +0200 Subject: [PATCH 025/106] Adding even more future proofing --- .../src/main/scala/akka/actor/ActorRef.scala | 7 ++-- .../scala/akka/actor/ActorSelection.scala | 21 ++++++++-- .../main/scala/akka/actor/ActorSystem.scala | 38 +++++++++++------- .../src/main/scala/akka/actor/Address.scala | 40 ++++++++++++------- 4 files changed, 70 insertions(+), 36 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 32bb674865..d0ad270957 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -305,11 +305,10 @@ private[akka] class LocalActorRef private[akka] ( def rec(ref: InternalActorRef, name: Iterator[String]): InternalActorRef = ref match { case l: LocalActorRef ⇒ - val n = name.next() - val next = n match { + val next = name.next() match { case ".." 
⇒ l.getParent case "" ⇒ l - case _ ⇒ l.getSingleChild(n) + case any ⇒ l.getSingleChild(any) } if (next == Nobody || name.isEmpty) next else rec(next, name) case _ ⇒ @@ -324,7 +323,7 @@ private[akka] class LocalActorRef private[akka] ( protected[akka] def underlying: ActorCell = actorCell - override def sendSystemMessage(message: SystemMessage) { underlying.dispatcher.systemDispatch(underlying, message) } + override def sendSystemMessage(message: SystemMessage): Unit = underlying.dispatcher.systemDispatch(underlying, message) override def !(message: Any)(implicit sender: ActorRef = null): Unit = actorCell.tell(message, sender) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index b407868270..44767cb0b6 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -5,6 +5,10 @@ package akka.actor import java.util.regex.Pattern import akka.util.Helpers +/** + * An ActorSelection is a logical view of a section of an ActorSystem's tree of Actors, + * allowing for broadcasting of messages to that section. + */ abstract class ActorSelection { this: ScalaActorSelection ⇒ @@ -12,11 +16,11 @@ abstract class ActorSelection { protected def path: Array[AnyRef] - def tell(msg: Any) { target ! toMessage(msg, path) } + def tell(msg: Any): Unit = target ! toMessage(msg, path) - def tell(msg: Any, sender: ActorRef) { target.tell(toMessage(msg, path), sender) } + def tell(msg: Any, sender: ActorRef): Unit = target.tell(toMessage(msg, path), sender) - // this may want to be fast ... + // FIXME make this so that "next" instead is the remaining path private def toMessage(msg: Any, path: Array[AnyRef]): Any = { var acc = msg var index = path.length - 1 @@ -32,7 +36,12 @@ abstract class ActorSelection { } } +/** + * An ActorSelection is a logical view of a section of an ActorSystem's tree of Actors, + * allowing for broadcasting of messages to that section. + */ object ActorSelection { + //This cast is safe because the self-type of ActorSelection requires that it mixes in ScalaActorSelection implicit def toScala(sel: ActorSelection): ScalaActorSelection = sel.asInstanceOf[ScalaActorSelection] /** @@ -43,7 +52,7 @@ object ActorSelection { */ def apply(anchor: ActorRef, path: String): ActorSelection = { val elems = path.split("/+").dropWhile(_.isEmpty) - val compiled: Array[AnyRef] = elems map (x ⇒ if (x.contains("?") || x.contains("*")) Helpers.makePattern(x) else x) + val compiled: Array[AnyRef] = elems map (x ⇒ if (x.contains('?') || x.contains('*')) Helpers.makePattern(x) else x) new ActorSelection with ScalaActorSelection { def target = anchor def path = compiled @@ -51,6 +60,10 @@ object ActorSelection { } } +/** + * Contains the Scala API (!-method) for ActorSelections) which provides automatic tracking of the sender, + * as per the usual implicit ActorRef pattern. 
+ */ trait ScalaActorSelection { this: ActorSelection ⇒ diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index b84057b749..32e221a7a1 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -10,32 +10,30 @@ import akka.dispatch._ import akka.pattern.ask import org.jboss.netty.akka.util.HashedWheelTimer import java.util.concurrent.TimeUnit.MILLISECONDS -import com.typesafe.config.Config -import com.typesafe.config.ConfigFactory +import com.typesafe.config.{ Config, ConfigFactory } import scala.annotation.tailrec import org.jboss.netty.akka.util.internal.ConcurrentIdentityHashMap import java.io.Closeable -import akka.dispatch.Await.Awaitable -import akka.dispatch.Await.CanAwait +import akka.dispatch.Await.{ Awaitable, CanAwait } import akka.util._ import collection.immutable.Stack import java.util.concurrent.{ ThreadFactory, CountDownLatch, TimeoutException, RejectedExecutionException } object ActorSystem { - val Version = "2.1-SNAPSHOT" + val Version: String = "2.1-SNAPSHOT" - val EnvHome = System.getenv("AKKA_HOME") match { + val EnvHome: Option[String] = System.getenv("AKKA_HOME") match { case null | "" | "." ⇒ None case value ⇒ Some(value) } - val SystemHome = System.getProperty("akka.home") match { + val SystemHome: Option[String] = System.getProperty("akka.home") match { case null | "" ⇒ None case value ⇒ Some(value) } - val GlobalHome = SystemHome orElse EnvHome + val GlobalHome: Option[String] = SystemHome orElse EnvHome /** * Creates a new ActorSystem with the name "default", @@ -102,8 +100,16 @@ object ActorSystem { */ def apply(name: String, config: Config, classLoader: ClassLoader): ActorSystem = new ActorSystemImpl(name, config, classLoader).start() + /** + * Settings are the overall ActorSystem Settings which also provides a convenient access to the Config object. 
+ * + * For more detailed information about the different possible configuration options, look in the Akka Documentation under "Configuration" + */ class Settings(classLoader: ClassLoader, cfg: Config, final val name: String) { + /** + * The backing Config of this ActorSystem's Settings + */ final val config: Config = { val config = cfg.withFallback(ConfigFactory.defaultReference(classLoader)) config.checkValid(ConfigFactory.defaultReference(classLoader), "akka") @@ -114,11 +120,9 @@ object ActorSystem { import config._ final val ConfigVersion = getString("akka.version") - final val ProviderClass = getString("akka.actor.provider") - final val CreationTimeout = Timeout(Duration(getMilliseconds("akka.actor.creation-timeout"), MILLISECONDS)) - final val ReaperInterval = Duration(getMilliseconds("akka.actor.reaper-interval"), MILLISECONDS) + final val SerializeAllMessages = getBoolean("akka.actor.serialize-messages") final val SerializeAllCreators = getBoolean("akka.actor.serialize-creators") @@ -148,11 +152,14 @@ object ActorSystem { if (ConfigVersion != Version) throw new ConfigurationException("Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]") + /** + * Returns the String representation of the Config that this Settings is backed by + */ override def toString: String = config.root.render } /** - * INTERNAL + * INTERNAL USE ONLY */ private[akka] def findClassLoader(): ClassLoader = { def findCaller(get: Int ⇒ Class[_]): ClassLoader = @@ -422,6 +429,8 @@ abstract class ExtendedActorSystem extends ActorSystem { def dynamicAccess: DynamicAccess } +//FIXME This should most probably not be protected[akka] right? - √ +//FIXME We also need to decide whether this should be supported API or not - √ class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Config, classLoader: ClassLoader) extends ExtendedActorSystem { if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-]*$""")) @@ -475,7 +484,7 @@ class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Conf def logConfiguration(): Unit = log.info(settings.toString) - protected def systemImpl = this + protected def systemImpl: ActorSystemImpl = this private[akka] def systemActorOf(props: Props, name: String): ActorRef = { implicit val timeout = settings.CreationTimeout @@ -539,6 +548,7 @@ class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Conf def deadLetters: ActorRef = provider.deadLetters + //FIXME Why do we need this at all? val deadLetterQueue: MessageQueue = new MessageQueue { def enqueue(receiver: ActorRef, envelope: Envelope) { deadLetters ! DeadLetter(envelope.message, envelope.sender, receiver) } def dequeue() = null @@ -546,7 +556,7 @@ class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Conf def numberOfMessages = 0 def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () } - + //FIXME Why do we need this at all? val deadLetterMailbox: Mailbox = new Mailbox(null, deadLetterQueue) { becomeClosed() def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = deadLetters ! 
DeadLetter(handle, receiver, receiver) diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index 53f95e12d0..67f147b836 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -21,32 +21,43 @@ final case class Address private (protocol: String, system: String, host: Option def this(protocol: String, system: String) = this(protocol, system, None, None) def this(protocol: String, system: String, host: String, port: Int) = this(protocol, system, Option(host), Some(port)) + /** + * Returns the canonical String representation of this Address formatted as: + * + * ://@: + */ @transient override lazy val toString: String = { - val sb = new StringBuilder(protocol) - sb.append("://") - sb.append(system) - if (host.isDefined) { - sb.append('@') - sb.append(host.get) - } - if (port.isDefined) { - sb.append(':') - sb.append(port.get) - } + val sb = (new StringBuilder(protocol)).append("://").append(system) + + if (host.isDefined) sb.append('@').append(host.get) + if (port.isDefined) sb.append(':').append(port.get) + sb.toString } - def hostPort: String = toString.substring(protocol.length() + 3) + /** + * Returns a String representation formatted as: + * + * @: + */ + def hostPort: String = toString.substring(protocol.length + 3) } object Address { + /** + * Constructs a new Address with the specified protocol and system name + */ def apply(protocol: String, system: String) = new Address(protocol, system) + + /** + * Constructs a new Address with the specified protocol, system name, host and port + */ def apply(protocol: String, system: String, host: String, port: Int) = new Address(protocol, system, Some(host), Some(port)) } private[akka] trait PathUtils { - def split(s: String): List[String] = { + protected def split(s: String): List[String] = { @tailrec def rec(pos: Int, acc: List[String]): List[String] = { val from = s.lastIndexOf('/', pos - 1) @@ -94,7 +105,7 @@ object AddressFromURIString { */ def apply(addr: String): Address = addr match { case AddressFromURIString(address) ⇒ address - case _ ⇒ throw new MalformedURLException + case _ ⇒ throw new MalformedURLException(addr) } /** @@ -103,6 +114,7 @@ object AddressFromURIString { def parse(addr: String): Address = apply(addr) } +//FIXME is this public API? - √ object ActorPathExtractor extends PathUtils { def unapply(addr: String): Option[(Address, Iterable[String])] = try { From fccbba0de54633be7beee2834ba4830f534ecf3b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 15:22:21 +0200 Subject: [PATCH 026/106] Docs, comments, cleanup, fairy dust... 
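
The most visible move in this cleanup is that the simpleName helper leaves the akka.actor package object and becomes akka.event.Logging.simpleName, so callers such as PerformanceSpec, EventStream and the netty RemoteClient now spell it Logging.simpleName(...). A minimal standalone sketch of the helper's behaviour follows (illustration only, not the Akka object itself; the SimpleNameSketch wrapper and the main demo are just for this sketch):

    object SimpleNameSketch {
      // Strips the package prefix from a class name, e.g. "akka.event.Logging" -> "Logging".
      def simpleName(obj: AnyRef): String = simpleName(obj.getClass)

      def simpleName(clazz: Class[_]): String = {
        val n = clazz.getName
        n.substring(n.lastIndexOf('.') + 1)
      }

      def main(args: Array[String]): Unit =
        println(simpleName("hello")) // prints "String"
    }

The logic itself is unchanged by the move; only its home (and therefore the import at the call sites) differs.
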
--- .../workbench/PerformanceSpec.scala | 7 +- .../src/main/scala/akka/actor/Deployer.scala | 20 ++- .../main/scala/akka/actor/DynamicAccess.scala | 4 +- .../src/main/scala/akka/actor/FSM.scala | 3 +- .../main/scala/akka/actor/FaultHandling.scala | 46 ++++--- akka-actor/src/main/scala/akka/actor/IO.scala | 123 +++++++++--------- .../src/main/scala/akka/actor/package.scala | 8 -- .../main/scala/akka/event/EventStream.scala | 3 +- .../src/main/scala/akka/event/Logging.scala | 16 ++- .../main/scala/akka/remote/netty/Client.scala | 23 ++-- 10 files changed, 133 insertions(+), 120 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala index ca6e42d67f..ca23dd5a33 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala @@ -3,12 +3,11 @@ package akka.performance.workbench import scala.collection.immutable.TreeMap import org.apache.commons.math.stat.descriptive.DescriptiveStatistics import org.scalatest.BeforeAndAfterEach -import akka.actor.simpleName import akka.testkit.AkkaSpec -import akka.actor.ActorSystem import akka.util.Duration import com.typesafe.config.Config import java.util.concurrent.TimeUnit +import akka.event.Logging abstract class PerformanceSpec(cfg: Config = BenchmarkConfig.config) extends AkkaSpec(cfg) with BeforeAndAfterEach { @@ -36,7 +35,7 @@ abstract class PerformanceSpec(cfg: Config = BenchmarkConfig.config) extends Akk } def logMeasurement(numberOfClients: Int, durationNs: Long, n: Long) { - val name = simpleName(this) + val name = Logging.simpleName(this) val durationS = durationNs.toDouble / 1000000000.0 val stats = Stats( @@ -51,7 +50,7 @@ abstract class PerformanceSpec(cfg: Config = BenchmarkConfig.config) extends Akk } def logMeasurement(numberOfClients: Int, durationNs: Long, stat: DescriptiveStatistics) { - val name = simpleName(this) + val name = Logging.simpleName(this) val durationS = durationNs.toDouble / 1000000000.0 val percentiles = TreeMap[Int, Long]( diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 2fd9538d77..47b8bf329c 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -34,8 +34,19 @@ final case class Deploy( routerConfig: RouterConfig = NoRouter, scope: Scope = NoScopeGiven) { + /** + * Java API to create a Deploy with the given RouterConfig + */ def this(routing: RouterConfig) = this("", ConfigFactory.empty, routing) + + /** + * Java API to create a Deploy with the given RouterConfig with Scope + */ def this(routing: RouterConfig, scope: Scope) = this("", ConfigFactory.empty, routing, scope) + + /** + * Java API to create a Deploy with the given Scope + */ def this(scope: Scope) = this("", ConfigFactory.empty, NoRouter, scope) /** @@ -67,13 +78,9 @@ trait Scope { //TODO add @SerialVersionUID(1L) when SI-4804 is fixed abstract class LocalScope extends Scope -case object LocalScope extends LocalScope { - /** - * Java API - */ - @deprecated("use instance() method instead", "2.0.1") - def scope: Scope = this +//FIXME docs +case object LocalScope extends LocalScope { /** * Java API: get the singleton instance */ @@ -162,5 +169,4 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce Some(Deploy(key, deployment, router, 
NoScopeGiven)) } - } diff --git a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala index 8d3ac68852..72ffbbe76e 100644 --- a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala @@ -14,7 +14,7 @@ import java.lang.reflect.InvocationTargetException * This is an internal facility and users are not expected to encounter it * unless they are extending Akka in ways which go beyond simple Extensions. */ -trait DynamicAccess { +abstract class DynamicAccess { /** * Convenience method which given a `Class[_]` object and a constructor description @@ -88,7 +88,7 @@ trait DynamicAccess { * by default. */ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAccess { - + //FIXME switch to Scala Reflection for 2.10 override def getClassFor[T: ClassManifest](fqcn: String): Either[Throwable, Class[_ <: T]] = try { val c = classLoader.loadClass(fqcn).asInstanceOf[Class[_ <: T]] diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 3d1f8930c4..71d1ec7e69 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -6,9 +6,10 @@ package akka.actor import akka.util._ import scala.collection.mutable -import akka.event.Logging import akka.routing.{ Deafen, Listen, Listeners } +//FIXME: Roland, could you go through this file? + object FSM { object NullFunction extends PartialFunction[Any, Nothing] { diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 70246bab30..383010f9de 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -9,8 +9,13 @@ import scala.collection.JavaConversions._ import java.lang.{ Iterable ⇒ JIterable } import akka.util.Duration +/** + * ChildRestartStats is the statistics kept by every parent Actor for every child Actor + * and is used for SupervisorStrategies to know how to deal with problems that occur for the children. + */ case class ChildRestartStats(val child: ActorRef, var maxNrOfRetriesCount: Int = 0, var restartTimeWindowStartNanos: Long = 0L) { + //FIXME How about making ChildRestartStats immutable and then move these methods into the actual supervisor strategies? def requestRestartPermission(retriesWindow: (Option[Int], Option[Int])): Boolean = retriesWindow match { case (Some(retries), _) if retries < 1 ⇒ false @@ -160,19 +165,19 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { def makeDecider(flat: Iterable[CauseDirective]): Decider = { val directives = sort(flat) - { - case x ⇒ directives find (_._1 isInstance x) map (_._2) getOrElse Escalate - } + { case x ⇒ directives find (_._1 isInstance x) map (_._2) getOrElse Escalate } } - def makeDecider(func: JDecider): Decider = { - case x ⇒ func(x) - } + /** + * Converts a Java Decider into a Scala Decider + */ + def makeDecider(func: JDecider): Decider = { case x ⇒ func(x) } /** * Sort so that subtypes always precede their supertypes, but without * obeying any order between unrelated subtypes (insert sort). */ + //FIXME Should this really be public API? 
def sort(in: Iterable[CauseDirective]): Seq[CauseDirective] = (new ArrayBuffer[CauseDirective](in.size) /: in) { (buf, ca) ⇒ buf.indexWhere(_._1 isAssignableFrom ca._1) match { @@ -184,14 +189,21 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { private[akka] def withinTimeRangeOption(withinTimeRange: Duration): Option[Duration] = if (withinTimeRange.isFinite && withinTimeRange >= Duration.Zero) Some(withinTimeRange) else None + private[akka] def maxNrOfRetriesOption(maxNrOfRetries: Int): Option[Int] = if (maxNrOfRetries < 0) None else Some(maxNrOfRetries) } +/** + * An Akka SupervisorStrategy is + */ abstract class SupervisorStrategy { import SupervisorStrategy._ + /** + * Returns the Decider that is associated with this SupervisorStrategy + */ def decider: Decider /** @@ -204,21 +216,19 @@ abstract class SupervisorStrategy { */ def processFailure(context: ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Unit - def handleSupervisorFailing(supervisor: ActorRef, children: Iterable[ActorRef]): Unit = { - if (children.nonEmpty) - children.foreach(_.asInstanceOf[InternalActorRef].suspend()) - } + //FIXME docs + def handleSupervisorFailing(supervisor: ActorRef, children: Iterable[ActorRef]): Unit = + if (children.nonEmpty) children.foreach(_.asInstanceOf[InternalActorRef].suspend()) - def handleSupervisorRestarted(cause: Throwable, supervisor: ActorRef, children: Iterable[ActorRef]): Unit = { - if (children.nonEmpty) - children.foreach(_.asInstanceOf[InternalActorRef].restart(cause)) - } + //FIXME docs + def handleSupervisorRestarted(cause: Throwable, supervisor: ActorRef, children: Iterable[ActorRef]): Unit = + if (children.nonEmpty) children.foreach(_.asInstanceOf[InternalActorRef].restart(cause)) /** * Returns whether it processed the failure or not */ def handleFailure(context: ActorContext, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Boolean = { - val directive = if (decider.isDefinedAt(cause)) decider(cause) else Escalate + val directive = if (decider.isDefinedAt(cause)) decider(cause) else Escalate //FIXME applyOrElse in Scala 2.10 directive match { case Resume ⇒ child.asInstanceOf[InternalActorRef].resume(); true case Restart ⇒ processFailure(context, true, child, cause, stats, children); true @@ -242,6 +252,8 @@ abstract class SupervisorStrategy { case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration = Duration.Inf)(val decider: SupervisorStrategy.Decider) extends SupervisorStrategy { + import SupervisorStrategy._ + def this(maxNrOfRetries: Int, withinTimeRange: Duration, decider: SupervisorStrategy.JDecider) = this(maxNrOfRetries, withinTimeRange)(SupervisorStrategy.makeDecider(decider)) @@ -256,9 +268,7 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration * every call to requestRestartPermission, assuming that strategies are shared * across actors and thus this field does not take up much space */ - private val retriesWindow = ( - SupervisorStrategy.maxNrOfRetriesOption(maxNrOfRetries), - SupervisorStrategy.withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt)) + private val retriesWindow = (maxNrOfRetriesOption(maxNrOfRetries), withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt)) def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit = {} diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala 
b/akka-actor/src/main/scala/akka/actor/IO.scala index 72eaf32e83..3ff91c4fa8 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -30,7 +30,7 @@ import java.util.UUID */ object IO { - final class DivergentIterateeException extends Exception("Iteratees should not return a continuation when receiving EOF") + final class DivergentIterateeException extends IllegalStateException("Iteratees should not return a continuation when receiving EOF") /** * An immutable handle to a Java NIO Channel. Contains a reference to the @@ -64,14 +64,14 @@ object IO { * A [[akka.actor.IO.Handle]] to a ReadableByteChannel. */ sealed trait ReadHandle extends Handle with Product { - override def asReadable = this + override def asReadable: ReadHandle = this } /** * A [[akka.actor.IO.Handle]] to a WritableByteChannel. */ sealed trait WriteHandle extends Handle with Product { - override def asWritable = this + override def asWritable: WriteHandle = this /** * Sends a request to the [[akka.actor.IOManager]] to write to the @@ -89,7 +89,7 @@ object IO { * [[akka.actor.IO.ServerHandle]].accept(). */ case class SocketHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = UUID.randomUUID()) extends ReadHandle with WriteHandle { - override def asSocket = this + override def asSocket: SocketHandle = this } /** @@ -97,7 +97,7 @@ object IO { * normally created by [[akka.actor.IOManager]].listen(). */ case class ServerHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = UUID.randomUUID()) extends Handle { - override def asServer = this + override def asServer: ServerHandle = this /** * Sends a request to the [[akka.actor.IOManager]] to accept an incoming @@ -320,16 +320,18 @@ object IO { } object Chunk { - val empty = Chunk(ByteString.empty) + val empty = new Chunk(ByteString.empty) } /** * Part of an [[akka.actor.IO.Input]] stream that contains a chunk of bytes. */ case class Chunk(bytes: ByteString) extends Input { - def ++(that: Input) = that match { - case Chunk(more) ⇒ Chunk(bytes ++ more) - case _: EOF ⇒ that + final override def ++(that: Input): Input = that match { + case Chunk(more) if more.isEmpty ⇒ this + case c: Chunk if bytes.isEmpty ⇒ c + case Chunk(more) ⇒ Chunk(bytes ++ more) + case _: EOF ⇒ that } } @@ -342,7 +344,7 @@ object IO { * Iteratee.recover() in order to handle it properly. */ case class EOF(cause: Option[Exception]) extends Input { - def ++(that: Input) = that + final override def ++(that: Input) = that } object Iteratee { @@ -352,7 +354,15 @@ object IO { * inferred as an Iteratee and not as a Done. */ def apply[A](value: A): Iteratee[A] = Done(value) + + /** + * Returns Iteratee.unit + */ def apply(): Iteratee[Unit] = unit + + /** + * The single value representing Done(()) + */ val unit: Iteratee[Unit] = Done(()) } @@ -445,6 +455,7 @@ object IO { */ final case class Cont[+A](f: Input ⇒ (Iteratee[A], Input), error: Option[Exception] = None) extends Iteratee[A] + //FIXME general description of what an IterateeRef is and how it is used, potentially with link to docs object IterateeRef { /** @@ -477,13 +488,14 @@ object IO { * 'refFactory' is used to provide the default value for new keys. 
*/ class Map[K, V] private (refFactory: ⇒ IterateeRef[V], underlying: mutable.Map[K, IterateeRef[V]] = mutable.Map.empty[K, IterateeRef[V]]) extends mutable.Map[K, IterateeRef[V]] { - def get(key: K) = Some(underlying.getOrElseUpdate(key, refFactory)) - def iterator = underlying.iterator - def +=(kv: (K, IterateeRef[V])) = { underlying += kv; this } - def -=(key: K) = { underlying -= key; this } + override def get(key: K) = Some(underlying.getOrElseUpdate(key, refFactory)) + override def iterator = underlying.iterator + override def +=(kv: (K, IterateeRef[V])) = { underlying += kv; this } + override def -=(key: K) = { underlying -= key; this } override def empty = new Map[K, V](refFactory) } + //FIXME general description of what an Map is and how it is used, potentially with link to docs object Map { /** * Uses a factory to create the initial IterateeRef for each new key. @@ -500,7 +512,6 @@ object IO { */ def async[K]()(implicit executor: ExecutionContext): IterateeRef.Map[K, Unit] = new Map(IterateeRef.async()) } - } /** @@ -510,8 +521,11 @@ object IO { * for details. */ trait IterateeRef[A] { + //FIXME Add docs def flatMap(f: A ⇒ Iteratee[A]): Unit + //FIXME Add docs def map(f: A ⇒ A): Unit + //FIXME Add docs def apply(input: Input): Unit } @@ -528,12 +542,16 @@ object IO { */ final class IterateeRefSync[A](initial: Iteratee[A]) extends IterateeRef[A] { private var _value: (Iteratee[A], Input) = (initial, Chunk.empty) - def flatMap(f: A ⇒ Iteratee[A]): Unit = _value = _value match { + override def flatMap(f: A ⇒ Iteratee[A]): Unit = _value = _value match { case (iter, chunk @ Chunk(bytes)) if bytes.nonEmpty ⇒ (iter flatMap f)(chunk) case (iter, input) ⇒ (iter flatMap f, input) } - def map(f: A ⇒ A): Unit = _value = (_value._1 map f, _value._2) - def apply(input: Input): Unit = _value = _value._1(_value._2 ++ input) + override def map(f: A ⇒ A): Unit = _value = (_value._1 map f, _value._2) + override def apply(input: Input): Unit = _value = _value._1(_value._2 ++ input) + + /** + * Returns the current value of this IterateeRefSync + */ def value: (Iteratee[A], Input) = _value } @@ -553,12 +571,16 @@ object IO { */ final class IterateeRefAsync[A](initial: Iteratee[A])(implicit executor: ExecutionContext) extends IterateeRef[A] { private var _value: Future[(Iteratee[A], Input)] = Future((initial, Chunk.empty)) - def flatMap(f: A ⇒ Iteratee[A]): Unit = _value = _value map { + override def flatMap(f: A ⇒ Iteratee[A]): Unit = _value = _value map { case (iter, chunk @ Chunk(bytes)) if bytes.nonEmpty ⇒ (iter flatMap f)(chunk) case (iter, input) ⇒ (iter flatMap f, input) } - def map(f: A ⇒ A): Unit = _value = _value map (v ⇒ (v._1 map f, v._2)) - def apply(input: Input): Unit = _value = _value map (v ⇒ v._1(v._2 ++ input)) + override def map(f: A ⇒ A): Unit = _value = _value map (v ⇒ (v._1 map f, v._2)) + override def apply(input: Input): Unit = _value = _value map (v ⇒ v._1(v._2 ++ input)) + + /** + * Returns a Future which will hold the future value of this IterateeRefAsync + */ def future: Future[(Iteratee[A], Input)] = _value } @@ -702,10 +724,9 @@ object IO { /** * An Iteratee that continually repeats an Iteratee. 
* - * TODO: Should terminate on EOF + * FIXME TODO: Should terminate on EOF */ - def repeat(iter: Iteratee[Unit]): Iteratee[Unit] = - iter flatMap (_ ⇒ repeat(iter)) + def repeat(iter: Iteratee[Unit]): Iteratee[Unit] = iter flatMap (_ ⇒ repeat(iter)) /** * An Iteratee that applies an Iteratee to each element of a Traversable @@ -780,7 +801,7 @@ object IO { * An IOManager does not need to be manually stopped when not in use as it will * automatically enter an idle state when it has no channels to manage. */ -final class IOManager private (system: ActorSystem) extends Extension { +final class IOManager private (system: ActorSystem) extends Extension { //FIXME how about taking an ActorContext /** * A reference to the [[akka.actor.IOManagerActor]] that performs the actual * IO. It communicates with other actors using subclasses of @@ -861,9 +882,10 @@ final class IOManager private (system: ActorSystem) extends Extension { } +//FIXME add docs object IOManager extends ExtensionId[IOManager] with ExtensionIdProvider { - override def lookup = this - override def createExtension(system: ExtendedActorSystem) = new IOManager(system) + override def lookup: IOManager.type = this + override def createExtension(system: ExtendedActorSystem): IOManager = new IOManager(system) } /** @@ -874,7 +896,7 @@ object IOManager extends ExtensionId[IOManager] with ExtensionIdProvider { final class IOManagerActor extends Actor with ActorLogging { import SelectionKey.{ OP_READ, OP_WRITE, OP_ACCEPT, OP_CONNECT } - private val bufferSize = 8192 // TODO: make buffer size configurable + private val bufferSize = 8192 // FIXME TODO: make configurable private type ReadChannel = ReadableByteChannel with SelectableChannel private type WriteChannel = WritableByteChannel with SelectableChannel @@ -897,7 +919,7 @@ final class IOManagerActor extends Actor with ActorLogging { private var lastSelect = 0 /** force a select when lastSelect reaches this amount */ - private val selectAt = 100 + private val selectAt = 100 // FIXME TODO: make configurable /** true while the selector is open and channels.nonEmpty */ private var running = false @@ -947,9 +969,7 @@ final class IOManagerActor extends Actor with ActorLogging { lastSelect = 0 } - private def forwardFailure(f: ⇒ Unit): Unit = { - try { f } catch { case NonFatal(e) ⇒ sender ! Status.Failure(e) } - } + private def forwardFailure(f: ⇒ Unit): Unit = try f catch { case NonFatal(e) ⇒ sender ! Status.Failure(e) } private def setSocketOptions(socket: java.net.Socket, options: Seq[IO.SocketOption]) { options foreach { @@ -985,7 +1005,7 @@ final class IOManagerActor extends Actor with ActorLogging { forwardFailure(sock.setPerformancePreferences(connTime, latency, bandwidth)) } - channel.socket bind (address, 1000) // TODO: make backlog configurable + channel.socket bind (address, 1000) // FIXME TODO: make backlog configurable channels update (server, channel) channel register (selector, OP_ACCEPT, server) server.owner ! 
IO.Listening(server, channel.socket.getLocalSocketAddress()) @@ -1048,29 +1068,13 @@ final class IOManagerActor extends Actor with ActorLogging { private def process(key: SelectionKey) { val handle = key.attachment.asInstanceOf[IO.Handle] try { - if (key.isConnectable) key.channel match { - case channel: SocketChannel ⇒ connect(handle.asSocket, channel) - } - if (key.isAcceptable) key.channel match { - case channel: ServerSocketChannel ⇒ accept(handle.asServer, channel) - } - if (key.isReadable) key.channel match { - case channel: ReadChannel ⇒ read(handle.asReadable, channel) - } - if (key.isWritable) key.channel match { - case channel: WriteChannel ⇒ - try { - write(handle.asWritable, channel) - } catch { - case e: IOException ⇒ - // ignore, let it fail on read to ensure nothing left in read buffer. - } - } + if (key.isConnectable) key.channel match { case channel: SocketChannel ⇒ connect(handle.asSocket, channel) } + if (key.isAcceptable) key.channel match { case channel: ServerSocketChannel ⇒ accept(handle.asServer, channel) } + if (key.isReadable) key.channel match { case channel: ReadChannel ⇒ read(handle.asReadable, channel) } + if (key.isWritable) key.channel match { case channel: WriteChannel ⇒ try write(handle.asWritable, channel) catch { case e: IOException ⇒ } } // ignore, let it fail on read to ensure nothing left in read buffer. } catch { - case e: ClassCastException ⇒ cleanup(handle, Some(e)) - case e: CancelledKeyException ⇒ cleanup(handle, Some(e)) - case e: IOException ⇒ cleanup(handle, Some(e)) - case e: ActorInitializationException ⇒ cleanup(handle, Some(e)) + case e @ (_: ClassCastException | _: CancelledKeyException | _: IOException | _: ActorInitializationException) ⇒ + cleanup(handle, Some(e.asInstanceOf[Exception])) //Scala patmat is broken } } @@ -1089,9 +1093,6 @@ final class IOManagerActor extends Actor with ActorLogging { } } - private def setOps(handle: IO.Handle, ops: Int): Unit = - channels(handle) keyFor selector interestOps ops - private def addOps(handle: IO.Handle, ops: Int) { val key = channels(handle) keyFor selector val cur = key.interestOps @@ -1157,9 +1158,9 @@ final class IOManagerActor extends Actor with ActorLogging { } } } - } +//FIXME is this public API? 
final class WriteBuffer(bufferSize: Int) { private val _queue = new java.util.ArrayDeque[ByteString] private val _buffer = ByteBuffer.allocate(bufferSize) @@ -1181,9 +1182,9 @@ final class WriteBuffer(bufferSize: Int) { this } - def length = _length + def length: Int = _length - def isEmpty = _length == 0 + def isEmpty: Boolean = _length == 0 def write(channel: WritableByteChannel with SelectableChannel): Int = { @tailrec diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index 617e3fee5c..3bf56b8bc4 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -7,12 +7,4 @@ package akka package object actor { implicit def actorRef2Scala(ref: ActorRef): ScalaActorRef = ref.asInstanceOf[ScalaActorRef] implicit def scala2ActorRef(ref: ScalaActorRef): ActorRef = ref.asInstanceOf[ActorRef] - - def simpleName(obj: AnyRef): String = simpleName(obj.getClass) - - def simpleName(clazz: Class[_]): String = { - val n = clazz.getName - val i = n.lastIndexOf('.') - n.substring(i + 1) - } } diff --git a/akka-actor/src/main/scala/akka/event/EventStream.scala b/akka-actor/src/main/scala/akka/event/EventStream.scala index 27f0c71515..172cf052ca 100644 --- a/akka-actor/src/main/scala/akka/event/EventStream.scala +++ b/akka-actor/src/main/scala/akka/event/EventStream.scala @@ -3,7 +3,8 @@ */ package akka.event -import akka.actor.{ ActorRef, ActorSystem, simpleName } +import akka.actor.{ ActorRef, ActorSystem } +import akka.event.Logging.simpleName import akka.util.Subclassification object EventStream { diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index bf4fc7996d..2cda6469da 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -275,8 +275,8 @@ object LogSource { // this one unfortunately does not work as implicit, because existential types have some weird behavior val fromClass: LogSource[Class[_]] = new LogSource[Class[_]] { - def genString(c: Class[_]) = simpleName(c) - override def genString(c: Class[_], system: ActorSystem) = simpleName(c) + "(" + system + ")" + def genString(c: Class[_]) = Logging.simpleName(c) + override def genString(c: Class[_], system: ActorSystem) = genString(c) + "(" + system + ")" override def getClazz(c: Class[_]) = c } implicit def fromAnyClass[T]: LogSource[Class[T]] = fromClass.asInstanceOf[LogSource[Class[T]]] @@ -310,7 +310,7 @@ object LogSource { case a: Actor ⇒ apply(a) case a: ActorRef ⇒ apply(a) case s: String ⇒ apply(s) - case x ⇒ (simpleName(x), x.getClass) + case x ⇒ (Logging.simpleName(x), x.getClass) } /** @@ -324,7 +324,7 @@ object LogSource { case a: Actor ⇒ apply(a) case a: ActorRef ⇒ apply(a) case s: String ⇒ apply(s) - case x ⇒ (simpleName(x) + "(" + system + ")", x.getClass) + case x ⇒ (Logging.simpleName(x) + "(" + system + ")", x.getClass) } } @@ -363,6 +363,14 @@ object LogSource { */ object Logging { + def simpleName(obj: AnyRef): String = simpleName(obj.getClass) + + def simpleName(clazz: Class[_]): String = { + val n = clazz.getName + val i = n.lastIndexOf('.') + n.substring(i + 1) + } + object Extension extends ExtensionKey[LogExt] class LogExt(system: ExtendedActorSystem) extends Extension { diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 7baf3011ee..84c5764cf5 100644 --- 
a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -3,27 +3,22 @@ */ package akka.remote.netty -import java.net.InetSocketAddress -import org.jboss.netty.util.HashedWheelTimer +import java.util.concurrent.TimeUnit +import java.net.{ InetAddress, InetSocketAddress } +import org.jboss.netty.util.{ Timeout, TimerTask, HashedWheelTimer } import org.jboss.netty.bootstrap.ClientBootstrap import org.jboss.netty.channel.group.DefaultChannelGroup -import org.jboss.netty.channel.{ ChannelHandler, StaticChannelPipeline, SimpleChannelUpstreamHandler, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } +import org.jboss.netty.channel.{ ChannelFutureListener, ChannelHandler, StaticChannelPipeline, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler +import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } + import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } -import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected } -import akka.actor.{ simpleName, Address } +import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected, RemoteClientWriteFailed } +import akka.actor.{ Address, ActorRef } import akka.AkkaException import akka.event.Logging import akka.util.Switch -import akka.actor.ActorRef -import org.jboss.netty.channel.ChannelFutureListener -import akka.remote.RemoteClientWriteFailed -import java.net.InetAddress -import org.jboss.netty.util.TimerTask -import org.jboss.netty.util.Timeout -import java.util.concurrent.TimeUnit -import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } class RemoteClientMessageBufferException(message: String, cause: Throwable) extends AkkaException(message, cause) { def this(msg: String) = this(msg, null) @@ -40,7 +35,7 @@ abstract class RemoteClient private[akka] ( val log = Logging(netty.system, "RemoteClient") - val name = simpleName(this) + "@" + remoteAddress + val name = Logging.simpleName(this) + "@" + remoteAddress private[remote] val runSwitch = new Switch() From cd31b4b1039e07751fc706b39f25203006f7ecd6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 15:38:22 +0200 Subject: [PATCH 027/106] Touch-up of Props --- .../src/main/scala/akka/actor/Props.scala | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 3751898c5c..ffc9574421 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -18,12 +18,24 @@ import akka.routing._ */ object Props { + /** + * The defaultCreator, simply throws an UnsupportedOperationException when applied, which is used when creating a Props + */ final val defaultCreator: () ⇒ 
Actor = () ⇒ throw new UnsupportedOperationException("No actor creator specified!") + /** + * The defaultRoutedProps is NoRouter which is used when creating a Props + */ final val defaultRoutedProps: RouterConfig = NoRouter + /** + * The default Deploy instance which is used when creating a Props + */ final val defaultDeploy = Deploy() + /** + * A Props instance whose creator will create an actor that doesn't respond to any message + */ final val empty = new Props(() ⇒ new Actor { def receive = Actor.emptyBehavior }) /** @@ -49,8 +61,7 @@ object Props { * Returns a Props that has default values except for "creator" which will be a function that creates an instance * of the supplied class using the default constructor. */ - def apply(actorClass: Class[_ <: Actor]): Props = - default.withCreator(actorClass) + def apply(actorClass: Class[_ <: Actor]): Props = default.withCreator(actorClass) /** * Returns a Props that has default values except for "creator" which will be a function that creates an instance @@ -58,18 +69,18 @@ object Props { * * Scala API. */ - def apply(creator: ⇒ Actor): Props = - default.withCreator(creator) + def apply(creator: ⇒ Actor): Props = default.withCreator(creator) /** * Returns a Props that has default values except for "creator" which will be a function that creates an instance * using the supplied thunk. */ - def apply(creator: Creator[_ <: Actor]): Props = - default.withCreator(creator.create) + def apply(creator: Creator[_ <: Actor]): Props = default.withCreator(creator.create) - def apply(behavior: ActorContext ⇒ Actor.Receive): Props = - apply(new Actor { def receive = behavior(context) }) + /** + * Returns a new Props whose creator will instantiate an Actor that has the behavior specified + */ + def apply(behavior: ActorContext ⇒ Actor.Receive): Props = apply(new Actor { def receive = behavior(context) }) } /** From c0cead3aad13ed178b4e2f0f6fd9ca031068db4f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:16:31 +0200 Subject: [PATCH 028/106] Cleaning up Scheduler, rewriting ContinuousCancellable --- .../src/main/scala/akka/actor/Scheduler.scala | 56 +++++++------------ 1 file changed, 21 insertions(+), 35 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 827e511308..6155cab10c 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -9,6 +9,8 @@ import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTi import akka.event.LoggingAdapter import akka.dispatch.MessageDispatcher import java.io.Closeable +import java.util.concurrent.atomic.AtomicReference +import scala.annotation.tailrec //#scheduler /** @@ -188,11 +190,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, private trait ContinuousScheduling { this: TimerTask ⇒ def scheduleNext(timeout: HWTimeout, delay: Duration, delegator: ContinuousCancellable) { - try { - delegator.swap(timeout.getTimer.newTimeout(this, delay)) - } catch { - case _: IllegalStateException ⇒ // stop recurring if timer is stopped - } + try delegator.swap(timeout.getTimer.newTimeout(this, delay)) catch { case _: IllegalStateException ⇒ } // stop recurring if timer is stopped } } @@ -203,7 +201,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, } } - def close() = { + def close(): Unit = { import scala.collection.JavaConverters._ hashedWheelTimer.stop().asScala foreach execDirectly } @@ -214,43 +212,31 @@ class 
DefaultScheduler(hashedWheelTimer: HashedWheelTimer, * methods. Needed to be able to cancel continuous tasks, * since they create new Timeout for each tick. */ -private[akka] class ContinuousCancellable extends Cancellable { - @volatile - private var delegate: HWTimeout = _ - @volatile - private var cancelled = false - +private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout] with Cancellable { private[akka] def init(initialTimeout: HWTimeout): this.type = { - delegate = initialTimeout + assert(compareAndSet(null, initialTimeout)) this } - private[akka] def swap(newTimeout: HWTimeout): Unit = { - val wasCancelled = isCancelled - delegate = newTimeout - if (wasCancelled || isCancelled) cancel() + @tailrec private[akka] final def swap(newTimeout: HWTimeout): Unit = get match { + case null ⇒ newTimeout.cancel() + case some if some.isCancelled ⇒ cancel(); newTimeout.cancel() + case some ⇒ if (!compareAndSet(some, newTimeout)) swap(newTimeout) } - def isCancelled(): Boolean = { - // delegate is initially null, but this object will not be exposed to the world until after init - cancelled || delegate.isCancelled() + def isCancelled(): Boolean = get match { + case null ⇒ true + case some ⇒ isCancelled() } - def cancel(): Unit = { - // the underlying Timeout will not become cancelled once the task has been started to run, - // therefore we keep a flag here to make sure that rescheduling doesn't occur when cancelled - cancelled = true - // delegate is initially null, but this object will not be exposed to the world until after init - delegate.cancel() - } + def cancel(): Unit = + getAndSet(null) match { + case null ⇒ + case some ⇒ some.cancel() + } } -class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { - def cancel() { - timeout.cancel() - } - - def isCancelled: Boolean = { - timeout.isCancelled - } +private[akka] class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { + override def cancel(): Unit = timeout.cancel() + override def isCancelled: Boolean = timeout.isCancelled } From 07c9bfe6d7b058cd5883e3b62c00a0fd3cc4babf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:22:59 +0200 Subject: [PATCH 029/106] Making sure that the ContinuousCancellable can never be re-initialized --- .../src/main/scala/akka/actor/Scheduler.scala | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 6155cab10c..3bb524ad92 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -5,7 +5,7 @@ package akka.actor import akka.util.Duration -import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout } +import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout, Timer } import akka.event.LoggingAdapter import akka.dispatch.MessageDispatcher import java.io.Closeable @@ -207,14 +207,23 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, } } +private[akka] object ContinuousCancellable { + val initial: HWTimeout = new HWTimeout { + override def getTimer: Timer = null + override def getTask: TimerTask = null + override def isExpired: Boolean = false + override def isCancelled: Boolean = false + override def cancel: Unit = () + } +} /** * Wrapper of a [[org.jboss.netty.akka.util.Timeout]] that delegates all * methods. 
Needed to be able to cancel continuous tasks, * since they create new Timeout for each tick. */ -private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout] with Cancellable { +private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout](ContinuousCancellable.initial) with Cancellable { private[akka] def init(initialTimeout: HWTimeout): this.type = { - assert(compareAndSet(null, initialTimeout)) + compareAndSet(ContinuousCancellable.initial, initialTimeout) this } From 7da74effe6d0118960b0441b293d2f0f389b5095 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:28:43 +0200 Subject: [PATCH 030/106] Adding overrides on the DefaultScheduler --- .../src/main/scala/akka/actor/Scheduler.scala | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 3bb524ad92..91e54a592d 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -121,7 +121,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, dispatcher: ⇒ MessageDispatcher) extends Scheduler with Closeable { - def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable = { + override def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable = { val continuousCancellable = new ContinuousCancellable continuousCancellable.init( hashedWheelTimer.newTimeout( @@ -136,7 +136,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, initialDelay)) } - def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = { + override def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = { val continuousCancellable = new ContinuousCancellable continuousCancellable.init( hashedWheelTimer.newTimeout( @@ -150,7 +150,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, initialDelay)) } - def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = { + override def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = { val continuousCancellable = new ContinuousCancellable continuousCancellable.init( hashedWheelTimer.newTimeout( @@ -163,7 +163,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, initialDelay)) } - def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = + override def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = new DefaultCancellable( hashedWheelTimer.newTimeout( new TimerTask() { @@ -171,7 +171,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, }, delay)) - def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = + override def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = new DefaultCancellable( hashedWheelTimer.newTimeout( new TimerTask { @@ -179,7 +179,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, }, delay)) - def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = + override def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = new DefaultCancellable( hashedWheelTimer.newTimeout( new TimerTask with Runnable { @@ -201,7 +201,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, } } - def close(): Unit = { + override def close(): Unit = { import scala.collection.JavaConverters._ hashedWheelTimer.stop().asScala 
foreach execDirectly } From a4f990029827aa8798c94655ebf831a3442e524b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:30:28 +0200 Subject: [PATCH 031/106] Adding some docs to Stash --- akka-actor/src/main/scala/akka/actor/Stash.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/Stash.scala b/akka-actor/src/main/scala/akka/actor/Stash.scala index 6672945522..386bc0f070 100644 --- a/akka-actor/src/main/scala/akka/actor/Stash.scala +++ b/akka-actor/src/main/scala/akka/actor/Stash.scala @@ -56,7 +56,7 @@ trait Stash { /* The capacity of the stash. Configured in the actor's dispatcher config. */ - private val capacity = { + private val capacity: Int = { val dispatcher = context.system.settings.config.getConfig(context.props.dispatcher) val config = dispatcher.withFallback(context.system.settings.config.getConfig("akka.actor.default-dispatcher")) config.getInt("stash-capacity") @@ -125,4 +125,7 @@ An (unbounded) deque-based mailbox can be configured as follows: } +/** + * Is thrown when the size of the Stash exceeds the capacity of the Stash + */ class StashOverflowException(message: String, cause: Throwable = null) extends AkkaException(message, cause) From 0527f81c20c868474926a7791596a96ad9ede0e0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:45:36 +0200 Subject: [PATCH 032/106] Adding docs and making things private to akka for TypedActors --- .../main/scala/akka/actor/TypedActor.scala | 49 +++++++++++++++---- 1 file changed, 39 insertions(+), 10 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 4d85542d36..9bb560417b 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -6,21 +6,28 @@ package akka.actor import akka.japi.{ Creator, Option ⇒ JOption } import java.lang.reflect.{ InvocationTargetException, Method, InvocationHandler, Proxy } -import akka.util.{ Timeout, NonFatal } +import akka.util.{ Timeout, NonFatal, Duration } import java.util.concurrent.atomic.{ AtomicReference ⇒ AtomVar } import akka.dispatch._ import java.util.concurrent.TimeoutException import java.util.concurrent.TimeUnit.MILLISECONDS -import java.lang.IllegalStateException -import akka.util.Duration import akka.actor.TypedActor.TypedActorInvocationHandler -import akka.serialization.{ JavaSerializer, Serialization, SerializationExtension } +import akka.serialization.{ JavaSerializer, SerializationExtension } import java.io.ObjectStreamException +/** + * A TypedActorFactory is something that can created TypedActor instances. + */ trait TypedActorFactory { + /** + * Underlying dependency is to be able to create normal Actors + */ protected def actorFactory: ActorRefFactory + /** + * Underlying dependency to a TypedActorExtension, which can either be contextual or ActorSystem "global" + */ protected def typedActor: TypedActorExtension /** @@ -80,6 +87,9 @@ trait TypedActorFactory { } +/** + * This represents the TypedActor Akka Extension, access to the functionality is done through a given ActorSystem. 
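A minimal usage sketch (illustrative only, not part of this patch series): the extension is addressed through a given ActorSystem and then used as a factory for proxies. The Squarer/SquarerImpl names and the system value are invented for the example; typedActorOf and TypedProps are the factory API documented for Akka 2.x TypedActors, assuming akka.actor._ is imported.

trait Squarer { def square(i: Int): Int }
class SquarerImpl extends Squarer { def square(i: Int): Int = i * i }

// with an ActorSystem called `system` in scope:
val squarer: Squarer = TypedActor(system).typedActorOf(TypedProps[SquarerImpl]())
val nine = squarer.square(3) // the invocation is turned into a MethodCall and sent to the backing actor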
+ */ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvider { override def get(system: ActorSystem): TypedActorExtension = super.get(system) @@ -145,8 +155,10 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi /** * Represents the serialized form of a MethodCall, uses readResolve and writeReplace to marshall the call + * + * INTERNAL USE ONLY */ - case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializedParameters: Array[(Int, Class[_], Array[Byte])]) { + private[akka] case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializedParameters: Array[(Int, Class[_], Array[Byte])]) { //TODO implement writeObject and readObject to serialize //TODO Possible optimization is to special encode the parameter-types to conserve space @@ -213,6 +225,8 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi /** * Implementation of TypedActor as an Actor + * + * INTERNAL USE ONLY */ private[akka] class TypedActor[R <: AnyRef, T <: R](val proxyVar: AtomVar[R], createInstance: ⇒ T) extends Actor { val me = try { @@ -371,6 +385,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi def postRestart(reason: Throwable): Unit } + /** + * INTERNAL USE ONLY + */ private[akka] class TypedActorInvocationHandler(@transient val extension: TypedActorExtension, @transient val actorVar: AtomVar[ActorRef], @transient val timeout: Timeout) extends InvocationHandler with Serializable { def actor = actorVar.get @throws(classOf[Throwable]) @@ -396,6 +413,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi @throws(classOf[ObjectStreamException]) private def writeReplace(): AnyRef = SerializedTypedActorInvocationHandler(actor, timeout.duration) } + /** + * INTERNAL USE ONLY + */ private[akka] case class SerializedTypedActorInvocationHandler(val actor: ActorRef, val timeout: Duration) { @throws(classOf[ObjectStreamException]) private def readResolve(): AnyRef = JavaSerializer.currentSystem.value match { case null ⇒ throw new IllegalStateException("SerializedTypedActorInvocationHandler.readResolve requires that JavaSerializer.currentSystem.value is set to a non-null value") @@ -569,12 +589,16 @@ case class TypedProps[T <: AnyRef] protected[TypedProps] ( def withoutInterface(interface: Class[_ >: T]): TypedProps[T] = this.copy(interfaces = interfaces diff TypedProps.extractInterfaces(interface)) - import akka.actor.{ Props ⇒ ActorProps } - def actorProps(): ActorProps = - if (dispatcher == ActorProps().dispatcher) ActorProps() - else ActorProps(dispatcher = dispatcher) + /** + * Returns the akka.actor.Props representation of this TypedProps + */ + def actorProps(): Props = if (dispatcher == Props().dispatcher) Props() else Props(dispatcher = dispatcher) } +/** + * ContextualTypedActorFactory allows TypedActors to create children, effectively forming the same Actor Supervision Hierarchies + * as normal Actors can. 
+ */ case class ContextualTypedActorFactory(typedActor: TypedActorExtension, actorFactory: ActorContext) extends TypedActorFactory { override def getActorRefFor(proxy: AnyRef): ActorRef = typedActor.getActorRefFor(proxy) override def isTypedActor(proxyOrNot: AnyRef): Boolean = typedActor.isTypedActor(proxyOrNot) @@ -607,7 +631,9 @@ class TypedActorExtension(system: ExtendedActorSystem) extends TypedActorFactory def isTypedActor(proxyOrNot: AnyRef): Boolean = invocationHandlerFor(proxyOrNot) ne null // Private API - + /** + * INTERNAL USE ONLY + */ private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T], proxyVar: AtomVar[R], actorRef: ⇒ ActorRef): R = { //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling val actorVar = new AtomVar[ActorRef](null) @@ -631,6 +657,9 @@ class TypedActorExtension(system: ExtendedActorSystem) extends TypedActorFactory } } + /** + * INTERNAL USE ONLY + */ private[akka] def invocationHandlerFor(typedActor_? : AnyRef): TypedActorInvocationHandler = if ((typedActor_? ne null) && Proxy.isProxyClass(typedActor_?.getClass)) typedActor_? match { case null ⇒ null From 95db4bfd3744f0d44b91ca90b4adfbf31bffff3f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:04:13 +0200 Subject: [PATCH 033/106] Moving out ConfigurationException from akka.config to akka --- .../routing/ConfiguredLocalRoutingSpec.scala | 2 +- .../test/scala/akka/routing/RoutingSpec.scala | 2 +- .../src/main/scala/akka/AkkaException.scala | 7 ++ .../main/scala/akka/actor/ActorSystem.scala | 3 +- .../akka/config/ConfigurationException.scala | 15 --- .../akka/dispatch/AbstractDispatcher.scala | 116 ++++++++++++++---- .../src/main/scala/akka/event/Logging.scala | 6 +- .../src/main/scala/akka/routing/Routing.scala | 2 +- .../akka/serialization/Serialization.scala | 2 +- .../src/main/scala/akka/cluster/Cluster.scala | 2 +- .../scala/akka/cluster/ClusterSettings.scala | 2 +- .../akka/actor/mailbox/FileBasedMailbox.scala | 2 +- .../akka/remote/RemoteActorRefProvider.scala | 2 +- .../scala/akka/remote/RemoteDeployer.scala | 2 +- .../scala/akka/remote/RemoteSettings.scala | 2 +- .../scala/akka/remote/netty/Settings.scala | 2 +- .../akka/routing/RemoteRouterConfig.scala | 2 +- 17 files changed, 111 insertions(+), 60 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/config/ConfigurationException.scala diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index d01f1cda04..5bedc8fc33 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -8,7 +8,7 @@ import java.util.concurrent.atomic.AtomicInteger import org.junit.runner.RunWith import akka.actor.{ Props, LocalActorRef, Deploy, Actor, ActorRef } -import akka.config.ConfigurationException +import akka.ConfigurationException import akka.dispatch.Await import akka.pattern.{ ask, gracefulStop } import akka.testkit.{ TestLatch, ImplicitSender, DefaultTimeout, AkkaSpec } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 2ae32cfcf5..5ad6da271f 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -10,7 +10,7 @@ import akka.testkit._ import 
akka.util.duration._ import akka.dispatch.Await import akka.util.Duration -import akka.config.ConfigurationException +import akka.ConfigurationException import com.typesafe.config.ConfigFactory import akka.pattern.ask import java.util.concurrent.ConcurrentHashMap diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 002233ffe5..79d78b9d39 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -44,3 +44,10 @@ class AkkaException(message: String = "", cause: Throwable = null) extends Runti def stackTraceToString = AkkaException.stackTraceToString(this) } + +/** + * This exception is thrown when Akka detects a problem with the provided configuration + */ +class ConfigurationException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { + def this(msg: String) = this(msg, null) +} diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 32e221a7a1..a1d30ddbc6 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -4,7 +4,6 @@ package akka.actor -import akka.config.ConfigurationException import akka.event._ import akka.dispatch._ import akka.pattern.ask @@ -150,7 +149,7 @@ object ActorSystem { final val JvmExitOnFatalError = getBoolean("akka.jvm-exit-on-fatal-error") if (ConfigVersion != Version) - throw new ConfigurationException("Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]") + throw new akka.ConfigurationException("Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]") /** * Returns the String representation of the Config that this Settings is backed by diff --git a/akka-actor/src/main/scala/akka/config/ConfigurationException.scala b/akka-actor/src/main/scala/akka/config/ConfigurationException.scala deleted file mode 100644 index ba0a3a2234..0000000000 --- a/akka-actor/src/main/scala/akka/config/ConfigurationException.scala +++ /dev/null @@ -1,15 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.config - -import akka.AkkaException - -class ConfigurationException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null); -} - -class ModuleNotAvailableException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null); -} diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index db5c71167b..9aec23b4c6 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -33,7 +33,10 @@ final case class Envelope(val message: Any, val sender: ActorRef)(system: ActorS } } -object SystemMessage { +/** + * INTERNAL API + */ +private[akka] object SystemMessage { @tailrec final def size(list: SystemMessage, acc: Int = 0): Int = { if (list eq null) acc else size(list.next, acc + 1) @@ -59,33 +62,57 @@ object SystemMessage { * system messages is handled in a single thread only and not ever passed around, * hence no further synchronization is needed. 
* + * INTERNAL API + * * ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ */ -sealed trait SystemMessage extends PossiblyHarmful { +private[akka] sealed trait SystemMessage extends PossiblyHarmful { @transient var next: SystemMessage = _ } -case class Create() extends SystemMessage // send to self from Dispatcher.register -case class Recreate(cause: Throwable) extends SystemMessage // sent to self from ActorCell.restart -case class Suspend() extends SystemMessage // sent to self from ActorCell.suspend -case class Resume() extends SystemMessage // sent to self from ActorCell.resume -case class Terminate() extends SystemMessage // sent to self from ActorCell.stop -case class Supervise(child: ActorRef) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start -case class ChildTerminated(child: ActorRef) extends SystemMessage // sent to supervisor from ActorCell.doTerminate -case class Link(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.watch -case class Unlink(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch + +/** + * INTERNAL API + */ +private[akka] case class Create() extends SystemMessage // send to self from Dispatcher.register +/** + * INTERNAL API + */ +private[akka] case class Recreate(cause: Throwable) extends SystemMessage // sent to self from ActorCell.restart +/** + * INTERNAL API + */ +private[akka] case class Suspend() extends SystemMessage // sent to self from ActorCell.suspend +/** + * INTERNAL API + */ +private[akka] case class Resume() extends SystemMessage // sent to self from ActorCell.resume +/** + * INTERNAL API + */ +private[akka] case class Terminate() extends SystemMessage // sent to self from ActorCell.stop +/** + * INTERNAL API + */ +private[akka] case class Supervise(child: ActorRef) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start +/** + * INTERNAL API + */ +private[akka] case class ChildTerminated(child: ActorRef) extends SystemMessage // sent to supervisor from ActorCell.doTerminate +/** + * INTERNAL API + */ +private[akka] case class Link(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.watch +/** + * INTERNAL API + */ +private[akka] case class Unlink(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Runnable { - def run() { - try { - runnable.run() - } catch { - case NonFatal(e) ⇒ - eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage)) - } finally { - cleanup() - } - } + def run(): Unit = + try runnable.run() catch { + case NonFatal(e) ⇒ eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage)) + } finally cleanup() } /** @@ -170,10 +197,16 @@ trait ExecutionContext { def reportFailure(t: Throwable): Unit } +/** + * INTERNAL API + */ private[akka] trait LoadMetrics { self: Executor ⇒ def atFullThrottle(): Boolean } +/** + * INTERNAL API + */ private[akka] object MessageDispatcher { val UNSCHEDULED = 0 //WARNING DO NOT CHANGE THE VALUE OF THIS: It relies on the faster init of 0 in AbstractMessageDispatcher val SCHEDULED = 1 @@ -228,7 +261,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext /** * Creates and returns a mailbox for the given actor. */ - protected[akka] def createMailbox(actor: ActorCell): Mailbox + protected[akka] def createMailbox(actor: ActorCell): Mailbox //FIXME should this really be private[akka]? 
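The rewritten TaskInvocation above collapses run/report/cleanup into a single expression: run the wrapped Runnable, publish an Error event for non-fatal failures, and always run the cleanup hook. A stand-alone sketch of the same shape (illustrative only; it uses scala.util.control.NonFatal rather than akka.util.NonFatal, and the SimpleTask/onError/onDone names are invented):

import scala.util.control.NonFatal

final case class SimpleTask(body: () ⇒ Unit, onError: Throwable ⇒ Unit, onDone: () ⇒ Unit) extends Runnable {
  def run(): Unit =
    try body()
    catch { case NonFatal(e) ⇒ onError(e) } // fatal errors (e.g. OutOfMemoryError) are left to propagate
    finally onDone()                         // cleanup runs on both the success and the failure path
}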
/** * Identifier of this dispatcher, corresponds to the full key @@ -255,7 +288,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext ifSensibleToDoSoThenScheduleShutdown() } - final def execute(runnable: Runnable) { + final def execute(runnable: Runnable): Unit = { val invocation = TaskInvocation(eventStream, runnable, taskCleanup) addInhabitants(+1) try { @@ -300,6 +333,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext /** * If you override it, you must call it. But only ever once. See "attach" for only invocation. + * + * INTERNAL API */ protected[akka] def register(actor: ActorCell) { if (debug) actors.put(this, actor.self) @@ -308,6 +343,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext /** * If you override it, you must call it. But only ever once. See "detach" for the only invocation + * + * INTERNAL API */ protected[akka] def unregister(actor: ActorCell) { if (debug) actors.remove(this, actor.self) @@ -340,6 +377,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext * When the dispatcher no longer has any actors registered, how long will it wait until it shuts itself down, * defaulting to your akka configs "akka.actor.default-dispatcher.shutdown-timeout" or default specified in * reference.conf + * + * INTERNAL API */ protected[akka] def shutdownTimeout: Duration @@ -362,36 +401,59 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext } /** - * Will be called when the dispatcher is to queue an invocation for execution + * Will be called when the dispatcher is to queue an invocation for execution + * + * INTERNAL API */ protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage) /** - * Will be called when the dispatcher is to queue an invocation for execution + * Will be called when the dispatcher is to queue an invocation for execution + * + * INTERNAL API */ protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope) /** * Suggest to register the provided mailbox for execution + * + * INTERNAL API */ protected[akka] def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean // TODO check whether this should not actually be a property of the mailbox + /** + * INTERNAL API + */ protected[akka] def throughput: Int + + /** + * INTERNAL API + */ protected[akka] def throughputDeadlineTime: Duration - @inline - protected[akka] final val isThroughputDeadlineTimeDefined = throughputDeadlineTime.toMillis > 0 + /** + * INTERNAL API + */ + @inline protected[akka] final val isThroughputDeadlineTimeDefined = throughputDeadlineTime.toMillis > 0 + /** + * INTERNAL API + */ protected[akka] def executeTask(invocation: TaskInvocation) /** * Called one time every time an actor is detached from this dispatcher and this dispatcher has no actors left attached * Must be idempotent + * + * INTERNAL API */ protected[akka] def shutdown(): Unit } +/** + * An ExecutorServiceConfigurator is a class that given some prerequisites and a configuration can create instances of ExecutorService + */ abstract class ExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceFactoryProvider /** diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 2cda6469da..ac31b133b3 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ 
b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -4,12 +4,10 @@ package akka.event import akka.actor._ -import akka.AkkaException +import akka.{ ConfigurationException, AkkaException } import akka.actor.ActorSystem.Settings -import akka.config.ConfigurationException -import akka.util.ReentrantGuard +import akka.util.{ Timeout, ReentrantGuard } import akka.util.duration._ -import akka.util.Timeout import java.util.concurrent.atomic.AtomicInteger import scala.util.control.NoStackTrace import java.util.concurrent.TimeoutException diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index fdf14a5b96..211ef202f7 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -6,7 +6,7 @@ package akka.routing import akka.actor._ import akka.util.Duration import akka.util.duration._ -import akka.config.ConfigurationException +import akka.ConfigurationException import akka.pattern.pipe import akka.pattern.AskSupport import com.typesafe.config.Config diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index ce0f56a238..03d03dc785 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -85,7 +85,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { /** * Returns the Serializer configured for the given object, returns the NullSerializer if it's null. * - * @throws akka.config.ConfigurationException if no `serialization-bindings` is configured for the + * @throws akka.ConfigurationException if no `serialization-bindings` is configured for the * class of the object */ def findSerializerFor(o: AnyRef): Serializer = o match { diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index e3429cfdb3..76e3356143 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -13,7 +13,7 @@ import akka.dispatch.Await import akka.pattern.ask import akka.util._ import akka.util.duration._ -import akka.config.ConfigurationException +import akka.ConfigurationException import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } import java.util.concurrent.TimeUnit._ diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index a24c75b436..8e9b9c770d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -6,7 +6,7 @@ package akka.cluster import com.typesafe.config.Config import akka.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.config.ConfigurationException +import akka.ConfigurationException import scala.collection.JavaConverters._ import akka.actor.Address import akka.actor.AddressFromURIString diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala index ef8a28b2cf..e57bfd57d2 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala +++ 
b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala @@ -11,7 +11,7 @@ import akka.actor.ActorRef import akka.dispatch.MailboxType import com.typesafe.config.Config import akka.util.NonFatal -import akka.config.ConfigurationException +import akka.ConfigurationException import akka.actor.ActorSystem class FileBasedMailboxType(systemSettings: ActorSystem.Settings, config: Config) extends MailboxType { diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 8f1ec6e1b7..bf55edf24c 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -9,7 +9,7 @@ import akka.actor._ import akka.dispatch._ import akka.event.{ DeathWatch, Logging, LoggingAdapter } import akka.event.EventStream -import akka.config.ConfigurationException +import akka.ConfigurationException import java.util.concurrent.{ TimeoutException } import com.typesafe.config.Config import akka.serialization.Serialization diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index 0858c66405..e869c4ef4c 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -6,7 +6,7 @@ package akka.remote import akka.actor._ import akka.routing._ import com.typesafe.config._ -import akka.config.ConfigurationException +import akka.ConfigurationException case class RemoteScope(node: Address) extends Scope { def withFallback(other: Scope): Scope = this diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index ef30206a42..0b26311286 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -7,7 +7,7 @@ import com.typesafe.config.Config import akka.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS import java.net.InetAddress -import akka.config.ConfigurationException +import akka.ConfigurationException import scala.collection.JavaConverters._ import akka.actor.Address import akka.actor.AddressFromURIString diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index e2f69d77b5..95737e7584 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -7,7 +7,7 @@ import com.typesafe.config.Config import akka.util.Duration import java.util.concurrent.TimeUnit._ import java.net.InetAddress -import akka.config.ConfigurationException +import akka.ConfigurationException class NettySettings(config: Config, val systemName: String) { diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala index c9bb6dba0f..714b854a69 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala @@ -10,7 +10,7 @@ import akka.actor.ActorSystemImpl import akka.actor.Deploy import akka.actor.InternalActorRef import akka.actor.Props -import akka.config.ConfigurationException +import akka.ConfigurationException import akka.remote.RemoteScope import 
akka.actor.AddressFromURIString import akka.actor.SupervisorStrategy From 08ff967b08886af2fc815016546728e0a1219420 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:08:52 +0200 Subject: [PATCH 034/106] Moving in SharingMailbox into BalancingDispatcher and making team & messageQueue private[akka] --- .../akka/dispatch/BalancingDispatcher.scala | 46 ++++++++++--------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index e95f54b88b..ee492409ec 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -35,12 +35,35 @@ class BalancingDispatcher( attemptTeamWork: Boolean) extends Dispatcher(_prerequisites, _id, throughput, throughputDeadlineTime, mailboxType, _executorServiceFactoryProvider, _shutdownTimeout) { - val team = new ConcurrentSkipListSet[ActorCell]( + /** + * INTERNAL USE ONLY + */ + private[akka] val team = new ConcurrentSkipListSet[ActorCell]( Helpers.identityHashComparator(new Comparator[ActorCell] { def compare(l: ActorCell, r: ActorCell) = l.self.path compareTo r.self.path })) - val messageQueue: MessageQueue = mailboxType.create(None) + /** + * INTERNAL USE ONLY + */ + private[akka] val messageQueue: MessageQueue = mailboxType.create(None) + + private class SharingMailbox(_actor: ActorCell, _messageQueue: MessageQueue) extends Mailbox(_actor, _messageQueue) with DefaultSystemMessageQueue { + override def cleanUp(): Unit = { + //Don't call the original implementation of this since it scraps all messages, and we don't want to do that + if (hasSystemMessages) { + val dlq = actor.systemImpl.deadLetterMailbox + var message = systemDrain() + while (message ne null) { + // message must be “virgin” before being able to systemEnqueue again + val next = message.next + message.next = null + dlq.systemEnqueue(actor.self, message) + message = next + } + } + } + } protected[akka] override def createMailbox(actor: ActorCell): Mailbox = new SharingMailbox(actor, messageQueue) @@ -74,22 +97,3 @@ class BalancingDispatcher( scheduleOne() } } - -class SharingMailbox(_actor: ActorCell, _messageQueue: MessageQueue) - extends Mailbox(_actor, _messageQueue) with DefaultSystemMessageQueue { - - override def cleanUp(): Unit = { - //Don't call the original implementation of this since it scraps all messages, and we don't want to do that - if (hasSystemMessages) { - val dlq = actor.systemImpl.deadLetterMailbox - var message = systemDrain() - while (message ne null) { - // message must be “virgin” before being able to systemEnqueue again - val next = message.next - message.next = null - dlq.systemEnqueue(actor.self, message) - message = next - } - } - } -} From 630c4a0153fc8f96ff1f5d00f371e92f1b26a766 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:14:49 +0200 Subject: [PATCH 035/106] Adding docs and fixing a potential bug in use of getSimpleName in Dispatcher --- .../main/scala/akka/dispatch/Dispatcher.scala | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index fbffd08d7e..3a73bf0718 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -9,6 +9,7 @@ import 
java.util.concurrent.atomic.AtomicReference import akka.actor.ActorCell import akka.util.Duration import java.util.concurrent._ +import akka.event.Logging /** * The event-based ``Dispatcher`` binds a set of Actors to a thread pool backed up by a @@ -38,18 +39,27 @@ class Dispatcher( protected val executorService = new AtomicReference[ExecutorServiceDelegate]( new ExecutorServiceDelegate { lazy val executor = executorServiceFactory.createExecutorService }) + /** + * INTERNAL USE ONLY + */ protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope) = { val mbox = receiver.mailbox mbox.enqueue(receiver.self, invocation) registerForExecution(mbox, true, false) } + /** + * INTERNAL USE ONLY + */ protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage) = { val mbox = receiver.mailbox mbox.systemEnqueue(receiver.self, invocation) registerForExecution(mbox, false, true) } + /** + * INTERNAL USE ONLY + */ protected[akka] def executeTask(invocation: TaskInvocation) { try { executorService.get() execute invocation @@ -65,8 +75,14 @@ class Dispatcher( } } + /** + * INTERNAL USE ONLY + */ protected[akka] def createMailbox(actor: ActorCell): Mailbox = new Mailbox(actor, mailboxType.create(Some(actor))) with DefaultSystemMessageQueue + /** + * INTERNAL USE ONLY + */ protected[akka] def shutdown: Unit = Option(executorService.getAndSet(new ExecutorServiceDelegate { lazy val executor = executorServiceFactory.createExecutorService @@ -74,6 +90,8 @@ class Dispatcher( /** * Returns if it was registered + * + * INTERNAL USE ONLY */ protected[akka] override def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = { if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { //This needs to be here to ensure thread safety and no races @@ -97,7 +115,7 @@ class Dispatcher( } else false } - override val toString = getClass.getSimpleName + "[" + id + "]" + override val toString = Logging.simpleName(this) + "[" + id + "]" } object PriorityGenerator { From 72f12c89cd1a676c7fd2ca4e14c14ca63aedc90c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:21:58 +0200 Subject: [PATCH 036/106] Adding docs and privatizing some fields in Dispatchers.scala --- .../scala/akka/dispatch/Dispatchers.scala | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 93d44e007d..54173b8460 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -5,17 +5,15 @@ package akka.dispatch import java.util.concurrent.{ ConcurrentHashMap, TimeUnit, ThreadFactory } - -import scala.collection.JavaConverters.mapAsJavaMapConverter - import com.typesafe.config.{ ConfigFactory, Config } - -import Dispatchers.DefaultDispatcherId import akka.actor.{ Scheduler, DynamicAccess, ActorSystem } import akka.event.Logging.Warning import akka.event.EventStream import akka.util.Duration +/** + * DispatcherPrerequisites represents useful contextual pieces when constructing a MessageDispatcher + */ trait DispatcherPrerequisites { def threadFactory: ThreadFactory def eventStream: EventStream @@ -25,7 +23,10 @@ trait DispatcherPrerequisites { def settings: ActorSystem.Settings } -case class DefaultDispatcherPrerequisites( +/** + * INTERNAL USE ONLY + */ +private[akka] case class DefaultDispatcherPrerequisites( val 
threadFactory: ThreadFactory, val eventStream: EventStream, val deadLetterMailbox: Mailbox, @@ -110,7 +111,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc ConfigFactory.parseMap(Map("id" -> id).asJava) } - /* + /** * Creates a dispatcher from a Config. Internal test purpose only. * * ex: from(config.getConfig(id)) @@ -119,18 +120,22 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc * * Throws: IllegalArgumentException if the value of "type" is not valid * IllegalArgumentException if it cannot create the MessageDispatcherConfigurator + * + * INTERNAL USE ONLY */ private[akka] def from(cfg: Config): MessageDispatcher = { configuratorFrom(cfg).dispatcher() } - /* + /** * Creates a MessageDispatcherConfigurator from a Config. * * The Config must also contain a `id` property, which is the identifier of the dispatcher. * * Throws: IllegalArgumentException if the value of "type" is not valid * IllegalArgumentException if it cannot create the MessageDispatcherConfigurator + * + * INTERNAL USE ONLY */ private def configuratorFrom(cfg: Config): MessageDispatcherConfigurator = { if (!cfg.hasPath("id")) throw new IllegalArgumentException("Missing dispatcher 'id' property in config: " + cfg.root.render) @@ -208,7 +213,7 @@ class BalancingDispatcherConfigurator(config: Config, prerequisites: DispatcherP class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - val threadPoolConfig: ThreadPoolConfig = configureExecutor() match { + private val threadPoolConfig: ThreadPoolConfig = configureExecutor() match { case e: ThreadPoolExecutorConfigurator ⇒ e.threadPoolConfig case other ⇒ prerequisites.eventStream.publish( From 5ca3fe11f87f20b9cd54d3c24df21241c823ec7f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:37:23 +0200 Subject: [PATCH 037/106] Adding tons of ScalaDocs for Mailbox.scala --- .../main/scala/akka/dispatch/Mailbox.scala | 49 +++++++++++++++++-- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 2e3a98e8d9..ba559093d0 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -14,9 +14,15 @@ import akka.actor.ActorContext import com.typesafe.config.Config import akka.actor.ActorSystem +/** + * This exception normally is thrown when a bounded mailbox is over capacity + */ class MessageQueueAppendFailedException(message: String, cause: Throwable = null) extends AkkaException(message, cause) -object Mailbox { +/** + * INTERNAL USE ONLY + */ +private[akka] object Mailbox { type Status = Int @@ -244,6 +250,10 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } } +/** + * A MessageQueue is one of the core components in forming an Akka Mailbox. + * The MessageQueue is where the normal messages that are sent to Actors will be enqueued (and subsequently dequeued) + */ trait MessageQueue { /** * Try to enqueue the message to this queue, or throw an exception. @@ -277,7 +287,7 @@ trait MessageQueue { } /** - * Internal mailbox implementation detail. + * INTERNAL USE ONLY */ private[akka] trait SystemMessageQueue { /** @@ -294,7 +304,7 @@ private[akka] trait SystemMessageQueue { } /** - * Internal mailbox implementation detail. 
+ * INTERNAL USE ONLY */ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ @@ -325,6 +335,9 @@ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ def hasSystemMessages: Boolean = systemQueueGet ne null } +/** + * A QueueBasedMessageQueue is a MessageQueue which is backed by a java.util.Queue + */ trait QueueBasedMessageQueue extends MessageQueue { def queue: Queue[Envelope] def numberOfMessages = queue.size @@ -340,11 +353,18 @@ trait QueueBasedMessageQueue extends MessageQueue { } } +/** + * UnboundedMessageQueueSemantics adds the enqueue/dequeue operations for unbounded java.util.Queues + */ trait UnboundedMessageQueueSemantics extends QueueBasedMessageQueue { def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle def dequeue(): Envelope = queue.poll() } +/** + * BoundedMessageQueueSemantics adds the enqueue/dequeue operations for bounded java.util.Queues, + * and it also forces the java.util.Queue to extend java.util.BlockingQueue + */ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingQueue[Envelope] @@ -360,17 +380,27 @@ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def dequeue(): Envelope = queue.poll() } +/** + * DequeBasedMessageQueue forces the underlying java.util.Queue to extend java.util.Deque + */ trait DequeBasedMessageQueue extends QueueBasedMessageQueue { def queue: Deque[Envelope] def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit } +/** + * UnboundedDequeBasedMessageQueueSemantics adds the enqueue/dequeue operations for unbounded java.util.Deque + */ trait UnboundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = queue addFirst handle def dequeue(): Envelope = queue.poll() } +/** + * BoundedDequeBasedMessageQueueSemantics adds the enqueue/dequeue operations for bounded java.util.Deque, + * and it also forces the java.util.Deque to extend java.util.BlockingDeque + */ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingDeque[Envelope] @@ -393,14 +423,14 @@ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { } /** - * Mailbox configuration. + * MailboxType is used to construct a MessageQueue given an optional ActorContext owner.
*/ trait MailboxType { def create(owner: Option[ActorContext]): MessageQueue } /** - * It's a case class for Java (new UnboundedMailbox) + * UnboundedMailbox is the standard issue Akka Mailbox as it is unbounded and has quite good performance */ case class UnboundedMailbox() extends MailboxType { @@ -412,6 +442,9 @@ case class UnboundedMailbox() extends MailboxType { } } +/** + * BoundedMailbox is the default bounded mailbox + */ case class BoundedMailbox( final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"), @@ -452,6 +485,9 @@ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val cap } } +/** + * This is the default mailbox for Deques, which is unbounded + */ case class UnboundedDequeBasedMailbox() extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this() @@ -462,6 +498,9 @@ case class UnboundedDequeBasedMailbox() extends MailboxType { } } +/** + * This is the default mailbox for Deques, which is bounded + */ case class BoundedDequeBasedMailbox( final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"), From 5eba9fceef884b16c8764a78d0d8bb274dfbc830 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 13:37:26 +0200 Subject: [PATCH 038/106] Saving the planet and shufflin' --- .../scala/akka/event/EventStreamSpec.scala | 2 +- .../main/scala/akka/actor/UntypedActor.scala | 12 +- .../scala/akka/dispatch/Dispatchers.scala | 6 +- .../main/scala/akka/dispatch/Mailbox.scala | 56 +++++- .../main/scala/akka/event/DeathWatch.scala | 2 +- .../src/main/scala/akka/event/EventBus.scala | 15 +- .../src/main/scala/akka/event/Logging.scala | 167 ++++++++++-------- .../scala/akka/event/LoggingReceive.scala | 6 +- .../src/main/scala/akka/japi/JavaAPI.scala | 34 ++-- .../src/main/scala/akka/util/LockUtil.scala | 25 +-- 10 files changed, 177 insertions(+), 148 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index d2497c4a69..a8cd32f5d3 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -19,7 +19,7 @@ object EventStreamSpec { loglevel = INFO event-handlers = ["akka.event.EventStreamSpec$MyLog", "%s"] } - """.format(Logging.StandardOutLoggerName)) + """.format(Logging.StandardOutLogger.getClass.getName)) val configUnhandled = ConfigFactory.parseString(""" akka { diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index a5ebeb851c..c56a2a0167 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -93,11 +93,17 @@ import akka.japi.{ Creator } abstract class UntypedActor extends Actor { /** - * To be implemented by concrete UntypedActor. Defines the message handler. + * To be implemented by concrete UntypedActor, this defines the behavior of the + * UntypedActor. */ @throws(classOf[Exception]) def onReceive(message: Any): Unit + /** + * Returns this UntypedActor's UntypedActorContext + * The UntypedActorContext is not thread safe so do not expose it outside of the + * UntypedActor. 
+ */ def getContext(): UntypedActorContext = context.asInstanceOf[UntypedActorContext] /** @@ -150,9 +156,7 @@ abstract class UntypedActor extends Actor { */ override def postRestart(reason: Throwable): Unit = super.postRestart(reason) - final protected def receive = { - case msg ⇒ onReceive(msg) - } + final protected def receive = { case msg ⇒ onReceive(msg) } } /** diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 54173b8460..9fae624e66 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -97,6 +97,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc } } + //INTERNAL API private def config(id: String): Config = { import scala.collection.JavaConverters._ def simpleName = id.substring(id.lastIndexOf('.') + 1) @@ -106,6 +107,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc .withFallback(defaultDispatcherConfig) } + //INTERNAL API private def idConfig(id: String): Config = { import scala.collection.JavaConverters._ ConfigFactory.parseMap(Map("id" -> id).asJava) @@ -123,9 +125,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc * * INTERNAL USE ONLY */ - private[akka] def from(cfg: Config): MessageDispatcher = { - configuratorFrom(cfg).dispatcher() - } + private[akka] def from(cfg: Config): MessageDispatcher = configuratorFrom(cfg).dispatcher() /** * Creates a MessageDispatcherConfigurator from a Config. diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 2e3a98e8d9..1bb882d497 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -16,7 +16,10 @@ import akka.actor.ActorSystem class MessageQueueAppendFailedException(message: String, cause: Throwable = null) extends AkkaException(message, cause) -object Mailbox { +/** + * INTERNAL API + */ +private[akka] object Mailbox { type Status = Int @@ -40,6 +43,7 @@ object Mailbox { * Mailbox and InternalMailbox is separated in two classes because ActorCell is needed for implementation, * but can't be exposed to user defined mailbox subclasses. * + * INTERNAL API */ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: MessageQueue) extends SystemMessageQueue with Runnable { @@ -244,6 +248,10 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } } +/** + * A MessageQueue is the user-message "lane" of an Akka Mailbox. + * It needs to atleast support N producers and 1 consumer thread-safely. + */ trait MessageQueue { /** * Try to enqueue the message to this queue, or throw an exception. @@ -325,6 +333,9 @@ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ def hasSystemMessages: Boolean = systemQueueGet ne null } +/** + * A QueueBasedMessageQueue is a MessageQueue backed by a java.util.Queue + */ trait QueueBasedMessageQueue extends MessageQueue { def queue: Queue[Envelope] def numberOfMessages = queue.size @@ -340,11 +351,19 @@ trait QueueBasedMessageQueue extends MessageQueue { } } +/** + * UnboundedMessageQueueSemantics adds unbounded semantics to a QueueBasedMessageQueue, + * i.e. a non-blocking enqueue and dequeue. 
+ */ trait UnboundedMessageQueueSemantics extends QueueBasedMessageQueue { def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle def dequeue(): Envelope = queue.poll() } +/** + * BoundedMessageQueueSemantics adds bounded semantics to a QueueBasedMessageQueue, + * i.e. blocking enqueue with timeout + */ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingQueue[Envelope] @@ -360,17 +379,28 @@ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def dequeue(): Envelope = queue.poll() } +/** + * DequeBasedMessageQueue refines QueueBasedMessageQueue to be backed by a java.util.Deque + */ trait DequeBasedMessageQueue extends QueueBasedMessageQueue { def queue: Deque[Envelope] def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit } +/** + * UnboundedDequeBasedMessageQueueSemantics adds unbounded semantics to a DequeBasedMessageQueue, + * i.e. a non-blocking enqueue and dequeue. + */ trait UnboundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = queue addFirst handle def dequeue(): Envelope = queue.poll() } +/** + * BoundedDequeBasedMessageQueueSemantics adds bounded semantics to a DequeBasedMessageQueue, + * i.e. blocking enqueue with timeout + */ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingDeque[Envelope] @@ -393,14 +423,14 @@ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { } /** - * Mailbox configuration. + * MailboxType is a factory to create MessageQueues for an optionally provided ActorContext */ trait MailboxType { def create(owner: Option[ActorContext]): MessageQueue } /** - * It's a case class for Java (new UnboundedMailbox) + * UnboundedMailbox is the default unbounded MailboxType used by Akka Actors. */ case class UnboundedMailbox() extends MailboxType { @@ -412,6 +442,9 @@ case class UnboundedMailbox() extends MailboxType { } } +/** + * BoundedMailbox is the default bounded MailboxType used by Akka Actors. + */ case class BoundedMailbox( final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"), @@ -428,17 +461,20 @@ case class BoundedMailbox( final val capacity: Int, final val pushTimeOut: Durat } /** - * Extend me to provide the comparator + * UnboundedPriorityMailbox is an unbounded mailbox that allows for prioritization of its contents. + * Extend this class and provide the Comparator in the constructor. */ -class UnboundedPriorityMailbox( final val cmp: Comparator[Envelope]) extends MailboxType { +class UnboundedPriorityMailbox( final val cmp: Comparator[Envelope], final val initialCapacity: Int) extends MailboxType { + def this(cmp: Comparator[Envelope]) = this(cmp, 11) final override def create(owner: Option[ActorContext]): MessageQueue = - new PriorityBlockingQueue[Envelope](11, cmp) with QueueBasedMessageQueue with UnboundedMessageQueueSemantics { + new PriorityBlockingQueue[Envelope](initialCapacity, cmp) with QueueBasedMessageQueue with UnboundedMessageQueueSemantics { final def queue: Queue[Envelope] = this } } /** - * Extend me to provide the comparator + * BoundedPriorityMailbox is a bounded mailbox that allows for prioritization of its contents.
+ * Extend this class and provide the Comparator in the constructor. */ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { @@ -452,6 +488,9 @@ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val cap } } +/** + * UnboundedDequeBasedMailbox is an unbounded MailboxType, backed by a Deque. + */ case class UnboundedDequeBasedMailbox() extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this() @@ -462,6 +501,9 @@ case class UnboundedDequeBasedMailbox() extends MailboxType { } } +/** + * BoundedDequeBasedMailbox is an bounded MailboxType, backed by a Deque. + */ case class BoundedDequeBasedMailbox( final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"), diff --git a/akka-actor/src/main/scala/akka/event/DeathWatch.scala b/akka-actor/src/main/scala/akka/event/DeathWatch.scala index 7469f6609f..8bf6935619 100644 --- a/akka-actor/src/main/scala/akka/event/DeathWatch.scala +++ b/akka-actor/src/main/scala/akka/event/DeathWatch.scala @@ -12,7 +12,7 @@ import akka.actor._ * A failed subscribe should also only mean that the Classifier (ActorRef) that is listened to is already shut down * See LocalDeathWatch for semantics */ -trait DeathWatch extends ActorEventBus with ActorClassifier { +abstract class DeathWatch extends ActorEventBus with ActorClassifier { type Event = Terminated protected final def classify(event: Event): Classifier = event.actor diff --git a/akka-actor/src/main/scala/akka/event/EventBus.scala b/akka-actor/src/main/scala/akka/event/EventBus.scala index 2dd22b3b54..6a5cc67cc4 100644 --- a/akka-actor/src/main/scala/akka/event/EventBus.scala +++ b/akka-actor/src/main/scala/akka/event/EventBus.scala @@ -182,10 +182,9 @@ trait SubchannelClassification { this: EventBus ⇒ */ trait ScanningClassification { self: EventBus ⇒ protected final val subscribers = new ConcurrentSkipListSet[(Classifier, Subscriber)](new Comparator[(Classifier, Subscriber)] { - def compare(a: (Classifier, Subscriber), b: (Classifier, Subscriber)): Int = { - val cM = compareClassifiers(a._1, b._1) - if (cM != 0) cM - else compareSubscribers(a._2, b._2) + def compare(a: (Classifier, Subscriber), b: (Classifier, Subscriber)): Int = compareClassifiers(a._1, b._1) match { + case 0 ⇒ compareSubscribers(a._2, b._2) + case other ⇒ other } }) @@ -238,7 +237,7 @@ trait ActorClassification { this: ActorEventBus with ActorClassifier ⇒ import java.util.concurrent.ConcurrentHashMap import scala.annotation.tailrec private val empty = TreeSet.empty[ActorRef] - protected val mappings = new ConcurrentHashMap[ActorRef, TreeSet[ActorRef]](mapSize) + private val mappings = new ConcurrentHashMap[ActorRef, TreeSet[ActorRef]](mapSize) @tailrec protected final def associate(monitored: ActorRef, monitor: ActorRef): Boolean = { @@ -320,9 +319,9 @@ trait ActorClassification { this: ActorEventBus with ActorClassifier ⇒ */ protected def mapSize: Int - def publish(event: Event): Unit = { - val receivers = mappings.get(classify(event)) - if (receivers ne null) receivers foreach { _ ! event } + def publish(event: Event): Unit = mappings.get(classify(event)) match { + case null ⇒ () + case some ⇒ some foreach { _ ! 
event } } def subscribe(subscriber: Subscriber, to: Classifier): Boolean = associate(to, subscriber) diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index ac31b133b3..1230756517 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -29,7 +29,7 @@ trait LoggingBus extends ActorEventBus { import Logging._ - private val guard = new ReentrantGuard + private val guard = new ReentrantGuard //Switch to ReentrantReadWrite private var loggers = Seq.empty[ActorRef] private var _logLevel: LogLevel = _ @@ -97,7 +97,7 @@ trait LoggingBus extends ActorEventBus { val myloggers = for { loggerName ← defaultLoggers - if loggerName != StandardOutLoggerName + if loggerName != StandardOutLogger.getClass.getName } yield { try { system.dynamicAccess.getClassFor[Actor](loggerName) match { @@ -129,7 +129,7 @@ trait LoggingBus extends ActorEventBus { case _: InvalidActorNameException ⇒ // ignore if it is already running } publish(Debug(logName, this.getClass, "Default Loggers started")) - if (!(defaultLoggers contains StandardOutLoggerName)) { + if (!(defaultLoggers contains StandardOutLogger.getClass.getName)) { unsubscribe(StandardOutLogger) } } catch { @@ -163,6 +163,9 @@ trait LoggingBus extends ActorEventBus { publish(Debug(simpleName(this), this.getClass, "all default loggers stopped")) } + /** + * INTERNAL API + */ private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel, logName: String): ActorRef = { val name = "log" + Extension(system).id() + "-" + simpleName(clazz) val actor = system.systemActorOf(Props(clazz), name) @@ -361,17 +364,33 @@ object LogSource { */ object Logging { + /** + * Returns a 'safe' getSimpleName for the provided object's Class + * @param obj + * @return the simple name of the given object's Class + */ def simpleName(obj: AnyRef): String = simpleName(obj.getClass) + /** + * Returns a 'safe' getSimpleName for the provided Class + * @param obj + * @return the simple name of the given Class + */ def simpleName(clazz: Class[_]): String = { val n = clazz.getName val i = n.lastIndexOf('.') n.substring(i + 1) } - object Extension extends ExtensionKey[LogExt] + /** + * INTERNAL API + */ + private[akka] object Extension extends ExtensionKey[LogExt] - class LogExt(system: ExtendedActorSystem) extends Extension { + /** + * INTERNAL API + */ + private[akka] class LogExt(system: ExtendedActorSystem) extends Extension { private val loggerId = new AtomicInteger def id() = loggerId.incrementAndGet() } @@ -431,12 +450,6 @@ object Logging { // these type ascriptions/casts are necessary to avoid CCEs during construction while retaining correct type val AllLogLevels = Seq(ErrorLevel: AnyRef, WarningLevel, InfoLevel, DebugLevel).asInstanceOf[Seq[LogLevel]] - val errorFormat = "[ERROR] [%s] [%s] [%s] %s\n%s".intern - val errorFormatWithoutCause = "[ERROR] [%s] [%s] [%s] %s".intern - val warningFormat = "[WARN] [%s] [%s] [%s] %s".intern - val infoFormat = "[INFO] [%s] [%s] [%s] %s".intern - val debugFormat = "[DEBUG] [%s] [%s] [%s] %s".intern - /** * Obtain LoggingAdapter for the given actor system and source object. 
This * will use the system’s event stream and include the system’s address in the @@ -624,27 +637,34 @@ object Logging { // weird return type due to binary compatibility def loggerInitialized(): LoggerInitialized.type = LoggerInitialized + /** + * LoggerInitializationException is thrown to indicate that there was a problem initializing a logger + * @param msg + */ class LoggerInitializationException(msg: String) extends AkkaException(msg) trait StdOutLogger { import java.text.SimpleDateFormat import java.util.Date - val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.S") + private val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.S") + private val errorFormat = "[ERROR] [%s] [%s] [%s] %s\n%s".intern + private val errorFormatWithoutCause = "[ERROR] [%s] [%s] [%s] %s".intern + private val warningFormat = "[WARN] [%s] [%s] [%s] %s".intern + private val infoFormat = "[INFO] [%s] [%s] [%s] %s".intern + private val debugFormat = "[DEBUG] [%s] [%s] [%s] %s".intern - def timestamp = dateFormat.format(new Date) + def timestamp(): String = synchronized { dateFormat.format(new Date) } // SDF isn't threadsafe - def print(event: Any) { - event match { - case e: Error ⇒ error(e) - case e: Warning ⇒ warning(e) - case e: Info ⇒ info(e) - case e: Debug ⇒ debug(e) - case e ⇒ warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e)) - } + def print(event: Any): Unit = event match { + case e: Error ⇒ error(e) + case e: Warning ⇒ warning(e) + case e: Info ⇒ info(e) + case e: Debug ⇒ debug(e) + case e ⇒ warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e)) } - def error(event: Error) = { + def error(event: Error): Unit = { val f = if (event.cause == Error.NoCause) errorFormatWithoutCause else errorFormat println(f.format( timestamp, @@ -654,21 +674,21 @@ object Logging { stackTraceFor(event.cause))) } - def warning(event: Warning) = + def warning(event: Warning): Unit = println(warningFormat.format( timestamp, event.thread.getName, event.logSource, event.message)) - def info(event: Info) = + def info(event: Info): Unit = println(infoFormat.format( timestamp, event.thread.getName, event.logSource, event.message)) - def debug(event: Debug) = + def debug(event: Debug): Unit = println(debugFormat.format( timestamp, event.thread.getName, @@ -689,8 +709,8 @@ object Logging { override val toString = "StandardOutLogger" override def !(message: Any)(implicit sender: ActorRef = null): Unit = print(message) } + val StandardOutLogger = new StandardOutLogger - val StandardOutLoggerName = StandardOutLogger.getClass.getName /** * Actor wrapper around the standard output logger. If @@ -708,7 +728,7 @@ object Logging { * Returns the StackTrace for the given Throwable as a String */ def stackTraceFor(e: Throwable): String = e match { - case null | Error.NoCause ⇒ "" + case null | Error.NoCause | _: NoStackTrace ⇒ "" case other ⇒ val sw = new java.io.StringWriter val pw = new java.io.PrintWriter(sw) @@ -752,51 +772,51 @@ trait LoggingAdapter { * These actually implement the passing on of the messages to be logged. * Will not be called if is...Enabled returned false. 
*/ - protected def notifyError(message: String) - protected def notifyError(cause: Throwable, message: String) - protected def notifyWarning(message: String) - protected def notifyInfo(message: String) - protected def notifyDebug(message: String) + protected def notifyError(message: String): Unit + protected def notifyError(cause: Throwable, message: String): Unit + protected def notifyWarning(message: String): Unit + protected def notifyInfo(message: String): Unit + protected def notifyDebug(message: String): Unit /* * The rest is just the widening of the API for the user's convenience. */ - def error(cause: Throwable, message: String) { if (isErrorEnabled) notifyError(cause, message) } - def error(cause: Throwable, template: String, arg1: Any) { if (isErrorEnabled) notifyError(cause, format1(template, arg1)) } - def error(cause: Throwable, template: String, arg1: Any, arg2: Any) { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2)) } - def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any) { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3)) } - def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3, arg4)) } + def error(cause: Throwable, message: String): Unit = { if (isErrorEnabled) notifyError(cause, message) } + def error(cause: Throwable, template: String, arg1: Any): Unit = { if (isErrorEnabled) notifyError(cause, format1(template, arg1)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3, arg4)) } - def error(message: String) { if (isErrorEnabled) notifyError(message) } - def error(template: String, arg1: Any) { if (isErrorEnabled) notifyError(format1(template, arg1)) } - def error(template: String, arg1: Any, arg2: Any) { if (isErrorEnabled) notifyError(format(template, arg1, arg2)) } - def error(template: String, arg1: Any, arg2: Any, arg3: Any) { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3)) } - def error(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3, arg4)) } + def error(message: String): Unit = { if (isErrorEnabled) notifyError(message) } + def error(template: String, arg1: Any): Unit = { if (isErrorEnabled) notifyError(format1(template, arg1)) } + def error(template: String, arg1: Any, arg2: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2)) } + def error(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3)) } + def error(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3, arg4)) } - def warning(message: String) { if (isWarningEnabled) notifyWarning(message) } - def warning(template: String, arg1: Any) { if (isWarningEnabled) notifyWarning(format1(template, arg1)) } - def warning(template: String, arg1: Any, arg2: Any) { if (isWarningEnabled) notifyWarning(format(template, arg1, 
arg2)) } - def warning(template: String, arg1: Any, arg2: Any, arg3: Any) { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3)) } - def warning(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4)) } + def warning(message: String): Unit = { if (isWarningEnabled) notifyWarning(message) } + def warning(template: String, arg1: Any): Unit = { if (isWarningEnabled) notifyWarning(format1(template, arg1)) } + def warning(template: String, arg1: Any, arg2: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2)) } + def warning(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3)) } + def warning(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4)) } def info(message: String) { if (isInfoEnabled) notifyInfo(message) } - def info(template: String, arg1: Any) { if (isInfoEnabled) notifyInfo(format1(template, arg1)) } - def info(template: String, arg1: Any, arg2: Any) { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2)) } - def info(template: String, arg1: Any, arg2: Any, arg3: Any) { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3)) } - def info(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3, arg4)) } + def info(template: String, arg1: Any): Unit = { if (isInfoEnabled) notifyInfo(format1(template, arg1)) } + def info(template: String, arg1: Any, arg2: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2)) } + def info(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3)) } + def info(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3, arg4)) } def debug(message: String) { if (isDebugEnabled) notifyDebug(message) } - def debug(template: String, arg1: Any) { if (isDebugEnabled) notifyDebug(format1(template, arg1)) } - def debug(template: String, arg1: Any, arg2: Any) { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2)) } - def debug(template: String, arg1: Any, arg2: Any, arg3: Any) { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3)) } - def debug(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3, arg4)) } + def debug(template: String, arg1: Any): Unit = { if (isDebugEnabled) notifyDebug(format1(template, arg1)) } + def debug(template: String, arg1: Any, arg2: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2)) } + def debug(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3)) } + def debug(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3, arg4)) } def log(level: Logging.LogLevel, message: String) { if (isEnabled(level)) notifyLog(level, message) } - def log(level: Logging.LogLevel, template: String, arg1: Any) { if (isEnabled(level)) notifyLog(level, format1(template, arg1)) } - def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any) { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2)) } - 
def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any) { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3)) } - def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3, arg4)) } + def log(level: Logging.LogLevel, template: String, arg1: Any): Unit = { if (isEnabled(level)) notifyLog(level, format1(template, arg1)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3, arg4)) } final def isEnabled(level: Logging.LogLevel): Boolean = level match { case Logging.ErrorLevel ⇒ isErrorEnabled @@ -812,14 +832,14 @@ trait LoggingAdapter { case Logging.DebugLevel ⇒ if (isDebugEnabled) notifyDebug(message) } - private def format1(t: String, arg: Any) = arg match { + private def format1(t: String, arg: Any): String = arg match { case a: Array[_] if !a.getClass.getComponentType.isPrimitive ⇒ format(t, a: _*) case a: Array[_] ⇒ format(t, (a map (_.asInstanceOf[AnyRef]): _*)) case x ⇒ format(t, x) } - def format(t: String, arg: Any*) = { - val sb = new StringBuilder + def format(t: String, arg: Any*): String = { + val sb = new StringBuilder //FIXME add some decent size hint here var p = 0 var rest = t while (p < arg.length) { @@ -829,17 +849,15 @@ trait LoggingAdapter { rest = "" p = arg.length } else { - sb.append(rest.substring(0, index)) - sb.append(arg(p)) + sb.append(rest.substring(0, index)).append(arg(p)) rest = rest.substring(index + 2) p += 1 } } - sb.append(rest) - sb.toString + sb.append(rest).toString } } - +//FIXME DOCUMENT class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class[_]) extends LoggingAdapter { import Logging._ @@ -849,14 +867,9 @@ class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class def isInfoEnabled = bus.logLevel >= InfoLevel def isDebugEnabled = bus.logLevel >= DebugLevel - protected def notifyError(message: String) { bus.publish(Error(logSource, logClass, message)) } - - protected def notifyError(cause: Throwable, message: String) { bus.publish(Error(cause, logSource, logClass, message)) } - - protected def notifyWarning(message: String) { bus.publish(Warning(logSource, logClass, message)) } - - protected def notifyInfo(message: String) { bus.publish(Info(logSource, logClass, message)) } - - protected def notifyDebug(message: String) { bus.publish(Debug(logSource, logClass, message)) } - + protected def notifyError(message: String): Unit = bus.publish(Error(logSource, logClass, message)) + protected def notifyError(cause: Throwable, message: String): Unit = bus.publish(Error(cause, logSource, logClass, message)) + protected def notifyWarning(message: String): Unit = bus.publish(Warning(logSource, logClass, message)) + protected def notifyInfo(message: String): Unit = bus.publish(Info(logSource, logClass, message)) + protected def notifyDebug(message: String): Unit = bus.publish(Debug(logSource, logClass, message)) } diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala 
b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index 452b2b6b19..337815eed1 100644 --- a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -26,9 +26,7 @@ object LoggingReceive { */ def apply(r: Receive)(implicit context: ActorContext): Receive = r match { case _: LoggingReceive ⇒ r - case _ ⇒ - if (context.system.settings.AddLoggingReceive) new LoggingReceive(None, r) - else r + case _ ⇒ if (context.system.settings.AddLoggingReceive) new LoggingReceive(None, r) else r } } @@ -37,7 +35,7 @@ object LoggingReceive { * @param source the log source, if not defined the actor of the context will be used */ class LoggingReceive(source: Option[AnyRef], r: Receive)(implicit context: ActorContext) extends Receive { - def isDefinedAt(o: Any) = { + def isDefinedAt(o: Any): Boolean = { val handled = r.isDefinedAt(o) val (str, clazz) = LogSource.fromAnyRef(source getOrElse context.asInstanceOf[ActorCell].actor) context.system.eventStream.publish(Debug(str, clazz, "received " + (if (handled) "handled" else "unhandled") + " message " + o)) diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index 47ce667759..5bd38ad52a 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -24,28 +24,14 @@ trait Function2[T1, T2, R] { * A Procedure is like a Function, but it doesn't produce a return value. */ trait Procedure[T] { - def apply(param: T) -} - -/** - * A Procedure is like a Function, but it doesn't produce a return value. - */ -trait Procedure2[T1, T2] { - def apply(param: T1, param2: T2) -} - -/** - * An executable piece of code that takes no parameters and doesn't return any value. - */ -trait SideEffect { - def apply() + def apply(param: T): Unit } /** * An executable piece of code that takes no parameters and doesn't return any value. */ trait Effect { - def apply() + def apply(): Unit } /** @@ -67,9 +53,9 @@ sealed abstract class Option[A] extends java.lang.Iterable[A] { def get: A def isEmpty: Boolean - def isDefined = !isEmpty + def isDefined: Boolean = !isEmpty def asScala: scala.Option[A] - def iterator = if (isEmpty) Iterator.empty else Iterator.single(get) + def iterator: java.util.Iterator[A] = if (isEmpty) Iterator.empty else Iterator.single(get) } object Option { @@ -102,18 +88,18 @@ object Option { * A. */ final case class Some[A](v: A) extends Option[A] { - def get = v - def isEmpty = false - def asScala = scala.Some(v) + def get: A = v + def isEmpty: Boolean = false + def asScala: scala.Some[A] = scala.Some(v) } /** * This case object represents non-existent values. 
*/ private case object None extends Option[Nothing] { - def get = throw new NoSuchElementException("None.get") - def isEmpty = true - def asScala = scala.None + def get: Nothing = throw new NoSuchElementException("None.get") + def isEmpty: Boolean = true + def asScala: scala.None.type = scala.None } implicit def java2ScalaOption[A](o: Option[A]): scala.Option[A] = o.asScala diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index c3295d4b52..14c787d3f6 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -7,17 +7,12 @@ package akka.util import java.util.concurrent.locks.{ ReentrantLock } import java.util.concurrent.atomic.{ AtomicBoolean } -final class ReentrantGuard { - final val lock = new ReentrantLock +final class ReentrantGuard extends ReentrantLock { @inline final def withGuard[T](body: ⇒ T): T = { - lock.lock - try { - body - } finally { - lock.unlock - } + lock() + try body finally unlock() } } @@ -104,19 +99,13 @@ class Switch(startAsOn: Boolean = false) { * Executes the provided action and returns its value if the switch is on, waiting for any pending changes to happen before (locking) * Be careful of longrunning or blocking within the provided action as it can lead to deadlocks or bad performance */ - def whileOnYield[T](action: ⇒ T): Option[T] = synchronized { - if (switch.get) Some(action) - else None - } + def whileOnYield[T](action: ⇒ T): Option[T] = synchronized { if (switch.get) Some(action) else None } /** * Executes the provided action and returns its value if the switch is off, waiting for any pending changes to happen before (locking) * Be careful of longrunning or blocking within the provided action as it can lead to deadlocks or bad performance */ - def whileOffYield[T](action: ⇒ T): Option[T] = synchronized { - if (!switch.get) Some(action) - else None - } + def whileOffYield[T](action: ⇒ T): Option[T] = synchronized { if (!switch.get) Some(action) else None } /** * Executes the provided action and returns if the action was executed or not, if the switch is on, waiting for any pending changes to happen before (locking) @@ -144,9 +133,7 @@ class Switch(startAsOn: Boolean = false) { * Executes the provided callbacks depending on if the switch is either on or off waiting for any pending changes to happen before (locking) * Be careful of longrunning or blocking within the provided action as it can lead to deadlocks or bad performance */ - def fold[T](on: ⇒ T)(off: ⇒ T) = synchronized { - if (switch.get) on else off - } + def fold[T](on: ⇒ T)(off: ⇒ T): T = synchronized { if (switch.get) on else off } /** * Executes the given code while holding this switch’s lock, i.e. protected from concurrent modification of the switch status. 
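
Editor's note: a minimal usage sketch (not part of the patch series) of the akka.util.ReentrantGuard and Switch utilities refactored in the LockUtil.scala hunk above. The Counter class and its fields are hypothetical and exist only for illustration.

import akka.util.{ ReentrantGuard, Switch }

class Counter {
  private val guard = new ReentrantGuard // after this patch, ReentrantGuard *is* a java.util.concurrent.locks.ReentrantLock
  private var count = 0

  // withGuard locks, evaluates the by-name body, and unlocks in a finally block
  def increment(): Int = guard.withGuard { count += 1; count }

  private val enabled = new Switch(startAsOn = true)

  // fold evaluates exactly one branch while synchronized on the switch,
  // so the on/off status cannot flip in the middle of the decision
  def report(): String = enabled.fold("counter enabled")("counter disabled")

  // whileOnYield runs the action only if the switch is on, yielding Option[T]
  def safeIncrement(): Option[Int] = enabled.whileOnYield(increment())
}
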
From 5afe6601ff3ca7168ee8fe1aeb3d52bd8a3cfbbd Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 14:55:38 +0200 Subject: [PATCH 039/106] Removing ActorTimeoutException since it was only used in GracefulStop, and changed GracefulStop to use PromiseActorRef instead of spawning a toplevel actor --- .../test/scala/akka/pattern/PatternSpec.scala | 9 +---- .../src/main/scala/akka/actor/Actor.scala | 6 --- .../main/scala/akka/pattern/AskSupport.scala | 4 +- .../akka/pattern/GracefulStopSupport.scala | 37 ++++++++----------- 4 files changed, 18 insertions(+), 38 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala index 2776beabce..68e6d40824 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -7,11 +7,9 @@ package akka.pattern import akka.testkit.AkkaSpec import akka.actor.Props import akka.actor.Actor -import akka.actor.ActorTimeoutException import akka.util.Duration import akka.util.duration._ import akka.dispatch.{ Future, Promise, Await } -import java.lang.IllegalStateException object PatternSpec { case class Work(duration: Duration) @@ -41,13 +39,10 @@ class PatternSpec extends AkkaSpec { Await.ready(gracefulStop(target, 1 millis), 1 second) } - "complete Future with ActorTimeoutException when actor not terminated within timeout" in { + "complete Future with AskTimeoutException when actor not terminated within timeout" in { val target = system.actorOf(Props[TargetActor]) target ! Work(250 millis) - val result = gracefulStop(target, 10 millis) - intercept[ActorTimeoutException] { - Await.result(result, 200 millis) - } + intercept[AskTimeoutException] { Await.result(gracefulStop(target, 10 millis), 200 millis) } } } diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 3e233a2056..7c020925eb 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -129,12 +129,6 @@ case class ActorInitializationException private[akka] (actor: ActorRef, message: def this(msg: String) = this(null, msg, null) } -//FIXME: Only used by gracefulStop we should remove this if possible -class ActorTimeoutException private[akka] (message: String, cause: Throwable = null) - extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null) -} - /** * InvalidMessageException is thrown when an invalid message is sent to an Actor. 
* Technically it's only "null" which is an InvalidMessageException but who knows, diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index ef4217039d..ede65b17da 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -272,9 +272,7 @@ private[akka] object PromiseActorRef { val result = Promise[Any]()(provider.dispatcher) val a = new PromiseActorRef(provider, result) val f = provider.scheduler.scheduleOnce(timeout.duration) { result.tryComplete(Left(new AskTimeoutException("Timed out"))) } - result onComplete { _ ⇒ - try a.stop() finally f.cancel() - } + result onComplete { _ ⇒ try a.stop() finally f.cancel() } a } } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index d6fbd31c1e..8b441f3d5b 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -4,9 +4,9 @@ package akka.pattern -import akka.actor.{ ActorRef, Actor, ActorSystem, Props, PoisonPill, Terminated, ReceiveTimeout, ActorTimeoutException } import akka.dispatch.{ Promise, Future } -import akka.util.Duration +import akka.actor._ +import akka.util.{ Timeout, Duration } trait GracefulStopSupport { /** @@ -14,7 +14,8 @@ trait GracefulStopSupport { * existing messages of the target actor has been processed and the actor has been * terminated. * - * Useful when you need to wait for termination or compose ordered termination of several actors. + * Useful when you need to wait for termination or compose ordered termination of several actors, + * which should only be done outside of the ActorSystem as blocking inside Actors is discouraged. * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] * is completed with failure [[akka.actor.ActorTimeoutException]]. @@ -22,26 +23,18 @@ trait GracefulStopSupport { def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { if (target.isTerminated) { Promise.successful(true) - } else { - val result = Promise[Boolean]() - system.actorOf(Props(new Actor { - // Terminated will be received when target has been stopped - context watch target + } else system match { + case e: ExtendedActorSystem ⇒ + val ref = PromiseActorRef(e.provider, Timeout(timeout)) + e.deathWatch.subscribe(ref, target) + ref.result onComplete { case x ⇒ println(x) } + ref.result onComplete { + case Right(Terminated(`target`)) ⇒ () // Ignore + case _ ⇒ e.deathWatch.unsubscribe(ref, target) + } // Just making sure we're not leaking here target ! 
PoisonPill - // ReceiveTimeout will be received if nothing else is received within the timeout - context setReceiveTimeout timeout - - def receive = { - case Terminated(a) if a == target ⇒ - result success true - context stop self - case ReceiveTimeout ⇒ - result failure new ActorTimeoutException( - "Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)) - context stop self - } - })) - result + ref.result map { case Terminated(`target`) ⇒ true } + case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") } } } \ No newline at end of file From 2bb255b480d64aa60b5540705d4b0d62114595a3 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 15:04:08 +0200 Subject: [PATCH 040/106] Removing ActorTimeoutException everywhere... I swear it... --- akka-actor/src/main/scala/akka/pattern/AskSupport.scala | 6 +++--- .../src/main/scala/akka/pattern/GracefulStopSupport.scala | 2 +- akka-actor/src/main/scala/akka/pattern/Patterns.scala | 6 +++--- .../java/code/akka/docs/actor/UntypedActorDocTestBase.java | 4 ++-- akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala | 3 +-- .../scala/akka/transactor/CoordinatedIncrementSpec.scala | 4 ++-- .../src/test/scala/akka/transactor/FickleFriendsSpec.scala | 4 ++-- .../src/test/scala/akka/transactor/TransactorSpec.scala | 4 ++-- 8 files changed, 16 insertions(+), 17 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index ede65b17da..cfaa0a182b 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -46,7 +46,7 @@ trait AskSupport { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). @@ -96,7 +96,7 @@ trait AskSupport { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). @@ -126,7 +126,7 @@ trait AskSupport { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). 
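
Editor's note: an illustrative sketch (not part of the patch) of the ask pattern whose scaladoc links are corrected in the AskSupport.scala hunks above. The reply Future is completed with akka.pattern.AskTimeoutException, which lives in akka.pattern rather than akka.actor, matching the fixed links. `echo` is a hypothetical ActorRef that replies to its sender.

import akka.actor.ActorRef
import akka.dispatch.{ Await, Future }
import akka.pattern.{ ask, AskTimeoutException }
import akka.util.Timeout
import akka.util.duration._

def askWithFallback(echo: ActorRef): String = {
  implicit val timeout = Timeout(2 seconds) // timeout applied to the ask itself
  val reply: Future[Any] = echo ? "ping"
  try Await.result(reply, 3 seconds).toString
  catch {
    // thrown by the ask timeout above, independent of the Await timeout
    case _: AskTimeoutException => "no reply within 2 seconds"
  }
}
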
diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 8b441f3d5b..9c8b6ae5ff 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -18,7 +18,7 @@ trait GracefulStopSupport { * which should only be done outside of the ActorSystem as blocking inside Actors is discouraged. * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] - * is completed with failure [[akka.actor.ActorTimeoutException]]. + * is completed with failure [[akka.pattern.AskTimeoutException]]. */ def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { if (target.isTerminated) { diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index b58e9a8fc1..853b46e318 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -18,7 +18,7 @@ object Patterns { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). @@ -49,7 +49,7 @@ object Patterns { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). @@ -100,7 +100,7 @@ object Patterns { * Useful when you need to wait for termination or compose ordered termination of several actors. * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] - * is completed with failure [[akka.actor.ActorTimeoutException]]. + * is completed with failure [[akka.pattern.AskTimeoutException]]. 
*/ def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = scalaGracefulStop(target, timeout)(system).asInstanceOf[Future[java.lang.Boolean]] diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java index 65ff37c10e..146131f61e 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java @@ -36,7 +36,7 @@ import static akka.pattern.Patterns.gracefulStop; import akka.dispatch.Future; import akka.dispatch.Await; import akka.util.Duration; -import akka.actor.ActorTimeoutException; +import akka.pattern.AskTimeoutException; //#import-gracefulStop //#import-askPipe @@ -207,7 +207,7 @@ public class UntypedActorDocTestBase { Future stopped = gracefulStop(actorRef, Duration.create(5, TimeUnit.SECONDS), system); Await.result(stopped, Duration.create(6, TimeUnit.SECONDS)); // the actor has been stopped - } catch (ActorTimeoutException e) { + } catch (AskTimeoutException e) { // the actor wasn't stopped within 5 seconds } //#gracefulStop diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index 0bc540f970..8aed17605c 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -326,14 +326,13 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { //#gracefulStop import akka.pattern.gracefulStop import akka.dispatch.Await - import akka.actor.ActorTimeoutException try { val stopped: Future[Boolean] = gracefulStop(actorRef, 5 seconds)(system) Await.result(stopped, 6 seconds) // the actor has been stopped } catch { - case e: ActorTimeoutException ⇒ // the actor wasn't stopped within 5 seconds + case e: akka.pattern.AskTimeoutException ⇒ // the actor wasn't stopped within 5 seconds } //#gracefulStop } diff --git a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala index 9c019a56a5..c76a5a701c 100644 --- a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala @@ -12,7 +12,7 @@ import akka.util.duration._ import akka.util.Timeout import akka.testkit._ import scala.concurrent.stm._ -import akka.pattern.ask +import akka.pattern.{ AskTimeoutException, ask } object CoordinatedIncrement { @@ -96,7 +96,7 @@ class CoordinatedIncrementSpec extends AkkaSpec(CoordinatedIncrement.config) wit val ignoreExceptions = Seq( EventFilter[ExpectedFailureException](), EventFilter[CoordinatedTransactionException](), - EventFilter[ActorTimeoutException]()) + EventFilter[AskTimeoutException]()) filterEvents(ignoreExceptions) { val (counters, failer) = actorOfs val coordinated = Coordinated() diff --git a/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala b/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala index 4f7fc89c14..9deee7b9cc 100644 --- a/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala @@ -15,7 +15,7 @@ import akka.testkit.TestEvent.Mute import scala.concurrent.stm._ import scala.util.Random.{ nextInt ⇒ random } import java.util.concurrent.CountDownLatch -import akka.pattern.ask +import 
akka.pattern.{ AskTimeoutException, ask } object FickleFriends { case class FriendlyIncrement(friends: Seq[ActorRef], timeout: Timeout, latch: CountDownLatch) @@ -120,7 +120,7 @@ class FickleFriendsSpec extends AkkaSpec with BeforeAndAfterAll { val ignoreExceptions = Seq( EventFilter[ExpectedFailureException](), EventFilter[CoordinatedTransactionException](), - EventFilter[ActorTimeoutException]()) + EventFilter[AskTimeoutException]()) system.eventStream.publish(Mute(ignoreExceptions)) val (counters, coordinator) = actorOfs val latch = new CountDownLatch(1) diff --git a/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala b/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala index 1954c9a13b..df9723ffd2 100644 --- a/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala @@ -10,7 +10,7 @@ import akka.util.duration._ import akka.util.Timeout import akka.testkit._ import scala.concurrent.stm._ -import akka.pattern.ask +import akka.pattern.{ AskTimeoutException, ask } object TransactorIncrement { case class Increment(friends: Seq[ActorRef], latch: TestLatch) @@ -105,7 +105,7 @@ class TransactorSpec extends AkkaSpec { val ignoreExceptions = Seq( EventFilter[ExpectedFailureException](), EventFilter[CoordinatedTransactionException](), - EventFilter[ActorTimeoutException]()) + EventFilter[AskTimeoutException]()) filterEvents(ignoreExceptions) { val (counters, failer) = createTransactors val failLatch = TestLatch(numCounters) From 07bf11d326132745fa1215570b26c12f7e49dee7 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 15:40:51 +0200 Subject: [PATCH 041/106] Removing debug equipment left inside the patient. --- akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 9c8b6ae5ff..5f78e8ba27 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -27,7 +27,6 @@ trait GracefulStopSupport { case e: ExtendedActorSystem ⇒ val ref = PromiseActorRef(e.provider, Timeout(timeout)) e.deathWatch.subscribe(ref, target) - ref.result onComplete { case x ⇒ println(x) } ref.result onComplete { case Right(Terminated(`target`)) ⇒ () // Ignore case _ ⇒ e.deathWatch.unsubscribe(ref, target) From c86051505b3c78629ec416f6a26b60552ac93c80 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 18 May 2012 15:55:04 +0200 Subject: [PATCH 042/106] wrap up MultiNodeSpec, see #1934 and #2063 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - restructure message classes in sealed traits according to message flow direction and include confirmed/unconfirmed status in the type - add GetAddress query for obtaining the remote transport address of another test participant - add reconnects to Player - add small DSL with runOn(node...), ifNode(node...)()() and node():ActorPath - rewrite TestConductorSpec to use that DSL and run within a single test procedure instead of separate NodeX classes - hook up that test into current multi-jvm infrastructure temporarily for testing (will use Björn’s new remote-multi-jvm stuff later) --- .../testconductor/TestConductorProtocol.java | 809 ++++++++++++++++-- .../main/protocol/TestConductorProtocol.proto | 8 +- 
.../src/main/resources/reference.conf | 15 +- .../akka/remote/testconductor/Conductor.scala | 109 +-- .../akka/remote/testconductor/DataTypes.scala | 55 +- .../akka/remote/testconductor/Extension.scala | 8 +- .../akka/remote/testconductor/Player.scala | 172 ++-- .../testconductor/RemoteConnection.scala | 3 +- .../testconductor/TestConductorSpec.scala | 140 +-- .../remote/testconductor/BarrierSpec.scala | 106 +-- .../remote/testconductor/ControllerSpec.scala | 8 +- .../akka/remote/testkit/MultiNodeSpec.scala | 157 ++++ scripts/fix-protobuf.sh | 3 + 13 files changed, 1291 insertions(+), 302 deletions(-) create mode 100644 akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala create mode 100755 scripts/fix-protobuf.sh diff --git a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index 3d6c145097..4ae1aae07a 100644 --- a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -176,6 +176,11 @@ public final class TestConductorProtocol { // optional string done = 4; boolean hasDone(); String getDone(); + + // optional .AddressRequest addr = 5; + boolean hasAddr(); + akka.remote.testconductor.TestConductorProtocol.AddressRequest getAddr(); + akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder getAddrOrBuilder(); } public static final class Wrapper extends com.google.protobuf.GeneratedMessage @@ -277,11 +282,25 @@ public final class TestConductorProtocol { } } + // optional .AddressRequest addr = 5; + public static final int ADDR_FIELD_NUMBER = 5; + private akka.remote.testconductor.TestConductorProtocol.AddressRequest addr_; + public boolean hasAddr() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequest getAddr() { + return addr_; + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder getAddrOrBuilder() { + return addr_; + } + private void initFields() { hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); done_ = ""; + addr_ = akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -306,6 +325,12 @@ public final class TestConductorProtocol { return false; } } + if (hasAddr()) { + if (!getAddr().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -325,6 +350,9 @@ public final class TestConductorProtocol { if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, getDoneBytes()); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, addr_); + } getUnknownFields().writeTo(output); } @@ -350,6 +378,10 @@ public final class TestConductorProtocol { size += com.google.protobuf.CodedOutputStream .computeBytesSize(4, getDoneBytes()); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, addr_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -460,7 +492,7 @@ public final 
class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -469,6 +501,7 @@ public final class TestConductorProtocol { getHelloFieldBuilder(); getBarrierFieldBuilder(); getFailureFieldBuilder(); + getAddrFieldBuilder(); } } private static Builder create() { @@ -497,6 +530,12 @@ public final class TestConductorProtocol { bitField0_ = (bitField0_ & ~0x00000004); done_ = ""; bitField0_ = (bitField0_ & ~0x00000008); + if (addrBuilder_ == null) { + addr_ = akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); + } else { + addrBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -563,6 +602,14 @@ public final class TestConductorProtocol { to_bitField0_ |= 0x00000008; } result.done_ = done_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (addrBuilder_ == null) { + result.addr_ = addr_; + } else { + result.addr_ = addrBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -591,6 +638,9 @@ public final class TestConductorProtocol { if (other.hasDone()) { setDone(other.getDone()); } + if (other.hasAddr()) { + mergeAddr(other.getAddr()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -614,6 +664,12 @@ public final class TestConductorProtocol { return false; } } + if (hasAddr()) { + if (!getAddr().isInitialized()) { + + return false; + } + } return true; } @@ -672,6 +728,15 @@ public final class TestConductorProtocol { done_ = input.readBytes(); break; } + case 42: { + akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.AddressRequest.newBuilder(); + if (hasAddr()) { + subBuilder.mergeFrom(getAddr()); + } + input.readMessage(subBuilder, extensionRegistry); + setAddr(subBuilder.buildPartial()); + break; + } } } } @@ -984,6 +1049,96 @@ public final class TestConductorProtocol { onChanged(); } + // optional .AddressRequest addr = 5; + private akka.remote.testconductor.TestConductorProtocol.AddressRequest addr_ = akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.AddressRequest, akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder, akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder> addrBuilder_; + public boolean hasAddr() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequest getAddr() { + if (addrBuilder_ == null) { + return addr_; + } else { + return addrBuilder_.getMessage(); + } + } + public Builder setAddr(akka.remote.testconductor.TestConductorProtocol.AddressRequest value) { + if (addrBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + addr_ = value; + onChanged(); + } else { + addrBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + public Builder setAddr( + akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder builderForValue) { + if (addrBuilder_ == null) { + addr_ = builderForValue.build(); + onChanged(); + } else { + addrBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + public Builder 
mergeAddr(akka.remote.testconductor.TestConductorProtocol.AddressRequest value) { + if (addrBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + addr_ != akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance()) { + addr_ = + akka.remote.testconductor.TestConductorProtocol.AddressRequest.newBuilder(addr_).mergeFrom(value).buildPartial(); + } else { + addr_ = value; + } + onChanged(); + } else { + addrBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + public Builder clearAddr() { + if (addrBuilder_ == null) { + addr_ = akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); + onChanged(); + } else { + addrBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder getAddrBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getAddrFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder getAddrOrBuilder() { + if (addrBuilder_ != null) { + return addrBuilder_.getMessageOrBuilder(); + } else { + return addr_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.AddressRequest, akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder, akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder> + getAddrFieldBuilder() { + if (addrBuilder_ == null) { + addrBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.AddressRequest, akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder, akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder>( + addr_, + getParentForChildren(), + isClean()); + addr_ = null; + } + return addrBuilder_; + } + // @@protoc_insertion_point(builder_scope:Wrapper) } @@ -1242,7 +1397,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1544,9 +1699,9 @@ public final class TestConductorProtocol { boolean hasName(); String getName(); - // optional bool failed = 2; - boolean hasFailed(); - boolean getFailed(); + // optional bool status = 2; + boolean hasStatus(); + boolean getStatus(); } public static final class EnterBarrier extends com.google.protobuf.GeneratedMessage @@ -1609,19 +1764,19 @@ public final class TestConductorProtocol { } } - // optional bool failed = 2; - public static final int FAILED_FIELD_NUMBER = 2; - private boolean failed_; - public boolean hasFailed() { + // optional bool status = 2; + public static final int STATUS_FIELD_NUMBER = 2; + private boolean status_; + public boolean hasStatus() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public boolean getFailed() { - return failed_; + public boolean getStatus() { + return status_; } private void initFields() { name_ = ""; - failed_ = false; + status_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -1643,7 +1798,7 @@ public final class TestConductorProtocol { output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, failed_); + output.writeBool(2, status_); } getUnknownFields().writeTo(output); } @@ -1660,7 +1815,7 @@ public final class TestConductorProtocol { } if 
(((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, failed_); + .computeBoolSize(2, status_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -1772,7 +1927,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1788,7 +1943,7 @@ public final class TestConductorProtocol { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - failed_ = false; + status_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -1835,7 +1990,7 @@ public final class TestConductorProtocol { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.failed_ = failed_; + result.status_ = status_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1855,8 +2010,8 @@ public final class TestConductorProtocol { if (other.hasName()) { setName(other.getName()); } - if (other.hasFailed()) { - setFailed(other.getFailed()); + if (other.hasStatus()) { + setStatus(other.getStatus()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -1900,7 +2055,7 @@ public final class TestConductorProtocol { } case 16: { bitField0_ |= 0x00000002; - failed_ = input.readBool(); + status_ = input.readBool(); break; } } @@ -1945,23 +2100,23 @@ public final class TestConductorProtocol { onChanged(); } - // optional bool failed = 2; - private boolean failed_ ; - public boolean hasFailed() { + // optional bool status = 2; + private boolean status_ ; + public boolean hasStatus() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public boolean getFailed() { - return failed_; + public boolean getStatus() { + return status_; } - public Builder setFailed(boolean value) { + public Builder setStatus(boolean value) { bitField0_ |= 0x00000002; - failed_ = value; + status_ = value; onChanged(); return this; } - public Builder clearFailed() { + public Builder clearStatus() { bitField0_ = (bitField0_ & ~0x00000002); - failed_ = false; + status_ = false; onChanged(); return this; } @@ -1977,6 +2132,544 @@ public final class TestConductorProtocol { // @@protoc_insertion_point(class_scope:EnterBarrier) } + public interface AddressRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string node = 1; + boolean hasNode(); + String getNode(); + + // optional .Address addr = 2; + boolean hasAddr(); + akka.remote.testconductor.TestConductorProtocol.Address getAddr(); + akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddrOrBuilder(); + } + public static final class AddressRequest extends + com.google.protobuf.GeneratedMessage + implements AddressRequestOrBuilder { + // Use AddressRequest.newBuilder() to construct. 
+ private AddressRequest(Builder builder) { + super(builder); + } + private AddressRequest(boolean noInit) {} + + private static final AddressRequest defaultInstance; + public static AddressRequest getDefaultInstance() { + return defaultInstance; + } + + public AddressRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_AddressRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_AddressRequest_fieldAccessorTable; + } + + private int bitField0_; + // required string node = 1; + public static final int NODE_FIELD_NUMBER = 1; + private java.lang.Object node_; + public boolean hasNode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNode() { + java.lang.Object ref = node_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + node_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNodeBytes() { + java.lang.Object ref = node_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + node_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .Address addr = 2; + public static final int ADDR_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.Address addr_; + public boolean hasAddr() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.Address getAddr() { + return addr_; + } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddrOrBuilder() { + return addr_; + } + + private void initFields() { + node_ = ""; + addr_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNode()) { + memoizedIsInitialized = 0; + return false; + } + if (hasAddr()) { + if (!getAddr().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNodeBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, addr_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNodeBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, addr_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 
0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.AddressRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends 
+ com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_AddressRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_AddressRequest_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.AddressRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getAddrFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + node_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (addrBuilder_ == null) { + addr_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } else { + addrBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.AddressRequest getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.AddressRequest build() { + akka.remote.testconductor.TestConductorProtocol.AddressRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.AddressRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.AddressRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.AddressRequest buildPartial() { + akka.remote.testconductor.TestConductorProtocol.AddressRequest result = new akka.remote.testconductor.TestConductorProtocol.AddressRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.node_ = node_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (addrBuilder_ == null) { + result.addr_ = addr_; + } else { + result.addr_ = addrBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.AddressRequest) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.AddressRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.AddressRequest 
other) { + if (other == akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance()) return this; + if (other.hasNode()) { + setNode(other.getNode()); + } + if (other.hasAddr()) { + mergeAddr(other.getAddr()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNode()) { + + return false; + } + if (hasAddr()) { + if (!getAddr().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + node_ = input.readBytes(); + break; + } + case 18: { + akka.remote.testconductor.TestConductorProtocol.Address.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(); + if (hasAddr()) { + subBuilder.mergeFrom(getAddr()); + } + input.readMessage(subBuilder, extensionRegistry); + setAddr(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required string node = 1; + private java.lang.Object node_ = ""; + public boolean hasNode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNode() { + java.lang.Object ref = node_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + node_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setNode(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + node_ = value; + onChanged(); + return this; + } + public Builder clearNode() { + bitField0_ = (bitField0_ & ~0x00000001); + node_ = getDefaultInstance().getNode(); + onChanged(); + return this; + } + void setNode(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + node_ = value; + onChanged(); + } + + // optional .Address addr = 2; + private akka.remote.testconductor.TestConductorProtocol.Address addr_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> addrBuilder_; + public boolean hasAddr() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.Address getAddr() { + if (addrBuilder_ == null) { + return addr_; + } else { + return addrBuilder_.getMessage(); + } + } + public Builder setAddr(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addrBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + addr_ = value; + onChanged(); + } else { + addrBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setAddr( + akka.remote.testconductor.TestConductorProtocol.Address.Builder builderForValue) { + if 
(addrBuilder_ == null) { + addr_ = builderForValue.build(); + onChanged(); + } else { + addrBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeAddr(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addrBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + addr_ != akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance()) { + addr_ = + akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(addr_).mergeFrom(value).buildPartial(); + } else { + addr_ = value; + } + onChanged(); + } else { + addrBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearAddr() { + if (addrBuilder_ == null) { + addr_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + onChanged(); + } else { + addrBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.Address.Builder getAddrBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getAddrFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddrOrBuilder() { + if (addrBuilder_ != null) { + return addrBuilder_.getMessageOrBuilder(); + } else { + return addr_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> + getAddrFieldBuilder() { + if (addrBuilder_ == null) { + addrBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder>( + addr_, + getParentForChildren(), + isClean()); + addr_ = null; + } + return addrBuilder_; + } + + // @@protoc_insertion_point(builder_scope:AddressRequest) + } + + static { + defaultInstance = new AddressRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AddressRequest) + } + public interface AddressOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -2312,7 +3005,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2918,7 +3611,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3334,6 +4027,11 @@ public final class TestConductorProtocol { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_EnterBarrier_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AddressRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AddressRequest_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_Address_descriptor; private static @@ -3353,21 +4051,24 @@ public final class TestConductorProtocol { descriptor; static { java.lang.String[] descriptorData = { - 
"\n\033TestConductorProtocol.proto\"o\n\007Wrapper" + - "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" + - "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" + - "tFailure\022\014\n\004done\030\004 \001(\t\"0\n\005Hello\022\014\n\004name\030" + - "\001 \002(\t\022\031\n\007address\030\002 \002(\0132\010.Address\",\n\014Ente" + - "rBarrier\022\014\n\004name\030\001 \002(\t\022\016\n\006failed\030\002 \001(\010\"G" + - "\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006system\030\002 " + - "\002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInje" + - "ctFailure\022\032\n\007failure\030\001 \002(\0162\t.FailType\022\035\n" + - "\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007address\030", - "\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(\002\022\021\n\texi" + - "tValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Throttle\020\001\022\016" + - "\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shutdown\020\004*" + - ",\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receive\020\002\022\010\n\004B" + - "oth\020\003B\035\n\031akka.remote.testconductorH\001" + "\n\033TestConductorProtocol.proto\"\216\001\n\007Wrappe" + + "r\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(" + + "\0132\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Inje" + + "ctFailure\022\014\n\004done\030\004 \001(\t\022\035\n\004addr\030\005 \001(\0132\017." + + "AddressRequest\"0\n\005Hello\022\014\n\004name\030\001 \002(\t\022\031\n" + + "\007address\030\002 \002(\0132\010.Address\",\n\014EnterBarrier" + + "\022\014\n\004name\030\001 \002(\t\022\016\n\006status\030\002 \001(\010\"6\n\016Addres" + + "sRequest\022\014\n\004node\030\001 \002(\t\022\026\n\004addr\030\002 \001(\0132\010.A" + + "ddress\"G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006s" + + "ystem\030\002 \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"", + "\212\001\n\rInjectFailure\022\032\n\007failure\030\001 \002(\0162\t.Fai" + + "lType\022\035\n\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007" + + "address\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(" + + "\002\022\021\n\texitValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Thro" + + "ttle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shu" + + "tdown\020\004*,\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receiv" + + "e\020\002\022\010\n\004Both\020\003B\035\n\031akka.remote.testconduct" + + "orH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -3379,7 +4080,7 @@ public final class TestConductorProtocol { internal_static_Wrapper_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Wrapper_descriptor, - new java.lang.String[] { "Hello", "Barrier", "Failure", "Done", }, + new java.lang.String[] { "Hello", "Barrier", "Failure", "Done", "Addr", }, akka.remote.testconductor.TestConductorProtocol.Wrapper.class, akka.remote.testconductor.TestConductorProtocol.Wrapper.Builder.class); internal_static_Hello_descriptor = @@ -3395,11 +4096,19 @@ public final class TestConductorProtocol { internal_static_EnterBarrier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_EnterBarrier_descriptor, - new java.lang.String[] { "Name", "Failed", }, + new java.lang.String[] { "Name", "Status", }, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class); - internal_static_Address_descriptor = + internal_static_AddressRequest_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_AddressRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AddressRequest_descriptor, + new java.lang.String[] { "Node", "Addr", }, + akka.remote.testconductor.TestConductorProtocol.AddressRequest.class, + akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder.class); + internal_static_Address_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_Address_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Address_descriptor, @@ -3407,7 +4116,7 @@ public final class TestConductorProtocol { akka.remote.testconductor.TestConductorProtocol.Address.class, akka.remote.testconductor.TestConductorProtocol.Address.Builder.class); internal_static_InjectFailure_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_InjectFailure_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_InjectFailure_descriptor, diff --git a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto index 007965b2e8..648234614e 100644 --- a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto @@ -16,6 +16,7 @@ message Wrapper { optional EnterBarrier barrier = 2; optional InjectFailure failure = 3; optional string done = 4; + optional AddressRequest addr = 5; } message Hello { @@ -25,7 +26,12 @@ message Hello { message EnterBarrier { required string name = 1; - optional bool failed = 2; + optional bool status = 2; +} + +message AddressRequest { + required string node = 1; + optional Address addr = 2; } message Address { diff --git a/akka-remote-tests/src/main/resources/reference.conf b/akka-remote-tests/src/main/resources/reference.conf index f0d8a9d6ae..40c16c4ccd 100644 --- a/akka-remote-tests/src/main/resources/reference.conf +++ b/akka-remote-tests/src/main/resources/reference.conf @@ -20,15 +20,14 @@ akka { # than HashedWheelTimer resolution (would not make sense) packet-split-threshold = 100ms - # Default port to start the conductor on; 0 means - port = 0 + # amount of time for the ClientFSM to wait for the connection to the conductor + # to be successful + connect-timeout = 20s - # Hostname of the TestConductor server, used by the server to bind to the IP - # and by the client to connect to it. - host = localhost + # Number of connect attempts to be made to the conductor controller + client-reconnects = 10 - # Name of the TestConductor client (for identification on the server e.g. 
for - # failure injection) - name = "noname" + # minimum time interval which is to be inserted between reconnect attempts + reconnect-backoff = 1s } } \ No newline at end of file diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 09a6faeeb0..d4fa3152e6 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -71,11 +71,11 @@ trait Conductor { this: TestConductorExt ⇒ * @param participants gives the number of participants which shall connect * before any of their startClient() operations complete. */ - def startController(participants: Int): Future[Int] = { + def startController(participants: Int, name: String, controllerPort: InetSocketAddress): Future[InetSocketAddress] = { if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") - _controller = system.actorOf(Props(new Controller(participants)), "controller") + _controller = system.actorOf(Props(new Controller(participants, controllerPort)), "controller") import Settings.BarrierTimeout - controller ? GetPort flatMap { case port: Int ⇒ startClient(port) map (_ ⇒ port) } + controller ? GetSockAddr flatMap { case sockAddr: InetSocketAddress ⇒ startClient(name, sockAddr) map (_ ⇒ sockAddr) } } /** @@ -83,9 +83,9 @@ trait Conductor { this: TestConductorExt ⇒ * will deviate from the configuration in `akka.testconductor.port` in case * that was given as zero. */ - def port: Future[Int] = { + def sockAddr: Future[InetSocketAddress] = { import Settings.QueryTimeout - controller ? GetPort mapTo + controller ? GetSockAddr mapTo } /** @@ -280,7 +280,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x) channel.close() stop() - case Event(Send(msg), _) ⇒ + case Event(ToClient(msg), _) ⇒ log.warning("cannot send {} in state Initial", msg) stay case Event(StateTimeout, _) ⇒ @@ -290,22 +290,22 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } when(Ready) { - case Event(msg: EnterBarrier, _) ⇒ - controller ! msg - stay case Event(d: Done, Some(s)) ⇒ s ! d stay using None + case Event(op: ServerOp, _) ⇒ + controller ! op + stay case Event(msg: NetworkOp, _) ⇒ log.warning("client {} sent unsupported message {}", getAddrString(channel), msg) stop() - case Event(Send(msg @ (_: EnterBarrier | _: Done)), _) ⇒ + case Event(ToClient(msg: UnconfirmedClientOp), _) ⇒ channel.write(msg) stay - case Event(Send(msg), None) ⇒ + case Event(ToClient(msg), None) ⇒ channel.write(msg) stay using Some(sender) - case Event(Send(msg), _) ⇒ + case Event(ToClient(msg), _) ⇒ log.warning("cannot send {} while waiting for previous ACK", msg) stay } @@ -320,7 +320,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi object Controller { case class ClientDisconnected(name: String) case object GetNodes - case object GetPort + case object GetSockAddr case class NodeInfo(name: String, addr: Address, fsm: ActorRef) } @@ -330,12 +330,12 @@ object Controller { * [[akka.remote.testconductor.BarrierCoordinator]], its child) and allowing * network and other failures to be injected at the test nodes. 
*/ -class Controller(private var initialParticipants: Int) extends Actor { +class Controller(private var initialParticipants: Int, controllerPort: InetSocketAddress) extends Actor { import Controller._ import BarrierCoordinator._ val settings = TestConductor().Settings - val connection = RemoteConnection(Server, settings.host, settings.port, + val connection = RemoteConnection(Server, controllerPort, new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler"))) /* @@ -348,61 +348,73 @@ class Controller(private var initialParticipants: Int) extends Actor { override def supervisorStrategy = OneForOneStrategy() { case BarrierTimeout(data) ⇒ SupervisorStrategy.Resume case BarrierEmpty(data, msg) ⇒ SupervisorStrategy.Resume - case WrongBarrier(name, client, data) ⇒ client ! Send(BarrierFailed(name)); failBarrier(data) + case WrongBarrier(name, client, data) ⇒ client ! ToClient(BarrierResult(name, false)); failBarrier(data) case ClientLost(data, node) ⇒ failBarrier(data) case DuplicateNode(data, node) ⇒ failBarrier(data) } def failBarrier(data: Data): SupervisorStrategy.Directive = { - for (c ← data.arrived) c ! Send(BarrierFailed(data.barrier)) + for (c ← data.arrived) c ! ToClient(BarrierResult(data.barrier, false)) SupervisorStrategy.Restart } val barrier = context.actorOf(Props[BarrierCoordinator], "barriers") var nodes = Map[String, NodeInfo]() + // map keeping unanswered queries for node addresses (enqueued upon GetAddress, serviced upon NodeInfo) + var addrInterest = Map[String, Set[ActorRef]]() + override def receive = LoggingReceive { case c @ NodeInfo(name, addr, fsm) ⇒ barrier forward c if (nodes contains name) { if (initialParticipants > 0) { - for (NodeInfo(_, _, client) ← nodes.values) client ! Send(BarrierFailed("initial startup")) + for (NodeInfo(_, _, client) ← nodes.values) client ! ToClient(BarrierResult("initial startup", false)) initialParticipants = 0 } - fsm ! Send(BarrierFailed("initial startup")) + fsm ! ToClient(BarrierResult("initial startup", false)) } else { nodes += name -> c - if (initialParticipants <= 0) fsm ! Send(Done) + if (initialParticipants <= 0) fsm ! ToClient(Done) else if (nodes.size == initialParticipants) { - for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done) + for (NodeInfo(_, _, client) ← nodes.values) client ! ToClient(Done) initialParticipants = 0 } + if (addrInterest contains name) { + addrInterest(name) foreach (_ ! ToClient(AddressReply(name, addr))) + addrInterest -= name + } } case c @ ClientDisconnected(name) ⇒ nodes -= name barrier forward c - case e @ EnterBarrier(name) ⇒ - barrier forward e - case Throttle(node, target, direction, rateMBit) ⇒ - val t = nodes(target) - nodes(node).fsm forward Send(ThrottleMsg(t.addr, direction, rateMBit)) - case Disconnect(node, target, abort) ⇒ - val t = nodes(target) - nodes(node).fsm forward Send(DisconnectMsg(t.addr, abort)) - case Terminate(node, exitValueOrKill) ⇒ - if (exitValueOrKill < 0) { - // TODO: kill via SBT - } else { - nodes(node).fsm forward Send(TerminateMsg(exitValueOrKill)) + case op: ServerOp ⇒ + op match { + case _: EnterBarrier ⇒ barrier forward op + case GetAddress(node) ⇒ + if (nodes contains node) sender ! ToClient(AddressReply(node, nodes(node).addr)) + else addrInterest += node -> ((addrInterest get node getOrElse Set()) + sender) } - case Remove(node) ⇒ - nodes -= node - barrier ! BarrierCoordinator.RemoveClient(node) - case GetNodes ⇒ sender ! nodes.keys - case GetPort ⇒ - sender ! 
(connection.getLocalAddress match { - case inet: InetSocketAddress ⇒ inet.getPort - }) + case op: CommandOp ⇒ + op match { + case Throttle(node, target, direction, rateMBit) ⇒ + val t = nodes(target) + nodes(node).fsm forward ToClient(ThrottleMsg(t.addr, direction, rateMBit)) + case Disconnect(node, target, abort) ⇒ + val t = nodes(target) + nodes(node).fsm forward ToClient(DisconnectMsg(t.addr, abort)) + case Terminate(node, exitValueOrKill) ⇒ + if (exitValueOrKill < 0) { + // TODO: kill via SBT + } else { + nodes(node).fsm forward ToClient(TerminateMsg(exitValueOrKill)) + } + case Remove(node) ⇒ + nodes -= node + barrier ! BarrierCoordinator.RemoveClient(node) + } + case GetNodes ⇒ sender ! nodes.keys + case GetSockAddr ⇒ sender ! connection.getLocalAddress } } @@ -463,13 +475,13 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, } when(Idle) { - case Event(e @ EnterBarrier(name), d @ Data(clients, _, _)) ⇒ + case Event(EnterBarrier(name), d @ Data(clients, _, _)) ⇒ if (failed) - stay replying Send(BarrierFailed(name)) + stay replying ToClient(BarrierResult(name, false)) else if (clients.map(_.fsm) == Set(sender)) - stay replying Send(e) + stay replying ToClient(BarrierResult(name, true)) else if (clients.find(_.fsm == sender).isEmpty) - stay replying Send(BarrierFailed(name)) + stay replying ToClient(BarrierResult(name, false)) else goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil) case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒ @@ -483,7 +495,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, } when(Waiting) { - case Event(e @ EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ + case Event(EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ if (name != barrier || clients.find(_.fsm == sender).isEmpty) throw WrongBarrier(name, sender, d) val together = sender :: arrived handleBarrier(d.copy(arrived = together)) @@ -504,8 +516,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, if (data.arrived.isEmpty) { goto(Idle) using data.copy(barrier = "") } else if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) { - val e = EnterBarrier(data.barrier) - data.arrived foreach (_ ! Send(e)) + data.arrived foreach (_ ! 
ToClient(BarrierResult(data.barrier, true))) goto(Idle) using data.copy(barrier = "", arrived = Nil) } else { stay using data diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index cadd69f786..0273055469 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -11,27 +11,42 @@ import com.google.protobuf.Message import akka.actor.Address import org.jboss.netty.handler.codec.oneone.OneToOneDecoder -case class Send(msg: NetworkOp) +case class ToClient(msg: ClientOp with NetworkOp) +case class ToServer(msg: ServerOp with NetworkOp) -sealed trait ClientOp // messages sent to Player FSM -sealed trait ServerOp // messages sent to Conductor FSM +sealed trait ClientOp // messages sent to from Conductor to Player +sealed trait ServerOp // messages sent to from Player to Conductor +sealed trait CommandOp // messages sent from TestConductorExt to Conductor sealed trait NetworkOp // messages sent over the wire +sealed trait UnconfirmedClientOp extends ClientOp // unconfirmed messages going to the Player +sealed trait ConfirmedClientOp extends ClientOp +/** + * First message of connection sets names straight. + */ case class Hello(name: String, addr: Address) extends NetworkOp -case class EnterBarrier(name: String) extends ClientOp with ServerOp with NetworkOp -case class BarrierFailed(name: String) extends NetworkOp -case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends ServerOp -case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends NetworkOp -case class Disconnect(node: String, target: String, abort: Boolean) extends ServerOp -case class DisconnectMsg(target: Address, abort: Boolean) extends NetworkOp -case class Terminate(node: String, exitValueOrKill: Int) extends ServerOp -case class TerminateMsg(exitValue: Int) extends NetworkOp -abstract class Done extends NetworkOp + +case class EnterBarrier(name: String) extends ServerOp with NetworkOp +case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp + +case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends CommandOp +case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp + +case class Disconnect(node: String, target: String, abort: Boolean) extends CommandOp +case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with NetworkOp + +case class Terminate(node: String, exitValueOrKill: Int) extends CommandOp +case class TerminateMsg(exitValue: Int) extends ConfirmedClientOp with NetworkOp + +case class GetAddress(node: String) extends ServerOp with NetworkOp +case class AddressReply(node: String, addr: Address) extends UnconfirmedClientOp with NetworkOp + +abstract class Done extends ServerOp with UnconfirmedClientOp with NetworkOp case object Done extends Done { def getInstance: Done = this } -case class Remove(node: String) extends ServerOp +case class Remove(node: String) extends CommandOp class MsgEncoder extends OneToOneEncoder { def encode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { @@ -42,8 +57,8 @@ class MsgEncoder extends OneToOneEncoder { w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr)) case EnterBarrier(name) ⇒ 
w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name)) - case BarrierFailed(name) ⇒ - w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setFailed(true)) + case BarrierResult(name, success) ⇒ + w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setStatus(success)) case ThrottleMsg(target, dir, rate) ⇒ w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate)) @@ -52,6 +67,10 @@ class MsgEncoder extends OneToOneEncoder { .setFailure(if (abort) TCP.FailType.Abort else TCP.FailType.Disconnect)) case TerminateMsg(exitValue) ⇒ w.setFailure(TCP.InjectFailure.newBuilder.setFailure(TCP.FailType.Shutdown).setExitValue(exitValue)) + case GetAddress(node) ⇒ + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node)) + case AddressReply(node, addr) ⇒ + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node).setAddr(addr)) case _: Done ⇒ w.setDone("") } @@ -68,7 +87,7 @@ class MsgDecoder extends OneToOneDecoder { Hello(h.getName, h.getAddress) } else if (w.hasBarrier) { val barrier = w.getBarrier - if (barrier.hasFailed && barrier.getFailed) BarrierFailed(barrier.getName) + if (barrier.hasStatus) BarrierResult(barrier.getName, barrier.getStatus) else EnterBarrier(w.getBarrier.getName) } else if (w.hasFailure) { val f = w.getFailure @@ -79,6 +98,10 @@ class MsgDecoder extends OneToOneDecoder { case FT.Disconnect ⇒ DisconnectMsg(f.getAddress, false) case FT.Shutdown ⇒ TerminateMsg(f.getExitValue) } + } else if (w.hasAddr) { + val a = w.getAddr + if (a.hasAddr) AddressReply(a.getNode, a.getAddr) + else GetAddress(a.getNode) } else if (w.hasDone) { Done } else { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala index 5d7826c60c..7f6b576128 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala @@ -38,13 +38,13 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C object Settings { val config = system.settings.config + val ConnectTimeout = Duration(config.getMilliseconds("akka.testconductor.connect-timeout"), MILLISECONDS) + val ClientReconnects = config.getInt("akka.testconductor.client-reconnects") + val ReconnectBackoff = Duration(config.getMilliseconds("akka.testconductor.reconnect-backoff"), MILLISECONDS) + implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.barrier-timeout"), MILLISECONDS)) implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.query-timeout"), MILLISECONDS)) val PacketSplitThreshold = Duration(config.getMilliseconds("akka.testconductor.packet-split-threshold"), MILLISECONDS) - - val name = config.getString("akka.testconductor.name") - val host = config.getString("akka.testconductor.host") - val port = config.getInt("akka.testconductor.port") } val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index a82a090b23..27a2487364 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -20,6 +20,13 @@ import akka.event.LoggingAdapter import akka.actor.PoisonPill import akka.event.Logging import 
akka.dispatch.Future +import java.net.InetSocketAddress +import akka.actor.Address +import org.jboss.netty.channel.ExceptionEvent +import org.jboss.netty.channel.WriteCompletionEvent +import java.net.ConnectException +import akka.util.Deadline +import akka.actor.Scheduler /** * The Player is the client component of the @@ -43,13 +50,13 @@ trait Player { this: TestConductorExt ⇒ * this is a first barrier in itself). The number of expected participants is * set in [[akka.remote.testconductor.Conductor]]`.startController()`. */ - def startClient(port: Int): Future[Done] = { + def startClient(name: String, controllerAddr: InetSocketAddress): Future[Done] = { import ClientFSM._ import akka.actor.FSM._ import Settings.BarrierTimeout if (_client ne null) throw new IllegalStateException("TestConductorClient already started") - _client = system.actorOf(Props(new ClientFSM(port)), "TestConductorClient") + _client = system.actorOf(Props(new ClientFSM(name, controllerAddr)), "TestConductorClient") val a = system.actorOf(Props(new Actor { var waiting: ActorRef = _ def receive = { @@ -73,10 +80,18 @@ trait Player { this: TestConductorExt ⇒ system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) name foreach { b ⇒ import Settings.BarrierTimeout - Await.result(client ? Send(EnterBarrier(b)), Duration.Inf) + Await.result(client ? ToServer(EnterBarrier(b)), Duration.Inf) system.log.debug("passed barrier {}", b) } } + + /** + * Query remote transport address of named node. + */ + def getAddressFor(name: String): Future[Address] = { + import Settings.BarrierTimeout + client ? ToServer(GetAddress(name)) mapTo + } } object ClientFSM { @@ -86,9 +101,10 @@ object ClientFSM { case object Connected extends State case object Failed extends State - case class Data(channel: Channel, barrier: Option[(String, ActorRef)]) + case class Data(channel: Option[Channel], runningOp: Option[(String, ActorRef)]) - class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace + case class Connected(channel: Channel) + case class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace case object Disconnected } @@ -101,21 +117,22 @@ object ClientFSM { * coordinator and react to the [[akka.remote.testconductor.Conductor]]’s * requests for failure injection. 
*/ -class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { +class ClientFSM(name: String, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ val settings = TestConductor().Settings - val handler = new PlayerHandler(self, Logging(context.system, "PlayerHandler")) + val handler = new PlayerHandler(controllerAddr, settings.ClientReconnects, settings.ReconnectBackoff, + self, Logging(context.system, "PlayerHandler"), context.system.scheduler) - startWith(Connecting, Data(RemoteConnection(Client, settings.host, port, handler), None)) + startWith(Connecting, Data(None, None)) - when(Connecting, stateTimeout = 10 seconds) { + when(Connecting, stateTimeout = settings.ConnectTimeout) { case Event(msg: ClientOp, _) ⇒ stay replying Status.Failure(new IllegalStateException("not connected yet")) - case Event(Connected, d @ Data(channel, _)) ⇒ - channel.write(Hello(settings.name, TestConductor().address)) - goto(AwaitDone) + case Event(Connected(channel), _) ⇒ + channel.write(Hello(name, TestConductor().address)) + goto(AwaitDone) using Data(Some(channel), None) case Event(_: ConnectionFailure, _) ⇒ goto(Failed) case Event(StateTimeout, _) ⇒ @@ -130,7 +147,7 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client case Event(msg: NetworkOp, _) ⇒ log.error("received {} instead of Done", msg) goto(Failed) - case Event(msg: ClientOp, _) ⇒ + case Event(msg: ServerOp, _) ⇒ stay replying Status.Failure(new IllegalStateException("not connected yet")) case Event(StateTimeout, _) ⇒ log.error("connect timeout to TestConductor") @@ -141,44 +158,63 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client case Event(Disconnected, _) ⇒ log.info("disconnected from TestConductor") throw new ConnectionFailure("disconnect") - case Event(Send(msg: EnterBarrier), Data(channel, None)) ⇒ + case Event(ToServer(Done), Data(Some(channel), _)) ⇒ + channel.write(Done) + stay + case Event(ToServer(msg), d @ Data(Some(channel), None)) ⇒ channel.write(msg) - stay using Data(channel, Some(msg.name, sender)) - case Event(Send(d: Done), Data(channel, _)) ⇒ - channel.write(d) - stay - case Event(Send(x), _) ⇒ - log.warning("cannot send message {}", x) - stay - case Event(EnterBarrier(b), Data(channel, Some((barrier, sender)))) ⇒ - if (b != barrier) { - sender ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) - } else { - sender ! b - } - stay using Data(channel, None) - case Event(BarrierFailed(b), Data(channel, Some((_, sender)))) ⇒ - sender ! Status.Failure(new RuntimeException("barrier failed: " + b)) - stay using Data(channel, None) - case Event(ThrottleMsg(target, dir, rate), _) ⇒ - import settings.QueryTimeout - import context.dispatcher - TestConductor().failureInjectors.get(target.copy(system = "")) match { - case null ⇒ log.warning("cannot throttle unknown address {}", target) - case inj ⇒ - Future.sequence(inj.refs(dir) map (_ ? 
NetworkFailureInjector.SetRate(rate))) map (_ ⇒ Send(Done)) pipeTo self + val token = msg match { + case EnterBarrier(barrier) ⇒ barrier + case GetAddress(node) ⇒ node } + stay using d.copy(runningOp = Some(token, sender)) + case Event(ToServer(op), Data(channel, Some((token, _)))) ⇒ + log.error("cannot write {} while waiting for {}", op, token) stay - case Event(DisconnectMsg(target, abort), _) ⇒ - import settings.QueryTimeout - TestConductor().failureInjectors.get(target.copy(system = "")) match { - case null ⇒ log.warning("cannot disconnect unknown address {}", target) - case inj ⇒ inj.sender ? NetworkFailureInjector.Disconnect(abort) map (_ ⇒ Send(Done)) pipeTo self + case Event(op: ClientOp, d @ Data(Some(channel), runningOp)) ⇒ + op match { + case BarrierResult(b, success) ⇒ + runningOp match { + case Some((barrier, requester)) ⇒ + if (b != barrier) { + requester ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + } else if (!success) { + requester ! Status.Failure(new RuntimeException("barrier failed: " + b)) + } else { + requester ! b + } + case None ⇒ + log.warning("did not expect {}", op) + } + stay using d.copy(runningOp = None) + case AddressReply(node, addr) ⇒ + runningOp match { + case Some((_, requester)) ⇒ + requester ! addr + case None ⇒ + log.warning("did not expect {}", op) + } + stay using d.copy(runningOp = None) + case ThrottleMsg(target, dir, rate) ⇒ + import settings.QueryTimeout + import context.dispatcher + TestConductor().failureInjectors.get(target.copy(system = "")) match { + case null ⇒ log.warning("cannot throttle unknown address {}", target) + case inj ⇒ + Future.sequence(inj.refs(dir) map (_ ? NetworkFailureInjector.SetRate(rate))) map (_ ⇒ ToServer(Done)) pipeTo self + } + stay + case DisconnectMsg(target, abort) ⇒ + import settings.QueryTimeout + TestConductor().failureInjectors.get(target.copy(system = "")) match { + case null ⇒ log.warning("cannot disconnect unknown address {}", target) + case inj ⇒ inj.sender ? NetworkFailureInjector.Disconnect(abort) map (_ ⇒ ToServer(Done)) pipeTo self + } + stay + case TerminateMsg(exit) ⇒ + System.exit(exit) + stay // needed because Java doesn’t have Nothing } - stay - case Event(TerminateMsg(exit), _) ⇒ - System.exit(exit) - stay // needed because Java doesn’t have Nothing } when(Failed) { @@ -190,7 +226,7 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client } onTermination { - case StopEvent(_, _, Data(channel, _)) ⇒ + case StopEvent(_, _, Data(Some(channel), _)) ⇒ channel.close() } @@ -201,14 +237,46 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client /** * This handler only forwards messages received from the conductor to the [[akka.remote.testconductor.ClientFSM]]. 
*/ -class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { +class PlayerHandler( + server: InetSocketAddress, + private var reconnects: Int, + backoff: Duration, + fsm: ActorRef, + log: LoggingAdapter, + scheduler: Scheduler) + extends SimpleChannelUpstreamHandler { import ClientFSM._ + reconnect() + + var nextAttempt: Deadline = _ + + override def channelOpen(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} open", event.getChannel) + override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} closed", event.getChannel) + override def channelBound(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} bound", event.getChannel) + override def channelUnbound(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} unbound", event.getChannel) + override def writeComplete(ctx: ChannelHandlerContext, event: WriteCompletionEvent) = log.debug("channel {} written {}", event.getChannel, event.getWrittenAmount) + + override def exceptionCaught(ctx: ChannelHandlerContext, event: ExceptionEvent) = { + log.debug("channel {} exception {}", event.getChannel, event.getCause) + event.getCause match { + case c: ConnectException if reconnects > 0 ⇒ + reconnects -= 1 + scheduler.scheduleOnce(nextAttempt.timeLeft)(reconnect()) + case e ⇒ fsm ! ConnectionFailure(e.getMessage) + } + } + + private def reconnect(): Unit = { + nextAttempt = Deadline.now + backoff + RemoteConnection(Client, server, this) + } + override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { - val channel = event.getChannel - log.debug("connected to {}", getAddrString(channel)) - fsm ! Connected + val ch = event.getChannel + log.debug("connected to {}", getAddrString(ch)) + fsm ! 
Connected(ch) } override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index b2f4baebbb..5b1c454b0c 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -27,8 +27,7 @@ case object Client extends Role case object Server extends Role object RemoteConnection { - def apply(role: Role, host: String, port: Int, handler: ChannelUpstreamHandler): Channel = { - val sockaddr = new InetSocketAddress(host, port) + def apply(role: Role, sockaddr: InetSocketAddress, handler: ChannelUpstreamHandler): Channel = { role match { case Client ⇒ val socketfactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 512757c130..39d25981aa 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -10,14 +10,15 @@ import akka.dispatch.Await.Awaitable import akka.util.Duration import akka.util.duration._ import akka.testkit.ImplicitSender +import java.net.InetSocketAddress +import java.net.InetAddress +import akka.remote.testkit.MultiNodeSpec object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { override def NrOfNodes = 2 override def commonConfig = ConfigFactory.parseString(""" akka.loglevel = DEBUG - akka.actor.provider = akka.remote.RemoteActorRefProvider akka.remote { - transport = akka.remote.testconductor.TestConductorTransport log-received-messages = on log-sent-messages = on } @@ -25,87 +26,96 @@ object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { receive = on fsm = on } - akka.testconductor { - host = localhost - port = 4712 - } """) - def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) +} - implicit def awaitHelper[T](w: Awaitable[T]) = new AwaitHelper(w) - class AwaitHelper[T](w: Awaitable[T]) { - def await: T = Await.result(w, Duration.Inf) +object H { + def apply(x: Int) = { + System.setProperty("multinode.hosts", "localhost,localhost") + System.setProperty("multinode.index", x.toString) } } -class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpec.nameConfig(0)) { +class TestConductorMultiJvmNode1 extends { val dummy = H(0) } with TestConductorSpec +class TestConductorMultiJvmNode2 extends { val dummy = H(1) } with TestConductorSpec - import TestConductorMultiJvmSpec._ +class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonConfig) with ImplicitSender { - val nodes = NrOfNodes + def initialParticipants = 2 + lazy val roles = Seq("master", "slave") - val tc = TestConductor(system) - - val echo = system.actorOf(Props(new Actor { - def receive = { - case x ⇒ testActor ! x; sender ! x - } - }), "echo") - - "running a test with barrier" in { - tc.startController(2).await - tc.enter("begin") + runOn("master") { + system.actorOf(Props(new Actor { + def receive = { + case x ⇒ testActor ! x; sender ! 
x + } + }), "echo") } - "throttling" in { - expectMsg("start") - tc.throttle("node1", "node0", Direction.Send, 0.01).await - tc.enter("throttled_send") - within(0.6 seconds, 2 seconds) { - receiveN(10) must be(0 to 9) + val echo = system.actorFor(node("master") / "user" / "echo") + + "A TestConductor" must { + + "enter a barrier" in { + testConductor.enter("name") } - tc.enter("throttled_send2") - tc.throttle("node1", "node0", Direction.Send, -1).await - - tc.throttle("node1", "node0", Direction.Receive, 0.01).await - tc.enter("throttled_recv") - receiveN(10, 500 millis) must be(10 to 19) - tc.enter("throttled_recv2") - tc.throttle("node1", "node0", Direction.Receive, -1).await - } -} -class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(TestConductorMultiJvmSpec.nameConfig(1)) with ImplicitSender { + "support throttling of network connections" in { - import TestConductorMultiJvmSpec._ + runOn("slave") { + // start remote network connection so that it can be throttled + echo ! "start" + } - val nodes = NrOfNodes + expectMsg("start") - val tc = TestConductor(system) - - val echo = system.actorFor("akka://" + akkaSpec(0) + "/user/echo") + runOn("master") { + testConductor.throttle("slave", "master", Direction.Send, rateMBit = 0.01).await + } - "running a test with barrier" in { - tc.startClient(4712).await - tc.enter("begin") - } + testConductor.enter("throttled_send") - "throttling" in { - echo ! "start" - expectMsg("start") - tc.enter("throttled_send") - for (i <- 0 to 9) echo ! i - expectMsg(500 millis, 0) - within(0.6 seconds, 2 seconds) { - receiveN(9) must be(1 to 9) + runOn("slave") { + for (i ← 0 to 9) echo ! i + } + + within(0.6 seconds, 2 seconds) { + expectMsg(500 millis, 0) + receiveN(9) must be(1 to 9) + } + + testConductor.enter("throttled_send2") + + runOn("master") { + testConductor.throttle("slave", "master", Direction.Send, -1).await + testConductor.throttle("slave", "master", Direction.Receive, rateMBit = 0.01).await + } + + testConductor.enter("throttled_recv") + + runOn("slave") { + for (i ← 10 to 19) echo ! i + } + + val (min, max) = + ifNode("master") { + (0 seconds, 500 millis) + } { + (0.6 seconds, 2 seconds) + } + + within(min, max) { + expectMsg(500 millis, 10) + receiveN(9) must be(11 to 19) + } + + testConductor.enter("throttled_recv2") + + runOn("master") { + testConductor.throttle("slave", "master", Direction.Receive, -1).await + } } - tc.enter("throttled_send2", "throttled_recv") - for (i <- 10 to 19) echo ! i - expectMsg(500 millis, 10) - within(0.6 seconds, 2 seconds) { - receiveN(9) must be(11 to 19) - } - tc.enter("throttled_recv2") + } } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index f0b668d1ed..aa14b93f9d 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -16,6 +16,8 @@ import akka.testkit.TestProbe import akka.util.duration._ import akka.event.Logging import org.scalatest.BeforeAndAfterEach +import java.net.InetSocketAddress +import java.net.InetAddress object BarrierSpec { case class Failed(ref: ActorRef, thr: Throwable) @@ -68,7 +70,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail entering barrier when nobody registered" in { val b = getBarrier() b ! 
EnterBarrier("b") - expectMsg(Send(BarrierFailed("b"))) + expectMsg(ToClient(BarrierResult("b", false))) } "enter barrier" in { @@ -80,8 +82,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with noMsg(a, b) within(1 second) { b.send(barrier, EnterBarrier("bar")) - a.expectMsg(Send(EnterBarrier("bar"))) - b.expectMsg(Send(EnterBarrier("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar", true))) } } @@ -96,9 +98,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with noMsg(a, b, c) within(1 second) { c.send(barrier, EnterBarrier("bar")) - a.expectMsg(Send(EnterBarrier("bar"))) - b.expectMsg(Send(EnterBarrier("bar"))) - c.expectMsg(Send(EnterBarrier("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar", true))) + c.expectMsg(ToClient(BarrierResult("bar", true))) } } @@ -115,7 +117,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with noMsg(a, b, c) b.within(1 second) { barrier ! RemoveClient("c") - b.expectMsg(Send(EnterBarrier("bar"))) + b.expectMsg(ToClient(BarrierResult("bar", true))) } barrier ! ClientDisconnected("c") expectNoMsg(1 second) @@ -129,7 +131,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.send(barrier, EnterBarrier("bar")) barrier ! RemoveClient("a") b.send(barrier, EnterBarrier("foo")) - b.expectMsg(Send(EnterBarrier("foo"))) + b.expectMsg(ToClient(BarrierResult("foo", true))) } "fail barrier with disconnecing node" in { @@ -184,7 +186,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "no client to remove"))) barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("right")) - a.expectMsg(Send(BarrierFailed("right"))) + a.expectMsg(ToClient(BarrierResult("right", false))) } "fail after barrier timeout" in { @@ -223,7 +225,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "register clients and remove them" in { val b = getController(1) b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) - expectMsg(Send(Done)) + expectMsg(ToClient(Done)) b ! Remove("b") b ! Remove("a") EventFilter[BarrierEmpty](occurrences = 1) intercept { @@ -234,7 +236,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "register clients and disconnect them" in { val b = getController(1) b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) - expectMsg(Send(Done)) + expectMsg(ToClient(Done)) b ! ClientDisconnected("b") EventFilter[ClientLost](occurrences = 1) intercept { b ! ClientDisconnected("a") @@ -247,7 +249,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail entering barrier when nobody registered" in { val b = getController(0) b ! EnterBarrier("b") - expectMsg(Send(BarrierFailed("b"))) + expectMsg(ToClient(BarrierResult("b", false))) } "enter barrier" in { @@ -255,14 +257,14 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b = TestProbe() barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! 
NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) noMsg(a, b) within(1 second) { b.send(barrier, EnterBarrier("bar")) - a.expectMsg(Send(EnterBarrier("bar"))) - b.expectMsg(Send(EnterBarrier("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar", true))) } } @@ -271,18 +273,18 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b, c = TestProbe() barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) - c.expectMsg(Send(Done)) + c.expectMsg(ToClient(Done)) b.send(barrier, EnterBarrier("bar")) noMsg(a, b, c) within(1 second) { c.send(barrier, EnterBarrier("bar")) - a.expectMsg(Send(EnterBarrier("bar"))) - b.expectMsg(Send(EnterBarrier("bar"))) - c.expectMsg(Send(EnterBarrier("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar", true))) + c.expectMsg(ToClient(BarrierResult("bar", true))) } } @@ -292,9 +294,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) - c.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) + c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) barrier ! Remove("a") @@ -302,7 +304,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with noMsg(a, b, c) b.within(1 second) { barrier ! Remove("c") - b.expectMsg(Send(EnterBarrier("bar"))) + b.expectMsg(ToClient(BarrierResult("bar", true))) } barrier ! ClientDisconnected("c") expectNoMsg(1 second) @@ -313,12 +315,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b = TestProbe() barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) barrier ! Remove("a") b.send(barrier, EnterBarrier("foo")) - b.expectMsg(Send(EnterBarrier("foo"))) + b.expectMsg(ToClient(BarrierResult("foo", true))) } "fail barrier with disconnecing node" in { @@ -327,15 +329,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) barrier ! ClientDisconnected("unknown") noMsg(a) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! 
ClientDisconnected("b") } - a.expectMsg(Send(BarrierFailed("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", false))) } "fail barrier with disconnecing node who already arrived" in { @@ -346,15 +348,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeA barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) barrier ! nodeC - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) - c.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) + c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected("b") } - a.expectMsg(Send(BarrierFailed("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", false))) } "fail when entering wrong barrier" in { @@ -364,14 +366,14 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeA val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) barrier ! nodeB - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) EventFilter[WrongBarrier](occurrences = 1) intercept { b.send(barrier, EnterBarrier("foo")) } - a.expectMsg(Send(BarrierFailed("bar"))) - b.expectMsg(Send(BarrierFailed("foo"))) + a.expectMsg(ToClient(BarrierResult("bar", false))) + b.expectMsg(ToClient(BarrierResult("foo", false))) } "not really fail after barrier timeout" in { @@ -381,15 +383,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA barrier ! nodeB - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("right")) EventFilter[BarrierTimeout](occurrences = 1) intercept { Thread.sleep(5000) } b.send(barrier, EnterBarrier("right")) - a.expectMsg(Send(EnterBarrier("right"))) - b.expectMsg(Send(EnterBarrier("right"))) + a.expectMsg(ToClient(BarrierResult("right", true))) + b.expectMsg(ToClient(BarrierResult("right", true))) } "fail if a node registers twice" in { @@ -401,8 +403,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[DuplicateNode](occurrences = 1) intercept { controller ! nodeB } - a.expectMsg(Send(BarrierFailed("initial startup"))) - b.expectMsg(Send(BarrierFailed("initial startup"))) + a.expectMsg(ToClient(BarrierResult("initial startup", false))) + b.expectMsg(ToClient(BarrierResult("initial startup", false))) } "fail subsequent barriers if a node registers twice" in { @@ -411,13 +413,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref) controller ! nodeA - a.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) EventFilter[DuplicateNode](occurrences = 1) intercept { controller ! 
nodeB - b.expectMsg(Send(BarrierFailed("initial startup"))) + b.expectMsg(ToClient(BarrierResult("initial startup", false))) } a.send(controller, EnterBarrier("x")) - a.expectMsg(Send(BarrierFailed("x"))) + a.expectMsg(ToClient(BarrierResult("x", false))) } "finally have no failure messages left" in { @@ -428,13 +430,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with private def getController(participants: Int): ActorRef = { system.actorOf(Props(new Actor { - val controller = context.actorOf(Props(new Controller(participants))) - controller ! GetPort + val controller = context.actorOf(Props(new Controller(participants, new InetSocketAddress(InetAddress.getLocalHost, 0)))) + controller ! GetSockAddr override def supervisorStrategy = OneForOneStrategy() { case x ⇒ testActor ! Failed(controller, x); SupervisorStrategy.Restart } def receive = { - case x: Int ⇒ testActor ! controller + case x: InetSocketAddress ⇒ testActor ! controller } })) expectMsgType[ActorRef] diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala index db0e3cfe69..c4e0ca6cd0 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala @@ -8,6 +8,8 @@ import akka.actor.Props import akka.testkit.ImplicitSender import akka.remote.testconductor.Controller.NodeInfo import akka.actor.AddressFromURIString +import java.net.InetSocketAddress +import java.net.InetAddress object ControllerSpec { val config = """ @@ -24,11 +26,11 @@ class ControllerSpec extends AkkaSpec(ControllerSpec.config) with ImplicitSender "A Controller" must { "publish its nodes" in { - val c = system.actorOf(Props(new Controller(1))) + val c = system.actorOf(Props(new Controller(1, new InetSocketAddress(InetAddress.getLocalHost, 0)))) c ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) - expectMsg(Send(Done)) + expectMsg(ToClient(Done)) c ! NodeInfo("b", AddressFromURIString("akka://sys"), testActor) - expectMsg(Send(Done)) + expectMsg(ToClient(Done)) c ! Controller.GetNodes expectMsgType[Iterable[String]].toSet must be(Set("a", "b")) } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala new file mode 100644 index 0000000000..7acde4eac9 --- /dev/null +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -0,0 +1,157 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.remote.testkit + +import akka.testkit.AkkaSpec +import akka.actor.ActorSystem +import akka.remote.testconductor.TestConductor +import java.net.InetAddress +import java.net.InetSocketAddress +import akka.remote.testconductor.TestConductorExt +import com.typesafe.config.Config +import com.typesafe.config.ConfigFactory +import akka.dispatch.Await.Awaitable +import akka.dispatch.Await +import akka.util.Duration +import akka.actor.ActorPath +import akka.actor.RootActorPath + +object MultiNodeSpec { + + /** + * Names (or IP addresses; must be resolvable using InetAddress.getByName) + * of all nodes taking part in this test, including symbolic name and host + * definition: + * + * {{{ + * -D"multinode.hosts=host1@workerA.example.com,host2@workerB.example.com" + * }}} + */ + val nodeNames: Seq[String] = Vector.empty ++ ( + Option(System.getProperty("multinode.hosts")) getOrElse + (throw new IllegalStateException("need system property multinode.hosts to be set")) split ",") + + require(nodeNames != List(""), "multinode.hosts must not be empty") + + /** + * Index of this node in the nodeNames / nodeAddresses lists. The TestConductor + * is started in “controller” mode on selfIndex 0, i.e. there you can inject + * failures and shutdown other nodes etc. + */ + val selfIndex = Option(Integer.getInteger("multinode.index")) getOrElse + (throw new IllegalStateException("need system property multinode.index to be set")) + + require(selfIndex >= 0 && selfIndex < nodeNames.size, "selfIndex out of bounds: " + selfIndex) + + val nodeConfig = AkkaSpec.mapToConfig(Map( + "akka.actor.provider" -> "akka.remote.RemoteActorRefProvider", + "akka.remote.transport" -> "akka.remote.testconductor.TestConductorTransport", + "akka.remote.netty.hostname" -> nodeNames(selfIndex), + "akka.remote.netty.port" -> 0)) + +} + +abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { + + import MultiNodeSpec._ + + def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, + MultiNodeSpec.nodeConfig.withFallback(config.withFallback(AkkaSpec.testConf)))) + + def this(s: String) = this(ConfigFactory.parseString(s)) + + def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) + + def this() = this(AkkaSpec.testConf) + + /* + * Test Class Interface + */ + + /** + * TO BE DEFINED BY USER: Defines the number of participants required for starting the test. This + * might not be equals to the number of nodes available to the test. + * + * Must be a `def`: + * {{{ + * def initialParticipants = 5 + * }}} + */ + def initialParticipants: Int + require(initialParticipants > 0, "initialParticipants must be a 'def' or early initializer, and it must be greater zero") + require(initialParticipants <= nodeNames.size, "not enough nodes to run this test") + + /** + * Access to the barriers, failure injection, etc. The extension will have + * been started either in Conductor or Player mode when the constructor of + * MultiNodeSpec finishes, i.e. do not call the start*() methods yourself! + */ + val testConductor: TestConductorExt = TestConductor(system) + + /** + * TO BE DEFINED BY USER: The test class must define a set of role names to + * be used throughout the run, e.g. in naming nodes in failure injections. + * These will be mapped to the available nodes such that the first name will + * be the Controller, i.e. on this one you can do failure injection. 
+ * + * Should be a lazy val due to initialization order: + * {{{ + * lazy val roles = Seq("master", "slave") + * }}} + */ + def roles: Seq[String] + + require(roles.size >= initialParticipants, "not enough roles for initialParticipants") + require(roles.size <= nodeNames.size, "not enough nodes for number of roles") + require(roles.distinct.size == roles.size, "role names must be distinct") + + val mySelf = { + if (selfIndex >= roles.size) System.exit(0) + roles(selfIndex) + } + + /** + * Execute the given block of code only on the given nodes (names according + * to the `roleMap`). + */ + def runOn(nodes: String*)(thunk: ⇒ Unit): Unit = { + if (nodes exists (_ == mySelf)) { + thunk + } + } + + def ifNode[T](nodes: String*)(yes: ⇒ T)(no: ⇒ T): T = { + if (nodes exists (_ == mySelf)) yes else no + } + + /** + * Query the controller for the transport address of the given node (by role name) and + * return that as an ActorPath for easy composition: + * + * {{{ + * val serviceA = system.actorFor(node("master") / "user" / "serviceA") + * }}} + */ + def node(name: String): ActorPath = RootActorPath(testConductor.getAddressFor(name).await) + + /** + * Enrich `.await()` onto all Awaitables, using BarrierTimeout. + */ + implicit def awaitHelper[T](w: Awaitable[T]) = new AwaitHelper(w) + class AwaitHelper[T](w: Awaitable[T]) { + def await: T = Await.result(w, testConductor.Settings.BarrierTimeout.duration) + } + + /* + * Implementation (i.e. wait for start etc.) + */ + + private val controllerAddr = new InetSocketAddress(nodeNames(0), 4711) + if (selfIndex == 0) { + testConductor.startController(initialParticipants, roles(0), controllerAddr).await + } else { + testConductor.startClient(roles(selfIndex), controllerAddr).await + } + +} \ No newline at end of file diff --git a/scripts/fix-protobuf.sh b/scripts/fix-protobuf.sh new file mode 100755 index 0000000000..b0c8831091 --- /dev/null +++ b/scripts/fix-protobuf.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +find . 
-name \*.proto -print0 | xargs -0 perl -pi -e 's/\Qprivate Builder(BuilderParent parent)/private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent)/' From 4217d639f96e25d900e40d8c2e33b5130d35f32a Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 18 May 2012 16:00:33 +0200 Subject: [PATCH 043/106] add utility for fixing up broken PROTOC code (and apply it) --- .../remote/testconductor/TestConductorProtocol.java | 12 ++++++------ scripts/fix-protobuf.sh | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index 4ae1aae07a..99c33e6728 100644 --- a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -492,7 +492,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1397,7 +1397,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1927,7 +1927,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2377,7 +2377,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3005,7 +3005,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3611,7 +3611,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } diff --git a/scripts/fix-protobuf.sh b/scripts/fix-protobuf.sh index b0c8831091..e53ce297ab 100755 --- a/scripts/fix-protobuf.sh +++ b/scripts/fix-protobuf.sh @@ -1,3 +1,3 @@ #!/bin/bash -find . -name \*.proto -print0 | xargs -0 perl -pi -e 's/\Qprivate Builder(BuilderParent parent)/private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent)/' +find . -name \*.java -print0 | xargs -0 perl -pi -e 's/\Qprivate Builder(BuilderParent parent)/private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent)/' From 483083708e0be1dcc226842c96bad2849024712e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 18 May 2012 16:05:38 +0200 Subject: [PATCH 044/106] Added verification that a BalancingDispatcher can not be used with any kind of Router (impl + test). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also had to 'ignore' one test that violates this principle. 
Should be looked into later. Signed-off-by: Jonas Bonér --- .../ActorConfigurationVerificationSpec.scala | 80 +++++++++++++++++++ .../test/scala/akka/routing/ResizerSpec.scala | 3 +- .../src/main/scala/akka/actor/ActorCell.scala | 12 +++ 3 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala new file mode 100644 index 0000000000..cdaa421a59 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala @@ -0,0 +1,80 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.actor + +import akka.testkit._ +import akka.testkit.DefaultTimeout +import akka.testkit.TestEvent._ +import akka.dispatch.Await +import akka.util.duration._ +import akka.routing._ +import akka.config.ConfigurationException +import com.typesafe.config.{ Config, ConfigFactory } +import org.scalatest.BeforeAndAfterEach +import org.scalatest.junit.JUnitSuite + +object ActorConfigurationVerificationSpec { + + class TestActor extends Actor { + def receive: Receive = { + case _ ⇒ + } + } + + val config = """ + balancing-dispatcher { + type = BalancingDispatcher + throughput = 1 + } + pinned-dispatcher { + executor = "thread-pool-executor" + type = PinnedDispatcher + } + """ +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class ActorConfigurationVerificationSpec extends AkkaSpec(ActorConfigurationVerificationSpec.config) with DefaultTimeout with BeforeAndAfterEach { + import ActorConfigurationVerificationSpec._ + + override def atStartup { + system.eventStream.publish(Mute(EventFilter[ConfigurationException](""))) + } + + "An Actor configured with a BalancingDispatcher" must { + "fail verification with a ConfigurationException if also configured with a RoundRobinRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(RoundRobinRouter(2))) + } + } + "fail verification with a ConfigurationException if also configured with a BroadcastRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(BroadcastRouter(2))) + } + } + "fail verification with a ConfigurationException if also configured with a RandomRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(RandomRouter(2))) + } + } + "fail verification with a ConfigurationException if also configured with a SmallestMailboxRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(SmallestMailboxRouter(2))) + } + } + "fail verification with a ConfigurationException if also configured with a ScatterGatherFirstCompletedRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(ScatterGatherFirstCompletedRouter(nrOfInstances = 2, within = 2 seconds))) + } + } + "not fail verification with a ConfigurationException also not configured with a Router" in { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher")) + } + } + "An Actor configured with a non-balancing dispatcher" must { + "not fail verification with a ConfigurationException if 
also configured with a Router" in { + system.actorOf(Props[TestActor].withDispatcher("pinned-dispatcher").withRouter(RoundRobinRouter(2))) + } + } +} diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 457c4ab411..111460e3ac 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -128,7 +128,8 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with current.routees.size must be(2) } - "resize when busy" in { + // FIXME this test violates the rule that you can not use a BalancingDispatcher with any kind of Router - now throws a ConfigurationException in verification process + "resize when busy" ignore { val busy = new TestLatch(1) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 8c68ba3315..9cc993062f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -357,7 +357,19 @@ private[akka] class ActorCell( case _ ⇒ true } + private def verifyActorConfiguration(system: ActorSystem, props: Props, actorName: String): Unit = { + import akka.config.ConfigurationException + import akka.routing.NoRouter + // verify that a BalancingDispatcher is not used with a Router + if (system.dispatchers.lookup(props.dispatcher).isInstanceOf[BalancingDispatcher] && props.routerConfig != NoRouter) + throw new ConfigurationException( + "Configuration for actor [" + actorName + + "] is invalid - you can not use a 'BalancingDispatcher' together with any type of 'Router'") + } + private def _actorOf(props: Props, name: String): ActorRef = { + verifyActorConfiguration(systemImpl, props, name) + if (system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { val ser = SerializationExtension(system) ser.serialize(props.creator) match { From 6d962174fedb48dc3d494f1f6ae47e9996885f8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 18 May 2012 16:16:41 +0200 Subject: [PATCH 045/106] Added documentation about the verification of BalancingDispatcher + Router (added to both Dispatcher and Routing docs) --- akka-docs/java/dispatchers.rst | 6 ++++-- akka-docs/java/routing.rst | 3 ++- akka-docs/scala/dispatchers.rst | 10 ++++++---- akka-docs/scala/routing.rst | 4 +++- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index 6ef0d44d7e..90a0e9cb6a 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -70,7 +70,7 @@ There are 4 different types of message dispatchers: * BalancingDispatcher - - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. + - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. - It is assumed that all actors using the same instance of this dispatcher can process all messages that have been sent to one of the actors; i.e. the actors belong to a pool of actors, and to the client there is no guarantee about which actor instance actually processes a given message. 
@@ -85,9 +85,11 @@ There are 4 different types of message dispatchers: "thread-pool-executor" or the FQCN of an ``akka.dispatcher.ExecutorServiceConfigurator`` + - Note that you can **not** use a ``BalancingDispatcher`` together with any kind of ``Router``, trying to do so will make your actor fail verification. + * CallingThreadDispatcher - - This dispatcher runs invocations on the current thread only. This dispatcher does not create any new threads, + - This dispatcher runs invocations on the current thread only. This dispatcher does not create any new threads, but it can be used from different threads concurrently for the same actor. See :ref:`TestCallingThreadDispatcherRef` for details and restrictions. diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 4d01642a72..e006c7db63 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -375,7 +375,8 @@ The dispatcher for created children of the router will be taken from makes sense to configure the :class:`BalancingDispatcher` if the precise routing is not so important (i.e. no consistent hashing or round-robin is required); this enables newly created routees to pick up work immediately by -stealing it from their siblings. +stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` +together with any kind of ``Router``, trying to do so will make your actor fail verification. The “head” router, of course, cannot run on the same balancing dispatcher, because it does not process the same messages, hence this special actor does diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index 7d6a1f6334..a1cc431643 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -71,7 +71,7 @@ There are 4 different types of message dispatchers: * BalancingDispatcher - - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. + - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. - It is assumed that all actors using the same instance of this dispatcher can process all messages that have been sent to one of the actors; i.e. the actors belong to a pool of actors, and to the client there is no guarantee about which actor instance actually processes a given message. @@ -86,9 +86,11 @@ There are 4 different types of message dispatchers: "thread-pool-executor" or the FQCN of an ``akka.dispatcher.ExecutorServiceConfigurator`` + - Note that you can **not** use a ``BalancingDispatcher`` together with any kind of ``Router``, trying to do so will make your actor fail verification. + * CallingThreadDispatcher - - This dispatcher runs invocations on the current thread only. This dispatcher does not create any new threads, + - This dispatcher runs invocations on the current thread only. This dispatcher does not create any new threads, but it can be used from different threads concurrently for the same actor. See :ref:`TestCallingThreadDispatcherRef` for details and restrictions. @@ -112,8 +114,8 @@ And then using it: .. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#defining-pinned-dispatcher -Note that ``thread-pool-executor`` configuration as per the above ``my-thread-pool-dispatcher`` exmaple is -NOT applicable. 
This is because every actor will have its own thread pool when using ``PinnedDispatcher``, +Note that ``thread-pool-executor`` configuration as per the above ``my-thread-pool-dispatcher`` exmaple is +NOT applicable. This is because every actor will have its own thread pool when using ``PinnedDispatcher``, and that pool will have only one thread. Mailboxes diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 737c9e31e7..0d0625be36 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -375,7 +375,9 @@ The dispatcher for created children of the router will be taken from makes sense to configure the :class:`BalancingDispatcher` if the precise routing is not so important (i.e. no consistent hashing or round-robin is required); this enables newly created routees to pick up work immediately by -stealing it from their siblings. +stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` +together with any kind of ``Router``, trying to do so will make your actor fail verification. + .. note:: From e99c9385283256a6254b78dd4c6fe24eed464fb0 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 18 May 2012 16:26:48 +0200 Subject: [PATCH 046/106] =?UTF-8?q?switch=20to=20Bj=C3=B6rn=E2=80=99s=20ne?= =?UTF-8?q?w=20multi-jvm=20setup=20(i.e.=20remove=20system=20properties)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../remote/testconductor/TestConductorSpec.scala | 16 ++++------------ project/plugins.sbt | 2 +- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 39d25981aa..7f3763fcc1 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -14,9 +14,8 @@ import java.net.InetSocketAddress import java.net.InetAddress import akka.remote.testkit.MultiNodeSpec -object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 2 - override def commonConfig = ConfigFactory.parseString(""" +object TestConductorMultiJvmSpec { + def commonConfig = ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.remote { log-received-messages = on @@ -29,15 +28,8 @@ object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { """) } -object H { - def apply(x: Int) = { - System.setProperty("multinode.hosts", "localhost,localhost") - System.setProperty("multinode.index", x.toString) - } -} - -class TestConductorMultiJvmNode1 extends { val dummy = H(0) } with TestConductorSpec -class TestConductorMultiJvmNode2 extends { val dummy = H(1) } with TestConductorSpec +class TestConductorMultiJvmNode1 extends TestConductorSpec +class TestConductorMultiJvmNode2 extends TestConductorSpec class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonConfig) with ImplicitSender { diff --git a/project/plugins.sbt b/project/plugins.sbt index 80ff9db95a..f49cfb688d 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,7 +1,7 @@ resolvers += Classpaths.typesafeResolver -addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9") +addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-SNAPSHOT") addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.2") From 
4fb4903225e9b7f0d770d8812fb0a12f63c1bd77 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 16:41:19 +0200 Subject: [PATCH 047/106] Further work on binary compatibility --- .../main/scala/akka/pattern/AskSupport.scala | 15 +++-- .../akka/routing/ConnectionManager.scala | 8 +-- .../scala/akka/routing/ConsistentHash.scala | 60 +++++++++---------- .../src/main/scala/akka/routing/Routing.scala | 2 - 4 files changed, 42 insertions(+), 43 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index cfaa0a182b..a20baaf533 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -157,6 +157,8 @@ trait AskSupport { /** * Akka private optimized representation of the temporary actor spawned to * receive the reply to an "ask" operation. + * + * INTERNAL API */ private[akka] final class PromiseActorRef private (val provider: ActorRefProvider, val result: Promise[Any]) extends MinimalActorRef { @@ -182,14 +184,12 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide private def state: AnyRef = Unsafe.instance.getObjectVolatile(this, stateOffset) @inline - private def updateState(oldState: AnyRef, newState: AnyRef): Boolean = - Unsafe.instance.compareAndSwapObject(this, stateOffset, oldState, newState) + private def updateState(oldState: AnyRef, newState: AnyRef): Boolean = Unsafe.instance.compareAndSwapObject(this, stateOffset, oldState, newState) @inline - private def setState(newState: AnyRef): Unit = - Unsafe.instance.putObjectVolatile(this, stateOffset, newState) + private def setState(newState: AnyRef): Unit = Unsafe.instance.putObjectVolatile(this, stateOffset, newState) - override def getParent = provider.tempContainer + override def getParent: InternalActorRef = provider.tempContainer /** * Contract of this method: @@ -234,7 +234,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide case _ ⇒ } - override def isTerminated = state match { + override def isTerminated: Boolean = state match { case Stopped | _: StoppedWithPath ⇒ true case _ ⇒ false } @@ -263,6 +263,9 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide } } +/** + * INTERNAL API + */ private[akka] object PromiseActorRef { private case object Registering private case object Stopped diff --git a/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala b/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala index 3136a2342d..9029c1f78b 100644 --- a/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala +++ b/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala @@ -10,10 +10,8 @@ import akka.actor._ * An Iterable that also contains a version. */ trait VersionedIterable[A] { - val version: Long - + def version: Long def iterable: Iterable[A] - def apply(): Iterable[A] = iterable } @@ -42,7 +40,7 @@ trait ConnectionManager { /** * Shuts the connection manager down, which stops all managed actors */ - def shutdown() + def shutdown(): Unit /** * Returns a VersionedIterator containing all connected ActorRefs at some moment in time. 
Since there is @@ -59,5 +57,5 @@ trait ConnectionManager { * * @param ref the dead */ - def remove(deadRef: ActorRef) + def remove(deadRef: ActorRef): Unit } diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala index 130db2be3e..afa321d07d 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala @@ -23,7 +23,7 @@ class ConsistentHash[T](nodes: Seq[T], replicas: Int) { nodes.foreach(this += _) - def +=(node: T) { + def +=(node: T): Unit = { cluster += node (1 to replicas) foreach { replica ⇒ val key = hashFor((node + ":" + replica).getBytes("UTF-8")) @@ -32,7 +32,7 @@ class ConsistentHash[T](nodes: Seq[T], replicas: Int) { } } - def -=(node: T) { + def -=(node: T): Unit = { cluster -= node (1 to replicas) foreach { replica ⇒ val key = hashFor((node + ":" + replica).getBytes("UTF-8")) @@ -96,7 +96,7 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T private var hashvalue = h /** Begin a new hash using the same seed. */ - def reset() { + def reset(): Unit = { h = startHash(seed) c = hiddenMagicA k = hiddenMagicB @@ -104,7 +104,7 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T } /** Incorporate the hash value of one item. */ - def apply(t: T) { + def apply(t: T): Unit = { h = extendHash(h, t.##, c, k) c = nextMagicA(c) k = nextMagicB(k) @@ -112,7 +112,7 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T } /** Incorporate a known hash value. */ - def append(i: Int) { + def append(i: Int): Unit = { h = extendHash(h, i, c, k) c = nextMagicA(c) k = nextMagicB(k) @@ -120,14 +120,15 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T } /** Retrieve the hash value */ - def hash = { + def hash: Int = { if (!hashed) { hashvalue = finalizeHash(h) hashed = true } hashvalue } - override def hashCode = hash + + override def hashCode: Int = hash } /** @@ -143,35 +144,35 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T object MurmurHash { // Magic values used for MurmurHash's 32 bit hash. // Don't change these without consulting a hashing expert! 
- final private val visibleMagic = 0x971e137b - final private val hiddenMagicA = 0x95543787 - final private val hiddenMagicB = 0x2ad7eb25 - final private val visibleMixer = 0x52dce729 - final private val hiddenMixerA = 0x7b7d159c - final private val hiddenMixerB = 0x6bce6396 - final private val finalMixer1 = 0x85ebca6b - final private val finalMixer2 = 0xc2b2ae35 + final private val visibleMagic: Int = 0x971e137b + final private val hiddenMagicA: Int = 0x95543787 + final private val hiddenMagicB: Int = 0x2ad7eb25 + final private val visibleMixer: Int = 0x52dce729 + final private val hiddenMixerA: Int = 0x7b7d159c + final private val hiddenMixerB: Int = 0x6bce6396 + final private val finalMixer1: Int = 0x85ebca6b + final private val finalMixer2: Int = 0xc2b2ae35 // Arbitrary values used for hashing certain classes - final private val seedString = 0xf7ca7fd2 - final private val seedArray = 0x3c074a61 + final private val seedString: Int = 0xf7ca7fd2 + final private val seedArray: Int = 0x3c074a61 /** The first 23 magic integers from the first stream are stored here */ - val storedMagicA = + val storedMagicA: Array[Int] = Iterator.iterate(hiddenMagicA)(nextMagicA).take(23).toArray /** The first 23 magic integers from the second stream are stored here */ - val storedMagicB = + val storedMagicB: Array[Int] = Iterator.iterate(hiddenMagicB)(nextMagicB).take(23).toArray /** Begin a new hash with a seed value. */ - def startHash(seed: Int) = seed ^ visibleMagic + def startHash(seed: Int): Int = seed ^ visibleMagic /** The initial magic integers in the first stream. */ - def startMagicA = hiddenMagicA + def startMagicA: Int = hiddenMagicA /** The initial magic integer in the second stream. */ - def startMagicB = hiddenMagicB + def startMagicB: Int = hiddenMagicB /** * Incorporates a new value into an existing hash. @@ -182,18 +183,17 @@ object MurmurHash { * @param magicB a magic integer from a different stream * @return the updated hash value */ - def extendHash(hash: Int, value: Int, magicA: Int, magicB: Int) = { + def extendHash(hash: Int, value: Int, magicA: Int, magicB: Int): Int = (hash ^ rotl(value * magicA, 11) * magicB) * 3 + visibleMixer - } /** Given a magic integer from the first stream, compute the next */ - def nextMagicA(magicA: Int) = magicA * 5 + hiddenMixerA + def nextMagicA(magicA: Int): Int = magicA * 5 + hiddenMixerA /** Given a magic integer from the second stream, compute the next */ - def nextMagicB(magicB: Int) = magicB * 5 + hiddenMixerB + def nextMagicB(magicB: Int): Int = magicB * 5 + hiddenMixerB /** Once all hashes have been incorporated, this performs a final mixing */ - def finalizeHash(hash: Int) = { + def finalizeHash(hash: Int): Int = { var i = (hash ^ (hash >>> 16)) i *= finalMixer1 i ^= (i >>> 13) @@ -203,7 +203,7 @@ object MurmurHash { } /** Compute a high-quality hash of an array */ - def arrayHash[@specialized T](a: Array[T]) = { + def arrayHash[@specialized T](a: Array[T]): Int = { var h = startHash(a.length * seedArray) var c = hiddenMagicA var k = hiddenMagicB @@ -218,7 +218,7 @@ object MurmurHash { } /** Compute a high-quality hash of a string */ - def stringHash(s: String) = { + def stringHash(s: String): Int = { var h = startHash(s.length * seedString) var c = hiddenMagicA var k = hiddenMagicB @@ -239,7 +239,7 @@ object MurmurHash { * where the order of appearance of elements does not matter. * This is useful for hashing sets, for example. 
*/ - def symmetricHash[T](xs: TraversableOnce[T], seed: Int) = { + def symmetricHash[T](xs: TraversableOnce[T], seed: Int): Int = { var a, b, n = 0 var c = 1 xs.foreach(i ⇒ { diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 211ef202f7..c3db8293d2 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -8,12 +8,10 @@ import akka.util.Duration import akka.util.duration._ import akka.ConfigurationException import akka.pattern.pipe -import akka.pattern.AskSupport import com.typesafe.config.Config import scala.collection.JavaConversions.iterableAsScalaIterable import java.util.concurrent.atomic.{ AtomicLong, AtomicBoolean } import java.util.concurrent.TimeUnit -import java.util.concurrent.locks.ReentrantLock import akka.jsr166y.ThreadLocalRandom import akka.util.Unsafe import akka.dispatch.Dispatchers From 134f1a19a50331acabac6b84eb3599d6edc24303 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 16:41:39 +0200 Subject: [PATCH 048/106] Reworking Listeners so that senders can be supplied --- .../main/scala/akka/routing/Listeners.scala | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Listeners.scala b/akka-actor/src/main/scala/akka/routing/Listeners.scala index 39fbf6355d..5ac02e2945 100644 --- a/akka-actor/src/main/scala/akka/routing/Listeners.scala +++ b/akka-actor/src/main/scala/akka/routing/Listeners.scala @@ -5,8 +5,7 @@ package akka.routing import akka.actor.{ Actor, ActorRef } -import java.util.concurrent.ConcurrentSkipListSet -import scala.collection.JavaConversions._ +import java.util.{ Set, TreeSet } sealed trait ListenerMessage case class Listen(listener: ActorRef) extends ListenerMessage @@ -25,13 +24,29 @@ case class WithListeners(f: (ActorRef) ⇒ Unit) extends ListenerMessage * Send WithListeners(fun) to traverse the current listeners. */ trait Listeners { self: Actor ⇒ - protected val listeners = new ConcurrentSkipListSet[ActorRef] + protected val listeners: Set[ActorRef] = new TreeSet[ActorRef] + /** + * Chain this into the receive function. + * + * {{ def receive = listenerManagement orElse … }} + */ protected def listenerManagement: Actor.Receive = { - case Listen(l) ⇒ listeners add l - case Deafen(l) ⇒ listeners remove l - case WithListeners(f) ⇒ listeners foreach f + case Listen(l) ⇒ listeners add l + case Deafen(l) ⇒ listeners remove l + case WithListeners(f) ⇒ + val i = listeners.iterator + while (i.hasNext) f(i.next) } - protected def gossip(msg: Any) = listeners foreach (_ ! msg) + /** + * Sends the supplied message to all current listeners using the provided sender as sender. + * + * @param msg + * @param sender + */ + protected def gossip(msg: Any)(implicit sender: ActorRef = null): Unit = { + val i = listeners.iterator + while (i.hasNext) i.next ! msg + } } From 66600f9c52dfefb577fd1ef4bd89a0fde685d724 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 18 May 2012 16:51:12 +0200 Subject: [PATCH 049/106] Moved the dispatcher/router verification to RoutedActorRef, also checks dispatcher only through the config so we don't trigger creation of dispatcher. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 12 ------------ .../src/main/scala/akka/dispatch/Dispatchers.scala | 6 +++--- akka-actor/src/main/scala/akka/routing/Routing.scala | 10 ++++++++-- 3 files changed, 11 insertions(+), 17 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 9cc993062f..8c68ba3315 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -357,19 +357,7 @@ private[akka] class ActorCell( case _ ⇒ true } - private def verifyActorConfiguration(system: ActorSystem, props: Props, actorName: String): Unit = { - import akka.config.ConfigurationException - import akka.routing.NoRouter - // verify that a BalancingDispatcher is not used with a Router - if (system.dispatchers.lookup(props.dispatcher).isInstanceOf[BalancingDispatcher] && props.routerConfig != NoRouter) - throw new ConfigurationException( - "Configuration for actor [" + actorName + - "] is invalid - you can not use a 'BalancingDispatcher' together with any type of 'Router'") - } - private def _actorOf(props: Props, name: String): ActorRef = { - verifyActorConfiguration(systemImpl, props, name) - if (system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { val ser = SerializationExtension(system) ser.serialize(props.creator) match { diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 93d44e007d..a81a8e6c2b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -120,9 +120,9 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc * Throws: IllegalArgumentException if the value of "type" is not valid * IllegalArgumentException if it cannot create the MessageDispatcherConfigurator */ - private[akka] def from(cfg: Config): MessageDispatcher = { - configuratorFrom(cfg).dispatcher() - } + private[akka] def from(cfg: Config): MessageDispatcher = configuratorFrom(cfg).dispatcher() + + private[akka] def isBalancingDispatcher(id: String): Boolean = settings.config.hasPath(id) && config(id).getString("type") == "BalancingDispatcher" /* * Creates a MessageDispatcherConfigurator from a Config. 
diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index fdf14a5b96..58ecbfcdc5 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -31,11 +31,17 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _supervisor, _path) { + // verify that a BalancingDispatcher is not used with a Router + if (_system.dispatchers.isBalancingDispatcher(_props.dispatcher) && _props.routerConfig != NoRouter) + throw new ConfigurationException( + "Configuration for actor [" + _path.toString + + "] is invalid - you can not use a 'BalancingDispatcher' together with any type of 'Router'") + /* * CAUTION: RoutedActorRef is PROBLEMATIC * ====================================== - * - * We are constructing/assembling the children outside of the scope of the + * + * We are constructing/assembling the children outside of the scope of the * Router actor, inserting them in its childrenRef list, which is not at all * synchronized. This is done exactly once at start-up, all other accesses * are done from the Router actor. This means that the only thing which is From e825a8ac4f0dd84297fb7d93f01f1f755e573631 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 18 May 2012 18:44:53 +0200 Subject: [PATCH 050/106] switch MultiNodeSpec to use RoleName type when referring to participants - also add MultiNodeConfig base class for conveniently declaring a test setup including roles and (node specific) config settings --- .../akka/remote/testconductor/Conductor.scala | 32 ++-- .../akka/remote/testconductor/DataTypes.scala | 22 +-- .../akka/remote/testconductor/Player.scala | 10 +- .../testconductor/TestConductorSpec.scala | 41 +++-- .../remote/testconductor/BarrierSpec.scala | 164 +++++++++--------- .../remote/testconductor/ControllerSpec.scala | 9 +- .../akka/remote/testkit/MultiNodeSpec.scala | 87 ++++++---- 7 files changed, 197 insertions(+), 168 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index d4fa3152e6..6c26fcaae2 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -71,7 +71,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param participants gives the number of participants which shall connect * before any of their startClient() operations complete. */ - def startController(participants: Int, name: String, controllerPort: InetSocketAddress): Future[InetSocketAddress] = { + def startController(participants: Int, name: RoleName, controllerPort: InetSocketAddress): Future[InetSocketAddress] = { if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") _controller = system.actorOf(Props(new Controller(participants, controllerPort)), "controller") import Settings.BarrierTimeout @@ -106,7 +106,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both` * @param rateMBit is the maximum data rate in MBit */ - def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] = { + def throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Double): Future[Done] = { import Settings.QueryTimeout controller ? 
Throttle(node, target, direction, rateMBit.toFloat) mapTo } @@ -121,7 +121,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param target is the symbolic name of the other node to which connectivity shall be impeded * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both` */ - def blackhole(node: String, target: String, direction: Direction): Future[Done] = { + def blackhole(node: RoleName, target: RoleName, direction: Direction): Future[Done] = { import Settings.QueryTimeout controller ? Throttle(node, target, direction, 0f) mapTo } @@ -134,7 +134,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param node is the symbolic name of the node which is to be affected * @param target is the symbolic name of the other node to which connectivity shall be impeded */ - def disconnect(node: String, target: String): Future[Done] = { + def disconnect(node: RoleName, target: RoleName): Future[Done] = { import Settings.QueryTimeout controller ? Disconnect(node, target, false) mapTo } @@ -147,7 +147,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param node is the symbolic name of the node which is to be affected * @param target is the symbolic name of the other node to which connectivity shall be impeded */ - def abort(node: String, target: String): Future[Done] = { + def abort(node: RoleName, target: RoleName): Future[Done] = { import Settings.QueryTimeout controller ? Disconnect(node, target, true) mapTo } @@ -159,7 +159,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param node is the symbolic name of the node which is to be affected * @param exitValue is the return code which shall be given to System.exit */ - def shutdown(node: String, exitValue: Int): Future[Done] = { + def shutdown(node: RoleName, exitValue: Int): Future[Done] = { import Settings.QueryTimeout controller ? Terminate(node, exitValue) mapTo } @@ -169,7 +169,7 @@ trait Conductor { this: TestConductorExt ⇒ * * @param node is the symbolic name of the node which is to be affected */ - def kill(node: String): Future[Done] = { + def kill(node: RoleName): Future[Done] = { import Settings.QueryTimeout controller ? Terminate(node, -1) mapTo } @@ -177,7 +177,7 @@ trait Conductor { this: TestConductorExt ⇒ /** * Obtain the list of remote host names currently registered. */ - def getNodes: Future[Iterable[String]] = { + def getNodes: Future[Iterable[RoleName]] = { import Settings.QueryTimeout controller ? GetNodes mapTo } @@ -190,7 +190,7 @@ trait Conductor { this: TestConductorExt ⇒ * * @param node is the symbolic name of the node which is to be removed */ - def removeNode(node: String): Future[Done] = { + def removeNode(node: RoleName): Future[Done] = { import Settings.QueryTimeout controller ? Remove(node) mapTo } @@ -274,7 +274,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi when(Initial, stateTimeout = 10 seconds) { case Event(Hello(name, addr), _) ⇒ - controller ! NodeInfo(name, addr, self) + controller ! 
NodeInfo(RoleName(name), addr, self) goto(Ready) case Event(x: NetworkOp, _) ⇒ log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x) @@ -318,11 +318,11 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } object Controller { - case class ClientDisconnected(name: String) + case class ClientDisconnected(name: RoleName) case object GetNodes case object GetSockAddr - case class NodeInfo(name: String, addr: Address, fsm: ActorRef) + case class NodeInfo(name: RoleName, addr: Address, fsm: ActorRef) } /** @@ -359,10 +359,10 @@ class Controller(private var initialParticipants: Int, controllerPort: InetSocke } val barrier = context.actorOf(Props[BarrierCoordinator], "barriers") - var nodes = Map[String, NodeInfo]() + var nodes = Map[RoleName, NodeInfo]() // map keeping unanswered queries for node addresses (enqueued upon GetAddress, serviced upon NodeInfo) - var addrInterest = Map[String, Set[ActorRef]]() + var addrInterest = Map[RoleName, Set[ActorRef]]() override def receive = LoggingReceive { case c @ NodeInfo(name, addr, fsm) ⇒ @@ -423,7 +423,7 @@ object BarrierCoordinator { case object Idle extends State case object Waiting extends State - case class RemoveClient(name: String) + case class RemoveClient(name: RoleName) case class Data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef]) @@ -435,7 +435,7 @@ object BarrierCoordinator { case class DuplicateNode(data: Data, node: Controller.NodeInfo) extends RuntimeException with NoStackTrace with Printer case class WrongBarrier(barrier: String, client: ActorRef, data: Data) extends RuntimeException(barrier) with NoStackTrace with Printer case class BarrierEmpty(data: Data, msg: String) extends RuntimeException(msg) with NoStackTrace with Printer - case class ClientLost(data: Data, client: String) extends RuntimeException with NoStackTrace with Printer + case class ClientLost(data: Data, client: RoleName) extends RuntimeException with NoStackTrace with Printer } /** diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index 0273055469..2bb7d50c37 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -11,6 +11,8 @@ import com.google.protobuf.Message import akka.actor.Address import org.jboss.netty.handler.codec.oneone.OneToOneDecoder +case class RoleName(name: String) + case class ToClient(msg: ClientOp with NetworkOp) case class ToServer(msg: ServerOp with NetworkOp) @@ -29,24 +31,24 @@ case class Hello(name: String, addr: Address) extends NetworkOp case class EnterBarrier(name: String) extends ServerOp with NetworkOp case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp -case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends CommandOp +case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) extends CommandOp case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp -case class Disconnect(node: String, target: String, abort: Boolean) extends CommandOp +case class Disconnect(node: RoleName, target: RoleName, abort: Boolean) extends CommandOp case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with 
NetworkOp -case class Terminate(node: String, exitValueOrKill: Int) extends CommandOp +case class Terminate(node: RoleName, exitValueOrKill: Int) extends CommandOp case class TerminateMsg(exitValue: Int) extends ConfirmedClientOp with NetworkOp -case class GetAddress(node: String) extends ServerOp with NetworkOp -case class AddressReply(node: String, addr: Address) extends UnconfirmedClientOp with NetworkOp +case class GetAddress(node: RoleName) extends ServerOp with NetworkOp +case class AddressReply(node: RoleName, addr: Address) extends UnconfirmedClientOp with NetworkOp abstract class Done extends ServerOp with UnconfirmedClientOp with NetworkOp case object Done extends Done { def getInstance: Done = this } -case class Remove(node: String) extends CommandOp +case class Remove(node: RoleName) extends CommandOp class MsgEncoder extends OneToOneEncoder { def encode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { @@ -68,9 +70,9 @@ class MsgEncoder extends OneToOneEncoder { case TerminateMsg(exitValue) ⇒ w.setFailure(TCP.InjectFailure.newBuilder.setFailure(TCP.FailType.Shutdown).setExitValue(exitValue)) case GetAddress(node) ⇒ - w.setAddr(TCP.AddressRequest.newBuilder.setNode(node)) + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node.name)) case AddressReply(node, addr) ⇒ - w.setAddr(TCP.AddressRequest.newBuilder.setNode(node).setAddr(addr)) + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node.name).setAddr(addr)) case _: Done ⇒ w.setDone("") } @@ -100,8 +102,8 @@ class MsgDecoder extends OneToOneDecoder { } } else if (w.hasAddr) { val a = w.getAddr - if (a.hasAddr) AddressReply(a.getNode, a.getAddr) - else GetAddress(a.getNode) + if (a.hasAddr) AddressReply(RoleName(a.getNode), a.getAddr) + else GetAddress(RoleName(a.getNode)) } else if (w.hasDone) { Done } else { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index 27a2487364..10434007e1 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -50,7 +50,7 @@ trait Player { this: TestConductorExt ⇒ * this is a first barrier in itself). The number of expected participants is * set in [[akka.remote.testconductor.Conductor]]`.startController()`. */ - def startClient(name: String, controllerAddr: InetSocketAddress): Future[Done] = { + def startClient(name: RoleName, controllerAddr: InetSocketAddress): Future[Done] = { import ClientFSM._ import akka.actor.FSM._ import Settings.BarrierTimeout @@ -88,7 +88,7 @@ trait Player { this: TestConductorExt ⇒ /** * Query remote transport address of named node. */ - def getAddressFor(name: String): Future[Address] = { + def getAddressFor(name: RoleName): Future[Address] = { import Settings.BarrierTimeout client ? ToServer(GetAddress(name)) mapTo } @@ -117,7 +117,7 @@ object ClientFSM { * coordinator and react to the [[akka.remote.testconductor.Conductor]]’s * requests for failure injection. 
*/ -class ClientFSM(name: String, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { +class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ val settings = TestConductor().Settings @@ -131,7 +131,7 @@ class ClientFSM(name: String, controllerAddr: InetSocketAddress) extends Actor w case Event(msg: ClientOp, _) ⇒ stay replying Status.Failure(new IllegalStateException("not connected yet")) case Event(Connected(channel), _) ⇒ - channel.write(Hello(name, TestConductor().address)) + channel.write(Hello(name.name, TestConductor().address)) goto(AwaitDone) using Data(Some(channel), None) case Event(_: ConnectionFailure, _) ⇒ goto(Failed) @@ -165,7 +165,7 @@ class ClientFSM(name: String, controllerAddr: InetSocketAddress) extends Actor w channel.write(msg) val token = msg match { case EnterBarrier(barrier) ⇒ barrier - case GetAddress(node) ⇒ node + case GetAddress(node) ⇒ node.name } stay using d.copy(runningOp = Some(token, sender)) case Event(ToServer(op), Data(channel, Some((token, _)))) ⇒ diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 7f3763fcc1..087aac55c7 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -13,9 +13,10 @@ import akka.testkit.ImplicitSender import java.net.InetSocketAddress import java.net.InetAddress import akka.remote.testkit.MultiNodeSpec +import akka.remote.testkit.MultiNodeConfig -object TestConductorMultiJvmSpec { - def commonConfig = ConfigFactory.parseString(""" +object TestConductorMultiJvmSpec extends MultiNodeConfig { + commonConfig(ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.remote { log-received-messages = on @@ -25,18 +26,22 @@ object TestConductorMultiJvmSpec { receive = on fsm = on } - """) + """)) + + val master = role("master") + val slave = role("slave") } class TestConductorMultiJvmNode1 extends TestConductorSpec class TestConductorMultiJvmNode2 extends TestConductorSpec -class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonConfig) with ImplicitSender { +class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with ImplicitSender { + + import TestConductorMultiJvmSpec._ def initialParticipants = 2 - lazy val roles = Seq("master", "slave") - runOn("master") { + runOn(master) { system.actorOf(Props(new Actor { def receive = { case x ⇒ testActor ! x; sender ! x @@ -44,7 +49,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonCo }), "echo") } - val echo = system.actorFor(node("master") / "user" / "echo") + val echo = system.actorFor(node(master) / "user" / "echo") "A TestConductor" must { @@ -54,20 +59,20 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonCo "support throttling of network connections" in { - runOn("slave") { + runOn(slave) { // start remote network connection so that it can be throttled echo ! 
"start" } expectMsg("start") - runOn("master") { - testConductor.throttle("slave", "master", Direction.Send, rateMBit = 0.01).await + runOn(master) { + testConductor.throttle(slave, master, Direction.Send, rateMBit = 0.01).await } testConductor.enter("throttled_send") - runOn("slave") { + runOn(slave) { for (i ← 0 to 9) echo ! i } @@ -78,19 +83,19 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonCo testConductor.enter("throttled_send2") - runOn("master") { - testConductor.throttle("slave", "master", Direction.Send, -1).await - testConductor.throttle("slave", "master", Direction.Receive, rateMBit = 0.01).await + runOn(master) { + testConductor.throttle(slave, master, Direction.Send, -1).await + testConductor.throttle(slave, master, Direction.Receive, rateMBit = 0.01).await } testConductor.enter("throttled_recv") - runOn("slave") { + runOn(slave) { for (i ← 10 to 19) echo ! i } val (min, max) = - ifNode("master") { + ifNode(master) { (0 seconds, 500 millis) } { (0.6 seconds, 2 seconds) @@ -103,8 +108,8 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonCo testConductor.enter("throttled_recv2") - runOn("master") { - testConductor.throttle("slave", "master", Direction.Receive, -1).await + runOn(master) { + testConductor.throttle(slave, master, Direction.Receive, -1).await } } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index aa14b93f9d..e0fd5dfb97 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -36,6 +36,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with import Controller._ import BarrierCoordinator._ + val A = RoleName("a") + val B = RoleName("b") + val C = RoleName("c") + override def afterEach { system.eventStream.setLogLevel(Logging.WarningLevel) } @@ -44,25 +48,25 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "register clients and remove them" in { val b = getBarrier() - b ! NodeInfo("a", AddressFromURIString("akka://sys"), system.deadLetters) - b ! RemoveClient("b") - b ! RemoveClient("a") + b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters) + b ! RemoveClient(B) + b ! RemoveClient(A) EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! RemoveClient("a") + b ! RemoveClient(A) } expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to remove"))) } "register clients and disconnect them" in { val b = getBarrier() - b ! NodeInfo("a", AddressFromURIString("akka://sys"), system.deadLetters) - b ! ClientDisconnected("b") + b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters) + b ! ClientDisconnected(B) EventFilter[ClientLost](occurrences = 1) intercept { - b ! ClientDisconnected("a") + b ! ClientDisconnected(A) } - expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil), "a"))) + expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil), A))) EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! ClientDisconnected("a") + b ! ClientDisconnected(A) } expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to disconnect"))) } @@ -76,8 +80,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier" in { val barrier = getBarrier() val a, b = TestProbe() - barrier ! 
NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) noMsg(a, b) within(1 second) { @@ -90,10 +94,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier with joining node" in { val barrier = getBarrier() val a, b, c = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) - barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) b.send(barrier, EnterBarrier("bar")) noMsg(a, b, c) within(1 second) { @@ -107,29 +111,29 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier with leaving node" in { val barrier = getBarrier() val a, b, c = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) - barrier ! RemoveClient("a") - barrier ! ClientDisconnected("a") + barrier ! RemoveClient(A) + barrier ! ClientDisconnected(A) noMsg(a, b, c) b.within(1 second) { - barrier ! RemoveClient("c") + barrier ! RemoveClient(C) b.expectMsg(ToClient(BarrierResult("bar", true))) } - barrier ! ClientDisconnected("c") + barrier ! ClientDisconnected(C) expectNoMsg(1 second) } "leave barrier when last “arrived” is removed" in { val barrier = getBarrier() val a, b = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) - barrier ! RemoveClient("a") + barrier ! RemoveClient(A) b.send(barrier, EnterBarrier("foo")) b.expectMsg(ToClient(BarrierResult("foo", true))) } @@ -137,38 +141,38 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail barrier with disconnecing node" in { val barrier = getBarrier() val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) EventFilter[ClientLost](occurrences = 1) intercept { - barrier ! ClientDisconnected("b") + barrier ! 
ClientDisconnected(B) } - expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), "b"))) + expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), B))) } "fail barrier with disconnecing node who already arrived" in { val barrier = getBarrier() val a, b, c = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeC = NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeC = NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) barrier ! nodeA - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeC a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) EventFilter[ClientLost](occurrences = 1) intercept { - barrier ! ClientDisconnected("b") + barrier ! ClientDisconnected(B) } - expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), "b"))) + expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), B))) } "fail when entering wrong barrier" in { val barrier = getBarrier() val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA - val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeB a.send(barrier, EnterBarrier("bar")) EventFilter[WrongBarrier](occurrences = 1) intercept { @@ -181,10 +185,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val barrier = getBarrier() val a = TestProbe() EventFilter[BarrierEmpty](occurrences = 1) intercept { - barrier ! RemoveClient("a") + barrier ! RemoveClient(A) } expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "no client to remove"))) - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("right")) a.expectMsg(ToClient(BarrierResult("right", false))) } @@ -192,8 +196,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail after barrier timeout" in { val barrier = getBarrier() val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA barrier ! nodeB a.send(barrier, EnterBarrier("right")) @@ -205,8 +209,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail if a node registers twice" in { val barrier = getBarrier() val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA EventFilter[DuplicateNode](occurrences = 1) intercept { barrier ! nodeB @@ -224,25 +228,25 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "register clients and remove them" in { val b = getController(1) - b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) + b ! 
NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) - b ! Remove("b") - b ! Remove("a") + b ! Remove(B) + b ! Remove(A) EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! Remove("a") + b ! Remove(A) } } "register clients and disconnect them" in { val b = getController(1) - b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) + b ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) - b ! ClientDisconnected("b") + b ! ClientDisconnected(B) EventFilter[ClientLost](occurrences = 1) intercept { - b ! ClientDisconnected("a") + b ! ClientDisconnected(A) } EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! ClientDisconnected("a") + b ! ClientDisconnected(A) } } @@ -255,8 +259,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier" in { val barrier = getController(2) val a, b = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) @@ -271,12 +275,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier with joining node" in { val barrier = getController(2) val a, b, c = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) - barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) c.expectMsg(ToClient(Done)) b.send(barrier, EnterBarrier("bar")) noMsg(a, b, c) @@ -291,34 +295,34 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier with leaving node" in { val barrier = getController(3) val a, b, c = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) - barrier ! Remove("a") - barrier ! ClientDisconnected("a") + barrier ! Remove(A) + barrier ! ClientDisconnected(A) noMsg(a, b, c) b.within(1 second) { - barrier ! Remove("c") + barrier ! Remove(C) b.expectMsg(ToClient(BarrierResult("bar", true))) } - barrier ! ClientDisconnected("c") + barrier ! ClientDisconnected(C) expectNoMsg(1 second) } "leave barrier when last “arrived” is removed" in { val barrier = getController(2) val a, b = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! 
NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) - barrier ! Remove("a") + barrier ! Remove(A) b.send(barrier, EnterBarrier("foo")) b.expectMsg(ToClient(BarrierResult("foo", true))) } @@ -326,16 +330,16 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail barrier with disconnecing node" in { val barrier = getController(2) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) - barrier ! ClientDisconnected("unknown") + barrier ! ClientDisconnected(RoleName("unknown")) noMsg(a) EventFilter[ClientLost](occurrences = 1) intercept { - barrier ! ClientDisconnected("b") + barrier ! ClientDisconnected(B) } a.expectMsg(ToClient(BarrierResult("bar", false))) } @@ -343,10 +347,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail barrier with disconnecing node who already arrived" in { val barrier = getController(3) val a, b, c = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeC = NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeC = NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) barrier ! nodeA - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeC a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) @@ -354,7 +358,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) EventFilter[ClientLost](occurrences = 1) intercept { - barrier ! ClientDisconnected("b") + barrier ! ClientDisconnected(B) } a.expectMsg(ToClient(BarrierResult("bar", false))) } @@ -362,9 +366,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail when entering wrong barrier" in { val barrier = getController(2) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA - val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeB a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) @@ -379,8 +383,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "not really fail after barrier timeout" in { val barrier = getController(2) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA barrier ! 
nodeB a.expectMsg(ToClient(Done)) @@ -397,8 +401,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail if a node registers twice" in { val controller = getController(2) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) controller ! nodeA EventFilter[DuplicateNode](occurrences = 1) intercept { controller ! nodeB @@ -410,8 +414,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail subsequent barriers if a node registers twice" in { val controller = getController(1) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) controller ! nodeA a.expectMsg(ToClient(Done)) EventFilter[DuplicateNode](occurrences = 1) intercept { diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala index c4e0ca6cd0..13140adfb5 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala @@ -23,16 +23,19 @@ object ControllerSpec { class ControllerSpec extends AkkaSpec(ControllerSpec.config) with ImplicitSender { + val A = RoleName("a") + val B = RoleName("b") + "A Controller" must { "publish its nodes" in { val c = system.actorOf(Props(new Controller(1, new InetSocketAddress(InetAddress.getLocalHost, 0)))) - c ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) + c ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) - c ! NodeInfo("b", AddressFromURIString("akka://sys"), testActor) + c ! NodeInfo(B, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) c ! Controller.GetNodes - expectMsgType[Iterable[String]].toSet must be(Set("a", "b")) + expectMsgType[Iterable[RoleName]].toSet must be(Set(A, B)) } } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 7acde4eac9..92e65247fb 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -16,6 +16,50 @@ import akka.dispatch.Await import akka.util.Duration import akka.actor.ActorPath import akka.actor.RootActorPath +import akka.remote.testconductor.RoleName + +/** + * Configure the role names and participants of the test, including configuration settings. + */ +abstract class MultiNodeConfig { + + private var _commonConf: Option[Config] = None + private var _nodeConf = Map[RoleName, Config]() + private var _roles = Seq[RoleName]() + + /** + * Register a common base config for all test participants, if so desired. + */ + def commonConfig(config: Config): Unit = _commonConf = Some(config) + + /** + * Register a config override for a specific participant. 
+ */ + def nodeConfig(role: RoleName, config: Config): Unit = _nodeConf += role -> config + + /** + * Construct a RoleName and return it, to be used as an identifier in the + * test. Registration of a role name creates a role which then needs to be + * filled. + */ + def role(name: String): RoleName = { + if (_roles exists (_.name == name)) throw new IllegalArgumentException("non-unique role name " + name) + val r = RoleName(name) + _roles :+= r + r + } + + private[testkit] lazy val mySelf: RoleName = { + require(_roles.size > MultiNodeSpec.selfIndex, "not enough roles declared for this test") + _roles(MultiNodeSpec.selfIndex) + } + + private[testkit] def config: Config = { + val configs = (_nodeConf get mySelf).toList ::: _commonConf.toList ::: MultiNodeSpec.nodeConfig :: AkkaSpec.testConf :: Nil + configs reduce (_ withFallback _) + } + +} object MultiNodeSpec { @@ -52,18 +96,11 @@ object MultiNodeSpec { } -abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { +abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem) extends AkkaSpec(_system) { import MultiNodeSpec._ - def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, - MultiNodeSpec.nodeConfig.withFallback(config.withFallback(AkkaSpec.testConf)))) - - def this(s: String) = this(ConfigFactory.parseString(s)) - - def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) - - def this() = this(AkkaSpec.testConf) + def this(config: MultiNodeConfig) = this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config)) /* * Test Class Interface @@ -89,39 +126,17 @@ abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { */ val testConductor: TestConductorExt = TestConductor(system) - /** - * TO BE DEFINED BY USER: The test class must define a set of role names to - * be used throughout the run, e.g. in naming nodes in failure injections. - * These will be mapped to the available nodes such that the first name will - * be the Controller, i.e. on this one you can do failure injection. - * - * Should be a lazy val due to initialization order: - * {{{ - * lazy val roles = Seq("master", "slave") - * }}} - */ - def roles: Seq[String] - - require(roles.size >= initialParticipants, "not enough roles for initialParticipants") - require(roles.size <= nodeNames.size, "not enough nodes for number of roles") - require(roles.distinct.size == roles.size, "role names must be distinct") - - val mySelf = { - if (selfIndex >= roles.size) System.exit(0) - roles(selfIndex) - } - /** * Execute the given block of code only on the given nodes (names according * to the `roleMap`). */ - def runOn(nodes: String*)(thunk: ⇒ Unit): Unit = { + def runOn(nodes: RoleName*)(thunk: ⇒ Unit): Unit = { if (nodes exists (_ == mySelf)) { thunk } } - def ifNode[T](nodes: String*)(yes: ⇒ T)(no: ⇒ T): T = { + def ifNode[T](nodes: RoleName*)(yes: ⇒ T)(no: ⇒ T): T = { if (nodes exists (_ == mySelf)) yes else no } @@ -133,7 +148,7 @@ abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { * val serviceA = system.actorFor(node("master") / "user" / "serviceA") * }}} */ - def node(name: String): ActorPath = RootActorPath(testConductor.getAddressFor(name).await) + def node(role: RoleName): ActorPath = RootActorPath(testConductor.getAddressFor(role).await) /** * Enrich `.await()` onto all Awaitables, using BarrierTimeout. 
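
Taken together, the MultiNodeConfig and MultiNodeSpec pieces introduced above are intended to be used roughly as in the following minimal sketch. This is not part of the patch: the object, class and role names (SampleConfig, SampleSpec, "first", "second") and the "someActor" path are invented for illustration, only API shown elsewhere in this patch is used.

    // Illustrative sketch only; names are made up, API is the one added in this patch.
    import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec }
    import com.typesafe.config.ConfigFactory

    object SampleConfig extends MultiNodeConfig {
      // optional common configuration shared by all participants
      commonConfig(ConfigFactory.parseString("akka.loglevel = INFO"))
      // declare the roles; each call registers a RoleName to be filled by one node
      val first = role("first")
      val second = role("second")
    }

    // one concrete class per JVM/node, following the *MultiJvmNodeN naming
    // convention used by TestConductorSpec in this patch
    class SampleSpecMultiJvmNode1 extends SampleSpec
    class SampleSpecMultiJvmNode2 extends SampleSpec

    class SampleSpec extends MultiNodeSpec(SampleConfig) {
      import SampleConfig._

      def initialParticipants = 2

      // run this block only on the node that plays the "first" role
      runOn(first) {
        // resolve an actor deployed on the node playing the "second" role
        val ref = system.actorFor(node(second) / "user" / "someActor")
      }
    }
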
@@ -149,9 +164,9 @@ abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { private val controllerAddr = new InetSocketAddress(nodeNames(0), 4711) if (selfIndex == 0) { - testConductor.startController(initialParticipants, roles(0), controllerAddr).await + testConductor.startController(initialParticipants, mySelf, controllerAddr).await } else { - testConductor.startClient(roles(selfIndex), controllerAddr).await + testConductor.startClient(mySelf, controllerAddr).await } } \ No newline at end of file From 94e71b7a18f05eb85bbe0a30f3364d999c3575b6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 19:25:43 +0200 Subject: [PATCH 051/106] Huge refactor + preparing for binary compatibility, last stretch for akka-actor.jar... --- .../src/main/scala/akka/AkkaException.scala | 37 ++- .../src/main/scala/akka/experimental.scala | 19 -- .../src/main/scala/akka/routing/Routing.scala | 127 +++----- .../akka/serialization/Serialization.scala | 12 +- .../scala/akka/serialization/Serializer.scala | 1 - .../akka/util/BoundedBlockingQueue.scala | 8 +- .../src/main/scala/akka/util/ByteString.scala | 48 ++- .../util/ClassLoaderObjectInputStream.scala | 7 + .../src/main/scala/akka/util/Convert.scala | 2 +- .../src/main/scala/akka/util/Crypt.scala | 4 +- .../src/main/scala/akka/util/Duration.scala | 276 ++++++------------ .../src/main/scala/akka/util/Helpers.scala | 17 +- .../src/main/scala/akka/util/Index.scala | 4 +- .../src/main/scala/akka/util/LockUtil.scala | 20 +- .../src/main/scala/akka/util/Reflect.scala | 4 +- .../src/main/scala/akka/util/Unsafe.java | 3 + .../main/scala/akka/util/cps/package.scala | 1 + .../scala/akka/util/duration/package.scala | 36 +-- 18 files changed, 218 insertions(+), 408 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/experimental.scala diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 79d78b9d39..e5b0cb6c80 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -5,19 +5,26 @@ package akka object AkkaException { - + //FIXME DOC def toStringWithStackTrace(throwable: Throwable): String = throwable match { case null ⇒ "Unknown Throwable: was 'null'" case ae: AkkaException ⇒ ae.toLongString case e ⇒ "%s:%s\n%s" format (e.getClass.getName, e.getMessage, stackTraceToString(e)) } - def stackTraceToString(throwable: Throwable): String = { - val trace = throwable.getStackTrace - val sb = new StringBuilder - for (i ← 0 until trace.length) - sb.append("\tat %s\n" format trace(i)) - sb.toString + /** + * Returns the given Throwables stack trace as a String, or the empty String if no trace is found + * @param throwable + * @return + */ + def stackTraceToString(throwable: Throwable): String = throwable.getStackTrace match { + case null ⇒ "" + case x if x.length == 0 ⇒ "" + case trace ⇒ + val sb = new StringBuilder + for (i ← 0 until trace.length) + sb.append("\tat %s\n" format trace(i)) + sb.toString } } @@ -32,17 +39,15 @@ object AkkaException { */ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed class AkkaException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause) with Serializable { - lazy val uuid = java.util.UUID.randomUUID().toString - - override lazy val toString = - "%s:%s\n[%s]".format(getClass.getName, message, uuid) - - lazy val toLongString = - "%s:%s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) - def this(msg: String) = 
this(msg, null) - def stackTraceToString = AkkaException.stackTraceToString(this) + lazy val uuid = java.util.UUID.randomUUID().toString + + override def toString: String = "%s:%s\n[%s]".format(getClass.getName, message, uuid) + + def toLongString: String = "%s:%s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) + + def stackTraceToString: String = AkkaException.stackTraceToString(this) } /** diff --git a/akka-actor/src/main/scala/akka/experimental.scala b/akka-actor/src/main/scala/akka/experimental.scala deleted file mode 100644 index aef3cb5c85..0000000000 --- a/akka-actor/src/main/scala/akka/experimental.scala +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka - -import annotation.target._ - -/** - * This annotation marks a feature which is not yet considered stable and may - * change or be removed in a future release. - * - * @since 1.2 - */ -@getter -@setter -@beanGetter -@beanSetter -final class experimental(since: String) extends annotation.StaticAnnotation diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index c3db8293d2..f27919d316 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -47,12 +47,11 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup ref: InternalActorRef, props: Props, supervisor: InternalActorRef, - receiveTimeout: Option[Duration]): ActorCell = - { - val cell = super.newActorCell(system, ref, props, supervisor, receiveTimeout) - Unsafe.instance.monitorEnter(cell) - cell - } + receiveTimeout: Option[Duration]): ActorCell = { + val cell = super.newActorCell(system, ref, props, supervisor, receiveTimeout) + Unsafe.instance.monitorEnter(cell) + cell + } private[akka] val routerConfig = _props.routerConfig private[akka] val routeeProps = _props.copy(routerConfig = NoRouter) @@ -303,8 +302,8 @@ trait Router extends Actor { final def receive = ({ case Router.Resize ⇒ - try ref.routerConfig.resizer foreach (_.resize(ref.routeeProps, ref.routeeProvider)) - finally assert(ref.resizeInProgress.getAndSet(false)) + val ab = ref.resizeInProgress + if (ab.get) try ref.routerConfig.resizer foreach (_.resize(ref.routeeProps, ref.routeeProvider)) finally ab.set(false) case Terminated(child) ⇒ ref.removeRoutees(IndexedSeq(child)) @@ -319,6 +318,9 @@ trait Router extends Actor { } } +/** + * INTERNAL API + */ private object Router { case object Resize @@ -372,9 +374,9 @@ case class Destination(sender: ActorRef, recipient: ActorRef) //TODO add @SerialVersionUID(1L) when SI-4804 is fixed abstract class NoRouter extends RouterConfig case object NoRouter extends NoRouter { - def createRoute(props: Props, routeeProvider: RouteeProvider): Route = null + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = null // FIXME, null, really?? def routerDispatcher: String = "" - def supervisorStrategy = null + def supervisorStrategy = null // FIXME null, really?? 
override def withFallback(other: RouterConfig): RouterConfig = other /** @@ -404,9 +406,7 @@ case object FromConfig extends FromConfig { //TODO add @SerialVersionUID(1L) when SI-4804 is fixed class FromConfig(val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends RouterConfig - with Product - with Serializable - with Equals { + with Serializable { def this() = this(Dispatchers.DefaultDispatcherId) @@ -414,38 +414,6 @@ class FromConfig(val routerDispatcher: String = Dispatchers.DefaultDispatcherId) throw new ConfigurationException("router " + routeeProvider.context.self + " needs external configuration from file (e.g. application.conf)") def supervisorStrategy: SupervisorStrategy = Router.defaultSupervisorStrategy - - // open-coded case class to preserve binary compatibility, all deprecated for 2.1 - @deprecated("FromConfig does not make sense as case class", "2.0.1") - override def productPrefix = "FromConfig" - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - def productArity = 1 - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - def productElement(x: Int) = x match { - case 0 ⇒ routerDispatcher - case _ ⇒ throw new IndexOutOfBoundsException(x.toString) - } - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - def copy(d: String = Dispatchers.DefaultDispatcherId): FromConfig = new FromConfig(d) - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - def canEqual(o: Any) = o.isInstanceOf[FromConfig] - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - override def hashCode = ScalaRunTime._hashCode(this) - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - override def toString = "FromConfig(" + routerDispatcher + ")" - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - override def equals(other: Any): Boolean = other match { - case FromConfig(x) ⇒ x == routerDispatcher - case _ ⇒ false - } - } object RoundRobinRouter { @@ -510,9 +478,7 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int) = { - this(nrOfInstances = nr) - } + def this(nr: Int) = this(nrOfInstances = nr) /** * Constructor that sets the routees to be used. @@ -520,9 +486,7 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(routeePaths)) - } + def this(routeePaths: java.lang.Iterable[String]) = this(routees = iterableAsScalaIterable(routeePaths)) /** * Constructor that sets the resizer to be used. @@ -533,13 +497,13 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = /** * Java API for setting routerDispatcher */ - def withDispatcher(dispatcherId: String) = copy(routerDispatcher = dispatcherId) + def withDispatcher(dispatcherId: String): RoundRobinRouter = copy(routerDispatcher = dispatcherId) /** * Java API for setting the supervisor strategy to be used for the “head” * Router actor. 
*/ - def withSupervisorStrategy(strategy: SupervisorStrategy) = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): RoundRobinRouter = copy(supervisorStrategy = strategy) } trait RoundRobinLike { this: RouterConfig ⇒ @@ -630,9 +594,7 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int) = { - this(nrOfInstances = nr) - } + def this(nr: Int) = this(nrOfInstances = nr) /** * Constructor that sets the routees to be used. @@ -640,9 +602,7 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(routeePaths)) - } + def this(routeePaths: java.lang.Iterable[String]) = this(routees = iterableAsScalaIterable(routeePaths)) /** * Constructor that sets the resizer to be used. @@ -653,13 +613,13 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, /** * Java API for setting routerDispatcher */ - def withDispatcher(dispatcherId: String) = copy(routerDispatcher = dispatcherId) + def withDispatcher(dispatcherId: String): RandomRouter = copy(routerDispatcher = dispatcherId) /** * Java API for setting the supervisor strategy to be used for the “head” * Router actor. */ - def withSupervisorStrategy(strategy: SupervisorStrategy) = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): RandomRouter = copy(supervisorStrategy = strategy) } trait RandomLike { this: RouterConfig ⇒ @@ -756,9 +716,7 @@ case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[Strin * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int) = { - this(nrOfInstances = nr) - } + def this(nr: Int) = this(nrOfInstances = nr) /** * Constructor that sets the routees to be used. @@ -766,9 +724,7 @@ case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[Strin * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(routeePaths)) - } + def this(routeePaths: java.lang.Iterable[String]) = this(routees = iterableAsScalaIterable(routeePaths)) /** * Constructor that sets the resizer to be used. @@ -779,19 +735,16 @@ case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[Strin /** * Java API for setting routerDispatcher */ - def withDispatcher(dispatcherId: String) = copy(routerDispatcher = dispatcherId) + def withDispatcher(dispatcherId: String): SmallestMailboxRouter = copy(routerDispatcher = dispatcherId) /** * Java API for setting the supervisor strategy to be used for the “head” * Router actor. 
*/ - def withSupervisorStrategy(strategy: SupervisorStrategy) = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): SmallestMailboxRouter = copy(supervisorStrategy = strategy) } trait SmallestMailboxLike { this: RouterConfig ⇒ - - import java.security.SecureRandom - def nrOfInstances: Int def routees: Iterable[String] @@ -954,9 +907,7 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int) = { - this(nrOfInstances = nr) - } + def this(nr: Int) = this(nrOfInstances = nr) /** * Constructor that sets the routees to be used. @@ -964,9 +915,7 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(routeePaths)) - } + def this(routeePaths: java.lang.Iterable[String]) = this(routees = iterableAsScalaIterable(routeePaths)) /** * Constructor that sets the resizer to be used. @@ -977,13 +926,13 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N /** * Java API for setting routerDispatcher */ - def withDispatcher(dispatcherId: String) = copy(routerDispatcher = dispatcherId) + def withDispatcher(dispatcherId: String): BroadcastRouter = copy(routerDispatcher = dispatcherId) /** * Java API for setting the supervisor strategy to be used for the “head” * Router actor. */ - def withSupervisorStrategy(strategy: SupervisorStrategy) = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): BroadcastRouter = copy(supervisorStrategy = strategy) } trait BroadcastLike { this: RouterConfig ⇒ @@ -1069,9 +1018,7 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int, w: Duration) = { - this(nrOfInstances = nr, within = w) - } + def this(nr: Int, w: Duration) = this(nrOfInstances = nr, within = w) /** * Constructor that sets the routees to be used. @@ -1079,9 +1026,8 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String], w: Duration) = { + def this(routeePaths: java.lang.Iterable[String], w: Duration) = this(routees = iterableAsScalaIterable(routeePaths), within = w) - } /** * Constructor that sets the resizer to be used. @@ -1150,7 +1096,7 @@ trait Resizer { * This method is invoked only in the context of the Router actor in order to safely * create/stop children. */ - def resize(props: Props, routeeProvider: RouteeProvider) + def resize(props: Props, routeeProvider: RouteeProvider): Unit } case object DefaultResizer { @@ -1166,6 +1112,7 @@ case object DefaultResizer { messagesPerResize = resizerConfig.getInt("messages-per-resize")) } +//FIXME DOCUMENT ME case class DefaultResizer( /** * The fewest number of routees the router should ever have. 
@@ -1240,7 +1187,7 @@ case class DefaultResizer( def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % messagesPerResize == 0) - def resize(props: Props, routeeProvider: RouteeProvider) { + def resize(props: Props, routeeProvider: RouteeProvider): Unit = { val currentRoutees = routeeProvider.routees val requestedCapacity = capacity(currentRoutees) @@ -1258,7 +1205,7 @@ case class DefaultResizer( * Give concurrent messages a chance to be placed in mailbox before * sending PoisonPill. */ - protected def delayedStop(scheduler: Scheduler, abandon: IndexedSeq[ActorRef]) { + protected def delayedStop(scheduler: Scheduler, abandon: IndexedSeq[ActorRef]): Unit = { if (abandon.nonEmpty) { if (stopDelay <= Duration.Zero) { abandon foreach (_ ! PoisonPill) @@ -1327,9 +1274,7 @@ case class DefaultResizer( * @param capacity current number of routees * @return proposed change in the capacity */ - def filter(pressure: Int, capacity: Int): Int = { - rampup(pressure, capacity) + backoff(pressure, capacity) - } + def filter(pressure: Int, capacity: Int): Int = rampup(pressure, capacity) + backoff(pressure, capacity) /** * Computes a proposed positive (or zero) capacity delta using diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index 03d03dc785..7355e4f7fb 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -14,8 +14,6 @@ import akka.util.NonFatal import scala.collection.mutable.ArrayBuffer import java.io.NotSerializableException -case class NoSerializerFoundException(m: String) extends AkkaException(m) - object Serialization { /** @@ -120,9 +118,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { possibilities(0)._2 } serializerMap.putIfAbsent(clazz, ser) match { - case null ⇒ - log.debug("Using serializer[{}] for message [{}]", ser.getClass.getName, clazz.getName) - ser + case null ⇒ log.debug("Using serializer[{}] for message [{}]", ser.getClass.getName, clazz.getName); ser case some ⇒ some } case ser ⇒ ser @@ -140,10 +136,8 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * A Map of serializer from alias to implementation (class implementing akka.serialization.Serializer) * By default always contains the following mapping: "java" -> akka.serialization.JavaSerializer */ - private val serializers: Map[String, Serializer] = { - for ((k: String, v: String) ← settings.Serializers) - yield k -> serializerOf(v).fold(throw _, identity) - } + private val serializers: Map[String, Serializer] = + for ((k: String, v: String) ← settings.Serializers) yield k -> serializerOf(v).fold(throw _, identity) /** * bindings is a Seq of tuple representing the mapping from Class to Serializer. 
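
For context on how the class-to-serializer bindings and the `serializers` map touched above are consumed, a minimal sketch follows; it is not part of this patch, the ActorSystem name and message value are arbitrary, and it assumes the existing public entry points SerializationExtension and findSerializerFor.

    // Illustrative sketch only (not from the patch): resolving and using a serializer
    // through the Serialization extension whose internals are refactored above.
    import akka.actor.ActorSystem
    import akka.serialization.SerializationExtension

    object SerializationSketch extends App {
      val system = ActorSystem("sketch") // name is arbitrary
      val serialization = SerializationExtension(system)

      val original: AnyRef = "some message"
      // the serializer is chosen from the configured class-to-serializer bindings,
      // with the default "java" -> JavaSerializer mapping as fallback
      val serializer = serialization.findSerializerFor(original)
      val bytes = serializer.toBinary(original)
      val restored = serializer.fromBinary(bytes)

      assert(restored == original)
      system.shutdown()
    }
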
diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index 5696201f62..f6300ca998 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -6,7 +6,6 @@ package akka.serialization import java.io.{ ObjectOutputStream, ByteArrayOutputStream, ObjectInputStream, ByteArrayInputStream } import akka.util.ClassLoaderObjectInputStream -import akka.actor.DynamicAccess import akka.actor.ExtendedActorSystem import scala.util.DynamicVariable diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index 7eb90b8ef0..c7c8308de0 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -8,6 +8,12 @@ import java.util.concurrent.locks.ReentrantLock import java.util.concurrent.{ TimeUnit, BlockingQueue } import java.util.{ AbstractQueue, Queue, Collection, Iterator } +/** + * BoundedBlockingQueue wraps any Queue and turns the result into a BlockingQueue with a limited capacity + * @param maxCapacity - the maximum capacity of this Queue, needs to be > 0 + * @param backing - the backing Queue + * @tparam E - The type of the contents of this Queue + */ class BoundedBlockingQueue[E <: AnyRef]( val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] { @@ -22,7 +28,7 @@ class BoundedBlockingQueue[E <: AnyRef]( require(maxCapacity > 0) } - protected val lock = new ReentrantLock(false) + protected val lock = new ReentrantLock(false) // TODO might want to switch to ReentrantReadWriteLock private val notEmpty = lock.newCondition() private val notFull = lock.newCondition() diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala index 6d869826a8..ac074d5b28 100644 --- a/akka-actor/src/main/scala/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala/akka/util/ByteString.scala @@ -11,6 +11,7 @@ import scala.collection.mutable.{ Builder, WrappedArray } import scala.collection.immutable.{ IndexedSeq, VectorBuilder } import scala.collection.generic.CanBuildFrom +//FIXME MORE DOCS object ByteString { /** @@ -53,15 +54,16 @@ object ByteString { val empty: ByteString = CompactByteString(Array.empty[Byte]) - def newBuilder = new ByteStringBuilder + def newBuilder: ByteStringBuilder = new ByteStringBuilder - implicit def canBuildFrom = new CanBuildFrom[TraversableOnce[Byte], Byte, ByteString] { - def apply(from: TraversableOnce[Byte]) = newBuilder - def apply() = newBuilder - } + implicit val canBuildFrom: CanBuildFrom[TraversableOnce[Byte], Byte, ByteString] = + new CanBuildFrom[TraversableOnce[Byte], Byte, ByteString] { + def apply(ignore: TraversableOnce[Byte]): ByteStringBuilder = newBuilder + def apply(): ByteStringBuilder = newBuilder + } private[akka] object ByteString1C { - def apply(bytes: Array[Byte]) = new ByteString1C(bytes) + def apply(bytes: Array[Byte]): ByteString1C = new ByteString1C(bytes) } /** @@ -71,7 +73,7 @@ object ByteString { final class ByteString1C private (private val bytes: Array[Byte]) extends CompactByteString { def apply(idx: Int): Byte = bytes(idx) - override def length = bytes.length + override def length: Int = bytes.length def toArray: Array[Byte] = bytes.clone @@ -81,13 +83,11 @@ object ByteString { def compact: ByteString1C = this - def 
asByteBuffer: ByteBuffer = - toByteString1.asByteBuffer + def asByteBuffer: ByteBuffer = toByteString1.asByteBuffer def decodeString(charset: String): String = new String(bytes, charset) - def ++(that: ByteString): ByteString = - if (!that.isEmpty) toByteString1 ++ that else this + def ++(that: ByteString): ByteString = if (!that.isEmpty) toByteString1 ++ that else this override def slice(from: Int, until: Int): ByteString = if ((from != 0) || (until != length)) toByteString1.slice(from, until) @@ -96,12 +96,11 @@ object ByteString { override def copyToArray[A >: Byte](xs: Array[A], start: Int, len: Int): Unit = toByteString1.copyToArray(xs, start, len) - def copyToBuffer(buffer: ByteBuffer): Int = - toByteString1.copyToBuffer(buffer) + def copyToBuffer(buffer: ByteBuffer): Int = toByteString1.copyToBuffer(buffer) } private[akka] object ByteString1 { - def apply(bytes: Array[Byte]) = new ByteString1(bytes) + def apply(bytes: Array[Byte]): ByteString1 = new ByteString1(bytes) } /** @@ -113,7 +112,7 @@ object ByteString { def apply(idx: Int): Byte = bytes(checkRangeConvert(idx)) - private def checkRangeConvert(index: Int) = { + private def checkRangeConvert(index: Int): Int = { if (0 <= index && length > index) index + startIndex else @@ -128,8 +127,7 @@ object ByteString { override def clone: CompactByteString = ByteString1C(toArray) - def compact: CompactByteString = - if (length == bytes.length) ByteString1C(bytes) else clone + def compact: CompactByteString = if (length == bytes.length) ByteString1C(bytes) else clone def asByteBuffer: ByteBuffer = { val buffer = ByteBuffer.wrap(bytes, startIndex, length).asReadOnlyBuffer @@ -161,7 +159,6 @@ object ByteString { if (copyLength > 0) buffer.put(bytes, startIndex, copyLength) copyLength } - } private[akka] object ByteStrings { @@ -198,10 +195,11 @@ object ByteString { } // 0: both empty, 1: 2nd empty, 2: 1st empty, 3: neither empty + // Using length to check emptiness is prohibited by law def compare(b1: ByteString, b2: ByteString): Int = - if (b1.length == 0) - if (b2.length == 0) 0 else 2 - else if (b2.length == 0) 1 else 3 + if (b1.isEmpty) + if (b2.isEmpty) 0 else 2 + else if (b2.isEmpty) 1 else 3 } @@ -439,7 +437,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { private var _tempLength = 0 private var _tempCapacity = 0 - private def clearTemp() { + private def clearTemp(): Unit = { if (_tempLength > 0) { val arr = new Array[Byte](_tempLength) Array.copy(_temp, 0, arr, 0, _tempLength) @@ -448,14 +446,14 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { } } - private def resizeTemp(size: Int) { + private def resizeTemp(size: Int): Unit = { val newtemp = new Array[Byte](size) if (_tempLength > 0) Array.copy(_temp, 0, newtemp, 0, _tempLength) _temp = newtemp _tempCapacity = _temp.length } - private def ensureTempSize(size: Int) { + private def ensureTempSize(size: Int): Unit = { if (_tempCapacity < size || _tempCapacity == 0) { var newSize = if (_tempCapacity == 0) 16 else _tempCapacity * 2 while (newSize < size) newSize *= 2 @@ -498,7 +496,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { this } - def clear() { + def clear(): Unit = { _builder.clear _length = 0 _tempLength = 0 diff --git a/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala b/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala index 3ad55d69eb..ab2514861e 100644 --- a/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala +++ 
b/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala @@ -6,6 +6,13 @@ package akka.util import java.io.{ InputStream, ObjectInputStream, ObjectStreamClass } +/** + * ClassLoaderObjectInputStream tries to utilize the provided ClassLoader to load Classes and falls + * back to ObjectInputStreams resolver. + * + * @param classLoader - the ClassLoader which is to be used primarily + * @param is - the InputStream that is wrapped + */ class ClassLoaderObjectInputStream(classLoader: ClassLoader, is: InputStream) extends ObjectInputStream(is) { override protected def resolveClass(objectStreamClass: ObjectStreamClass): Class[_] = try Class.forName(objectStreamClass.getName, false, classLoader) catch { diff --git a/akka-actor/src/main/scala/akka/util/Convert.scala b/akka-actor/src/main/scala/akka/util/Convert.scala index a805b17fb2..3fead7aef7 100644 --- a/akka-actor/src/main/scala/akka/util/Convert.scala +++ b/akka-actor/src/main/scala/akka/util/Convert.scala @@ -3,7 +3,7 @@ */ package akka.util - +//FIXME DOCS! object Convert { def intToBytes(value: Int): Array[Byte] = { diff --git a/akka-actor/src/main/scala/akka/util/Crypt.scala b/akka-actor/src/main/scala/akka/util/Crypt.scala index 7dd678e748..280cd90768 100644 --- a/akka-actor/src/main/scala/akka/util/Crypt.scala +++ b/akka-actor/src/main/scala/akka/util/Crypt.scala @@ -5,7 +5,7 @@ package akka.util import java.security.{ MessageDigest, SecureRandom } - +//FIXME DOCS object Crypt { val hex = "0123456789ABCDEF" val lineSeparator = System.getProperty("line.separator") @@ -32,7 +32,7 @@ object Crypt { } def hexify(bytes: Array[Byte]): String = { - val builder = new StringBuilder + val builder = new StringBuilder(bytes.length * 2) bytes.foreach { byte ⇒ builder.append(hex.charAt((byte & 0xF0) >> 4)).append(hex.charAt(byte & 0xF)) } builder.toString } diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index a213fe1869..b37cf24c3b 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -110,6 +110,7 @@ object Duration { } val Zero: FiniteDuration = new FiniteDuration(0, NANOSECONDS) + val Undefined: Duration = new Duration with Infinite { override def toString = "Duration.Undefined" override def equals(other: Any) = other.asInstanceOf[AnyRef] eq this @@ -166,8 +167,8 @@ object Duration { * including itself. 
*/ val Inf: Duration = new Duration with Infinite { - override def toString = "Duration.Inf" - def compare(other: Duration) = if (other eq this) 0 else 1 + override def toString: String = "Duration.Inf" + def compare(other: Duration): Int = if (other eq this) 0 else 1 def unary_- : Duration = MinusInf } @@ -177,7 +178,7 @@ object Duration { */ val MinusInf: Duration = new Duration with Infinite { override def toString = "Duration.MinusInf" - def compare(other: Duration) = if (other eq this) 0 else -1 + def compare(other: Duration): Int = if (other eq this) 0 else -1 def unary_- : Duration = Inf } @@ -188,7 +189,7 @@ object Duration { def parse(s: String): Duration = unapply(s).get implicit object DurationIsOrdered extends Ordering[Duration] { - def compare(a: Duration, b: Duration) = a compare b + def compare(a: Duration, b: Duration): Int = a compare b } } @@ -263,17 +264,17 @@ abstract class Duration extends Serializable with Ordered[Duration] { def fromNow: Deadline = Deadline.now + this // Java API - def lt(other: Duration) = this < other - def lteq(other: Duration) = this <= other - def gt(other: Duration) = this > other - def gteq(other: Duration) = this >= other - def plus(other: Duration) = this + other - def minus(other: Duration) = this - other - def mul(factor: Double) = this * factor - def div(factor: Double) = this / factor - def div(other: Duration) = this / other - def neg() = -this - def isFinite() = finite_? + def lt(other: Duration): Boolean = this < other + def lteq(other: Duration): Boolean = this <= other + def gt(other: Duration): Boolean = this > other + def gteq(other: Duration): Boolean = this >= other + def plus(other: Duration): Duration = this + other + def minus(other: Duration): Duration = this - other + def mul(factor: Double): Duration = this * factor + def div(factor: Double): Duration = this / factor + def div(other: Duration): Double = this / other + def neg(): Duration = -this + def isFinite(): Boolean = finite_? } object FiniteDuration { @@ -349,31 +350,19 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { else c } - def +(other: Duration) = { - if (!other.finite_?) { - other - } else { - fromNanos(add(toNanos, other.toNanos)) - } - } + def +(other: Duration): Duration = if (!other.finite_?) other else fromNanos(add(toNanos, other.toNanos)) - def -(other: Duration) = { - if (!other.finite_?) { - other - } else { - fromNanos(add(toNanos, -other.toNanos)) - } - } + def -(other: Duration): Duration = if (!other.finite_?) other else fromNanos(add(toNanos, -other.toNanos)) - def *(factor: Double) = fromNanos(long2double(toNanos) * factor) + def *(factor: Double): FiniteDuration = fromNanos(long2double(toNanos) * factor) - def /(factor: Double) = fromNanos(long2double(toNanos) / factor) + def /(factor: Double): FiniteDuration = fromNanos(long2double(toNanos) / factor) - def /(other: Duration) = if (other.finite_?) long2double(toNanos) / other.toNanos else 0 + def /(other: Duration): Double = if (other.finite_?) long2double(toNanos) / other.toNanos else 0 - def unary_- = Duration(-length, unit) + def unary_- : FiniteDuration = Duration(-length, unit) - def finite_? = true + def finite_? 
: Boolean = true override def equals(other: Any) = (other.asInstanceOf[AnyRef] eq this) || other.isInstanceOf[FiniteDuration] && @@ -385,178 +374,74 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { } } -class DurationInt(n: Int) { +private[akka] trait DurationOps { import duration.Classifier + protected def from(timeUnit: TimeUnit): FiniteDuration + def nanoseconds: FiniteDuration = from(NANOSECONDS) + def nanos: FiniteDuration = from(NANOSECONDS) + def nanosecond: FiniteDuration = from(NANOSECONDS) + def nano: FiniteDuration = from(NANOSECONDS) - def nanoseconds = Duration(n, NANOSECONDS) - def nanos = Duration(n, NANOSECONDS) - def nanosecond = Duration(n, NANOSECONDS) - def nano = Duration(n, NANOSECONDS) + def microseconds: FiniteDuration = from(MICROSECONDS) + def micros: FiniteDuration = from(MICROSECONDS) + def microsecond: FiniteDuration = from(MICROSECONDS) + def micro: FiniteDuration = from(MICROSECONDS) - def microseconds = Duration(n, MICROSECONDS) - def micros = Duration(n, MICROSECONDS) - def microsecond = Duration(n, MICROSECONDS) - def micro = Duration(n, MICROSECONDS) + def milliseconds: FiniteDuration = from(MILLISECONDS) + def millis: FiniteDuration = from(MILLISECONDS) + def millisecond: FiniteDuration = from(MILLISECONDS) + def milli: FiniteDuration = from(MILLISECONDS) - def milliseconds = Duration(n, MILLISECONDS) - def millis = Duration(n, MILLISECONDS) - def millisecond = Duration(n, MILLISECONDS) - def milli = Duration(n, MILLISECONDS) + def seconds: FiniteDuration = from(SECONDS) + def second: FiniteDuration = from(SECONDS) - def seconds = Duration(n, SECONDS) - def second = Duration(n, SECONDS) + def minutes: FiniteDuration = from(MINUTES) + def minute: FiniteDuration = from(MINUTES) - def minutes = Duration(n, MINUTES) - def minute = Duration(n, MINUTES) + def hours: FiniteDuration = from(HOURS) + def hour: FiniteDuration = from(HOURS) - def hours = Duration(n, HOURS) - def hour = Duration(n, HOURS) + def days: FiniteDuration = from(DAYS) + def day: FiniteDuration = from(DAYS) - def days = Duration(n, DAYS) - def day = Duration(n, DAYS) + def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) + def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) + def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) + def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) - def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) + def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) + def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) + def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) - def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = 
ev.convert(Duration(n, MICROSECONDS)) - def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) + def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) + def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) + def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) - def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(SECONDS)) + def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(SECONDS)) - def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) - def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) + def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MINUTES)) + def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MINUTES)) - def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) - def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) + def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(HOURS)) + def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(HOURS)) - def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) - def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) - - def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) - def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) + def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(DAYS)) + def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(DAYS)) } -class DurationLong(n: Long) { - import duration.Classifier - - def nanoseconds = Duration(n, NANOSECONDS) - def nanos = Duration(n, NANOSECONDS) - def nanosecond = Duration(n, NANOSECONDS) - def nano = Duration(n, NANOSECONDS) - - def microseconds = Duration(n, MICROSECONDS) - def micros = Duration(n, MICROSECONDS) - def microsecond = Duration(n, MICROSECONDS) - def micro = Duration(n, MICROSECONDS) - - def milliseconds = Duration(n, MILLISECONDS) - def millis = Duration(n, MILLISECONDS) - def millisecond = Duration(n, MILLISECONDS) - def milli = Duration(n, MILLISECONDS) - - def seconds = Duration(n, SECONDS) - def second = Duration(n, SECONDS) - - def minutes = Duration(n, MINUTES) - def minute = Duration(n, MINUTES) - - def hours = Duration(n, HOURS) - def hour = Duration(n, HOURS) - - def days = Duration(n, DAYS) - def day = Duration(n, DAYS) - - def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): 
CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - - def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - - def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - - def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) - def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) - - def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) - def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) - - def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) - def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) - - def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) - def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) +class DurationInt(n: Int) extends DurationOps { + override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(n, timeUnit) } -class DurationDouble(d: Double) { - import duration.Classifier +class DurationLong(n: Long) extends DurationOps { + override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(n, timeUnit) +} - def nanoseconds = Duration(d, NANOSECONDS) - def nanos = Duration(d, NANOSECONDS) - def nanosecond = Duration(d, NANOSECONDS) - def nano = Duration(d, NANOSECONDS) - - def microseconds = Duration(d, MICROSECONDS) - def micros = Duration(d, MICROSECONDS) - def microsecond = Duration(d, MICROSECONDS) - def micro = Duration(d, MICROSECONDS) - - def milliseconds = Duration(d, MILLISECONDS) - def millis = Duration(d, MILLISECONDS) - def millisecond = Duration(d, MILLISECONDS) - def milli = Duration(d, MILLISECONDS) - - def seconds = Duration(d, SECONDS) - def second = Duration(d, SECONDS) - - def minutes = Duration(d, MINUTES) - def minute = Duration(d, MINUTES) - - def hours = Duration(d, HOURS) - def hour = Duration(d, HOURS) - - def days = Duration(d, DAYS) - def day = Duration(d, DAYS) - - def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) - def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) - def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) - def nano[C, CC <: Classifier[C]](c: C)(implicit 
ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) - - def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) - def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) - def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) - def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) - - def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) - def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) - def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) - def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) - - def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, SECONDS)) - def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, SECONDS)) - - def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MINUTES)) - def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MINUTES)) - - def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, HOURS)) - def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, HOURS)) - - def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, DAYS)) - def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, DAYS)) +class DurationDouble(d: Double) extends DurationOps { + override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(d, timeUnit) } //TODO add @SerialVersionUID(1L) when SI-4804 is fixed @@ -565,24 +450,27 @@ case class Timeout(duration: Duration) { def this(length: Long, unit: TimeUnit) = this(Duration(length, unit)) } +/** + * A Timeout is a wrapper on top of Duration to be more specific about what the duration means. + */ object Timeout { /** * A timeout with zero duration, will cause most requests to always timeout. */ - val zero = new Timeout(Duration.Zero) + val zero: Timeout = new Timeout(Duration.Zero) /** * A Timeout with infinite duration. Will never timeout. Use extreme caution with this * as it may cause memory leaks, blocked threads, or may not even be supported by * the receiver, which would result in an exception. 
*/ - val never = new Timeout(Duration.Inf) + val never: Timeout = new Timeout(Duration.Inf) - def apply(timeout: Long) = new Timeout(timeout) - def apply(length: Long, unit: TimeUnit) = new Timeout(length, unit) + def apply(timeout: Long): Timeout = new Timeout(timeout) + def apply(length: Long, unit: TimeUnit): Timeout = new Timeout(length, unit) - implicit def durationToTimeout(duration: Duration) = new Timeout(duration) - implicit def intToTimeout(timeout: Int) = new Timeout(timeout) - implicit def longToTimeout(timeout: Long) = new Timeout(timeout) + implicit def durationToTimeout(duration: Duration): Timeout = new Timeout(duration) + implicit def intToTimeout(timeout: Int): Timeout = new Timeout(timeout) + implicit def longToTimeout(timeout: Long): Timeout = new Timeout(timeout) } diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index 25cb279f2e..a3618359ac 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -45,18 +45,13 @@ object Helpers { else base64(next, sb) } - def ignore[E: Manifest](body: ⇒ Unit) { - try { - body - } catch { - case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ () - } - } + //FIXME docs + def ignore[E: Manifest](body: ⇒ Unit): Unit = + try body catch { case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ () } - def withPrintStackTraceOnError(body: ⇒ Unit) { - try { - body - } catch { + //FIXME docs + def withPrintStackTraceOnError(body: ⇒ Unit): Unit = { + try body catch { case e: Throwable ⇒ val sw = new java.io.StringWriter() var root = e diff --git a/akka-actor/src/main/scala/akka/util/Index.scala b/akka-actor/src/main/scala/akka/util/Index.scala index 1153c9e045..3289ed8f13 100644 --- a/akka-actor/src/main/scala/akka/util/Index.scala +++ b/akka-actor/src/main/scala/akka/util/Index.scala @@ -91,7 +91,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { /** * Applies the supplied function to all keys and their values */ - def foreach(fun: (K, V) ⇒ Unit) { + def foreach(fun: (K, V) ⇒ Unit): Unit = { import scala.collection.JavaConversions._ container.entrySet foreach { e ⇒ e.getValue.foreach(fun(e.getKey, _)) } } @@ -112,7 +112,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { /** * Returns the key set. */ - def keys = scala.collection.JavaConversions.collectionAsScalaIterable(container.keySet) + def keys: Iterable[K] = scala.collection.JavaConversions.collectionAsScalaIterable(container.keySet) /** * Disassociates the value of type V from the key of type K diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index 14c787d3f6..da93170019 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -24,9 +24,7 @@ class Switch(startAsOn: Boolean = false) { protected def transcend(from: Boolean, action: ⇒ Unit): Boolean = synchronized { if (switch.compareAndSet(from, !from)) { - try { - action - } catch { + try action catch { case e ⇒ switch.compareAndSet(!from, from) // revert status throw e @@ -62,18 +60,12 @@ class Switch(startAsOn: Boolean = false) { /** * Executes the provided action and returns its value if the switch is IMMEDIATELY on (i.e. 
no lock involved) */ - def ifOnYield[T](action: ⇒ T): Option[T] = { - if (switch.get) Some(action) - else None - } + def ifOnYield[T](action: ⇒ T): Option[T] = if (switch.get) Some(action) else None /** * Executes the provided action and returns its value if the switch is IMMEDIATELY off (i.e. no lock involved) */ - def ifOffYield[T](action: ⇒ T): Option[T] = { - if (!switch.get) Some(action) - else None - } + def ifOffYield[T](action: ⇒ T): Option[T] = if (!switch.get) Some(action) else None /** * Executes the provided action and returns if the action was executed or not, if the switch is IMMEDIATELY on (i.e. no lock involved) @@ -138,15 +130,15 @@ class Switch(startAsOn: Boolean = false) { /** * Executes the given code while holding this switch’s lock, i.e. protected from concurrent modification of the switch status. */ - def locked[T](code: ⇒ T) = synchronized { code } + def locked[T](code: ⇒ T): T = synchronized { code } /** * Returns whether the switch is IMMEDIATELY on (no locking) */ - def isOn = switch.get + def isOn: Boolean = switch.get /** * Returns whether the switch is IMMEDDIATELY off (no locking) */ - def isOff = !isOn + def isOff: Boolean = !isOn } diff --git a/akka-actor/src/main/scala/akka/util/Reflect.scala b/akka-actor/src/main/scala/akka/util/Reflect.scala index 25c56a983f..3a46edeab1 100644 --- a/akka-actor/src/main/scala/akka/util/Reflect.scala +++ b/akka-actor/src/main/scala/akka/util/Reflect.scala @@ -6,8 +6,10 @@ package akka.util /** * Collection of internal reflection utilities which may or may not be * available (most services specific to HotSpot, but fails gracefully). + * + * INTERNAL API */ -object Reflect { +private[akka] object Reflect { /** * This optionally holds a function which looks N levels above itself diff --git a/akka-actor/src/main/scala/akka/util/Unsafe.java b/akka-actor/src/main/scala/akka/util/Unsafe.java index 608cb3d46e..ace3c1baac 100644 --- a/akka-actor/src/main/scala/akka/util/Unsafe.java +++ b/akka-actor/src/main/scala/akka/util/Unsafe.java @@ -7,6 +7,9 @@ package akka.util; import java.lang.reflect.Field; +/** + * INTERNAL API + */ public final class Unsafe { public final static sun.misc.Unsafe instance; static { diff --git a/akka-actor/src/main/scala/akka/util/cps/package.scala b/akka-actor/src/main/scala/akka/util/cps/package.scala index 198c2beacd..a1b4bc39eb 100644 --- a/akka-actor/src/main/scala/akka/util/cps/package.scala +++ b/akka-actor/src/main/scala/akka/util/cps/package.scala @@ -7,6 +7,7 @@ package akka.util import scala.util.continuations._ import akka.dispatch.MessageDispatcher +//FIXME Needs docs package object cps { def matchC[A, B, C, D](in: A)(pf: PartialFunction[A, B @cpsParam[C, D]]): B @cpsParam[C, D] = pf(in) diff --git a/akka-actor/src/main/scala/akka/util/duration/package.scala b/akka-actor/src/main/scala/akka/util/duration/package.scala index 7f14a0be48..6a7d28a6e6 100644 --- a/akka-actor/src/main/scala/akka/util/duration/package.scala +++ b/akka-actor/src/main/scala/akka/util/duration/package.scala @@ -5,7 +5,7 @@ package akka.util import java.util.concurrent.TimeUnit - +//FIXME Needs docs package object duration { trait Classifier[C] { type R @@ -15,38 +15,32 @@ package object duration { object span implicit object spanConvert extends Classifier[span.type] { type R = FiniteDuration - def convert(d: FiniteDuration) = d + def convert(d: FiniteDuration): FiniteDuration = d } object fromNow implicit object fromNowConvert extends Classifier[fromNow.type] { type R = Deadline - def convert(d: FiniteDuration) = 
Deadline.now + d + def convert(d: FiniteDuration): Deadline = Deadline.now + d } - implicit def intToDurationInt(n: Int) = new DurationInt(n) - implicit def longToDurationLong(n: Long) = new DurationLong(n) - implicit def doubleToDurationDouble(d: Double) = new DurationDouble(d) + implicit def intToDurationInt(n: Int): DurationInt = new DurationInt(n) + implicit def longToDurationLong(n: Long): DurationLong = new DurationLong(n) + implicit def doubleToDurationDouble(d: Double): DurationDouble = new DurationDouble(d) - implicit def pairIntToDuration(p: (Int, TimeUnit)) = Duration(p._1, p._2) - implicit def pairLongToDuration(p: (Long, TimeUnit)) = Duration(p._1, p._2) - implicit def durationToPair(d: Duration) = (d.length, d.unit) + implicit def pairIntToDuration(p: (Int, TimeUnit)): FiniteDuration = Duration(p._1, p._2) + implicit def pairLongToDuration(p: (Long, TimeUnit)): FiniteDuration = Duration(p._1, p._2) + implicit def durationToPair(d: Duration): (Long, TimeUnit) = (d.length, d.unit) /* * avoid reflection based invocation by using non-duck type */ - class IntMult(i: Int) { - def *(d: Duration) = d * i - } - implicit def intMult(i: Int) = new IntMult(i) + class IntMult(i: Int) { def *(d: Duration): Duration = d * i } + implicit def intMult(i: Int): IntMult = new IntMult(i) - class LongMult(l: Long) { - def *(d: Duration) = d * l - } - implicit def longMult(l: Long) = new LongMult(l) + class LongMult(l: Long) { def *(d: Duration): Duration = d * l } + implicit def longMult(l: Long): LongMult = new LongMult(l) - class DoubleMult(f: Double) { - def *(d: Duration) = d * f - } - implicit def doubleMult(f: Double) = new DoubleMult(f) + class DoubleMult(f: Double) { def *(d: Duration): Duration = d * f } + implicit def doubleMult(f: Double): DoubleMult = new DoubleMult(f) } From 5cbcb612b2469d5b140798380d8016be5a2642f2 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 20 May 2012 15:56:52 +0200 Subject: [PATCH 052/106] Moving the HWT stuff from org.jboss.netty.akka.util to akka.util.internal --- .../util/internal/ConcurrentIdentityHashMap.java | 2 +- .../util/internal}/HashedWheelTimer.java | 12 +++++------- .../akka/util/internal/ReusableIterator.java | 2 +- .../akka/util/internal/SystemPropertyUtil.java | 2 +- .../akka/util => akka/util/internal}/Timeout.java | 2 +- .../akka/util => akka/util/internal}/Timer.java | 4 ++-- .../akka/util => akka/util/internal}/TimerTask.java | 6 +++--- .../src/main/scala/akka/actor/ActorSystem.scala | 5 ++--- akka-actor/src/main/scala/akka/actor/Scheduler.scala | 3 ++- 9 files changed, 18 insertions(+), 20 deletions(-) rename akka-actor/src/main/java/{org/jboss/netty => }/akka/util/internal/ConcurrentIdentityHashMap.java (99%) rename akka-actor/src/main/java/{org/jboss/netty/akka/util => akka/util/internal}/HashedWheelTimer.java (97%) rename akka-actor/src/main/java/{org/jboss/netty => }/akka/util/internal/ReusableIterator.java (95%) rename akka-actor/src/main/java/{org/jboss/netty => }/akka/util/internal/SystemPropertyUtil.java (98%) rename akka-actor/src/main/java/{org/jboss/netty/akka/util => akka/util/internal}/Timeout.java (97%) rename akka-actor/src/main/java/{org/jboss/netty/akka/util => akka/util/internal}/Timer.java (92%) rename akka-actor/src/main/java/{org/jboss/netty/akka/util => akka/util/internal}/TimerTask.java (82%) diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ConcurrentIdentityHashMap.java b/akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java similarity index 99% rename from 
akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ConcurrentIdentityHashMap.java rename to akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java index ff8a568d02..eb83c98f35 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ConcurrentIdentityHashMap.java +++ b/akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java @@ -18,7 +18,7 @@ * Expert Group and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ -package org.jboss.netty.akka.util.internal; +package akka.util.internal; import java.util.AbstractCollection; import java.util.AbstractMap; diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java similarity index 97% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java rename to akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java index 9eba51e53f..25841861c5 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java +++ b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java @@ -13,12 +13,10 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util; +package akka.util.internal; import akka.event.LoggingAdapter; import akka.util.Duration; -import org.jboss.netty.akka.util.internal.ConcurrentIdentityHashMap; -import org.jboss.netty.akka.util.internal.ReusableIterator; import java.util.*; import java.util.concurrent.ThreadFactory; @@ -34,7 +32,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; *

 * <h3>Tick Duration</h3>
 *
 * As described with 'approximated', this timer does not execute the scheduled
- * {@link TimerTask} on time. {@link org.jboss.netty.akka.util.HashedWheelTimer}, on every tick, will
+ * {@link TimerTask} on time. {@link HashedWheelTimer}, on every tick, will
 * check if there are any {@link TimerTask}s behind the schedule and execute
 * them.
 * <p>
@@ -46,7 +44,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 *
 * <h3>Ticks per Wheel (Wheel Size)</h3>
 *
- * {@link org.jboss.netty.akka.util.HashedWheelTimer} maintains a data structure called 'wheel'.
+ * {@link HashedWheelTimer} maintains a data structure called 'wheel'.
 * To put simply, a wheel is a hash table of {@link TimerTask}s whose hash
 * function is 'dead line of the task'. The default number of ticks per wheel
 * (i.e. the size of the wheel) is 512. You could specify a larger value
@@ -54,7 +52,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 *
 * <h3>Do not create many instances.</h3>
 *
- * {@link org.jboss.netty.akka.util.HashedWheelTimer} creates a new thread whenever it is instantiated and
+ * {@link HashedWheelTimer} creates a new thread whenever it is instantiated and
 * started. Therefore, you should make sure to create only one instance and
 * share it across your application. One of the common mistakes, that makes
 * your application unresponsive, is to create a new instance in
@@ -63,7 +61,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 *
 * <h3>Implementation Details</h3>
* - * {@link org.jboss.netty.akka.util.HashedWheelTimer} is based on + * {@link HashedWheelTimer} is based on * George Varghese and * Tony Lauck's paper, * 'Hashed diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ReusableIterator.java b/akka-actor/src/main/java/akka/util/internal/ReusableIterator.java similarity index 95% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ReusableIterator.java rename to akka-actor/src/main/java/akka/util/internal/ReusableIterator.java index 210edbe65d..8c8e5e50e5 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ReusableIterator.java +++ b/akka-actor/src/main/java/akka/util/internal/ReusableIterator.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util.internal; +package akka.util.internal; import java.util.Iterator; diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/SystemPropertyUtil.java b/akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java similarity index 98% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/internal/SystemPropertyUtil.java rename to akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java index bf3e2ac571..affef54bfc 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/SystemPropertyUtil.java +++ b/akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util.internal; +package akka.util.internal; import java.util.regex.Pattern; diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/Timeout.java b/akka-actor/src/main/java/akka/util/internal/Timeout.java similarity index 97% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/Timeout.java rename to akka-actor/src/main/java/akka/util/internal/Timeout.java index dbda2110d3..a03534bb8d 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/Timeout.java +++ b/akka-actor/src/main/java/akka/util/internal/Timeout.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util; +package akka.util.internal; /** * A handle associated with a {@link TimerTask} that is returned by a diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/Timer.java b/akka-actor/src/main/java/akka/util/internal/Timer.java similarity index 92% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/Timer.java rename to akka-actor/src/main/java/akka/util/internal/Timer.java index b5bd8c6a7c..9cb02794de 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/Timer.java +++ b/akka-actor/src/main/java/akka/util/internal/Timer.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util; +package akka.util.internal; import akka.util.Duration; import java.util.Set; @@ -45,7 +45,7 @@ public interface Timer { Timeout newTimeout(TimerTask task, Duration delay); /** - * Releases all resources acquired by this {@link org.jboss.netty.akka.util.Timer} and cancels all + * Releases all resources acquired by this {@link Timer} and cancels all * tasks which were scheduled but not executed yet. 
* * @return the handles associated with the tasks which were canceled by diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/TimerTask.java b/akka-actor/src/main/java/akka/util/internal/TimerTask.java similarity index 82% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/TimerTask.java rename to akka-actor/src/main/java/akka/util/internal/TimerTask.java index 3d0190d8f5..673dde67c7 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/TimerTask.java +++ b/akka-actor/src/main/java/akka/util/internal/TimerTask.java @@ -13,11 +13,11 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util; +package akka.util.internal; /** * A task which is executed after the delay specified with - * {@link Timer#newTimeout(org.jboss.netty.akka.util.TimerTask, long, java.util.concurrent.TimeUnit)} + * {@link Timer#newTimeout(TimerTask, long, java.util.concurrent.TimeUnit)} * . * * @author The Netty Project @@ -28,7 +28,7 @@ public interface TimerTask { /** * Executed after the delay specified with - * {@link Timer#newTimeout(org.jboss.netty.akka.util.TimerTask, long, java.util.concurrent.TimeUnit)} + * {@link Timer#newTimeout(TimerTask, long, java.util.concurrent.TimeUnit)} * . * * @param timeout diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index a1d30ddbc6..ab2996f0a7 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -7,16 +7,15 @@ package akka.actor import akka.event._ import akka.dispatch._ import akka.pattern.ask -import org.jboss.netty.akka.util.HashedWheelTimer -import java.util.concurrent.TimeUnit.MILLISECONDS import com.typesafe.config.{ Config, ConfigFactory } import scala.annotation.tailrec -import org.jboss.netty.akka.util.internal.ConcurrentIdentityHashMap import java.io.Closeable import akka.dispatch.Await.{ Awaitable, CanAwait } import akka.util._ +import akka.util.internal.{ HashedWheelTimer, ConcurrentIdentityHashMap } import collection.immutable.Stack import java.util.concurrent.{ ThreadFactory, CountDownLatch, TimeoutException, RejectedExecutionException } +import java.util.concurrent.TimeUnit.MILLISECONDS object ActorSystem { diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 91e54a592d..8ad3d8ee98 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -5,12 +5,13 @@ package akka.actor import akka.util.Duration -import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout, Timer } +import akka.util.internal.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout, Timer } import akka.event.LoggingAdapter import akka.dispatch.MessageDispatcher import java.io.Closeable import java.util.concurrent.atomic.AtomicReference import scala.annotation.tailrec +import akka.util.internal._ //#scheduler /** From 1a3329baa2c9376288d2b534c6935ae870df26a4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 20 May 2012 16:00:24 +0200 Subject: [PATCH 053/106] #2091 - Adding a small intro to the Microkernel docs to state what the purpose of it is. 
--- akka-docs/java/microkernel.rst | 4 ++++ akka-docs/scala/microkernel.rst | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/akka-docs/java/microkernel.rst b/akka-docs/java/microkernel.rst index 551c118e94..12afbc93c9 100644 --- a/akka-docs/java/microkernel.rst +++ b/akka-docs/java/microkernel.rst @@ -4,6 +4,10 @@ Microkernel (Java) ================== +The purpose of the Akka Microkernel is to offer a bundling mechanism so that you can distribute +an Akka application as a single payload, without the need to run in a Java Application Server or manually +having to create a launcher script. + The Akka Microkernel is included in the Akka download found at `downloads`_. .. _downloads: http://akka.io/downloads diff --git a/akka-docs/scala/microkernel.rst b/akka-docs/scala/microkernel.rst index 8fb1aec2c2..df0e623eee 100644 --- a/akka-docs/scala/microkernel.rst +++ b/akka-docs/scala/microkernel.rst @@ -4,6 +4,10 @@ Microkernel (Scala) =================== +The purpose of the Akka Microkernel is to offer a bundling mechanism so that you can distribute +an Akka application as a single payload, without the need to run in a Java Application Server or manually +having to create a launcher script. + The Akka Microkernel is included in the Akka download found at `downloads`_. .. _downloads: http://akka.io/downloads From 4a2227fc95314610577fd27eb75c669da1e98ad6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 20 May 2012 19:03:20 +0200 Subject: [PATCH 054/106] Removing the AtomicReference from Dispatcher and restructured the code a bit --- .../akka/dispatch/BalancingDispatcher.scala | 2 +- .../main/scala/akka/dispatch/Dispatcher.scala | 33 ++++++++++++------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index ee492409ec..dea29643c7 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -87,7 +87,7 @@ class BalancingDispatcher( @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = if (messageQueue.hasMessages && i.hasNext - && (executorService.get().executor match { + && (executorService match { case lm: LoadMetrics ⇒ lm.atFullThrottle == false case other ⇒ true }) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 3a73bf0718..8dd7ecf8a2 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -33,11 +33,15 @@ class Dispatcher( val shutdownTimeout: Duration) extends MessageDispatcher(_prerequisites) { - protected val executorServiceFactory: ExecutorServiceFactory = - executorServiceFactoryProvider.createExecutorServiceFactory(id, prerequisites.threadFactory) + private class LazyExecutorServiceDelegate(factory: ExecutorServiceFactory) extends ExecutorServiceDelegate { + lazy val executor: ExecutorService = factory.createExecutorService + def copy(): LazyExecutorServiceDelegate = new LazyExecutorServiceDelegate(factory) + } - protected val executorService = new AtomicReference[ExecutorServiceDelegate]( - new ExecutorServiceDelegate { lazy val executor = executorServiceFactory.createExecutorService }) + @volatile private var executorServiceDelegate: LazyExecutorServiceDelegate = + new 
LazyExecutorServiceDelegate(executorServiceFactoryProvider.createExecutorServiceFactory(id, prerequisites.threadFactory)) + + protected final def executorService: ExecutorService = executorServiceDelegate /** * INTERNAL USE ONLY @@ -62,11 +66,11 @@ class Dispatcher( */ protected[akka] def executeTask(invocation: TaskInvocation) { try { - executorService.get() execute invocation + executorService execute invocation } catch { case e: RejectedExecutionException ⇒ try { - executorService.get() execute invocation + executorService execute invocation } catch { case e2: RejectedExecutionException ⇒ prerequisites.eventStream.publish(Error(e, getClass.getName, getClass, "executeTask was rejected twice!")) @@ -83,10 +87,15 @@ class Dispatcher( /** * INTERNAL USE ONLY */ - protected[akka] def shutdown: Unit = - Option(executorService.getAndSet(new ExecutorServiceDelegate { - lazy val executor = executorServiceFactory.createExecutorService - })) foreach { _.shutdown() } + protected[akka] def shutdown: Unit = { + val newDelegate = executorServiceDelegate.copy() // Doesn't matter which one we copy + val es = synchronized { // FIXME getAndSet using ARFU or Unsafe + val service = executorServiceDelegate + executorServiceDelegate = newDelegate // just a quick getAndSet + service + } + es.shutdown() + } /** * Returns if it was registered @@ -97,12 +106,12 @@ class Dispatcher( if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { //This needs to be here to ensure thread safety and no races if (mbox.setAsScheduled()) { try { - executorService.get() execute mbox + executorService execute mbox true } catch { case e: RejectedExecutionException ⇒ try { - executorService.get() execute mbox + executorService execute mbox true } catch { //Retry once case e: RejectedExecutionException ⇒ From e357b9825b11617679a1ba44e1e5fdbb44e45f4c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 20 May 2012 19:06:31 +0200 Subject: [PATCH 055/106] Adding return types in Dispatcher --- akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 8dd7ecf8a2..c8ae187c66 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -46,7 +46,7 @@ class Dispatcher( /** * INTERNAL USE ONLY */ - protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope) = { + protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = { val mbox = receiver.mailbox mbox.enqueue(receiver.self, invocation) registerForExecution(mbox, true, false) @@ -55,7 +55,7 @@ class Dispatcher( /** * INTERNAL USE ONLY */ - protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage) = { + protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage): Unit = { val mbox = receiver.mailbox mbox.systemEnqueue(receiver.self, invocation) registerForExecution(mbox, false, true) @@ -124,7 +124,7 @@ class Dispatcher( } else false } - override val toString = Logging.simpleName(this) + "[" + id + "]" + override val toString: String = Logging.simpleName(this) + "[" + id + "]" } object PriorityGenerator { From 162d59db35b680594bcbdae56934807697371845 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 21 May 2012 08:08:51 +0200 Subject: [PATCH 056/106] Removed ResizerSpec test violating routing rules 
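For context, the FIXME in the removed test (see the diff below) states the rule: a BalancingDispatcher may not be combined with any kind of Router, and the verification step now rejects that combination with a ConfigurationException. The following is a minimal sketch of the rejected setup, assuming the 2.0-era routing and dispatcher-configuration APIs shown elsewhere in this series; the dispatcher id "bal-disp", the actor and object names are illustrative only, not part of this patch.

// Sketch: configuring a router whose routees run on a BalancingDispatcher.
// Expected outcome (per the removed test's FIXME): akka.ConfigurationException
// during verification, since routing and the balancing mailbox would both try
// to distribute the same messages.
import akka.actor.{ Actor, ActorSystem, Props }
import akka.routing.RoundRobinRouter
import com.typesafe.config.ConfigFactory

object BalancingRouterRejection extends App {
  // dispatcher id and settings are made up for the example
  val system = ActorSystem("demo", ConfigFactory.parseString("""
    bal-disp {
      type = BalancingDispatcher
    }
    """))

  class Echo extends Actor {
    def receive = { case msg ⇒ sender ! msg }
  }

  try
    system.actorOf(Props(new Echo).withRouter(RoundRobinRouter(nrOfInstances = 2)).withDispatcher("bal-disp"))
  catch {
    case e: akka.ConfigurationException ⇒ println("rejected as expected: " + e.getMessage)
  }

  system.shutdown()
}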
--- .../test/scala/akka/routing/ResizerSpec.scala | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 111460e3ac..ede4a69d7c 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -128,36 +128,6 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with current.routees.size must be(2) } - // FIXME this test violates the rule that you can not use a BalancingDispatcher with any kind of Router - now throws a ConfigurationException in verification process - "resize when busy" ignore { - - val busy = new TestLatch(1) - - val resizer = DefaultResizer( - lowerBound = 1, - upperBound = 3, - pressureThreshold = 0, - messagesPerResize = 1) - - val router = system.actorOf(Props[BusyActor].withRouter(RoundRobinRouter(resizer = Some(resizer))).withDispatcher("bal-disp")) - - val latch1 = new TestLatch(1) - router ! (latch1, busy) - Await.ready(latch1, 2 seconds) - - val latch2 = new TestLatch(1) - router ! (latch2, busy) - Await.ready(latch2, 2 seconds) - - val latch3 = new TestLatch(1) - router ! (latch3, busy) - Await.ready(latch3, 2 seconds) - - Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3) - - busy.countDown() - } - "grow as needed under pressure" in { // make sure the pool starts at the expected lower limit and grows to the upper as needed // as influenced by the backlog of blocking pooled actors From 7784513537df669f048484feea900963fc1a9cb1 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 13:39:39 +0200 Subject: [PATCH 057/106] Fixing logic bug introduced in refactor --- .../src/main/scala/akka/dispatch/BalancingDispatcher.scala | 2 +- akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index dea29643c7..43e8944105 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -87,7 +87,7 @@ class BalancingDispatcher( @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = if (messageQueue.hasMessages && i.hasNext - && (executorService match { + && (executorService.executor match { case lm: LoadMetrics ⇒ lm.atFullThrottle == false case other ⇒ true }) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index c8ae187c66..3c17ab8db4 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -41,7 +41,7 @@ class Dispatcher( @volatile private var executorServiceDelegate: LazyExecutorServiceDelegate = new LazyExecutorServiceDelegate(executorServiceFactoryProvider.createExecutorServiceFactory(id, prerequisites.threadFactory)) - protected final def executorService: ExecutorService = executorServiceDelegate + protected final def executorService: ExecutorServiceDelegate = executorServiceDelegate /** * INTERNAL USE ONLY From e14f9d01304db0fc42a96f641d6107ab48ee57ee Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 13:40:14 +0200 Subject: [PATCH 058/106] Switching to indexOf instead of 
Seq.contains --- akka-actor/src/main/scala/akka/actor/ActorSelection.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index 44767cb0b6..93a26c6289 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -52,7 +52,7 @@ object ActorSelection { */ def apply(anchor: ActorRef, path: String): ActorSelection = { val elems = path.split("/+").dropWhile(_.isEmpty) - val compiled: Array[AnyRef] = elems map (x ⇒ if (x.contains('?') || x.contains('*')) Helpers.makePattern(x) else x) + val compiled: Array[AnyRef] = elems map (x ⇒ if ((x.indexOf('?') != -1) || (x.indexOf('*') != -1)) Helpers.makePattern(x) else x) new ActorSelection with ScalaActorSelection { def target = anchor def path = compiled From 26f6c48ae1c5642b8fb4752829211e16bc0e7762 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 13:47:48 +0200 Subject: [PATCH 059/106] HUGE CHANGE - moving behaviorStack into ActorCell --- .../test/scala/akka/actor/ActorRefSpec.scala | 2 +- .../scala/akka/actor/ActorWithStashSpec.scala | 2 +- .../test/scala/akka/actor/FSMActorSpec.scala | 2 +- .../scala/akka/actor/ReceiveTimeoutSpec.scala | 8 ++-- .../akka/actor/RestartStrategySpec.scala | 10 ++-- .../akka/actor/SupervisorHierarchySpec.scala | 4 +- .../scala/akka/actor/SupervisorMiscSpec.scala | 2 +- .../akka/actor/dispatch/ActorModelSpec.scala | 4 +- .../src/main/scala/akka/actor/Actor.scala | 45 ++---------------- .../src/main/scala/akka/actor/ActorCell.scala | 29 +++++++---- .../src/main/scala/akka/actor/ActorPath.scala | 4 +- .../src/main/scala/akka/actor/ActorRef.scala | 10 ++-- .../src/main/scala/akka/actor/FSM.scala | 2 +- .../main/scala/akka/actor/FaultHandling.scala | 7 +-- akka-actor/src/main/scala/akka/actor/IO.scala | 2 +- .../src/main/scala/akka/actor/Scheduler.scala | 2 +- .../main/scala/akka/actor/UntypedActor.scala | 2 +- .../src/main/scala/akka/camel/Producer.scala | 2 +- .../akka/camelexamples/ExamplesSupport.scala | 6 +-- .../_2_SupervisedConsumers.scala | 4 +- .../akka/camel/ProducerFeatureTest.scala | 4 +- .../component/ActorProducerTest.scala | 2 +- akka-docs/scala/actors.rst | 2 +- .../scala/akka/testkit/TestActorRef.scala | 2 +- .../zeromq/ConcurrentSocketActorSpec.scala | 2 +- file-based/mailbox_user__a | 0 file-based/mailbox_user__b | 0 file-based/mailbox_user__c | Bin 12892 -> 0 bytes file-based/mailbox_user__d | Bin 703 -> 0 bytes file-based/mailbox_user__e | Bin 703 -> 0 bytes file-based/mailbox_user__f | Bin 703 -> 0 bytes 31 files changed, 71 insertions(+), 90 deletions(-) delete mode 100644 file-based/mailbox_user__a delete mode 100644 file-based/mailbox_user__b delete mode 100644 file-based/mailbox_user__c delete mode 100644 file-based/mailbox_user__d delete mode 100644 file-based/mailbox_user__e delete mode 100644 file-based/mailbox_user__f diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala index e8c667bc7e..bec066d97a 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala @@ -393,7 +393,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { override def postRestart(reason: Throwable) = latch.countDown() })) - protected def receive = { case "sendKill" ⇒ ref ! Kill } + def receive = { case "sendKill" ⇒ ref ! 
Kill } })) boss ! "sendKill" diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala index c516a17a42..524913b01d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala @@ -131,7 +131,7 @@ class ActorWithStashSpec extends AkkaSpec(ActorWithStashSpec.testConf) with Defa val hasMsgLatch = new TestLatch val slaveProps = myProps(new Actor with Stash { - protected def receive = { + def receive = { case "crash" ⇒ throw new Exception("Crashing...") diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index ef49cbc18d..cc98a23f1f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -147,7 +147,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im object Hello object Bye val tester = system.actorOf(Props(new Actor { - protected def receive = { + def receive = { case Hello ⇒ lock ! "hello" case "world" ⇒ answerLatch.open case Bye ⇒ lock ! "bye" diff --git a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala index dc08df1c98..09fe9c103f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala @@ -22,7 +22,7 @@ class ReceiveTimeoutSpec extends AkkaSpec { val timeoutActor = system.actorOf(Props(new Actor { context.setReceiveTimeout(500 milliseconds) - protected def receive = { + def receive = { case ReceiveTimeout ⇒ timeoutLatch.open } })) @@ -38,7 +38,7 @@ class ReceiveTimeoutSpec extends AkkaSpec { val timeoutActor = system.actorOf(Props(new Actor { context.setReceiveTimeout(500 milliseconds) - protected def receive = { + def receive = { case Tick ⇒ () case ReceiveTimeout ⇒ timeoutLatch.open } @@ -58,7 +58,7 @@ class ReceiveTimeoutSpec extends AkkaSpec { val timeoutActor = system.actorOf(Props(new Actor { context.setReceiveTimeout(500 milliseconds) - protected def receive = { + def receive = { case Tick ⇒ () case ReceiveTimeout ⇒ count.incrementAndGet @@ -78,7 +78,7 @@ class ReceiveTimeoutSpec extends AkkaSpec { val timeoutLatch = TestLatch() val timeoutActor = system.actorOf(Props(new Actor { - protected def receive = { + def receive = { case ReceiveTimeout ⇒ timeoutLatch.open } })) diff --git a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala index 829ab081e0..8d114bc396 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala @@ -40,7 +40,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Ping ⇒ countDownLatch.countDown() case Crash ⇒ throw new Exception("Crashing...") } @@ -83,7 +83,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Crash ⇒ throw new Exception("Crashing...") } @@ -110,7 +110,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + 
def receive = { case Ping ⇒ if (!pingLatch.isOpen) pingLatch.open else secondPingLatch.open case Crash ⇒ throw new Exception("Crashing...") @@ -166,7 +166,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Ping ⇒ countDownLatch.countDown() case Crash ⇒ throw new Exception("Crashing...") } @@ -221,7 +221,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Ping ⇒ countDownLatch.countDown() case Crash ⇒ throw new Exception("Crashing...") } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala index a04e83f39b..62752d8052 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala @@ -20,7 +20,7 @@ object SupervisorHierarchySpec { */ class CountDownActor(countDown: CountDownLatch, override val supervisorStrategy: SupervisorStrategy) extends Actor { - protected def receive = { + def receive = { case p: Props ⇒ sender ! context.actorOf(p) } // test relies on keeping children around during restart @@ -67,7 +67,7 @@ class SupervisorHierarchySpec extends AkkaSpec with DefaultTimeout { val crasher = context.watch(context.actorOf(Props(new CountDownActor(countDownMessages, SupervisorStrategy.defaultStrategy)))) - protected def receive = { + def receive = { case "killCrasher" ⇒ crasher ! Kill case Terminated(_) ⇒ countDownMax.countDown() } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala index 92af540a9a..197e749d2e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala @@ -37,7 +37,7 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul val workerProps = Props(new Actor { override def postRestart(cause: Throwable) { countDownLatch.countDown() } - protected def receive = { + def receive = { case "status" ⇒ this.sender ! "OK" case _ ⇒ this.context.stop(self) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 88358e9f16..854a562745 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -400,9 +400,9 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa val a = newTestActor(dispatcher.id) val f1 = a ? Reply("foo") val f2 = a ? Reply("bar") - val f3 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(ActorInterruptedException(ie)) } + val f3 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)) } val f4 = a ? Reply("foo2") - val f5 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(ActorInterruptedException(ie)) } + val f5 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)) } val f6 = a ? 
Reply("bar2") assert(Await.result(f1, timeout.duration) === "foo") diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 7c020925eb..b26da76ff2 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -152,7 +152,7 @@ case class DeathPactException private[akka] (dead: ActorRef) * When an InterruptedException is thrown inside an Actor, it is wrapped as an ActorInterruptedException as to * avoid cascading interrupts to other threads than the originally interrupted one. */ -case class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaException(cause.getMessage, cause) with NoStackTrace +class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaException(cause.getMessage, cause) with NoStackTrace /** * This message is published to the EventStream whenever an Actor receives a message it doesn't understand @@ -173,6 +173,7 @@ object Status { /** * This class/message type is preferably used to indicate failure of some operation performed. + * As an example, it is used to signal failure with AskSupport is used (ask/?). */ case class Failure(cause: Throwable) extends Status } @@ -317,7 +318,7 @@ trait Actor { * This defines the initial actor behavior, it must return a partial function * with the actor logic. */ - protected def receive: Receive + def receive: Receive /** * User overridable definition the strategy to use for supervising @@ -377,45 +378,5 @@ trait Actor { case _ ⇒ context.system.eventStream.publish(UnhandledMessage(message, sender, self)) } } - - // ========================================= - // ==== INTERNAL IMPLEMENTATION DETAILS ==== - // ========================================= - - /** - * For Akka internal use only. - */ - private[akka] final def apply(msg: Any) = { - //FIXME replace with behaviorStack.head.applyOrElse(msg, unhandled) + "-optimize" - val head = behaviorStack.head - if (head.isDefinedAt(msg)) head.apply(msg) else unhandled(msg) - } - - /** - * For Akka internal use only. - */ - private[akka] def pushBehavior(behavior: Receive): Unit = { - behaviorStack = behaviorStack.push(behavior) - } - - /** - * For Akka internal use only. - */ - private[akka] def popBehavior(): Unit = { - val original = behaviorStack - val popped = original.pop - behaviorStack = if (popped.isEmpty) original else popped - } - - /** - * For Akka internal use only. - */ - private[akka] def clearBehaviorStack(): Unit = - behaviorStack = Stack.empty[Receive].push(behaviorStack.last) - - /** - * For Akka internal use only. 
- */ - private var behaviorStack: Stack[Receive] = Stack.empty[Receive].push(receive) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index bd5342fec4..6a25ac04a9 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -409,6 +409,8 @@ private[akka] class ActorCell( var actor: Actor = _ + private var behaviorStack: Stack[Actor.Receive] = Stack.empty + @volatile //This must be volatile since it isn't protected by the mailbox status var mailbox: Mailbox = _ @@ -489,8 +491,7 @@ private[akka] class ActorCell( //This method is in charge of setting up the contextStack and create a new instance of the Actor protected def newActor(): Actor = { - val stackBefore = contextStack.get - contextStack.set(stackBefore.push(this)) + contextStack.set(contextStack.get.push(this)) try { val instance = props.creator() @@ -511,6 +512,7 @@ private[akka] class ActorCell( def create(): Unit = if (isNormal) { try { val created = newActor() + behaviorStack = Stack.empty.push(created.receive) actor = created created.preStart() checkReceiveTimeout @@ -612,7 +614,7 @@ private[akka] class ActorCell( cancelReceiveTimeout() // FIXME: leave this here??? messageHandle.message match { case msg: AutoReceivedMessage ⇒ autoReceiveMessage(messageHandle) - case msg ⇒ actor(msg) + case msg ⇒ receiveMessage(msg) } currentMessage = null // reset current message after successful invocation } catch { @@ -628,14 +630,14 @@ private[akka] class ActorCell( if (actor ne null) actor.supervisorStrategy.handleSupervisorFailing(self, children) } finally { t match { // Wrap InterruptedExceptions and rethrow - case _: InterruptedException ⇒ parent.tell(Failed(ActorInterruptedException(t)), self); throw t + case _: InterruptedException ⇒ parent.tell(Failed(new ActorInterruptedException(t)), self); throw t case _ ⇒ parent.tell(Failed(t), self) } } def become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = { if (discardOld) unbecome() - actor.pushBehavior(behavior) + behaviorStack = behaviorStack.push(behavior) } /** @@ -651,9 +653,13 @@ private[akka] class ActorCell( become(newReceive, discardOld) } - def unbecome(): Unit = actor.popBehavior() + def unbecome(): Unit = { + val original = behaviorStack + val popped = original.pop + behaviorStack = if (popped.isEmpty) original else popped + } - def autoReceiveMessage(msg: Envelope) { + def autoReceiveMessage(msg: Envelope): Unit = { if (system.settings.DebugAutoReceive) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "received AutoReceiveMessage " + msg)) @@ -667,6 +673,12 @@ private[akka] class ActorCell( } } + final def receiveMessage(msg: Any): Unit = { + //FIXME replace with behaviorStack.head.applyOrElse(msg, unhandled) + "-optimize" + val head = behaviorStack.head + if (head.isDefinedAt(msg)) head.apply(msg) else actor.unhandled(msg) + } + private def doTerminate() { val a = actor try { @@ -682,7 +694,7 @@ private[akka] class ActorCell( if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) } finally { - if (a ne null) a.clearBehaviorStack() + behaviorStack = Stack.empty clearActorFields(a) actor = null } @@ -694,6 +706,7 @@ private[akka] class ActorCell( actor.supervisorStrategy.handleSupervisorRestarted(cause, self, children) val freshActor = newActor() + behaviorStack = Stack.empty.push(freshActor.receive) actor = freshActor // this must happen before 
postRestart has a chance to fail if (freshActor eq failedActor) setActorFields(freshActor, this, self) // If the creator returns the same instance, we need to restore our nulled out fields. diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index e8c277660f..aa93dbcc47 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -15,7 +15,9 @@ object ActorPath { } /** - * This Regular Expression is used to validate a path element (Actor Name) + * This Regular Expression is used to validate a path element (Actor Name). + * Since Actors form a tree, each Actor is addressable using a URL, therefore an Actor Name has to conform to: + * http://www.ietf.org/rfc/rfc2396.txt */ val ElementRegex = """[-\w:@&=+,.!~*'_;][-\w:@&=+,.!~*'$_;]*""".r } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index d0ad270957..46e3440f95 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -335,9 +335,10 @@ private[akka] class LocalActorRef private[akka] ( /** * Memento pattern for serializing ActorRefs transparently + * INTERNAL API */ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed -case class SerializedActorRef private (path: String) { +private[akka] case class SerializedActorRef private (path: String) { import akka.serialization.JavaSerializer.currentSystem @throws(classOf[java.io.ObjectStreamException]) @@ -350,8 +351,11 @@ case class SerializedActorRef private (path: String) { someSystem.actorFor(path) } } -//FIXME: Should SerializedActorRef be private[akka] ? -object SerializedActorRef { + +/** + * INTERNAL API + */ +private[akka] object SerializedActorRef { def apply(path: ActorPath): SerializedActorRef = { Serialization.currentTransportAddress.value match { case null ⇒ new SerializedActorRef(path.toString) diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 71d1ec7e69..762f23b16b 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -437,7 +437,7 @@ trait FSM[S, D] extends Listeners with ActorLogging { * Main actor receive() method * ******************************************* */ - override final protected def receive: Receive = { + override final def receive: Receive = { case TimeoutMarker(gen) ⇒ if (generation == gen) { processMsg(StateTimeout, "state timeout") diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 383010f9de..8641153476 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -176,9 +176,10 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { /** * Sort so that subtypes always precede their supertypes, but without * obeying any order between unrelated subtypes (insert sort). + * + * INTERNAL API */ - //FIXME Should this really be public API?
- def sort(in: Iterable[CauseDirective]): Seq[CauseDirective] = + private[akka] def sort(in: Iterable[CauseDirective]): Seq[CauseDirective] = (new ArrayBuffer[CauseDirective](in.size) /: in) { (buf, ca) ⇒ buf.indexWhere(_._1 isAssignableFrom ca._1) match { case -1 ⇒ buf append ca @@ -195,7 +196,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { } /** - * An Akka SupervisorStrategy is + * An Akka SupervisorStrategy is the policy to apply for crashing children */ abstract class SupervisorStrategy { diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index 3ff91c4fa8..63eb2e4b3a 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -987,7 +987,7 @@ final class IOManagerActor extends Actor with ActorLogging { } } - protected def receive = { + def receive = { case Select ⇒ select() if (running) self ! Select diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 8ad3d8ee98..850cb02048 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -236,7 +236,7 @@ private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout](Con def isCancelled(): Boolean = get match { case null ⇒ true - case some ⇒ isCancelled() + case some ⇒ some.isCancelled() } def cancel(): Unit = diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index c56a2a0167..9420ab84cc 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -156,7 +156,7 @@ abstract class UntypedActor extends Actor { */ override def postRestart(reason: Throwable): Unit = super.postRestart(reason) - final protected def receive = { case msg ⇒ onReceive(msg) } + final def receive = { case msg ⇒ onReceive(msg) } } /** diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala index 80537fda12..33541d4611 100644 --- a/akka-camel/src/main/scala/akka/camel/Producer.scala +++ b/akka-camel/src/main/scala/akka/camel/Producer.scala @@ -134,7 +134,7 @@ trait Producer extends ProducerSupport { this: Actor ⇒ * Default implementation of Actor.receive. Any messages received by this actors * will be produced to the endpoint specified by endpointUri. 
*/ - protected def receive = produce + def receive = produce } /** diff --git a/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala b/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala index 3e35b8c7c9..df5b0e5508 100644 --- a/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala +++ b/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala @@ -19,7 +19,7 @@ private[camelexamples] class SysOutConsumer extends Consumer { override def activationTimeout = 10 seconds def endpointUri = "file://data/input/CamelConsumer" - protected def receive = { + def receive = { case msg: CamelMessage ⇒ { printf("Received '%s'\n", msg.bodyAs[String]) } @@ -30,12 +30,12 @@ private[camelexamples] class TroubleMaker extends Consumer { def endpointUri = "WRONG URI" println("Trying to instantiate conumer with uri: " + endpointUri) - protected def receive = { case _ ⇒ } + def receive = { case _ ⇒ } } private[camelexamples] class SysOutActor(implicit camel: Camel) extends Actor { implicit val camelContext = camel.context - protected def receive = { + def receive = { case msg: CamelMessage ⇒ { printf("Received '%s'\n", msg.bodyAs[String]) } diff --git a/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala b/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala index 5d321b28db..cdf46f012f 100644 --- a/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala +++ b/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala @@ -14,7 +14,7 @@ private[camelexamples] object SupervisedConsumersExample extends App { system.actorOf(Props(new Actor { context.watch(context.actorOf(Props[EndpointManager])) - protected def receive = { + def receive = { case Terminated(ref) ⇒ system.shutdown() } })) @@ -30,7 +30,7 @@ private[camelexamples] class EndpointManager extends Actor { watch(actorOf(Props[SysOutConsumer])) watch(actorOf(Props[TroubleMaker])) - protected def receive = { + def receive = { case Terminated(ref) ⇒ { printf("Hey! One of the endpoints has died: %s. I am doing sepuku...\n", ref) self ! PoisonPill diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala index cef098b8fe..a7e5b9e4cb 100644 --- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala @@ -244,7 +244,7 @@ object ProducerFeatureTest { } class TestResponder extends Actor { - protected def receive = { + def receive = { case msg: CamelMessage ⇒ msg.body match { case "fail" ⇒ context.sender ! akka.actor.Status.Failure(new AkkaCamelException(new Exception("failure"), msg.headers)) case _ ⇒ @@ -256,7 +256,7 @@ object ProducerFeatureTest { } class ReplyingForwardTarget extends Actor { - protected def receive = { + def receive = { case msg: CamelMessage ⇒ context.sender ! 
(msg.addHeader("test" -> "result")) case msg: akka.actor.Status.Failure ⇒ diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala index 8146b17399..a0e153fd54 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala @@ -334,7 +334,7 @@ trait ActorProducerFixture extends MockitoSugar with BeforeAndAfterAll with Befo } def echoActor = system.actorOf(Props(new Actor { - protected def receive = { + def receive = { case msg ⇒ sender ! "received " + msg } })) diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 5374c8a37c..9269c841f5 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -443,7 +443,7 @@ An Actor has to implement the ``receive`` method to receive messages: .. code-block:: scala - protected def receive: PartialFunction[Any, Unit] + def receive: PartialFunction[Any, Unit] Note: Akka has an alias to the ``PartialFunction[Any, Unit]`` type called ``Receive`` (``akka.actor.Actor.Receive``), so you can use this type instead for diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 8a2f61bf76..0a5d6163e8 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -56,7 +56,7 @@ class TestActorRef[T <: Actor]( * thrown will be available to you, while still being able to use * become/unbecome. */ - def receive(o: Any) { underlyingActor.apply(o) } + def receive(o: Any): Unit = underlying.receiveMessage(o) /** * Retrieve reference to the underlying actor, where the static type matches the factory used inside the diff --git a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala index dcc456b544..fe0d715dba 100644 --- a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala +++ b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala @@ -138,7 +138,7 @@ class ConcurrentSocketActorSpec extends AkkaSpec { } } - protected def receive = { + def receive = { case _ ⇒ val payload = "%s".format(messageNumber) messageNumber += 1 diff --git a/file-based/mailbox_user__a b/file-based/mailbox_user__a deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/file-based/mailbox_user__b b/file-based/mailbox_user__b deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/file-based/mailbox_user__c b/file-based/mailbox_user__c deleted file mode 100644 index e84f2a5e4f42e4329047049aad7186418b4bb478..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12892 zcmZQ7Wnf_NcU_QY&d2}E-d`)^;Mm4L+D95PfYcdKlYFSN2Nk%QJ$tcRGWi=UP8MUk?qcEeE)dN?W z84X-%W<+Lb7D9O$ZbW8jMh&aUEX}B4HJPOuHLNDHG^2*qWR_;su$s)$j2c#xS(;J9 zYBEbRDp+kyW@$zZs|T($GbXb%3#GguH72t(qlVRFmS)tjn#|IS8dj58no+}QGD|aR zSWRYWMh&aUEX}B3wF#M}88xgXvoxcI)dN?WnUGnUg;8FRnvhwVQNwC7OEYR%O=f9E z4Xep4&8T5DnWY&utR}NGqk`3@WR_;(lxHqeGD|aRSWRYWMh&aUEX}B4HJPOuHLNDH zG^2*qWR_;su$s)$j2c#xS(-&qAzzbOno+@OGcrpvYFJHXX+{mJ$t=yNVKteh88xgX zvoxcI)nt}t)UcY&(kznld~HT%X+{mJ$t=yNVKteh85OKHC$lu8hSg-2X4J5n%+ib+ zR+CwpMNyu)%*iaxs9`mkrCBr;d`)I)Mh&aUEX}B4HJPOuHLNDHG^2vm7G#!Y)UcY& z(u^8blUbTk!)h{1vslXWwFQ}_88xgXvoxcI)nt}t)UcY&(u^8blUbTk!)h{1vpC9A 
zxFwmT88xgXvoxcI)nt}t)UcY&(u^8blUbTk!)h{1Giq2(W@$zZtH~_Qs9`mkrCB@` oiWD+RvvLLo2J);H_o@E2Co+Z6FV+5~?A0N{bSca#DR0Gjo#iD}oDBll4oBQ;YOf zQiMXe0@u7{U@b0U5X?#}OVrCr%uCnv%qvMvPc0I9v|!2i#&!!P21ZW?*0RK$($tC~ z1_6XRztY^K)S|XEQ)RiQJm_X(V5lfyU|?VrU{q4)QpIg|N@`+?Pijd?YEd!c;Bq<> z2~KANh4kQZJ~IiN{bSca#DR0Gjo#iD}oDBll4oBQ;YOf zQiVdf0@u7{U@b0U5X?#}OVrCr%uCnv%qvMvPc0I9v|!2i#&!!P21ZW?*0RK$($tC~ z1_6XRztY^K)S|XEQ)RiQJm_X(V5lfyU|?VrU{q4)QpIg|N@`+?Pijd?YEd!c;Bq<> z2~KANh4kQZJ~IiX)PzmpCSuN{bSca#DR0Gjo#iD}oDBll4oBQ;YOf z(u6{}0@u7{U@b0U5X?#}OVrCr%uCnv%qvMvPc0I9v|!2i#&!!P21ZW?*0RK$($tC~ z1_6XRztY^K)S|XEQ)RiQJm_X(V5lfyU|?VrU{q4)QpIg|N@`+?Pijd?YEdyG8_2z2 z_uCB?_cM{;{=wyPW)fTu33&*~RKmc(U{6-~^Ko&3(oYEkb8%^|0HcyFmp1N*FRm;u UNzK(SNi8mMOfJbUGK5$U024mYO#lD@ From 2a370520fd5fd7f8799d35c23950692056c94717 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 14:50:49 +0200 Subject: [PATCH 060/106] Cleaning up the cancellables, null-object pattern ftw --- .../src/main/scala/akka/actor/Scheduler.scala | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 850cb02048..30cadc5a3b 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -216,6 +216,14 @@ private[akka] object ContinuousCancellable { override def isCancelled: Boolean = false override def cancel: Unit = () } + + val cancelled: HWTimeout = new HWTimeout { + override def getTimer: Timer = null + override def getTask: TimerTask = null + override def isExpired: Boolean = false + override def isCancelled: Boolean = true + override def cancel: Unit = () + } } /** * Wrapper of a [[org.jboss.netty.akka.util.Timeout]] that delegates all @@ -229,24 +237,15 @@ private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout](Con } @tailrec private[akka] final def swap(newTimeout: HWTimeout): Unit = get match { - case null ⇒ newTimeout.cancel() - case some if some.isCancelled ⇒ cancel(); newTimeout.cancel() + case some if some.isCancelled ⇒ try cancel() finally newTimeout.cancel() case some ⇒ if (!compareAndSet(some, newTimeout)) swap(newTimeout) } - def isCancelled(): Boolean = get match { - case null ⇒ true - case some ⇒ some.isCancelled() - } - - def cancel(): Unit = - getAndSet(null) match { - case null ⇒ - case some ⇒ some.cancel() - } + def isCancelled(): Boolean = get().isCancelled() + def cancel(): Unit = getAndSet(ContinuousCancellable.cancelled).cancel() } -private[akka] class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { - override def cancel(): Unit = timeout.cancel() - override def isCancelled: Boolean = timeout.isCancelled +private[akka] class DefaultCancellable(timeout: HWTimeout) extends AtomicReference[HWTimeout](timeout) with Cancellable { + override def cancel(): Unit = getAndSet(ContinuousCancellable.cancelled).cancel() + override def isCancelled: Boolean = get().isCancelled } From c7ca6af9274dfb2b92f44f17062a831717fbc279 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 14:53:07 +0200 Subject: [PATCH 061/106] Adding a more robust test for the SchedulerSpec --- .../src/test/scala/akka/actor/SchedulerSpec.scala | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index 3b87af2aad..beeb2a4c3b 100644 --- 
a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -18,7 +18,12 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout } override def afterEach { - while (cancellables.peek() ne null) { Option(cancellables.poll()).foreach(_.cancel()) } + while (cancellables.peek() ne null) { + for (c ← Option(cancellables.poll())) { + c.cancel() + c.isCancelled must be === true + } + } } "A Scheduler" must { From 6e46b089ff44617980fe8ce807a3000cbc088137 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 16:42:33 +0200 Subject: [PATCH 062/106] Adding the arguably more correct behavior stacking --- .../src/main/scala/akka/actor/ActorCell.scala | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 6a25ac04a9..c73e6fc4b2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -185,6 +185,8 @@ private[akka] object ActorCell { final val emptyReceiveTimeoutData: (Long, Cancellable) = (-1, emptyCancellable) + final val behaviorStackPlaceHolder: Stack[Actor.Receive] = Stack.empty.push(Actor.emptyBehavior) + sealed trait SuspendReason case object UserRequest extends SuspendReason case class Recreation(cause: Throwable) extends SuspendReason @@ -493,11 +495,18 @@ private[akka] class ActorCell( protected def newActor(): Actor = { contextStack.set(contextStack.get.push(this)) try { + import ActorCell.behaviorStackPlaceHolder + + behaviorStack = behaviorStackPlaceHolder val instance = props.creator() if (instance eq null) throw ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") + behaviorStack = behaviorStack match { + case `behaviorStackPlaceHolder` ⇒ Stack.empty.push(instance.receive) + case newBehaviors ⇒ Stack.empty.push(instance.receive).pushAll(newBehaviors.reverse.drop(1)) + } instance } finally { val stackAfter = contextStack.get @@ -512,7 +521,6 @@ private[akka] class ActorCell( def create(): Unit = if (isNormal) { try { val created = newActor() - behaviorStack = Stack.empty.push(created.receive) actor = created created.preStart() checkReceiveTimeout @@ -648,10 +656,8 @@ private[akka] class ActorCell( /* * UntypedActorContext impl */ - def become(behavior: Procedure[Any], discardOld: Boolean): Unit = { - def newReceive: Actor.Receive = { case msg ⇒ behavior.apply(msg) } - become(newReceive, discardOld) - } + def become(behavior: Procedure[Any], discardOld: Boolean): Unit = + become({ case msg ⇒ behavior.apply(msg) }: Actor.Receive, discardOld) def unbecome(): Unit = { val original = behaviorStack @@ -694,7 +700,7 @@ private[akka] class ActorCell( if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) } finally { - behaviorStack = Stack.empty + behaviorStack = ActorCell.behaviorStackPlaceHolder clearActorFields(a) actor = null } @@ -704,9 +710,7 @@ private[akka] class ActorCell( private def doRecreate(cause: Throwable, failedActor: Actor): Unit = try { // after all killed children have terminated, recreate the rest, then go on to start the new instance actor.supervisorStrategy.handleSupervisorRestarted(cause, self, children) - val freshActor = newActor() - behaviorStack = Stack.empty.push(freshActor.receive) actor = freshActor // this must happen before postRestart has a 
chance to fail if (freshActor eq failedActor) setActorFields(freshActor, this, self) // If the creator returns the same instance, we need to restore our nulled out fields. From ac71d404422c9699be84987c9071884964879144 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 19:59:42 +0200 Subject: [PATCH 063/106] Removing most of AkkaException since it's to be considered outdated residue by now --- .../src/main/scala/akka/AkkaException.scala | 35 ++----------------- .../src/main/scala/akka/event/Logging.scala | 2 +- 2 files changed, 4 insertions(+), 33 deletions(-) diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index e5b0cb6c80..5f5910ae44 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -4,31 +4,6 @@ package akka -object AkkaException { - //FIXME DOC - def toStringWithStackTrace(throwable: Throwable): String = throwable match { - case null ⇒ "Unknown Throwable: was 'null'" - case ae: AkkaException ⇒ ae.toLongString - case e ⇒ "%s:%s\n%s" format (e.getClass.getName, e.getMessage, stackTraceToString(e)) - } - - /** - * Returns the given Throwables stack trace as a String, or the empty String if no trace is found - * @param throwable - * @return - */ - def stackTraceToString(throwable: Throwable): String = throwable.getStackTrace match { - case null ⇒ "" - case x if x.length == 0 ⇒ "" - case trace ⇒ - val sb = new StringBuilder - for (i ← 0 until trace.length) - sb.append("\tat %s\n" format trace(i)) - sb.toString - } - -} - /** * Akka base Exception. Each Exception gets: *
    @@ -38,21 +13,17 @@ object AkkaException { *
*/ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed -class AkkaException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause) with Serializable { +class AkkaException(message: String, cause: Throwable) extends RuntimeException(message, cause) with Serializable { def this(msg: String) = this(msg, null) lazy val uuid = java.util.UUID.randomUUID().toString - override def toString: String = "%s:%s\n[%s]".format(getClass.getName, message, uuid) - - def toLongString: String = "%s:%s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) - - def stackTraceToString: String = AkkaException.stackTraceToString(this) + override def getMessage(): String = "[" + uuid + "] " + super.getMessage } /** * This exception is thrown when Akka detects a problem with the provided configuration */ -class ConfigurationException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { +class ConfigurationException(message: String, cause: Throwable) extends AkkaException(message, cause) { def this(msg: String) = this(msg, null) } diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 1230756517..8cb79fd299 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -530,7 +530,7 @@ object Logging { * Artificial exception injected into Error events if no Throwable is * supplied; used for getting a stack dump of error locations. */ - class EventHandlerException extends AkkaException + class EventHandlerException extends AkkaException("") /** * Exception that wraps a LogEvent. From 64cda1f3e75455c59f5e2cd4c2b397efb5fc730a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 20:21:12 +0200 Subject: [PATCH 064/106] Fixing AkkaException once and for all --- .../src/test/scala/akka/actor/dispatch/ActorModelSpec.scala | 4 ++-- akka-actor/src/main/scala/akka/AkkaException.scala | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 854a562745..acc416f04f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -408,9 +408,9 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa assert(Await.result(f1, timeout.duration) === "foo") assert(Await.result(f2, timeout.duration) === "bar") assert(Await.result(f4, timeout.duration) === "foo2") - assert(intercept[ActorInterruptedException](Await.result(f3, timeout.duration)).getMessage === "Ping!") + assert(intercept[ActorInterruptedException](Await.result(f3, timeout.duration)).getCause.getMessage === "Ping!") assert(Await.result(f6, timeout.duration) === "bar2") - assert(intercept[ActorInterruptedException](Await.result(f5, timeout.duration)).getMessage === "Ping!") + assert(intercept[ActorInterruptedException](Await.result(f5, timeout.duration)).getCause.getMessage === "Ping!") } } diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 7c58972d8c..04e820419f 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -18,7 +18,7 @@ class AkkaException(message: String, cause: Throwable) extends RuntimeException( lazy val uuid: 
String = java.util.UUID.randomUUID().toString - override def getMessage(): String = "[" + uuid + "] " + super.getMessage + override def toString(): String = uuid + super.toString() } /** From 8b3dbc2f7c7a0685fb03265a15ef6fb57d7c0c61 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 21:17:28 +0200 Subject: [PATCH 065/106] Removing NoStackTrace from AkkaInitializationException --- akka-actor/src/main/scala/akka/actor/Actor.scala | 6 +++--- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index b26da76ff2..6fea72a5cf 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -123,10 +123,10 @@ case class InvalidActorNameException(message: String) extends AkkaException(mess /** * An ActorInitializationException is thrown when the the initialization logic for an Actor fails. */ -case class ActorInitializationException private[akka] (actor: ActorRef, message: String, cause: Throwable = null) - extends AkkaException(message, cause) - with NoStackTrace { +class ActorInitializationException private[akka] (actor: ActorRef, message: String, cause: Throwable) + extends AkkaException(message, cause) /*with NoStackTrace*/ { def this(msg: String) = this(null, msg, null) + def this(actor: ActorRef, msg: String) = this(actor, msg, null) } /** diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index a670af5ca6..3380d51de0 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -501,7 +501,7 @@ private[akka] class ActorCell( val instance = props.creator() if (instance eq null) - throw ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") + throw new ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") behaviorStack = behaviorStack match { case `behaviorStackPlaceHolder` ⇒ Stack.empty.push(instance.receive) @@ -527,13 +527,13 @@ private[akka] class ActorCell( if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(created), "started (" + created + ")")) } catch { case NonFatal(i: InstantiationException) ⇒ - throw ActorInitializationException(self, + throw new ActorInitializationException(self, """exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either, a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new UntypedActorFactory ... ) or is missing an appropriate, reachable no-args constructor. """, i.getCause) case NonFatal(e) ⇒ - throw ActorInitializationException(self, "exception during creation", e) + throw new ActorInitializationException(self, "exception during creation", e) } } @@ -557,7 +557,7 @@ private[akka] class ActorCell( doRecreate(cause, failedActor) } } catch { - case NonFatal(e) ⇒ throw ActorInitializationException(self, "exception during creation", e) + case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e) } } @@ -726,7 +726,7 @@ private[akka] class ActorCell( actor.supervisorStrategy.handleSupervisorFailing(self, children) clearActorFields(actor) // If this fails, we need to ensure that preRestart isn't called. 
} finally { - parent.tell(Failed(ActorInitializationException(self, "exception during re-creation", e)), self) + parent.tell(Failed(new ActorInitializationException(self, "exception during re-creation", e)), self) } } From 623d0f070327754939a0f9b4825a43a38b1794e5 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 May 2012 10:44:28 +0200 Subject: [PATCH 066/106] Use sbt 0.11.3. See #2087 * Temporarily disabled sbt-ls * Removed unused schoir plugin --- ls.sbt | 14 +++++++------- project/AkkaBuild.scala | 5 ++--- project/build.properties | 2 +- project/plugins.sbt | 4 +--- 4 files changed, 11 insertions(+), 14 deletions(-) diff --git a/ls.sbt b/ls.sbt index 83e5babc79..87e68ed303 100644 --- a/ls.sbt +++ b/ls.sbt @@ -1,13 +1,13 @@ -seq(lsSettings:_*) +// seq(lsSettings:_*) -(description in LsKeys.lsync) := "Akka is the platform for the next generation of event-driven, scalable and fault-tolerant architectures on the JVM." +// (description in LsKeys.lsync) := "Akka is the platform for the next generation of event-driven, scalable and fault-tolerant architectures on the JVM." -(homepage in LsKeys.lsync) := Some(url("http://akka.io")) +// (homepage in LsKeys.lsync) := Some(url("http://akka.io")) -(LsKeys.tags in LsKeys.lsync) := Seq("actors", "stm", "concurrency", "distributed", "fault-tolerance", "scala", "java", "futures", "dataflow", "remoting") +// (LsKeys.tags in LsKeys.lsync) := Seq("actors", "stm", "concurrency", "distributed", "fault-tolerance", "scala", "java", "futures", "dataflow", "remoting") -(LsKeys.docsUrl in LsKeys.lsync) := Some(url("http://akka.io/docs")) +// (LsKeys.docsUrl in LsKeys.lsync) := Some(url("http://akka.io/docs")) -(licenses in LsKeys.lsync) := Seq(("Apache 2", url("http://www.apache.org/licenses/LICENSE-2.0.html"))) +// (licenses in LsKeys.lsync) := Seq(("Apache 2", url("http://www.apache.org/licenses/LICENSE-2.0.html"))) -(externalResolvers in LsKeys.lsync) := Seq("Typesafe Releases" at "http://repo.typesafe.com/typesafe/releases") +// (externalResolvers in LsKeys.lsync) := Seq("Typesafe Releases" at "http://repo.typesafe.com/typesafe/releases") diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index d216464fde..0dedada9f8 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -8,7 +8,6 @@ import sbt._ import sbt.Keys._ import com.typesafe.sbtmultijvm.MultiJvmPlugin import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions } -import com.typesafe.schoir.SchoirPlugin.schoirSettings import com.typesafe.sbtscalariform.ScalariformPlugin import com.typesafe.sbtscalariform.ScalariformPlugin.ScalariformKeys import java.lang.Boolean.getBoolean @@ -79,7 +78,7 @@ object AkkaBuild extends Build { id = "akka-remote", base = file("akka-remote"), dependencies = Seq(actor, actorTests % "test->test", testkit % "test->test"), - settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( + settings = defaultSettings ++ multiJvmSettings ++ Seq( libraryDependencies ++= Dependencies.remote, // disable parallel tests parallelExecution in Test := false, @@ -98,7 +97,7 @@ object AkkaBuild extends Build { id = "akka-cluster", base = file("akka-cluster"), dependencies = Seq(remote, remote % "test->test", testkit % "test->test"), - settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( + settings = defaultSettings ++ multiJvmSettings ++ Seq( libraryDependencies ++= Dependencies.cluster, // disable parallel tests parallelExecution in Test := false, diff --git 
a/project/build.properties b/project/build.properties index f4ff7a5afa..d4287112c6 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=0.11.2 +sbt.version=0.11.3 diff --git a/project/plugins.sbt b/project/plugins.sbt index 80ff9db95a..cb2b285a8a 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -3,12 +3,10 @@ resolvers += Classpaths.typesafeResolver addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9") -addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.2") - addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.3.1") resolvers ++= Seq( "less is" at "http://repo.lessis.me", "coda" at "http://repo.codahale.com") -addSbtPlugin("me.lessis" % "ls-sbt" % "0.1.1") +// addSbtPlugin("me.lessis" % "ls-sbt" % "0.1.1") From 8ddd0ed262720263ca1a93a30a490eaa7cfa0a04 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 May 2012 10:50:42 +0200 Subject: [PATCH 067/106] sbtscalariform 0.4.0 --- project/plugins.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/plugins.sbt b/project/plugins.sbt index cb2b285a8a..768904eacb 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -3,7 +3,7 @@ resolvers += Classpaths.typesafeResolver addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9") -addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.3.1") +addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.4.0") resolvers ++= Seq( "less is" at "http://repo.lessis.me", From 1f38866b5b744bb6ef52a5b9c822312d17cf5d93 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 10:51:38 +0200 Subject: [PATCH 068/106] Adding more robust tests to HotSwapSpec --- .../test/scala/akka/actor/HotSwapSpec.scala | 61 ++++++++++++++++--- 1 file changed, 54 insertions(+), 7 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala index 236d3bd014..120caa3e93 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala @@ -6,10 +6,60 @@ package akka.actor import akka.testkit._ +object HotSwapSpec { + abstract class Becomer extends Actor { + + } +} + @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class HotSwapSpec extends AkkaSpec with ImplicitSender { + import HotSwapSpec.Becomer "An Actor" must { + "be able to become in its constructor" in { + val a = system.actorOf(Props(new Becomer { + context.become { case always ⇒ sender ! always } + def receive = { case always ⇒ sender ! "FAILURE" } + })) + a ! "pigdog" + expectMsg("pigdog") + } + + "be able to become multiple times in its constructor" in { + val a = system.actorOf(Props(new Becomer { + for (i ← 1 to 4) context.become({ case always ⇒ sender ! i + ":" + always }) + def receive = { case always ⇒ sender ! "FAILURE" } + })) + a ! "pigdog" + expectMsg("4:pigdog") + } + + "be able to become with stacking in its constructor" in { + val a = system.actorOf(Props(new Becomer { + context.become({ case always ⇒ sender ! "pigdog:" + always; context.unbecome() }, false) + def receive = { case always ⇒ sender ! "badass:" + always } + })) + a ! "pigdog" + expectMsg("pigdog:pigdog") + a ! "badass" + expectMsg("badass:badass") + } + + "be able to become, with stacking, multiple times in its constructor" in { + val a = system.actorOf(Props(new Becomer { + for (i ← 1 to 4) context.become({ case always ⇒ sender ! 
i + ":" + always; context.unbecome() }, false) + def receive = { case always ⇒ sender ! "FAILURE" } + })) + a ! "pigdog" + a ! "pigdog" + a ! "pigdog" + a ! "pigdog" + expectMsg("4:pigdog") + expectMsg("3:pigdog") + expectMsg("2:pigdog") + expectMsg("1:pigdog") + } "be able to hotswap its behavior with become(..)" in { val a = system.actorOf(Props(new Actor { @@ -30,13 +80,10 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { val a = system.actorOf(Props(new Actor { def receive = { case "init" ⇒ sender ! "init" - case "swap" ⇒ - context.become({ - case "swapped" ⇒ - sender ! "swapped" - case "revert" ⇒ - context.unbecome() - }) + case "swap" ⇒ context.become({ + case "swapped" ⇒ sender ! "swapped" + case "revert" ⇒ context.unbecome() + }) } })) From 2e53513718fa4a6400b5649eb56e5171363f7fcd Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 22 May 2012 10:54:13 +0200 Subject: [PATCH 069/106] harden up FSM.scala, improve gracefulStop ScalaDoc --- .../src/main/scala/akka/actor/FSM.scala | 190 +++++++++++++----- .../akka/pattern/GracefulStopSupport.scala | 13 ++ 2 files changed, 154 insertions(+), 49 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 762f23b16b..50c769e156 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -8,29 +8,84 @@ import akka.util._ import scala.collection.mutable import akka.routing.{ Deafen, Listen, Listeners } -//FIXME: Roland, could you go through this file? - object FSM { + /** + * A partial function value which does not match anything and can be used to + * “reset” `whenUnhandled` and `onTermination` handlers. + * + * {{{ + * onTermination(FSM.NullFunction) + * }}} + */ object NullFunction extends PartialFunction[Any, Nothing] { def isDefinedAt(o: Any) = false def apply(o: Any) = sys.error("undefined") } + /** + * Message type which is sent directly to the subscribed actor in + * [[akka.actor.FSM.SubscribeTransitionCallback]] before sending any + * [[akka.actor.FSM.Transition]] messages. + */ case class CurrentState[S](fsmRef: ActorRef, state: S) + + /** + * Message type which is used to communicate transitions between states to + * all subscribed listeners (use [[akka.actor.FSM.SubscribeTransitionCallback]]). + */ case class Transition[S](fsmRef: ActorRef, from: S, to: S) + + /** + * Send this to an [[akka.actor.FSM]] to request first the [[akka.actor.CurrentState]] + * and then a series of [[akka.actor.Transition]] updates. Cancel the subscription + * using [[akka.actor.FSM.UnsubscribeTransitionCallback]]. + */ case class SubscribeTransitionCallBack(actorRef: ActorRef) + + /** + * Unsubscribe from [[akka.actor.FSM.Transition]] notifications which was + * effected by sending the corresponding [[akka.actor.FSM.SubscribeTransitionCallback]]. + */ case class UnsubscribeTransitionCallBack(actorRef: ActorRef) + /** + * Reason why this [[akka.actor.FSM]] is shutting down. + */ sealed trait Reason + + /** + * Default reason if calling `stop()`. + */ case object Normal extends Reason + + /** + * Reason given when someone was calling `system.stop(fsm)` from outside; + * also applies to `Stop` supervision directive. + */ case object Shutdown extends Reason + + /** + * Signifies that the [[akka.actor.FSM]] is shutting itself down because of + * an error, e.g. if the state to transition into does not exist. You can use + * this to communicate a more precise cause to the [[akka.actor.FSM$onTermination]] block. 
+ */ case class Failure(cause: Any) extends Reason + /** + * This case object is received in case of a state timeout. + */ case object StateTimeout - case class TimeoutMarker(generation: Long) - case class Timer(name: String, msg: Any, repeat: Boolean, generation: Int)(implicit system: ActorSystem) { + /** + * Internal API + */ + private case class TimeoutMarker(generation: Long) + + /** + * Internal API + */ + private[akka] case class Timer(name: String, msg: Any, repeat: Boolean, generation: Int)(implicit system: ActorSystem) { private var ref: Option[Cancellable] = _ def schedule(actor: ActorRef, timeout: Duration) { @@ -57,8 +112,16 @@ object FSM { def unapply[S](in: (S, S)) = Some(in) } + /** + * Log Entry of the [[akka.actor.LoggingFSM]], can be obtained by calling `getLog`. + */ case class LogEntry[S, D](stateName: S, stateData: D, event: Any) + /** + * This captures all of the managed state of the [[akka.actor.FSM]]: the state + * name, the state data, possibly custom timeout, stop reason and replies + * accumulated while processing the last message. + */ case class State[S, D](stateName: S, stateData: D, timeout: Option[Duration] = None, stopReason: Option[Reason] = None, replies: List[Any] = Nil) { /** @@ -87,6 +150,9 @@ object FSM { copy(stateData = nextStateDate) } + /** + * Internal API. + */ private[akka] def withStopReason(reason: Reason): State[S, D] = { copy(stopReason = Some(reason)) } @@ -183,8 +249,19 @@ trait FSM[S, D] extends Listeners with ActorLogging { type Timeout = Option[Duration] type TransitionHandler = PartialFunction[(S, S), Unit] - // “import” so that it is visible without an import + /* + * “import” so that these are visible without an import + */ + + /** + * This extractor is just convenience for matching a (S, S) pair, including a + * reminder what the new state is. + */ val -> = FSM.-> + + /** + * This case object is received in case of a state timeout. + */ val StateTimeout = FSM.StateTimeout /** @@ -203,13 +280,9 @@ trait FSM[S, D] extends Listeners with ActorLogging { * @param stateTimeout default state timeout for this state * @param stateFunction partial function describing response to input */ - protected final def when(stateName: S, stateTimeout: Duration = null)(stateFunction: StateFunction): Unit = + final def when(stateName: S, stateTimeout: Duration = null)(stateFunction: StateFunction): Unit = register(stateName, stateFunction, Option(stateTimeout)) - @deprecated("use the more import-friendly variant taking a Duration", "2.0") - protected final def when(stateName: S, stateTimeout: Timeout)(stateFunction: StateFunction): Unit = - register(stateName, stateFunction, stateTimeout) - /** * Set initial state. Call this method from the constructor before the #initialize method. 
* @@ -217,9 +290,7 @@ trait FSM[S, D] extends Listeners with ActorLogging { * @param stateData initial state data * @param timeout state timeout for the initial state, overriding the default timeout for that state */ - protected final def startWith(stateName: S, - stateData: D, - timeout: Timeout = None): Unit = + final def startWith(stateName: S, stateData: D, timeout: Timeout = None): Unit = currentState = FSM.State(stateName, stateData, timeout) /** @@ -229,7 +300,7 @@ trait FSM[S, D] extends Listeners with ActorLogging { * @param nextStateName state designator for the next state * @return state transition descriptor */ - protected final def goto(nextStateName: S): State = FSM.State(nextStateName, currentState.stateData) + final def goto(nextStateName: S): State = FSM.State(nextStateName, currentState.stateData) /** * Produce "empty" transition descriptor. Return this from a state function @@ -237,29 +308,29 @@ trait FSM[S, D] extends Listeners with ActorLogging { * * @return descriptor for staying in current state */ - protected final def stay(): State = goto(currentState.stateName) // cannot directly use currentState because of the timeout field + final def stay(): State = goto(currentState.stateName) // cannot directly use currentState because of the timeout field /** * Produce change descriptor to stop this FSM actor with reason "Normal". */ - protected final def stop(): State = stop(Normal) + final def stop(): State = stop(Normal) /** * Produce change descriptor to stop this FSM actor including specified reason. */ - protected final def stop(reason: Reason): State = stop(reason, currentState.stateData) + final def stop(reason: Reason): State = stop(reason, currentState.stateData) /** * Produce change descriptor to stop this FSM actor including specified reason. */ - protected final def stop(reason: Reason, stateData: D): State = stay using stateData withStopReason (reason) + final def stop(reason: Reason, stateData: D): State = stay using stateData withStopReason (reason) - protected final class TransformHelper(func: StateFunction) { + final class TransformHelper(func: StateFunction) { def using(andThen: PartialFunction[State, State]): StateFunction = func andThen (andThen orElse { case x ⇒ x }) } - protected final def transform(func: StateFunction): TransformHelper = new TransformHelper(func) + final def transform(func: StateFunction): TransformHelper = new TransformHelper(func) /** * Schedule named timer to deliver message after given delay, possibly repeating. @@ -269,7 +340,9 @@ trait FSM[S, D] extends Listeners with ActorLogging { * @param repeat send once if false, scheduleAtFixedRate if true * @return current state descriptor */ - protected[akka] def setTimer(name: String, msg: Any, timeout: Duration, repeat: Boolean): State = { + final def setTimer(name: String, msg: Any, timeout: Duration, repeat: Boolean): State = { + if (debugEvent) + log.debug("setting " + (if (repeat) "repeating " else "") + "timer '" + name + "'/" + timeout + ": " + msg) if (timers contains name) { timers(name).cancel } @@ -283,24 +356,27 @@ trait FSM[S, D] extends Listeners with ActorLogging { * Cancel named timer, ensuring that the message is not subsequently delivered (no race). 
* @param name of the timer to cancel */ - protected[akka] def cancelTimer(name: String): Unit = + final def cancelTimer(name: String): Unit = { + if (debugEvent) + log.debug("canceling timer '" + name + "'") if (timers contains name) { timers(name).cancel timers -= name } + } /** * Inquire whether the named timer is still active. Returns true unless the * timer does not exist, has previously been canceled or if it was a * single-shot timer whose message was already received. */ - protected[akka] final def timerActive_?(name: String) = timers contains name + final def timerActive_?(name: String) = timers contains name /** * Set state timeout explicitly. This method can safely be used from within a * state handler. */ - protected final def setStateTimeout(state: S, timeout: Timeout): Unit = stateTimeouts(state) = timeout + final def setStateTimeout(state: S, timeout: Timeout): Unit = stateTimeouts(state) = timeout /** * Set handler which is called upon each state transition, i.e. not when @@ -327,50 +403,52 @@ trait FSM[S, D] extends Listeners with ActorLogging { * Multiple handlers may be installed, and every one of them will be * called, not only the first one matching. */ - protected final def onTransition(transitionHandler: TransitionHandler): Unit = transitionEvent :+= transitionHandler + final def onTransition(transitionHandler: TransitionHandler): Unit = transitionEvent :+= transitionHandler /** * Convenience wrapper for using a total function instead of a partial * function literal. To be used with onTransition. */ - implicit protected final def total2pf(transitionHandler: (S, S) ⇒ Unit): TransitionHandler = + implicit final def total2pf(transitionHandler: (S, S) ⇒ Unit): TransitionHandler = new TransitionHandler { def isDefinedAt(in: (S, S)) = true def apply(in: (S, S)) { transitionHandler(in._1, in._2) } } /** - * Set handler which is called upon termination of this FSM actor. + * Set handler which is called upon termination of this FSM actor. Calling + * this method again will overwrite the previous contents. */ - protected final def onTermination(terminationHandler: PartialFunction[StopEvent, Unit]): Unit = + final def onTermination(terminationHandler: PartialFunction[StopEvent, Unit]): Unit = terminateEvent = terminationHandler /** - * Set handler which is called upon reception of unhandled messages. + * Set handler which is called upon reception of unhandled messages. Calling + * this method again will overwrite the previous contents. */ - protected final def whenUnhandled(stateFunction: StateFunction): Unit = + final def whenUnhandled(stateFunction: StateFunction): Unit = handleEvent = stateFunction orElse handleEventDefault /** * Verify existence of initial state and setup timers. This should be the * last call within the constructor. */ - protected final def initialize: Unit = makeTransition(currentState) + final def initialize: Unit = makeTransition(currentState) /** * Return current state name (i.e. object of type S) */ - protected[akka] def stateName: S = currentState.stateName + final def stateName: S = currentState.stateName /** * Return current state data (i.e. 
object of type D) */ - protected[akka] def stateData: D = currentState.stateData + final def stateData: D = currentState.stateData /** * Return next state data (available in onTransition handlers) */ - protected[akka] def nextStateData = nextState.stateData + final def nextStateData = nextState.stateData /* * **************************************************************** @@ -378,6 +456,8 @@ trait FSM[S, D] extends Listeners with ActorLogging { * **************************************************************** */ + private[akka] def debugEvent: Boolean = false + /* * FSM State data and current timeout handling */ @@ -525,7 +605,21 @@ trait FSM[S, D] extends Listeners with ActorLogging { } } - override def postStop(): Unit = { terminate(stay withStopReason Shutdown) } + /** + * Call `onTermination` hook; if you want to retain this behavior when + * overriding make sure to call `super.postStop()`. + * + * Please note that this method is called by default from `preRestart()`, + * so override that one if `onTermination` shall not be called during + * restart. + */ + override def postStop(): Unit = { + /* + * setting this instance’s state to terminated does no harm during restart + * since the new instance will initialize fresh using startWith() + */ + terminate(stay withStopReason Shutdown) + } private def terminate(nextState: State): Unit = { if (!currentState.stopReason.isDefined) { @@ -542,13 +636,22 @@ trait FSM[S, D] extends Listeners with ActorLogging { } } + /** + * All messages sent to the [[akka.actor.FSM]] will be wrapped inside an + * `Event`, which allows pattern matching to extract both state and data. + */ case class Event(event: Any, stateData: D) + /** + * Case class representing the state of the [[akka.actor.FSM]] within the + * `onTermination` block. + */ case class StopEvent(reason: Reason, currentState: S, stateData: D) } /** - * Stackable trait for FSM which adds a rolling event log. + * Stackable trait for [[akka.actor.FSM]] which adds a rolling event log and + * debug logging capabilities (analogous to [[akka.event.LoggingReceive]]). * * @since 1.2 */ @@ -558,7 +661,7 @@ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ def logDepth: Int = 0 - private val debugEvent = context.system.settings.FsmDebugEvent + private[akka] override val debugEvent = context.system.settings.FsmDebugEvent private val events = new Array[Event](logDepth) private val states = new Array[AnyRef](logDepth) @@ -575,18 +678,6 @@ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ } } - protected[akka] abstract override def setTimer(name: String, msg: Any, timeout: Duration, repeat: Boolean): State = { - if (debugEvent) - log.debug("setting " + (if (repeat) "repeating " else "") + "timer '" + name + "'/" + timeout + ": " + msg) - super.setTimer(name, msg, timeout, repeat) - } - - protected[akka] abstract override def cancelTimer(name: String): Unit = { - if (debugEvent) - log.debug("canceling timer '" + name + "'") - super.cancelTimer(name) - } - private[akka] abstract override def processEvent(event: Event, source: AnyRef): Unit = { if (debugEvent) { val srcstr = source match { @@ -615,6 +706,7 @@ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ /** * Retrieve current rolling log in oldest-first order. The log is filled with * each incoming event before processing by the user supplied state handler. + * The log entries are lost when this actor is restarted.
*/ protected def getLog: IndexedSeq[LogEntry[S, D]] = { val log = events zip states filter (_._1 ne null) map (x ⇒ LogEntry(x._2.asInstanceOf[S], x._1.stateData, x._1.event)) diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 5f78e8ba27..adcbe53f0b 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -17,6 +17,19 @@ trait GracefulStopSupport { * Useful when you need to wait for termination or compose ordered termination of several actors, * which should only be done outside of the ActorSystem as blocking inside Actors is discouraged. * + * IMPORTANT NOTICE: the actor being terminated and its supervisor + * being informed of the availability of the deceased actor’s name are two + * distinct operations, which do not obey any reliable ordering. Especially + * the following will NOT work: + * + * {{{ + * def receive = { + * case msg => + * Await.result(gracefulStop(someChild, timeout), timeout) + * context.actorOf(Props(...), "someChild") // assuming that that was someChild’s name, this will NOT work + * } + * }}} + * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] * is completed with failure [[akka.pattern.AskTimeoutException]]. */ From d66d642b8fd5a21cdc24003a9d23f45d38ab4bf0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 10:57:26 +0200 Subject: [PATCH 070/106] Fixing bad ScalaDoc --- akka-actor/src/main/scala/akka/actor/Actor.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 6fea72a5cf..b611d96842 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -181,13 +181,13 @@ object Status { /** * Mix in ActorLogging into your Actor to easily obtain a reference to a logger, which is available under the name "log". * - * {{ + * {{{ * class MyActor extends Actor with ActorLogging { * def receive = { * case "pigdog" => log.info("We've got yet another pigdog on our hands") * } * } - * }} + * }}} */ trait ActorLogging { this: Actor ⇒ val log = akka.event.Logging(context.system, this) From 09469b73e1ad45c286e0c39bfa60a001c4c36e80 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 11:08:29 +0200 Subject: [PATCH 071/106] Cleaning up some horribly written Java code --- akka-kernel/src/main/java/akka/jmx/Client.java | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/akka-kernel/src/main/java/akka/jmx/Client.java b/akka-kernel/src/main/java/akka/jmx/Client.java index 9ebf63e9eb..83a8f9246f 100644 --- a/akka-kernel/src/main/java/akka/jmx/Client.java +++ b/akka-kernel/src/main/java/akka/jmx/Client.java @@ -201,12 +201,10 @@ public class Client { * @param password * @return Credentials as map for RMI. 
*/ - protected Map formatCredentials(final String login, + protected Map formatCredentials(final String login, final String password) { - Map env = null; - String[] creds = new String[] {login, password}; - env = new HashMap(1); - env.put(JMXConnector.CREDENTIALS, creds); + Map env = new HashMap(1); + env.put(JMXConnector.CREDENTIALS, new String[] {login, password}); return env; } @@ -214,10 +212,8 @@ public class Client { final String login, final String password) throws IOException { // Make up the jmx rmi URL and get a connector. - JMXServiceURL rmiurl = new JMXServiceURL("service:jmx:rmi://" - + hostport + "/jndi/rmi://" + hostport + "/jmxrmi"); - return JMXConnectorFactory.connect(rmiurl, - formatCredentials(login, password)); + JMXServiceURL rmiurl = new JMXServiceURL("service:jmx:rmi://"+hostport+"/jndi/rmi://"+hostport+"/jmxrmi"); + return JMXConnectorFactory.connect(rmiurl,formatCredentials(login, password)); } protected ObjectName getObjectName(final String beanname) From 0eae9d8d2289d1e4db4594e8cc149b662e609e41 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 May 2012 11:37:09 +0200 Subject: [PATCH 072/106] Move all doc examples out of the akka-package to avoid use of private APIs. See #2092 --- .../general/code/{akka => }/docs/config/ConfigDoc.java | 2 +- .../code/{akka => }/docs/config/ConfigDocSpec.scala | 2 +- .../java/code/{akka => }/docs/actor/FSMDocTest.scala | 2 +- .../code/{akka => }/docs/actor/FSMDocTestBase.java | 2 +- .../code/{akka => }/docs/actor/FaultHandlingTest.scala | 2 +- .../{akka => }/docs/actor/FaultHandlingTestBase.java | 2 +- .../code/{akka => }/docs/actor/FirstUntypedActor.java | 2 +- .../code/{akka => }/docs/actor/ImmutableMessage.java | 2 +- .../docs/actor/MyReceivedTimeoutUntypedActor.java | 2 +- .../code/{akka => }/docs/actor/MyUntypedActor.java | 2 +- .../code/{akka => }/docs/actor/SchedulerDocTest.scala | 2 +- .../{akka => }/docs/actor/SchedulerDocTestBase.java | 2 +- .../code/{akka => }/docs/actor/TypedActorDocTest.scala | 2 +- .../{akka => }/docs/actor/TypedActorDocTestBase.java | 2 +- .../{akka => }/docs/actor/UntypedActorDocTest.scala | 2 +- .../{akka => }/docs/actor/UntypedActorDocTestBase.java | 2 +- .../{akka => }/docs/actor/UntypedActorSwapper.java | 4 ++-- .../docs/actor/japi/FaultHandlingDocSample.java | 10 +++++----- .../code/{akka => }/docs/agent/AgentDocJavaSpec.scala | 4 ++-- .../java/code/{akka => }/docs/agent/AgentDocTest.java | 2 +- .../{akka => }/docs/dispatcher/DispatcherDocTest.scala | 2 +- .../docs/dispatcher/DispatcherDocTestBase.java | 6 +++--- .../code/{akka => }/docs/event/LoggingDocTest.scala | 2 +- .../code/{akka => }/docs/event/LoggingDocTestBase.java | 2 +- .../{akka => }/docs/extension/ExtensionDocTest.scala | 2 +- .../docs/extension/ExtensionDocTestBase.java | 2 +- .../docs/extension/SettingsExtensionDocTest.scala | 2 +- .../docs/extension/SettingsExtensionDocTestBase.java | 2 +- .../code/{akka => }/docs/future/FutureDocTest.scala | 2 +- .../code/{akka => }/docs/future/FutureDocTestBase.java | 2 +- .../{akka => }/docs/jrouting/CustomRouterDocTest.scala | 2 +- .../docs/jrouting/CustomRouterDocTestBase.java | 8 ++++---- .../code/{akka => }/docs/jrouting/FibonacciActor.java | 2 +- .../code/{akka => }/docs/jrouting/ParentActor.java | 2 +- .../code/{akka => }/docs/jrouting/PrintlnActor.java | 2 +- .../docs/jrouting/RouterViaConfigExample.java | 2 +- .../docs/jrouting/RouterViaProgramExample.java | 2 +- .../{akka => }/docs/remoting/RemoteActorExample.java | 2 +- .../docs/remoting/RemoteDeploymentDocTest.scala | 
2 +- .../docs/remoting/RemoteDeploymentDocTestBase.java | 2 +- .../docs/serialization/SerializationDocTest.scala | 2 +- .../docs/serialization/SerializationDocTestBase.java | 2 +- .../{akka => }/docs/transactor/CoordinatedCounter.java | 2 +- .../code/{akka => }/docs/transactor/Coordinator.java | 2 +- .../java/code/{akka => }/docs/transactor/Counter.java | 2 +- .../{akka => }/docs/transactor/FriendlyCounter.java | 2 +- .../code/{akka => }/docs/transactor/Increment.java | 2 +- .../java/code/{akka => }/docs/transactor/Message.java | 2 +- .../docs/transactor/TransactorDocJavaSpec.scala | 4 ++-- .../{akka => }/docs/transactor/TransactorDocTest.java | 2 +- .../code/{akka => }/docs/zeromq/ZeromqDocTest.scala | 2 +- .../code/{akka => }/docs/zeromq/ZeromqDocTestBase.java | 2 +- akka-docs/java/extending-akka.rst | 2 +- akka-docs/java/serialization.rst | 2 +- .../docs/actor/mailbox/DurableMailboxDocSpec.scala | 4 ++-- .../docs/actor/mailbox/DurableMailboxDocTest.scala | 2 +- .../docs/actor/mailbox/DurableMailboxDocTestBase.java | 2 +- .../code/{akka => }/docs/actor/ActorDocSpec.scala | 2 +- .../scala/code/{akka => }/docs/actor/FSMDocSpec.scala | 2 +- .../{akka => }/docs/actor/FaultHandlingDocSample.scala | 2 +- .../{akka => }/docs/actor/FaultHandlingDocSpec.scala | 2 +- .../code/{akka => }/docs/actor/SchedulerDocSpec.scala | 2 +- .../code/{akka => }/docs/actor/TypedActorDocSpec.scala | 2 +- .../code/{akka => }/docs/actor/UnnestedReceives.scala | 2 +- .../code/{akka => }/docs/agent/AgentDocSpec.scala | 2 +- .../scala/code/{akka => }/docs/camel/Consumers.scala | 2 +- .../code/{akka => }/docs/camel/Introduction.scala | 2 +- .../{akka => }/docs/dispatcher/DispatcherDocSpec.scala | 6 +++--- .../code/{akka => }/docs/event/LoggingDocSpec.scala | 2 +- .../{akka => }/docs/extension/ExtensionDocSpec.scala | 4 ++-- .../docs/extension/SettingsExtensionDocSpec.scala | 2 +- .../code/{akka => }/docs/future/FutureDocSpec.scala | 2 +- .../scala/code/{akka => }/docs/io/HTTPServer.scala | 2 +- .../docs/remoting/RemoteDeploymentDocSpec.scala | 2 +- .../code/{akka => }/docs/routing/RouterDocSpec.scala | 2 +- .../{akka => }/docs/routing/RouterTypeExample.scala | 2 +- .../docs/routing/RouterViaConfigExample.scala | 2 +- .../docs/routing/RouterViaProgramExample.scala | 2 +- .../docs/serialization/SerializationDocSpec.scala | 10 +++++----- .../code/{akka => }/docs/testkit/PlainWordSpec.scala | 2 +- .../{akka => }/docs/testkit/TestKitUsageSpec.scala | 2 +- .../code/{akka => }/docs/testkit/TestkitDocSpec.scala | 2 +- .../{akka => }/docs/transactor/TransactorDocSpec.scala | 2 +- .../code/{akka => }/docs/zeromq/ZeromqDocSpec.scala | 2 +- akka-docs/scala/serialization.rst | 2 +- 85 files changed, 105 insertions(+), 105 deletions(-) rename akka-docs/general/code/{akka => }/docs/config/ConfigDoc.java (97%) rename akka-docs/general/code/{akka => }/docs/config/ConfigDocSpec.scala (97%) rename akka-docs/java/code/{akka => }/docs/actor/FSMDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/actor/FSMDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/actor/FaultHandlingTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/actor/FaultHandlingTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/actor/FirstUntypedActor.java (95%) rename akka-docs/java/code/{akka => }/docs/actor/ImmutableMessage.java (96%) rename akka-docs/java/code/{akka => }/docs/actor/MyReceivedTimeoutUntypedActor.java (96%) rename akka-docs/java/code/{akka => }/docs/actor/MyUntypedActor.java (95%) rename akka-docs/java/code/{akka 
=> }/docs/actor/SchedulerDocTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/actor/SchedulerDocTestBase.java (98%) rename akka-docs/java/code/{akka => }/docs/actor/TypedActorDocTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/actor/TypedActorDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/actor/UntypedActorDocTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/actor/UntypedActorDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/actor/UntypedActorSwapper.java (93%) rename akka-docs/java/code/{akka => }/docs/actor/japi/FaultHandlingDocSample.java (97%) rename akka-docs/java/code/{akka => }/docs/agent/AgentDocJavaSpec.scala (78%) rename akka-docs/java/code/{akka => }/docs/agent/AgentDocTest.java (98%) rename akka-docs/java/code/{akka => }/docs/dispatcher/DispatcherDocTest.scala (86%) rename akka-docs/java/code/{akka => }/docs/dispatcher/DispatcherDocTestBase.java (96%) rename akka-docs/java/code/{akka => }/docs/event/LoggingDocTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/event/LoggingDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/extension/ExtensionDocTest.scala (86%) rename akka-docs/java/code/{akka => }/docs/extension/ExtensionDocTestBase.java (98%) rename akka-docs/java/code/{akka => }/docs/extension/SettingsExtensionDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/extension/SettingsExtensionDocTestBase.java (98%) rename akka-docs/java/code/{akka => }/docs/future/FutureDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/future/FutureDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/jrouting/CustomRouterDocTest.scala (80%) rename akka-docs/java/code/{akka => }/docs/jrouting/CustomRouterDocTestBase.java (95%) rename akka-docs/java/code/{akka => }/docs/jrouting/FibonacciActor.java (97%) rename akka-docs/java/code/{akka => }/docs/jrouting/ParentActor.java (98%) rename akka-docs/java/code/{akka => }/docs/jrouting/PrintlnActor.java (92%) rename akka-docs/java/code/{akka => }/docs/jrouting/RouterViaConfigExample.java (98%) rename akka-docs/java/code/{akka => }/docs/jrouting/RouterViaProgramExample.java (99%) rename akka-docs/java/code/{akka => }/docs/remoting/RemoteActorExample.java (95%) rename akka-docs/java/code/{akka => }/docs/remoting/RemoteDeploymentDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/remoting/RemoteDeploymentDocTestBase.java (97%) rename akka-docs/java/code/{akka => }/docs/serialization/SerializationDocTest.scala (85%) rename akka-docs/java/code/{akka => }/docs/serialization/SerializationDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/transactor/CoordinatedCounter.java (97%) rename akka-docs/java/code/{akka => }/docs/transactor/Coordinator.java (96%) rename akka-docs/java/code/{akka => }/docs/transactor/Counter.java (95%) rename akka-docs/java/code/{akka => }/docs/transactor/FriendlyCounter.java (97%) rename akka-docs/java/code/{akka => }/docs/transactor/Increment.java (93%) rename akka-docs/java/code/{akka => }/docs/transactor/Message.java (77%) rename akka-docs/java/code/{akka => }/docs/transactor/TransactorDocJavaSpec.scala (75%) rename akka-docs/java/code/{akka => }/docs/transactor/TransactorDocTest.java (99%) rename akka-docs/java/code/{akka => }/docs/zeromq/ZeromqDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/zeromq/ZeromqDocTestBase.java (99%) rename akka-docs/modules/code/{akka => }/docs/actor/mailbox/DurableMailboxDocSpec.scala (97%) rename akka-docs/modules/code/{akka => 
}/docs/actor/mailbox/DurableMailboxDocTest.scala (85%) rename akka-docs/modules/code/{akka => }/docs/actor/mailbox/DurableMailboxDocTestBase.java (97%) rename akka-docs/scala/code/{akka => }/docs/actor/ActorDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/FSMDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/FaultHandlingDocSample.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/FaultHandlingDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/SchedulerDocSpec.scala (98%) rename akka-docs/scala/code/{akka => }/docs/actor/TypedActorDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/UnnestedReceives.scala (98%) rename akka-docs/scala/code/{akka => }/docs/agent/AgentDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/camel/Consumers.scala (96%) rename akka-docs/scala/code/{akka => }/docs/camel/Introduction.scala (98%) rename akka-docs/scala/code/{akka => }/docs/dispatcher/DispatcherDocSpec.scala (97%) rename akka-docs/scala/code/{akka => }/docs/event/LoggingDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/extension/ExtensionDocSpec.scala (96%) rename akka-docs/scala/code/{akka => }/docs/extension/SettingsExtensionDocSpec.scala (98%) rename akka-docs/scala/code/{akka => }/docs/future/FutureDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/io/HTTPServer.scala (99%) rename akka-docs/scala/code/{akka => }/docs/remoting/RemoteDeploymentDocSpec.scala (98%) rename akka-docs/scala/code/{akka => }/docs/routing/RouterDocSpec.scala (96%) rename akka-docs/scala/code/{akka => }/docs/routing/RouterTypeExample.scala (99%) rename akka-docs/scala/code/{akka => }/docs/routing/RouterViaConfigExample.scala (98%) rename akka-docs/scala/code/{akka => }/docs/routing/RouterViaProgramExample.scala (98%) rename akka-docs/scala/code/{akka => }/docs/serialization/SerializationDocSpec.scala (94%) rename akka-docs/scala/code/{akka => }/docs/testkit/PlainWordSpec.scala (97%) rename akka-docs/scala/code/{akka => }/docs/testkit/TestKitUsageSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/testkit/TestkitDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/transactor/TransactorDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/zeromq/ZeromqDocSpec.scala (99%) diff --git a/akka-docs/general/code/akka/docs/config/ConfigDoc.java b/akka-docs/general/code/docs/config/ConfigDoc.java similarity index 97% rename from akka-docs/general/code/akka/docs/config/ConfigDoc.java rename to akka-docs/general/code/docs/config/ConfigDoc.java index 69d856947f..ee6393fb1a 100644 --- a/akka-docs/general/code/akka/docs/config/ConfigDoc.java +++ b/akka-docs/general/code/docs/config/ConfigDoc.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.config; +package docs.config; import akka.actor.ActorSystem; import com.typesafe.config.*; diff --git a/akka-docs/general/code/akka/docs/config/ConfigDocSpec.scala b/akka-docs/general/code/docs/config/ConfigDocSpec.scala similarity index 97% rename from akka-docs/general/code/akka/docs/config/ConfigDocSpec.scala rename to akka-docs/general/code/docs/config/ConfigDocSpec.scala index 3b7cb10ed2..643116e14f 100644 --- a/akka-docs/general/code/akka/docs/config/ConfigDocSpec.scala +++ b/akka-docs/general/code/docs/config/ConfigDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.config +package docs.config import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers diff --git a/akka-docs/java/code/akka/docs/actor/FSMDocTest.scala b/akka-docs/java/code/docs/actor/FSMDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/actor/FSMDocTest.scala rename to akka-docs/java/code/docs/actor/FSMDocTest.scala index 11bb542808..7077365d6d 100644 --- a/akka-docs/java/code/akka/docs/actor/FSMDocTest.scala +++ b/akka-docs/java/code/docs/actor/FSMDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/FSMDocTestBase.java b/akka-docs/java/code/docs/actor/FSMDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/actor/FSMDocTestBase.java rename to akka-docs/java/code/docs/actor/FSMDocTestBase.java index aeaca63f92..9064833cb0 100644 --- a/akka-docs/java/code/akka/docs/actor/FSMDocTestBase.java +++ b/akka-docs/java/code/docs/actor/FSMDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#imports-data import java.util.ArrayList; diff --git a/akka-docs/java/code/akka/docs/actor/FaultHandlingTest.scala b/akka-docs/java/code/docs/actor/FaultHandlingTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/actor/FaultHandlingTest.scala rename to akka-docs/java/code/docs/actor/FaultHandlingTest.scala index 03802d6968..9b6fad0609 100644 --- a/akka-docs/java/code/akka/docs/actor/FaultHandlingTest.scala +++ b/akka-docs/java/code/docs/actor/FaultHandlingTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite class FaultHandlingTest extends FaultHandlingTestBase with JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java rename to akka-docs/java/code/docs/actor/FaultHandlingTestBase.java index dc2ce9bae7..bcc4705948 100644 --- a/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java +++ b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#testkit import akka.actor.ActorRef; diff --git a/akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java b/akka-docs/java/code/docs/actor/FirstUntypedActor.java similarity index 95% rename from akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java rename to akka-docs/java/code/docs/actor/FirstUntypedActor.java index 6cfbe75b99..fa5d3d35a0 100644 --- a/akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java +++ b/akka-docs/java/code/docs/actor/FirstUntypedActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor; +package docs.actor; import akka.actor.ActorRef; import akka.actor.Props; diff --git a/akka-docs/java/code/akka/docs/actor/ImmutableMessage.java b/akka-docs/java/code/docs/actor/ImmutableMessage.java similarity index 96% rename from akka-docs/java/code/akka/docs/actor/ImmutableMessage.java rename to akka-docs/java/code/docs/actor/ImmutableMessage.java index 41bc4eb0e5..60e72ecfb5 100644 --- a/akka-docs/java/code/akka/docs/actor/ImmutableMessage.java +++ b/akka-docs/java/code/docs/actor/ImmutableMessage.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; import java.util.ArrayList; import java.util.Collections; diff --git a/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java b/akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java similarity index 96% rename from akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java rename to akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java index 97742d9bd1..025d634b09 100644 --- a/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java +++ b/akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#receive-timeout import akka.actor.ReceiveTimeout; diff --git a/akka-docs/java/code/akka/docs/actor/MyUntypedActor.java b/akka-docs/java/code/docs/actor/MyUntypedActor.java similarity index 95% rename from akka-docs/java/code/akka/docs/actor/MyUntypedActor.java rename to akka-docs/java/code/docs/actor/MyUntypedActor.java index 93a817ef2c..f31fc402c7 100644 --- a/akka-docs/java/code/akka/docs/actor/MyUntypedActor.java +++ b/akka-docs/java/code/docs/actor/MyUntypedActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#my-untyped-actor import akka.actor.UntypedActor; diff --git a/akka-docs/java/code/akka/docs/actor/SchedulerDocTest.scala b/akka-docs/java/code/docs/actor/SchedulerDocTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/actor/SchedulerDocTest.scala rename to akka-docs/java/code/docs/actor/SchedulerDocTest.scala index ecad03213e..9e6b4c9613 100644 --- a/akka-docs/java/code/akka/docs/actor/SchedulerDocTest.scala +++ b/akka-docs/java/code/docs/actor/SchedulerDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/SchedulerDocTestBase.java b/akka-docs/java/code/docs/actor/SchedulerDocTestBase.java similarity index 98% rename from akka-docs/java/code/akka/docs/actor/SchedulerDocTestBase.java rename to akka-docs/java/code/docs/actor/SchedulerDocTestBase.java index 7a58da0f5e..d7e8fa644f 100644 --- a/akka-docs/java/code/akka/docs/actor/SchedulerDocTestBase.java +++ b/akka-docs/java/code/docs/actor/SchedulerDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor; +package docs.actor; //#imports1 import akka.actor.Props; diff --git a/akka-docs/java/code/akka/docs/actor/TypedActorDocTest.scala b/akka-docs/java/code/docs/actor/TypedActorDocTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/actor/TypedActorDocTest.scala rename to akka-docs/java/code/docs/actor/TypedActorDocTest.scala index 476d570b4a..0d9796ca56 100644 --- a/akka-docs/java/code/akka/docs/actor/TypedActorDocTest.scala +++ b/akka-docs/java/code/docs/actor/TypedActorDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java b/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java rename to akka-docs/java/code/docs/actor/TypedActorDocTestBase.java index 30db92ee0f..99dda513ab 100644 --- a/akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java +++ b/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#imports diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTest.scala b/akka-docs/java/code/docs/actor/UntypedActorDocTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/actor/UntypedActorDocTest.scala rename to akka-docs/java/code/docs/actor/UntypedActorDocTest.scala index e341914c8c..8047b94df9 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTest.scala +++ b/akka-docs/java/code/docs/actor/UntypedActorDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java rename to akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java index 146131f61e..c82ce30661 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#imports import akka.actor.ActorRef; diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java b/akka-docs/java/code/docs/actor/UntypedActorSwapper.java similarity index 93% rename from akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java rename to akka-docs/java/code/docs/actor/UntypedActorSwapper.java index b2fb98c305..985c75bfd7 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java +++ b/akka-docs/java/code/docs/actor/UntypedActorSwapper.java @@ -1,9 +1,9 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor; +package docs.actor; -import static akka.docs.actor.UntypedActorSwapper.Swap.SWAP; +import static docs.actor.UntypedActorSwapper.Swap.SWAP; import akka.actor.ActorRef; import akka.actor.Props; import akka.actor.ActorSystem; diff --git a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java similarity index 97% rename from akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java rename to akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java index 4ba8358174..1213ab5949 100644 --- a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java +++ b/akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor.japi; +package docs.actor.japi; //#all //#imports @@ -26,10 +26,10 @@ import static akka.actor.SupervisorStrategy.*; import static akka.pattern.Patterns.ask; import static akka.pattern.Patterns.pipe; -import static akka.docs.actor.japi.FaultHandlingDocSample.WorkerApi.*; -import static akka.docs.actor.japi.FaultHandlingDocSample.CounterServiceApi.*; -import static akka.docs.actor.japi.FaultHandlingDocSample.CounterApi.*; -import static akka.docs.actor.japi.FaultHandlingDocSample.StorageApi.*; +import static docs.actor.japi.FaultHandlingDocSample.WorkerApi.*; +import static docs.actor.japi.FaultHandlingDocSample.CounterServiceApi.*; +import static docs.actor.japi.FaultHandlingDocSample.CounterApi.*; +import static docs.actor.japi.FaultHandlingDocSample.StorageApi.*; //#imports diff --git a/akka-docs/java/code/akka/docs/agent/AgentDocJavaSpec.scala b/akka-docs/java/code/docs/agent/AgentDocJavaSpec.scala similarity index 78% rename from akka-docs/java/code/akka/docs/agent/AgentDocJavaSpec.scala rename to akka-docs/java/code/docs/agent/AgentDocJavaSpec.scala index c3c0c296ed..566a439c62 100644 --- a/akka-docs/java/code/akka/docs/agent/AgentDocJavaSpec.scala +++ b/akka-docs/java/code/docs/agent/AgentDocJavaSpec.scala @@ -1,10 +1,10 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.agent +package docs.agent import org.scalatest.junit.JUnitWrapperSuite class AgentDocJavaSpec extends JUnitWrapperSuite( - "akka.docs.agent.AgentDocTest", + "docs.agent.AgentDocTest", Thread.currentThread.getContextClassLoader) \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/agent/AgentDocTest.java b/akka-docs/java/code/docs/agent/AgentDocTest.java similarity index 98% rename from akka-docs/java/code/akka/docs/agent/AgentDocTest.java rename to akka-docs/java/code/docs/agent/AgentDocTest.java index 553d64eee5..0da96ebfc9 100644 --- a/akka-docs/java/code/akka/docs/agent/AgentDocTest.java +++ b/akka-docs/java/code/docs/agent/AgentDocTest.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.agent; +package docs.agent; import static org.junit.Assert.*; diff --git a/akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTest.scala b/akka-docs/java/code/docs/dispatcher/DispatcherDocTest.scala similarity index 86% rename from akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTest.scala rename to akka-docs/java/code/docs/dispatcher/DispatcherDocTest.scala index 8216c36757..62c9e37051 100644 --- a/akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTest.scala +++ b/akka-docs/java/code/docs/dispatcher/DispatcherDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.dispatcher +package docs.dispatcher import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTestBase.java b/akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java similarity index 96% rename from akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTestBase.java rename to akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java index f080dd52b9..94e4b38121 100644 --- a/akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTestBase.java +++ b/akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.dispatcher; +package docs.dispatcher; //#imports import akka.actor.*; @@ -32,8 +32,8 @@ import static org.junit.Assert.*; import com.typesafe.config.ConfigFactory; -import akka.docs.actor.MyUntypedActor; -import akka.docs.actor.UntypedActorDocTestBase.MyActor; +import docs.actor.MyUntypedActor; +import docs.actor.UntypedActorDocTestBase.MyActor; import akka.testkit.AkkaSpec; public class DispatcherDocTestBase { diff --git a/akka-docs/java/code/akka/docs/event/LoggingDocTest.scala b/akka-docs/java/code/docs/event/LoggingDocTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/event/LoggingDocTest.scala rename to akka-docs/java/code/docs/event/LoggingDocTest.scala index ee44f502a4..1d7f34827f 100644 --- a/akka-docs/java/code/akka/docs/event/LoggingDocTest.scala +++ b/akka-docs/java/code/docs/event/LoggingDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.event +package docs.event import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/event/LoggingDocTestBase.java b/akka-docs/java/code/docs/event/LoggingDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/event/LoggingDocTestBase.java rename to akka-docs/java/code/docs/event/LoggingDocTestBase.java index 8f7b63d8a8..77e46b3f92 100644 --- a/akka-docs/java/code/akka/docs/event/LoggingDocTestBase.java +++ b/akka-docs/java/code/docs/event/LoggingDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.event; +package docs.event; //#imports import akka.event.Logging; diff --git a/akka-docs/java/code/akka/docs/extension/ExtensionDocTest.scala b/akka-docs/java/code/docs/extension/ExtensionDocTest.scala similarity index 86% rename from akka-docs/java/code/akka/docs/extension/ExtensionDocTest.scala rename to akka-docs/java/code/docs/extension/ExtensionDocTest.scala index 7b1b43b6ca..f22e300cfc 100644 --- a/akka-docs/java/code/akka/docs/extension/ExtensionDocTest.scala +++ b/akka-docs/java/code/docs/extension/ExtensionDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension +package docs.extension import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/extension/ExtensionDocTestBase.java b/akka-docs/java/code/docs/extension/ExtensionDocTestBase.java similarity index 98% rename from akka-docs/java/code/akka/docs/extension/ExtensionDocTestBase.java rename to akka-docs/java/code/docs/extension/ExtensionDocTestBase.java index 11dfe4c198..7623d1cc0a 100644 --- a/akka-docs/java/code/akka/docs/extension/ExtensionDocTestBase.java +++ b/akka-docs/java/code/docs/extension/ExtensionDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.extension; +package docs.extension; //#imports import akka.actor.*; diff --git a/akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTest.scala b/akka-docs/java/code/docs/extension/SettingsExtensionDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTest.scala rename to akka-docs/java/code/docs/extension/SettingsExtensionDocTest.scala index 0979c00d4f..60289bfdca 100644 --- a/akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTest.scala +++ b/akka-docs/java/code/docs/extension/SettingsExtensionDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension +package docs.extension import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTestBase.java b/akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java similarity index 98% rename from akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTestBase.java rename to akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java index 9aef290ecb..265c91b206 100644 --- a/akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTestBase.java +++ b/akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension; +package docs.extension; //#imports import akka.actor.Extension; diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTest.scala b/akka-docs/java/code/docs/future/FutureDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/future/FutureDocTest.scala rename to akka-docs/java/code/docs/future/FutureDocTest.scala index 8716beeced..fef5f3d967 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTest.scala +++ b/akka-docs/java/code/docs/future/FutureDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.future +package docs.future import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/docs/future/FutureDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/future/FutureDocTestBase.java rename to akka-docs/java/code/docs/future/FutureDocTestBase.java index d8e59f5d30..2fe2220223 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java +++ b/akka-docs/java/code/docs/future/FutureDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.future; +package docs.future; //#imports1 import akka.dispatch.*; diff --git a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala b/akka-docs/java/code/docs/jrouting/CustomRouterDocTest.scala similarity index 80% rename from akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala rename to akka-docs/java/code/docs/jrouting/CustomRouterDocTest.scala index 48e323c634..d11b07f22a 100644 --- a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala +++ b/akka-docs/java/code/docs/jrouting/CustomRouterDocTest.scala @@ -1,4 +1,4 @@ -package akka.docs.jrouting; +package docs.jrouting; import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java b/akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java similarity index 95% rename from akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java rename to akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java index dc4d140ec3..74e7759b62 100644 --- a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java +++ b/akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.jrouting; +package docs.jrouting; import java.util.List; import java.util.Arrays; @@ -22,9 +22,9 @@ import akka.testkit.AkkaSpec; import com.typesafe.config.ConfigFactory; import static akka.pattern.Patterns.ask; -import static akka.docs.jrouting.CustomRouterDocTestBase.DemocratActor; -import static akka.docs.jrouting.CustomRouterDocTestBase.RepublicanActor; -import static akka.docs.jrouting.CustomRouterDocTestBase.Message.*; +import static docs.jrouting.CustomRouterDocTestBase.DemocratActor; +import static docs.jrouting.CustomRouterDocTestBase.RepublicanActor; +import static docs.jrouting.CustomRouterDocTestBase.Message.*; public class CustomRouterDocTestBase { diff --git a/akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java b/akka-docs/java/code/docs/jrouting/FibonacciActor.java similarity index 97% rename from akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java rename to akka-docs/java/code/docs/jrouting/FibonacciActor.java index 8e426cf8fe..e316f27bce 100644 --- a/akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java +++ b/akka-docs/java/code/docs/jrouting/FibonacciActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.jrouting; +package docs.jrouting; import java.io.Serializable; diff --git a/akka-docs/java/code/akka/docs/jrouting/ParentActor.java b/akka-docs/java/code/docs/jrouting/ParentActor.java similarity index 98% rename from akka-docs/java/code/akka/docs/jrouting/ParentActor.java rename to akka-docs/java/code/docs/jrouting/ParentActor.java index 32a33b3a1b..ada9e92138 100644 --- a/akka-docs/java/code/akka/docs/jrouting/ParentActor.java +++ b/akka-docs/java/code/docs/jrouting/ParentActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.jrouting; +package docs.jrouting; import akka.routing.ScatterGatherFirstCompletedRouter; import akka.routing.BroadcastRouter; diff --git a/akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java b/akka-docs/java/code/docs/jrouting/PrintlnActor.java similarity index 92% rename from akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java rename to akka-docs/java/code/docs/jrouting/PrintlnActor.java index d6ad652ebe..adf56fe863 100644 --- a/akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java +++ b/akka-docs/java/code/docs/jrouting/PrintlnActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.jrouting; +package docs.jrouting; import akka.actor.UntypedActor; diff --git a/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java b/akka-docs/java/code/docs/jrouting/RouterViaConfigExample.java similarity index 98% rename from akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java rename to akka-docs/java/code/docs/jrouting/RouterViaConfigExample.java index 61b9a573d7..1505766196 100644 --- a/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java +++ b/akka-docs/java/code/docs/jrouting/RouterViaConfigExample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.jrouting; +package docs.jrouting; import akka.routing.FromConfig; import akka.actor.ActorRef; diff --git a/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java b/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java similarity index 99% rename from akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java rename to akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java index 44984c3ec7..ce46307eb7 100644 --- a/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java +++ b/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.jrouting; +package docs.jrouting; import akka.routing.RoundRobinRouter; import akka.routing.DefaultResizer; diff --git a/akka-docs/java/code/akka/docs/remoting/RemoteActorExample.java b/akka-docs/java/code/docs/remoting/RemoteActorExample.java similarity index 95% rename from akka-docs/java/code/akka/docs/remoting/RemoteActorExample.java rename to akka-docs/java/code/docs/remoting/RemoteActorExample.java index f7686e744a..3ca25bd153 100644 --- a/akka-docs/java/code/akka/docs/remoting/RemoteActorExample.java +++ b/akka-docs/java/code/docs/remoting/RemoteActorExample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.remoting; +package docs.remoting; import akka.actor.ActorRef; import akka.actor.UntypedActor; diff --git a/akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTest.scala b/akka-docs/java/code/docs/remoting/RemoteDeploymentDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTest.scala rename to akka-docs/java/code/docs/remoting/RemoteDeploymentDocTest.scala index 9290b7c897..4ac46c4504 100644 --- a/akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTest.scala +++ b/akka-docs/java/code/docs/remoting/RemoteDeploymentDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.remoting +package docs.remoting import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTestBase.java b/akka-docs/java/code/docs/remoting/RemoteDeploymentDocTestBase.java similarity index 97% rename from akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTestBase.java rename to akka-docs/java/code/docs/remoting/RemoteDeploymentDocTestBase.java index b105e2b42a..cfb12ac7c4 100644 --- a/akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTestBase.java +++ b/akka-docs/java/code/docs/remoting/RemoteDeploymentDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.remoting; +package docs.remoting; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/akka-docs/java/code/akka/docs/serialization/SerializationDocTest.scala b/akka-docs/java/code/docs/serialization/SerializationDocTest.scala similarity index 85% rename from akka-docs/java/code/akka/docs/serialization/SerializationDocTest.scala rename to akka-docs/java/code/docs/serialization/SerializationDocTest.scala index 26685dea80..ffac606928 100644 --- a/akka-docs/java/code/akka/docs/serialization/SerializationDocTest.scala +++ b/akka-docs/java/code/docs/serialization/SerializationDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.serialization +package docs.serialization import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/serialization/SerializationDocTestBase.java b/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/serialization/SerializationDocTestBase.java rename to akka-docs/java/code/docs/serialization/SerializationDocTestBase.java index aa24c92249..a62827fc98 100644 --- a/akka-docs/java/code/akka/docs/serialization/SerializationDocTestBase.java +++ b/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.serialization; +package docs.serialization; import org.junit.Test; import static org.junit.Assert.*; diff --git a/akka-docs/java/code/akka/docs/transactor/CoordinatedCounter.java b/akka-docs/java/code/docs/transactor/CoordinatedCounter.java similarity index 97% rename from akka-docs/java/code/akka/docs/transactor/CoordinatedCounter.java rename to akka-docs/java/code/docs/transactor/CoordinatedCounter.java index dd7f119005..4bd679f1eb 100644 --- a/akka-docs/java/code/akka/docs/transactor/CoordinatedCounter.java +++ b/akka-docs/java/code/docs/transactor/CoordinatedCounter.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; //#class import akka.actor.*; diff --git a/akka-docs/java/code/akka/docs/transactor/Coordinator.java b/akka-docs/java/code/docs/transactor/Coordinator.java similarity index 96% rename from akka-docs/java/code/akka/docs/transactor/Coordinator.java rename to akka-docs/java/code/docs/transactor/Coordinator.java index f1f04761cd..644eb4312e 100644 --- a/akka-docs/java/code/akka/docs/transactor/Coordinator.java +++ b/akka-docs/java/code/docs/transactor/Coordinator.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.transactor; +package docs.transactor; import akka.actor.*; import akka.transactor.*; diff --git a/akka-docs/java/code/akka/docs/transactor/Counter.java b/akka-docs/java/code/docs/transactor/Counter.java similarity index 95% rename from akka-docs/java/code/akka/docs/transactor/Counter.java rename to akka-docs/java/code/docs/transactor/Counter.java index ea2291afeb..06092c5db0 100644 --- a/akka-docs/java/code/akka/docs/transactor/Counter.java +++ b/akka-docs/java/code/docs/transactor/Counter.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; //#class import akka.transactor.*; diff --git a/akka-docs/java/code/akka/docs/transactor/FriendlyCounter.java b/akka-docs/java/code/docs/transactor/FriendlyCounter.java similarity index 97% rename from akka-docs/java/code/akka/docs/transactor/FriendlyCounter.java rename to akka-docs/java/code/docs/transactor/FriendlyCounter.java index 18f2137ea4..f24c044750 100644 --- a/akka-docs/java/code/akka/docs/transactor/FriendlyCounter.java +++ b/akka-docs/java/code/docs/transactor/FriendlyCounter.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; //#class import akka.actor.*; diff --git a/akka-docs/java/code/akka/docs/transactor/Increment.java b/akka-docs/java/code/docs/transactor/Increment.java similarity index 93% rename from akka-docs/java/code/akka/docs/transactor/Increment.java rename to akka-docs/java/code/docs/transactor/Increment.java index 1d789c99e2..3794ce631d 100644 --- a/akka-docs/java/code/akka/docs/transactor/Increment.java +++ b/akka-docs/java/code/docs/transactor/Increment.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; //#class import akka.actor.ActorRef; diff --git a/akka-docs/java/code/akka/docs/transactor/Message.java b/akka-docs/java/code/docs/transactor/Message.java similarity index 77% rename from akka-docs/java/code/akka/docs/transactor/Message.java rename to akka-docs/java/code/docs/transactor/Message.java index 6a8da72070..0f1edfc51f 100644 --- a/akka-docs/java/code/akka/docs/transactor/Message.java +++ b/akka-docs/java/code/docs/transactor/Message.java @@ -2,6 +2,6 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; public class Message {} diff --git a/akka-docs/java/code/akka/docs/transactor/TransactorDocJavaSpec.scala b/akka-docs/java/code/docs/transactor/TransactorDocJavaSpec.scala similarity index 75% rename from akka-docs/java/code/akka/docs/transactor/TransactorDocJavaSpec.scala rename to akka-docs/java/code/docs/transactor/TransactorDocJavaSpec.scala index 4c61a156e8..6293b2effa 100644 --- a/akka-docs/java/code/akka/docs/transactor/TransactorDocJavaSpec.scala +++ b/akka-docs/java/code/docs/transactor/TransactorDocJavaSpec.scala @@ -2,10 +2,10 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.transactor +package docs.transactor import org.scalatest.junit.JUnitWrapperSuite class TransactorDocJavaSpec extends JUnitWrapperSuite( - "akka.docs.transactor.TransactorDocTest", + "docs.transactor.TransactorDocTest", Thread.currentThread.getContextClassLoader) \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/transactor/TransactorDocTest.java b/akka-docs/java/code/docs/transactor/TransactorDocTest.java similarity index 99% rename from akka-docs/java/code/akka/docs/transactor/TransactorDocTest.java rename to akka-docs/java/code/docs/transactor/TransactorDocTest.java index bb1d38651b..4eaaa0bb31 100644 --- a/akka-docs/java/code/akka/docs/transactor/TransactorDocTest.java +++ b/akka-docs/java/code/docs/transactor/TransactorDocTest.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; import static org.junit.Assert.*; import org.junit.Test; diff --git a/akka-docs/java/code/akka/docs/zeromq/ZeromqDocTest.scala b/akka-docs/java/code/docs/zeromq/ZeromqDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/zeromq/ZeromqDocTest.scala rename to akka-docs/java/code/docs/zeromq/ZeromqDocTest.scala index a9747959e3..c5e6f224da 100644 --- a/akka-docs/java/code/akka/docs/zeromq/ZeromqDocTest.scala +++ b/akka-docs/java/code/docs/zeromq/ZeromqDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.zeromq +package docs.zeromq import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/zeromq/ZeromqDocTestBase.java b/akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/zeromq/ZeromqDocTestBase.java rename to akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java index ee8252a6ad..1a311c9529 100644 --- a/akka-docs/java/code/akka/docs/zeromq/ZeromqDocTestBase.java +++ b/akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.zeromq; +package docs.zeromq; //#pub-socket import akka.zeromq.Bind; diff --git a/akka-docs/java/extending-akka.rst b/akka-docs/java/extending-akka.rst index 6e8bfca4cb..38fedf575a 100644 --- a/akka-docs/java/extending-akka.rst +++ b/akka-docs/java/extending-akka.rst @@ -54,7 +54,7 @@ in the "akka.extensions" section of the config you provide to your ``ActorSystem :: akka { - extensions = ["akka.docs.extension.ExtensionDocTestBase.CountExtension"] + extensions = ["docs.extension.ExtensionDocTestBase.CountExtension"] } Applicability diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index 4c7b023959..b57a6494e5 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -85,7 +85,7 @@ Customization ============= So, lets say that you want to create your own ``Serializer``, -you saw the ``akka.docs.serialization.MyOwnSerializer`` in the config example above? +you saw the ``docs.serialization.MyOwnSerializer`` in the config example above? 
Creating new Serializers ------------------------ diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala similarity index 97% rename from akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala rename to akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala index c81f16e896..ac6c58ad08 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor.mailbox +package docs.actor.mailbox //#imports import akka.actor.Props @@ -107,7 +107,7 @@ import akka.actor.mailbox.DurableMailboxSpec object MyMailboxSpec { val config = """ MyStorage-dispatcher { - mailbox-type = akka.docs.actor.mailbox.MyMailboxType + mailbox-type = docs.actor.mailbox.MyMailboxType } """ } diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTest.scala similarity index 85% rename from akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala rename to akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTest.scala index eba732e6a7..6b156ef791 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor.mailbox +package docs.actor.mailbox import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTestBase.java similarity index 97% rename from akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java rename to akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTestBase.java index 25158446b6..06e867c786 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor.mailbox; +package docs.actor.mailbox; //#imports import akka.actor.Props; diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/docs/actor/ActorDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala rename to akka-docs/scala/code/docs/actor/ActorDocSpec.scala index 8aed17605c..ee05e95d42 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/ActorDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#imports1 import akka.actor.Actor diff --git a/akka-docs/scala/code/akka/docs/actor/FSMDocSpec.scala b/akka-docs/scala/code/docs/actor/FSMDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/FSMDocSpec.scala rename to akka-docs/scala/code/docs/actor/FSMDocSpec.scala index 158f8979a0..75b0309a42 100644 --- a/akka-docs/scala/code/akka/docs/actor/FSMDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FSMDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor +package docs.actor //#test-code import akka.testkit.AkkaSpec diff --git a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala b/akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala rename to akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala index d08bcb53b2..79f5a5d084 100644 --- a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#all //#imports diff --git a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSpec.scala rename to akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala index ca1eccb73a..8ce16f1021 100644 --- a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#testkit import akka.testkit.{ AkkaSpec, ImplicitSender, EventFilter } diff --git a/akka-docs/scala/code/akka/docs/actor/SchedulerDocSpec.scala b/akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/actor/SchedulerDocSpec.scala rename to akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala index b6bffecb46..f711d85129 100644 --- a/akka-docs/scala/code/akka/docs/actor/SchedulerDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#imports1 import akka.actor.Actor diff --git a/akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala b/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala rename to akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala index f7c5fa9bf7..e2c8db16a4 100644 --- a/akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#imports import akka.dispatch.{ Promise, Future, Await } diff --git a/akka-docs/scala/code/akka/docs/actor/UnnestedReceives.scala b/akka-docs/scala/code/docs/actor/UnnestedReceives.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/actor/UnnestedReceives.scala rename to akka-docs/scala/code/docs/actor/UnnestedReceives.scala index 194a958cce..bb77fe4d1d 100644 --- a/akka-docs/scala/code/akka/docs/actor/UnnestedReceives.scala +++ b/akka-docs/scala/code/docs/actor/UnnestedReceives.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor +package docs.actor import akka.actor._ import scala.collection.mutable.ListBuffer diff --git a/akka-docs/scala/code/akka/docs/agent/AgentDocSpec.scala b/akka-docs/scala/code/docs/agent/AgentDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/agent/AgentDocSpec.scala rename to akka-docs/scala/code/docs/agent/AgentDocSpec.scala index 1e9ec1fd69..418159f638 100644 --- a/akka-docs/scala/code/akka/docs/agent/AgentDocSpec.scala +++ b/akka-docs/scala/code/docs/agent/AgentDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.agent +package docs.agent import akka.agent.Agent import akka.util.duration._ diff --git a/akka-docs/scala/code/akka/docs/camel/Consumers.scala b/akka-docs/scala/code/docs/camel/Consumers.scala similarity index 96% rename from akka-docs/scala/code/akka/docs/camel/Consumers.scala rename to akka-docs/scala/code/docs/camel/Consumers.scala index 90f181df3f..df7161b9e6 100644 --- a/akka-docs/scala/code/akka/docs/camel/Consumers.scala +++ b/akka-docs/scala/code/docs/camel/Consumers.scala @@ -1,4 +1,4 @@ -package akka.docs.camel +package docs.camel object Consumers { { diff --git a/akka-docs/scala/code/akka/docs/camel/Introduction.scala b/akka-docs/scala/code/docs/camel/Introduction.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/camel/Introduction.scala rename to akka-docs/scala/code/docs/camel/Introduction.scala index 4899843a27..eaf4c400f6 100644 --- a/akka-docs/scala/code/akka/docs/camel/Introduction.scala +++ b/akka-docs/scala/code/docs/camel/Introduction.scala @@ -1,4 +1,4 @@ -package akka.docs.camel +package docs.camel object Introduction { { diff --git a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala similarity index 97% rename from akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala rename to akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala index 1452d72088..3ff8d9c1ea 100644 --- a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.dispatcher +package docs.dispatcher import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.MustMatchers @@ -91,13 +91,13 @@ object DispatcherDocSpec { //#prio-dispatcher-config prio-dispatcher { - mailbox-type = "akka.docs.dispatcher.DispatcherDocSpec$MyPrioMailbox" + mailbox-type = "docs.dispatcher.DispatcherDocSpec$MyPrioMailbox" } //#prio-dispatcher-config //#prio-dispatcher-config-java prio-dispatcher-java { - mailbox-type = "akka.docs.dispatcher.DispatcherDocTestBase$MyPrioMailbox" + mailbox-type = "docs.dispatcher.DispatcherDocTestBase$MyPrioMailbox" //Other dispatcher configuration goes here } //#prio-dispatcher-config-java diff --git a/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala b/akka-docs/scala/code/docs/event/LoggingDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala rename to akka-docs/scala/code/docs/event/LoggingDocSpec.scala index 0aa29549c9..7e2fccb876 100644 --- a/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala +++ b/akka-docs/scala/code/docs/event/LoggingDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.event +package docs.event import akka.testkit.AkkaSpec import akka.actor.Actor diff --git a/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala b/akka-docs/scala/code/docs/extension/ExtensionDocSpec.scala similarity index 96% rename from akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala rename to akka-docs/scala/code/docs/extension/ExtensionDocSpec.scala index 05baa28ecb..c2558fb4f1 100644 --- a/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala +++ b/akka-docs/scala/code/docs/extension/ExtensionDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension +package docs.extension import java.util.concurrent.atomic.AtomicLong import akka.actor.Actor @@ -45,7 +45,7 @@ object ExtensionDocSpec { val config = """ //#config akka { - extensions = ["akka.docs.extension.CountExtension$"] + extensions = ["docs.extension.CountExtension$"] } //#config """ diff --git a/akka-docs/scala/code/akka/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/extension/SettingsExtensionDocSpec.scala rename to akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala index 05765d27a5..a1e033e386 100644 --- a/akka-docs/scala/code/akka/docs/extension/SettingsExtensionDocSpec.scala +++ b/akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension +package docs.extension //#imports import akka.actor.Extension diff --git a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala b/akka-docs/scala/code/docs/future/FutureDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala rename to akka-docs/scala/code/docs/future/FutureDocSpec.scala index cee2eaeef8..66e80578fd 100644 --- a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala +++ b/akka-docs/scala/code/docs/future/FutureDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.future +package docs.future import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.MustMatchers diff --git a/akka-docs/scala/code/akka/docs/io/HTTPServer.scala b/akka-docs/scala/code/docs/io/HTTPServer.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/io/HTTPServer.scala rename to akka-docs/scala/code/docs/io/HTTPServer.scala index 01bb53023b..b6b80aa27f 100644 --- a/akka-docs/scala/code/akka/docs/io/HTTPServer.scala +++ b/akka-docs/scala/code/docs/io/HTTPServer.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.io +package docs.io //#imports import akka.actor._ diff --git a/akka-docs/scala/code/akka/docs/remoting/RemoteDeploymentDocSpec.scala b/akka-docs/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/remoting/RemoteDeploymentDocSpec.scala rename to akka-docs/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala index 0c65b3dc76..b391494a3b 100644 --- a/akka-docs/scala/code/akka/docs/remoting/RemoteDeploymentDocSpec.scala +++ b/akka-docs/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.remoting +package docs.remoting import akka.actor.{ ExtendedActorSystem, ActorSystem, Actor, ActorRef } import akka.testkit.{ AkkaSpec, ImplicitSender } diff --git a/akka-docs/scala/code/akka/docs/routing/RouterDocSpec.scala b/akka-docs/scala/code/docs/routing/RouterDocSpec.scala similarity index 96% rename from akka-docs/scala/code/akka/docs/routing/RouterDocSpec.scala rename to akka-docs/scala/code/docs/routing/RouterDocSpec.scala index 229c66f13e..c71228d06c 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterDocSpec.scala +++ b/akka-docs/scala/code/docs/routing/RouterDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.routing +package docs.routing import RouterDocSpec.MyActor import akka.actor.{ Props, Actor } diff --git a/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala b/akka-docs/scala/code/docs/routing/RouterTypeExample.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala rename to akka-docs/scala/code/docs/routing/RouterTypeExample.scala index 6ec475a874..421c7af3bb 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterTypeExample.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.routing +package docs.routing import akka.routing.{ ScatterGatherFirstCompletedRouter, BroadcastRouter, RandomRouter, RoundRobinRouter } import annotation.tailrec diff --git a/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala b/akka-docs/scala/code/docs/routing/RouterViaConfigExample.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala rename to akka-docs/scala/code/docs/routing/RouterViaConfigExample.scala index cc840eedc5..5d34e429bb 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterViaConfigExample.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.routing +package docs.routing import akka.actor.{ Actor, Props, ActorSystem } import com.typesafe.config.ConfigFactory diff --git a/akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala b/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala rename to akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala index 50b141e7b7..195fc20445 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.routing +package docs.routing import akka.routing.RoundRobinRouter import akka.actor.{ ActorRef, Props, Actor, ActorSystem } diff --git a/akka-docs/scala/code/akka/docs/serialization/SerializationDocSpec.scala b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala similarity index 94% rename from akka-docs/scala/code/akka/docs/serialization/SerializationDocSpec.scala rename to akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala index e614cc9903..61086b78a6 100644 --- a/akka-docs/scala/code/akka/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.serialization +package docs.serialization import org.scalatest.matchers.MustMatchers import akka.testkit._ @@ -87,7 +87,7 @@ class SerializationDocSpec extends AkkaSpec { serializers { java = "akka.serialization.JavaSerializer" proto = "akka.serialization.ProtobufSerializer" - myown = "akka.docs.serialization.MyOwnSerializer" + myown = "docs.serialization.MyOwnSerializer" } } } @@ -105,14 +105,14 @@ class SerializationDocSpec extends AkkaSpec { serializers { java = "akka.serialization.JavaSerializer" proto = "akka.serialization.ProtobufSerializer" - myown = "akka.docs.serialization.MyOwnSerializer" + myown = "docs.serialization.MyOwnSerializer" } serialization-bindings { "java.lang.String" = java - "akka.docs.serialization.Customer" = java + "docs.serialization.Customer" = java "com.google.protobuf.Message" = proto - "akka.docs.serialization.MyOwnSerializable" = myown + "docs.serialization.MyOwnSerializable" = myown "java.lang.Boolean" = myown } } diff --git a/akka-docs/scala/code/akka/docs/testkit/PlainWordSpec.scala b/akka-docs/scala/code/docs/testkit/PlainWordSpec.scala similarity index 97% rename from akka-docs/scala/code/akka/docs/testkit/PlainWordSpec.scala rename to akka-docs/scala/code/docs/testkit/PlainWordSpec.scala index 8df13da2ca..2da67c9156 100644 --- a/akka-docs/scala/code/akka/docs/testkit/PlainWordSpec.scala +++ b/akka-docs/scala/code/docs/testkit/PlainWordSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.testkit +package docs.testkit //#plain-spec import akka.actor.ActorSystem diff --git a/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala b/akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala rename to akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala index d2b2f9367d..2ca1dbcef8 100644 --- a/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.testkit +package docs.testkit //#testkit-usage import scala.util.Random diff --git a/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala rename to akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala index 2b2cb003a9..ddb3eeaf1d 100644 --- a/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.testkit +package docs.testkit //#imports-test-probe import akka.testkit.TestProbe diff --git a/akka-docs/scala/code/akka/docs/transactor/TransactorDocSpec.scala b/akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/transactor/TransactorDocSpec.scala rename to akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala index fa76f54744..c1556b837d 100644 --- a/akka-docs/scala/code/akka/docs/transactor/TransactorDocSpec.scala +++ b/akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.transactor +package docs.transactor import akka.actor._ import akka.transactor._ diff --git a/akka-docs/scala/code/akka/docs/zeromq/ZeromqDocSpec.scala b/akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/zeromq/ZeromqDocSpec.scala rename to akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala index dba4989d87..812e0edaaa 100644 --- a/akka-docs/scala/code/akka/docs/zeromq/ZeromqDocSpec.scala +++ b/akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.zeromq +package docs.zeromq import akka.actor.Actor import akka.actor.Props diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index 2ab0a7b633..15525a29f2 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -84,7 +84,7 @@ Customization ============= So, lets say that you want to create your own ``Serializer``, -you saw the ``akka.docs.serialization.MyOwnSerializer`` in the config example above? +you saw the ``docs.serialization.MyOwnSerializer`` in the config example above? Creating new Serializers ------------------------ From 5c48cbb1451764da682cfbc302eb9fe846e460d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 22 May 2012 12:08:49 +0200 Subject: [PATCH 073/106] Added section to remote docs about remote security as well as improved doc in reference config --- akka-docs/java/remoting.rst | 60 ++++++++++++++++++ akka-docs/scala/remoting.rst | 61 +++++++++++++++++++ akka-remote/src/main/resources/reference.conf | 4 +- 3 files changed, 123 insertions(+), 2 deletions(-) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index 376eab2584..c4c5edee5f 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -294,3 +294,63 @@ which holds the transport used (RemoteTransport) and optionally the address that To intercept when an inbound remote client has been closed you listen to ``RemoteServerClientClosed`` which holds the transport used (RemoteTransport) and optionally the address of the remote client that was closed (Option
). +Remote Security +^^^^^^^^^^^^^^^ + +Akka provides a couple of ways to enhance security between remote nodes (client/server): + +* Untrusted Mode +* Secure Cookie Handshake + +Untrusted Mode +-------------- + +You can enable untrusted mode to prevent clients from sending system messages to the server, +e.g. messages like the following: + +* ``Create`` +* ``Recreate`` +* ``Suspend`` +* ``Resume`` +* ``Terminate`` +* ``Supervise`` +* ``ChildTerminated`` +* ``Link`` +* ``Unlink`` + +Here is how to turn it on in the config:: + + akka { + actor { + remote { + untrusted-mode = on + } + } + } + +Secure Cookie Handshake +----------------------- + +Akka remoting also allows you to specify a secure cookie that will be exchanged and ensured to be identical +in the connection handshake between the client and the server. If they are not identical then the client +will not be allowed to connect to the server. + +The secure cookie can be any kind of string, but the recommended approach is to generate a cryptographically +secure cookie using this script ``$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh`` or from code +using the ``akka.util.Crypt.generateSecureCookie()`` utility method. + +You have to ensure that both the connecting client and the server have the same secure cookie as well +as the ``require-cookie`` option turned on. + +Here is an example config:: + + akka { + actor { + remote { + netty { + secure-cookie = "090A030E0F0A05010900000A0C0E0C0B03050D05" + require-cookie = on + } + } + } + } diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 88096d90d1..5b36226b24 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -301,3 +301,64 @@ which holds the transport used (RemoteTransport) and optionally the address that To intercept when an inbound remote client has been closed you listen to ``RemoteServerClientClosed`` which holds the transport used (RemoteTransport) and optionally the address of the remote client that was closed (Option[Address]). + +Remote Security +^^^^^^^^^^^^^^^ + +Akka provides a couple of ways to enhance security between remote nodes (client/server): + +* Untrusted Mode +* Secure Cookie Handshake + +Untrusted Mode +-------------- + +You can enable untrusted mode to prevent clients from sending system messages to the server, +e.g. messages like the following: + +* ``Create`` +* ``Recreate`` +* ``Suspend`` +* ``Resume`` +* ``Terminate`` +* ``Supervise`` +* ``ChildTerminated`` +* ``Link`` +* ``Unlink`` + +Here is how to turn it on in the config:: + + akka { + actor { + remote { + untrusted-mode = on + } + } + } + +Secure Cookie Handshake +----------------------- + +Akka remoting also allows you to specify a secure cookie that will be exchanged and ensured to be identical +in the connection handshake between the client and the server. If they are not identical then the client +will not be allowed to connect to the server. + +The secure cookie can be any kind of string, but the recommended approach is to generate a cryptographically +secure cookie using this script ``$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh`` or from code +using the ``akka.util.Crypt.generateSecureCookie()`` utility method. + +You have to ensure that both the connecting client and the server have the same secure cookie as well +as the ``require-cookie`` option turned on. 
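For illustration only, here is a minimal Scala sketch of generating such a cookie from code. It assumes that the ``akka.util.Crypt.generateSecureCookie()`` method mentioned above simply returns the cookie as a ``String``; the ``GenerateSecureCookie`` object name is invented for this sketch::

    import akka.util.Crypt

    object GenerateSecureCookie extends App {
      // print a fresh cookie, to be pasted into akka.remote.netty.secure-cookie
      // (assumption: generateSecureCookie() returns the cookie as a String)
      val cookie = Crypt.generateSecureCookie()
      println(cookie)
    }
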
+ +Here is an example config:: + + akka { + actor { + remote { + netty { + secure-cookie = "090A030E0F0A05010900000A0C0E0C0B03050D05" + require-cookie = on + } + } + } + } diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 11a4da0711..97b85895ed 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -60,8 +60,8 @@ akka { # default is a TCP-based remote transport based on Netty transport = "akka.remote.netty.NettyRemoteTransport" - # Enable untrusted mode for full security of server managed actors, allows - # untrusted clients to connect. + # Enable untrusted mode for full security of server managed actors, prevents system messages from being sent + # by clients, e.g. messages like 'Create', 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc. untrusted-mode = off # Timeout for ACK of cluster operations, like checking actor out etc. From 916c2d4d11f4701dbf0cf11f94b5dd5f20180cdf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 12:14:33 +0200 Subject: [PATCH 074/106] Switching to checking InstantiationException for both create and recreate --- akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index b611d96842..3d93e52a54 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -118,7 +118,7 @@ class ActorKilledException private[akka] (message: String, cause: Throwable) * An InvalidActorNameException is thrown when you try to convert something, usually a String, to an Actor name * which doesn't validate. */ -case class InvalidActorNameException(message: String) extends AkkaException(message) +class InvalidActorNameException(message: String) extends AkkaException(message) /** * An ActorInitializationException is thrown when the the initialization logic for an Actor fails. */ diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 3380d51de0..3b2c743a6b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -498,7 +498,7 @@ private[akka] class ActorCell( import ActorCell.behaviorStackPlaceHolder behaviorStack = behaviorStackPlaceHolder - val instance = props.creator() + val instance = props.creator.apply() if (instance eq null) throw new ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") @@ -532,8 +532,7 @@ private[akka] class ActorCell( a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new UntypedActorFactory ... ) or is missing an appropriate, reachable no-args constructor. 
""", i.getCause) - case NonFatal(e) ⇒ - throw new ActorInitializationException(self, "exception during creation", e) + case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e) } } @@ -557,7 +556,10 @@ private[akka] class ActorCell( doRecreate(cause, failedActor) } } catch { - case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e) + case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e match { + case i: InstantiationException => i.getCause + case other => other + }) } } From 9ac11a643201a27eb65c4963da794e3a042ed326 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 22 May 2012 13:07:05 +0200 Subject: [PATCH 075/106] improve documentation of explicitly given routees --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++-- .../java/code/docs/jrouting/RouterViaProgramExample.java | 2 +- akka-docs/java/routing.rst | 7 ++++++- .../scala/code/docs/routing/RouterViaProgramExample.scala | 2 +- akka-docs/scala/routing.rst | 7 ++++++- 5 files changed, 16 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 3b2c743a6b..0955595640 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -557,8 +557,8 @@ private[akka] class ActorCell( } } catch { case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e match { - case i: InstantiationException => i.getCause - case other => other + case i: InstantiationException ⇒ i.getCause + case other ⇒ other }) } } diff --git a/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java b/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java index ce46307eb7..72843b44c6 100644 --- a/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java +++ b/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java @@ -55,7 +55,7 @@ public class RouterViaProgramExample { ActorRef actor2 = system.actorOf(new Props(ExampleActor.class)); ActorRef actor3 = system.actorOf(new Props(ExampleActor.class)); Iterable routees = Arrays.asList(new ActorRef[] { actor1, actor2, actor3 }); - ActorRef router2 = system.actorOf(new Props(ExampleActor.class).withRouter(RoundRobinRouter.create(routees))); + ActorRef router2 = system.actorOf(new Props().withRouter(RoundRobinRouter.create(routees))); //#programmaticRoutingRoutees for (int i = 1; i <= 6; i++) { router2.tell(new ExampleActor.Message(i)); diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 4d01642a72..9bd770f9f6 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -33,6 +33,11 @@ You can also give the router already created routees as in: .. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingRoutees +It should be noted that no actor factory or class needs to be provided in this +case, as the ``Router`` will not create any children on its own (which is not +true anymore when using a resizer). The routees can also be specified by giving +their path strings. + When you create a router programmatically you define the number of routees *or* you pass already created routees to it. If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. 
@@ -48,7 +53,7 @@ Once you have the router actor it is just to send messages to it as you would to router.tell(new MyMsg()); -The router will apply its behavior to the message it receives and forward it to the routees. +The router will forward the message to its routees according to its routing policy. Remotely Deploying Routees ************************** diff --git a/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala b/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala index 195fc20445..79219b742b 100644 --- a/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala @@ -29,7 +29,7 @@ object RoutingProgrammaticallyExample extends App { val actor2 = system.actorOf(Props[ExampleActor1]) val actor3 = system.actorOf(Props[ExampleActor1]) val routees = Vector[ActorRef](actor1, actor2, actor3) - val router2 = system.actorOf(Props[ExampleActor1].withRouter( + val router2 = system.actorOf(Props().withRouter( RoundRobinRouter(routees = routees))) //#programmaticRoutingRoutees 1 to 6 foreach { i ⇒ router2 ! Message1(i) } diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 737c9e31e7..a66e7f890d 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -33,6 +33,11 @@ You can also give the router already created routees as in: .. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingRoutees +It should be noted that no actor factory or class needs to be provided in this +case, as the ``Router`` will not create any children on its own (which is not +true anymore when using a resizer). The routees can also be specified by giving +their path strings. + When you create a router programmatically you define the number of routees *or* you pass already created routees to it. If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. @@ -48,7 +53,7 @@ Once you have the router actor it is just to send messages to it as you would to router ! MyMsg -The router will apply its behavior to the message it receives and forward it to the routees. +The router will forward the message to its routees according to its routing policy. 
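As a compact illustration of the programmatic routing described above, the following sketch builds a router over already-created routees and sends messages through it. The ``Props().withRouter(RoundRobinRouter(routees = ...))`` usage mirrors the ``RouterViaProgramExample`` change in this patch; the ``Worker`` actor, the enclosing ``RouterWithRouteesSketch`` object and the system name are invented for the sketch::

    import akka.actor.{ Actor, ActorRef, ActorSystem, Props }
    import akka.routing.RoundRobinRouter

    // a trivial routee actor, defined only for this sketch
    class Worker extends Actor {
      def receive = { case msg ⇒ println(self.path.name + " got " + msg) }
    }

    object RouterWithRouteesSketch extends App {
      val system = ActorSystem("RouterSketch")
      // the routees are created first; the router itself needs no actor class
      val routees = Vector[ActorRef](
        system.actorOf(Props[Worker]),
        system.actorOf(Props[Worker]),
        system.actorOf(Props[Worker]))
      val router = system.actorOf(Props().withRouter(RoundRobinRouter(routees = routees)))
      // each message is forwarded to one routee according to the round-robin policy
      (1 to 6) foreach { i ⇒ router ! ("hello-" + i) }
      system.shutdown()
    }
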
Remotely Deploying Routees ************************** From 4dc4cdde26538a43a16e90deaffc368b7d7de403 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 13:49:04 +0200 Subject: [PATCH 076/106] Arrow-formatting ;-) --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 3b2c743a6b..0955595640 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -557,8 +557,8 @@ private[akka] class ActorCell( } } catch { case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e match { - case i: InstantiationException => i.getCause - case other => other + case i: InstantiationException ⇒ i.getCause + case other ⇒ other }) } } From f92f7431dd0f53532e5bd99389aedf5315024331 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 14:57:27 +0200 Subject: [PATCH 077/106] Adding references to Roman Levensteins akka-protostuff and akka-quickser in the serialization docs --- akka-docs/java/serialization.rst | 7 +++++++ akka-docs/scala/serialization.rst | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index 4c7b023959..3e8c8162e7 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -137,3 +137,10 @@ representation into a real reference. :class:`DynamicVariable` is a thread-local variable, so be sure to have it set while deserializing anything which might contain actor references. +External Akka Serializers +========================= + +`Akka-protostuff by Roman Levenstein`_ + + +`Akka-quickser by Roman Levenstein`_ diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index 2ab0a7b633..296afa7068 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -135,3 +135,11 @@ representation into a real reference. :class:`DynamicVariable` is a thread-local variable, so be sure to have it set while deserializing anything which might contain actor references. 
+ +External Akka Serializers +========================= + +`Akka-protostuff by Roman Levenstein`_ + + +`Akka-quickser by Roman Levenstein`_ From 508d8f70a5a24e02e5462f58f747e109fef2daf7 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 22 May 2012 15:19:45 +0200 Subject: [PATCH 078/106] incorporate review comments into TestConductor work - protect all internal API using private[akka] and ScalaDoc - remove package object which was after a previous refactoring only used from a single place anyway - document all public API methods, add brief description how failure injector works - include remoteTests in the top-level aggregate project --- .../akka/remote/testconductor/Conductor.scala | 42 ++++++++--- .../akka/remote/testconductor/DataTypes.scala | 73 +++++++++++++------ .../akka/remote/testconductor/Extension.scala | 18 ++++- .../NetworkFailureInjector.scala | 24 +++++- .../akka/remote/testconductor/Player.scala | 13 +++- .../testconductor/RemoteConnection.scala | 25 +++++-- .../TestConductorTransport.scala | 5 +- .../akka/remote/testconductor/package.scala | 31 -------- .../remote/netty/NettyRemoteSupport.scala | 37 +++++++++- project/AkkaBuild.scala | 2 +- 10 files changed, 188 insertions(+), 82 deletions(-) delete mode 100644 akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 6c26fcaae2..1ec172e9ce 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -50,7 +50,7 @@ trait Conductor { this: TestConductorExt ⇒ private var _controller: ActorRef = _ private def controller: ActorRef = _controller match { - case null ⇒ throw new RuntimeException("TestConductorServer was not started") + case null ⇒ throw new IllegalStateException("TestConductorServer was not started") case x ⇒ x } @@ -169,10 +169,11 @@ trait Conductor { this: TestConductorExt ⇒ * * @param node is the symbolic name of the node which is to be affected */ - def kill(node: RoleName): Future[Done] = { - import Settings.QueryTimeout - controller ? Terminate(node, -1) mapTo - } + // TODO: uncomment (and implement in Controller) if really needed + // def kill(node: RoleName): Future[Done] = { + // import Settings.QueryTimeout + // controller ? Terminate(node, -1) mapTo + // } /** * Obtain the list of remote host names currently registered. @@ -201,8 +202,10 @@ trait Conductor { this: TestConductorExt ⇒ * This handler is installed at the end of the controller’s netty pipeline. Its only * purpose is to dispatch incoming messages to the right ServerFSM actor. There is * one shared instance of this class for all connections accepted by one Controller. + * + * INTERNAL API. */ -class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { +private[akka] class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { val clients = new ConcurrentHashMap[Channel, ActorRef]() @@ -235,7 +238,10 @@ class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAd } -object ServerFSM { +/** + * INTERNAL API. 
+ */ +private[akka] object ServerFSM { sealed trait State case object Initial extends State case object Ready extends State @@ -253,8 +259,10 @@ object ServerFSM { * [[akka.remote.testconductor.Done]] message, and there can be only one such * request outstanding at a given time (i.e. a Send fails if the previous has * not yet been acknowledged). + * + * INTERNAL API. */ -class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] { +private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] { import ServerFSM._ import akka.actor.FSM._ import Controller._ @@ -317,7 +325,10 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } } -object Controller { +/** + * INTERNAL API. + */ +private[akka] object Controller { case class ClientDisconnected(name: RoleName) case object GetNodes case object GetSockAddr @@ -329,8 +340,10 @@ object Controller { * This controls test execution by managing barriers (delegated to * [[akka.remote.testconductor.BarrierCoordinator]], its child) and allowing * network and other failures to be injected at the test nodes. + * + * INTERNAL API. */ -class Controller(private var initialParticipants: Int, controllerPort: InetSocketAddress) extends Actor { +private[akka] class Controller(private var initialParticipants: Int, controllerPort: InetSocketAddress) extends Actor { import Controller._ import BarrierCoordinator._ @@ -418,7 +431,10 @@ class Controller(private var initialParticipants: Int, controllerPort: InetSocke } } -object BarrierCoordinator { +/** + * INTERNAL API. + */ +private[akka] object BarrierCoordinator { sealed trait State case object Idle extends State case object Waiting extends State @@ -447,8 +463,10 @@ object BarrierCoordinator { * EnterBarrier return message. In case of planned removals, this may just happen * earlier, in case of failures the current barrier (and all subsequent ones) will * be failed by sending BarrierFailed responses. + * + * INTERNAL API. 
*/ -class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] { +private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] { import BarrierCoordinator._ import akka.actor.FSM._ import Controller._ diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index 2bb7d50c37..022ae2d89b 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -13,44 +13,59 @@ import org.jboss.netty.handler.codec.oneone.OneToOneDecoder case class RoleName(name: String) -case class ToClient(msg: ClientOp with NetworkOp) -case class ToServer(msg: ServerOp with NetworkOp) +private[akka] case class ToClient(msg: ClientOp with NetworkOp) +private[akka] case class ToServer(msg: ServerOp with NetworkOp) -sealed trait ClientOp // messages sent to from Conductor to Player -sealed trait ServerOp // messages sent to from Player to Conductor -sealed trait CommandOp // messages sent from TestConductorExt to Conductor -sealed trait NetworkOp // messages sent over the wire -sealed trait UnconfirmedClientOp extends ClientOp // unconfirmed messages going to the Player -sealed trait ConfirmedClientOp extends ClientOp +private[akka] sealed trait ClientOp // messages sent to from Conductor to Player +private[akka] sealed trait ServerOp // messages sent to from Player to Conductor +private[akka] sealed trait CommandOp // messages sent from TestConductorExt to Conductor +private[akka] sealed trait NetworkOp // messages sent over the wire +private[akka] sealed trait UnconfirmedClientOp extends ClientOp // unconfirmed messages going to the Player +private[akka] sealed trait ConfirmedClientOp extends ClientOp /** * First message of connection sets names straight. 
*/ -case class Hello(name: String, addr: Address) extends NetworkOp +private[akka] case class Hello(name: String, addr: Address) extends NetworkOp -case class EnterBarrier(name: String) extends ServerOp with NetworkOp -case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp +private[akka] case class EnterBarrier(name: String) extends ServerOp with NetworkOp +private[akka] case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp -case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) extends CommandOp -case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp +private[akka] case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) extends CommandOp +private[akka] case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp -case class Disconnect(node: RoleName, target: RoleName, abort: Boolean) extends CommandOp -case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with NetworkOp +private[akka] case class Disconnect(node: RoleName, target: RoleName, abort: Boolean) extends CommandOp +private[akka] case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with NetworkOp -case class Terminate(node: RoleName, exitValueOrKill: Int) extends CommandOp -case class TerminateMsg(exitValue: Int) extends ConfirmedClientOp with NetworkOp +private[akka] case class Terminate(node: RoleName, exitValueOrKill: Int) extends CommandOp +private[akka] case class TerminateMsg(exitValue: Int) extends ConfirmedClientOp with NetworkOp -case class GetAddress(node: RoleName) extends ServerOp with NetworkOp -case class AddressReply(node: RoleName, addr: Address) extends UnconfirmedClientOp with NetworkOp +private[akka] case class GetAddress(node: RoleName) extends ServerOp with NetworkOp +private[akka] case class AddressReply(node: RoleName, addr: Address) extends UnconfirmedClientOp with NetworkOp -abstract class Done extends ServerOp with UnconfirmedClientOp with NetworkOp -case object Done extends Done { +private[akka] abstract class Done extends ServerOp with UnconfirmedClientOp with NetworkOp +private[akka] case object Done extends Done { def getInstance: Done = this } -case class Remove(node: RoleName) extends CommandOp +private[akka] case class Remove(node: RoleName) extends CommandOp + +private[akka] class MsgEncoder extends OneToOneEncoder { + + implicit def address2proto(addr: Address): TCP.Address = + TCP.Address.newBuilder + .setProtocol(addr.protocol) + .setSystem(addr.system) + .setHost(addr.host.get) + .setPort(addr.port.get) + .build + + implicit def direction2proto(dir: Direction): TCP.Direction = dir match { + case Direction.Send ⇒ TCP.Direction.Send + case Direction.Receive ⇒ TCP.Direction.Receive + case Direction.Both ⇒ TCP.Direction.Both + } -class MsgEncoder extends OneToOneEncoder { def encode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { case x: NetworkOp ⇒ val w = TCP.Wrapper.newBuilder @@ -81,7 +96,17 @@ class MsgEncoder extends OneToOneEncoder { } } -class MsgDecoder extends OneToOneDecoder { +private[akka] class MsgDecoder extends OneToOneDecoder { + + implicit def address2scala(addr: TCP.Address): Address = + Address(addr.getProtocol, addr.getSystem, addr.getHost, addr.getPort) + + implicit def direction2scala(dir: 
TCP.Direction): Direction = dir match { + case TCP.Direction.Send ⇒ Direction.Send + case TCP.Direction.Receive ⇒ Direction.Receive + case TCP.Direction.Both ⇒ Direction.Both + } + def decode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { case w: TCP.Wrapper if w.getAllFields.size == 1 ⇒ if (w.hasHello) { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala index 7f6b576128..6800253ae0 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala @@ -32,6 +32,9 @@ object TestConductor extends ExtensionKey[TestConductorExt] { * [[akka.remote.testconductor.Player]] roles inside an Akka * [[akka.actor.Extension]]. Please follow the aforementioned links for * more information. + * + * This extension requires the `akka.actor.provider` + * to be a [[akka.remote.RemoteActorRefProvider]]. */ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player { @@ -47,9 +50,22 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C val PacketSplitThreshold = Duration(config.getMilliseconds("akka.testconductor.packet-split-threshold"), MILLISECONDS) } + /** + * Remote transport used by the actor ref provider. + */ val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport + + /** + * Transport address of this Netty-like remote transport. + */ val address = transport.address - val failureInjectors = new ConcurrentHashMap[Address, FailureInjector] + /** + * INTERNAL API. + * + * [[akka.remote.testconductor.FailureInjector]]s register themselves here so that + * failures can be injected. + */ + private[akka] val failureInjectors = new ConcurrentHashMap[Address, FailureInjector] } \ No newline at end of file diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index b853523979..629a15d51f 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -31,7 +31,10 @@ import org.jboss.netty.channel.ChannelFuture import org.jboss.netty.channel.ChannelFutureListener import org.jboss.netty.channel.ChannelFuture -case class FailureInjector(sender: ActorRef, receiver: ActorRef) { +/** + * INTERNAL API. + */ +private[akka] case class FailureInjector(sender: ActorRef, receiver: ActorRef) { def refs(dir: Direction) = dir match { case Direction.Send ⇒ Seq(sender) case Direction.Receive ⇒ Seq(receiver) @@ -39,12 +42,27 @@ case class FailureInjector(sender: ActorRef, receiver: ActorRef) { } } -object NetworkFailureInjector { +/** + * INTERNAL API. + */ +private[akka] object NetworkFailureInjector { case class SetRate(rateMBit: Float) case class Disconnect(abort: Boolean) } -class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { +/** + * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs, which can + * pass through requests immediately, drop them or throttle to a desired rate. The FSMs are + * registered in the TestConductorExt.failureInjectors so that settings can be applied from + * the ClientFSMs. 
+ * + * I found that simply forwarding events using ctx.sendUpstream/sendDownstream does not work, + * it deadlocks and gives strange errors; in the end I just trusted the Netty docs which + * recommend to prefer `Channels.write()` and `Channels.fireMessageReceived()`. + * + * INTERNAL API. + */ +private[akka] class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { val log = Logging(system, "FailureInjector") diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index 10434007e1..2a4eeb6ad1 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -94,7 +94,10 @@ trait Player { this: TestConductorExt ⇒ } } -object ClientFSM { +/** + * INTERNAL API. + */ +private[akka] object ClientFSM { sealed trait State case object Connecting extends State case object AwaitDone extends State @@ -116,8 +119,10 @@ object ClientFSM { * done the same. After that, it will pass barrier requests to and from the * coordinator and react to the [[akka.remote.testconductor.Conductor]]’s * requests for failure injection. + * + * INTERNAL API. */ -class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { +private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ val settings = TestConductor().Settings @@ -236,8 +241,10 @@ class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor /** * This handler only forwards messages received from the conductor to the [[akka.remote.testconductor.ClientFSM]]. + * + * INTERNAL API. */ -class PlayerHandler( +private[akka] class PlayerHandler( server: InetSocketAddress, private var reconnects: Int, backoff: Duration, diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index 5b1c454b0c..5aeb484c42 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -13,7 +13,10 @@ import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutExceptio import java.net.InetSocketAddress import java.util.concurrent.Executors -class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends ChannelPipelineFactory { +/** + * INTERNAL API. + */ +private[akka] class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends ChannelPipelineFactory { def getPipeline: ChannelPipeline = { val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) @@ -22,11 +25,23 @@ class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends Chan } } -sealed trait Role -case object Client extends Role -case object Server extends Role +/** + * INTERNAL API. + */ +private[akka] sealed trait Role +/** + * INTERNAL API. + */ +private[akka] case object Client extends Role +/** + * INTERNAL API. + */ +private[akka] case object Server extends Role -object RemoteConnection { +/** + * INTERNAL API. 
+ */ +private[akka] object RemoteConnection { def apply(role: Role, sockaddr: InetSocketAddress, handler: ChannelUpstreamHandler): Channel = { role match { case Client ⇒ diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala index 2c51c2cf18..a036bcfff0 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala @@ -10,7 +10,10 @@ import akka.remote.RemoteActorRefProvider import org.jboss.netty.channel.ChannelHandler import org.jboss.netty.channel.ChannelPipelineFactory -class TestConductorTransport(_remoteSettings: RemoteSettings, _system: ActorSystemImpl, _provider: RemoteActorRefProvider) +/** + * INTERNAL API. + */ +private[akka] class TestConductorTransport(_remoteSettings: RemoteSettings, _system: ActorSystemImpl, _provider: RemoteActorRefProvider) extends NettyRemoteTransport(_remoteSettings, _system, _provider) { override def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory = diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala deleted file mode 100644 index b24279dbf6..0000000000 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala +++ /dev/null @@ -1,31 +0,0 @@ -package akka.remote - -import akka.actor.Address -import testconductor.{ TestConductorProtocol ⇒ TCP } - -package object testconductor { - - implicit def address2proto(addr: Address): TCP.Address = - TCP.Address.newBuilder - .setProtocol(addr.protocol) - .setSystem(addr.system) - .setHost(addr.host.get) - .setPort(addr.port.get) - .build - - implicit def address2scala(addr: TCP.Address): Address = - Address(addr.getProtocol, addr.getSystem, addr.getHost, addr.getPort) - - implicit def direction2proto(dir: Direction): TCP.Direction = dir match { - case Direction.Send ⇒ TCP.Direction.Send - case Direction.Receive ⇒ TCP.Direction.Receive - case Direction.Both ⇒ TCP.Direction.Both - } - - implicit def direction2scala(dir: TCP.Direction): Direction = dir match { - case TCP.Direction.Send ⇒ Direction.Send - case TCP.Direction.Receive ⇒ Direction.Receive - case TCP.Direction.Both ⇒ Direction.Both - } - -} \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 60c2ac6097..f0b6cd1870 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -46,13 +46,31 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor Executors.newCachedThreadPool(system.threadFactory), Executors.newCachedThreadPool(system.threadFactory)) + /** + * Backing scaffolding for the default implementation of NettyRemoteSupport.createPipeline. + */ object PipelineFactory { + /** + * Construct a StaticChannelPipeline from a sequence of handlers; to be used + * in implementations of ChannelPipelineFactory. 
+ */ def apply(handlers: Seq[ChannelHandler]): StaticChannelPipeline = new StaticChannelPipeline(handlers: _*) + + /** + * Constructs the NettyRemoteTransport default pipeline with the give “head” handler, which + * is taken by-name to allow it not to be shared across pipelines. + * + * @param withTimeout determines whether an IdleStateHandler shall be included + */ def apply(endpoint: ⇒ Seq[ChannelHandler], withTimeout: Boolean): ChannelPipelineFactory = new ChannelPipelineFactory { def getPipeline = apply(defaultStack(withTimeout) ++ endpoint) } + /** + * Construct a default protocol stack, excluding the “head” handler (i.e. the one which + * actually dispatches the received messages to the local target actors). + */ def defaultStack(withTimeout: Boolean): Seq[ChannelHandler] = (if (withTimeout) timeout :: Nil else Nil) ::: msgFormat ::: @@ -60,17 +78,28 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor executionHandler :: Nil + /** + * Construct an IdleStateHandler which uses [[akka.remote.netty.NettyRemoteTransport]].timer. + */ def timeout = new IdleStateHandler(timer, settings.ReadTimeout.toSeconds.toInt, settings.WriteTimeout.toSeconds.toInt, settings.AllTimeout.toSeconds.toInt) + /** + * Construct frame&protobuf encoder/decoder. + */ def msgFormat = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) :: new LengthFieldPrepender(4) :: new RemoteMessageDecoder :: new RemoteMessageEncoder(NettyRemoteTransport.this) :: Nil + /** + * Construct an ExecutionHandler which is used to ensure that message dispatch does not + * happen on a netty thread (that could be bad if re-sending over the network for + * remote-deployed actors). + */ val executionHandler = new ExecutionHandler(new OrderedMemoryAwareThreadPoolExecutor( settings.ExecutionPoolSize, settings.MaxChannelMemorySize, @@ -79,6 +108,11 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor settings.ExecutionPoolKeepalive.unit, system.threadFactory)) + /** + * Construct and authentication handler which uses the SecureCookie to somewhat + * protect the TCP port from unauthorized use (don’t rely on it too much, though, + * as this is NOT a cryptographic feature). + */ def authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil } @@ -98,7 +132,8 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor /** * Override this method to inject a subclass of NettyRemoteServer instead of - * the normal one, e.g. for inserting security hooks. + * the normal one, e.g. for inserting security hooks. If this method throws + * an exception, the transport will shut itself down and re-throw. 
*/ protected def createServer(): NettyRemoteServer = new NettyRemoteServer(this) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index b899bdec45..26eab59037 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -32,7 +32,7 @@ object AkkaBuild extends Build { Unidoc.unidocExclude := Seq(samples.id, tutorials.id), Dist.distExclude := Seq(actorTests.id, akkaSbtPlugin.id, docs.id) ), - aggregate = Seq(actor, testkit, actorTests, remote, camel, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) + aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) ) lazy val actor = Project( From 5a8f79b619f5a5f43dfa2929211f11de75093130 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 15:27:19 +0200 Subject: [PATCH 079/106] Preparing Agent for binary compat --- .../src/main/scala/akka/agent/Agent.scala | 30 +++++++++++-------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/akka-agent/src/main/scala/akka/agent/Agent.scala b/akka-agent/src/main/scala/akka/agent/Agent.scala index af551d00c8..64834178a8 100644 --- a/akka-agent/src/main/scala/akka/agent/Agent.scala +++ b/akka-agent/src/main/scala/akka/agent/Agent.scala @@ -96,18 +96,18 @@ object Agent { * }}} */ class Agent[T](initialValue: T, system: ActorSystem) { - private[akka] val ref = Ref(initialValue) - private[akka] val updater = system.actorOf(Props(new AgentUpdater(this))).asInstanceOf[LocalActorRef] //TODO can we avoid this somehow? + private val ref = Ref(initialValue) + private val updater = system.actorOf(Props(new AgentUpdater(this, ref))).asInstanceOf[LocalActorRef] //TODO can we avoid this somehow? /** * Read the internal state of the agent. */ - def get() = ref.single.get + def get(): T = ref.single.get /** * Read the internal state of the agent. */ - def apply() = get + def apply(): T = get /** * Dispatch a function to update the internal state. @@ -154,7 +154,7 @@ class Agent[T](initialValue: T, system: ActorSystem) { def sendOff(f: T ⇒ T): Unit = { send((value: T) ⇒ { suspend() - val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this)).withDispatcher("akka.agent.send-off-dispatcher")) + val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this, ref)).withDispatcher("akka.agent.send-off-dispatcher")) threadBased ! Update(f) value }) @@ -171,7 +171,7 @@ class Agent[T](initialValue: T, system: ActorSystem) { val result = Promise[T]()(system.dispatcher) send((value: T) ⇒ { suspend() - val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this)).withDispatcher("akka.agent.alter-off-dispatcher")) + val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this, ref)).withDispatcher("akka.agent.alter-off-dispatcher")) result completeWith ask(threadBased, Alter(f))(timeout).asInstanceOf[Future[T]] value }) @@ -209,18 +209,18 @@ class Agent[T](initialValue: T, system: ActorSystem) { /** * Suspends processing of `send` actions for the agent. */ - def suspend() = updater.suspend() + def suspend(): Unit = updater.suspend() /** * Resumes processing of `send` actions for the agent. */ - def resume() = updater.resume() + def resume(): Unit = updater.resume() /** * Closes the agents and makes it eligible for garbage collection. * A closed agent cannot accept any `send` actions. 
*/ - def close() = updater.stop() + def close(): Unit = updater.stop() // --------------------------------------------- // Support for Java API Functions and Procedures @@ -281,8 +281,10 @@ class Agent[T](initialValue: T, system: ActorSystem) { /** * Agent updater actor. Used internally for `send` actions. + * + * INTERNAL API */ -class AgentUpdater[T](agent: Agent[T]) extends Actor { +private[akka] class AgentUpdater[T](agent: Agent[T], ref: Ref[T]) extends Actor { def receive = { case u: Update[_] ⇒ update(u.function.asInstanceOf[T ⇒ T]) case a: Alter[_] ⇒ sender ! update(a.function.asInstanceOf[T ⇒ T]) @@ -290,13 +292,15 @@ class AgentUpdater[T](agent: Agent[T]) extends Actor { case _ ⇒ } - def update(function: T ⇒ T): T = agent.ref.single.transformAndGet(function) + def update(function: T ⇒ T): T = ref.single.transformAndGet(function) } /** * Thread-based agent updater actor. Used internally for `sendOff` actions. + * + * INTERNAL API */ -class ThreadBasedAgentUpdater[T](agent: Agent[T]) extends Actor { +private[akka] class ThreadBasedAgentUpdater[T](agent: Agent[T], ref: Ref[T]) extends Actor { def receive = { case u: Update[_] ⇒ try { update(u.function.asInstanceOf[T ⇒ T]) @@ -313,5 +317,5 @@ class ThreadBasedAgentUpdater[T](agent: Agent[T]) extends Actor { case _ ⇒ context.stop(self) } - def update(function: T ⇒ T): T = agent.ref.single.transformAndGet(function) + def update(function: T ⇒ T): T = ref.single.transformAndGet(function) } From a211e4daf6a2e866546813a3303cd866b1fc9d63 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 May 2012 16:35:27 +0200 Subject: [PATCH 080/106] Adjusted tests to latest testconductor, remote deploy still fails. See #2069 --- .../{SimpleRemoteMultiJvmSpec.scala => SimpleRemoteSpec.scala} | 0 project/plugins.sbt | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename akka-remote-tests/src/multi-jvm/scala/akka/remote/{SimpleRemoteMultiJvmSpec.scala => SimpleRemoteSpec.scala} (100%) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala similarity index 100% rename from akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala rename to akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala diff --git a/project/plugins.sbt b/project/plugins.sbt index f49cfb688d..0a7f9999a7 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,7 +1,7 @@ resolvers += Classpaths.typesafeResolver -addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-SNAPSHOT") +addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-M1") addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.2") From 1577bffe17db53da665c843a71de5d1a2689b012 Mon Sep 17 00:00:00 2001 From: Roland Date: Wed, 23 May 2012 09:25:12 +0200 Subject: [PATCH 081/106] =?UTF-8?q?make=20too=20verbose=20logging=20in=20T?= =?UTF-8?q?estConductorSpec=20go=20away=20(d=E2=80=99oh)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../scala/akka/remote/testconductor/TestConductorSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 087aac55c7..e311fa0023 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -17,7 +17,7 @@ import akka.remote.testkit.MultiNodeConfig object TestConductorMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + # akka.loglevel = DEBUG akka.remote { log-received-messages = on log-sent-messages = on From 12ff07f0251525fc7232f25570432241b938f966 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 23 May 2012 09:26:20 +0200 Subject: [PATCH 082/106] Adjusted tests to latest testconductor, remote deploy still fails. See #2069 --- .../scala/akka/remote/SimpleRemoteSpec.scala | 97 ++++++-------- .../DirectRoutedRemoteActorMultiJvmSpec.scala | 118 +++++++++--------- 2 files changed, 97 insertions(+), 118 deletions(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala index 9209deb9a5..70cca7c34b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala @@ -1,21 +1,19 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.remote +import com.typesafe.config.ConfigFactory + import akka.actor.Actor import akka.actor.ActorRef import akka.actor.Props -import akka.dispatch.Await import akka.pattern.ask -import akka.remote.testconductor.TestConductor -import akka.testkit.DefaultTimeout -import akka.testkit.ImplicitSender -import akka.util.Duration -import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ -object SimpleRemoteMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 2 +object SimpleRemoteMultiJvmSpec extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { @@ -23,60 +21,47 @@ object SimpleRemoteMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { } } - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = INFO - actor { - provider = akka.remote.RemoteActorRefProvider - debug { - receive = on - fsm = on - } - } - remote { - transport = akka.remote.testconductor.TestConductorTransport - log-received-messages = on - log-sent-messages = on - } - testconductor { - host = localhost - port = 4712 - } - }""") + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.remote { + log-received-messages = on + log-sent-messages = on + } + akka.actor.debug { + receive = on + fsm = on + } + """)) - def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) -} - -class SimpleRemoteMultiJvmNode1 extends AkkaRemoteSpec(SimpleRemoteMultiJvmSpec.nameConfig(0)) { - import SimpleRemoteMultiJvmSpec._ - val nodes = NrOfNodes - val tc = TestConductor(system) - - "lookup remote actor" in { - Await.result(tc.startController(2), Duration.Inf) - system.actorOf(Props[SomeActor], "service-hello") - tc.enter("begin", "done") - } + val master = role("master") + val slave = role("slave") } -class SimpleRemoteMultiJvmNode2 extends AkkaRemoteSpec(SimpleRemoteMultiJvmSpec.nameConfig(1)) +class SimpleRemoteMultiJvmNode1 extends SimpleRemoteSpec +class SimpleRemoteMultiJvmNode2 extends SimpleRemoteSpec + +class SimpleRemoteSpec extends MultiNodeSpec(SimpleRemoteMultiJvmSpec) with ImplicitSender with DefaultTimeout { - import 
SimpleRemoteMultiJvmSpec._ - val nodes = NrOfNodes - val tc = TestConductor(system) - "lookup remote actor" in { - Await.result(tc.startClient(4712), Duration.Inf) - tc.enter("begin") - log.info("### begin ok") - val actor = system.actorFor("akka://" + akkaSpec(0) + "/user/service-hello") - log.info("### actor lookup " + akkaSpec(0) + "/service-hello") - actor.isInstanceOf[RemoteActorRef] must be(true) - Await.result(actor ? "identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0)) - log.info("### actor ok") - tc.enter("done") + def initialParticipants = 2 + + runOn(master) { + system.actorOf(Props[SomeActor], "service-hello") + } + + "Remoting" must { + "lookup remote actor" in { + runOn(slave) { + val hello = system.actorFor(node(master) / "user" / "service-hello") + hello.isInstanceOf[RemoteActorRef] must be(true) + val masterAddress = testConductor.getAddressFor(master).await + (hello ? "identify").await.asInstanceOf[ActorRef].path.address must equal(masterAddress) + } + testConductor.enter("done") + } } } + diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala index d44beff605..2690378ef1 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala @@ -1,20 +1,20 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.remote.router -import akka.actor.{ Actor, ActorRef, Props } -import akka.remote.AkkaRemoteSpec -import akka.remote.AbstractRemoteActorMultiJvmSpec -import akka.remote.RemoteActorRef -import akka.remote.testconductor.TestConductor -import akka.testkit._ -import akka.dispatch.Await -import akka.pattern.ask -import akka.util.Duration +import com.typesafe.config.ConfigFactory -object DirectRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 2 +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.pattern.ask +import akka.remote.RemoteActorRef +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object DirectRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { @@ -23,68 +23,62 @@ object DirectRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSp } import com.typesafe.config.ConfigFactory - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = INFO - actor { - provider = akka.remote.RemoteActorRefProvider - deployment { - /service-hello.remote = %s - } - debug { - receive = on - fsm = on - } - } - remote { - transport = akka.remote.testconductor.TestConductorTransport - log-received-messages = on - log-sent-messages = on - } - testconductor { - host = localhost - port = 4712 - } - }""" format akkaURIs(1)) - - def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) -} - -class DirectRoutedRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(DirectRoutedRemoteActorMultiJvmSpec.nameConfig(0)) { - import DirectRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - val tc = TestConductor(system) - - "A new remote actor configured with a Direct router" must { - "be locally 
instantiated on a remote node and be able to communicate through its RemoteActorRef" in { - Await.result(tc.startController(2), Duration.Inf) - tc.enter("begin", "done") + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.remote { + log-received-messages = on + log-sent-messages = on } - } + akka.actor.debug { + receive = on + fsm = on + } + """)) + + val master = role("master") + val slave = role("slave") + + nodeConfig(master, ConfigFactory.parseString(""" + akka.actor { + deployment { + /service-hello.remote = "akka://MultiNodeSpec@%s" + } + } + # FIXME When using NettyRemoteTransport instead of TestConductorTransport it works + # akka.remote.transport = "akka.remote.netty.NettyRemoteTransport" + """.format("localhost:2553"))) // FIXME is there a way to avoid hardcoding the host:port here? + + nodeConfig(slave, ConfigFactory.parseString(""" + akka.remote.netty.port = 2553 + """)) } -class DirectRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(DirectRoutedRemoteActorMultiJvmSpec.nameConfig(1)) +class DirectRoutedRemoteActorMultiJvmNode1 extends DirectRoutedRemoteActorSpec +class DirectRoutedRemoteActorMultiJvmNode2 extends DirectRoutedRemoteActorSpec + +class DirectRoutedRemoteActorSpec extends MultiNodeSpec(DirectRoutedRemoteActorMultiJvmSpec) with ImplicitSender with DefaultTimeout { - import DirectRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - val tc = TestConductor(system) + + def initialParticipants = 2 "A new remote actor configured with a Direct router" must { "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { - Await.result(tc.startClient(4712), Duration.Inf) - tc.enter("begin") - val actor = system.actorOf(Props[SomeActor], "service-hello") - actor.isInstanceOf[RemoteActorRef] must be(true) + runOn(master) { + val actor = system.actorOf(Props[SomeActor], "service-hello") + actor.isInstanceOf[RemoteActorRef] must be(true) - Await.result(actor ? "identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0)) + val slaveAddress = testConductor.getAddressFor(slave).await + (actor ? 
"identify").await.asInstanceOf[ActorRef].path.address must equal(slaveAddress) - // shut down the actor before we let the other node(s) shut down so we don't try to send - // "Terminate" to a shut down node - system.stop(actor) - tc.enter("done") + // shut down the actor before we let the other node(s) shut down so we don't try to send + // "Terminate" to a shut down node + system.stop(actor) + } + + testConductor.enter("done") } } } From b45cec3da4011311f2d01a95a066b227325818a6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 15:17:49 +0200 Subject: [PATCH 083/106] Preparing Akka Camel for bin compat --- .../akka/dispatch/AbstractDispatcher.scala | 2 +- .../main/scala/akka/dispatch/Mailbox.scala | 10 ++-- .../main/scala/akka/camel/Activation.scala | 28 ++++------- .../camel/ActorNotRegisteredException.scala | 2 +- .../akka/camel/ActorRouteDefinition.scala | 6 ++- .../src/main/scala/akka/camel/Camel.scala | 4 +- .../main/scala/akka/camel/CamelMessage.scala | 23 ++++----- .../src/main/scala/akka/camel/Consumer.scala | 6 +-- .../src/main/scala/akka/camel/Producer.scala | 40 ++++++++-------- .../camel/internal/ActivationMessage.scala | 2 +- .../camel/internal/ActivationTracker.scala | 10 ++-- .../camel/internal/CamelExchangeAdapter.scala | 14 +++--- .../akka/camel/internal/DefaultCamel.scala | 10 ++-- .../camel/internal/ProducerRegistry.scala | 16 ++++--- .../internal/component/ActorComponent.scala | 48 ++++++++----------- .../akka/camel/javaapi/UntypedConsumer.scala | 2 +- .../camel/javaapi/UntypedProducerActor.scala | 10 ++-- akka-camel/src/main/scala/akka/package.scala | 1 + 18 files changed, 109 insertions(+), 125 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 9aec23b4c6..682e6ba4bf 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -214,7 +214,7 @@ private[akka] object MessageDispatcher { // dispatcher debugging helper using println (see below) // since this is a compile-time constant, scalac will elide code behind if (MessageDispatcher.debug) (RK checked with 2.9.1) - final val debug = false + final val debug = false // Deliberately without type ascription to make it a compile-time constant lazy val actors = new Index[MessageDispatcher, ActorRef](16, _ compareTo _) def printActors: Unit = if (debug) { for { diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 0f0bbad1ee..35b1e35012 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -31,15 +31,15 @@ private[akka] object Mailbox { */ // primary status: only first three - final val Open = 0 // _status is not initialized in AbstractMailbox, so default must be zero! - final val Suspended = 1 - final val Closed = 2 + final val Open = 0 // _status is not initialized in AbstractMailbox, so default must be zero! 
Deliberately without type ascription to make it a compile-time constant + final val Suspended = 1 // Deliberately without type ascription to make it a compile-time constant + final val Closed = 2 // Deliberately without type ascription to make it a compile-time constant // secondary status: Scheduled bit may be added to Open/Suspended - final val Scheduled = 4 + final val Scheduled = 4 // Deliberately without type ascription to make it a compile-time constant // mailbox debugging helper using println (see below) // since this is a compile-time constant, scalac will elide code behind if (Mailbox.debug) (RK checked with 2.9.1) - final val debug = false + final val debug = false // Deliberately without type ascription to make it a compile-time constant } /** diff --git a/akka-camel/src/main/scala/akka/camel/Activation.scala b/akka-camel/src/main/scala/akka/camel/Activation.scala index 56d116dca8..d01c990136 100644 --- a/akka-camel/src/main/scala/akka/camel/Activation.scala +++ b/akka-camel/src/main/scala/akka/camel/Activation.scala @@ -18,9 +18,9 @@ import akka.pattern._ trait Activation { import akka.dispatch.Await - def system: ActorSystem + def system: ActorSystem //FIXME Why is this here, what's it needed for and who should use it? - private val activationTracker = system.actorOf(Props[ActivationTracker], "camelActivationTracker") + private val activationTracker = system.actorOf(Props[ActivationTracker], "camelActivationTracker") //FIXME Why is this also top level? /** * Awaits for endpoint to be activated. It blocks until the endpoint is registered in camel context or timeout expires. @@ -29,13 +29,10 @@ trait Activation { * @throws akka.camel.ActivationTimeoutException if endpoint is not activated within timeout. * @return the activated ActorRef */ - def awaitActivation(endpoint: ActorRef, timeout: Duration): ActorRef = { - try { - Await.result(activationFutureFor(endpoint, timeout), timeout) - } catch { + def awaitActivation(endpoint: ActorRef, timeout: Duration): ActorRef = + try Await.result(activationFutureFor(endpoint, timeout), timeout) catch { case e: TimeoutException ⇒ throw new ActivationTimeoutException(endpoint, timeout) } - } /** * Awaits for endpoint to be de-activated. It is blocking until endpoint is unregistered in camel context or timeout expires. @@ -43,37 +40,32 @@ trait Activation { * @param timeout the timeout for the wait * @throws akka.camel.DeActivationTimeoutException if endpoint is not de-activated within timeout. */ - def awaitDeactivation(endpoint: ActorRef, timeout: Duration) { - try { - Await.result(deactivationFutureFor(endpoint, timeout), timeout) - } catch { + def awaitDeactivation(endpoint: ActorRef, timeout: Duration): Unit = + try Await.result(deactivationFutureFor(endpoint, timeout), timeout) catch { case e: TimeoutException ⇒ throw new DeActivationTimeoutException(endpoint, timeout) } - } /** * Similar to `awaitActivation` but returns a future instead. * @param endpoint the endpoint to be activated * @param timeout the timeout for the Future */ - def activationFutureFor(endpoint: ActorRef, timeout: Duration): Future[ActorRef] = { + def activationFutureFor(endpoint: ActorRef, timeout: Duration): Future[ActorRef] = (activationTracker.ask(AwaitActivation(endpoint))(Timeout(timeout))).map[ActorRef] { case EndpointActivated(_) ⇒ endpoint case EndpointFailedToActivate(_, cause) ⇒ throw cause } - } /** * Similar to awaitDeactivation but returns a future instead. 
* @param endpoint the endpoint to be deactivated * @param timeout the timeout of the Future */ - def deactivationFutureFor(endpoint: ActorRef, timeout: Duration): Future[Unit] = { + def deactivationFutureFor(endpoint: ActorRef, timeout: Duration): Future[Unit] = (activationTracker.ask(AwaitDeActivation(endpoint))(Timeout(timeout))).map[Unit] { case EndpointDeActivated(_) ⇒ () case EndpointFailedToDeActivate(_, cause) ⇒ throw cause } - } } /** @@ -82,7 +74,7 @@ trait Activation { * @param timeout the timeout */ class DeActivationTimeoutException(endpoint: ActorRef, timeout: Duration) extends TimeoutException { - override def getMessage = "Timed out after %s, while waiting for de-activation of %s" format (timeout, endpoint.path) + override def getMessage: String = "Timed out after %s, while waiting for de-activation of %s" format (timeout, endpoint.path) } /** @@ -91,5 +83,5 @@ class DeActivationTimeoutException(endpoint: ActorRef, timeout: Duration) extend * @param timeout the timeout */ class ActivationTimeoutException(endpoint: ActorRef, timeout: Duration) extends TimeoutException { - override def getMessage = "Timed out after %s, while waiting for activation of %s" format (timeout, endpoint.path) + override def getMessage: String = "Timed out after %s, while waiting for activation of %s" format (timeout, endpoint.path) } \ No newline at end of file diff --git a/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala b/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala index a468eeace5..7a303e47b3 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala @@ -6,5 +6,5 @@ package akka.camel * @author Martin Krasser */ class ActorNotRegisteredException(uri: String) extends RuntimeException { - override def getMessage = "Actor [%s] doesn't exist" format uri + override def getMessage: String = "Actor [%s] doesn't exist" format uri } diff --git a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala index f5175b90eb..6286edad87 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala @@ -29,7 +29,8 @@ class ActorRouteDefinition(definition: ProcessorDefinition[_]) { * @param actorRef the consumer with a default configuration. * @return the path to the actor, as a camel uri String */ - def to(actorRef: ActorRef) = definition.to(ActorEndpointPath(actorRef).toCamelPath()) + def to(actorRef: ActorRef) = //FIXME What is the return type of this? + definition.to(ActorEndpointPath(actorRef).toCamelPath()) /** * Sends the message to an ActorRef endpoint @@ -37,6 +38,7 @@ class ActorRouteDefinition(definition: ProcessorDefinition[_]) { * @param consumerConfig the configuration for the consumer * @return the path to the actor, as a camel uri String */ - def to(actorRef: ActorRef, consumerConfig: ConsumerConfig) = definition.to(ActorEndpointPath(actorRef).toCamelPath(consumerConfig)) + def to(actorRef: ActorRef, consumerConfig: ConsumerConfig) = //FIXME What is the return type of this? 
+ definition.to(ActorEndpointPath(actorRef).toCamelPath(consumerConfig)) } diff --git a/akka-camel/src/main/scala/akka/camel/Camel.scala b/akka-camel/src/main/scala/akka/camel/Camel.scala index 4e96f038e5..72252212cf 100644 --- a/akka-camel/src/main/scala/akka/camel/Camel.scala +++ b/akka-camel/src/main/scala/akka/camel/Camel.scala @@ -50,13 +50,13 @@ object CamelExtension extends ExtensionId[Camel] with ExtensionIdProvider { /** * Creates a new instance of Camel and makes sure it gets stopped when the actor system is shutdown. */ - def createExtension(system: ExtendedActorSystem) = { + override def createExtension(system: ExtendedActorSystem): Camel = { val camel = new DefaultCamel(system).start system.registerOnTermination(camel.shutdown()) camel } - def lookup(): ExtensionId[Camel] = CamelExtension + override def lookup(): ExtensionId[Camel] = CamelExtension override def get(system: ActorSystem): Camel = super.get(system) } \ No newline at end of file diff --git a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala index 2ea046b856..4f617c83a4 100644 --- a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala @@ -21,12 +21,12 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { def this(body: Any, headers: JMap[String, Any]) = this(body, headers.toMap) //for Java - override def toString = "CamelMessage(%s, %s)" format (body, headers) + override def toString: String = "CamelMessage(%s, %s)" format (body, headers) /** * Returns those headers from this message whose name is contained in names. */ - def headers(names: Set[String]): Map[String, Any] = headers.filterKeys(names contains _) + def headers(names: Set[String]): Map[String, Any] = headers filterKeys names /** * Returns those headers from this message whose name is contained in names. @@ -75,7 +75,7 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { /** * Creates a CamelMessage with a given body. */ - def withBody(body: Any) = CamelMessage(body, this.headers) + def withBody(body: Any): CamelMessage = CamelMessage(body, this.headers) /** * Creates a new CamelMessage with given headers. @@ -119,9 +119,9 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { * Creates a new CamelMessage where the header with given headerName is removed from * the existing headers. */ - def withoutHeader(headerName: String) = copy(this.body, this.headers - headerName) + def withoutHeader(headerName: String): CamelMessage = copy(this.body, this.headers - headerName) - def copyContentTo(to: JCamelMessage) = { + def copyContentTo(to: JCamelMessage): Unit = { to.setBody(this.body) for ((name, value) ← this.headers) to.getHeaders.put(name, value.asInstanceOf[AnyRef]) } @@ -145,8 +145,7 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { * Java API * */ - def getBodyAs[T](clazz: Class[T], camelContext: CamelContext): T = - camelContext.getTypeConverter.mandatoryConvertTo[T](clazz, body) + def getBodyAs[T](clazz: Class[T], camelContext: CamelContext): T = camelContext.getTypeConverter.mandatoryConvertTo[T](clazz, body) /** * Creates a CamelMessage with current body converted to type T. @@ -184,7 +183,7 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { *

* Java API */ - def getHeaderAs[T](name: String, clazz: Class[T], camelContext: CamelContext) = headerAs[T](name)(Manifest.classType(clazz), camelContext).get + def getHeaderAs[T](name: String, clazz: Class[T], camelContext: CamelContext): T = headerAs[T](name)(Manifest.classType(clazz), camelContext).get } @@ -201,7 +200,7 @@ object CamelMessage { * so that it can be correlated with an asynchronous response. Messages send to Consumer * actors have this header already set. */ - val MessageExchangeId = "MessageExchangeId".intern + val MessageExchangeId = "MessageExchangeId".intern //Deliberately without type ascription to make it a constant /** * Creates a canonical form of the given message msg. If msg of type @@ -244,5 +243,7 @@ case object Ack { * message or Exchange.getOut message, depending on the exchange pattern. * */ -class AkkaCamelException private[akka] (cause: Throwable, val headers: Map[String, Any] = Map.empty) - extends AkkaException(cause.getMessage, cause) +class AkkaCamelException private[akka] (cause: Throwable, val headers: Map[String, Any]) + extends AkkaException(cause.getMessage, cause) { + def this(cause: Throwable) = this(cause, Map.empty) +} diff --git a/akka-camel/src/main/scala/akka/camel/Consumer.scala b/akka-camel/src/main/scala/akka/camel/Consumer.scala index 1d21ffbec7..0351ce39cb 100644 --- a/akka-camel/src/main/scala/akka/camel/Consumer.scala +++ b/akka-camel/src/main/scala/akka/camel/Consumer.scala @@ -31,7 +31,7 @@ trait ConsumerConfig { /** * How long the actor should wait for activation before it fails. */ - def activationTimeout: Duration = 10 seconds + def activationTimeout: Duration = 10 seconds // FIXME Should be configured in reference.conf /** * When endpoint is out-capable (can produce responses) replyTimeout is the maximum time @@ -39,14 +39,14 @@ trait ConsumerConfig { * This setting is used for out-capable, in-only, manually acknowledged communication. * When the blocking is set to Blocking replyTimeout is ignored. */ - def replyTimeout: Duration = 1 minute + def replyTimeout: Duration = 1 minute // FIXME Should be configured in reference.conf /** * Determines whether one-way communications between an endpoint and this consumer actor * should be auto-acknowledged or application-acknowledged. * This flag has only effect when exchange is in-only. */ - def autoack: Boolean = true + def autoack: Boolean = true // FIXME Should be configured in reference.conf /** * The route definition handler for creating a custom route to this consumer instance. diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala index 33541d4611..5a7262a133 100644 --- a/akka-camel/src/main/scala/akka/camel/Producer.scala +++ b/akka-camel/src/main/scala/akka/camel/Producer.scala @@ -6,8 +6,9 @@ package akka.camel import akka.actor.Actor import internal.CamelExchangeAdapter -import org.apache.camel.{ Exchange, ExchangePattern, AsyncCallback } import akka.actor.Status.Failure +import org.apache.camel.{ Endpoint, Exchange, ExchangePattern, AsyncCallback } +import org.apache.camel.processor.SendProcessor /** * Support trait for producing messages to Camel endpoints. @@ -15,19 +16,19 @@ import akka.actor.Status.Failure * @author Martin Krasser */ trait ProducerSupport { this: Actor ⇒ - protected[this] implicit def camel = CamelExtension(context.system) + protected[this] implicit def camel = CamelExtension(context.system) // FIXME This is duplicated from Consumer, create a common base-trait? 
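For context, the ProducerSupport and Producer traits reworked in this hunk are used by mixing them into a plain actor that supplies an endpointUri; the Oneway trait defined later in this file switches the exchange to fire-and-forget. A minimal sketch follows, assuming illustrative class names and endpoint URIs that are not part of this patch:

{{{
import akka.actor.Actor
import akka.camel.{ Oneway, Producer }

// Request-reply producer: incoming messages are produced to the Camel endpoint
// with an InOut exchange and the response is routed back to the original sender.
class OrderServiceClient extends Actor with Producer {
  def endpointUri = "http://localhost:8080/orders" // illustrative endpoint
}

// Fire-and-forget producer: Oneway sets oneway = true, so an InOnly exchange is used.
class AuditTrail extends Actor with Oneway {
  def endpointUri = "file://data/audit" // illustrative endpoint
}
}}}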
/** * camelContext implicit is useful when using advanced methods of CamelMessage. */ - protected[this] implicit def camelContext = camel.context + protected[this] implicit def camelContext = camel.context // FIXME This is duplicated from Consumer, create a common base-trait? - protected[this] lazy val (endpoint, processor) = camel.registerProducer(self, endpointUri) + protected[this] lazy val (endpoint: Endpoint, processor: SendProcessor) = camel.registerProducer(self, endpointUri) /** * CamelMessage headers to copy by default from request message to response-message. */ - private val headersToCopyDefault = Set(CamelMessage.MessageExchangeId) + private val headersToCopyDefault: Set[String] = Set(CamelMessage.MessageExchangeId) /** * If set to false (default), this producer expects a response message from the Camel endpoint. @@ -64,20 +65,21 @@ trait ProducerSupport { this: Actor ⇒ * @param pattern exchange pattern */ protected def produce(msg: Any, pattern: ExchangePattern): Unit = { - implicit def toExchangeAdapter(exchange: Exchange): CamelExchangeAdapter = new CamelExchangeAdapter(exchange) + // Need copies of sender reference here since the callback could be done + // later by another thread. + val producer = self + val originalSender = sender val cmsg = CamelMessage.canonicalize(msg) - val exchange = endpoint.createExchange(pattern) - exchange.setRequest(cmsg) - processor.process(exchange, new AsyncCallback { - val producer = self - // Need copies of sender reference here since the callback could be done - // later by another thread. - val originalSender = sender + val xchg = new CamelExchangeAdapter(endpoint.createExchange(pattern)) + + xchg.setRequest(cmsg) + + processor.process(xchg.exchange, new AsyncCallback { // Ignoring doneSync, sending back async uniformly. def done(doneSync: Boolean): Unit = producer.tell( - if (exchange.isFailed) exchange.toFailureResult(cmsg.headers(headersToCopy)) - else MessageResult(exchange.toResponseMessage(cmsg.headers(headersToCopy))), originalSender) + if (xchg.exchange.isFailed) xchg.toFailureResult(cmsg.headers(headersToCopy)) + else MessageResult(xchg.toResponseMessage(cmsg.headers(headersToCopy))), originalSender) }) } @@ -94,9 +96,7 @@ trait ProducerSupport { this: Actor ⇒ val e = new AkkaCamelException(res.cause, res.headers) routeResponse(Failure(e)) throw e - case msg ⇒ - val exchangePattern = if (oneway) ExchangePattern.InOnly else ExchangePattern.InOut - produce(transformOutgoingMessage(msg), exchangePattern) + case msg ⇒ produce(transformOutgoingMessage(msg), if (oneway) ExchangePattern.InOnly else ExchangePattern.InOut) } /** @@ -134,7 +134,7 @@ trait Producer extends ProducerSupport { this: Actor ⇒ * Default implementation of Actor.receive. Any messages received by this actors * will be produced to the endpoint specified by endpointUri. 
*/ - def receive = produce + def receive: Actor.Receive = produce } /** @@ -153,6 +153,6 @@ private case class FailureResult(cause: Throwable, headers: Map[String, Any] = M * @author Martin Krasser */ trait Oneway extends Producer { this: Actor ⇒ - override def oneway = true + override def oneway: Boolean = true } diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala index b8c3f42a47..bdd915ff70 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala @@ -20,7 +20,7 @@ private[camel] abstract class ActivationMessage(val actor: ActorRef) * */ private[camel] object ActivationMessage { - def unapply(msg: ActivationMessage): Option[ActorRef] = Some(msg.actor) + def unapply(msg: ActivationMessage): Option[ActorRef] = Option(msg.actor) } /** diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala index 0b93460be0..f5a87eff25 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala @@ -96,17 +96,15 @@ private[akka] final class ActivationTracker extends Actor with ActorLogging { /** * Subscribes self to messages of type ActivationMessage */ - override def preStart() { - context.system.eventStream.subscribe(self, classOf[ActivationMessage]) - } + override def preStart(): Unit = context.system.eventStream.subscribe(self, classOf[ActivationMessage]) override def receive = { case msg @ ActivationMessage(ref) ⇒ - val state = activations.getOrElseUpdate(ref, new ActivationStateMachine) - (state.receive orElse logStateWarning(ref))(msg) + (activations.getOrElseUpdate(ref, new ActivationStateMachine).receive orElse logStateWarning(ref))(msg) } - private[this] def logStateWarning(actorRef: ActorRef): Receive = { case msg ⇒ log.warning("Message [{}] not expected in current state of actor [{}]", msg, actorRef) } + private[this] def logStateWarning(actorRef: ActorRef): Receive = + { case msg ⇒ log.warning("Message [{}] not expected in current state of actor [{}]", msg, actorRef) } } /** diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala index 1f2d80e6df..5de9eb447d 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala @@ -16,34 +16,34 @@ import akka.camel.{ FailureResult, AkkaCamelException, CamelMessage } * * @author Martin Krasser */ -private[camel] class CamelExchangeAdapter(exchange: Exchange) { +private[camel] class CamelExchangeAdapter(val exchange: Exchange) { /** * Returns the exchange id */ - def getExchangeId = exchange.getExchangeId + def getExchangeId: String = exchange.getExchangeId /** * Returns if the exchange is out capable. */ - def isOutCapable = exchange.getPattern.isOutCapable + def isOutCapable: Boolean = exchange.getPattern.isOutCapable /** * Sets Exchange.getIn from the given CamelMessage object. */ - def setRequest(msg: CamelMessage) { msg.copyContentTo(request) } + def setRequest(msg: CamelMessage): Unit = msg.copyContentTo(request) /** * Depending on the exchange pattern, sets Exchange.getIn or Exchange.getOut from the given * CamelMessage object. 
If the exchange is out-capable then the Exchange.getOut is set, otherwise * Exchange.getIn. */ - def setResponse(msg: CamelMessage) { msg.copyContentTo(response) } + def setResponse(msg: CamelMessage): Unit = msg.copyContentTo(response) /** * Sets Exchange.getException from the given FailureResult message. Headers of the FailureResult message * are ignored. */ - def setFailure(msg: FailureResult) { exchange.setException(msg.cause) } + def setFailure(msg: FailureResult): Unit = exchange.setException(msg.cause) /** * Creates an immutable CamelMessage object from Exchange.getIn so it can be used with Actors. @@ -120,7 +120,7 @@ private[camel] class CamelExchangeAdapter(exchange: Exchange) { */ def toResponseMessage(headers: Map[String, Any]): CamelMessage = CamelMessage.from(response, headers) - private def request = exchange.getIn + private def request: JCamelMessage = exchange.getIn private def response: JCamelMessage = ExchangeHelper.getResultMessage(exchange) diff --git a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala index 1754bb0073..2ac35fdec2 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala @@ -2,12 +2,12 @@ package akka.camel.internal import akka.actor.ActorSystem import component.{ DurationTypeConverter, ActorComponent } -import org.apache.camel.CamelContext import org.apache.camel.impl.DefaultCamelContext import scala.Predef._ import akka.event.Logging import akka.camel.Camel import akka.util.{ NonFatal, Duration } +import org.apache.camel.{ ProducerTemplate, CamelContext } /** * For internal use only. @@ -33,14 +33,14 @@ private[camel] class DefaultCamel(val system: ActorSystem) extends Camel { ctx } - lazy val template = context.createProducerTemplate() + lazy val template: ProducerTemplate = context.createProducerTemplate() /** * Starts camel and underlying camel context and template. * Only the creator of Camel should start and stop it. * @see akka.camel.DefaultCamel#stop() */ - def start = { + def start(): this.type = { context.start() try template.start() catch { case NonFatal(e) ⇒ context.stop(); throw e } log.debug("Started CamelContext[{}] for ActorSystem[{}]", context.getName, system.name) @@ -54,9 +54,9 @@ private[camel] class DefaultCamel(val system: ActorSystem) extends Camel { * * @see akka.camel.DefaultCamel#start() */ - def shutdown() { + def shutdown(): Unit = { try context.stop() finally { - try { template.stop() } catch { case NonFatal(e) ⇒ log.debug("Swallowing non-fatal exception [{}] on stopping Camel producer template", e) } + try template.stop() catch { case NonFatal(e) ⇒ log.debug("Swallowing non-fatal exception [{}] on stopping Camel producer template", e) } } log.debug("Stopped CamelContext[{}] for ActorSystem[{}]", context.getName, system.name) } diff --git a/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala b/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala index 03d130efe2..d338dbfdea 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala @@ -11,6 +11,8 @@ import akka.util.NonFatal * Watches the end of life of Producers. * Removes a Producer from the ProducerRegistry when it is Terminated, * which in turn stops the SendProcessor. 
+ * + * INTERNAL API */ private class ProducerWatcher(registry: ProducerRegistry) extends Actor { override def receive = { @@ -19,6 +21,9 @@ private class ProducerWatcher(registry: ProducerRegistry) extends Actor { } } +/** + * INTERNAL API + */ private case class RegisterProducer(actorRef: ActorRef) /** @@ -27,14 +32,11 @@ private case class RegisterProducer(actorRef: ActorRef) * Every Producer needs an Endpoint and a SendProcessor * to produce messages over an Exchange. */ -private[camel] trait ProducerRegistry { - this: Camel ⇒ +private[camel] trait ProducerRegistry { this: Camel ⇒ private val camelObjects = new ConcurrentHashMap[ActorRef, (Endpoint, SendProcessor)]() - private val watcher = system.actorOf(Props(new ProducerWatcher(this))) + private val watcher = system.actorOf(Props(new ProducerWatcher(this))) //FIXME should this really be top level? - private def registerWatch(actorRef: ActorRef) { - watcher ! RegisterProducer(actorRef) - } + private def registerWatch(actorRef: ActorRef): Unit = watcher ! RegisterProducer(actorRef) /** * For internal use only. @@ -77,7 +79,7 @@ private[camel] trait ProducerRegistry { case NonFatal(e) ⇒ { system.eventStream.publish(EndpointFailedToActivate(actorRef, e)) // can't return null to the producer actor, so blow up actor in initialization. - throw e + throw e //FIXME I'm not a huge fan of log-rethrow, either log or rethrow } } } diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index 7ec5919dc9..a8d7a59b61 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -35,10 +35,8 @@ private[camel] class ActorComponent(camel: Camel) extends DefaultComponent { /** * @see org.apache.camel.Component */ - def createEndpoint(uri: String, remaining: String, parameters: JMap[String, Object]): ActorEndpoint = { - val path = ActorEndpointPath.fromCamelPath(remaining) - new ActorEndpoint(uri, this, path, camel) - } + def createEndpoint(uri: String, remaining: String, parameters: JMap[String, Object]): ActorEndpoint = + new ActorEndpoint(uri, this, ActorEndpointPath.fromCamelPath(remaining), camel) } /** @@ -92,7 +90,7 @@ private[camel] class ActorEndpoint(uri: String, private[camel] trait ActorEndpointConfig { def path: ActorEndpointPath - @BeanProperty var replyTimeout: Duration = 1 minute + @BeanProperty var replyTimeout: Duration = 1 minute // FIXME default should be in config, not code /** * Whether to auto-acknowledge one-way message exchanges with (untyped) actors. This is @@ -117,7 +115,7 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex * Calls the asynchronous version of the method and waits for the result (blocking). * @param exchange the exchange to process */ - def process(exchange: Exchange) { processExchangeAdapter(new CamelExchangeAdapter(exchange)) } + def process(exchange: Exchange): Unit = processExchangeAdapter(new CamelExchangeAdapter(exchange)) /** * Processes the message exchange. the caller supports having the exchange asynchronously processed. @@ -129,13 +127,15 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex * The callback should therefore be careful of starting recursive loop. 
* @return (doneSync) true to continue execute synchronously, false to continue being executed asynchronously */ - def process(exchange: Exchange, callback: AsyncCallback): Boolean = { processExchangeAdapter(new CamelExchangeAdapter(exchange), callback) } + def process(exchange: Exchange, callback: AsyncCallback): Boolean = processExchangeAdapter(new CamelExchangeAdapter(exchange), callback) /** * For internal use only. Processes the [[akka.camel.internal.CamelExchangeAdapter]] * @param exchange the [[akka.camel.internal.CamelExchangeAdapter]] + * + * WARNING UNBOUNDED BLOCKING AWAITS */ - private[camel] def processExchangeAdapter(exchange: CamelExchangeAdapter) { + private[camel] def processExchangeAdapter(exchange: CamelExchangeAdapter): Unit = { val isDone = new CountDownLatch(1) processExchangeAdapter(exchange, new AsyncCallback { def done(doneSync: Boolean) { isDone.countDown() } }) isDone.await() // this should never wait forever as the process(exchange, callback) method guarantees that. @@ -151,10 +151,10 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex private[camel] def processExchangeAdapter(exchange: CamelExchangeAdapter, callback: AsyncCallback): Boolean = { // these notify methods are just a syntax sugar - def notifyDoneSynchronously[A](a: A = null) = callback.done(true) - def notifyDoneAsynchronously[A](a: A = null) = callback.done(false) + def notifyDoneSynchronously[A](a: A = null): Unit = callback.done(true) + def notifyDoneAsynchronously[A](a: A = null): Unit = callback.done(false) - def message = messageFor(exchange) + def message: CamelMessage = messageFor(exchange) if (exchange.isOutCapable) { //InOut sendAsync(message, onComplete = forwardResponseTo(exchange) andThen notifyDoneAsynchronously) @@ -186,39 +186,29 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex private def sendAsync(message: CamelMessage, onComplete: PartialFunction[Either[Throwable, Any], Unit]): Boolean = { try { - val actor = actorFor(endpoint.path) - val future = actor.ask(message)(new Timeout(endpoint.replyTimeout)) - future.onComplete(onComplete) + actorFor(endpoint.path).ask(message)(Timeout(endpoint.replyTimeout)).onComplete(onComplete) } catch { case NonFatal(e) ⇒ onComplete(Left(e)) } false // Done async } - private def fireAndForget(message: CamelMessage, exchange: CamelExchangeAdapter) { - try { - actorFor(endpoint.path) ! message - } catch { - case e ⇒ exchange.setFailure(new FailureResult(e)) - } - } + private def fireAndForget(message: CamelMessage, exchange: CamelExchangeAdapter): Unit = + try { actorFor(endpoint.path) ! message } catch { case NonFatal(e) ⇒ exchange.setFailure(new FailureResult(e)) } private[this] def actorFor(path: ActorEndpointPath): ActorRef = path.findActorIn(camel.system) getOrElse (throw new ActorNotRegisteredException(path.actorPath)) private[this] def messageFor(exchange: CamelExchangeAdapter) = exchange.toRequestMessage(Map(CamelMessage.MessageExchangeId -> exchange.getExchangeId)) - } /** * For internal use only. 
Converts Strings to [[akka.util.Duration]]s */ private[camel] object DurationTypeConverter extends TypeConverter { - def convertTo[T](`type`: Class[T], value: AnyRef) = { - Duration(value.toString).asInstanceOf[T] - } - def convertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef) = convertTo(`type`, value) + def convertTo[T](`type`: Class[T], value: AnyRef) = Duration(value.toString).asInstanceOf[T] //FIXME WTF + def convertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef): T = convertTo(`type`, value) def mandatoryConvertTo[T](`type`: Class[T], value: AnyRef) = convertTo(`type`, value) def mandatoryConvertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef) = convertTo(`type`, value) def toString(duration: Duration) = duration.toNanos + " nanos" @@ -243,15 +233,15 @@ private[camel] case class ActorEndpointPath private (actorPath: String) { * For internal use only. Companion of `ActorEndpointPath` */ private[camel] object ActorEndpointPath { - private val consumerConfig = new ConsumerConfig {} + private val consumerConfig: ConsumerConfig = new ConsumerConfig {} - def apply(actorRef: ActorRef) = new ActorEndpointPath(actorRef.path.toString) + def apply(actorRef: ActorRef): ActorEndpointPath = new ActorEndpointPath(actorRef.path.toString) /** * Creates an [[akka.camel.internal.component.ActorEndpointPath]] from the remaining part of the endpoint URI (the part after the scheme, without the parameters of the URI). * Expects the remaining part of the URI (the actor path) in a format: path:%s */ - def fromCamelPath(camelPath: String) = camelPath match { + def fromCamelPath(camelPath: String): ActorEndpointPath = camelPath match { case id if id startsWith "path:" ⇒ new ActorEndpointPath(id substring 5) case _ ⇒ throw new IllegalArgumentException("Invalid path: [%s] - should be path:" format camelPath) } diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala index 56f11831d0..a4671583bb 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala @@ -13,7 +13,7 @@ import org.apache.camel.{ ProducerTemplate, CamelContext } * class is meant to be used from Java. */ abstract class UntypedConsumerActor extends UntypedActor with Consumer { - final def endpointUri = getEndpointUri + final def endpointUri: String = getEndpointUri /** * Returns the Camel endpoint URI to consume messages from. 
diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala index c4d0a9c1a0..f44daf0725 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala @@ -40,16 +40,14 @@ abstract class UntypedProducerActor extends UntypedActor with ProducerSupport { final override def transformResponse(msg: Any): AnyRef = onTransformResponse(msg.asInstanceOf[AnyRef]) final override def routeResponse(msg: Any): Unit = onRouteResponse(msg.asInstanceOf[AnyRef]) - final override def endpointUri = getEndpointUri + final override def endpointUri: String = getEndpointUri - final override def oneway = isOneway + final override def oneway: Boolean = isOneway /** * Default implementation of UntypedActor.onReceive */ - def onReceive(message: Any) { - produce(message) - } + def onReceive(message: Any): Unit = produce(message) /** * Returns the Camel endpoint URI to produce messages to. @@ -61,7 +59,7 @@ abstract class UntypedProducerActor extends UntypedActor with ProducerSupport { * If set to true, this producer communicates with the Camel endpoint with an in-only message * exchange pattern (fire and forget). */ - def isOneway() = super.oneway + def isOneway(): Boolean = super.oneway /** * Returns the CamelContext. diff --git a/akka-camel/src/main/scala/akka/package.scala b/akka-camel/src/main/scala/akka/package.scala index 436d2fc1b3..10382d96ee 100644 --- a/akka-camel/src/main/scala/akka/package.scala +++ b/akka-camel/src/main/scala/akka/package.scala @@ -7,5 +7,6 @@ package akka import org.apache.camel.model.ProcessorDefinition package object camel { + //TODO Why do I exist? implicit def toActorRouteDefinition(definition: ProcessorDefinition[_]) = new ActorRouteDefinition(definition) } \ No newline at end of file From 975d73dea642df31592c54381175668658183520 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 15:46:29 +0200 Subject: [PATCH 084/106] Adding some docs to the Camel package object --- akka-camel/src/main/scala/akka/package.scala | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/akka-camel/src/main/scala/akka/package.scala b/akka-camel/src/main/scala/akka/package.scala index 10382d96ee..d3e60ae24f 100644 --- a/akka-camel/src/main/scala/akka/package.scala +++ b/akka-camel/src/main/scala/akka/package.scala @@ -7,6 +7,12 @@ package akka import org.apache.camel.model.ProcessorDefinition package object camel { - //TODO Why do I exist? 
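The implicit conversion documented in this hunk lets an ActorRef be used as a target in Camel's route DSL. A minimal sketch of the wiring, assuming an illustrative actor, route and endpoint URI that are not part of this patch:

{{{
import akka.actor.{ ActorSystem, Props }
import akka.camel._ // brings toActorRouteDefinition into scope
import org.apache.camel.builder.RouteBuilder

val system = ActorSystem("camel-sample")
val target = system.actorOf(Props[SomeConsumerActor], "file-consumer") // hypothetical actor

// ProcessorDefinition.to(ActorRef) compiles because of the implicit conversion above.
CamelExtension(system).context.addRoutes(new RouteBuilder {
  def configure(): Unit =
    from("file://data/input/CamelConsumer").to(target)
})
}}}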
+ /** + * To allow using Actors with the Camel Route DSL: + * + * {{{ + * from("file://data/input/CamelConsumer").to(actor) + * }}} + */ implicit def toActorRouteDefinition(definition: ProcessorDefinition[_]) = new ActorRouteDefinition(definition) } \ No newline at end of file From 96c5c9392b0baf9ed9feefd9c885eb0dd6e4649e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 16:18:01 +0200 Subject: [PATCH 085/106] Preparing the FileBasedMailbox for binary compatibility --- .../akka/actor/mailbox/FileBasedMailbox.scala | 31 ++++++------------- .../mailbox/FileBasedMailboxSettings.scala | 28 ++++++++--------- 2 files changed, 23 insertions(+), 36 deletions(-) diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala index e57bfd57d2..c595fdcdd3 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala @@ -23,47 +23,37 @@ class FileBasedMailboxType(systemSettings: ActorSystem.Settings, config: Config) } class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailboxSettings) extends DurableMessageQueue(_owner) with DurableMessageSerialization { - - val log = Logging(system, "FileBasedMessageQueue") - - val queuePath = settings.QueuePath + // TODO Is it reasonable for all FileBasedMailboxes to have their own logger? + private val log = Logging(system, "FileBasedMessageQueue") private val queue = try { - (new java.io.File(queuePath)) match { + (new java.io.File(settings.QueuePath)) match { case dir if dir.exists && !dir.isDirectory ⇒ throw new IllegalStateException("Path already occupied by non-directory " + dir) case dir if !dir.exists ⇒ if (!dir.mkdirs() && !dir.isDirectory) throw new IllegalStateException("Creation of directory failed " + dir) case _ ⇒ //All good } - val queue = new filequeue.PersistentQueue(queuePath, name, settings, log) + val queue = new filequeue.PersistentQueue(settings.QueuePath, name, settings, log) queue.setup // replays journal queue.discardExpired queue } catch { - case e: Exception ⇒ + case NonFatal(e) ⇒ log.error(e, "Could not create a file-based mailbox") throw e } - def enqueue(receiver: ActorRef, envelope: Envelope) { - queue.add(serialize(envelope)) - } + def enqueue(receiver: ActorRef, envelope: Envelope): Unit = queue.add(serialize(envelope)) def dequeue(): Envelope = try { - val item = queue.remove - if (item.isDefined) { - queue.confirmRemove(item.get.xid) - deserialize(item.get.data) - } else null + queue.remove.map(item ⇒ { queue.confirmRemove(item.xid); deserialize(item.data) }).orNull } catch { - case e: java.util.NoSuchElementException ⇒ null - case e: Exception ⇒ + case _: java.util.NoSuchElementException ⇒ null + case NonFatal(e) ⇒ log.error(e, "Couldn't dequeue from file-based mailbox") throw e } - def numberOfMessages: Int = { - queue.length.toInt - } + def numberOfMessages: Int = queue.length.toInt def hasMessages: Boolean = numberOfMessages > 0 @@ -78,5 +68,4 @@ class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailbox } def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () - } diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala 
b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala index 6511bf9e00..87dc25840f 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala @@ -11,24 +11,22 @@ import akka.actor.ActorSystem class FileBasedMailboxSettings(val systemSettings: ActorSystem.Settings, val userConfig: Config) extends DurableMailboxSettings { - def name = "file-based" + def name: String = "file-based" val config = initialize - import config._ - val QueuePath = getString("directory-path") - - val MaxItems = getInt("max-items") - val MaxSize = getBytes("max-size") - val MaxItemSize = getBytes("max-item-size") - val MaxAge = Duration(getMilliseconds("max-age"), MILLISECONDS) - val MaxJournalSize = getBytes("max-journal-size") - val MaxMemorySize = getBytes("max-memory-size") - val MaxJournalOverflow = getInt("max-journal-overflow") - val MaxJournalSizeAbsolute = getBytes("max-journal-size-absolute") - val DiscardOldWhenFull = getBoolean("discard-old-when-full") - val KeepJournal = getBoolean("keep-journal") - val SyncJournal = getBoolean("sync-journal") + val QueuePath: String = getString("directory-path") + val MaxItems: Int = getInt("max-items") + val MaxSize: Long = getBytes("max-size") + val MaxItemSize: Long = getBytes("max-item-size") + val MaxAge: Duration = Duration(getMilliseconds("max-age"), MILLISECONDS) + val MaxJournalSize: Long = getBytes("max-journal-size") + val MaxMemorySize: Long = getBytes("max-memory-size") + val MaxJournalOverflow: Int = getInt("max-journal-overflow") + val MaxJournalSizeAbsolute: Long = getBytes("max-journal-size-absolute") + val DiscardOldWhenFull: Boolean = getBoolean("discard-old-when-full") + val KeepJournal: Boolean = getBoolean("keep-journal") + val SyncJournal: Boolean = getBoolean("sync-journal") } \ No newline at end of file From ee4a7ce76a4cf245c2f86ef33951568fa7f5be33 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 16:27:30 +0200 Subject: [PATCH 086/106] Preparing mailbox-commons for bin compat --- .../akka/actor/mailbox/DurableMailbox.scala | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala index 0744215bae..b21878d00e 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala @@ -3,12 +3,11 @@ */ package akka.actor.mailbox -import akka.actor.{ ActorContext, ActorRef, ExtendedActorSystem } import akka.dispatch.{ Envelope, MessageQueue } import akka.remote.MessageSerializer import akka.remote.RemoteProtocol.{ ActorRefProtocol, RemoteMessageProtocol } import com.typesafe.config.Config -import akka.actor.ActorSystem +import akka.actor._ private[akka] object DurableExecutableMailboxConfig { val Name = "[\\.\\/\\$\\s]".r @@ -18,14 +17,21 @@ abstract class DurableMessageQueue(val owner: ActorContext) extends MessageQueue import DurableExecutableMailboxConfig._ def system: ExtendedActorSystem = owner.system.asInstanceOf[ExtendedActorSystem] - def ownerPath = owner.self.path - val ownerPathString = ownerPath.elements.mkString("/") - val name 
= "mailbox_" + Name.replaceAllIn(ownerPathString, "_") + def ownerPath: ActorPath = owner.self.path + val ownerPathString: String = ownerPath.elements.mkString("/") + val name: String = "mailbox_" + Name.replaceAllIn(ownerPathString, "_") } +/** + * DurableMessageSerialization can be mixed into a DurableMessageQueue and adds functionality + * to serialize and deserialize Envelopes (messages) + */ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ + /** + * Serializes the given Envelope into an Array of Bytes using an efficient serialization/deserialization strategy + */ def serialize(durableMessage: Envelope): Array[Byte] = { // It's alright to use ref.path.toString here @@ -42,6 +48,10 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ builder.build.toByteArray } + /** + * Deserializes an array of Bytes that were serialized using the DurableMessageSerialization.serialize method, + * into an Envelope. + */ def deserialize(bytes: Array[Byte]): Envelope = { def deserializeActorRef(refProtocol: ActorRefProtocol): ActorRef = system.actorFor(refProtocol.getPath) @@ -50,7 +60,7 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ val message = MessageSerializer.deserialize(system, durableMessage.getMessage) val sender = deserializeActorRef(durableMessage.getSender) - new Envelope(message, sender)(system) + Envelope(message, sender)(system) } } From 25ff921b42f607a00de842017245574cfabbcef8 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 16:31:13 +0200 Subject: [PATCH 087/106] Making most of the innards of microkernel.Main private --- akka-kernel/src/main/scala/akka/kernel/Main.scala | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/akka-kernel/src/main/scala/akka/kernel/Main.scala b/akka-kernel/src/main/scala/akka/kernel/Main.scala index ead2c28121..97ff625ab8 100644 --- a/akka-kernel/src/main/scala/akka/kernel/Main.scala +++ b/akka-kernel/src/main/scala/akka/kernel/Main.scala @@ -59,9 +59,9 @@ trait Bootable { * Main class for running the microkernel. */ object Main { - val quiet = getBoolean("akka.kernel.quiet") + private val quiet = getBoolean("akka.kernel.quiet") - def log(s: String) = if (!quiet) println(s) + private def log(s: String) = if (!quiet) println(s) def main(args: Array[String]) = { if (args.isEmpty) { @@ -90,7 +90,7 @@ object Main { log("Successfully started Akka") } - def createClassLoader(): ClassLoader = { + private def createClassLoader(): ClassLoader = { if (ActorSystem.GlobalHome.isDefined) { val home = ActorSystem.GlobalHome.get val deploy = new File(home, "deploy") @@ -106,7 +106,7 @@ object Main { } } - def loadDeployJars(deploy: File): ClassLoader = { + private def loadDeployJars(deploy: File): ClassLoader = { val jars = deploy.listFiles.filter(_.getName.endsWith(".jar")) val nestedJars = jars flatMap { jar ⇒ @@ -122,7 +122,7 @@ object Main { new URLClassLoader(urls, Thread.currentThread.getContextClassLoader) } - def addShutdownHook(bootables: Seq[Bootable]): Unit = { + private def addShutdownHook(bootables: Seq[Bootable]): Unit = { Runtime.getRuntime.addShutdownHook(new Thread(new Runnable { def run = { log("") @@ -138,7 +138,7 @@ object Main { })) } - def banner = """ + private def banner = """ ============================================================================== ZZ: From 3ab02e95199cb3b386e8f25a9b50138500fefc8d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 23 May 2012 16:35:42 +0200 Subject: [PATCH 088/106] Convert NodeStartupSpec to MultiNodeSpec. 
See #1948 --- .../scala/akka/cluster/NodeStartupSpec.scala | 87 +++++++++++++++++++ .../scala/akka/cluster/NodeStartupSpec.scala | 84 ------------------ .../testconductor/TestConductorSpec.scala | 19 ++-- .../akka/remote/testkit/MultiNodeSpec.scala | 19 ++++ project/AkkaBuild.scala | 2 +- 5 files changed, 113 insertions(+), 98 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala new file mode 100644 index 0000000000..a0e0e19943 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -0,0 +1,87 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory + +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object NodeStartupMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded host:port + akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2601" + """))) + + nodeConfig(first, ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded port + akka.remote.netty.port=2601 + """)) + +} + +class NodeStartupMultiJvmNode1 extends NodeStartupSpec +class NodeStartupMultiJvmNode2 extends NodeStartupSpec + +class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender { + import NodeStartupMultiJvmSpec._ + + override def initialParticipants = 2 + + var firstNode: Cluster = _ + + runOn(first) { + firstNode = Cluster(system) + } + + "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { + + "be a singleton cluster when started up" taggedAs LongRunningTest in { + runOn(first) { + awaitCond(firstNode.isSingletonCluster) + } + + testConductor.enter("done") + } + + "be in 'Joining' phase when started up" taggedAs LongRunningTest in { + runOn(first) { + val members = firstNode.latestGossip.members + members.size must be(1) + val firstAddress = testConductor.getAddressFor(first).await + val joiningMember = members find (_.address == firstAddress) + joiningMember must not be (None) + joiningMember.get.status must be(MemberStatus.Joining) + } + + testConductor.enter("done") + } + } + + "A second cluster node with a 'node-to-join' config defined" must { + "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { + runOn(second) { + // start cluster on second node, and join + Cluster(system) + } + + runOn(first) { + val secondAddress = testConductor.getAddressFor(second).await + awaitCond { + firstNode.latestGossip.members.exists { member ⇒ + member.address == secondAddress && member.status == MemberStatus.Up + } + } + firstNode.latestGossip.members.size must be(2) + } + + testConductor.enter("done") + } + } + +} diff --git a/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala deleted file mode 100644 index 711a0552b4..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.cluster - -import java.net.InetSocketAddress - -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ -import akka.util.duration._ - -import com.typesafe.config._ - -class NodeStartupSpec extends ClusterSpec with ImplicitSender { - val portPrefix = 8 - - var node0: Cluster = _ - var node1: Cluster = _ - var system0: ActorSystemImpl = _ - var system1: ActorSystemImpl = _ - - try { - "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - system0 = ActorSystem("system0", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d550 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote0 = system0.provider.asInstanceOf[RemoteActorRefProvider] - node0 = Cluster(system0) - - "be a singleton cluster when started up" taggedAs LongRunningTest in { - Thread.sleep(1.seconds.dilated.toMillis) - node0.isSingletonCluster must be(true) - } - - "be in 'Joining' phase when started up" taggedAs LongRunningTest in { - val members = node0.latestGossip.members - val joiningMember = members find (_.address.port.get == 550.withPortPrefix) - joiningMember must be('defined) - joiningMember.get.status must be(MemberStatus.Joining) - } - } - - "A second cluster node with a 'node-to-join' config defined" must { - "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { - system1 = ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d551 - cluster.node-to-join = "akka://system0@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider] - node1 = Cluster(system1) - - Thread.sleep(10.seconds.dilated.toMillis) // give enough time for node1 to JOIN node0 and leader to move him to UP - val members = node0.latestGossip.members - val joiningMember = members find (_.address.port.get == 551.withPortPrefix) - joiningMember must be('defined) - joiningMember.get.status must be(MemberStatus.Up) - } - } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) - } - - override def atTermination() { - if (node0 ne null) node0.shutdown() - if (system0 ne null) system0.shutdown() - - if (node1 ne null) node1.shutdown() - if (system1 ne null) system1.shutdown() - } -} diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index e311fa0023..5ff19a806b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ package akka.remote.testconductor import akka.remote.AkkaRemoteSpec @@ -16,18 +19,8 @@ import akka.remote.testkit.MultiNodeSpec import akka.remote.testkit.MultiNodeConfig object TestConductorMultiJvmSpec extends MultiNodeConfig { - commonConfig(ConfigFactory.parseString(""" - # akka.loglevel = DEBUG - akka.remote { - log-received-messages = on - log-sent-messages = on - } - akka.actor.debug { - receive = on - fsm = on - } - """)) - + commonConfig(debugConfig(on = true)) + val master = role("master") val slave = role("slave") } @@ -36,7 +29,7 @@ class TestConductorMultiJvmNode1 extends TestConductorSpec class TestConductorMultiJvmNode2 extends TestConductorSpec class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with ImplicitSender { - + import TestConductorMultiJvmSpec._ def initialParticipants = 2 diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 92e65247fb..3822a1f529 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -37,6 +37,25 @@ abstract class MultiNodeConfig { */ def nodeConfig(role: RoleName, config: Config): Unit = _nodeConf += role -> config + /** + * Include for verbose debug logging + * @param on when `true` debug Config is returned, otherwise empty Config + */ + def debugConfig(on: Boolean): Config = + if (on) + ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.remote { + log-received-messages = on + log-sent-messages = on + } + akka.actor.debug { + receive = on + fsm = on + } + """) + else ConfigFactory.empty + /** * Construct a RoleName and return it, to be used as an identifier in the * test. Registration of a role name creates a role which then needs to be diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 206e32e1f3..f884894d52 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -114,7 +114,7 @@ object AkkaBuild extends Build { lazy val cluster = Project( id = "akka-cluster", base = file("akka-cluster"), - dependencies = Seq(remote, remote % "test->test", testkit % "test->test"), + dependencies = Seq(remote, remoteTests % "compile;test->test;multi-jvm->multi-jvm", testkit % "test->test"), settings = defaultSettings ++ multiJvmSettings ++ Seq( libraryDependencies ++= Dependencies.cluster, // disable parallel tests From 3e416a4b2c2b9a3dcc3d8723f62b38cf95142166 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 23 May 2012 17:39:34 +0200 Subject: [PATCH 089/106] Add convergence verification to NodeStartupSpec. 
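As a quick sketch of what "convergence verification" amounts to here (a fragment from inside the spec body, using the names of the spec in the diff below, not a standalone example): both sides poll until the gossip converges.

    // on the joining node: start the Cluster extension and wait for convergence
    runOn(second) {
      val secondNode = Cluster(system)
      awaitCond(secondNode.convergence.isDefined)
    }

    // on the first node: wait until its view of the gossip has converged as well
    runOn(first) {
      awaitCond(firstNode.convergence.isDefined)
    }
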
See #1948 --- .../src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index a0e0e19943..10b5945ee5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -67,7 +67,8 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { runOn(second) { // start cluster on second node, and join - Cluster(system) + val secondNode = Cluster(system) + awaitCond(secondNode.convergence.isDefined) } runOn(first) { @@ -78,6 +79,7 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic } } firstNode.latestGossip.members.size must be(2) + awaitCond(firstNode.convergence.isDefined) } testConductor.enter("done") From a934d7f29d92c2ff0268b20c1c07e0b519e213e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Wed, 23 May 2012 22:52:43 +0200 Subject: [PATCH 090/106] Make sure normal tests are run even if multi-jvm tests fail --- project/AkkaBuild.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 206e32e1f3..62c81bd3eb 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -89,7 +89,7 @@ object AkkaBuild extends Build { jvmOptions in MultiJvm := { if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil }, - test in Test <<= (test in Test) dependsOn (test in MultiJvm) + test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -107,7 +107,7 @@ object AkkaBuild extends Build { jvmOptions in MultiJvm := { if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil }, - test in Test <<= (test in Test) dependsOn (test in MultiJvm) + test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -126,7 +126,7 @@ object AkkaBuild extends Build { jvmOptions in MultiJvm := { if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil }, - test in Test <<= (test in Test) dependsOn (test in MultiJvm) + test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) From b9bfcaf0563896b1b6c212ae3219a7f359fad9c0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 23:59:55 +0200 Subject: [PATCH 091/106] Fixing the DurationConverterTest and the DurationConverter to conform to Camel Spec --- .../internal/component/ActorComponent.scala | 17 +++++++++++---- .../component/DurationConverterTest.scala | 21 ++++++++++++------- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index a8d7a59b61..d52f74f2f6 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -207,11 +207,20 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex * For internal use only. 
Converts Strings to [[akka.util.Duration]]s */ private[camel] object DurationTypeConverter extends TypeConverter { - def convertTo[T](`type`: Class[T], value: AnyRef) = Duration(value.toString).asInstanceOf[T] //FIXME WTF + override def convertTo[T](`type`: Class[T], value: AnyRef): T = `type`.cast(try { + val d = Duration(value.toString) + if (`type`.isInstance(d)) d else null + } catch { + case NonFatal(_) ⇒ null + }) + def convertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef): T = convertTo(`type`, value) - def mandatoryConvertTo[T](`type`: Class[T], value: AnyRef) = convertTo(`type`, value) - def mandatoryConvertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef) = convertTo(`type`, value) - def toString(duration: Duration) = duration.toNanos + " nanos" + def mandatoryConvertTo[T](`type`: Class[T], value: AnyRef): T = convertTo(`type`, value) match { + case null ⇒ throw new NoTypeConversionAvailableException(value, `type`) + case some ⇒ some + } + def mandatoryConvertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef): T = mandatoryConvertTo(`type`, value) + def toString(duration: Duration): String = duration.toNanos + " nanos" } /** diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala index 3787a9f46f..53729a0b6f 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala @@ -8,8 +8,9 @@ import org.scalatest.matchers.MustMatchers import akka.util.duration._ import akka.util.Duration import org.scalatest.WordSpec +import org.apache.camel.NoTypeConversionAvailableException -class DurationConverterTest extends WordSpec with MustMatchers { +class DurationConverterSpec extends WordSpec with MustMatchers { import DurationTypeConverter._ "DurationTypeConverter must convert '10 nanos'" in { @@ -21,15 +22,19 @@ class DurationConverterTest extends WordSpec with MustMatchers { } "DurationTypeConverter must throw if invalid format" in { - intercept[Exception] { - convertTo(classOf[Duration], "abc nanos") must be(10 nanos) - } + convertTo(classOf[Duration], "abc nanos") must be === null + + intercept[NoTypeConversionAvailableException] { + mandatoryConvertTo(classOf[Duration], "abc nanos") must be(10 nanos) + }.getValue must be === "abc nanos" } - "DurationTypeConverter must throw if doesn't end with nanos" in { - intercept[Exception] { - convertTo(classOf[Duration], "10233") must be(10 nanos) - } + "DurationTypeConverter must throw if doesn't end with time unit" in { + convertTo(classOf[Duration], "10233") must be === null + + intercept[NoTypeConversionAvailableException] { + mandatoryConvertTo(classOf[Duration], "10233") must be(10 nanos) + }.getValue must be === "10233" } } From 2198462ed2b4afadaf0514196c0120bb1d8d491c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 01:16:50 +0200 Subject: [PATCH 092/106] Binary compat for SLF4J module --- .../akka/event/slf4j/Slf4jEventHandler.scala | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jEventHandler.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jEventHandler.scala index 966f57b938..9e2fefffd9 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jEventHandler.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jEventHandler.scala @@ -18,12 +18,29 @@ trait 
SLF4JLogging { lazy val log = Logger(this.getClass.getName) } +/** + * Logger is a factory for obtaining SLF4J-Loggers + */ object Logger { + /** + * @param logger - which logger + * @return a Logger that corresponds for the given logger name + */ def apply(logger: String): SLFLogger = SLFLoggerFactory getLogger logger + + /** + * @param logClass - the class to log for + * @param logSource - the textual representation of the source of this log stream + * @return a Logger for the specified parameters + */ def apply(logClass: Class[_], logSource: String): SLFLogger = logClass match { case c if c == classOf[DummyClassForStringSources] ⇒ apply(logSource) case _ ⇒ SLFLoggerFactory getLogger logClass } + + /** + * Returns the SLF4J Root Logger + */ def root: SLFLogger = apply(SLFLogger.ROOT_LOGGER_NAME) } From 568c02d1580b1ee707bacbb2f49fbfbba5710ffb Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 01:38:41 +0200 Subject: [PATCH 093/106] Preparing transactors for binary compat --- .../scala/akka/transactor/Coordinated.scala | 27 ++++++++++++------- .../scala/akka/transactor/Transactor.scala | 10 ++++--- .../akka/transactor/TransactorExtension.scala | 4 +-- 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/akka-transactor/src/main/scala/akka/transactor/Coordinated.scala b/akka-transactor/src/main/scala/akka/transactor/Coordinated.scala index 2463f0e436..792824be24 100644 --- a/akka-transactor/src/main/scala/akka/transactor/Coordinated.scala +++ b/akka-transactor/src/main/scala/akka/transactor/Coordinated.scala @@ -12,19 +12,29 @@ import java.util.concurrent.Callable /** * Akka-specific exception for coordinated transactions. */ -class CoordinatedTransactionException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null); +class CoordinatedTransactionException(message: String, cause: Throwable) extends AkkaException(message, cause) { + def this(msg: String) = this(msg, null) } /** * Coordinated transactions across actors. */ object Coordinated { - def apply(message: Any = null)(implicit timeout: Timeout) = new Coordinated(message, createInitialMember(timeout)) + /** + * Creates a new Coordinated with the given message and Timeout + * @param message - the message which will be coordinated + * @param timeout - the timeout for the coordination + * @return a new Coordinated + */ + def apply(message: Any = null)(implicit timeout: Timeout): Coordinated = + new Coordinated(message, CommitBarrier(timeout.duration.toMillis).addMember()) + + /** + * @param c - a Coordinated to be unapplied + * @return the message associated with the given Coordinated + */ def unapply(c: Coordinated): Option[Any] = Some(c.message) - - def createInitialMember(timeout: Timeout) = CommitBarrier(timeout.duration.toMillis).addMember() } /** @@ -91,16 +101,15 @@ class Coordinated(val message: Any, member: CommitBarrier.Member) { // Java API constructors - def this(message: Any, timeout: Timeout) = this(message, Coordinated.createInitialMember(timeout)) + def this(message: Any, timeout: Timeout) = this(message, CommitBarrier(timeout.duration.toMillis).addMember()) - def this(timeout: Timeout) = this(null, Coordinated.createInitialMember(timeout)) + def this(timeout: Timeout) = this(null, timeout) /** * Create a new Coordinated object and increment the number of members by one. * Use this method to ''pass on'' the coordination. 
*/ - def apply(msg: Any): Coordinated = - new Coordinated(msg, member.commitBarrier.addMember()) + def apply(msg: Any): Coordinated = new Coordinated(msg, member.commitBarrier.addMember()) /** * Create a new Coordinated object but *do not* increment the number of members by one. diff --git a/akka-transactor/src/main/scala/akka/transactor/Transactor.scala b/akka-transactor/src/main/scala/akka/transactor/Transactor.scala index 6e390a6623..fd802e1f21 100644 --- a/akka-transactor/src/main/scala/akka/transactor/Transactor.scala +++ b/akka-transactor/src/main/scala/akka/transactor/Transactor.scala @@ -176,8 +176,10 @@ trait Transactor extends Actor { /** * Default catch-all for the different Receive methods. */ - def doNothing: Receive = new Receive { - def apply(any: Any) = {} - def isDefinedAt(any: Any) = false - } + def doNothing: Receive = EmptyReceive +} + +private[akka] object EmptyReceive extends PartialFunction[Any, Unit] { + def apply(any: Any): Unit = () + def isDefinedAt(any: Any): Boolean = false } diff --git a/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala b/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala index 96aea8904c..85cb8c46fd 100644 --- a/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala +++ b/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala @@ -15,11 +15,11 @@ import java.util.concurrent.TimeUnit.MILLISECONDS */ object TransactorExtension extends ExtensionId[TransactorSettings] with ExtensionIdProvider { override def get(system: ActorSystem): TransactorSettings = super.get(system) - override def lookup = TransactorExtension + override def lookup: TransactorExtension.type = TransactorExtension override def createExtension(system: ExtendedActorSystem): TransactorSettings = new TransactorSettings(system.settings.config) } class TransactorSettings(val config: Config) extends Extension { import config._ - val CoordinatedTimeout = Timeout(Duration(getMilliseconds("akka.transactor.coordinated-timeout"), MILLISECONDS)) + val CoordinatedTimeout: Timeout = Timeout(Duration(getMilliseconds("akka.transactor.coordinated-timeout"), MILLISECONDS)) } \ No newline at end of file From d3511d25a497e7b0df7a945ef86e18fa9a18b965 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 08:32:50 +0200 Subject: [PATCH 094/106] Placed the barrier in after instead. 
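A hedged sketch of the pattern introduced below, not part of the patch itself: with ScalaTest's BeforeAndAfter mixed in, a single barrier in `after` replaces the per-test `testConductor.enter("done")` calls, so all nodes stay in lock-step between tests. SomeConfig, SomeSpec and the `first` role are hypothetical placeholders.

    import org.scalatest.BeforeAndAfter

    class SomeSpec extends MultiNodeSpec(SomeConfig) with ImplicitSender with BeforeAndAfter {
      import SomeConfig._

      override def initialParticipants = 2

      // every test, on every node, ends at this common barrier
      after {
        testConductor.enter("after")
      }

      "Something" must {
        "no longer need its own trailing barrier" in {
          runOn(first) {
            // role-specific assertions
          }
        }
      }
    }
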
See #1948 --- .../scala/akka/cluster/NodeStartupSpec.scala | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 10b5945ee5..6807d7032a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -4,7 +4,7 @@ package akka.cluster import com.typesafe.config.ConfigFactory - +import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -13,43 +13,48 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - # FIXME get rid of this hardcoded host:port - akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2601" - """))) + commonConfig(debugConfig(on = false)) nodeConfig(first, ConfigFactory.parseString(""" # FIXME get rid of this hardcoded port akka.remote.netty.port=2601 """)) + nodeConfig(second, ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded host:port + akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2601" + """)) + } class NodeStartupMultiJvmNode1 extends NodeStartupSpec class NodeStartupMultiJvmNode2 extends NodeStartupSpec -class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender { +class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender with BeforeAndAfter { import NodeStartupMultiJvmSpec._ override def initialParticipants = 2 var firstNode: Cluster = _ + after { + testConductor.enter("after") + } + runOn(first) { firstNode = Cluster(system) } "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - "be a singleton cluster when started up" taggedAs LongRunningTest in { + "be a singleton cluster when started up" in { runOn(first) { awaitCond(firstNode.isSingletonCluster) + firstNode.convergence must be(None) } - - testConductor.enter("done") } - "be in 'Joining' phase when started up" taggedAs LongRunningTest in { + "be in 'Joining' phase when started up" in { runOn(first) { val members = firstNode.latestGossip.members members.size must be(1) @@ -58,13 +63,11 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic joiningMember must not be (None) joiningMember.get.status must be(MemberStatus.Joining) } - - testConductor.enter("done") } } "A second cluster node with a 'node-to-join' config defined" must { - "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { + "join the other node cluster when sending a Join command" in { runOn(second) { // start cluster on second node, and join val secondNode = Cluster(system) @@ -81,8 +84,6 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic firstNode.latestGossip.members.size must be(2) awaitCond(firstNode.convergence.isDefined) } - - testConductor.enter("done") } } From db4730978f77a0a22c8c2b1a5f8cd8d954bfddb8 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 08:51:36 +0200 Subject: [PATCH 095/106] FIXME singletonCluster should reach convergence. 
See #2117 --- .../src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 6807d7032a..694d4ac57d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -50,7 +50,8 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic "be a singleton cluster when started up" in { runOn(first) { awaitCond(firstNode.isSingletonCluster) - firstNode.convergence must be(None) + // FIXME #2117 singletonCluster should reach convergence + //awaitCond(firstNode.convergence.isDefined) } } From be87215fc6c9b11de736c803c9b90d9c1b8c5d23 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 10:00:35 +0200 Subject: [PATCH 096/106] Pass timefactor to multi-jvm process --- project/AkkaBuild.scala | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 9e0a32ebce..fd18e931c7 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -86,9 +86,7 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), - jvmOptions in MultiJvm := { - if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil - }, + jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -104,9 +102,7 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), - jvmOptions in MultiJvm := { - if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil - }, + jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -123,9 +119,7 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), - jvmOptions in MultiJvm := { - if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil - }, + jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -304,6 +298,14 @@ object AkkaBuild extends Build { val defaultExcludedTags = Seq("timing", "long-running") + val defaultMultiJvmOptions: Seq[String] = { + (System.getProperty("akka.test.timefactor") match { + case null => Nil + case x => List("-Dakka.test.timefactor=" + x) + }) ::: + (if (getBoolean("sbt.log.noformat")) List("-Dakka.test.nocolor=true") else Nil) + } + lazy val defaultSettings = baseSettings ++ formatSettings ++ Seq( resolvers += "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/", From e05481604734ea287c9185c1c499673f91a86d72 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 10:56:32 +0200 Subject: [PATCH 097/106] make failure injection idempotent - instead of creating local top-level actors per pipeline, just create one system 
actor through which everything is sent - this enables storing settings (like what to throttle how) within this actor and applying settings when connections come up later - it also gets rid of the blocking actor creation from NetworkFailureInjector, fixing the dead-lock - moved also the ServerFSMs to be children of the Controller - all actors have proper names now for easier debugging --- .../src/main/scala/akka/event/Logging.scala | 2 +- .../akka/remote/testconductor/Conductor.scala | 37 +- .../akka/remote/testconductor/Extension.scala | 4 +- .../NetworkFailureInjector.scala | 475 ++++++++++-------- .../akka/remote/testconductor/Player.scala | 16 +- .../scala/akka/remote/SimpleRemoteSpec.scala | 2 +- .../DirectRoutedRemoteActorMultiJvmSpec.scala | 2 +- .../testconductor/TestConductorSpec.scala | 2 +- .../main/scala/akka/remote/netty/Client.scala | 5 +- .../remote/netty/NettyRemoteSupport.scala | 5 + .../main/scala/akka/remote/netty/Server.scala | 16 +- 11 files changed, 325 insertions(+), 241 deletions(-) diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 93019318dd..b044fd09ab 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -601,7 +601,7 @@ object Logging { import java.text.SimpleDateFormat import java.util.Date - val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.S") + val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.SSS") def timestamp = dateFormat.format(new Date) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 6c26fcaae2..5e467fde19 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -27,12 +27,26 @@ import akka.actor.SupervisorStrategy import java.util.concurrent.ConcurrentHashMap import akka.actor.Status -sealed trait Direction +sealed trait Direction { + def includes(other: Direction): Boolean +} object Direction { - case object Send extends Direction - case object Receive extends Direction - case object Both extends Direction + case object Send extends Direction { + override def includes(other: Direction): Boolean = other match { + case Send ⇒ true + case _ ⇒ false + } + } + case object Receive extends Direction { + override def includes(other: Direction): Boolean = other match { + case Receive ⇒ true + case _ ⇒ false + } + } + case object Both extends Direction { + override def includes(other: Direction): Boolean = true + } } /** @@ -202,14 +216,15 @@ trait Conductor { this: TestConductorExt ⇒ * purpose is to dispatch incoming messages to the right ServerFSM actor. There is * one shared instance of this class for all connections accepted by one Controller. 
*/ -class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { +class ConductorHandler(_createTimeout: Timeout, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { + implicit val createTimeout = _createTimeout val clients = new ConcurrentHashMap[Channel, ActorRef]() override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel log.debug("connection from {}", getAddrString(channel)) - val fsm = system.actorOf(Props(new ServerFSM(controller, channel))) + val fsm: ActorRef = Await.result(controller ? Controller.CreateServerFSM(channel) mapTo, Duration.Inf) clients.put(channel, fsm) } @@ -321,6 +336,7 @@ object Controller { case class ClientDisconnected(name: RoleName) case object GetNodes case object GetSockAddr + case class CreateServerFSM(channel: Channel) case class NodeInfo(name: RoleName, addr: Address, fsm: ActorRef) } @@ -336,7 +352,7 @@ class Controller(private var initialParticipants: Int, controllerPort: InetSocke val settings = TestConductor().Settings val connection = RemoteConnection(Server, controllerPort, - new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler"))) + new ConductorHandler(settings.QueryTimeout, self, Logging(context.system, "ConductorHandler"))) /* * Supervision of the BarrierCoordinator means to catch all his bad emotions @@ -363,8 +379,15 @@ class Controller(private var initialParticipants: Int, controllerPort: InetSocke // map keeping unanswered queries for node addresses (enqueued upon GetAddress, serviced upon NodeInfo) var addrInterest = Map[RoleName, Set[ActorRef]]() + val generation = Iterator from 1 override def receive = LoggingReceive { + case CreateServerFSM(channel) ⇒ + val (ip, port) = channel.getRemoteAddress match { + case s: InetSocketAddress ⇒ (s.getHostString, s.getPort) + } + val name = ip + ":" + port + "-server" + generation.next + sender ! 
context.actorOf(Props(new ServerFSM(self, channel)), name) case c @ NodeInfo(name, addr, fsm) ⇒ barrier forward c if (nodes contains name) { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala index 7f6b576128..09ffd7319f 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala @@ -10,6 +10,8 @@ import java.util.concurrent.TimeUnit.MILLISECONDS import akka.actor.ActorRef import java.util.concurrent.ConcurrentHashMap import akka.actor.Address +import akka.actor.ActorSystemImpl +import akka.actor.Props /** * Access to the [[akka.remote.testconductor.TestConductorExt]] extension: @@ -50,6 +52,6 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport val address = transport.address - val failureInjectors = new ConcurrentHashMap[Address, FailureInjector] + val failureInjector = system.asInstanceOf[ActorSystemImpl].systemActorOf(Props[FailureInjector], "FailureInjector") } \ No newline at end of file diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index b853523979..1fcb1a7bf9 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -4,236 +4,303 @@ package akka.remote.testconductor import java.net.InetSocketAddress -import scala.collection.immutable.Queue -import org.jboss.netty.buffer.ChannelBuffer -import org.jboss.netty.channel.ChannelState.BOUND -import org.jboss.netty.channel.ChannelState.OPEN -import org.jboss.netty.channel.Channel -import org.jboss.netty.channel.ChannelEvent -import org.jboss.netty.channel.ChannelHandlerContext -import org.jboss.netty.channel.ChannelStateEvent -import org.jboss.netty.channel.MessageEvent -import akka.actor.FSM -import akka.actor.Actor -import akka.util.duration.doubleToDurationDouble -import akka.util.Index -import akka.actor.Address -import akka.actor.ActorSystem -import akka.actor.Props -import akka.actor.ActorRef -import akka.event.Logging -import org.jboss.netty.channel.SimpleChannelHandler -import scala.annotation.tailrec -import akka.util.Duration -import akka.actor.LoggingFSM -import org.jboss.netty.channel.Channels -import org.jboss.netty.channel.ChannelFuture -import org.jboss.netty.channel.ChannelFutureListener -import org.jboss.netty.channel.ChannelFuture -case class FailureInjector(sender: ActorRef, receiver: ActorRef) { - def refs(dir: Direction) = dir match { - case Direction.Send ⇒ Seq(sender) - case Direction.Receive ⇒ Seq(receiver) - case Direction.Both ⇒ Seq(sender, receiver) +import scala.annotation.tailrec +import scala.collection.immutable.Queue + +import org.jboss.netty.buffer.ChannelBuffer +import org.jboss.netty.channel.{ SimpleChannelHandler, MessageEvent, Channels, ChannelStateEvent, ChannelHandlerContext, ChannelFutureListener, ChannelFuture } + +import akka.actor.{ Props, LoggingFSM, Address, ActorSystem, ActorRef, ActorLogging, Actor, FSM } +import akka.event.Logging +import akka.remote.netty.ChannelAddress +import akka.util.Duration +import akka.util.duration._ + +class FailureInjector extends Actor with ActorLogging { + 
import ThrottleActor._ + import NetworkFailureInjector._ + + case class ChannelSettings( + ctx: Option[ChannelHandlerContext] = None, + throttleSend: Option[SetRate] = None, + throttleReceive: Option[SetRate] = None) + case class Injectors(sender: ActorRef, receiver: ActorRef) + + var channels = Map[ChannelHandlerContext, Injectors]() + var settings = Map[Address, ChannelSettings]() + var generation = Iterator from 1 + + /** + * Only for a NEW ctx, start ThrottleActors, prime them and update all maps. + */ + def ingestContextAddress(ctx: ChannelHandlerContext, addr: Address): Injectors = { + val gen = generation.next + val name = addr.host.get + ":" + addr.port.get + val thrSend = context.actorOf(Props(new ThrottleActor(ctx)), name + "-snd" + gen) + val thrRecv = context.actorOf(Props(new ThrottleActor(ctx)), name + "-rcv" + gen) + val injectors = Injectors(thrSend, thrRecv) + channels += ctx -> injectors + settings += addr -> (settings get addr map { + case c @ ChannelSettings(prevCtx, ts, tr) ⇒ + ts foreach (thrSend ! _) + tr foreach (thrRecv ! _) + prevCtx match { + case Some(p) ⇒ log.warning("installing context {} instead of {} for address {}", ctx, p, addr) + case None ⇒ // okay + } + c.copy(ctx = Some(ctx)) + } getOrElse ChannelSettings(Some(ctx))) + injectors + } + + /** + * Retrieve target settings, also if they were sketchy before (i.e. no system name) + */ + def retrieveTargetSettings(target: Address): Option[ChannelSettings] = { + settings get target orElse { + val host = target.host + val port = target.port + settings find { + case (Address("akka", "", `host`, `port`), s) ⇒ true + case _ ⇒ false + } map { + case (_, s) ⇒ settings += target -> s; s + } + } + } + + def receive = { + case RemoveContext(ctx) ⇒ + channels get ctx foreach { inj ⇒ + context stop inj.sender + context stop inj.receiver + } + channels -= ctx + settings ++= settings collect { case (addr, c @ ChannelSettings(Some(`ctx`), _, _)) ⇒ (addr, c.copy(ctx = None)) } + case ThrottleMsg(target, dir, rateMBit) ⇒ + val setting = retrieveTargetSettings(target) + settings += target -> ((setting getOrElse ChannelSettings() match { + case cs @ ChannelSettings(ctx, _, _) if dir includes Direction.Send ⇒ + ctx foreach (c ⇒ channels get c foreach (_.sender ! SetRate(rateMBit))) + cs.copy(throttleSend = Some(SetRate(rateMBit))) + case x ⇒ x + }) match { + case cs @ ChannelSettings(ctx, _, _) if dir includes Direction.Receive ⇒ + ctx foreach (c ⇒ channels get c foreach (_.receiver ! SetRate(rateMBit))) + cs.copy(throttleReceive = Some(SetRate(rateMBit))) + case x ⇒ x + }) + sender ! "ok" + case DisconnectMsg(target, abort) ⇒ + retrieveTargetSettings(target) foreach { + case ChannelSettings(Some(ctx), _, _) ⇒ + val ch = ctx.getChannel + if (abort) { + ch.getConfig.setOption("soLinger", 0) + log.info("aborting connection {}", ch) + } else log.info("closing connection {}", ch) + ch.close + case _ ⇒ log.debug("no connection to {} to close or abort", target) + } + sender ! "ok" + case s @ Send(ctx, direction, future, msg) ⇒ + channels get ctx match { + case Some(Injectors(snd, rcv)) ⇒ + if (direction includes Direction.Send) snd ! s + if (direction includes Direction.Receive) rcv ! 
s + case None ⇒ + val (ipaddr, ip, port) = ctx.getChannel.getRemoteAddress match { + case s: InetSocketAddress ⇒ (s.getAddress, s.getAddress.getHostAddress, s.getPort) + } + val addr = ChannelAddress.get(ctx.getChannel) orElse { + settings collect { case (a @ Address("akka", _, Some(`ip`), Some(`port`)), _) ⇒ a } headOption + } orElse { + val name = ipaddr.getHostName + if (name == ip) None + else settings collect { case (a @ Address("akka", _, Some(`name`), Some(`port`)), _) ⇒ a } headOption + } getOrElse Address("akka", "", ip, port) // this will not match later requests directly, but be picked up by retrieveTargetSettings + val inj = ingestContextAddress(ctx, addr) + if (direction includes Direction.Send) inj.sender ! s + if (direction includes Direction.Receive) inj.receiver ! s + } } } object NetworkFailureInjector { - case class SetRate(rateMBit: Float) - case class Disconnect(abort: Boolean) + case class RemoveContext(ctx: ChannelHandlerContext) } class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { + import NetworkFailureInjector._ - val log = Logging(system, "FailureInjector") + private val log = Logging(system, "FailureInjector") - // everything goes via these Throttle actors to enable easy steering - private val sender = system.actorOf(Props(new Throttle(Direction.Send))) - private val receiver = system.actorOf(Props(new Throttle(Direction.Receive))) - - private val packetSplitThreshold = TestConductor(system).Settings.PacketSplitThreshold - - /* - * State, Data and Messages for the internal Throttle actor - */ - sealed private trait State - private case object PassThrough extends State - private case object Throttle extends State - private case object Blackhole extends State - - private case class Data(lastSent: Long, rateMBit: Float, queue: Queue[Send]) - - private case class Send(ctx: ChannelHandlerContext, future: Option[ChannelFuture], msg: AnyRef) - private case class SetContext(ctx: ChannelHandlerContext) - private case object Tick - - private class Throttle(dir: Direction) extends Actor with LoggingFSM[State, Data] { - import FSM._ - - var channelContext: ChannelHandlerContext = _ - - startWith(PassThrough, Data(0, -1, Queue())) - - when(PassThrough) { - case Event(s @ Send(_, _, msg), _) ⇒ - log.debug("sending msg (PassThrough): {}", msg) - send(s) - stay - } - - when(Throttle) { - case Event(s: Send, data @ Data(_, _, Queue())) ⇒ - stay using sendThrottled(data.copy(lastSent = System.nanoTime, queue = Queue(s))) - case Event(s: Send, data) ⇒ - stay using sendThrottled(data.copy(queue = data.queue.enqueue(s))) - case Event(Tick, data) ⇒ - stay using sendThrottled(data) - } - - onTransition { - case Throttle -> PassThrough ⇒ - for (s ← stateData.queue) { - log.debug("sending msg (Transition): {}", s.msg) - send(s) - } - cancelTimer("send") - case Throttle -> Blackhole ⇒ - cancelTimer("send") - } - - when(Blackhole) { - case Event(Send(_, _, msg), _) ⇒ - log.debug("dropping msg {}", msg) - stay - } - - whenUnhandled { - case Event(NetworkFailureInjector.SetRate(rate), d) ⇒ - sender ! "ok" - if (rate > 0) { - goto(Throttle) using d.copy(lastSent = System.nanoTime, rateMBit = rate, queue = Queue()) - } else if (rate == 0) { - goto(Blackhole) - } else { - goto(PassThrough) - } - case Event(SetContext(ctx), _) ⇒ channelContext = ctx; stay - case Event(NetworkFailureInjector.Disconnect(abort), Data(ctx, _, _)) ⇒ - sender ! 
"ok" - // TODO implement abort - channelContext.getChannel.disconnect() - stay - } - - initialize - - private def sendThrottled(d: Data): Data = { - val (data, toSend, toTick) = schedule(d) - for (s ← toSend) { - log.debug("sending msg (Tick): {}", s.msg) - send(s) - } - if (!timerActive_?("send")) - for (time ← toTick) { - log.debug("scheduling next Tick in {}", time) - setTimer("send", Tick, time, false) - } - data - } - - private def send(s: Send): Unit = dir match { - case Direction.Send ⇒ Channels.write(s.ctx, s.future getOrElse Channels.future(s.ctx.getChannel), s.msg) - case Direction.Receive ⇒ Channels.fireMessageReceived(s.ctx, s.msg) - case _ ⇒ - } - - private def schedule(d: Data): (Data, Seq[Send], Option[Duration]) = { - val now = System.nanoTime - @tailrec def rec(d: Data, toSend: Seq[Send]): (Data, Seq[Send], Option[Duration]) = { - if (d.queue.isEmpty) (d, toSend, None) - else { - val timeForPacket = d.lastSent + (1000 * size(d.queue.head.msg) / d.rateMBit).toLong - if (timeForPacket <= now) rec(Data(timeForPacket, d.rateMBit, d.queue.tail), toSend :+ d.queue.head) - else { - val splitThreshold = d.lastSent + packetSplitThreshold.toNanos - if (now < splitThreshold) (d, toSend, Some((timeForPacket - now).nanos min (splitThreshold - now).nanos)) - else { - val microsToSend = (now - d.lastSent) / 1000 - val (s1, s2) = split(d.queue.head, (microsToSend * d.rateMBit / 8).toInt) - (d.copy(queue = s2 +: d.queue.tail), toSend :+ s1, Some((timeForPacket - now).nanos min packetSplitThreshold)) - } - } - } - } - rec(d, Seq()) - } - - private def split(s: Send, bytes: Int): (Send, Send) = { - s.msg match { - case buf: ChannelBuffer ⇒ - val f = s.future map { f ⇒ - val newF = Channels.future(s.ctx.getChannel) - newF.addListener(new ChannelFutureListener { - def operationComplete(future: ChannelFuture) { - if (future.isCancelled) f.cancel() - else future.getCause match { - case null ⇒ - case thr ⇒ f.setFailure(thr) - } - } - }) - newF - } - val b = buf.slice() - b.writerIndex(b.readerIndex + bytes) - buf.readerIndex(buf.readerIndex + bytes) - (Send(s.ctx, f, b), Send(s.ctx, s.future, buf)) - } - } - - private def size(msg: AnyRef) = msg match { - case b: ChannelBuffer ⇒ b.readableBytes() * 8 - case _ ⇒ throw new UnsupportedOperationException("NetworkFailureInjector only supports ChannelBuffer messages") - } - } - - private var remote: Option[Address] = None - - override def messageReceived(ctx: ChannelHandlerContext, msg: MessageEvent) { - log.debug("upstream(queued): {}", msg) - receiver ! Send(ctx, Option(msg.getFuture), msg.getMessage) - } + private val conductor = TestConductor(system) + private var announced = false override def channelConnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { state.getValue match { case a: InetSocketAddress ⇒ val addr = Address("akka", "", a.getHostName, a.getPort) log.debug("connected to {}", addr) - TestConductor(system).failureInjectors.put(addr, FailureInjector(sender, receiver)) match { - case null ⇒ // okay - case fi ⇒ system.log.error("{} already registered for address {}", fi, addr) - } - remote = Some(addr) - sender ! 
SetContext(ctx) case x ⇒ throw new IllegalArgumentException("unknown address type: " + x) } } override def channelDisconnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { - log.debug("disconnected from {}", remote) - remote = remote flatMap { addr ⇒ - TestConductor(system).failureInjectors.remove(addr) - system.stop(sender) - system.stop(receiver) - None - } + log.debug("disconnected from {}", state.getChannel) + conductor.failureInjector ! RemoveContext(ctx) + } + + override def messageReceived(ctx: ChannelHandlerContext, msg: MessageEvent) { + log.debug("upstream(queued): {}", msg) + conductor.failureInjector ! ThrottleActor.Send(ctx, Direction.Receive, Option(msg.getFuture), msg.getMessage) } override def writeRequested(ctx: ChannelHandlerContext, msg: MessageEvent) { log.debug("downstream(queued): {}", msg) - sender ! Send(ctx, Option(msg.getFuture), msg.getMessage) + conductor.failureInjector ! ThrottleActor.Send(ctx, Direction.Send, Option(msg.getFuture), msg.getMessage) } } +private[akka] object ThrottleActor { + sealed trait State + case object PassThrough extends State + case object Throttle extends State + case object Blackhole extends State + + case class Data(lastSent: Long, rateMBit: Float, queue: Queue[Send]) + + case class Send(ctx: ChannelHandlerContext, direction: Direction, future: Option[ChannelFuture], msg: AnyRef) + case class SetRate(rateMBit: Float) + case object Tick +} + +private[akka] class ThrottleActor(channelContext: ChannelHandlerContext) + extends Actor with LoggingFSM[ThrottleActor.State, ThrottleActor.Data] { + + import ThrottleActor._ + import FSM._ + + private val packetSplitThreshold = TestConductor(context.system).Settings.PacketSplitThreshold + + startWith(PassThrough, Data(0, -1, Queue())) + + when(PassThrough) { + case Event(s @ Send(_, _, _, msg), _) ⇒ + log.debug("sending msg (PassThrough): {}", msg) + send(s) + stay + } + + when(Throttle) { + case Event(s: Send, data @ Data(_, _, Queue())) ⇒ + stay using sendThrottled(data.copy(lastSent = System.nanoTime, queue = Queue(s))) + case Event(s: Send, data) ⇒ + stay using sendThrottled(data.copy(queue = data.queue.enqueue(s))) + case Event(Tick, data) ⇒ + stay using sendThrottled(data) + } + + onTransition { + case Throttle -> PassThrough ⇒ + for (s ← stateData.queue) { + log.debug("sending msg (Transition): {}", s.msg) + send(s) + } + cancelTimer("send") + case Throttle -> Blackhole ⇒ + cancelTimer("send") + } + + when(Blackhole) { + case Event(Send(_, _, _, msg), _) ⇒ + log.debug("dropping msg {}", msg) + stay + } + + whenUnhandled { + case Event(SetRate(rate), d) ⇒ + if (rate > 0) { + goto(Throttle) using d.copy(lastSent = System.nanoTime, rateMBit = rate, queue = Queue()) + } else if (rate == 0) { + goto(Blackhole) + } else { + goto(PassThrough) + } + } + + initialize + + private def sendThrottled(d: Data): Data = { + val (data, toSend, toTick) = schedule(d) + for (s ← toSend) { + log.debug("sending msg (Tick): {}", s.msg) + send(s) + } + if (!timerActive_?("send")) + for (time ← toTick) { + log.debug("scheduling next Tick in {}", time) + setTimer("send", Tick, time, false) + } + data + } + + private def send(s: Send): Unit = s.direction match { + case Direction.Send ⇒ Channels.write(s.ctx, s.future getOrElse Channels.future(s.ctx.getChannel), s.msg) + case Direction.Receive ⇒ Channels.fireMessageReceived(s.ctx, s.msg) + case _ ⇒ + } + + private def schedule(d: Data): (Data, Seq[Send], Option[Duration]) = { + val now = System.nanoTime + @tailrec def rec(d: Data, toSend: Seq[Send]): 
(Data, Seq[Send], Option[Duration]) = { + if (d.queue.isEmpty) (d, toSend, None) + else { + val timeForPacket = d.lastSent + (1000 * size(d.queue.head.msg) / d.rateMBit).toLong + if (timeForPacket <= now) rec(Data(timeForPacket, d.rateMBit, d.queue.tail), toSend :+ d.queue.head) + else { + val splitThreshold = d.lastSent + packetSplitThreshold.toNanos + if (now < splitThreshold) (d, toSend, Some((timeForPacket - now).nanos min (splitThreshold - now).nanos)) + else { + val microsToSend = (now - d.lastSent) / 1000 + val (s1, s2) = split(d.queue.head, (microsToSend * d.rateMBit / 8).toInt) + (d.copy(queue = s2 +: d.queue.tail), toSend :+ s1, Some((timeForPacket - now).nanos min packetSplitThreshold)) + } + } + } + } + rec(d, Seq()) + } + + private def split(s: Send, bytes: Int): (Send, Send) = { + s.msg match { + case buf: ChannelBuffer ⇒ + val f = s.future map { f ⇒ + val newF = Channels.future(s.ctx.getChannel) + newF.addListener(new ChannelFutureListener { + def operationComplete(future: ChannelFuture) { + if (future.isCancelled) f.cancel() + else future.getCause match { + case null ⇒ + case thr ⇒ f.setFailure(thr) + } + } + }) + newF + } + val b = buf.slice() + b.writerIndex(b.readerIndex + bytes) + buf.readerIndex(buf.readerIndex + bytes) + (Send(s.ctx, s.direction, f, b), Send(s.ctx, s.direction, s.future, buf)) + } + } + + private def size(msg: AnyRef) = msg match { + case b: ChannelBuffer ⇒ b.readableBytes() * 8 + case _ ⇒ throw new UnsupportedOperationException("NetworkFailureInjector only supports ChannelBuffer messages") + } +} + diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index 10434007e1..254b1a7d45 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -195,21 +195,13 @@ class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor log.warning("did not expect {}", op) } stay using d.copy(runningOp = None) - case ThrottleMsg(target, dir, rate) ⇒ + case t: ThrottleMsg ⇒ import settings.QueryTimeout - import context.dispatcher - TestConductor().failureInjectors.get(target.copy(system = "")) match { - case null ⇒ log.warning("cannot throttle unknown address {}", target) - case inj ⇒ - Future.sequence(inj.refs(dir) map (_ ? NetworkFailureInjector.SetRate(rate))) map (_ ⇒ ToServer(Done)) pipeTo self - } + TestConductor().failureInjector ? t map (_ ⇒ ToServer(Done)) pipeTo self stay - case DisconnectMsg(target, abort) ⇒ + case d: DisconnectMsg ⇒ import settings.QueryTimeout - TestConductor().failureInjectors.get(target.copy(system = "")) match { - case null ⇒ log.warning("cannot disconnect unknown address {}", target) - case inj ⇒ inj.sender ? NetworkFailureInjector.Disconnect(abort) map (_ ⇒ ToServer(Done)) pipeTo self - } + TestConductor().failureInjector ? 
d map (_ ⇒ ToServer(Done)) pipeTo self stay case TerminateMsg(exit) ⇒ System.exit(exit) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala index 70cca7c34b..9f9257c69b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala @@ -22,7 +22,7 @@ object SimpleRemoteMultiJvmSpec extends MultiNodeConfig { } commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + # akka.loglevel = DEBUG akka.remote { log-received-messages = on log-sent-messages = on diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala index 2690378ef1..e15027cc73 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala @@ -24,7 +24,7 @@ object DirectRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { import com.typesafe.config.ConfigFactory commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + # akka.loglevel = DEBUG akka.remote { log-received-messages = on log-sent-messages = on diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 087aac55c7..e311fa0023 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -17,7 +17,7 @@ import akka.remote.testkit.MultiNodeConfig object TestConductorMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + # akka.loglevel = DEBUG akka.remote { log-received-messages = on log-sent-messages = on diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 4735132534..9091864348 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -173,6 +173,7 @@ class ActiveRemoteClient private[akka] ( notifyListeners(RemoteClientError(connection.getCause, netty, remoteAddress)) false } else { + ChannelAddress.set(connection.getChannel, Some(remoteAddress)) sendSecureCookie(connection) notifyListeners(RemoteClientStarted(netty, remoteAddress)) true @@ -196,8 +197,10 @@ class ActiveRemoteClient private[akka] ( notifyListeners(RemoteClientShutdown(netty, remoteAddress)) try { - if ((connection ne null) && (connection.getChannel ne null)) + if ((connection ne null) && (connection.getChannel ne null)) { + ChannelAddress.remove(connection.getChannel) connection.getChannel.close() + } } finally { try { if (openChannels ne null) openChannels.close.awaitUninterruptibly() diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 60c2ac6097..84ee77bbbe 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -29,6 +29,11 @@ import 
org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder import org.jboss.netty.handler.timeout.IdleStateHandler import org.jboss.netty.channel.ChannelPipelineFactory import org.jboss.netty.handler.execution.ExecutionHandler +import org.jboss.netty.channel.ChannelLocal + +object ChannelAddress extends ChannelLocal[Option[Address]] { + override def initialValue(ch: Channel): Option[Address] = None +} /** * Provides the implementation of the Netty remote support diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 87993f783d..5903dacd83 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -102,19 +102,11 @@ class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends Si } } -object ChannelLocalSystem extends ChannelLocal[ActorSystemImpl] { - override def initialValue(ch: Channel): ActorSystemImpl = null -} - @ChannelHandler.Sharable class RemoteServerHandler( val openChannels: ChannelGroup, val netty: NettyRemoteTransport) extends SimpleChannelUpstreamHandler { - val channelAddress = new ChannelLocal[Option[Address]](false) { - override def initialValue(channel: Channel) = None - } - import netty.settings private var addressToSet = true @@ -138,16 +130,16 @@ class RemoteServerHandler( override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = () override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { - netty.notifyListeners(RemoteServerClientDisconnected(netty, channelAddress.get(ctx.getChannel))) + netty.notifyListeners(RemoteServerClientDisconnected(netty, ChannelAddress.get(ctx.getChannel))) } override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { - val address = channelAddress.get(ctx.getChannel) + val address = ChannelAddress.get(ctx.getChannel) if (address.isDefined && settings.UsePassiveConnections) netty.unbindClient(address.get) netty.notifyListeners(RemoteServerClientClosed(netty, address)) - channelAddress.remove(ctx.getChannel) + ChannelAddress.remove(ctx.getChannel) } override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = try { @@ -161,7 +153,7 @@ class RemoteServerHandler( case CommandType.CONNECT ⇒ val origin = instruction.getOrigin val inbound = Address("akka", origin.getSystem, origin.getHostname, origin.getPort) - channelAddress.set(event.getChannel, Option(inbound)) + ChannelAddress.set(event.getChannel, Option(inbound)) //If we want to reuse the inbound connections as outbound we need to get busy if (settings.UsePassiveConnections) From 6bed19c6c9bc0e3581dfa077247cbfb21c3bcafd Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 11:44:39 +0200 Subject: [PATCH 098/106] Binary compat work for Akka Remote --- .../src/main/scala/akka/actor/ActorRef.scala | 5 +- .../scala/akka/remote/MessageSerializer.scala | 11 +- .../akka/remote/RemoteActorRefProvider.scala | 51 ++-- .../scala/akka/remote/RemoteDeployer.scala | 4 +- .../scala/akka/remote/RemoteSettings.scala | 15 +- .../scala/akka/remote/RemoteTransport.scala | 232 +++++++++++------- .../main/scala/akka/remote/netty/Client.scala | 21 +- .../remote/netty/NettyRemoteSupport.scala | 11 +- .../main/scala/akka/remote/netty/Server.scala | 8 +- .../scala/akka/remote/netty/Settings.scala | 38 +-- 10 files changed, 219 insertions(+), 177 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala 
b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 46e3440f95..460bd02076 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -154,17 +154,16 @@ trait ScalaActorRef { ref: ActorRef ⇒ } -//FIXME should ActorScope be private[akka], me thinks so - √ /** * All ActorRefs have a scope which describes where they live. Since it is * often necessary to distinguish between local and non-local references, this * is the only method provided on the scope. */ -trait ActorRefScope { +private[akka] trait ActorRefScope { def isLocal: Boolean } -trait LocalRef extends ActorRefScope { +private[akka] trait LocalRef extends ActorRefScope { final def isLocal = true } diff --git a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala index 65777d49ca..6bd61dd812 100644 --- a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala @@ -9,8 +9,14 @@ import com.google.protobuf.ByteString import akka.actor.ExtendedActorSystem import akka.serialization.SerializationExtension -object MessageSerializer { +/** + * MessageSerializer is a helper for serialize and deserialize messages + */ +private[akka] object MessageSerializer { + /** + * Uses Akka Serialization for the specified ActorSystem to transform the given MessageProtocol to a message + */ def deserialize(system: ExtendedActorSystem, messageProtocol: MessageProtocol): AnyRef = { val clazz = if (messageProtocol.hasMessageManifest) { @@ -24,6 +30,9 @@ object MessageSerializer { } } + /** + * Uses Akka Serialization for the specified ActorSystem to transform the given message to a MessageProtocol + */ def serialize(system: ExtendedActorSystem, message: AnyRef): MessageProtocol = { val s = SerializationExtension(system) val serializer = s.findSerializerFor(message) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index bf55edf24c..a12c5f5578 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -4,34 +4,26 @@ package akka.remote -import akka.AkkaException import akka.actor._ import akka.dispatch._ import akka.event.{ DeathWatch, Logging, LoggingAdapter } import akka.event.EventStream -import akka.ConfigurationException -import java.util.concurrent.{ TimeoutException } -import com.typesafe.config.Config import akka.serialization.Serialization import akka.serialization.SerializationExtension -class RemoteException(msg: String) extends AkkaException(msg) -class RemoteCommunicationException(msg: String) extends RemoteException(msg) -class RemoteConnectionException(msg: String) extends RemoteException(msg) - /** * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. 
*/ -class RemoteActorRefProvider( +private[akka] class RemoteActorRefProvider( val systemName: String, val settings: ActorSystem.Settings, val eventStream: EventStream, val scheduler: Scheduler, val dynamicAccess: DynamicAccess) extends ActorRefProvider { - val remoteSettings = new RemoteSettings(settings.config, systemName) + val remoteSettings: RemoteSettings = new RemoteSettings(settings.config, systemName) - val deployer = new RemoteDeployer(settings, dynamicAccess) + val deployer: RemoteDeployer = new RemoteDeployer(settings, dynamicAccess) private val local = new LocalActorRefProvider(systemName, settings, eventStream, scheduler, deployer) @@ -39,21 +31,21 @@ class RemoteActorRefProvider( private var _log = local.log def log: LoggingAdapter = _log - def rootPath = local.rootPath - def deadLetters = local.deadLetters + override def rootPath: ActorPath = local.rootPath + override def deadLetters: InternalActorRef = local.deadLetters - val deathWatch = new RemoteDeathWatch(local.deathWatch, this) + override val deathWatch: DeathWatch = new RemoteDeathWatch(local.deathWatch, this) // these are only available after init() - def rootGuardian = local.rootGuardian - def guardian = local.guardian - def systemGuardian = local.systemGuardian - def terminationFuture = local.terminationFuture - def dispatcher = local.dispatcher - def registerTempActor(actorRef: InternalActorRef, path: ActorPath) = local.registerTempActor(actorRef, path) - def unregisterTempActor(path: ActorPath) = local.unregisterTempActor(path) - def tempPath() = local.tempPath() - def tempContainer = local.tempContainer + override def rootGuardian: InternalActorRef = local.rootGuardian + override def guardian: InternalActorRef = local.guardian + override def systemGuardian: InternalActorRef = local.systemGuardian + override def terminationFuture: Promise[Unit] = local.terminationFuture + override def dispatcher: MessageDispatcher = local.dispatcher + override def registerTempActor(actorRef: InternalActorRef, path: ActorPath): Unit = local.registerTempActor(actorRef, path) + override def unregisterTempActor(path: ActorPath): Unit = local.unregisterTempActor(path) + override def tempPath(): ActorPath = local.tempPath() + override def tempContainer: VirtualPathContainer = local.tempContainer @volatile private var _transport: RemoteTransport = _ @@ -61,13 +53,13 @@ class RemoteActorRefProvider( @volatile private var _serialization: Serialization = _ - def serialization = _serialization + def serialization: Serialization = _serialization @volatile private var _remoteDaemon: InternalActorRef = _ - def remoteDaemon = _remoteDaemon + def remoteDaemon: InternalActorRef = _remoteDaemon - def init(system: ActorSystemImpl) { + def init(system: ActorSystemImpl): Unit = { local.init(system) _remoteDaemon = new RemoteSystemDaemon(system, rootPath / "remote", rootGuardian, log) @@ -193,7 +185,7 @@ class RemoteActorRefProvider( /** * Using (checking out) actor on a specific node. 
*/ - def useActorOnNode(path: ActorPath, props: Props, deploy: Deploy, supervisor: ActorRef) { + def useActorOnNode(path: ActorPath, props: Props, deploy: Deploy, supervisor: ActorRef): Unit = { log.debug("[{}] Instantiating Remote Actor [{}]", rootPath, path) // we don’t wait for the ACK, because the remote end will process this command before any other message to the new actor @@ -211,7 +203,7 @@ class RemoteActorRefProvider( } } -trait RemoteRef extends ActorRefScope { +private[akka] trait RemoteRef extends ActorRefScope { final def isLocal = false } @@ -256,7 +248,7 @@ private[akka] class RemoteActorRef private[akka] ( private def writeReplace(): AnyRef = SerializedActorRef(path) } -class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { +private[akka] class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { override def subscribe(watcher: ActorRef, watched: ActorRef): Boolean = watched match { case r: RemoteRef ⇒ @@ -275,5 +267,4 @@ class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvid override def unsubscribe(watcher: ActorRef): Unit = local.unsubscribe(watcher) override def publish(event: Terminated): Unit = local.publish(event) - } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index e869c4ef4c..25df64795d 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -12,8 +12,7 @@ case class RemoteScope(node: Address) extends Scope { def withFallback(other: Scope): Scope = this } -class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { - +private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { override protected def parseConfig(path: String, config: Config): Option[Deploy] = { import scala.collection.JavaConverters._ @@ -30,5 +29,4 @@ class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extend case None ⇒ None } } - } \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 0b26311286..951c007fbc 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -6,17 +6,12 @@ package akka.remote import com.typesafe.config.Config import akka.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS -import java.net.InetAddress -import akka.ConfigurationException -import scala.collection.JavaConverters._ -import akka.actor.Address -import akka.actor.AddressFromURIString class RemoteSettings(val config: Config, val systemName: String) { import config._ - val RemoteTransport = getString("akka.remote.transport") - val LogReceive = getBoolean("akka.remote.log-received-messages") - val LogSend = getBoolean("akka.remote.log-sent-messages") - val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) - val UntrustedMode = getBoolean("akka.remote.untrusted-mode") + val RemoteTransport: String = getString("akka.remote.transport") + val LogReceive: Boolean = getBoolean("akka.remote.log-received-messages") + val LogSend: Boolean = getBoolean("akka.remote.log-sent-messages") + val RemoteSystemDaemonAckTimeout: Duration = 
Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) + val UntrustedMode: Boolean = getBoolean("akka.remote.untrusted-mode") } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 3bade97460..d912d1d878 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -10,7 +10,6 @@ import akka.event.{ LoggingAdapter, Logging } import akka.AkkaException import akka.serialization.Serialization import akka.remote.RemoteProtocol._ -import akka.dispatch.ChildTerminated import akka.actor._ /** @@ -27,54 +26,67 @@ trait RemoteClientLifeCycleEvent extends RemoteLifeCycleEvent { def remoteAddress: Address } +/** + * A RemoteClientError is a general error that is thrown within or from a RemoteClient + */ case class RemoteClientError( @BeanProperty cause: Throwable, @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.ErrorLevel - override def toString = - "RemoteClientError@" + remoteAddress + ": Error[" + cause + "]" + override def logLevel: Logging.LogLevel = Logging.ErrorLevel + override def toString: String = "RemoteClientError@" + remoteAddress + ": Error[" + cause + "]" } +/** + * RemoteClientDisconnected is published when a RemoteClient's connection is disconnected + */ case class RemoteClientDisconnected( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteClientDisconnected@" + remoteAddress + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = "RemoteClientDisconnected@" + remoteAddress } +/** + * RemoteClientConnected is published when a RemoteClient's connection is established + */ case class RemoteClientConnected( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteClientConnected@" + remoteAddress + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = "RemoteClientConnected@" + remoteAddress } +/** + * RemoteClientStarted is published when a RemoteClient has started up + */ case class RemoteClientStarted( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.InfoLevel - override def toString = - "RemoteClientStarted@" + remoteAddress + override def logLevel: Logging.LogLevel = Logging.InfoLevel + override def toString: String = "RemoteClientStarted@" + remoteAddress } +/** + * RemoteClientShutdown is published when a RemoteClient has shut down + */ case class RemoteClientShutdown( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.InfoLevel - override def toString = - "RemoteClientShutdown@" + remoteAddress + override def logLevel: Logging.LogLevel = Logging.InfoLevel + override def toString: String = "RemoteClientShutdown@" + remoteAddress } +/** + * RemoteClientWriteFailed is published when a remote send of a message detectably fails (throws an exception). 
+ */ case class RemoteClientWriteFailed( @BeanProperty request: AnyRef, @BeanProperty cause: Throwable, @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.WarningLevel - override def toString = + override def logLevel: Logging.LogLevel = Logging.WarningLevel + override def toString: String = "RemoteClientWriteFailed@" + remoteAddress + ": MessageClass[" + (if (request ne null) request.getClass.getName else "no message") + "] Error[" + cause + "]" @@ -85,53 +97,65 @@ case class RemoteClientWriteFailed( */ trait RemoteServerLifeCycleEvent extends RemoteLifeCycleEvent +/** + * RemoteServerStarted is published when a local RemoteServer has started up + */ case class RemoteServerStarted( @transient @BeanProperty remote: RemoteTransport) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.InfoLevel - override def toString = - "RemoteServerStarted@" + remote + override def logLevel: Logging.LogLevel = Logging.InfoLevel + override def toString: String = "RemoteServerStarted@" + remote } +/** + * RemoteServerShutdown is published when a local RemoteServer has shut down + */ case class RemoteServerShutdown( @transient @BeanProperty remote: RemoteTransport) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.InfoLevel - override def toString = - "RemoteServerShutdown@" + remote + override def logLevel: Logging.LogLevel = Logging.InfoLevel + override def toString: String = "RemoteServerShutdown@" + remote } +/** + * A RemoteServerError is a general error that is thrown within or from a RemoteServer + */ case class RemoteServerError( @BeanProperty val cause: Throwable, @transient @BeanProperty remote: RemoteTransport) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.ErrorLevel - override def toString = - "RemoteServerError@" + remote + "] Error[" + cause + "]" + override def logLevel: Logging.LogLevel = Logging.ErrorLevel + override def toString: String = "RemoteServerError@" + remote + "] Error[" + cause + "]" } +/** + * RemoteServerClientConnected is published when an inbound connection has been established + */ case class RemoteServerClientConnected( @transient @BeanProperty remote: RemoteTransport, @BeanProperty val clientAddress: Option[Address]) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteServerClientConnected@" + remote + - ": Client[" + clientAddress.getOrElse("no address") + "]" + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = + "RemoteServerClientConnected@" + remote + ": Client[" + clientAddress.getOrElse("no address") + "]" } +/** + * RemoteServerClientDisconnected is published when an inbound connection has been disconnected + */ case class RemoteServerClientDisconnected( @transient @BeanProperty remote: RemoteTransport, @BeanProperty val clientAddress: Option[Address]) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteServerClientDisconnected@" + remote + - ": Client[" + clientAddress.getOrElse("no address") + "]" + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = + "RemoteServerClientDisconnected@" + remote + ": Client[" + clientAddress.getOrElse("no address") + "]" } +/** + * RemoteServerClientClosed is published when an inbound RemoteClient is closed + */ case class RemoteServerClientClosed( 
@transient @BeanProperty remote: RemoteTransport, @BeanProperty val clientAddress: Option[Address]) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteServerClientClosed@" + remote + - ": Client[" + clientAddress.getOrElse("no address") + "]" + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = + "RemoteServerClientClosed@" + remote + ": Client[" + clientAddress.getOrElse("no address") + "]" } /** @@ -142,6 +166,10 @@ class RemoteClientException private[akka] ( @transient @BeanProperty val client: RemoteTransport, val remoteAddress: Address, cause: Throwable = null) extends AkkaException(message, cause) +/** + * RemoteTransportException represents a general failure within a RemoteTransport, + * such as inability to start, wrong configuration etc. + */ class RemoteTransportException(message: String, cause: Throwable) extends AkkaException(message, cause) /** @@ -178,71 +206,56 @@ abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: Re */ def restartClientConnection(address: Address): Boolean - /** Methods that needs to be implemented by a transport **/ - - def send(message: Any, - senderOption: Option[ActorRef], - recipient: RemoteActorRef): Unit + /** + * Sends the given message to the recipient supplying the sender if any + */ + def send(message: Any, senderOption: Option[ActorRef], recipient: RemoteActorRef): Unit + /** + * Default implementation both publishes the message to the eventStream as well as logs it using the system logger + */ def notifyListeners(message: RemoteLifeCycleEvent): Unit = { system.eventStream.publish(message) system.log.log(message.logLevel, "{}", message) } - override def toString = address.toString -} - -class RemoteMessage(input: RemoteMessageProtocol, system: ExtendedActorSystem) { - - def originalReceiver = input.getRecipient.getPath - - lazy val sender: ActorRef = - if (input.hasSender) system.provider.actorFor(system.provider.rootGuardian, input.getSender.getPath) - else system.deadLetters - - lazy val recipient: InternalActorRef = system.provider.actorFor(system.provider.rootGuardian, originalReceiver) - - lazy val payload: AnyRef = MessageSerializer.deserialize(system, input.getMessage) - - override def toString = "RemoteMessage: " + payload + " to " + recipient + "<+{" + originalReceiver + "} from " + sender -} - -trait RemoteMarshallingOps { + /** + * Returns this RemoteTransports Address' textual representation + */ + override def toString: String = address.toString + /** + * A Logger that can be used to log issues that may occur + */ def log: LoggingAdapter - def system: ExtendedActorSystem - - def provider: RemoteActorRefProvider - - def address: Address - + /** + * When this method returns true, some functionality will be turned off for security purposes. + */ protected def useUntrustedMode: Boolean - def createMessageSendEnvelope(rmp: RemoteMessageProtocol): AkkaRemoteProtocol = { - val arp = AkkaRemoteProtocol.newBuilder - arp.setMessage(rmp) - arp.build - } + /** + * Returns a newly created AkkaRemoteProtocol with the given message payload. 
+ */ + def createMessageSendEnvelope(rmp: RemoteMessageProtocol): AkkaRemoteProtocol = + AkkaRemoteProtocol.newBuilder.setMessage(rmp).build - def createControlEnvelope(rcp: RemoteControlProtocol): AkkaRemoteProtocol = { - val arp = AkkaRemoteProtocol.newBuilder - arp.setInstruction(rcp) - arp.build - } + /** + * Returns a newly created AkkaRemoteProtocol with the given control payload. + */ + def createControlEnvelope(rcp: RemoteControlProtocol): AkkaRemoteProtocol = + AkkaRemoteProtocol.newBuilder.setInstruction(rcp).build /** * Serializes the ActorRef instance into a Protocol Buffers (protobuf) Message. */ - def toRemoteActorRefProtocol(actor: ActorRef): ActorRefProtocol = { + def toRemoteActorRefProtocol(actor: ActorRef): ActorRefProtocol = ActorRefProtocol.newBuilder.setPath(actor.path.toStringWithAddress(address)).build - } - - def createRemoteMessageProtocolBuilder( - recipient: ActorRef, - message: Any, - senderOption: Option[ActorRef]): RemoteMessageProtocol.Builder = { + /** + * Returns a new RemoteMessageProtocol containing the serialized representation of the given parameters. + */ + def createRemoteMessageProtocolBuilder(recipient: ActorRef, message: Any, senderOption: Option[ActorRef]): RemoteMessageProtocol.Builder = { val messageBuilder = RemoteMessageProtocol.newBuilder.setRecipient(toRemoteActorRefProtocol(recipient)) if (senderOption.isDefined) messageBuilder.setSender(toRemoteActorRefProtocol(senderOption.get)) @@ -253,7 +266,12 @@ trait RemoteMarshallingOps { messageBuilder } - def receiveMessage(remoteMessage: RemoteMessage) { + /** + * Call this method with an inbound RemoteMessage and this will take care of security (see: "useUntrustedMode") + * as well as making sure that the message ends up at its destination (best effort). + * There is also a fair amount of logging produced by this method, which is good for debugging. + */ + def receiveMessage(remoteMessage: RemoteMessage): Unit = { val remoteDaemon = provider.remoteDaemon remoteMessage.recipient match { @@ -289,3 +307,43 @@ trait RemoteMarshallingOps { } } } + +/** + * RemoteMessage is a wrapper around a message that has come in over the wire, + * it allows to easily obtain references to the deserialized message, its intended recipient + * and the sender. + */ +class RemoteMessage(input: RemoteMessageProtocol, system: ExtendedActorSystem) { + /** + * Returns a String-representation of the ActorPath that this RemoteMessage is destined for + */ + def originalReceiver: String = input.getRecipient.getPath + + /** + * Returns an Option with the String representation of the ActorPath of the Actor who is the sender of this message + */ + def originalSender: Option[String] = if (input.hasSender) Some(input.getSender.getPath) else None + + /** + * Returns a reference to the Actor that sent this message, or DeadLetterActorRef if not present or found. + */ + lazy val sender: ActorRef = + if (input.hasSender) system.provider.actorFor(system.provider.rootGuardian, input.getSender.getPath) + else system.deadLetters + + /** + * Returns a reference to the Actor that this message is destined for. + * In case this returns a DeadLetterActorRef, you have access to the path using the "originalReceiver" method. 
+ */ + lazy val recipient: InternalActorRef = system.provider.actorFor(system.provider.rootGuardian, originalReceiver) + + /** + * Returns the message + */ + lazy val payload: AnyRef = MessageSerializer.deserialize(system, input.getMessage) + + /** + * Returns a String representation of this RemoteMessage, intended for debugging purposes. + */ + override def toString: String = "RemoteMessage: " + payload + " to " + recipient + "<+{" + originalReceiver + "} from " + sender +} diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 3c52179e4a..f4f200aef6 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -12,7 +12,6 @@ import org.jboss.netty.channel.{ ChannelFutureListener, ChannelHandler, StaticCh import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } - import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected, RemoteClientWriteFailed } import akka.actor.{ Address, ActorRef } @@ -20,18 +19,12 @@ import akka.AkkaException import akka.event.Logging import akka.util.Switch -class RemoteClientMessageBufferException(message: String, cause: Throwable) extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null) -} - /** * This is the abstract baseclass for netty remote clients, currently there's only an * ActiveRemoteClient, but others could be feasible, like a PassiveRemoteClient that * reuses an already established connection. */ -abstract class RemoteClient private[akka] ( - val netty: NettyRemoteTransport, - val remoteAddress: Address) { +private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteTransport, val remoteAddress: Address) { val log = Logging(netty.system, "RemoteClient") @@ -92,7 +85,7 @@ abstract class RemoteClient private[akka] ( /** * RemoteClient represents a connection to an Akka node. Is used to send messages to remote actors on the node. 
*/ -class ActiveRemoteClient private[akka] ( +private[akka] class ActiveRemoteClient private[akka] ( netty: NettyRemoteTransport, remoteAddress: Address, localAddress: Address) @@ -225,7 +218,7 @@ class ActiveRemoteClient private[akka] ( } @ChannelHandler.Sharable -class ActiveRemoteClientHandler( +private[akka] class ActiveRemoteClientHandler( val name: String, val bootstrap: ClientBootstrap, val remoteAddress: Address, @@ -314,7 +307,7 @@ class ActiveRemoteClientHandler( } } -class ActiveRemoteClientPipelineFactory( +private[akka] class ActiveRemoteClientPipelineFactory( name: String, bootstrap: ClientBootstrap, executionHandler: ExecutionHandler, @@ -339,9 +332,9 @@ class ActiveRemoteClientPipelineFactory( } } -class PassiveRemoteClient(val currentChannel: Channel, - netty: NettyRemoteTransport, - remoteAddress: Address) +private[akka] class PassiveRemoteClient(val currentChannel: Channel, + netty: NettyRemoteTransport, + remoteAddress: Address) extends RemoteClient(netty, remoteAddress) { def connect(reconnectIfAlreadyConnected: Boolean = false): Boolean = runSwitch switchOn { diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 4fd70b822f..d09c17f160 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -16,17 +16,16 @@ import org.jboss.netty.channel.{ ChannelHandlerContext, Channel } import org.jboss.netty.handler.codec.protobuf.{ ProtobufEncoder, ProtobufDecoder } import org.jboss.netty.handler.execution.OrderedMemoryAwareThreadPoolExecutor import org.jboss.netty.util.HashedWheelTimer -import akka.dispatch.MonitorableThreadFactory import akka.event.Logging import akka.remote.RemoteProtocol.AkkaRemoteProtocol -import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteSettings, RemoteMarshallingOps, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } +import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } import akka.util.NonFatal import akka.actor.{ ExtendedActorSystem, Address, ActorRef } /** * Provides the implementation of the Netty remote support */ -class NettyRemoteTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) extends RemoteTransport(_system, _provider) with RemoteMarshallingOps { +private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) extends RemoteTransport(_system, _provider) { import provider.remoteSettings @@ -192,7 +191,7 @@ class NettyRemoteTransport(_system: ExtendedActorSystem, _provider: RemoteActorR } -class RemoteMessageEncoder(remoteSupport: NettyRemoteTransport) extends ProtobufEncoder { +private[akka] class RemoteMessageEncoder(remoteSupport: NettyRemoteTransport) extends ProtobufEncoder { override def encode(ctx: ChannelHandlerContext, channel: Channel, msg: AnyRef): AnyRef = { msg match { case (message: Any, sender: Option[_], recipient: ActorRef) ⇒ @@ -207,9 +206,9 @@ class RemoteMessageEncoder(remoteSupport: NettyRemoteTransport) extends Protobuf } } -class RemoteMessageDecoder extends ProtobufDecoder(AkkaRemoteProtocol.getDefaultInstance) +private[akka] class RemoteMessageDecoder extends ProtobufDecoder(AkkaRemoteProtocol.getDefaultInstance) -class DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(name) { +private[akka] class 
DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(name) { protected val guard = new ReentrantReadWriteLock protected val open = new AtomicBoolean(true) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 674023dd52..5c18bc6933 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -20,7 +20,7 @@ import java.net.InetAddress import akka.actor.ActorSystemImpl import org.jboss.netty.channel._ -class NettyRemoteServer(val netty: NettyRemoteTransport) { +private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { import netty.settings @@ -82,7 +82,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { } } -class RemoteServerPipelineFactory( +private[akka] class RemoteServerPipelineFactory( val openChannels: ChannelGroup, val executionHandler: ExecutionHandler, val netty: NettyRemoteTransport) extends ChannelPipelineFactory { @@ -103,7 +103,7 @@ class RemoteServerPipelineFactory( } @ChannelHandler.Sharable -class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler { +private[akka] class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler { val authenticated = new AnyRef override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = secureCookie match { @@ -130,7 +130,7 @@ class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends Si } @ChannelHandler.Sharable -class RemoteServerHandler( +private[akka] class RemoteServerHandler( val openChannels: ChannelGroup, val netty: NettyRemoteTransport) extends SimpleChannelUpstreamHandler { diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index bb33cb9570..64bc184408 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -9,37 +9,37 @@ import java.util.concurrent.TimeUnit._ import java.net.InetAddress import akka.ConfigurationException -class NettySettings(config: Config, val systemName: String) { +private[akka] class NettySettings(config: Config, val systemName: String) { import config._ - val BackoffTimeout = Duration(getMilliseconds("backoff-timeout"), MILLISECONDS) + val BackoffTimeout: Duration = Duration(getMilliseconds("backoff-timeout"), MILLISECONDS) val SecureCookie: Option[String] = getString("secure-cookie") match { case "" ⇒ None case cookie ⇒ Some(cookie) } - val RequireCookie = { + val RequireCookie: Boolean = { val requireCookie = getBoolean("require-cookie") if (requireCookie && SecureCookie.isEmpty) throw new ConfigurationException( "Configuration option 'akka.remote.netty.require-cookie' is turned on but no secure cookie is defined in 'akka.remote.netty.secure-cookie'.") requireCookie } - val UsePassiveConnections = getBoolean("use-passive-connections") - val UseDispatcherForIO = getString("use-dispatcher-for-io") match { + val UsePassiveConnections: Boolean = getBoolean("use-passive-connections") + val UseDispatcherForIO: Option[String] = getString("use-dispatcher-for-io") match { case "" | null ⇒ None case dispatcher ⇒ Some(dispatcher) } - val ReconnectionTimeWindow = Duration(getMilliseconds("reconnection-time-window"), MILLISECONDS) - val ReadTimeout = Duration(getMilliseconds("read-timeout"), MILLISECONDS) - val WriteTimeout 
= Duration(getMilliseconds("write-timeout"), MILLISECONDS) - val AllTimeout = Duration(getMilliseconds("all-timeout"), MILLISECONDS) - val ReconnectDelay = Duration(getMilliseconds("reconnect-delay"), MILLISECONDS) - val MessageFrameSize = getBytes("message-frame-size").toInt + val ReconnectionTimeWindow: Duration = Duration(getMilliseconds("reconnection-time-window"), MILLISECONDS) + val ReadTimeout: Duration = Duration(getMilliseconds("read-timeout"), MILLISECONDS) + val WriteTimeout: Duration = Duration(getMilliseconds("write-timeout"), MILLISECONDS) + val AllTimeout: Duration = Duration(getMilliseconds("all-timeout"), MILLISECONDS) + val ReconnectDelay: Duration = Duration(getMilliseconds("reconnect-delay"), MILLISECONDS) + val MessageFrameSize: Int = getBytes("message-frame-size").toInt - val Hostname = getString("hostname") match { + val Hostname: String = getString("hostname") match { case "" ⇒ InetAddress.getLocalHost.getHostAddress case value ⇒ value } @@ -50,25 +50,25 @@ class NettySettings(config: Config, val systemName: String) { } @deprecated("WARNING: This should only be used by professionals.", "2.0") - val PortSelector = getInt("port") + val PortSelector: Int = getInt("port") - val ConnectionTimeout = Duration(getMilliseconds("connection-timeout"), MILLISECONDS) + val ConnectionTimeout: Duration = Duration(getMilliseconds("connection-timeout"), MILLISECONDS) - val Backlog = getInt("backlog") + val Backlog: Int = getInt("backlog") - val ExecutionPoolKeepalive = Duration(getMilliseconds("execution-pool-keepalive"), MILLISECONDS) + val ExecutionPoolKeepalive: Duration = Duration(getMilliseconds("execution-pool-keepalive"), MILLISECONDS) - val ExecutionPoolSize = getInt("execution-pool-size") match { + val ExecutionPoolSize: Int = getInt("execution-pool-size") match { case sz if sz < 1 ⇒ throw new IllegalArgumentException("akka.remote.netty.execution-pool-size is less than 1") case sz ⇒ sz } - val MaxChannelMemorySize = getBytes("max-channel-memory-size") match { + val MaxChannelMemorySize: Long = getBytes("max-channel-memory-size") match { case sz if sz < 0 ⇒ throw new IllegalArgumentException("akka.remote.netty.max-channel-memory-size is less than 0 bytes") case sz ⇒ sz } - val MaxTotalMemorySize = getBytes("max-total-memory-size") match { + val MaxTotalMemorySize: Long = getBytes("max-total-memory-size") match { case sz if sz < 0 ⇒ throw new IllegalArgumentException("akka.remote.netty.max-total-memory-size is less than 0 bytes") case sz ⇒ sz } From 55b185be02376b0dfd7d682186eeead6d3859514 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 11:58:57 +0200 Subject: [PATCH 099/106] Fix compilation error, getHostString, java7 --- .../src/main/scala/akka/remote/testconductor/Conductor.scala | 2 +- .../akka/remote/testconductor/NetworkFailureInjector.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 89fa807762..643bc68cdb 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -397,7 +397,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP override def receive = LoggingReceive { case CreateServerFSM(channel) ⇒ val (ip, port) = channel.getRemoteAddress match { - case s: InetSocketAddress ⇒ (s.getHostString, s.getPort) + case s: 
InetSocketAddress ⇒ (s.getHostName, s.getPort) } val name = ip + ":" + port + "-server" + generation.next sender ! context.actorOf(Props(new ServerFSM(self, channel)), name) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index ba8f8d1285..b425518044 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -135,7 +135,7 @@ private[akka] object NetworkFailureInjector { } /** - * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed + * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed * by the FailureInjector of the TestConductor extension. These can * pass through requests immediately, drop them or throttle to a desired rate. The FSMs are * registered in the TestConductorExt.failureInjector so that settings can be applied from From cb7de1db751792d7f15472c87af0b19577f64bca Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 12:12:45 +0200 Subject: [PATCH 100/106] Binary compat work for routing --- .../src/main/scala/akka/routing/Routing.scala | 53 +++++++++++------- .../akka/routing/RemoteRouterConfig.scala | 2 +- file-based/mailbox_user__a | 0 file-based/mailbox_user__b | 0 file-based/mailbox_user__c | Bin 14692 -> 0 bytes 5 files changed, 35 insertions(+), 20 deletions(-) delete mode 100644 file-based/mailbox_user__a delete mode 100644 file-based/mailbox_user__b delete mode 100644 file-based/mailbox_user__c diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 96da72eda2..94eed672f4 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -174,7 +174,7 @@ trait RouterConfig { def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route - def createRouteeProvider(context: ActorContext) = new RouteeProvider(context, resizer) + def createRouteeProvider(context: ActorContext): RouteeProvider = new RouteeProvider(context, resizer) def createActor(): Router = new Router { override def supervisorStrategy: SupervisorStrategy = RouterConfig.this.supervisorStrategy @@ -195,7 +195,8 @@ trait RouterConfig { */ def withFallback(other: RouterConfig): RouterConfig = this - protected def toAll(sender: ActorRef, routees: Iterable[ActorRef]): Iterable[Destination] = routees.map(Destination(sender, _)) + protected def toAll(sender: ActorRef, routees: Iterable[ActorRef]): Iterable[Destination] = + routees.map(Destination(sender, _)) /** * Routers with dynamically resizable number of routees return the [[akka.routing.Resizer]] @@ -218,9 +219,7 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { * Not thread safe, but intended to be called from protected points, such as * `RouterConfig.createRoute` and `Resizer.resize`. */ - def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = { - routedRef.addRoutees(routees) - } + def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = routedRef.addRoutees(routees) /** * Adds the routees to the router. @@ -240,9 +239,7 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { * Not thread safe, but intended to be called from protected points, such as * `Resizer.resize`. 
*/ - def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = { - routedRef.removeRoutees(routees) - } + def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = routedRef.removeRoutees(routees) def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { @@ -253,11 +250,8 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { case (_, xs) ⇒ xs.map(context.actorFor(_))(scala.collection.breakOut) } - def createAndRegisterRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): Unit = { - if (resizer.isEmpty) { - registerRoutees(createRoutees(props, nrOfInstances, routees)) - } - } + def createAndRegisterRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): Unit = + if (resizer.isEmpty) registerRoutees(createRoutees(props, nrOfInstances, routees)) /** * All routees of the router @@ -265,7 +259,6 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { def routees: IndexedSeq[ActorRef] = routedRef.routees private def routedRef = context.self.asInstanceOf[RoutedActorRef] - } /** @@ -423,7 +416,11 @@ class FromConfig(val routerDispatcher: String = Dispatchers.DefaultDispatcherId) } object RoundRobinRouter { - def apply(routees: Iterable[ActorRef]) = new RoundRobinRouter(routees = routees map (_.path.toString)) + /** + * Creates a new RoundRobinRouter, routing to the specified routees + */ + def apply(routees: Iterable[ActorRef]): RoundRobinRouter = + new RoundRobinRouter(routees = routees map (_.path.toString)) /** * Java API to create router with the supplied 'routees' actors. @@ -539,7 +536,10 @@ trait RoundRobinLike { this: RouterConfig ⇒ } object RandomRouter { - def apply(routees: Iterable[ActorRef]) = new RandomRouter(routees = routees map (_.path.toString)) + /** + * Creates a new RandomRouter, routing to the specified routees + */ + def apply(routees: Iterable[ActorRef]): RandomRouter = new RandomRouter(routees = routees map (_.path.toString)) /** * Java API to create router with the supplied 'routees' actors. @@ -652,7 +652,11 @@ trait RandomLike { this: RouterConfig ⇒ } object SmallestMailboxRouter { - def apply(routees: Iterable[ActorRef]) = new SmallestMailboxRouter(routees = routees map (_.path.toString)) + /** + * Creates a new SmallestMailboxRouter, routing to the specified routees + */ + def apply(routees: Iterable[ActorRef]): SmallestMailboxRouter = + new SmallestMailboxRouter(routees = routees map (_.path.toString)) /** * Java API to create router with the supplied 'routees' actors. @@ -852,7 +856,10 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ } object BroadcastRouter { - def apply(routees: Iterable[ActorRef]) = new BroadcastRouter(routees = routees map (_.path.toString)) + /** + * Creates a new BroadcastRouter, routing to the specified routees + */ + def apply(routees: Iterable[ActorRef]): BroadcastRouter = new BroadcastRouter(routees = routees map (_.path.toString)) /** * Java API to create router with the supplied 'routees' actors. 
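As a usage sketch for the companion-object apply methods documented in this patch: they are normally combined with Props.withRouter when creating a router actor. The Worker actor, the system name and the router names below are illustrative assumptions, not part of the patch:

  import akka.actor.{ Actor, ActorSystem, Props }
  import akka.routing.RoundRobinRouter

  // a trivial routee used only for this example
  class Worker extends Actor { def receive = { case msg ⇒ sender ! msg } }

  val system = ActorSystem("example")
  // let the router create a fixed pool of five routees itself
  val pool = system.actorOf(Props[Worker].withRouter(RoundRobinRouter(nrOfInstances = 5)), "pool")
  // or route to already existing actors via the apply(routees) factory documented above
  val refs = IndexedSeq(system.actorOf(Props[Worker]), system.actorOf(Props[Worker]))
  val router = system.actorOf(Props[Worker].withRouter(RoundRobinRouter(routees = refs)), "router")
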
@@ -957,7 +964,11 @@ trait BroadcastLike { this: RouterConfig ⇒ } object ScatterGatherFirstCompletedRouter { - def apply(routees: Iterable[ActorRef], within: Duration) = new ScatterGatherFirstCompletedRouter(routees = routees map (_.path.toString), within = within) + /** + * Creates a new ScatterGatherFirstCompletedRouter, routing to the specified routees, timing out after the specified Duration + */ + def apply(routees: Iterable[ActorRef], within: Duration): ScatterGatherFirstCompletedRouter = + new ScatterGatherFirstCompletedRouter(routees = routees map (_.path.toString), within = within) /** * Java API to create router with the supplied 'routees' actors. @@ -1106,6 +1117,10 @@ trait Resizer { } case object DefaultResizer { + + /** + * Creates a new DefaultResizer from the given configuration + */ def apply(resizerConfig: Config): DefaultResizer = DefaultResizer( lowerBound = resizerConfig.getInt("lower-bound"), diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala index 714b854a69..9a71f309fc 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala @@ -59,7 +59,7 @@ class RemoteRouteeProvider(nodes: Iterable[Address], _context: ActorContext, _re extends RouteeProvider(_context, _resizer) { // need this iterator as instance variable since Resizer may call createRoutees several times - private val nodeAddressIter = Stream.continually(nodes).flatten.iterator + private val nodeAddressIter: Iterator[Address] = Stream.continually(nodes).flatten.iterator override def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees, nodes) match { diff --git a/file-based/mailbox_user__a b/file-based/mailbox_user__a deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/file-based/mailbox_user__b b/file-based/mailbox_user__b deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/file-based/mailbox_user__c b/file-based/mailbox_user__c deleted file mode 100644 index 4b6ae68e6d608e0534358c9f4e9b0364c7240672..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14692 [14692-byte base85-encoded binary delta omitted] Date: Thu, 24 May 2012 12:19:39 +0200 Subject: [PATCH 101/106] Binary compat for serialization --- .../akka/serialization/DaemonMsgCreateSerializer.scala | 8 ++++---- .../akka/serialization/DaemonMsgWatchSerializer.scala | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala index ce54ff5adb..2905c3ef3b 100644 --- 
a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala @@ -30,8 +30,10 @@ import akka.actor.FromClassCreator * Serialization of contained RouterConfig, Config, and Scope * is done with configured serializer for those classes, by * default java.io.Serializable. + * + * INTERNAL API */ -class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Serializer { +private[akka] class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Serializer { import ProtobufSerializer.serializeActorRef import ProtobufSerializer.deserializeActorRef @@ -81,7 +83,7 @@ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Seriali def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { val proto = DaemonMsgCreateProtocol.parseFrom(bytes) - def deploy(protoDeploy: DeployProtocol) = { + def deploy(protoDeploy: DeployProtocol): Deploy = { val config = if (protoDeploy.hasConfig) deserialize(protoDeploy.getConfig, classOf[Config]) else ConfigFactory.empty @@ -146,7 +148,5 @@ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Seriali case _ ⇒ throw e // the first exception } } - } - } \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala index 0ca5216da0..016d7f14cb 100644 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala @@ -12,8 +12,10 @@ import akka.actor.ExtendedActorSystem /** * Serializes akka's internal DaemonMsgWatch using protobuf. + * + * INTERNAL API */ -class DaemonMsgWatchSerializer(val system: ExtendedActorSystem) extends Serializer { +private[akka] class DaemonMsgWatchSerializer(val system: ExtendedActorSystem) extends Serializer { import ProtobufSerializer.serializeActorRef import ProtobufSerializer.deserializeActorRef From c9ab35d5f0998001c71f2fec3ea43d602f86459e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 12:34:18 +0200 Subject: [PATCH 102/106] Binary compat work for the 0mq module --- .../akka/zeromq/ConcurrentSocketActor.scala | 9 ++------ .../main/scala/akka/zeromq/SocketOption.scala | 4 +++- .../akka/zeromq/ZMQMessageDeserializer.scala | 2 +- .../scala/akka/zeromq/ZeroMQExtension.scala | 21 +++++++------------ .../src/main/scala/akka/zeromq/package.scala | 4 ++-- 5 files changed, 16 insertions(+), 24 deletions(-) diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index c4e6d08f59..e848809644 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -205,11 +205,6 @@ private[zeromq] class ConcurrentSocketActor(params: Seq[SocketOption]) extends A } private val listenerOpt = params collectFirst { case Listener(l) ⇒ l } - private def watchListener() { - listenerOpt foreach context.watch - } - - private def notifyListener(message: Any) { - listenerOpt foreach { _ ! message } - } + private def watchListener(): Unit = listenerOpt foreach context.watch + private def notifyListener(message: Any): Unit = listenerOpt foreach { _ ! 
message } } diff --git a/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala b/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala index 1e4c83bcef..c5d5919fb7 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala @@ -255,7 +255,9 @@ case class Linger(value: Long) extends SocketOption /** * Gets the linger option @see [[akka.zeromq.Linger]] */ -object Linger extends SocketOptionQuery +object Linger extends SocketOptionQuery { + val no: Linger = Linger(0) +} /** * Sets the recovery interval for multicast transports using the specified socket. diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala b/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala index 1776f21211..2d41424e88 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala @@ -20,5 +20,5 @@ case class Frame(payload: Seq[Byte]) { * Deserializes ZeroMQ messages into an immutable sequence of frames */ class ZMQMessageDeserializer extends Deserializer { - def apply(frames: Seq[Frame]) = ZMQMessage(frames) + def apply(frames: Seq[Frame]): ZMQMessage = ZMQMessage(frames) } diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala b/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala index 1ddd213325..85a9ea6642 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala @@ -19,7 +19,7 @@ import org.zeromq.ZMQException * @param patch */ case class ZeroMQVersion(major: Int, minor: Int, patch: Int) { - override def toString = "%d.%d.%d".format(major, minor, patch) + override def toString: String = "%d.%d.%d".format(major, minor, patch) } /** @@ -27,17 +27,14 @@ case class ZeroMQVersion(major: Int, minor: Int, patch: Int) { */ object ZeroMQExtension extends ExtensionId[ZeroMQExtension] with ExtensionIdProvider { override def get(system: ActorSystem): ZeroMQExtension = super.get(system) - def lookup() = this - def createExtension(system: ExtendedActorSystem) = new ZeroMQExtension(system) + def lookup(): this.type = this + override def createExtension(system: ExtendedActorSystem): ZeroMQExtension = new ZeroMQExtension(system) private val minVersionString = "2.1.0" private val minVersion = JZMQ.makeVersion(2, 1, 0) - private[zeromq] def check[TOption <: SocketOption: Manifest](parameters: Seq[SocketOption]) = { - parameters exists { p ⇒ - ClassManifest.singleType(p) <:< manifest[TOption] - } - } + private[zeromq] def check[TOption <: SocketOption: Manifest](parameters: Seq[SocketOption]) = + parameters exists { p ⇒ ClassManifest.singleType(p) <:< manifest[TOption] } } /** @@ -47,16 +44,14 @@ object ZeroMQExtension extends ExtensionId[ZeroMQExtension] with ExtensionIdProv */ class ZeroMQExtension(system: ActorSystem) extends Extension { - val DefaultPollTimeout = Duration(system.settings.config.getMilliseconds("akka.zeromq.poll-timeout"), TimeUnit.MILLISECONDS) - val NewSocketTimeout = Timeout(Duration(system.settings.config.getMilliseconds("akka.zeromq.new-socket-timeout"), TimeUnit.MILLISECONDS)) + val DefaultPollTimeout: Duration = Duration(system.settings.config.getMilliseconds("akka.zeromq.poll-timeout"), TimeUnit.MILLISECONDS) + val NewSocketTimeout: Timeout = Timeout(Duration(system.settings.config.getMilliseconds("akka.zeromq.new-socket-timeout"), TimeUnit.MILLISECONDS)) /** * The version of the ZeroMQ 
library * @return a [[akka.zeromq.ZeroMQVersion]] */ - def version = { - ZeroMQVersion(JZMQ.getMajorVersion, JZMQ.getMinorVersion, JZMQ.getPatchVersion) - } + def version: ZeroMQVersion = ZeroMQVersion(JZMQ.getMajorVersion, JZMQ.getMinorVersion, JZMQ.getPatchVersion) /** * Factory method to create the [[akka.actor.Props]] to build the ZeroMQ socket actor. diff --git a/akka-zeromq/src/main/scala/akka/zeromq/package.scala b/akka-zeromq/src/main/scala/akka/zeromq/package.scala index 6eeba5b92a..1241700fcb 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/package.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/package.scala @@ -20,10 +20,10 @@ package object zeromq { /** * Convenience accessor to subscribe to all events */ - val SubscribeAll = Subscribe(Seq.empty) + val SubscribeAll: Subscribe = Subscribe.all /** * Set the linger to 0, doesn't block and discards messages that haven't been sent yet. */ - val NoLinger = Linger(0) + val NoLinger: Linger = Linger.no } \ No newline at end of file From 8cad9bb1b61fa104861b22944afe238e0a75317d Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 12:40:52 +0200 Subject: [PATCH 103/106] add deployOn/deployOnAll DSL to MultiNodeSpec, see #2118 --- .../src/main/scala/akka/actor/Deployer.scala | 2 +- .../NetworkFailureInjector.scala | 2 +- .../DirectRoutedRemoteActorMultiJvmSpec.scala | 38 +++++++++------ .../akka/remote/testkit/MultiNodeSpec.scala | 47 +++++++++++++++++-- .../scala/akka/remote/RemoteDeployer.scala | 4 +- 5 files changed, 70 insertions(+), 23 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 2fd9538d77..821691d853 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -128,7 +128,7 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce add(d.path.split("/").drop(1), d) } - protected def parseConfig(key: String, config: Config): Option[Deploy] = { + def parseConfig(key: String, config: Config): Option[Deploy] = { val deployment = config.withFallback(default) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index ba8f8d1285..b425518044 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -135,7 +135,7 @@ private[akka] object NetworkFailureInjector { } /** - * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed + * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed * by the FailureInjector of the TestConductor extension. These can * pass through requests immediately, drop them or throttle to a desired rate. 
The FSMs are * registered in the TestConductorExt.failureInjector so that settings can be applied from diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala index 3f23f60b37..294bc80884 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala @@ -27,20 +27,9 @@ object DirectRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { val master = role("master") val slave = role("slave") - nodeConfig(master, ConfigFactory.parseString(""" - akka.actor { - deployment { - /service-hello.remote = "akka://MultiNodeSpec@%s" - } - } - # FIXME When using NettyRemoteTransport instead of TestConductorTransport it works - # akka.remote.transport = "akka.remote.netty.NettyRemoteTransport" - """.format("localhost:2553"))) // FIXME is there a way to avoid hardcoding the host:port here? - - nodeConfig(slave, ConfigFactory.parseString(""" - akka.remote.netty.port = 2553 - """)) - + deployOn(master, """/service-hello.remote = "@slave@" """) + + deployOnAll("""/service-hello2.remote = "@slave@" """) } class DirectRoutedRemoteActorMultiJvmNode1 extends DirectRoutedRemoteActorSpec @@ -60,7 +49,26 @@ class DirectRoutedRemoteActorSpec extends MultiNodeSpec(DirectRoutedRemoteActorM actor.isInstanceOf[RemoteActorRef] must be(true) val slaveAddress = testConductor.getAddressFor(slave).await - (actor ? "identify").await.asInstanceOf[ActorRef].path.address must equal(slaveAddress) + actor ! "identify" + expectMsgType[ActorRef].path.address must equal(slaveAddress) + + // shut down the actor before we let the other node(s) shut down so we don't try to send + // "Terminate" to a shut down node + system.stop(actor) + } + + testConductor.enter("done") + } + + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef (with deployOnAll)" in { + + runOn(master) { + val actor = system.actorOf(Props[SomeActor], "service-hello2") + actor.isInstanceOf[RemoteActorRef] must be(true) + + val slaveAddress = testConductor.getAddressFor(slave).await + actor ! "identify" + expectMsgType[ActorRef].path.address must equal(slaveAddress) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 3822a1f529..e7bce0890c 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -4,7 +4,7 @@ package akka.remote.testkit import akka.testkit.AkkaSpec -import akka.actor.ActorSystem +import akka.actor.{ ActorSystem, ExtendedActorSystem } import akka.remote.testconductor.TestConductor import java.net.InetAddress import java.net.InetSocketAddress @@ -17,6 +17,8 @@ import akka.util.Duration import akka.actor.ActorPath import akka.actor.RootActorPath import akka.remote.testconductor.RoleName +import akka.actor.Deploy +import com.typesafe.config.ConfigObject /** * Configure the role names and participants of the test, including configuration settings. 
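Editor's note, not part of the patch: a minimal sketch of how the deployOn/deployOnAll DSL introduced in the hunks below is intended to be used from a MultiNodeConfig. Role and actor names here are invented; each "@server@" tag is substituted with that node's actual address by MultiNodeSpec before the deployment sections are parsed and handed to the Deployer.

    import akka.remote.testkit.MultiNodeConfig

    // Hypothetical test configuration illustrating the new DSL.
    object SampleConfig extends MultiNodeConfig {
      val client = role("client")
      val server = role("server")

      // applied only on the client node: deploy /pingService remotely on the server
      deployOn(client, """/pingService.remote = "@server@" """)

      // applied on every participating node
      deployOnAll("""/echoService.remote = "@server@" """)
    }

Entries registered with deployOn are collected per role, while deployOnAll entries are appended for every participant, matching the deployments(node) accessor added below.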
@@ -25,7 +27,9 @@ abstract class MultiNodeConfig { private var _commonConf: Option[Config] = None private var _nodeConf = Map[RoleName, Config]() - private var _roles = Seq[RoleName]() + private var _roles = Vector[RoleName]() + private var _deployments = Map[RoleName, Seq[String]]() + private var _allDeploy = Vector[String]() /** * Register a common base config for all test participants, if so desired. @@ -68,6 +72,11 @@ abstract class MultiNodeConfig { r } + def deployOn(role: RoleName, deployment: String): Unit = + _deployments += role -> ((_deployments get role getOrElse Vector()) :+ deployment) + + def deployOnAll(deployment: String): Unit = _allDeploy :+= deployment + private[testkit] lazy val mySelf: RoleName = { require(_roles.size > MultiNodeSpec.selfIndex, "not enough roles declared for this test") _roles(MultiNodeSpec.selfIndex) @@ -78,6 +87,10 @@ abstract class MultiNodeConfig { configs reduce (_ withFallback _) } + private[testkit] def deployments(node: RoleName): Seq[String] = (_deployments get node getOrElse Nil) ++ _allDeploy + + private[testkit] def roles: Seq[RoleName] = _roles + } object MultiNodeSpec { @@ -115,11 +128,13 @@ object MultiNodeSpec { } -abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem) extends AkkaSpec(_system) { +abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem, roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) + extends AkkaSpec(_system) { import MultiNodeSpec._ - def this(config: MultiNodeConfig) = this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config)) + def this(config: MultiNodeConfig) = + this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config), config.roles, config.deployments) /* * Test Class Interface @@ -188,4 +203,28 @@ abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem) extends testConductor.startClient(mySelf, controllerAddr).await } + // now add deployments, if so desired + + private case class Replacement(tag: String, role: RoleName) { + lazy val addr = node(role).address.toString + } + private val replacements = roles map (r ⇒ Replacement("@" + r.name + "@", r)) + private val deployer = system.asInstanceOf[ExtendedActorSystem].provider.deployer + deployments(mySelf) foreach { str ⇒ + val deployString = (str /: replacements) { + case (base, r @ Replacement(tag, _)) ⇒ + base.indexOf(tag) match { + case -1 ⇒ base + case start ⇒ base.replace(tag, r.addr) + } + } + import scala.collection.JavaConverters._ + ConfigFactory.parseString(deployString).root.asScala foreach { + case (key, value: ConfigObject) ⇒ + deployer.parseConfig(key, value.toConfig) foreach deployer.deploy + case (key, x) ⇒ + throw new IllegalArgumentException("key " + key + " must map to deployment section, not simple value " + x) + } + } + } \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index 0858c66405..296a773625 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -12,9 +12,9 @@ case class RemoteScope(node: Address) extends Scope { def withFallback(other: Scope): Scope = this } -class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { +private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { - override protected def parseConfig(path: String, config: 
Config): Option[Deploy] = { + override def parseConfig(path: String, config: Config): Option[Deploy] = { import scala.collection.JavaConverters._ super.parseConfig(path, config) match { From af4e0c2ce83375ee588339c10fb98ef748b4cb77 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 16:49:24 +0200 Subject: [PATCH 104/106] Fixing some additional binary compat thingies + documented the heaviness of the ActorSystem --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 4 +--- akka-actor/src/main/scala/akka/actor/Address.scala | 4 +++- akka-actor/src/main/scala/akka/actor/FaultHandling.scala | 2 +- akka-actor/src/main/scala/akka/actor/IO.scala | 4 ++-- akka-actor/src/main/scala/akka/event/Logging.scala | 6 +++--- akka-docs/general/actor-systems.rst | 5 +++++ 6 files changed, 15 insertions(+), 10 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 0b8e68c56a..c5595212c2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -428,9 +428,7 @@ abstract class ExtendedActorSystem extends ActorSystem { def dynamicAccess: DynamicAccess } -//FIXME This should most probably not be protected[akka] right? - √ -//FIXME We also need to decide whether this should be supported API or not - √ -class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Config, classLoader: ClassLoader) extends ExtendedActorSystem { +private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, classLoader: ClassLoader) extends ExtendedActorSystem { if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-]*$""")) throw new IllegalArgumentException( diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index 67f147b836..44c12eed35 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -114,7 +114,9 @@ object AddressFromURIString { def parse(addr: String): Address = apply(addr) } -//FIXME is this public API? - √ +/** + * Given an ActorPath it returns the Address and the path elements if the path is well-formed + */ object ActorPathExtractor extends PathUtils { def unapply(addr: String): Option[(Address, Iterable[String])] = try { diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 8641153476..27a9f346db 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -165,7 +165,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { def makeDecider(flat: Iterable[CauseDirective]): Decider = { val directives = sort(flat) - { case x ⇒ directives find (_._1 isInstance x) map (_._2) getOrElse Escalate } + { case x ⇒ directives collectFirst { case (c, d) if c isInstance x ⇒ d } getOrElse Escalate } } /** diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index 63eb2e4b3a..07af4213fc 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -320,7 +320,7 @@ object IO { } object Chunk { - val empty = new Chunk(ByteString.empty) + val empty: Chunk = new Chunk(ByteString.empty) } /** @@ -344,7 +344,7 @@ object IO { * Iteratee.recover() in order to handle it properly. 
*/ case class EOF(cause: Option[Exception]) extends Input { - final override def ++(that: Input) = that + final override def ++(that: Input): Input = that } object Iteratee { diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 58aa6aadf4..6e6f92ad0d 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -276,9 +276,9 @@ object LogSource { // this one unfortunately does not work as implicit, because existential types have some weird behavior val fromClass: LogSource[Class[_]] = new LogSource[Class[_]] { - def genString(c: Class[_]) = Logging.simpleName(c) - override def genString(c: Class[_], system: ActorSystem) = genString(c) + "(" + system + ")" - override def getClazz(c: Class[_]) = c + def genString(c: Class[_]): String = Logging.simpleName(c) + override def genString(c: Class[_], system: ActorSystem): String = genString(c) + "(" + system + ")" + override def getClazz(c: Class[_]): Class[_] = c } implicit def fromAnyClass[T]: LogSource[Class[T]] = fromClass.asInstanceOf[LogSource[Class[T]]] diff --git a/akka-docs/general/actor-systems.rst b/akka-docs/general/actor-systems.rst index 2051f2d845..1b7d6a7759 100644 --- a/akka-docs/general/actor-systems.rst +++ b/akka-docs/general/actor-systems.rst @@ -14,6 +14,11 @@ which means that we need not concern ourselves with their emotional state or moral issues). The result can then serve as a mental scaffolding for building the software implementation. +.. note:: + + An ActorSystem is a heavyweight structure that will allocate 1…N Threads, + so create one per logical application. + Hierarchical Structure ---------------------- From d8bb688ecebd5f7b5db60568f2845a475da6161c Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 17:59:36 +0200 Subject: [PATCH 105/106] add comments to clarify code in NetworkFailureInjector.scala --- .../NetworkFailureInjector.scala | 56 ++++++++++++++++++- 1 file changed, 53 insertions(+), 3 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index b425518044..bf5d7d6007 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -58,7 +58,9 @@ private[akka] class FailureInjector extends Actor with ActorLogging { } /** - * Retrieve target settings, also if they were sketchy before (i.e. no system name) + * Retrieve target settings, also if they were sketchy before (i.e. no system name). + * In the latter case, copy settings from the sketchy address and remove the old + * mapping. 
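+   *
+   * (Editor's illustration, invented addresses: an entry first stored under the
+   * "sketchy" Address("akka", "", "10.0.0.1", 2552), i.e. host and port known but
+   * no system name yet, is re-keyed to the full Address("akka", "Sys", "10.0.0.1", 2552)
+   * the first time that target is looked up, and the old sketchy entry is removed.)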
*/ def retrieveTargetSettings(target: Address): Option[ChannelSettings] = { settings get target orElse { @@ -68,12 +70,16 @@ private[akka] class FailureInjector extends Actor with ActorLogging { case (Address("akka", "", `host`, `port`), s) ⇒ true case _ ⇒ false } map { - case (_, s) ⇒ settings += target -> s; s + case (a, s) ⇒ settings -= a; settings += target -> s; s } } } def receive = { + /* + * If a channel handler tells us that he’s been disconnected, stop the + * throttle actors and forget about them (but not possibly applied settings) + */ case RemoveContext(ctx) ⇒ channels get ctx foreach { inj ⇒ context stop inj.sender @@ -81,6 +87,12 @@ private[akka] class FailureInjector extends Actor with ActorLogging { } channels -= ctx settings ++= settings collect { case (addr, c @ ChannelSettings(Some(`ctx`), _, _)) ⇒ (addr, c.copy(ctx = None)) } + /* + * Throttle/Blackhole/Unblock connections, based on the sign of rateMBit; + * will inform throttle actors for that destination if currently connected + * and update the settings for the target address; reply is needed to + * confirm this operation and tell the master that he can proceed. + */ case ThrottleMsg(target, dir, rateMBit) ⇒ val setting = retrieveTargetSettings(target) settings += target -> ((setting getOrElse ChannelSettings() match { @@ -95,6 +107,10 @@ private[akka] class FailureInjector extends Actor with ActorLogging { case x ⇒ x }) sender ! "ok" + /* + * Disconnect the currently active connection to the given target; reply is + * needed to confirm this operation and tell the master the he can proceed. + */ case DisconnectMsg(target, abort) ⇒ retrieveTargetSettings(target) foreach { case ChannelSettings(Some(ctx), _, _) ⇒ @@ -107,22 +123,39 @@ private[akka] class FailureInjector extends Actor with ActorLogging { case _ ⇒ log.debug("no connection to {} to close or abort", target) } sender ! "ok" + /* + * All data transfers up or down the pipeline are redirected through this + * case statement, which dispatches to the throttle actors for the given + * channel handler context. If none exist yet, they will be created, and + * this is a bit complicated in the case where the first message has not + * yet been exchanged, i.e. the other side’s Address is not yet known + * (keep in mind that an actor system’s remote address is not necessarily + * connected in any way to the IP from which we receive the connection). + */ case s @ Send(ctx, direction, future, msg) ⇒ channels get ctx match { case Some(Injectors(snd, rcv)) ⇒ if (direction includes Direction.Send) snd ! s if (direction includes Direction.Receive) rcv ! 
s case None ⇒ + // don’t do reverse lookup at first val (ipaddr, ip, port) = ctx.getChannel.getRemoteAddress match { case s: InetSocketAddress ⇒ (s.getAddress, s.getAddress.getHostAddress, s.getPort) } val addr = ChannelAddress.get(ctx.getChannel) orElse { settings collect { case (a @ Address("akka", _, Some(`ip`), Some(`port`)), _) ⇒ a } headOption } orElse { + // only if raw IP failed, try with hostname val name = ipaddr.getHostName if (name == ip) None else settings collect { case (a @ Address("akka", _, Some(`name`), Some(`port`)), _) ⇒ a } headOption - } getOrElse Address("akka", "", ip, port) // this will not match later requests directly, but be picked up by retrieveTargetSettings + } getOrElse Address("akka", "", ip, port) + /* + * ^- the above last resort will not match later requests directly, but be + * picked up by retrieveTargetSettings, so that throttle ops are + * applied to the right throttle actors, assuming that there can + * be only one actor system per host:port. + */ val inj = ingestContextAddress(ctx, addr) if (direction includes Direction.Send) inj.sender ! s if (direction includes Direction.Receive) inj.receiver ! s @@ -276,6 +309,16 @@ private[akka] class ThrottleActor(channelContext: ChannelHandlerContext) case _ ⇒ } + /** + * Core of the throttling engine: delay Send operations until their bit count + * would actually have had time to travel down the line at the configured + * data rate, and split up send operations which are so big that gaps larger + * than packetSplitThreshold would be planned (they will happen nevertheless + * due to HashedWheelTimer’s semantics, but we compensate by sending more the + * next time, in proportion to how long the Tick was overdue). So, this should + * lead to the correct rate on average, with increased latency of the order of + * HWT granularity. + */ private def schedule(d: Data): (Data, Seq[Send], Option[Duration]) = { val now = System.nanoTime @tailrec def rec(d: Data, toSend: Seq[Send]): (Data, Seq[Send], Option[Duration]) = { @@ -297,6 +340,13 @@ private[akka] class ThrottleActor(channelContext: ChannelHandlerContext) rec(d, Seq()) } + /** + * Split one Send operation in two, cutting off the given number of bytes at + * the front. If it was Direction.Send, i.e. a channel.write(), then also + * split the Future so that a failure in either part will complete the original + * with that failure. Data are not copied, as long as ChannelBuffer.slice does + * not copy them. 
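+   *
+   * (Editor's illustration, invented numbers: with rateMBit = 1.0, i.e. roughly
+   * 125 000 bytes per second on the wire, and a packetSplitThreshold of 100 ms,
+   * schedule() would ask for slices of at most about 12 500 bytes, so a large
+   * write is cut into pieces whose individual line time stays below the threshold.)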
+ */ private def split(s: Send, bytes: Int): (Send, Send) = { s.msg match { case buf: ChannelBuffer ⇒ From 7d342e5c968e949492c97c988b163038c7287749 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 15 May 2012 20:57:39 +0200 Subject: [PATCH 106/106] add docs about how to serialize ActorRefs - scala & java samples of how to obtain the correct address to inject when calling ActorPath.toStringWithAddress --- .../SerializationDocTestBase.java | 137 ++++++++++++++---- akka-docs/java/serialization.rst | 41 ++++++ .../serialization/SerializationDocSpec.scala | 41 +++++- akka-docs/scala/serialization.rst | 41 ++++++ 4 files changed, 233 insertions(+), 27 deletions(-) diff --git a/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java b/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java index a62827fc98..5d27e4f37f 100644 --- a/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java +++ b/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java @@ -7,6 +7,7 @@ import org.junit.Test; import static org.junit.Assert.*; //#imports import akka.actor.*; +import akka.remote.RemoteActorRefProvider; import akka.serialization.*; import com.typesafe.config.*; @@ -78,32 +79,118 @@ public class SerializationDocTestBase { //#actorref-serializer theActorSystem.shutdown(); } + + //#external-address + public static class ExternalAddressExt implements Extension { + private final ExtendedActorSystem system; + public ExternalAddressExt(ExtendedActorSystem system) { + this.system = system; + } - @Test public void demonstrateTheProgrammaticAPI() { - //#programmatic - ActorSystem system = ActorSystem.create("example"); - - // Get the Serialization Extension - Serialization serialization = SerializationExtension.get(system); - - // Have something to serialize - String original = "woohoo"; - - // Find the Serializer for it - Serializer serializer = serialization.findSerializerFor(original); - - // Turn it into bytes - byte[] bytes = serializer.toBinary(original); - - // Turn it back into an object, - // the nulls are for the class manifest and for the classloader - String back = (String)serializer.fromBinary(bytes); - - // Voilá! - assertEquals(original, back); - - //#programmatic - system.shutdown(); + public Address getAddressFor(Address remoteAddress) { + final scala.Option
<Address>
optAddr = system.provider() + .getExternalAddressFor(remoteAddress); + if (optAddr.isDefined()) { + return optAddr.get(); + } else { + throw new UnsupportedOperationException( + "cannot send to remote address " + remoteAddress); + } + } } + + public static class ExternalAddress extends + AbstractExtensionId implements ExtensionIdProvider { + public static final ExternalAddress ID = new ExternalAddress(); + + public ExternalAddress lookup() { + return ID; + } + + public ExternalAddressExt createExtension(ExtendedActorSystem system) { + return new ExternalAddressExt(system); + } + } + + //#external-address + + public void demonstrateExternalAddress() { + // this is not meant to be run, only to be compiled + final ActorSystem system = ActorSystem.create(); + final Address remoteAddr = new Address("", ""); + // #external-address + final Address addr = ExternalAddress.ID.get(system).getAddressFor(remoteAddr); + // #external-address + } + + //#external-address-default + public static class DefaultAddressExt implements Extension { + private final ExtendedActorSystem system; + + public DefaultAddressExt(ExtendedActorSystem system) { + this.system = system; + } + + public Address getAddress() { + final ActorRefProvider provider = system.provider(); + if (provider instanceof RemoteActorRefProvider) { + return ((RemoteActorRefProvider) provider).transport().address(); + } else { + throw new UnsupportedOperationException("need RemoteActorRefProvider"); + } + } + } + + public static class DefaultAddress extends + AbstractExtensionId implements ExtensionIdProvider { + public static final DefaultAddress ID = new DefaultAddress(); + + public DefaultAddress lookup() { + return ID; + } + + public DefaultAddressExt createExtension(ExtendedActorSystem system) { + return new DefaultAddressExt(system); + } + } + + //#external-address-default + + public void demonstrateDefaultAddress() { + // this is not meant to be run, only to be compiled + final ActorSystem system = ActorSystem.create(); + final Address remoteAddr = new Address("", ""); + // #external-address-default + final Address addr = DefaultAddress.ID.get(system).getAddress(); + // #external-address-default + } + + @Test + public void demonstrateTheProgrammaticAPI() { + // #programmatic + ActorSystem system = ActorSystem.create("example"); + + // Get the Serialization Extension + Serialization serialization = SerializationExtension.get(system); + + // Have something to serialize + String original = "woohoo"; + + // Find the Serializer for it + Serializer serializer = serialization.findSerializerFor(original); + + // Turn it into bytes + byte[] bytes = serializer.toBinary(original); + + // Turn it back into an object, + // the nulls are for the class manifest and for the classloader + String back = (String) serializer.fromBinary(bytes); + + // Voilá! + assertEquals(original, back); + + // #programmatic + system.shutdown(); + } } diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index 2710dd1f4c..7618ffa4a8 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -109,6 +109,47 @@ you might want to know how to serialize and deserialize them properly, here's th .. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java :include: imports,actorref-serializer +.. note:: + + ``ActorPath.toStringWithAddress`` only differs from ``toString`` if the + address does not already have ``host`` and ``port`` components, i.e. it only + inserts address information for local addresses. 
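+
+(Editor's illustration with invented addresses: for a reference in the local system,
+``ref.path().toString()`` yields something like ``akka://Sys/user/service``, while
+``ref.path().toStringWithAddress(addr)`` with an ``addr`` of ``akka://Sys@10.0.0.1:2552``
+yields ``akka://Sys@10.0.0.1:2552/user/service``; for a reference whose path already
+carries host and port, both calls return the same string.)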
+ +This assumes that serialization happens in the context of sending a message +through the remote transport. There are other uses of serialization, though, +e.g. storing actor references outside of an actor application (database, +durable mailbox, etc.). In this case, it is important to keep in mind that the +address part of an actor’s path determines how that actor is communicated with. +Storing a local actor path might be the right choice if the retrieval happens +in the same logical context, but it is not enough when deserializing it on a +different network host: for that it would need to include the system’s remote +transport address. An actor system is not limited to having just one remote +transport per se, which makes this question a bit more interesting. + +In the general case, the local address to be used depends on the type of remote +address which shall be the recipient of the serialized information. Use +:meth:`ActorRefProvider.getExternalAddressFor(remoteAddr)` to query the system +for the appropriate address to use when sending to ``remoteAddr``: + +.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java + :include: external-address + +This requires that you know at least which type of address will be supported by +the system which will deserialize the resulting actor reference; if you have no +concrete address handy you can create a dummy one for the right protocol using +``new Address(protocol, "", "", 0)`` (assuming that the actual transport used is as +lenient as Akka’s RemoteActorRefProvider). + +There is a possible simplification available if you are just using the default +:class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is +enabled by the fact that this combination has just a single remote address: + +.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java + :include: external-address-default + +This solution has to be adapted once other providers are used (like the planned +extensions for clustering). 
+ Deep serialization of Actors ---------------------------- diff --git a/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala index 61086b78a6..b3eb4cfe13 100644 --- a/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala @@ -5,13 +5,17 @@ package docs.serialization import org.scalatest.matchers.MustMatchers import akka.testkit._ -import akka.actor.{ ActorRef, ActorSystem } - //#imports +import akka.actor.{ ActorRef, ActorSystem } import akka.serialization._ import com.typesafe.config.ConfigFactory //#imports +import akka.actor.ExtensionKey +import akka.actor.ExtendedActorSystem +import akka.actor.Extension +import akka.actor.Address +import akka.remote.RemoteActorRefProvider //#my-own-serializer class MyOwnSerializer extends Serializer { @@ -176,5 +180,38 @@ class SerializationDocSpec extends AkkaSpec { val deserializedActorRef = theActorSystem actorFor identifier // Then just use the ActorRef //#actorref-serializer + + //#external-address + object ExternalAddress extends ExtensionKey[ExternalAddressExt] + + class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { + def addressFor(remoteAddr: Address): Address = + system.provider.getExternalAddressFor(remoteAddr) getOrElse + (throw new UnsupportedOperationException("cannot send to " + remoteAddr)) + } + + def serializeTo(ref: ActorRef, remote: Address): String = + ref.path.toStringWithAddress(ExternalAddress(theActorSystem).addressFor(remote)) + //#external-address + } + + "demonstrate how to do default Akka serialization of ActorRef" in { + val theActorSystem: ActorSystem = system + + //#external-address-default + object ExternalAddress extends ExtensionKey[ExternalAddressExt] + + class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { + def addressForAkka: Address = system.provider match { + case r: RemoteActorRefProvider ⇒ r.transport.address + case _ ⇒ + throw new UnsupportedOperationException( + "this method requires the RemoteActorRefProvider to be configured") + } + } + + def serializeAkkaDefault(ref: ActorRef): String = + ref.path.toStringWithAddress(ExternalAddress(theActorSystem).addressForAkka) + //#external-address-default } } diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index fc97bbd0df..88fe74fd13 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -107,6 +107,47 @@ you might want to know how to serialize and deserialize them properly, here's th .. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala :include: imports,actorref-serializer +.. note:: + + ``ActorPath.toStringWithAddress`` only differs from ``toString`` if the + address does not already have ``host`` and ``port`` components, i.e. it only + inserts address information for local addresses. + +This assumes that serialization happens in the context of sending a message +through the remote transport. There are other uses of serialization, though, +e.g. storing actor references outside of an actor application (database, +durable mailbox, etc.). In this case, it is important to keep in mind that the +address part of an actor’s path determines how that actor is communicated with. 
+Storing a local actor path might be the right choice if the retrieval happens +in the same logical context, but it is not enough when deserializing it on a +different network host: for that it would need to include the system’s remote +transport address. An actor system is not limited to having just one remote +transport per se, which makes this question a bit more interesting. + +In the general case, the local address to be used depends on the type of remote +address which shall be the recipient of the serialized information. Use +:meth:`ActorRefProvider.getExternalAddressFor(remoteAddr)` to query the system +for the appropriate address to use when sending to ``remoteAddr``: + +.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala + :include: external-address + +This requires that you know at least which type of address will be supported by +the system which will deserialize the resulting actor reference; if you have no +concrete address handy you can create a dummy one for the right protocol using +``Address(protocol, "", "", 0)`` (assuming that the actual transport used is as +lenient as Akka’s RemoteActorRefProvider). + +There is a possible simplification available if you are just using the default +:class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is +enabled by the fact that this combination has just a single remote address: + +.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala + :include: external-address-default + +This solution has to be adapted once other providers are used (like the planned +extensions for clustering). + Deep serialization of Actors ----------------------------