From 418b11d5539bfa69eabdcce5f1ad0e77550a5154 Mon Sep 17 00:00:00 2001 From: Roland Date: Wed, 14 Sep 2011 16:09:17 +0200 Subject: [PATCH 001/538] first step towards TestConductor - it compiles - server side ("Conductor") functions almost there - client side ("Player") sketched, but missing network failures - no internal failure handling whatsoever, waiting for Project DeathWatch - not yet possible to shutdown, need to kill VM - next step is to hook into the NettyRemoteSupport for failure injection --- .../testconductor/TestConductorProtocol.java | 2610 +++++++++++++++++ .../main/protocol/TestConductorProtocol.proto | 48 + .../akka/remote/testconductor/Conductor.scala | 268 ++ .../akka/remote/testconductor/DataTypes.scala | 13 + .../akka/remote/testconductor/Features.scala | 74 + .../NetworkFailureInjector.scala | 161 + .../akka/remote/testconductor/Player.scala | 133 + .../testconductor/RemoteConnection.scala | 52 + 8 files changed, 3359 insertions(+) create mode 100644 akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java create mode 100644 akka-remote/src/main/protocol/TestConductorProtocol.proto create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/Features.scala create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/Player.scala create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java new file mode 100644 index 0000000000..e9065b53e4 --- /dev/null +++ 
b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -0,0 +1,2610 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: TestConductorProtocol.proto + +package akka.remote.testconductor; + +public final class TestConductorProtocol { + private TestConductorProtocol() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public enum FailType + implements com.google.protobuf.ProtocolMessageEnum { + Throttle(0, 1), + Disconnect(1, 2), + Abort(2, 3), + Shutdown(3, 4), + ; + + public static final int Throttle_VALUE = 1; + public static final int Disconnect_VALUE = 2; + public static final int Abort_VALUE = 3; + public static final int Shutdown_VALUE = 4; + + + public final int getNumber() { return value; } + + public static FailType valueOf(int value) { + switch (value) { + case 1: return Throttle; + case 2: return Disconnect; + case 3: return Abort; + case 4: return Shutdown; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FailType findValueByNumber(int number) { + return FailType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(0); + } + + private static final FailType[] VALUES = { + Throttle, Disconnect, Abort, Shutdown, + }; + + public static FailType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor 
desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private FailType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:FailType) + } + + public enum Direction + implements com.google.protobuf.ProtocolMessageEnum { + Send(0, 1), + Receive(1, 2), + ; + + public static final int Send_VALUE = 1; + public static final int Receive_VALUE = 2; + + + public final int getNumber() { return value; } + + public static Direction valueOf(int value) { + switch (value) { + case 1: return Send; + case 2: return Receive; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Direction findValueByNumber(int number) { + return Direction.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(1); + } + + private static final Direction[] VALUES = { + Send, Receive, + }; + + public static Direction valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int 
value; + + private Direction(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:Direction) + } + + public interface WrapperOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .Hello hello = 1; + boolean hasHello(); + akka.remote.testconductor.TestConductorProtocol.Hello getHello(); + akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder getHelloOrBuilder(); + + // optional .EnterBarrier barrier = 2; + boolean hasBarrier(); + akka.remote.testconductor.TestConductorProtocol.EnterBarrier getBarrier(); + akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder getBarrierOrBuilder(); + + // optional .InjectFailure failure = 3; + boolean hasFailure(); + akka.remote.testconductor.TestConductorProtocol.InjectFailure getFailure(); + akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder getFailureOrBuilder(); + } + public static final class Wrapper extends + com.google.protobuf.GeneratedMessage + implements WrapperOrBuilder { + // Use Wrapper.newBuilder() to construct. 
+ private Wrapper(Builder builder) { + super(builder); + } + private Wrapper(boolean noInit) {} + + private static final Wrapper defaultInstance; + public static Wrapper getDefaultInstance() { + return defaultInstance; + } + + public Wrapper getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Wrapper_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Wrapper_fieldAccessorTable; + } + + private int bitField0_; + // optional .Hello hello = 1; + public static final int HELLO_FIELD_NUMBER = 1; + private akka.remote.testconductor.TestConductorProtocol.Hello hello_; + public boolean hasHello() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public akka.remote.testconductor.TestConductorProtocol.Hello getHello() { + return hello_; + } + public akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder getHelloOrBuilder() { + return hello_; + } + + // optional .EnterBarrier barrier = 2; + public static final int BARRIER_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.EnterBarrier barrier_; + public boolean hasBarrier() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier getBarrier() { + return barrier_; + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder getBarrierOrBuilder() { + return barrier_; + } + + // optional .InjectFailure failure = 3; + public static final int FAILURE_FIELD_NUMBER = 3; + private akka.remote.testconductor.TestConductorProtocol.InjectFailure failure_; + public boolean hasFailure() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public 
akka.remote.testconductor.TestConductorProtocol.InjectFailure getFailure() { + return failure_; + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder getFailureOrBuilder() { + return failure_; + } + + private void initFields() { + hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasHello()) { + if (!getHello().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasBarrier()) { + if (!getBarrier().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasFailure()) { + if (!getFailure().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, hello_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, barrier_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, failure_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, hello_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, barrier_); + } + if 
(((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, failure_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } 
+ public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Wrapper parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.Wrapper prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.WrapperOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
akka.remote.testconductor.TestConductorProtocol.internal_static_Wrapper_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Wrapper_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.Wrapper.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getHelloFieldBuilder(); + getBarrierFieldBuilder(); + getFailureFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (helloBuilder_ == null) { + hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + } else { + helloBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (barrierBuilder_ == null) { + barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + } else { + barrierBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (failureBuilder_ == null) { + failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + } else { + failureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.Wrapper.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.Wrapper getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.Wrapper.getDefaultInstance(); + } + + public 
akka.remote.testconductor.TestConductorProtocol.Wrapper build() { + akka.remote.testconductor.TestConductorProtocol.Wrapper result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.Wrapper buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.Wrapper result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.Wrapper buildPartial() { + akka.remote.testconductor.TestConductorProtocol.Wrapper result = new akka.remote.testconductor.TestConductorProtocol.Wrapper(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (helloBuilder_ == null) { + result.hello_ = hello_; + } else { + result.hello_ = helloBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (barrierBuilder_ == null) { + result.barrier_ = barrier_; + } else { + result.barrier_ = barrierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (failureBuilder_ == null) { + result.failure_ = failure_; + } else { + result.failure_ = failureBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.Wrapper) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.Wrapper)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.Wrapper other) { + 
if (other == akka.remote.testconductor.TestConductorProtocol.Wrapper.getDefaultInstance()) return this; + if (other.hasHello()) { + mergeHello(other.getHello()); + } + if (other.hasBarrier()) { + mergeBarrier(other.getBarrier()); + } + if (other.hasFailure()) { + mergeFailure(other.getFailure()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasHello()) { + if (!getHello().isInitialized()) { + + return false; + } + } + if (hasBarrier()) { + if (!getBarrier().isInitialized()) { + + return false; + } + } + if (hasFailure()) { + if (!getFailure().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + akka.remote.testconductor.TestConductorProtocol.Hello.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.Hello.newBuilder(); + if (hasHello()) { + subBuilder.mergeFrom(getHello()); + } + input.readMessage(subBuilder, extensionRegistry); + setHello(subBuilder.buildPartial()); + break; + } + case 18: { + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.newBuilder(); + if (hasBarrier()) { + subBuilder.mergeFrom(getBarrier()); + } + input.readMessage(subBuilder, extensionRegistry); + setBarrier(subBuilder.buildPartial()); + break; + 
} + case 26: { + akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.InjectFailure.newBuilder(); + if (hasFailure()) { + subBuilder.mergeFrom(getFailure()); + } + input.readMessage(subBuilder, extensionRegistry); + setFailure(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // optional .Hello hello = 1; + private akka.remote.testconductor.TestConductorProtocol.Hello hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Hello, akka.remote.testconductor.TestConductorProtocol.Hello.Builder, akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder> helloBuilder_; + public boolean hasHello() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public akka.remote.testconductor.TestConductorProtocol.Hello getHello() { + if (helloBuilder_ == null) { + return hello_; + } else { + return helloBuilder_.getMessage(); + } + } + public Builder setHello(akka.remote.testconductor.TestConductorProtocol.Hello value) { + if (helloBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hello_ = value; + onChanged(); + } else { + helloBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setHello( + akka.remote.testconductor.TestConductorProtocol.Hello.Builder builderForValue) { + if (helloBuilder_ == null) { + hello_ = builderForValue.build(); + onChanged(); + } else { + helloBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeHello(akka.remote.testconductor.TestConductorProtocol.Hello value) { + if (helloBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + hello_ != akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance()) { + hello_ = + 
akka.remote.testconductor.TestConductorProtocol.Hello.newBuilder(hello_).mergeFrom(value).buildPartial(); + } else { + hello_ = value; + } + onChanged(); + } else { + helloBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearHello() { + if (helloBuilder_ == null) { + hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + onChanged(); + } else { + helloBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.Hello.Builder getHelloBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getHelloFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder getHelloOrBuilder() { + if (helloBuilder_ != null) { + return helloBuilder_.getMessageOrBuilder(); + } else { + return hello_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Hello, akka.remote.testconductor.TestConductorProtocol.Hello.Builder, akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder> + getHelloFieldBuilder() { + if (helloBuilder_ == null) { + helloBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Hello, akka.remote.testconductor.TestConductorProtocol.Hello.Builder, akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder>( + hello_, + getParentForChildren(), + isClean()); + hello_ = null; + } + return helloBuilder_; + } + + // optional .EnterBarrier barrier = 2; + private akka.remote.testconductor.TestConductorProtocol.EnterBarrier barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.EnterBarrier, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder, 
akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder> barrierBuilder_; + public boolean hasBarrier() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier getBarrier() { + if (barrierBuilder_ == null) { + return barrier_; + } else { + return barrierBuilder_.getMessage(); + } + } + public Builder setBarrier(akka.remote.testconductor.TestConductorProtocol.EnterBarrier value) { + if (barrierBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + barrier_ = value; + onChanged(); + } else { + barrierBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setBarrier( + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder builderForValue) { + if (barrierBuilder_ == null) { + barrier_ = builderForValue.build(); + onChanged(); + } else { + barrierBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeBarrier(akka.remote.testconductor.TestConductorProtocol.EnterBarrier value) { + if (barrierBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + barrier_ != akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance()) { + barrier_ = + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.newBuilder(barrier_).mergeFrom(value).buildPartial(); + } else { + barrier_ = value; + } + onChanged(); + } else { + barrierBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearBarrier() { + if (barrierBuilder_ == null) { + barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + onChanged(); + } else { + barrierBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder getBarrierBuilder() { + bitField0_ |= 0x00000002; + 
onChanged(); + return getBarrierFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder getBarrierOrBuilder() { + if (barrierBuilder_ != null) { + return barrierBuilder_.getMessageOrBuilder(); + } else { + return barrier_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.EnterBarrier, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder, akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder> + getBarrierFieldBuilder() { + if (barrierBuilder_ == null) { + barrierBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.EnterBarrier, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder, akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder>( + barrier_, + getParentForChildren(), + isClean()); + barrier_ = null; + } + return barrierBuilder_; + } + + // optional .InjectFailure failure = 3; + private akka.remote.testconductor.TestConductorProtocol.InjectFailure failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.InjectFailure, akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder, akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder> failureBuilder_; + public boolean hasFailure() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailure getFailure() { + if (failureBuilder_ == null) { + return failure_; + } else { + return failureBuilder_.getMessage(); + } + } + public Builder setFailure(akka.remote.testconductor.TestConductorProtocol.InjectFailure value) { + if (failureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + failure_ = value; + onChanged(); + } else { + 
failureBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder setFailure( + akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder builderForValue) { + if (failureBuilder_ == null) { + failure_ = builderForValue.build(); + onChanged(); + } else { + failureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder mergeFailure(akka.remote.testconductor.TestConductorProtocol.InjectFailure value) { + if (failureBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + failure_ != akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance()) { + failure_ = + akka.remote.testconductor.TestConductorProtocol.InjectFailure.newBuilder(failure_).mergeFrom(value).buildPartial(); + } else { + failure_ = value; + } + onChanged(); + } else { + failureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder clearFailure() { + if (failureBuilder_ == null) { + failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + onChanged(); + } else { + failureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder getFailureBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getFailureFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder getFailureOrBuilder() { + if (failureBuilder_ != null) { + return failureBuilder_.getMessageOrBuilder(); + } else { + return failure_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.InjectFailure, akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder, akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder> + getFailureFieldBuilder() { + if (failureBuilder_ == null) { + 
failureBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.InjectFailure, akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder, akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder>( + failure_, + getParentForChildren(), + isClean()); + failure_ = null; + } + return failureBuilder_; + } + + // @@protoc_insertion_point(builder_scope:Wrapper) + } + + static { + defaultInstance = new Wrapper(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Wrapper) + } + + public interface HelloOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + boolean hasName(); + String getName(); + + // required string host = 2; + boolean hasHost(); + String getHost(); + + // required int32 port = 3; + boolean hasPort(); + int getPort(); + } + public static final class Hello extends + com.google.protobuf.GeneratedMessage + implements HelloOrBuilder { + // Use Hello.newBuilder() to construct. 
+ private Hello(Builder builder) { + super(builder); + } + private Hello(boolean noInit) {} + + private static final Hello defaultInstance; + public static Hello getDefaultInstance() { + return defaultInstance; + } + + public Hello getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Hello_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Hello_fieldAccessorTable; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + name_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string host = 2; + public static final int HOST_FIELD_NUMBER = 2; + private java.lang.Object host_; + public boolean hasHost() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getHost() { + java.lang.Object ref = host_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if 
(com.google.protobuf.Internal.isValidUtf8(bs)) { + host_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getHostBytes() { + java.lang.Object ref = host_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + host_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required int32 port = 3; + public static final int PORT_FIELD_NUMBER = 3; + private int port_; + public boolean hasPort() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getPort() { + return port_; + } + + private void initFields() { + name_ = ""; + host_ = ""; + port_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasHost()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPort()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getHostBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt32(3, port_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + 
.computeBytesSize(2, getHostBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, port_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, 
extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Hello parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.Hello prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.HelloOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Hello_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Hello_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.Hello.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + host_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.Hello.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.Hello getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.Hello build() { + akka.remote.testconductor.TestConductorProtocol.Hello result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.Hello buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.Hello result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + 
result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.Hello buildPartial() { + akka.remote.testconductor.TestConductorProtocol.Hello result = new akka.remote.testconductor.TestConductorProtocol.Hello(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.host_ = host_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.port_ = port_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.Hello) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.Hello)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.Hello other) { + if (other == akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + if (other.hasHost()) { + setHost(other.getHost()); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + if (!hasHost()) { + + return false; + } + if (!hasPort()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { 
+ int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + host_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + port_ = input.readInt32(); + break; + } + } + } + } + + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + name_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + void setName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + } + + // required string host = 2; + private java.lang.Object host_ = ""; + public boolean hasHost() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getHost() { + java.lang.Object ref = host_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + host_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setHost(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + host_ = value; + onChanged(); + 
return this; + } + public Builder clearHost() { + bitField0_ = (bitField0_ & ~0x00000002); + host_ = getDefaultInstance().getHost(); + onChanged(); + return this; + } + void setHost(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + host_ = value; + onChanged(); + } + + // required int32 port = 3; + private int port_ ; + public boolean hasPort() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getPort() { + return port_; + } + public Builder setPort(int value) { + bitField0_ |= 0x00000004; + port_ = value; + onChanged(); + return this; + } + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000004); + port_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Hello) + } + + static { + defaultInstance = new Hello(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Hello) + } + + public interface EnterBarrierOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + boolean hasName(); + String getName(); + } + public static final class EnterBarrier extends + com.google.protobuf.GeneratedMessage + implements EnterBarrierOrBuilder { + // Use EnterBarrier.newBuilder() to construct. 
+ private EnterBarrier(Builder builder) { + super(builder); + } + private EnterBarrier(boolean noInit) {} + + private static final EnterBarrier defaultInstance; + public static EnterBarrier getDefaultInstance() { + return defaultInstance; + } + + public EnterBarrier getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_EnterBarrier_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_EnterBarrier_fieldAccessorTable; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + name_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + name_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static 
akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.EnterBarrier parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.EnterBarrier prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder 
toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.EnterBarrierOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_EnterBarrier_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_EnterBarrier_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.EnterBarrier.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier build() { + akka.remote.testconductor.TestConductorProtocol.EnterBarrier result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.EnterBarrier buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.EnterBarrier result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.EnterBarrier buildPartial() { + akka.remote.testconductor.TestConductorProtocol.EnterBarrier result = new akka.remote.testconductor.TestConductorProtocol.EnterBarrier(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.EnterBarrier) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.EnterBarrier)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.EnterBarrier other) { + if (other == akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + 
com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + name_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + void setName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:EnterBarrier) + } + + static { + defaultInstance = new EnterBarrier(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:EnterBarrier) + } + + public interface InjectFailureOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .FailType failure = 1; + boolean hasFailure(); + akka.remote.testconductor.TestConductorProtocol.FailType getFailure(); + + // optional .Direction direction = 2; + boolean hasDirection(); + akka.remote.testconductor.TestConductorProtocol.Direction getDirection(); + + // optional string host = 3; + 
boolean hasHost(); + String getHost(); + + // optional int32 port = 4; + boolean hasPort(); + int getPort(); + + // optional float rateMBit = 5; + boolean hasRateMBit(); + float getRateMBit(); + + // optional int32 exitValue = 6; + boolean hasExitValue(); + int getExitValue(); + } + public static final class InjectFailure extends + com.google.protobuf.GeneratedMessage + implements InjectFailureOrBuilder { + // Use InjectFailure.newBuilder() to construct. + private InjectFailure(Builder builder) { + super(builder); + } + private InjectFailure(boolean noInit) {} + + private static final InjectFailure defaultInstance; + public static InjectFailure getDefaultInstance() { + return defaultInstance; + } + + public InjectFailure getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_InjectFailure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_InjectFailure_fieldAccessorTable; + } + + private int bitField0_; + // required .FailType failure = 1; + public static final int FAILURE_FIELD_NUMBER = 1; + private akka.remote.testconductor.TestConductorProtocol.FailType failure_; + public boolean hasFailure() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public akka.remote.testconductor.TestConductorProtocol.FailType getFailure() { + return failure_; + } + + // optional .Direction direction = 2; + public static final int DIRECTION_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.Direction direction_; + public boolean hasDirection() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.Direction getDirection() { + return direction_; + } + + // optional string host = 3; + 
public static final int HOST_FIELD_NUMBER = 3; + private java.lang.Object host_; + public boolean hasHost() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getHost() { + java.lang.Object ref = host_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + host_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getHostBytes() { + java.lang.Object ref = host_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + host_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int32 port = 4; + public static final int PORT_FIELD_NUMBER = 4; + private int port_; + public boolean hasPort() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getPort() { + return port_; + } + + // optional float rateMBit = 5; + public static final int RATEMBIT_FIELD_NUMBER = 5; + private float rateMBit_; + public boolean hasRateMBit() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public float getRateMBit() { + return rateMBit_; + } + + // optional int32 exitValue = 6; + public static final int EXITVALUE_FIELD_NUMBER = 6; + private int exitValue_; + public boolean hasExitValue() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public int getExitValue() { + return exitValue_; + } + + private void initFields() { + failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; + direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; + host_ = ""; + port_ = 0; + rateMBit_ = 0F; + exitValue_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + 
+ if (!hasFailure()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, failure_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, direction_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getHostBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt32(4, port_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeFloat(5, rateMBit_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeInt32(6, exitValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, failure_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, direction_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getHostBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, port_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeFloatSize(5, rateMBit_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(6, exitValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long 
serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) 
{ + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.InjectFailure parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.InjectFailure prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_InjectFailure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
akka.remote.testconductor.TestConductorProtocol.internal_static_InjectFailure_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.InjectFailure.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; + bitField0_ = (bitField0_ & ~0x00000001); + direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; + bitField0_ = (bitField0_ & ~0x00000002); + host_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000008); + rateMBit_ = 0F; + bitField0_ = (bitField0_ & ~0x00000010); + exitValue_ = 0; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.InjectFailure getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.InjectFailure build() { + akka.remote.testconductor.TestConductorProtocol.InjectFailure result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.InjectFailure buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + 
akka.remote.testconductor.TestConductorProtocol.InjectFailure result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.InjectFailure buildPartial() { + akka.remote.testconductor.TestConductorProtocol.InjectFailure result = new akka.remote.testconductor.TestConductorProtocol.InjectFailure(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.failure_ = failure_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.direction_ = direction_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.host_ = host_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.port_ = port_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.rateMBit_ = rateMBit_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.exitValue_ = exitValue_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.InjectFailure) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.InjectFailure)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.InjectFailure other) { + if (other == akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance()) return this; + if (other.hasFailure()) { + setFailure(other.getFailure()); + } + if (other.hasDirection()) { + setDirection(other.getDirection()); + } + if 
(other.hasHost()) { + setHost(other.getHost()); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + if (other.hasRateMBit()) { + setRateMBit(other.getRateMBit()); + } + if (other.hasExitValue()) { + setExitValue(other.getExitValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFailure()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + akka.remote.testconductor.TestConductorProtocol.FailType value = akka.remote.testconductor.TestConductorProtocol.FailType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + failure_ = value; + } + break; + } + case 16: { + int rawValue = input.readEnum(); + akka.remote.testconductor.TestConductorProtocol.Direction value = akka.remote.testconductor.TestConductorProtocol.Direction.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + direction_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + host_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + port_ = input.readInt32(); + break; + } + case 45: { + bitField0_ |= 0x00000010; + rateMBit_ = input.readFloat(); + break; 
+ } + case 48: { + bitField0_ |= 0x00000020; + exitValue_ = input.readInt32(); + break; + } + } + } + } + + private int bitField0_; + + // required .FailType failure = 1; + private akka.remote.testconductor.TestConductorProtocol.FailType failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; + public boolean hasFailure() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public akka.remote.testconductor.TestConductorProtocol.FailType getFailure() { + return failure_; + } + public Builder setFailure(akka.remote.testconductor.TestConductorProtocol.FailType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + failure_ = value; + onChanged(); + return this; + } + public Builder clearFailure() { + bitField0_ = (bitField0_ & ~0x00000001); + failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; + onChanged(); + return this; + } + + // optional .Direction direction = 2; + private akka.remote.testconductor.TestConductorProtocol.Direction direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; + public boolean hasDirection() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.Direction getDirection() { + return direction_; + } + public Builder setDirection(akka.remote.testconductor.TestConductorProtocol.Direction value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + direction_ = value; + onChanged(); + return this; + } + public Builder clearDirection() { + bitField0_ = (bitField0_ & ~0x00000002); + direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; + onChanged(); + return this; + } + + // optional string host = 3; + private java.lang.Object host_ = ""; + public boolean hasHost() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getHost() { + java.lang.Object ref = host_; + if (!(ref instanceof 
String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + host_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setHost(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + host_ = value; + onChanged(); + return this; + } + public Builder clearHost() { + bitField0_ = (bitField0_ & ~0x00000004); + host_ = getDefaultInstance().getHost(); + onChanged(); + return this; + } + void setHost(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000004; + host_ = value; + onChanged(); + } + + // optional int32 port = 4; + private int port_ ; + public boolean hasPort() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getPort() { + return port_; + } + public Builder setPort(int value) { + bitField0_ |= 0x00000008; + port_ = value; + onChanged(); + return this; + } + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000008); + port_ = 0; + onChanged(); + return this; + } + + // optional float rateMBit = 5; + private float rateMBit_ ; + public boolean hasRateMBit() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public float getRateMBit() { + return rateMBit_; + } + public Builder setRateMBit(float value) { + bitField0_ |= 0x00000010; + rateMBit_ = value; + onChanged(); + return this; + } + public Builder clearRateMBit() { + bitField0_ = (bitField0_ & ~0x00000010); + rateMBit_ = 0F; + onChanged(); + return this; + } + + // optional int32 exitValue = 6; + private int exitValue_ ; + public boolean hasExitValue() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public int getExitValue() { + return exitValue_; + } + public Builder setExitValue(int value) { + bitField0_ |= 0x00000020; + exitValue_ = value; + onChanged(); + return this; + } + public Builder clearExitValue() { + bitField0_ = (bitField0_ & ~0x00000020); + exitValue_ = 0; + onChanged(); + return this; + } + + // 
@@protoc_insertion_point(builder_scope:InjectFailure) + } + + static { + defaultInstance = new InjectFailure(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:InjectFailure) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Wrapper_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Wrapper_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Hello_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Hello_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EnterBarrier_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EnterBarrier_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_InjectFailure_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_InjectFailure_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\033TestConductorProtocol.proto\"a\n\007Wrapper" + + "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" + + "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" + + "tFailure\"1\n\005Hello\022\014\n\004name\030\001 \002(\t\022\014\n\004host\030" + + "\002 \002(\t\022\014\n\004port\030\003 \002(\005\"\034\n\014EnterBarrier\022\014\n\004n" + + "ame\030\001 \002(\t\"\213\001\n\rInjectFailure\022\032\n\007failure\030\001" + + " \002(\0162\t.FailType\022\035\n\tdirection\030\002 \001(\0162\n.Dir" + + "ection\022\014\n\004host\030\003 \001(\t\022\014\n\004port\030\004 
\001(\005\022\020\n\010ra" + + "teMBit\030\005 \001(\002\022\021\n\texitValue\030\006 \001(\005*A\n\010FailT" + + "ype\022\014\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abo", + "rt\020\003\022\014\n\010Shutdown\020\004*\"\n\tDirection\022\010\n\004Send\020" + + "\001\022\013\n\007Receive\020\002B\035\n\031akka.remote.testconduc" + + "torH\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_Wrapper_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_Wrapper_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Wrapper_descriptor, + new java.lang.String[] { "Hello", "Barrier", "Failure", }, + akka.remote.testconductor.TestConductorProtocol.Wrapper.class, + akka.remote.testconductor.TestConductorProtocol.Wrapper.Builder.class); + internal_static_Hello_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_Hello_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Hello_descriptor, + new java.lang.String[] { "Name", "Host", "Port", }, + akka.remote.testconductor.TestConductorProtocol.Hello.class, + akka.remote.testconductor.TestConductorProtocol.Hello.Builder.class); + internal_static_EnterBarrier_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_EnterBarrier_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EnterBarrier_descriptor, + new java.lang.String[] { "Name", }, + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class, + akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class); + 
internal_static_InjectFailure_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_InjectFailure_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_InjectFailure_descriptor, + new java.lang.String[] { "Failure", "Direction", "Host", "Port", "RateMBit", "ExitValue", }, + akka.remote.testconductor.TestConductorProtocol.InjectFailure.class, + akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote/src/main/protocol/TestConductorProtocol.proto new file mode 100644 index 0000000000..1db35a7516 --- /dev/null +++ b/akka-remote/src/main/protocol/TestConductorProtocol.proto @@ -0,0 +1,48 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. 
+ */ + +option java_package = "akka.remote.testconductor"; +option optimize_for = SPEED; + +/****************************************** + Compile with: + cd ./akka-remote/src/main/protocol + protoc TestConductorProtocol.proto --java_out ../java +*******************************************/ + +message Wrapper { + optional Hello hello = 1; + optional EnterBarrier barrier = 2; + optional InjectFailure failure = 3; +} + +message Hello { + required string name = 1; + required string host = 2; + required int32 port = 3; +} + +message EnterBarrier { + required string name = 1; +} + +enum FailType { + Throttle = 1; + Disconnect = 2; + Abort = 3; + Shutdown = 4; +} +enum Direction { + Send = 1; + Receive = 2; +} +message InjectFailure { + required FailType failure = 1; + optional Direction direction = 2; + optional string host = 3; + optional int32 port = 4; + optional float rateMBit = 5; + optional int32 exitValue = 6; +} + diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala new file mode 100644 index 0000000000..58a6a5f88e --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -0,0 +1,268 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.remote.testconductor + +import akka.actor.{ Actor, ActorRef, LoggingFSM, Timeout, UntypedChannel } +import akka.event.EventHandler +import RemoteConnection.getAddrString +import akka.util.duration._ +import TestConductorProtocol._ +import akka.NoStackTrace +import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } + +object Conductor extends RunControl with FailureInject with BarrierSync { + + import Controller._ + + private val controller = Actor.actorOf[Controller] + controller ! ClientConnected + + override def enter(name: String*) { + implicit val timeout = Timeout(30 seconds) + name foreach (b ⇒ (controller ? 
EnterBarrier(b)).get) + } + + override def throttle(node: String, target: String, direction: Direction, rateMBit: Float) { + controller ! Throttle(node, target, direction, rateMBit) + } + + override def blackhole(node: String, target: String, direction: Direction) { + controller ! Throttle(node, target, direction, 0f) + } + + override def disconnect(node: String, target: String) { + controller ! Disconnect(node, target, false) + } + + override def abort(node: String, target: String) { + controller ! Disconnect(node, target, true) + } + + override def shutdown(node: String, exitValue: Int) { + controller ! Terminate(node, exitValue) + } + + override def kill(node: String) { + controller ! Terminate(node, -1) + } + + override def getNodes = (controller ? GetNodes).as[List[String]].get + + override def removeNode(node: String) { + controller ! Remove(node) + } + +} + +class ConductorHandler(controller: ActorRef) extends SimpleChannelUpstreamHandler { + + var clients = Map[Channel, ActorRef]() + + override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "connection from " + getAddrString(channel)) + val fsm = Actor.actorOf(new ServerFSM(controller, channel)) + clients += channel -> fsm + } + + override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "disconnect from " + getAddrString(channel)) + val fsm = clients(channel) + fsm.stop() + clients -= channel + } + + override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "message from " + getAddrString(channel) + ": " + event.getMessage) + event.getMessage match { + case msg: Wrapper if msg.getAllFields.size == 1 ⇒ + clients(channel) ! 
msg + case msg ⇒ + EventHandler.info(this, "client " + getAddrString(channel) + " sent garbage '" + msg + "', disconnecting") + channel.close() + } + } + +} + +object ServerFSM { + sealed trait State + case object Initial extends State + case object Ready extends State + + case class Send(msg: Wrapper) +} + +class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Null] { + import ServerFSM._ + import akka.actor.FSM._ + import Controller._ + + startWith(Initial, null) + + when(Initial, stateTimeout = 10 seconds) { + case Ev(msg: Wrapper) ⇒ + if (msg.hasHello) { + val hello = msg.getHello + controller ! ClientConnected(hello.getName, hello.getHost, hello.getPort) + goto(Ready) + } else { + EventHandler.warning(this, "client " + getAddrString(channel) + " sent no Hello in first message, disconnecting") + channel.close() + stop() + } + case Ev(StateTimeout) ⇒ + EventHandler.info(this, "closing channel to " + getAddrString(channel) + " because of Hello timeout") + channel.close() + stop() + } + + when(Ready) { + case Ev(msg: Wrapper) ⇒ + if (msg.hasBarrier) { + val barrier = msg.getBarrier + controller ! 
EnterBarrier(barrier.getName) + } else { + EventHandler.warning(this, "client " + getAddrString(channel) + " sent unsupported message " + msg) + } + stay + case Ev(Send(msg)) ⇒ + channel.write(msg) + stay + case Ev(EnterBarrier(name)) ⇒ + val barrier = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build + channel.write(Wrapper.newBuilder.setBarrier(barrier).build) + stay + } + + initialize +} + +object Controller { + case class ClientConnected(name: String, host: String, port: Int) + case class ClientDisconnected(name: String) + case object GetNodes + + case class NodeInfo(name: String, host: String, port: Int, fsm: ActorRef) +} + +class Controller extends Actor { + import Controller._ + + val host = System.getProperty("akka.testconductor.host", "localhost") + val port = Integer.getInteger("akka.testconductor.port", 4545) + val connection = RemoteConnection(Server, host, port, new ConductorHandler(self)) + + val barrier = Actor.actorOf[BarrierCoordinator] + var nodes = Map[String, NodeInfo]() + + override def receive = Actor.loggable(this) { + case ClientConnected(name, host, port) ⇒ + self.channel match { + case ref: ActorRef ⇒ nodes += name -> NodeInfo(name, host, port, ref) + } + barrier forward ClientConnected + case ClientConnected ⇒ + barrier forward ClientConnected + case ClientDisconnected(name) ⇒ + nodes -= name + barrier forward ClientDisconnected + case e @ EnterBarrier(name) ⇒ + barrier forward e + case Throttle(node, target, direction, rateMBit) ⇒ + val t = nodes(target) + val throttle = + InjectFailure.newBuilder + .setFailure(FailType.Throttle) + .setDirection(TestConductorProtocol.Direction.valueOf(direction.toString)) + .setHost(t.host) + .setPort(t.port) + .setRateMBit(rateMBit) + .build + nodes(node).fsm ! 
ServerFSM.Send(Wrapper.newBuilder.setFailure(throttle).build) + case Disconnect(node, target, abort) ⇒ + val t = nodes(target) + val disconnect = + InjectFailure.newBuilder + .setFailure(if (abort) FailType.Abort else FailType.Disconnect) + .setHost(t.host) + .setPort(t.port) + .build + nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(disconnect).build) + case Terminate(node, exitValueOrKill) ⇒ + if (exitValueOrKill < 0) { + // TODO: kill via SBT + } else { + val shutdown = InjectFailure.newBuilder.setFailure(FailType.Shutdown).setExitValue(exitValueOrKill).build + nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(shutdown).build) + } + // TODO: properly remove node from BarrierCoordinator + // case Remove(node) => + // nodes -= node + case GetNodes ⇒ self reply nodes.keys + } +} + +object BarrierCoordinator { + sealed trait State + case object Idle extends State + case object Waiting extends State + + case class Data(clients: Int, barrier: String, arrived: List[UntypedChannel]) + class BarrierTimeoutException(msg: String) extends RuntimeException(msg) with NoStackTrace +} + +class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] { + import BarrierCoordinator._ + import akka.actor.FSM._ + import Controller._ + + startWith(Idle, Data(0, "", Nil)) + + when(Idle) { + case Event(EnterBarrier(name), Data(num, _, _)) ⇒ + if (num == 0) throw new IllegalStateException("no client expected yet") + goto(Waiting) using Data(num, name, self.channel :: Nil) + case Event(ClientConnected, d @ Data(num, _, _)) ⇒ + stay using d.copy(clients = num + 1) + case Event(ClientDisconnected, d @ Data(num, _, _)) ⇒ + if (num == 0) throw new IllegalStateException("no client to disconnect") + stay using d.copy(clients = num - 1) + } + + onTransition { + case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, 30 seconds, false) + case Waiting -> Idle ⇒ cancelTimer("Timeout") + } + + when(Waiting) { + case Event(e @ 
EnterBarrier(name), d @ Data(num, barrier, arrived)) ⇒ + if (name != barrier) throw new IllegalStateException("trying enter barrier '" + name + "' while barrier '" + barrier + "' is active") + val together = self.channel :: arrived + if (together.size == num) { + together foreach (_ ! e) + goto(Idle) using Data(num, "", Nil) + } else { + stay using d.copy(arrived = together) + } + case Event(ClientConnected, d @ Data(num, _, _)) ⇒ + stay using d.copy(clients = num + 1) + case Event(ClientDisconnected, d @ Data(num, barrier, arrived)) ⇒ + val expected = num - 1 + if (arrived.size == expected) { + val e = EnterBarrier(barrier) + self.channel :: arrived foreach (_ ! e) + goto(Idle) using Data(expected, "", Nil) + } else { + stay using d.copy(clients = expected) + } + case Event(StateTimeout, Data(num, barrier, arrived)) ⇒ + throw new BarrierTimeoutException("only " + arrived.size + " of " + num + " arrived at barrier " + barrier) + } + + initialize +} + diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala new file mode 100644 index 0000000000..2b54ea1018 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -0,0 +1,13 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. 
+ */ +package akka.remote.testconductor + +sealed trait ClientOp +sealed trait ServerOp + +case class EnterBarrier(name: String) extends ClientOp with ServerOp +case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends ServerOp +case class Disconnect(node: String, target: String, abort: Boolean) extends ServerOp +case class Terminate(node: String, exitValueOrKill: Int) extends ServerOp +case class Remove(node: String) extends ServerOp diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala new file mode 100644 index 0000000000..399b58337b --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala @@ -0,0 +1,74 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.remote.testconductor + +trait BarrierSync { + /** + * Enter all given barriers in the order in which they were given. + */ + def enter(name: String*): Unit +} + +sealed trait Direction +case object Send extends Direction +case object Receive extends Direction +case object Both extends Direction + +trait FailureInject { + + /** + * Make the remoting pipeline on the node throttle data sent to or received + * from the given remote peer. + */ + def throttle(node: String, target: String, direction: Direction, rateMBit: Float): Unit + + /** + * Switch the Netty pipeline of the remote support into blackhole mode for + * sending and/or receiving: it will just drop all messages right before + * submitting them to the Socket or right after receiving them from the + * Socket. + */ + def blackhole(node: String, target: String, direction: Direction): Unit + + /** + * Tell the remote support to shutdown the connection to the given remote + * peer. It works regardless of whether the recipient was initiator or + * responder. 
+ */ + def disconnect(node: String, target: String): Unit + + /** + * Tell the remote support to TCP_RESET the connection to the given remote + * peer. It works regardless of whether the recipient was initiator or + * responder. + */ + def abort(node: String, target: String): Unit + +} + +trait RunControl { + + /** + * Tell the remote node to shut itself down using System.exit with the given + * exitValue. + */ + def shutdown(node: String, exitValue: Int): Unit + + /** + * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy. + */ + def kill(node: String): Unit + + /** + * Obtain the list of remote host names currently registered. + */ + def getNodes: List[String] + + /** + * Remove a remote host from the list, so that the remaining nodes may still + * pass subsequent barriers. + */ + def removeNode(node: String): Unit + +} diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala new file mode 100644 index 0000000000..eec6a2cbf1 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -0,0 +1,161 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. 
+ */ +package akka.remote.testconductor + +import java.net.InetSocketAddress + +import scala.collection.immutable.Queue + +import org.jboss.netty.buffer.ChannelBuffer +import org.jboss.netty.channel.ChannelState.BOUND +import org.jboss.netty.channel.ChannelState.OPEN +import org.jboss.netty.channel.Channel +import org.jboss.netty.channel.ChannelDownstreamHandler +import org.jboss.netty.channel.ChannelEvent +import org.jboss.netty.channel.ChannelHandlerContext +import org.jboss.netty.channel.ChannelStateEvent +import org.jboss.netty.channel.ChannelUpstreamHandler +import org.jboss.netty.channel.MessageEvent + +import akka.actor.FSM +import akka.actor.Actor +import akka.util.duration.doubleToDurationDouble +import akka.util.Index +import akka.util.RemoteAddress + +object NetworkFailureInjector { + + val channels = new Index[RemoteAddress, Channel]() + + def close(remote: RemoteAddress): Unit = { + val set = channels.remove(remote) + // channels will be cleaned up by the handler + set foreach (_.close()) + } +} + +class NetworkFailureInjector extends ChannelUpstreamHandler with ChannelDownstreamHandler { + + import NetworkFailureInjector._ + + // local cache of remote address + private var remote: Option[RemoteAddress] = None + + // everything goes via these Throttle actors to enable easy steering + private val sender = Actor.actorOf(new Throttle(_.sendDownstream(_))) + private val receiver = Actor.actorOf(new Throttle(_.sendUpstream(_))) + + /* + * State, Data and Messages for the internal Throttle actor + */ + sealed private trait State + private case object PassThrough extends State + private case object Throttle extends State + private case object Blackhole extends State + + private case class Data(ctx: ChannelHandlerContext, rateMBit: Float, queue: Queue[MessageEvent]) + + private case class SetRate(rateMBit: Float) + private case class Send(ctx: ChannelHandlerContext, msg: MessageEvent) + private case object Tick + + private class Throttle(send: 
(ChannelHandlerContext, MessageEvent) ⇒ Unit) extends Actor with FSM[State, Data] { + import FSM._ + + startWith(PassThrough, Data(null, -1, Queue())) + + when(PassThrough) { + case Event(Send(ctx, msg), d) ⇒ + send(ctx, msg) + stay + } + + when(Throttle) { + case Event(Send(ctx, msg), d) ⇒ + if (!timerActive_?("send")) { + setTimer("send", Tick, (size(msg) / d.rateMBit) microseconds, false) + } + stay using d.copy(ctx = ctx, queue = d.queue.enqueue(msg)) + case Event(Tick, d) ⇒ + val (msg, queue) = d.queue.dequeue + send(d.ctx, msg) + if (queue.nonEmpty) setTimer("send", Tick, (size(queue.head) / d.rateMBit) microseconds, false) + stay using d.copy(queue = queue) + } + + onTransition { + case Throttle -> PassThrough ⇒ + stateData.queue foreach (send(stateData.ctx, _)) + cancelTimer("send") + case Throttle -> Blackhole ⇒ + cancelTimer("send") + } + + when(Blackhole) { + case Event(Send(_, _), _) ⇒ + stay + } + + whenUnhandled { + case Event(SetRate(rate), d) ⇒ + if (rate > 0) { + goto(Throttle) using d.copy(rateMBit = rate, queue = Queue()) + } else if (rate == 0) { + goto(Blackhole) + } else { + goto(PassThrough) + } + } + + initialize + + private def size(msg: MessageEvent) = msg.getMessage() match { + case b: ChannelBuffer ⇒ b.readableBytes() * 8 + case _ ⇒ throw new UnsupportedOperationException("NetworkFailureInjector only supports ChannelBuffer messages") + } + } + + def throttleSend(rateMBit: Float) { + sender ! SetRate(rateMBit) + } + + def throttleReceive(rateMBit: Float) { + receiver ! SetRate(rateMBit) + } + + override def handleUpstream(ctx: ChannelHandlerContext, evt: ChannelEvent) { + evt match { + case msg: MessageEvent ⇒ + receiver ! 
Send(ctx, msg) + case state: ChannelStateEvent ⇒ + state.getState match { + case BOUND ⇒ + state.getValue match { + case null ⇒ + remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } + case a: InetSocketAddress ⇒ + val addr = RemoteAddress(a) + channels.put(addr, state.getChannel) + remote = Some(addr) + } + case OPEN if state.getValue == false ⇒ + remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } + } + ctx.sendUpstream(evt) + case _ ⇒ + ctx.sendUpstream(evt) + } + } + + override def handleDownstream(ctx: ChannelHandlerContext, evt: ChannelEvent) { + evt match { + case msg: MessageEvent ⇒ + sender ! Send(ctx, msg) + case _ ⇒ + ctx.sendUpstream(evt) + } + } + +} + diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala new file mode 100644 index 0000000000..16abe5bb27 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala @@ -0,0 +1,133 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.remote.testconductor + +import akka.actor.{ Actor, ActorRef, LoggingFSM, Timeout, UntypedChannel } +import akka.event.EventHandler +import RemoteConnection.getAddrString +import akka.util.duration._ +import TestConductorProtocol._ +import akka.NoStackTrace +import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } +import com.eaio.uuid.UUID + +object Player extends BarrierSync { + + private val server = Actor.actorOf[ClientFSM] + + override def enter(name: String*) { + EventHandler.debug(this, "entering barriers " + name.mkString("(", ", ", ")")) + implicit val timeout = Timeout(30 seconds) + name foreach { b ⇒ + (server ? 
EnterBarrier(b)).get + EventHandler.debug(this, "passed barrier " + b) + } + } +} + +object ClientFSM { + sealed trait State + case object Connecting extends State + case object Connected extends State + + case class Data(channel: Channel, msg: Either[List[ClientOp], (String, UntypedChannel)]) + + class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace + case object Disconnected +} + +class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { + import ClientFSM._ + import akka.actor.FSM._ + + val name = System.getProperty("akka.testconductor.name", (new UUID).toString) + val host = System.getProperty("akka.testconductor.host", "localhost") + val port = Integer.getInteger("akka.testconductor.port", 4545) + val handler = new PlayerHandler(self) + + val myself = Actor.remote.address + + startWith(Connecting, Data(RemoteConnection(Client, host, port, handler), Left(Nil))) + + when(Connecting, stateTimeout = 10 seconds) { + case Event(msg: ClientOp, Data(channel, Left(msgs))) ⇒ + stay using Data(channel, Left(msg :: msgs)) + case Event(Connected, Data(channel, Left(msgs))) ⇒ + val hello = Hello.newBuilder.setName(name).setHost(myself.getAddress.getHostAddress).setPort(myself.getPort).build + channel.write(Wrapper.newBuilder.setHello(hello).build) + msgs.reverse foreach sendMsg(channel) + goto(Connected) using Data(channel, Left(Nil)) + case Event(_: ConnectionFailure, _) ⇒ + // System.exit(1) + stop + case Event(StateTimeout, _) ⇒ + EventHandler.error(this, "connect timeout to TestConductor") + // System.exit(1) + stop + } + + when(Connected) { + case Event(Disconnected, _) ⇒ + EventHandler.info(this, "disconnected from TestConductor") + throw new ConnectionFailure("disconnect") + case Event(msg: EnterBarrier, Data(channel, _)) ⇒ + sendMsg(channel)(msg) + stay using Data(channel, Right((msg.name, self.channel))) + case Event(msg: Wrapper, Data(channel, Right((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ + if 
(msg.hasBarrier) { + val b = msg.getBarrier.getName + if (b != barrier) { + sender.sendException(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + } else { + sender ! b + } + } + stay using Data(channel, Left(Nil)) + } + + onTermination { + case StopEvent(_, _, Data(channel, _)) ⇒ + channel.close() + } + + private def sendMsg(channel: Channel)(msg: ClientOp) { + msg match { + case EnterBarrier(name) ⇒ + val enter = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build + channel.write(Wrapper.newBuilder.setBarrier(enter).build) + } + } + +} + +class PlayerHandler(fsm: ActorRef) extends SimpleChannelUpstreamHandler { + + import ClientFSM._ + + override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "connected to " + getAddrString(channel)) + while (!fsm.isRunning) Thread.sleep(100) + fsm ! Connected + } + + override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "disconnected from " + getAddrString(channel)) + fsm.stop() + } + + override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = { + val channel = event.getChannel + EventHandler.debug(this, "message from " + getAddrString(channel) + ": " + event.getMessage) + event.getMessage match { + case msg: Wrapper if msg.getAllFields.size == 1 ⇒ + fsm ! 
msg + case msg ⇒ + EventHandler.info(this, "server " + getAddrString(channel) + " sent garbage '" + msg + "', disconnecting") + channel.close() + } + } +} + diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala new file mode 100644 index 0000000000..a92b6295e2 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -0,0 +1,52 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.remote.testconductor + +import org.jboss.netty.channel.{ Channel, ChannelPipeline, ChannelPipelineFactory, ChannelUpstreamHandler, SimpleChannelUpstreamHandler, StaticChannelPipeline } +import org.jboss.netty.channel.socket.nio.{ NioClientSocketChannelFactory, NioServerSocketChannelFactory } +import org.jboss.netty.bootstrap.{ ClientBootstrap, ServerBootstrap } +import org.jboss.netty.handler.codec.frame.{ LengthFieldBasedFrameDecoder, LengthFieldPrepender } +import org.jboss.netty.handler.codec.compression.{ ZlibDecoder, ZlibEncoder } +import org.jboss.netty.handler.codec.protobuf.{ ProtobufDecoder, ProtobufEncoder } +import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutException } +import java.net.InetSocketAddress +import java.util.concurrent.Executors + +class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends ChannelPipelineFactory { + def getPipeline: ChannelPipeline = { + val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) + val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) + new StaticChannelPipeline(encap ::: proto ::: handler :: Nil: _*) + } +} + +sealed trait Role +case object Client extends Role +case object Server extends Role + +object RemoteConnection { + def apply(role: Role, host: String, port: Int, handler: ChannelUpstreamHandler): Channel = { + val sockaddr = new 
InetSocketAddress(host, port) + role match { + case Client ⇒ + val socketfactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool) + val bootstrap = new ClientBootstrap(socketfactory) + bootstrap.setPipelineFactory(new TestConductorPipelineFactory(handler)) + bootstrap.setOption("tcpNoDelay", true) + bootstrap.connect(sockaddr).getChannel + case Server ⇒ + val socketfactory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool) + val bootstrap = new ServerBootstrap(socketfactory) + bootstrap.setPipelineFactory(new TestConductorPipelineFactory(handler)) + bootstrap.setOption("reuseAddress", true) + bootstrap.setOption("child.tcpNoDelay", true) + bootstrap.bind(sockaddr) + } + } + + def getAddrString(channel: Channel) = channel.getRemoteAddress match { + case i: InetSocketAddress ⇒ i.toString + case _ ⇒ "[unknown]" + } +} From 6c786d20b808b4b5989076223fcc7da0d27e71f9 Mon Sep 17 00:00:00 2001 From: Roland Date: Wed, 2 May 2012 21:56:26 +0200 Subject: [PATCH 002/538] porting to 2.0, making it compile: UNTESTED! 
--- .../remote/netty/NettyRemoteSupport.scala | 24 ++++- .../main/scala/akka/remote/netty/Server.scala | 4 +- .../akka/remote/testconductor/Conductor.scala | 93 ++++++++++++------- .../NetworkFailureInjector.scala | 24 +++-- .../akka/remote/testconductor/Player.scala | 72 ++++++++------ 5 files changed, 135 insertions(+), 82 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 8acd33c7fb..55e2d95636 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -31,9 +31,11 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor val settings = new NettySettings(remoteSettings.config.getConfig("akka.remote.netty"), remoteSettings.systemName) + // TODO replace by system.scheduler val timer: HashedWheelTimer = new HashedWheelTimer(system.threadFactory) - val executor = new OrderedMemoryAwareThreadPoolExecutor( + // TODO make configurable + lazy val executor = new OrderedMemoryAwareThreadPoolExecutor( settings.ExecutionPoolSize, settings.MaxChannelMemorySize, settings.MaxTotalMemorySize, @@ -41,6 +43,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor settings.ExecutionPoolKeepalive.unit, system.threadFactory) + // TODO make configurable/shareable with server socket factory val clientChannelFactory = new NioClientSocketChannelFactory( Executors.newCachedThreadPool(system.threadFactory), Executors.newCachedThreadPool(system.threadFactory)) @@ -50,9 +53,20 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor override protected def useUntrustedMode = remoteSettings.UntrustedMode - val server = try new NettyRemoteServer(this) catch { - case ex ⇒ shutdown(); throw ex - } + val server: NettyRemoteServer = try createServer() catch { case NonFatal(ex) ⇒ shutdown(); 
throw ex } + + /** + * Override this method to inject a subclass of NettyRemoteServer instead of + * the normal one, e.g. for altering the pipeline. + */ + protected def createServer(): NettyRemoteServer = new NettyRemoteServer(this) + + /** + * Override this method to inject a subclass of RemoteClient instead of + * the normal one, e.g. for altering the pipeline. Get this transport’s + * address from `this.address`. + */ + protected def createClient(recipient: Address): RemoteClient = new ActiveRemoteClient(this, recipient, address) // the address is set in start() or from the RemoteServerHandler, whichever comes first private val _address = new AtomicReference[Address] @@ -121,7 +135,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor //Recheck for addition, race between upgrades case Some(client) ⇒ client //If already populated by other writer case None ⇒ //Populate map - val client = new ActiveRemoteClient(this, recipientAddress, address) + val client = createClient(recipientAddress) client.connect() remoteClients += recipientAddress -> client client diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 1f18b27c8c..97d3f194f3 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -37,13 +37,15 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { private val bootstrap = { val b = new ServerBootstrap(factory) - b.setPipelineFactory(new RemoteServerPipelineFactory(openChannels, executionHandler, netty)) + b.setPipelineFactory(makePipeline()) b.setOption("backlog", settings.Backlog) b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) b.setOption("reuseAddress", true) b } + + protected def makePipeline(): ChannelPipelineFactory = new RemoteServerPipelineFactory(openChannels, executionHandler, netty) @volatile private[akka] var channel: Channel 
= _ diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala index 58a6a5f88e..3265fc8808 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -3,24 +3,41 @@ */ package akka.remote.testconductor -import akka.actor.{ Actor, ActorRef, LoggingFSM, Timeout, UntypedChannel } -import akka.event.EventHandler +import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props } import RemoteConnection.getAddrString -import akka.util.duration._ import TestConductorProtocol._ -import akka.NoStackTrace import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } +import com.typesafe.config.ConfigFactory +import akka.util.Timeout +import akka.util.Duration +import akka.util.duration._ +import akka.pattern.ask +import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.dispatch.Await +import akka.event.LoggingAdapter +import akka.actor.PoisonPill +import akka.event.Logging +import scala.util.control.NoStackTrace object Conductor extends RunControl with FailureInject with BarrierSync { + val system = ActorSystem("conductor", ConfigFactory.load().getConfig("conductor")) + + object Settings { + val config = system.settings.config + + implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("barrier-timeout"), MILLISECONDS)) + implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("query-timeout"), MILLISECONDS)) + } + import Controller._ - private val controller = Actor.actorOf[Controller] + private val controller = system.actorOf(Props[Controller], "controller") controller ! ClientConnected override def enter(name: String*) { - implicit val timeout = Timeout(30 seconds) - name foreach (b ⇒ (controller ? 
EnterBarrier(b)).get) + import Settings.BarrierTimeout + name foreach (b ⇒ Await.result(controller ? EnterBarrier(b), Duration.Inf)) } override def throttle(node: String, target: String, direction: Direction, rateMBit: Float) { @@ -47,7 +64,10 @@ object Conductor extends RunControl with FailureInject with BarrierSync { controller ! Terminate(node, -1) } - override def getNodes = (controller ? GetNodes).as[List[String]].get + override def getNodes = { + import Settings.QueryTimeout + Await.result(controller ? GetNodes mapTo manifest[List[String]], Duration.Inf) + } override def removeNode(node: String) { controller ! Remove(node) @@ -55,33 +75,33 @@ object Conductor extends RunControl with FailureInject with BarrierSync { } -class ConductorHandler(controller: ActorRef) extends SimpleChannelUpstreamHandler { +class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { var clients = Map[Channel, ActorRef]() override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel - EventHandler.debug(this, "connection from " + getAddrString(channel)) - val fsm = Actor.actorOf(new ServerFSM(controller, channel)) + log.debug("connection from {}", getAddrString(channel)) + val fsm = system.actorOf(Props(new ServerFSM(controller, channel))) clients += channel -> fsm } override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel - EventHandler.debug(this, "disconnect from " + getAddrString(channel)) + log.debug("disconnect from {}", getAddrString(channel)) val fsm = clients(channel) - fsm.stop() + fsm ! 
PoisonPill clients -= channel } override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = { val channel = event.getChannel - EventHandler.debug(this, "message from " + getAddrString(channel) + ": " + event.getMessage) + log.debug("message from {}: {}", getAddrString(channel), event.getMessage) event.getMessage match { case msg: Wrapper if msg.getAllFields.size == 1 ⇒ clients(channel) ! msg case msg ⇒ - EventHandler.info(this, "client " + getAddrString(channel) + " sent garbage '" + msg + "', disconnecting") + log.info("client {} sent garbage '{}', disconnecting", getAddrString(channel), msg) channel.close() } } @@ -104,35 +124,35 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi startWith(Initial, null) when(Initial, stateTimeout = 10 seconds) { - case Ev(msg: Wrapper) ⇒ + case Event(msg: Wrapper, _) ⇒ if (msg.hasHello) { val hello = msg.getHello controller ! ClientConnected(hello.getName, hello.getHost, hello.getPort) goto(Ready) } else { - EventHandler.warning(this, "client " + getAddrString(channel) + " sent no Hello in first message, disconnecting") + log.warning("client {} sent no Hello in first message, disconnecting", getAddrString(channel)) channel.close() stop() } - case Ev(StateTimeout) ⇒ - EventHandler.info(this, "closing channel to " + getAddrString(channel) + " because of Hello timeout") + case Event(StateTimeout, _) ⇒ + log.info("closing channel to {} because of Hello timeout", getAddrString(channel)) channel.close() stop() } when(Ready) { - case Ev(msg: Wrapper) ⇒ + case Event(msg: Wrapper, _) ⇒ if (msg.hasBarrier) { val barrier = msg.getBarrier controller ! 
EnterBarrier(barrier.getName) } else { - EventHandler.warning(this, "client " + getAddrString(channel) + " sent unsupported message " + msg) + log.warning("client {} sent unsupported message {}", getAddrString(channel), msg) } stay - case Ev(Send(msg)) ⇒ + case Event(Send(msg), _) ⇒ channel.write(msg) stay - case Ev(EnterBarrier(name)) ⇒ + case Event(EnterBarrier(name), _) ⇒ val barrier = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build channel.write(Wrapper.newBuilder.setBarrier(barrier).build) stay @@ -152,18 +172,19 @@ object Controller { class Controller extends Actor { import Controller._ - val host = System.getProperty("akka.testconductor.host", "localhost") - val port = Integer.getInteger("akka.testconductor.port", 4545) - val connection = RemoteConnection(Server, host, port, new ConductorHandler(self)) + val config = context.system.settings.config - val barrier = Actor.actorOf[BarrierCoordinator] + val host = config.getString("akka.testconductor.host") + val port = config.getInt("akka.testconductor.port") + val connection = RemoteConnection(Server, host, port, + new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler"))) + + val barrier = context.actorOf(Props[BarrierCoordinator], "barriers") var nodes = Map[String, NodeInfo]() - override def receive = Actor.loggable(this) { + override def receive = { case ClientConnected(name, host, port) ⇒ - self.channel match { - case ref: ActorRef ⇒ nodes += name -> NodeInfo(name, host, port, ref) - } + nodes += name -> NodeInfo(name, host, port, sender) barrier forward ClientConnected case ClientConnected ⇒ barrier forward ClientConnected @@ -202,7 +223,7 @@ class Controller extends Actor { // TODO: properly remove node from BarrierCoordinator // case Remove(node) => // nodes -= node - case GetNodes ⇒ self reply nodes.keys + case GetNodes ⇒ sender ! 
nodes.keys } } @@ -211,7 +232,7 @@ object BarrierCoordinator { case object Idle extends State case object Waiting extends State - case class Data(clients: Int, barrier: String, arrived: List[UntypedChannel]) + case class Data(clients: Int, barrier: String, arrived: List[ActorRef]) class BarrierTimeoutException(msg: String) extends RuntimeException(msg) with NoStackTrace } @@ -225,7 +246,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, when(Idle) { case Event(EnterBarrier(name), Data(num, _, _)) ⇒ if (num == 0) throw new IllegalStateException("no client expected yet") - goto(Waiting) using Data(num, name, self.channel :: Nil) + goto(Waiting) using Data(num, name, sender :: Nil) case Event(ClientConnected, d @ Data(num, _, _)) ⇒ stay using d.copy(clients = num + 1) case Event(ClientDisconnected, d @ Data(num, _, _)) ⇒ @@ -241,7 +262,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, when(Waiting) { case Event(e @ EnterBarrier(name), d @ Data(num, barrier, arrived)) ⇒ if (name != barrier) throw new IllegalStateException("trying enter barrier '" + name + "' while barrier '" + barrier + "' is active") - val together = self.channel :: arrived + val together = sender :: arrived if (together.size == num) { together foreach (_ ! e) goto(Idle) using Data(num, "", Nil) @@ -254,7 +275,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, val expected = num - 1 if (arrived.size == expected) { val e = EnterBarrier(barrier) - self.channel :: arrived foreach (_ ! e) + sender :: arrived foreach (_ ! 
e) goto(Idle) using Data(expected, "", Nil) } else { stay using d.copy(clients = expected) diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index eec6a2cbf1..88102b5e86 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -4,9 +4,7 @@ package akka.remote.testconductor import java.net.InetSocketAddress - import scala.collection.immutable.Queue - import org.jboss.netty.buffer.ChannelBuffer import org.jboss.netty.channel.ChannelState.BOUND import org.jboss.netty.channel.ChannelState.OPEN @@ -17,34 +15,34 @@ import org.jboss.netty.channel.ChannelHandlerContext import org.jboss.netty.channel.ChannelStateEvent import org.jboss.netty.channel.ChannelUpstreamHandler import org.jboss.netty.channel.MessageEvent - import akka.actor.FSM import akka.actor.Actor import akka.util.duration.doubleToDurationDouble import akka.util.Index -import akka.util.RemoteAddress +import akka.actor.Address +import akka.actor.ActorSystem +import akka.actor.Props object NetworkFailureInjector { - val channels = new Index[RemoteAddress, Channel]() + val channels = new Index[Address, Channel](16, (c1, c2) => c1 compareTo c2) - def close(remote: RemoteAddress): Unit = { - val set = channels.remove(remote) + def close(remote: Address): Unit = { // channels will be cleaned up by the handler - set foreach (_.close()) + for (chs <- channels.remove(remote); c <- chs) c.close() } } -class NetworkFailureInjector extends ChannelUpstreamHandler with ChannelDownstreamHandler { +class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler with ChannelDownstreamHandler { import NetworkFailureInjector._ // local cache of remote address - private var remote: Option[RemoteAddress] = None + private var remote: Option[Address] = None // 
everything goes via these Throttle actors to enable easy steering - private val sender = Actor.actorOf(new Throttle(_.sendDownstream(_))) - private val receiver = Actor.actorOf(new Throttle(_.sendUpstream(_))) + private val sender = system.actorOf(Props(new Throttle(_.sendDownstream(_)))) + private val receiver = system.actorOf(Props(new Throttle(_.sendUpstream(_)))) /* * State, Data and Messages for the internal Throttle actor @@ -135,7 +133,7 @@ class NetworkFailureInjector extends ChannelUpstreamHandler with ChannelDownstre case null ⇒ remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } case a: InetSocketAddress ⇒ - val addr = RemoteAddress(a) + val addr = Address("akka", "XXX", a.getHostName, a.getPort) channels.put(addr, state.getChannel) remote = Some(addr) } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala index 16abe5bb27..029045394c 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala @@ -3,25 +3,42 @@ */ package akka.remote.testconductor -import akka.actor.{ Actor, ActorRef, LoggingFSM, Timeout, UntypedChannel } -import akka.event.EventHandler +import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props } import RemoteConnection.getAddrString import akka.util.duration._ import TestConductorProtocol._ -import akka.NoStackTrace import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } import com.eaio.uuid.UUID +import com.typesafe.config.ConfigFactory +import akka.util.Timeout +import akka.util.Duration +import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.pattern.ask +import akka.dispatch.Await +import scala.util.control.NoStackTrace +import akka.actor.Status +import akka.event.LoggingAdapter +import akka.actor.PoisonPill +import akka.event.Logging 
object Player extends BarrierSync { - private val server = Actor.actorOf[ClientFSM] + val system = ActorSystem("Player", ConfigFactory.load().getConfig("player")) + + object Settings { + val config = system.settings.config + + implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("barrier-timeout"), MILLISECONDS)) + } + + private val server = system.actorOf(Props[ClientFSM], "client") override def enter(name: String*) { - EventHandler.debug(this, "entering barriers " + name.mkString("(", ", ", ")")) - implicit val timeout = Timeout(30 seconds) + system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) name foreach { b ⇒ - (server ? EnterBarrier(b)).get - EventHandler.debug(this, "passed barrier " + b) + import Settings.BarrierTimeout + Await.result(server ? EnterBarrier(b), Duration.Inf) + system.log.debug("passed barrier {}", b) } } } @@ -31,7 +48,7 @@ object ClientFSM { case object Connecting extends State case object Connected extends State - case class Data(channel: Channel, msg: Either[List[ClientOp], (String, UntypedChannel)]) + case class Data(channel: Channel, msg: Either[List[ClientOp], (String, ActorRef)]) class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace case object Disconnected @@ -39,14 +56,16 @@ object ClientFSM { class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ - import akka.actor.FSM._ - val name = System.getProperty("akka.testconductor.name", (new UUID).toString) - val host = System.getProperty("akka.testconductor.host", "localhost") - val port = Integer.getInteger("akka.testconductor.port", 4545) - val handler = new PlayerHandler(self) + val config = context.system.settings.config - val myself = Actor.remote.address + val name = config.getString("akka.testconductor.name") + val host = config.getString("akka.testconductor.host") + val port = config.getInt("akka.testconductor.port") + val handler = new PlayerHandler(self, 
Logging(context.system, "PlayerHandler")) + + val myself = "XXX" + val myport = 12345 startWith(Connecting, Data(RemoteConnection(Client, host, port, handler), Left(Nil))) @@ -54,7 +73,7 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { case Event(msg: ClientOp, Data(channel, Left(msgs))) ⇒ stay using Data(channel, Left(msg :: msgs)) case Event(Connected, Data(channel, Left(msgs))) ⇒ - val hello = Hello.newBuilder.setName(name).setHost(myself.getAddress.getHostAddress).setPort(myself.getPort).build + val hello = Hello.newBuilder.setName(name).setHost(myself).setPort(myport).build channel.write(Wrapper.newBuilder.setHello(hello).build) msgs.reverse foreach sendMsg(channel) goto(Connected) using Data(channel, Left(Nil)) @@ -62,23 +81,23 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { // System.exit(1) stop case Event(StateTimeout, _) ⇒ - EventHandler.error(this, "connect timeout to TestConductor") + log.error("connect timeout to TestConductor") // System.exit(1) stop } when(Connected) { case Event(Disconnected, _) ⇒ - EventHandler.info(this, "disconnected from TestConductor") + log.info("disconnected from TestConductor") throw new ConnectionFailure("disconnect") case Event(msg: EnterBarrier, Data(channel, _)) ⇒ sendMsg(channel)(msg) - stay using Data(channel, Right((msg.name, self.channel))) + stay using Data(channel, Right((msg.name, sender))) case Event(msg: Wrapper, Data(channel, Right((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ if (msg.hasBarrier) { val b = msg.getBarrier.getName if (b != barrier) { - sender.sendException(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + sender ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) } else { sender ! 
b } @@ -101,31 +120,30 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { } -class PlayerHandler(fsm: ActorRef) extends SimpleChannelUpstreamHandler { +class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { import ClientFSM._ override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel - EventHandler.debug(this, "connected to " + getAddrString(channel)) - while (!fsm.isRunning) Thread.sleep(100) + log.debug("connected to {}", getAddrString(channel)) fsm ! Connected } override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel - EventHandler.debug(this, "disconnected from " + getAddrString(channel)) - fsm.stop() + log.debug("disconnected from {}", getAddrString(channel)) + fsm ! PoisonPill } override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = { val channel = event.getChannel - EventHandler.debug(this, "message from " + getAddrString(channel) + ": " + event.getMessage) + log.debug("message from {}: {}", getAddrString(channel), event.getMessage) event.getMessage match { case msg: Wrapper if msg.getAllFields.size == 1 ⇒ fsm ! 
msg case msg ⇒ - EventHandler.info(this, "server " + getAddrString(channel) + " sent garbage '" + msg + "', disconnecting") + log.info("server {} sent garbage '{}', disconnecting", getAddrString(channel), msg) channel.close() } } From 5cf0fa66f803caffee4100507263ab87a8adf71a Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 3 May 2012 20:48:27 +0200 Subject: [PATCH 003/538] TestConductor: convert to Akka Extension and add test - make start-up synchronous and explicit for client and server - server can be queried for actual port, client requires explicit port - simple multi-jvm-test for verification of TestConductor barriers --- .../testconductor/TestConductorProtocol.java | 1230 +++++++++++++---- .../main/protocol/TestConductorProtocol.proto | 17 +- akka-remote/src/main/resources/reference.conf | 21 + .../remote/netty/NettyRemoteSupport.scala | 6 +- .../main/scala/akka/remote/netty/Server.scala | 2 +- .../akka/remote/testconductor/Conductor.scala | 63 +- .../akka/remote/testconductor/Extension.scala | 31 + .../akka/remote/testconductor/Features.scala | 10 + .../NetworkFailureInjector.scala | 4 +- .../akka/remote/testconductor/Player.scala | 68 +- .../akka/remote/testconductor/package.scala | 19 + .../testconductor/TestConductorSpec.scala | 52 + 12 files changed, 1170 insertions(+), 353 deletions(-) create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/package.scala create mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index e9065b53e4..0b2950018f 100644 --- a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -905,13 +905,10 @@ 
public final class TestConductorProtocol { boolean hasName(); String getName(); - // required string host = 2; - boolean hasHost(); - String getHost(); - - // required int32 port = 3; - boolean hasPort(); - int getPort(); + // required .Address address = 2; + boolean hasAddress(); + akka.remote.testconductor.TestConductorProtocol.Address getAddress(); + akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder(); } public static final class Hello extends com.google.protobuf.GeneratedMessage @@ -974,52 +971,22 @@ public final class TestConductorProtocol { } } - // required string host = 2; - public static final int HOST_FIELD_NUMBER = 2; - private java.lang.Object host_; - public boolean hasHost() { + // required .Address address = 2; + public static final int ADDRESS_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.Address address_; + public boolean hasAddress() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getHost() { - java.lang.Object ref = host_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - host_ = s; - } - return s; - } + public akka.remote.testconductor.TestConductorProtocol.Address getAddress() { + return address_; } - private com.google.protobuf.ByteString getHostBytes() { - java.lang.Object ref = host_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - host_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required int32 port = 3; - public static final int PORT_FIELD_NUMBER = 3; - private int port_; - public boolean hasPort() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getPort() { - return port_; + public 
akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder() { + return address_; } private void initFields() { name_ = ""; - host_ = ""; - port_ = 0; + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -1030,11 +997,11 @@ public final class TestConductorProtocol { memoizedIsInitialized = 0; return false; } - if (!hasHost()) { + if (!hasAddress()) { memoizedIsInitialized = 0; return false; } - if (!hasPort()) { + if (!getAddress().isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -1049,10 +1016,7 @@ public final class TestConductorProtocol { output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getHostBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt32(3, port_); + output.writeMessage(2, address_); } getUnknownFields().writeTo(output); } @@ -1069,11 +1033,7 @@ public final class TestConductorProtocol { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getHostBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(3, port_); + .computeMessageSize(2, address_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -1191,6 +1151,7 @@ public final class TestConductorProtocol { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getAddressFieldBuilder(); } } private static Builder create() { @@ -1201,10 +1162,12 @@ public final class TestConductorProtocol { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - host_ = ""; + if (addressBuilder_ == null) { + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } else { + 
addressBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); - port_ = 0; - bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -1250,11 +1213,11 @@ public final class TestConductorProtocol { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.host_ = host_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; + if (addressBuilder_ == null) { + result.address_ = address_; + } else { + result.address_ = addressBuilder_.build(); } - result.port_ = port_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1274,11 +1237,8 @@ public final class TestConductorProtocol { if (other.hasName()) { setName(other.getName()); } - if (other.hasHost()) { - setHost(other.getHost()); - } - if (other.hasPort()) { - setPort(other.getPort()); + if (other.hasAddress()) { + mergeAddress(other.getAddress()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -1289,11 +1249,11 @@ public final class TestConductorProtocol { return false; } - if (!hasHost()) { + if (!hasAddress()) { return false; } - if (!hasPort()) { + if (!getAddress().isInitialized()) { return false; } @@ -1329,13 +1289,12 @@ public final class TestConductorProtocol { break; } case 18: { - bitField0_ |= 0x00000002; - host_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - port_ = input.readInt32(); + akka.remote.testconductor.TestConductorProtocol.Address.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(); + if (hasAddress()) { + subBuilder.mergeFrom(getAddress()); + } + input.readMessage(subBuilder, extensionRegistry); + setAddress(subBuilder.buildPartial()); break; } } @@ -1380,62 +1339,95 @@ public final class TestConductorProtocol { onChanged(); } - // required string host = 2; - private java.lang.Object host_ = ""; - public boolean hasHost() { + // required .Address address = 2; + private 
akka.remote.testconductor.TestConductorProtocol.Address address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> addressBuilder_; + public boolean hasAddress() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getHost() { - java.lang.Object ref = host_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - host_ = s; - return s; + public akka.remote.testconductor.TestConductorProtocol.Address getAddress() { + if (addressBuilder_ == null) { + return address_; } else { - return (String) ref; + return addressBuilder_.getMessage(); } } - public Builder setHost(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - host_ = value; - onChanged(); - return this; - } - public Builder clearHost() { - bitField0_ = (bitField0_ & ~0x00000002); - host_ = getDefaultInstance().getHost(); - onChanged(); - return this; - } - void setHost(com.google.protobuf.ByteString value) { + public Builder setAddress(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + address_ = value; + onChanged(); + } else { + addressBuilder_.setMessage(value); + } bitField0_ |= 0x00000002; - host_ = value; - onChanged(); - } - - // required int32 port = 3; - private int port_ ; - public boolean hasPort() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getPort() { - return port_; - } - public Builder setPort(int value) { - bitField0_ |= 0x00000004; - port_ = value; - onChanged(); return this; } - public Builder clearPort() { - bitField0_ = (bitField0_ & ~0x00000004); - port_ = 0; - 
onChanged(); + public Builder setAddress( + akka.remote.testconductor.TestConductorProtocol.Address.Builder builderForValue) { + if (addressBuilder_ == null) { + address_ = builderForValue.build(); + onChanged(); + } else { + addressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; return this; } + public Builder mergeAddress(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addressBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + address_ != akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance()) { + address_ = + akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(address_).mergeFrom(value).buildPartial(); + } else { + address_ = value; + } + onChanged(); + } else { + addressBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearAddress() { + if (addressBuilder_ == null) { + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + onChanged(); + } else { + addressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.Address.Builder getAddressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getAddressFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder() { + if (addressBuilder_ != null) { + return addressBuilder_.getMessageOrBuilder(); + } else { + return address_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> + getAddressFieldBuilder() { + if (addressBuilder_ == null) { + addressBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, 
akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder>( + address_, + getParentForChildren(), + isClean()); + address_ = null; + } + return addressBuilder_; + } // @@protoc_insertion_point(builder_scope:Hello) } @@ -1831,6 +1823,658 @@ public final class TestConductorProtocol { // @@protoc_insertion_point(class_scope:EnterBarrier) } + public interface AddressOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string protocol = 1; + boolean hasProtocol(); + String getProtocol(); + + // required string system = 2; + boolean hasSystem(); + String getSystem(); + + // required string host = 3; + boolean hasHost(); + String getHost(); + + // required int32 port = 4; + boolean hasPort(); + int getPort(); + } + public static final class Address extends + com.google.protobuf.GeneratedMessage + implements AddressOrBuilder { + // Use Address.newBuilder() to construct. + private Address(Builder builder) { + super(builder); + } + private Address(boolean noInit) {} + + private static final Address defaultInstance; + public static Address getDefaultInstance() { + return defaultInstance; + } + + public Address getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Address_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Address_fieldAccessorTable; + } + + private int bitField0_; + // required string protocol = 1; + public static final int PROTOCOL_FIELD_NUMBER = 1; + private java.lang.Object protocol_; + public boolean hasProtocol() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getProtocol() { + java.lang.Object ref = protocol_; + if (ref instanceof 
String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + protocol_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getProtocolBytes() { + java.lang.Object ref = protocol_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + protocol_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string system = 2; + public static final int SYSTEM_FIELD_NUMBER = 2; + private java.lang.Object system_; + public boolean hasSystem() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getSystem() { + java.lang.Object ref = system_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + system_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getSystemBytes() { + java.lang.Object ref = system_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + system_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string host = 3; + public static final int HOST_FIELD_NUMBER = 3; + private java.lang.Object host_; + public boolean hasHost() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getHost() { + java.lang.Object ref = host_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + host_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString 
getHostBytes() { + java.lang.Object ref = host_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + host_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required int32 port = 4; + public static final int PORT_FIELD_NUMBER = 4; + private int port_; + public boolean hasPort() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getPort() { + return port_; + } + + private void initFields() { + protocol_ = ""; + system_ = ""; + host_ = ""; + port_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasProtocol()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSystem()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasHost()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPort()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getProtocolBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getSystemBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getHostBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt32(4, port_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getProtocolBytes()); + } + if (((bitField0_ & 0x00000002) 
== 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getSystemBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getHostBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, port_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static 
akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.Address parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.Address prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public 
static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Address_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_Address_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.Address.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + protocol_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + system_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + host_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.Address.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.Address getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.Address build() { + akka.remote.testconductor.TestConductorProtocol.Address result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; 
+ } + + private akka.remote.testconductor.TestConductorProtocol.Address buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.Address result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.Address buildPartial() { + akka.remote.testconductor.TestConductorProtocol.Address result = new akka.remote.testconductor.TestConductorProtocol.Address(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.protocol_ = protocol_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.system_ = system_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.host_ = host_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.port_ = port_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.Address) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.Address)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.Address other) { + if (other == akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance()) return this; + if (other.hasProtocol()) { + setProtocol(other.getProtocol()); + } + if (other.hasSystem()) { + setSystem(other.getSystem()); + } + if (other.hasHost()) { + setHost(other.getHost()); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return 
this; + } + + public final boolean isInitialized() { + if (!hasProtocol()) { + + return false; + } + if (!hasSystem()) { + + return false; + } + if (!hasHost()) { + + return false; + } + if (!hasPort()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + protocol_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + system_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + host_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + port_ = input.readInt32(); + break; + } + } + } + } + + private int bitField0_; + + // required string protocol = 1; + private java.lang.Object protocol_ = ""; + public boolean hasProtocol() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getProtocol() { + java.lang.Object ref = protocol_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + protocol_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setProtocol(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + protocol_ = value; + onChanged(); + return this; + } + public Builder clearProtocol() { + bitField0_ = (bitField0_ & ~0x00000001); + protocol_ = getDefaultInstance().getProtocol(); + 
onChanged(); + return this; + } + void setProtocol(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + protocol_ = value; + onChanged(); + } + + // required string system = 2; + private java.lang.Object system_ = ""; + public boolean hasSystem() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getSystem() { + java.lang.Object ref = system_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + system_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setSystem(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + system_ = value; + onChanged(); + return this; + } + public Builder clearSystem() { + bitField0_ = (bitField0_ & ~0x00000002); + system_ = getDefaultInstance().getSystem(); + onChanged(); + return this; + } + void setSystem(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + system_ = value; + onChanged(); + } + + // required string host = 3; + private java.lang.Object host_ = ""; + public boolean hasHost() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getHost() { + java.lang.Object ref = host_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + host_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setHost(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + host_ = value; + onChanged(); + return this; + } + public Builder clearHost() { + bitField0_ = (bitField0_ & ~0x00000004); + host_ = getDefaultInstance().getHost(); + onChanged(); + return this; + } + void setHost(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000004; + host_ = value; + onChanged(); + } + + // required int32 port = 4; + private int port_ ; + public boolean hasPort() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } 
+ public int getPort() { + return port_; + } + public Builder setPort(int value) { + bitField0_ |= 0x00000008; + port_ = value; + onChanged(); + return this; + } + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000008); + port_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Address) + } + + static { + defaultInstance = new Address(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Address) + } + public interface InjectFailureOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -1842,19 +2486,16 @@ public final class TestConductorProtocol { boolean hasDirection(); akka.remote.testconductor.TestConductorProtocol.Direction getDirection(); - // optional string host = 3; - boolean hasHost(); - String getHost(); + // optional .Address address = 3; + boolean hasAddress(); + akka.remote.testconductor.TestConductorProtocol.Address getAddress(); + akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder(); - // optional int32 port = 4; - boolean hasPort(); - int getPort(); - - // optional float rateMBit = 5; + // optional float rateMBit = 6; boolean hasRateMBit(); float getRateMBit(); - // optional int32 exitValue = 6; + // optional int32 exitValue = 7; boolean hasExitValue(); int getExitValue(); } @@ -1907,63 +2548,34 @@ public final class TestConductorProtocol { return direction_; } - // optional string host = 3; - public static final int HOST_FIELD_NUMBER = 3; - private java.lang.Object host_; - public boolean hasHost() { + // optional .Address address = 3; + public static final int ADDRESS_FIELD_NUMBER = 3; + private akka.remote.testconductor.TestConductorProtocol.Address address_; + public boolean hasAddress() { return ((bitField0_ & 0x00000004) == 0x00000004); } - public String getHost() { - java.lang.Object ref = host_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - 
(com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - host_ = s; - } - return s; - } + public akka.remote.testconductor.TestConductorProtocol.Address getAddress() { + return address_; } - private com.google.protobuf.ByteString getHostBytes() { - java.lang.Object ref = host_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - host_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder() { + return address_; } - // optional int32 port = 4; - public static final int PORT_FIELD_NUMBER = 4; - private int port_; - public boolean hasPort() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getPort() { - return port_; - } - - // optional float rateMBit = 5; - public static final int RATEMBIT_FIELD_NUMBER = 5; + // optional float rateMBit = 6; + public static final int RATEMBIT_FIELD_NUMBER = 6; private float rateMBit_; public boolean hasRateMBit() { - return ((bitField0_ & 0x00000010) == 0x00000010); + return ((bitField0_ & 0x00000008) == 0x00000008); } public float getRateMBit() { return rateMBit_; } - // optional int32 exitValue = 6; - public static final int EXITVALUE_FIELD_NUMBER = 6; + // optional int32 exitValue = 7; + public static final int EXITVALUE_FIELD_NUMBER = 7; private int exitValue_; public boolean hasExitValue() { - return ((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000010) == 0x00000010); } public int getExitValue() { return exitValue_; @@ -1972,8 +2584,7 @@ public final class TestConductorProtocol { private void initFields() { failure_ = akka.remote.testconductor.TestConductorProtocol.FailType.Throttle; direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; - host_ = ""; - port_ = 0; + address_ = 
akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); rateMBit_ = 0F; exitValue_ = 0; } @@ -1986,6 +2597,12 @@ public final class TestConductorProtocol { memoizedIsInitialized = 0; return false; } + if (hasAddress()) { + if (!getAddress().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -2000,16 +2617,13 @@ public final class TestConductorProtocol { output.writeEnum(2, direction_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getHostBytes()); + output.writeMessage(3, address_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt32(4, port_); + output.writeFloat(6, rateMBit_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeFloat(5, rateMBit_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeInt32(6, exitValue_); + output.writeInt32(7, exitValue_); } getUnknownFields().writeTo(output); } @@ -2030,19 +2644,15 @@ public final class TestConductorProtocol { } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getHostBytes()); + .computeMessageSize(3, address_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream - .computeInt32Size(4, port_); + .computeFloatSize(6, rateMBit_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream - .computeFloatSize(5, rateMBit_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(6, exitValue_); + .computeInt32Size(7, exitValue_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -2160,6 +2770,7 @@ public final class TestConductorProtocol { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getAddressFieldBuilder(); } } private 
static Builder create() { @@ -2172,14 +2783,16 @@ public final class TestConductorProtocol { bitField0_ = (bitField0_ & ~0x00000001); direction_ = akka.remote.testconductor.TestConductorProtocol.Direction.Send; bitField0_ = (bitField0_ & ~0x00000002); - host_ = ""; + if (addressBuilder_ == null) { + address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } else { + addressBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000004); - port_ = 0; - bitField0_ = (bitField0_ & ~0x00000008); rateMBit_ = 0F; - bitField0_ = (bitField0_ & ~0x00000010); + bitField0_ = (bitField0_ & ~0x00000008); exitValue_ = 0; - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -2229,18 +2842,18 @@ public final class TestConductorProtocol { if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - result.host_ = host_; + if (addressBuilder_ == null) { + result.address_ = address_; + } else { + result.address_ = addressBuilder_.build(); + } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } - result.port_ = port_; + result.rateMBit_ = rateMBit_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } - result.rateMBit_ = rateMBit_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } result.exitValue_ = exitValue_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -2264,11 +2877,8 @@ public final class TestConductorProtocol { if (other.hasDirection()) { setDirection(other.getDirection()); } - if (other.hasHost()) { - setHost(other.getHost()); - } - if (other.hasPort()) { - setPort(other.getPort()); + if (other.hasAddress()) { + mergeAddress(other.getAddress()); } if (other.hasRateMBit()) { setRateMBit(other.getRateMBit()); @@ -2285,6 +2895,12 @@ public final class TestConductorProtocol { return false; } + if (hasAddress()) { + if (!getAddress().isInitialized()) { + + return 
false; + } + } return true; } @@ -2334,22 +2950,21 @@ public final class TestConductorProtocol { break; } case 26: { - bitField0_ |= 0x00000004; - host_ = input.readBytes(); + akka.remote.testconductor.TestConductorProtocol.Address.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(); + if (hasAddress()) { + subBuilder.mergeFrom(getAddress()); + } + input.readMessage(subBuilder, extensionRegistry); + setAddress(subBuilder.buildPartial()); break; } - case 32: { + case 53: { bitField0_ |= 0x00000008; - port_ = input.readInt32(); - break; - } - case 45: { - bitField0_ |= 0x00000010; rateMBit_ = input.readFloat(); break; } - case 48: { - bitField0_ |= 0x00000020; + case 56: { + bitField0_ |= 0x00000010; exitValue_ = input.readInt32(); break; } @@ -2407,100 +3022,133 @@ public final class TestConductorProtocol { return this; } - // optional string host = 3; - private java.lang.Object host_ = ""; - public boolean hasHost() { + // optional .Address address = 3; + private akka.remote.testconductor.TestConductorProtocol.Address address_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> addressBuilder_; + public boolean hasAddress() { return ((bitField0_ & 0x00000004) == 0x00000004); } - public String getHost() { - java.lang.Object ref = host_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - host_ = s; - return s; + public akka.remote.testconductor.TestConductorProtocol.Address getAddress() { + if (addressBuilder_ == null) { + return address_; } else { - return (String) ref; + return addressBuilder_.getMessage(); } } - public Builder setHost(String value) { - if (value == null) { - throw new NullPointerException(); - } 
- bitField0_ |= 0x00000004; - host_ = value; - onChanged(); - return this; - } - public Builder clearHost() { - bitField0_ = (bitField0_ & ~0x00000004); - host_ = getDefaultInstance().getHost(); - onChanged(); - return this; - } - void setHost(com.google.protobuf.ByteString value) { + public Builder setAddress(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + address_ = value; + onChanged(); + } else { + addressBuilder_.setMessage(value); + } bitField0_ |= 0x00000004; - host_ = value; - onChanged(); - } - - // optional int32 port = 4; - private int port_ ; - public boolean hasPort() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getPort() { - return port_; - } - public Builder setPort(int value) { - bitField0_ |= 0x00000008; - port_ = value; - onChanged(); return this; } - public Builder clearPort() { - bitField0_ = (bitField0_ & ~0x00000008); - port_ = 0; - onChanged(); + public Builder setAddress( + akka.remote.testconductor.TestConductorProtocol.Address.Builder builderForValue) { + if (addressBuilder_ == null) { + address_ = builderForValue.build(); + onChanged(); + } else { + addressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; return this; } + public Builder mergeAddress(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addressBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + address_ != akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance()) { + address_ = + akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(address_).mergeFrom(value).buildPartial(); + } else { + address_ = value; + } + onChanged(); + } else { + addressBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder clearAddress() { + if (addressBuilder_ == null) { + address_ = 
akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + onChanged(); + } else { + addressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.Address.Builder getAddressBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getAddressFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddressOrBuilder() { + if (addressBuilder_ != null) { + return addressBuilder_.getMessageOrBuilder(); + } else { + return address_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> + getAddressFieldBuilder() { + if (addressBuilder_ == null) { + addressBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder>( + address_, + getParentForChildren(), + isClean()); + address_ = null; + } + return addressBuilder_; + } - // optional float rateMBit = 5; + // optional float rateMBit = 6; private float rateMBit_ ; public boolean hasRateMBit() { - return ((bitField0_ & 0x00000010) == 0x00000010); + return ((bitField0_ & 0x00000008) == 0x00000008); } public float getRateMBit() { return rateMBit_; } public Builder setRateMBit(float value) { - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000008; rateMBit_ = value; onChanged(); return this; } public Builder clearRateMBit() { - bitField0_ = (bitField0_ & ~0x00000010); + bitField0_ = (bitField0_ & ~0x00000008); rateMBit_ = 0F; onChanged(); return this; } - // optional int32 exitValue = 6; + // optional int32 exitValue = 7; private int exitValue_ ; public boolean hasExitValue() { - return 
((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000010) == 0x00000010); } public int getExitValue() { return exitValue_; } public Builder setExitValue(int value) { - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000010; exitValue_ = value; onChanged(); return this; } public Builder clearExitValue() { - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000010); exitValue_ = 0; onChanged(); return this; @@ -2532,6 +3180,11 @@ public final class TestConductorProtocol { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_EnterBarrier_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Address_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Address_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_InjectFailure_descriptor; private static @@ -2549,16 +3202,17 @@ public final class TestConductorProtocol { "\n\033TestConductorProtocol.proto\"a\n\007Wrapper" + "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" + "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" + - "tFailure\"1\n\005Hello\022\014\n\004name\030\001 \002(\t\022\014\n\004host\030" + - "\002 \002(\t\022\014\n\004port\030\003 \002(\005\"\034\n\014EnterBarrier\022\014\n\004n" + - "ame\030\001 \002(\t\"\213\001\n\rInjectFailure\022\032\n\007failure\030\001" + - " \002(\0162\t.FailType\022\035\n\tdirection\030\002 \001(\0162\n.Dir" + - "ection\022\014\n\004host\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\020\n\010ra" + - "teMBit\030\005 \001(\002\022\021\n\texitValue\030\006 \001(\005*A\n\010FailT" + - "ype\022\014\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abo", - "rt\020\003\022\014\n\010Shutdown\020\004*\"\n\tDirection\022\010\n\004Send\020" + - 
"\001\022\013\n\007Receive\020\002B\035\n\031akka.remote.testconduc" + - "torH\001" + "tFailure\"0\n\005Hello\022\014\n\004name\030\001 \002(\t\022\031\n\007addre" + + "ss\030\002 \002(\0132\010.Address\"\034\n\014EnterBarrier\022\014\n\004na" + + "me\030\001 \002(\t\"G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n" + + "\006system\030\002 \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(" + + "\005\"\212\001\n\rInjectFailure\022\032\n\007failure\030\001 \002(\0162\t.F" + + "ailType\022\035\n\tdirection\030\002 \001(\0162\n.Direction\022\031" + + "\n\007address\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 ", + "\001(\002\022\021\n\texitValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Th" + + "rottle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010S" + + "hutdown\020\004*\"\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Rece" + + "ive\020\002B\035\n\031akka.remote.testconductorH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -2578,7 +3232,7 @@ public final class TestConductorProtocol { internal_static_Hello_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Hello_descriptor, - new java.lang.String[] { "Name", "Host", "Port", }, + new java.lang.String[] { "Name", "Address", }, akka.remote.testconductor.TestConductorProtocol.Hello.class, akka.remote.testconductor.TestConductorProtocol.Hello.Builder.class); internal_static_EnterBarrier_descriptor = @@ -2589,12 +3243,20 @@ public final class TestConductorProtocol { new java.lang.String[] { "Name", }, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class); - internal_static_InjectFailure_descriptor = + internal_static_Address_descriptor = 
getDescriptor().getMessageTypes().get(3); + internal_static_Address_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Address_descriptor, + new java.lang.String[] { "Protocol", "System", "Host", "Port", }, + akka.remote.testconductor.TestConductorProtocol.Address.class, + akka.remote.testconductor.TestConductorProtocol.Address.Builder.class); + internal_static_InjectFailure_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_InjectFailure_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_InjectFailure_descriptor, - new java.lang.String[] { "Failure", "Direction", "Host", "Port", "RateMBit", "ExitValue", }, + new java.lang.String[] { "Failure", "Direction", "Address", "RateMBit", "ExitValue", }, akka.remote.testconductor.TestConductorProtocol.InjectFailure.class, akka.remote.testconductor.TestConductorProtocol.InjectFailure.Builder.class); return null; diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote/src/main/protocol/TestConductorProtocol.proto index 1db35a7516..213820e687 100644 --- a/akka-remote/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote/src/main/protocol/TestConductorProtocol.proto @@ -19,14 +19,20 @@ message Wrapper { message Hello { required string name = 1; - required string host = 2; - required int32 port = 3; + required Address address = 2; } message EnterBarrier { required string name = 1; } +message Address { + required string protocol = 1; + required string system = 2; + required string host = 3; + required int32 port = 4; +} + enum FailType { Throttle = 1; Disconnect = 2; @@ -40,9 +46,8 @@ enum Direction { message InjectFailure { required FailType failure = 1; optional Direction direction = 2; - optional string host = 3; - optional int32 port = 4; - optional float rateMBit = 5; - optional int32 exitValue = 6; + optional Address address = 3; + optional float rateMBit = 6; + 
optional int32 exitValue = 7; } diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 1438904fe2..384d00b55d 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -155,4 +155,25 @@ akka { type = PinnedDispatcher } } + + testconductor { + + # Timeout for joining a barrier: this is the maximum time any participants + # waits for everybody else to join a named barrier. + barrier-timeout = 30s + + # Timeout for interrogation of TestConductor’s Controller actor + query-timeout = 5s + + # Default port to start the conductor on; 0 means + port = 0 + + # Hostname of the TestConductor server, used by the server to bind to the IP + # and by the client to connect to it. + host = localhost + + # Name of the TestConductor client (for identification on the server e.g. for + # failure injection) + name = "noname" + } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 55e2d95636..c3a41f8275 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -56,13 +56,13 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor val server: NettyRemoteServer = try createServer() catch { case NonFatal(ex) ⇒ shutdown(); throw ex } /** - * Override this method to inject a subclass of NettyRemoteServer instead of + * Override this method to inject a subclass of NettyRemoteServer instead of * the normal one, e.g. for altering the pipeline. */ protected def createServer(): NettyRemoteServer = new NettyRemoteServer(this) - + /** - * Override this method to inject a subclass of RemoteClient instead of + * Override this method to inject a subclass of RemoteClient instead of * the normal one, e.g. for altering the pipeline. 
Get this transport’s * address from `this.address`. */ diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 97d3f194f3..ac4289e8ae 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -44,7 +44,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { b.setOption("reuseAddress", true) b } - + protected def makePipeline(): ChannelPipelineFactory = new RemoteServerPipelineFactory(openChannels, executionHandler, netty) @volatile diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala index 3265fc8808..c46e22eb9f 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -18,26 +18,30 @@ import akka.event.LoggingAdapter import akka.actor.PoisonPill import akka.event.Logging import scala.util.control.NoStackTrace +import akka.event.LoggingReceive +import akka.actor.Address +import java.net.InetSocketAddress -object Conductor extends RunControl with FailureInject with BarrierSync { - - val system = ActorSystem("conductor", ConfigFactory.load().getConfig("conductor")) - - object Settings { - val config = system.settings.config - - implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("barrier-timeout"), MILLISECONDS)) - implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("query-timeout"), MILLISECONDS)) - } +trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒ import Controller._ - private val controller = system.actorOf(Props[Controller], "controller") - controller ! 
ClientConnected + private var _controller: ActorRef = _ + private def controller: ActorRef = _controller match { + case null ⇒ throw new RuntimeException("TestConductorServer was not started") + case x ⇒ x + } - override def enter(name: String*) { + override def startController() { + if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") + _controller = system.actorOf(Props[Controller], "controller") import Settings.BarrierTimeout - name foreach (b ⇒ Await.result(controller ? EnterBarrier(b), Duration.Inf)) + startClient(Await.result(controller ? GetPort mapTo, Duration.Inf)) + } + + override def port: Int = { + import Settings.QueryTimeout + Await.result(controller ? GetPort mapTo, Duration.Inf) } override def throttle(node: String, target: String, direction: Direction, rateMBit: Float) { @@ -127,7 +131,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi case Event(msg: Wrapper, _) ⇒ if (msg.hasHello) { val hello = msg.getHello - controller ! ClientConnected(hello.getName, hello.getHost, hello.getPort) + controller ! 
ClientConnected(hello.getName, hello.getAddress) goto(Ready) } else { log.warning("client {} sent no Hello in first message, disconnecting", getAddrString(channel)) @@ -162,29 +166,28 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } object Controller { - case class ClientConnected(name: String, host: String, port: Int) + case class ClientConnected(name: String, address: Address) case class ClientDisconnected(name: String) case object GetNodes + case object GetPort - case class NodeInfo(name: String, host: String, port: Int, fsm: ActorRef) + case class NodeInfo(name: String, addr: Address, fsm: ActorRef) } class Controller extends Actor { import Controller._ - val config = context.system.settings.config - - val host = config.getString("akka.testconductor.host") - val port = config.getInt("akka.testconductor.port") - val connection = RemoteConnection(Server, host, port, + val settings = TestConductor().Settings + val connection = RemoteConnection(Server, settings.host, settings.port, new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler"))) val barrier = context.actorOf(Props[BarrierCoordinator], "barriers") var nodes = Map[String, NodeInfo]() - override def receive = { - case ClientConnected(name, host, port) ⇒ - nodes += name -> NodeInfo(name, host, port, sender) + override def receive = LoggingReceive { + case "ready?" ⇒ sender ! "yes" + case ClientConnected(name, addr) ⇒ + nodes += name -> NodeInfo(name, addr, sender) barrier forward ClientConnected case ClientConnected ⇒ barrier forward ClientConnected @@ -199,8 +202,7 @@ class Controller extends Actor { InjectFailure.newBuilder .setFailure(FailType.Throttle) .setDirection(TestConductorProtocol.Direction.valueOf(direction.toString)) - .setHost(t.host) - .setPort(t.port) + .setAddress(t.addr) .setRateMBit(rateMBit) .build nodes(node).fsm ! 
ServerFSM.Send(Wrapper.newBuilder.setFailure(throttle).build) @@ -209,8 +211,7 @@ class Controller extends Actor { val disconnect = InjectFailure.newBuilder .setFailure(if (abort) FailType.Abort else FailType.Disconnect) - .setHost(t.host) - .setPort(t.port) + .setAddress(t.addr) .build nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(disconnect).build) case Terminate(node, exitValueOrKill) ⇒ @@ -224,6 +225,10 @@ class Controller extends Actor { // case Remove(node) => // nodes -= node case GetNodes ⇒ sender ! nodes.keys + case GetPort ⇒ + sender ! (connection.getLocalAddress match { + case inet: InetSocketAddress ⇒ inet.getPort + }) } } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala new file mode 100644 index 0000000000..94847664c9 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala @@ -0,0 +1,31 @@ +package akka.remote.testconductor + +import akka.actor.ExtensionKey +import akka.actor.Extension +import akka.actor.ExtendedActorSystem +import akka.remote.RemoteActorRefProvider +import akka.actor.ActorContext +import akka.util.{ Duration, Timeout } +import java.util.concurrent.TimeUnit.MILLISECONDS + +object TestConductor extends ExtensionKey[TestConductorExt] { + def apply()(implicit ctx: ActorContext): TestConductorExt = apply(ctx.system) +} + +class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player { + + object Settings { + val config = system.settings.config + + implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.barrier-timeout"), MILLISECONDS)) + implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.query-timeout"), MILLISECONDS)) + + val name = config.getString("akka.testconductor.name") + val host = config.getString("akka.testconductor.host") + val port = 
config.getInt("akka.testconductor.port") + } + + val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport + val address = transport.address + +} \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala index 399b58337b..930be600c2 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala @@ -49,6 +49,16 @@ trait FailureInject { trait RunControl { + /** + * Start the server port. + */ + def startController(): Unit + + /** + * Get the actual port used by the server. + */ + def port: Int + /** * Tell the remote node to shut itself down using System.exit with the given * exitValue. diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index 88102b5e86..6569d81acc 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -25,11 +25,11 @@ import akka.actor.Props object NetworkFailureInjector { - val channels = new Index[Address, Channel](16, (c1, c2) => c1 compareTo c2) + val channels = new Index[Address, Channel](16, (c1, c2) ⇒ c1 compareTo c2) def close(remote: Address): Unit = { // channels will be cleaned up by the handler - for (chs <- channels.remove(remote); c <- chs) c.close() + for (chs ← channels.remove(remote); c ← chs) c.close() } } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala index 029045394c..93aa6bc33d 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala @@ -21,23 +21,40 @@ 
import akka.event.LoggingAdapter import akka.actor.PoisonPill import akka.event.Logging -object Player extends BarrierSync { +trait Player extends BarrierSync { this: TestConductorExt ⇒ - val system = ActorSystem("Player", ConfigFactory.load().getConfig("player")) - - object Settings { - val config = system.settings.config - - implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("barrier-timeout"), MILLISECONDS)) + private var _client: ActorRef = _ + private def client = _client match { + case null ⇒ throw new IllegalStateException("TestConductor client not yet started") + case x ⇒ x } - private val server = system.actorOf(Props[ClientFSM], "client") + def startClient(port: Int) { + import ClientFSM._ + import akka.actor.FSM._ + import Settings.BarrierTimeout + + if (_client ne null) throw new IllegalStateException("TestConductorClient already started") + _client = system.actorOf(Props(new ClientFSM(port)), "TestConductorClient") + val a = system.actorOf(Props(new Actor { + var waiting: ActorRef = _ + def receive = { + case fsm: ActorRef ⇒ waiting = sender; fsm ! SubscribeTransitionCallBack(self) + case Transition(_, Connecting, Connected) ⇒ waiting ! "okay" + case t: Transition[_] ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t)) + case CurrentState(_, Connected) ⇒ waiting ! "okay" + case _: CurrentState[_] ⇒ + } + })) + + Await.result(a ? client, Duration.Inf) + } override def enter(name: String*) { system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) name foreach { b ⇒ import Settings.BarrierTimeout - Await.result(server ? EnterBarrier(b), Duration.Inf) + Await.result(client ? 
EnterBarrier(b), Duration.Inf) system.log.debug("passed barrier {}", b) } } @@ -48,35 +65,28 @@ object ClientFSM { case object Connecting extends State case object Connected extends State - case class Data(channel: Channel, msg: Either[List[ClientOp], (String, ActorRef)]) + case class Data(channel: Channel, barrier: Option[(String, ActorRef)]) class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace case object Disconnected } -class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { +class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ - val config = context.system.settings.config + val settings = TestConductor().Settings - val name = config.getString("akka.testconductor.name") - val host = config.getString("akka.testconductor.host") - val port = config.getInt("akka.testconductor.port") val handler = new PlayerHandler(self, Logging(context.system, "PlayerHandler")) - val myself = "XXX" - val myport = 12345 - - startWith(Connecting, Data(RemoteConnection(Client, host, port, handler), Left(Nil))) + startWith(Connecting, Data(RemoteConnection(Client, settings.host, port, handler), None)) when(Connecting, stateTimeout = 10 seconds) { - case Event(msg: ClientOp, Data(channel, Left(msgs))) ⇒ - stay using Data(channel, Left(msg :: msgs)) - case Event(Connected, Data(channel, Left(msgs))) ⇒ - val hello = Hello.newBuilder.setName(name).setHost(myself).setPort(myport).build + case Event(msg: ClientOp, _) ⇒ + stay replying Status.Failure(new IllegalStateException("not connected yet")) + case Event(Connected, d @ Data(channel, _)) ⇒ + val hello = Hello.newBuilder.setName(settings.name).setAddress(TestConductor().address).build channel.write(Wrapper.newBuilder.setHello(hello).build) - msgs.reverse foreach sendMsg(channel) - goto(Connected) using Data(channel, Left(Nil)) + goto(Connected) case Event(_: ConnectionFailure, _) ⇒ // System.exit(1) stop @@ -92,8 +102,8 @@ 
class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { throw new ConnectionFailure("disconnect") case Event(msg: EnterBarrier, Data(channel, _)) ⇒ sendMsg(channel)(msg) - stay using Data(channel, Right((msg.name, sender))) - case Event(msg: Wrapper, Data(channel, Right((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ + stay using Data(channel, Some(msg.name, sender)) + case Event(msg: Wrapper, Data(channel, Some((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ if (msg.hasBarrier) { val b = msg.getBarrier.getName if (b != barrier) { @@ -102,7 +112,7 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { sender ! b } } - stay using Data(channel, Left(Nil)) + stay using Data(channel, None) } onTermination { @@ -110,6 +120,8 @@ class ClientFSM extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { channel.close() } + initialize + private def sendMsg(channel: Channel)(msg: ClientOp) { msg match { case EnterBarrier(name) ⇒ diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/package.scala b/akka-remote/src/main/scala/akka/remote/testconductor/package.scala new file mode 100644 index 0000000000..8ebeea90a9 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/package.scala @@ -0,0 +1,19 @@ +package akka.remote + +import akka.actor.Address +import testconductor.{ TestConductorProtocol ⇒ TCP } + +package object testconductor { + + implicit def address2proto(addr: Address): TCP.Address = + TCP.Address.newBuilder + .setProtocol(addr.protocol) + .setSystem(addr.system) + .setHost(addr.host.get) + .setPort(addr.port.get) + .build + + implicit def address2scala(addr: TCP.Address): Address = + Address(addr.getProtocol, addr.getSystem, addr.getHost, addr.getPort) + +} \ No newline at end of file diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala new 
file mode 100644 index 0000000000..cae2917577 --- /dev/null +++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -0,0 +1,52 @@ +package akka.remote.testconductor + +import akka.remote.AkkaRemoteSpec +import com.typesafe.config.ConfigFactory +import akka.remote.AbstractRemoteActorMultiJvmSpec + +object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { + override def NrOfNodes = 2 + override def commonConfig = ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.actor.provider = akka.remote.RemoteActorRefProvider + akka.actor.debug { + receive = on + fsm = on + } + akka.testconductor { + host = localhost + port = 4712 + } + """) + def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) +} + +import TestConductorMultiJvmSpec._ + +class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(nameConfig(0)) { + + val nodes = TestConductorMultiJvmSpec.NrOfNodes + + "running a test" in { + val tc = TestConductor(system) + tc.startController() + barrier("start") + barrier("first") + tc.enter("begin") + barrier("end") + } +} + +class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(nameConfig(1)) { + + val nodes = TestConductorMultiJvmSpec.NrOfNodes + + "running a test" in { + barrier("start") + val tc = TestConductor(system) + tc.startClient(4712) + barrier("first") + tc.enter("begin") + barrier("end") + } +} From 0314b9abbbeac4a0c8f72dea6fb866eb29cb3847 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 4 May 2012 22:30:00 +0200 Subject: [PATCH 004/538] fix bug in FSM when manually rescheduling non-recurring timer, see #2043 --- akka-actor/src/main/scala/akka/actor/FSM.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index b277142e76..81126c4d8d 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ 
b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -443,10 +443,10 @@ trait FSM[S, D] extends Listeners { timeoutFuture = None } generation += 1 - processMsg(msg, t) if (!repeat) { timers -= name } + processMsg(msg, t) } case SubscribeTransitionCallBack(actorRef) ⇒ // TODO use DeathWatch to clean up list From 9266ac451b4d938f972f943e352b9da9329ca226 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 4 May 2012 22:33:08 +0200 Subject: [PATCH 005/538] integrate NetworkFailureInjector and add first test - rework socket pipeline to transform protobuf into case classes and back - introduce NetworkOp messages for that purpose - make API asynchronous (because it is, really) and add Done notification for all server operations; enter(...) is still synchronous, because that is its only purpose in life - factor out mkPipeline in NettyRemoteTransport, enabling the very slick TestConductorTransport (essentially a one-liner) - switch NetworkFailureInjector from Channel{Up,Down}streamHandler to subclassing SimpleChannelHandler, because otherwise deadlocks occurred, not sure why (but SCH is the recommended way from the netty docs, so there may well be a reason) --- .../testconductor/TestConductorProtocol.java | 126 +++++++++++++-- .../main/protocol/TestConductorProtocol.proto | 2 + .../main/scala/akka/remote/netty/Client.scala | 31 +--- .../remote/netty/NettyRemoteSupport.scala | 65 ++++++-- .../main/scala/akka/remote/netty/Server.scala | 26 +-- .../akka/remote/testconductor/Conductor.scala | 151 +++++++++--------- .../akka/remote/testconductor/DataTypes.scala | 77 ++++++++- .../akka/remote/testconductor/Extension.scala | 7 + .../akka/remote/testconductor/Features.scala | 33 ++-- .../NetworkFailureInjector.scala | 116 ++++++++------ .../akka/remote/testconductor/Player.scala | 69 ++++---- .../testconductor/RemoteConnection.scala | 3 +- .../akka/remote/testconductor/package.scala | 12 ++ .../AbstractRemoteActorMultiJvmSpec.scala | 2 +- .../testconductor/TestConductorSpec.scala | 87 
++++++++-- 15 files changed, 538 insertions(+), 269 deletions(-) diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index 0b2950018f..f112a1b0c2 100644 --- a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -87,10 +87,12 @@ public final class TestConductorProtocol { implements com.google.protobuf.ProtocolMessageEnum { Send(0, 1), Receive(1, 2), + Both(2, 3), ; public static final int Send_VALUE = 1; public static final int Receive_VALUE = 2; + public static final int Both_VALUE = 3; public final int getNumber() { return value; } @@ -99,6 +101,7 @@ public final class TestConductorProtocol { switch (value) { case 1: return Send; case 2: return Receive; + case 3: return Both; default: return null; } } @@ -129,7 +132,7 @@ public final class TestConductorProtocol { } private static final Direction[] VALUES = { - Send, Receive, + Send, Receive, Both, }; public static Direction valueOf( @@ -169,6 +172,10 @@ public final class TestConductorProtocol { boolean hasFailure(); akka.remote.testconductor.TestConductorProtocol.InjectFailure getFailure(); akka.remote.testconductor.TestConductorProtocol.InjectFailureOrBuilder getFailureOrBuilder(); + + // optional string done = 4; + boolean hasDone(); + String getDone(); } public static final class Wrapper extends com.google.protobuf.GeneratedMessage @@ -238,10 +245,43 @@ public final class TestConductorProtocol { return failure_; } + // optional string done = 4; + public static final int DONE_FIELD_NUMBER = 4; + private java.lang.Object done_; + public boolean hasDone() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public String getDone() { + java.lang.Object ref = done_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + 
(com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + done_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getDoneBytes() { + java.lang.Object ref = done_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + done_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private void initFields() { hello_ = akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); + done_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -282,6 +322,9 @@ public final class TestConductorProtocol { if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, failure_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getDoneBytes()); + } getUnknownFields().writeTo(output); } @@ -303,6 +346,10 @@ public final class TestConductorProtocol { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, failure_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getDoneBytes()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -448,6 +495,8 @@ public final class TestConductorProtocol { failureBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); + done_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -510,6 +559,10 @@ public final class TestConductorProtocol { } else { result.failure_ = failureBuilder_.build(); } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.done_ = done_; result.bitField0_ = 
to_bitField0_; onBuilt(); return result; @@ -535,6 +588,9 @@ public final class TestConductorProtocol { if (other.hasFailure()) { mergeFailure(other.getFailure()); } + if (other.hasDone()) { + setDone(other.getDone()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -611,6 +667,11 @@ public final class TestConductorProtocol { setFailure(subBuilder.buildPartial()); break; } + case 34: { + bitField0_ |= 0x00000008; + done_ = input.readBytes(); + break; + } } } } @@ -887,6 +948,42 @@ public final class TestConductorProtocol { return failureBuilder_; } + // optional string done = 4; + private java.lang.Object done_ = ""; + public boolean hasDone() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public String getDone() { + java.lang.Object ref = done_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + done_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setDone(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + done_ = value; + onChanged(); + return this; + } + public Builder clearDone() { + bitField0_ = (bitField0_ & ~0x00000008); + done_ = getDefaultInstance().getDone(); + onChanged(); + return this; + } + void setDone(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000008; + done_ = value; + onChanged(); + } + // @@protoc_insertion_point(builder_scope:Wrapper) } @@ -3199,20 +3296,21 @@ public final class TestConductorProtocol { descriptor; static { java.lang.String[] descriptorData = { - "\n\033TestConductorProtocol.proto\"a\n\007Wrapper" + + "\n\033TestConductorProtocol.proto\"o\n\007Wrapper" + "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" + "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" + - "tFailure\"0\n\005Hello\022\014\n\004name\030\001 \002(\t\022\031\n\007addre" + - "ss\030\002 
\002(\0132\010.Address\"\034\n\014EnterBarrier\022\014\n\004na" + - "me\030\001 \002(\t\"G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n" + - "\006system\030\002 \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(" + - "\005\"\212\001\n\rInjectFailure\022\032\n\007failure\030\001 \002(\0162\t.F" + - "ailType\022\035\n\tdirection\030\002 \001(\0162\n.Direction\022\031" + - "\n\007address\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 ", - "\001(\002\022\021\n\texitValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Th" + - "rottle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010S" + - "hutdown\020\004*\"\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Rece" + - "ive\020\002B\035\n\031akka.remote.testconductorH\001" + "tFailure\022\014\n\004done\030\004 \001(\t\"0\n\005Hello\022\014\n\004name\030" + + "\001 \002(\t\022\031\n\007address\030\002 \002(\0132\010.Address\"\034\n\014Ente" + + "rBarrier\022\014\n\004name\030\001 \002(\t\"G\n\007Address\022\020\n\010pro" + + "tocol\030\001 \002(\t\022\016\n\006system\030\002 \002(\t\022\014\n\004host\030\003 \002(" + + "\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInjectFailure\022\032\n\007fai" + + "lure\030\001 \002(\0162\t.FailType\022\035\n\tdirection\030\002 \001(\016" + + "2\n.Direction\022\031\n\007address\030\003 \001(\0132\010.Address\022", + "\020\n\010rateMBit\030\006 \001(\002\022\021\n\texitValue\030\007 \001(\005*A\n\010" + + "FailType\022\014\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t" + + "\n\005Abort\020\003\022\014\n\010Shutdown\020\004*,\n\tDirection\022\010\n\004" + + "Send\020\001\022\013\n\007Receive\020\002\022\010\n\004Both\020\003B\035\n\031akka.re" + + "mote.testconductorH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -3224,7 +3322,7 @@ public final class 
TestConductorProtocol { internal_static_Wrapper_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Wrapper_descriptor, - new java.lang.String[] { "Hello", "Barrier", "Failure", }, + new java.lang.String[] { "Hello", "Barrier", "Failure", "Done", }, akka.remote.testconductor.TestConductorProtocol.Wrapper.class, akka.remote.testconductor.TestConductorProtocol.Wrapper.Builder.class); internal_static_Hello_descriptor = diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote/src/main/protocol/TestConductorProtocol.proto index 213820e687..e483bf4f01 100644 --- a/akka-remote/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote/src/main/protocol/TestConductorProtocol.proto @@ -15,6 +15,7 @@ message Wrapper { optional Hello hello = 1; optional EnterBarrier barrier = 2; optional InjectFailure failure = 3; + optional string done = 4; } message Hello { @@ -42,6 +43,7 @@ enum FailType { enum Direction { Send = 1; Receive = 2; + Both = 3; } message InjectFailure { required FailType failure = 1; diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index a0e91398fc..cf143650bc 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -112,8 +112,6 @@ class ActiveRemoteClient private[akka] ( private var connection: ChannelFuture = _ @volatile private[remote] var openChannels: DefaultChannelGroup = _ - @volatile - private var executionHandler: ExecutionHandler = _ @volatile private var reconnectionTimeWindowStart = 0L @@ -156,9 +154,8 @@ class ActiveRemoteClient private[akka] ( runSwitch switchOn { openChannels = new DefaultDisposableChannelGroup(classOf[RemoteClient].getName) - executionHandler = new ExecutionHandler(netty.executor) val b = new ClientBootstrap(netty.clientChannelFactory) - b.setPipelineFactory(new 
ActiveRemoteClientPipelineFactory(name, b, executionHandler, remoteAddress, localAddress, this)) + b.setPipelineFactory(netty.mkPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), true)) b.setOption("tcpNoDelay", true) b.setOption("keepAlive", true) b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis) @@ -206,7 +203,6 @@ class ActiveRemoteClient private[akka] ( if (openChannels ne null) openChannels.close.awaitUninterruptibly() } finally { connection = null - executionHandler = null } } @@ -319,31 +315,6 @@ class ActiveRemoteClientHandler( } } -class ActiveRemoteClientPipelineFactory( - name: String, - bootstrap: ClientBootstrap, - executionHandler: ExecutionHandler, - remoteAddress: Address, - localAddress: Address, - client: ActiveRemoteClient) extends ChannelPipelineFactory { - - import client.netty.settings - - def getPipeline: ChannelPipeline = { - val timeout = new IdleStateHandler(client.netty.timer, - settings.ReadTimeout.toSeconds.toInt, - settings.WriteTimeout.toSeconds.toInt, - settings.AllTimeout.toSeconds.toInt) - val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) - val lenPrep = new LengthFieldPrepender(4) - val messageDec = new RemoteMessageDecoder - val messageEnc = new RemoteMessageEncoder(client.netty) - val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, localAddress, client.netty.timer, client) - - new StaticChannelPipeline(timeout, lenDec, messageDec, lenPrep, messageEnc, executionHandler, remoteClient) - } -} - class PassiveRemoteClient(val currentChannel: Channel, netty: NettyRemoteTransport, remoteAddress: Address) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index c3a41f8275..35ef3bf7fd 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ 
b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -22,6 +22,13 @@ import akka.event.Logging import akka.remote.RemoteProtocol.AkkaRemoteProtocol import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteSettings, RemoteMarshallingOps, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } import akka.util.NonFatal +import org.jboss.netty.channel.StaticChannelPipeline +import org.jboss.netty.channel.ChannelHandler +import org.jboss.netty.handler.codec.frame.LengthFieldPrepender +import org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder +import org.jboss.netty.handler.timeout.IdleStateHandler +import org.jboss.netty.channel.ChannelPipelineFactory +import org.jboss.netty.handler.execution.ExecutionHandler /** * Provides the implementation of the Netty remote support @@ -34,20 +41,54 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor // TODO replace by system.scheduler val timer: HashedWheelTimer = new HashedWheelTimer(system.threadFactory) - // TODO make configurable - lazy val executor = new OrderedMemoryAwareThreadPoolExecutor( - settings.ExecutionPoolSize, - settings.MaxChannelMemorySize, - settings.MaxTotalMemorySize, - settings.ExecutionPoolKeepalive.length, - settings.ExecutionPoolKeepalive.unit, - system.threadFactory) - // TODO make configurable/shareable with server socket factory val clientChannelFactory = new NioClientSocketChannelFactory( Executors.newCachedThreadPool(system.threadFactory), Executors.newCachedThreadPool(system.threadFactory)) + object PipelineFactory { + def apply(handlers: Seq[ChannelHandler]): StaticChannelPipeline = new StaticChannelPipeline(handlers: _*) + def apply(endpoint: ⇒ Seq[ChannelHandler], withTimeout: Boolean): ChannelPipelineFactory = + new ChannelPipelineFactory { + def getPipeline = apply(defaultStack(withTimeout) ++ endpoint) + } + + def defaultStack(withTimeout: Boolean): Seq[ChannelHandler] = + (if (withTimeout) timeout :: Nil 
else Nil) ::: + msgFormat ::: + authenticator ::: + executionHandler :: + Nil + + def timeout = new IdleStateHandler(timer, + settings.ReadTimeout.toSeconds.toInt, + settings.WriteTimeout.toSeconds.toInt, + settings.AllTimeout.toSeconds.toInt) + + def msgFormat = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) :: + new LengthFieldPrepender(4) :: + new RemoteMessageDecoder :: + new RemoteMessageEncoder(NettyRemoteTransport.this) :: + Nil + + val executionHandler = new ExecutionHandler(new OrderedMemoryAwareThreadPoolExecutor( + settings.ExecutionPoolSize, + settings.MaxChannelMemorySize, + settings.MaxTotalMemorySize, + settings.ExecutionPoolKeepalive.length, + settings.ExecutionPoolKeepalive.unit, + system.threadFactory)) + + def authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil + } + + /** + * This method is factored out to provide an extension point in case the + * pipeline shall be changed. 
It is recommended to use + */ + def mkPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory = + PipelineFactory(Seq(endpoint), withTimeout) + private val remoteClients = new HashMap[Address, RemoteClient] private val clientsLock = new ReentrantReadWriteLock @@ -105,11 +146,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor try { timer.stop() } finally { - try { - clientChannelFactory.releaseExternalResources() - } finally { - executor.shutdown() - } + clientChannelFactory.releaseExternalResources() } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index ac4289e8ae..f9d4ede1d8 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -30,14 +30,12 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { Executors.newCachedThreadPool(netty.system.threadFactory), Executors.newCachedThreadPool(netty.system.threadFactory)) - private val executionHandler = new ExecutionHandler(netty.executor) - // group of open channels, used for clean-up private val openChannels: ChannelGroup = new DefaultDisposableChannelGroup("akka-remote-server") private val bootstrap = { val b = new ServerBootstrap(factory) - b.setPipelineFactory(makePipeline()) + b.setPipelineFactory(netty.mkPipeline(new RemoteServerHandler(openChannels, netty), false)) b.setOption("backlog", settings.Backlog) b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) @@ -45,8 +43,6 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { b } - protected def makePipeline(): ChannelPipelineFactory = new RemoteServerPipelineFactory(openChannels, executionHandler, netty) - @volatile private[akka] var channel: Channel = _ @@ -79,26 +75,6 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { } } -class RemoteServerPipelineFactory( - val openChannels: ChannelGroup, - 
val executionHandler: ExecutionHandler, - val netty: NettyRemoteTransport) extends ChannelPipelineFactory { - - import netty.settings - - def getPipeline: ChannelPipeline = { - val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) - val lenPrep = new LengthFieldPrepender(4) - val messageDec = new RemoteMessageDecoder - val messageEnc = new RemoteMessageEncoder(netty) - - val authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil - val remoteServer = new RemoteServerHandler(openChannels, netty) - val stages: List[ChannelHandler] = lenDec :: messageDec :: lenPrep :: messageEnc :: executionHandler :: authenticator ::: remoteServer :: Nil - new StaticChannelPipeline(stages: _*) - } -} - @ChannelHandler.Sharable class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler { val authenticated = new AnyRef diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala index c46e22eb9f..c9cbeadf83 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -21,6 +21,7 @@ import scala.util.control.NoStackTrace import akka.event.LoggingReceive import akka.actor.Address import java.net.InetSocketAddress +import akka.dispatch.Future trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒ @@ -32,55 +33,63 @@ trait Conductor extends RunControl with FailureInject { this: TestConductorExt case x ⇒ x } - override def startController() { + override def startController(): Future[Int] = { if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") _controller = system.actorOf(Props[Controller], "controller") import Settings.BarrierTimeout - startClient(Await.result(controller ? 
GetPort mapTo, Duration.Inf)) + controller ? GetPort flatMap { case port: Int ⇒ startClient(port) map (_ ⇒ port) } } - override def port: Int = { + override def port: Future[Int] = { import Settings.QueryTimeout - Await.result(controller ? GetPort mapTo, Duration.Inf) + controller ? GetPort mapTo } - override def throttle(node: String, target: String, direction: Direction, rateMBit: Float) { - controller ! Throttle(node, target, direction, rateMBit) - } - - override def blackhole(node: String, target: String, direction: Direction) { - controller ! Throttle(node, target, direction, 0f) - } - - override def disconnect(node: String, target: String) { - controller ! Disconnect(node, target, false) - } - - override def abort(node: String, target: String) { - controller ! Disconnect(node, target, true) - } - - override def shutdown(node: String, exitValue: Int) { - controller ! Terminate(node, exitValue) - } - - override def kill(node: String) { - controller ! Terminate(node, -1) - } - - override def getNodes = { + override def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] = { import Settings.QueryTimeout - Await.result(controller ? GetNodes mapTo manifest[List[String]], Duration.Inf) + controller ? Throttle(node, target, direction, rateMBit.toFloat) mapTo } - override def removeNode(node: String) { - controller ! Remove(node) + override def blackhole(node: String, target: String, direction: Direction): Future[Done] = { + import Settings.QueryTimeout + controller ? Throttle(node, target, direction, 0f) mapTo + } + + override def disconnect(node: String, target: String): Future[Done] = { + import Settings.QueryTimeout + controller ? Disconnect(node, target, false) mapTo + } + + override def abort(node: String, target: String): Future[Done] = { + import Settings.QueryTimeout + controller ? 
Disconnect(node, target, true) mapTo + } + + override def shutdown(node: String, exitValue: Int): Future[Done] = { + import Settings.QueryTimeout + controller ? Terminate(node, exitValue) mapTo + } + + override def kill(node: String): Future[Done] = { + import Settings.QueryTimeout + controller ? Terminate(node, -1) mapTo + } + + override def getNodes: Future[List[String]] = { + import Settings.QueryTimeout + controller ? GetNodes mapTo + } + + override def removeNode(node: String): Future[Done] = { + import Settings.QueryTimeout + controller ? Remove(node) mapTo } } class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { + @volatile var clients = Map[Channel, ActorRef]() override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { @@ -102,7 +111,7 @@ class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAd val channel = event.getChannel log.debug("message from {}: {}", getAddrString(channel), event.getMessage) event.getMessage match { - case msg: Wrapper if msg.getAllFields.size == 1 ⇒ + case msg: NetworkOp ⇒ clients(channel) ! msg case msg ⇒ log.info("client {} sent garbage '{}', disconnecting", getAddrString(channel), msg) @@ -116,28 +125,26 @@ object ServerFSM { sealed trait State case object Initial extends State case object Ready extends State - - case class Send(msg: Wrapper) } -class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Null] { +class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] { import ServerFSM._ import akka.actor.FSM._ import Controller._ - startWith(Initial, null) + startWith(Initial, None) when(Initial, stateTimeout = 10 seconds) { - case Event(msg: Wrapper, _) ⇒ - if (msg.hasHello) { - val hello = msg.getHello - controller ! 
ClientConnected(hello.getName, hello.getAddress) - goto(Ready) - } else { - log.warning("client {} sent no Hello in first message, disconnecting", getAddrString(channel)) - channel.close() - stop() - } + case Event(Hello(name, addr), _) ⇒ + controller ! ClientConnected(name, addr) + goto(Ready) + case Event(x: NetworkOp, _) ⇒ + log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x) + channel.close() + stop() + case Event(Send(msg), _) ⇒ + log.warning("cannot send {} in state Initial", msg) + stay case Event(StateTimeout, _) ⇒ log.info("closing channel to {} because of Hello timeout", getAddrString(channel)) channel.close() @@ -145,20 +152,24 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } when(Ready) { - case Event(msg: Wrapper, _) ⇒ - if (msg.hasBarrier) { - val barrier = msg.getBarrier - controller ! EnterBarrier(barrier.getName) - } else { - log.warning("client {} sent unsupported message {}", getAddrString(channel), msg) - } + case Event(msg: EnterBarrier, _) ⇒ + controller ! msg stay - case Event(Send(msg), _) ⇒ + case Event(d: Done, Some(s)) ⇒ + s ! d + stay using None + case Event(msg: NetworkOp, _) ⇒ + log.warning("client {} sent unsupported message {}", getAddrString(channel), msg) + channel.close() + stop() + case Event(Send(msg: EnterBarrier), _) ⇒ channel.write(msg) stay - case Event(EnterBarrier(name), _) ⇒ - val barrier = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build - channel.write(Wrapper.newBuilder.setBarrier(barrier).build) + case Event(Send(msg), None) ⇒ + channel.write(msg) + stay using Some(sender) + case Event(Send(msg), _) ⇒ + log.warning("cannot send {} while waiting for previous ACK", msg) stay } @@ -185,7 +196,6 @@ class Controller extends Actor { var nodes = Map[String, NodeInfo]() override def receive = LoggingReceive { - case "ready?" ⇒ sender ! 
"yes" case ClientConnected(name, addr) ⇒ nodes += name -> NodeInfo(name, addr, sender) barrier forward ClientConnected @@ -198,28 +208,15 @@ class Controller extends Actor { barrier forward e case Throttle(node, target, direction, rateMBit) ⇒ val t = nodes(target) - val throttle = - InjectFailure.newBuilder - .setFailure(FailType.Throttle) - .setDirection(TestConductorProtocol.Direction.valueOf(direction.toString)) - .setAddress(t.addr) - .setRateMBit(rateMBit) - .build - nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(throttle).build) + nodes(node).fsm forward Send(ThrottleMsg(t.addr, direction, rateMBit)) case Disconnect(node, target, abort) ⇒ val t = nodes(target) - val disconnect = - InjectFailure.newBuilder - .setFailure(if (abort) FailType.Abort else FailType.Disconnect) - .setAddress(t.addr) - .build - nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(disconnect).build) + nodes(node).fsm forward Send(DisconnectMsg(t.addr, abort)) case Terminate(node, exitValueOrKill) ⇒ if (exitValueOrKill < 0) { // TODO: kill via SBT } else { - val shutdown = InjectFailure.newBuilder.setFailure(FailType.Shutdown).setExitValue(exitValueOrKill).build - nodes(node).fsm ! ServerFSM.Send(Wrapper.newBuilder.setFailure(shutdown).build) + nodes(node).fsm forward Send(TerminateMsg(exitValueOrKill)) } // TODO: properly remove node from BarrierCoordinator // case Remove(node) => @@ -269,7 +266,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, if (name != barrier) throw new IllegalStateException("trying enter barrier '" + name + "' while barrier '" + barrier + "' is active") val together = sender :: arrived if (together.size == num) { - together foreach (_ ! e) + together foreach (_ ! 
Send(e)) goto(Idle) using Data(num, "", Nil) } else { stay using d.copy(arrived = together) @@ -280,7 +277,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, val expected = num - 1 if (arrived.size == expected) { val e = EnterBarrier(barrier) - sender :: arrived foreach (_ ! e) + sender :: arrived foreach (_ ! Send(e)) goto(Idle) using Data(expected, "", Nil) } else { stay using d.copy(clients = expected) diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala index 2b54ea1018..90d7eeccd5 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -3,11 +3,82 @@ */ package akka.remote.testconductor -sealed trait ClientOp -sealed trait ServerOp +import org.jboss.netty.handler.codec.oneone.OneToOneEncoder +import org.jboss.netty.channel.ChannelHandlerContext +import org.jboss.netty.channel.Channel +import akka.remote.testconductor.{ TestConductorProtocol ⇒ TCP } +import com.google.protobuf.Message +import akka.actor.Address +import org.jboss.netty.handler.codec.oneone.OneToOneDecoder -case class EnterBarrier(name: String) extends ClientOp with ServerOp +case class Send(msg: NetworkOp) + +sealed trait ClientOp // messages sent to Player FSM +sealed trait ServerOp // messages sent to Conductor FSM +sealed trait NetworkOp // messages sent over the wire + +case class Hello(name: String, addr: Address) extends NetworkOp +case class EnterBarrier(name: String) extends ClientOp with ServerOp with NetworkOp case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends ServerOp +case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends NetworkOp case class Disconnect(node: String, target: String, abort: Boolean) extends ServerOp +case class DisconnectMsg(target: Address, abort: Boolean) 
extends NetworkOp case class Terminate(node: String, exitValueOrKill: Int) extends ServerOp +case class TerminateMsg(exitValue: Int) extends NetworkOp +abstract class Done extends NetworkOp +case object Done extends Done { + def getInstance: Done = this +} + case class Remove(node: String) extends ServerOp + +class MsgEncoder extends OneToOneEncoder { + def encode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { + case x: NetworkOp ⇒ + val w = TCP.Wrapper.newBuilder + x match { + case Hello(name, addr) ⇒ + w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr)) + case EnterBarrier(name) ⇒ + w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name)) + case ThrottleMsg(target, dir, rate) ⇒ + w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) + .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate)) + case DisconnectMsg(target, abort) ⇒ + w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) + .setFailure(if (abort) TCP.FailType.Abort else TCP.FailType.Disconnect)) + case TerminateMsg(exitValue) ⇒ + w.setFailure(TCP.InjectFailure.newBuilder.setFailure(TCP.FailType.Shutdown).setExitValue(exitValue)) + case _: Done ⇒ + w.setDone("") + } + w.build + case _ ⇒ throw new IllegalArgumentException("wrong message " + msg) + } +} + +class MsgDecoder extends OneToOneDecoder { + def decode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { + case w: TCP.Wrapper if w.getAllFields.size == 1 ⇒ + if (w.hasHello) { + val h = w.getHello + Hello(h.getName, h.getAddress) + } else if (w.hasBarrier) { + EnterBarrier(w.getBarrier.getName) + } else if (w.hasFailure) { + val f = w.getFailure + import TCP.{ FailType ⇒ FT } + f.getFailure match { + case FT.Throttle ⇒ ThrottleMsg(f.getAddress, f.getDirection, f.getRateMBit) + case FT.Abort ⇒ DisconnectMsg(f.getAddress, true) + case FT.Disconnect ⇒ DisconnectMsg(f.getAddress, false) + case FT.Shutdown ⇒ TerminateMsg(f.getExitValue) + } + } else if 
(w.hasDone) { + Done + } else { + throw new IllegalArgumentException("unknown message " + msg) + } + case _ ⇒ throw new IllegalArgumentException("wrong message " + msg) + } +} diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala index 94847664c9..bffa84847f 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala @@ -7,9 +7,14 @@ import akka.remote.RemoteActorRefProvider import akka.actor.ActorContext import akka.util.{ Duration, Timeout } import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.actor.ActorRef +import java.util.concurrent.ConcurrentHashMap +import akka.actor.Address object TestConductor extends ExtensionKey[TestConductorExt] { + def apply()(implicit ctx: ActorContext): TestConductorExt = apply(ctx.system) + } class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player { @@ -28,4 +33,6 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport val address = transport.address + val failureInjectors = new ConcurrentHashMap[Address, FailureInjector] + } \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala index 930be600c2..b94f205726 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala @@ -3,6 +3,8 @@ */ package akka.remote.testconductor +import akka.dispatch.Future + trait BarrierSync { /** * Enter all given barriers in the order in which they were given. 
@@ -11,9 +13,12 @@ trait BarrierSync { } sealed trait Direction -case object Send extends Direction -case object Receive extends Direction -case object Both extends Direction + +object Direction { + case object Send extends Direction + case object Receive extends Direction + case object Both extends Direction +} trait FailureInject { @@ -21,7 +26,7 @@ trait FailureInject { * Make the remoting pipeline on the node throttle data sent to or received * from the given remote peer. */ - def throttle(node: String, target: String, direction: Direction, rateMBit: Float): Unit + def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] /** * Switch the Netty pipeline of the remote support into blackhole mode for @@ -29,56 +34,56 @@ trait FailureInject { * submitting them to the Socket or right after receiving them from the * Socket. */ - def blackhole(node: String, target: String, direction: Direction): Unit + def blackhole(node: String, target: String, direction: Direction): Future[Done] /** * Tell the remote support to shutdown the connection to the given remote * peer. It works regardless of whether the recipient was initiator or * responder. */ - def disconnect(node: String, target: String): Unit + def disconnect(node: String, target: String): Future[Done] /** * Tell the remote support to TCP_RESET the connection to the given remote * peer. It works regardless of whether the recipient was initiator or * responder. */ - def abort(node: String, target: String): Unit + def abort(node: String, target: String): Future[Done] } trait RunControl { /** - * Start the server port. + * Start the server port, returns the port number. */ - def startController(): Unit + def startController(): Future[Int] /** * Get the actual port used by the server. */ - def port: Int + def port: Future[Int] /** * Tell the remote node to shut itself down using System.exit with the given * exitValue. 
*/ - def shutdown(node: String, exitValue: Int): Unit + def shutdown(node: String, exitValue: Int): Future[Done] /** * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy. */ - def kill(node: String): Unit + def kill(node: String): Future[Done] /** * Obtain the list of remote host names currently registered. */ - def getNodes: List[String] + def getNodes: Future[List[String]] /** * Remove a remote host from the list, so that the remaining nodes may still * pass subsequent barriers. */ - def removeNode(node: String): Unit + def removeNode(node: String): Future[Done] } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index 6569d81acc..30e5308979 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.remote.testconductor @@ -9,11 +9,9 @@ import org.jboss.netty.buffer.ChannelBuffer import org.jboss.netty.channel.ChannelState.BOUND import org.jboss.netty.channel.ChannelState.OPEN import org.jboss.netty.channel.Channel -import org.jboss.netty.channel.ChannelDownstreamHandler import org.jboss.netty.channel.ChannelEvent import org.jboss.netty.channel.ChannelHandlerContext import org.jboss.netty.channel.ChannelStateEvent -import org.jboss.netty.channel.ChannelUpstreamHandler import org.jboss.netty.channel.MessageEvent import akka.actor.FSM import akka.actor.Actor @@ -22,23 +20,26 @@ import akka.util.Index import akka.actor.Address import akka.actor.ActorSystem import akka.actor.Props +import akka.actor.ActorRef +import akka.event.Logging +import org.jboss.netty.channel.SimpleChannelHandler -object NetworkFailureInjector { - - val channels = new Index[Address, Channel](16, (c1, c2) ⇒ c1 compareTo c2) - - def close(remote: Address): Unit = { - // channels will be cleaned up by the handler - for (chs ← channels.remove(remote); c ← chs) c.close() +case class FailureInjector(sender: ActorRef, receiver: ActorRef) { + def refs(dir: Direction) = dir match { + case Direction.Send ⇒ Seq(sender) + case Direction.Receive ⇒ Seq(receiver) + case Direction.Both ⇒ Seq(sender, receiver) } } -class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler with ChannelDownstreamHandler { +object NetworkFailureInjector { + case class SetRate(rateMBit: Float) + case class Disconnect(abort: Boolean) +} - import NetworkFailureInjector._ +class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { - // local cache of remote address - private var remote: Option[Address] = None + val log = Logging(system, "FailureInjector") // everything goes via these Throttle actors to enable easy steering private val sender = system.actorOf(Props(new Throttle(_.sendDownstream(_)))) @@ -54,8 +55,8 @@ class NetworkFailureInjector(system: ActorSystem) 
extends ChannelUpstreamHandler private case class Data(ctx: ChannelHandlerContext, rateMBit: Float, queue: Queue[MessageEvent]) - private case class SetRate(rateMBit: Float) private case class Send(ctx: ChannelHandlerContext, msg: MessageEvent) + private case class SetContext(ctx: ChannelHandlerContext) private case object Tick private class Throttle(send: (ChannelHandlerContext, MessageEvent) ⇒ Unit) extends Actor with FSM[State, Data] { @@ -65,6 +66,7 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler when(PassThrough) { case Event(Send(ctx, msg), d) ⇒ + log.debug("sending msg (PassThrough): {}", msg) send(ctx, msg) stay } @@ -77,26 +79,37 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler stay using d.copy(ctx = ctx, queue = d.queue.enqueue(msg)) case Event(Tick, d) ⇒ val (msg, queue) = d.queue.dequeue + log.debug("sending msg (Tick, {}/{} left): {}", d.queue.size, queue.size, msg) send(d.ctx, msg) - if (queue.nonEmpty) setTimer("send", Tick, (size(queue.head) / d.rateMBit) microseconds, false) + if (queue.nonEmpty) { + val time = (size(queue.head) / d.rateMBit).microseconds + log.debug("scheduling next Tick in {}", time) + setTimer("send", Tick, time, false) + } stay using d.copy(queue = queue) } onTransition { case Throttle -> PassThrough ⇒ - stateData.queue foreach (send(stateData.ctx, _)) + stateData.queue foreach { msg ⇒ + log.debug("sending msg (Transition): {}") + send(stateData.ctx, msg) + } cancelTimer("send") case Throttle -> Blackhole ⇒ cancelTimer("send") } when(Blackhole) { - case Event(Send(_, _), _) ⇒ + case Event(Send(_, msg), _) ⇒ + log.debug("dropping msg {}", msg) stay } whenUnhandled { - case Event(SetRate(rate), d) ⇒ + case Event(SetContext(ctx), d) ⇒ stay using d.copy(ctx = ctx) + case Event(NetworkFailureInjector.SetRate(rate), d) ⇒ + sender ! 
"ok" if (rate > 0) { goto(Throttle) using d.copy(rateMBit = rate, queue = Queue()) } else if (rate == 0) { @@ -104,6 +117,11 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler } else { goto(PassThrough) } + case Event(NetworkFailureInjector.Disconnect(abort), Data(ctx, _, _)) ⇒ + sender ! "ok" + // TODO implement abort + ctx.getChannel.disconnect() + stay } initialize @@ -114,46 +132,42 @@ class NetworkFailureInjector(system: ActorSystem) extends ChannelUpstreamHandler } } - def throttleSend(rateMBit: Float) { - sender ! SetRate(rateMBit) + private var remote: Option[Address] = None + + override def messageReceived(ctx: ChannelHandlerContext, msg: MessageEvent) { + log.debug("upstream(queued): {}", msg) + receiver ! Send(ctx, msg) } - def throttleReceive(rateMBit: Float) { - receiver ! SetRate(rateMBit) - } - - override def handleUpstream(ctx: ChannelHandlerContext, evt: ChannelEvent) { - evt match { - case msg: MessageEvent ⇒ - receiver ! Send(ctx, msg) - case state: ChannelStateEvent ⇒ - state.getState match { - case BOUND ⇒ - state.getValue match { - case null ⇒ - remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } - case a: InetSocketAddress ⇒ - val addr = Address("akka", "XXX", a.getHostName, a.getPort) - channels.put(addr, state.getChannel) - remote = Some(addr) - } - case OPEN if state.getValue == false ⇒ - remote = remote flatMap { a ⇒ channels.remove(a, state.getChannel); None } + override def channelConnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { + state.getValue match { + case a: InetSocketAddress ⇒ + val addr = Address("akka", "", a.getHostName, a.getPort) + log.debug("connected to {}", addr) + TestConductor(system).failureInjectors.put(addr, FailureInjector(sender, receiver)) match { + case null ⇒ // okay + case fi ⇒ system.log.error("{} already registered for address {}", fi, addr) } - ctx.sendUpstream(evt) - case _ ⇒ - ctx.sendUpstream(evt) + remote = Some(addr) + sender ! 
SetContext(ctx) + case x ⇒ throw new IllegalArgumentException("unknown address type: " + x) } } - override def handleDownstream(ctx: ChannelHandlerContext, evt: ChannelEvent) { - evt match { - case msg: MessageEvent ⇒ - sender ! Send(ctx, msg) - case _ ⇒ - ctx.sendUpstream(evt) + override def channelDisconnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { + log.debug("disconnected from {}", remote) + remote = remote flatMap { addr ⇒ + TestConductor(system).failureInjectors.remove(addr) + system.stop(sender) + system.stop(receiver) + None } } + override def writeRequested(ctx: ChannelHandlerContext, msg: MessageEvent) { + log.debug("downstream(queued): {}", msg) + sender ! Send(ctx, msg) + } + } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala index 93aa6bc33d..72b15922f3 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala @@ -6,20 +6,20 @@ package akka.remote.testconductor import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props } import RemoteConnection.getAddrString import akka.util.duration._ -import TestConductorProtocol._ import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } import com.eaio.uuid.UUID import com.typesafe.config.ConfigFactory import akka.util.Timeout import akka.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.pattern.ask +import akka.pattern.{ ask, pipe } import akka.dispatch.Await import scala.util.control.NoStackTrace import akka.actor.Status import akka.event.LoggingAdapter import akka.actor.PoisonPill import akka.event.Logging +import akka.dispatch.Future trait Player extends BarrierSync { this: TestConductorExt ⇒ @@ -29,7 +29,7 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒ case x ⇒ x } - def 
startClient(port: Int) { + def startClient(port: Int): Future[Done] = { import ClientFSM._ import akka.actor.FSM._ import Settings.BarrierTimeout @@ -40,21 +40,21 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒ var waiting: ActorRef = _ def receive = { case fsm: ActorRef ⇒ waiting = sender; fsm ! SubscribeTransitionCallBack(self) - case Transition(_, Connecting, Connected) ⇒ waiting ! "okay" + case Transition(_, Connecting, Connected) ⇒ waiting ! Done case t: Transition[_] ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t)) - case CurrentState(_, Connected) ⇒ waiting ! "okay" + case CurrentState(_, Connected) ⇒ waiting ! Done case _: CurrentState[_] ⇒ } })) - Await.result(a ? client, Duration.Inf) + a ? client mapTo } override def enter(name: String*) { system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) name foreach { b ⇒ import Settings.BarrierTimeout - Await.result(client ? EnterBarrier(b), Duration.Inf) + Await.result(client ? 
Send(EnterBarrier(b)), Duration.Inf) system.log.debug("passed barrier {}", b) } } @@ -84,8 +84,7 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client case Event(msg: ClientOp, _) ⇒ stay replying Status.Failure(new IllegalStateException("not connected yet")) case Event(Connected, d @ Data(channel, _)) ⇒ - val hello = Hello.newBuilder.setName(settings.name).setAddress(TestConductor().address).build - channel.write(Wrapper.newBuilder.setHello(hello).build) + channel.write(Hello(settings.name, TestConductor().address)) goto(Connected) case Event(_: ConnectionFailure, _) ⇒ // System.exit(1) @@ -100,19 +99,41 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client case Event(Disconnected, _) ⇒ log.info("disconnected from TestConductor") throw new ConnectionFailure("disconnect") - case Event(msg: EnterBarrier, Data(channel, _)) ⇒ - sendMsg(channel)(msg) + case Event(Send(msg: EnterBarrier), Data(channel, None)) ⇒ + channel.write(msg) stay using Data(channel, Some(msg.name, sender)) - case Event(msg: Wrapper, Data(channel, Some((barrier, sender)))) if msg.getAllFields.size == 1 ⇒ - if (msg.hasBarrier) { - val b = msg.getBarrier.getName - if (b != barrier) { - sender ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) - } else { - sender ! b - } + case Event(Send(d: Done), Data(channel, _)) ⇒ + channel.write(d) + stay + case Event(Send(x), _) ⇒ + log.warning("cannot send message {}", x) + stay + case Event(EnterBarrier(b), Data(channel, Some((barrier, sender)))) ⇒ + if (b != barrier) { + sender ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + } else { + sender ! 
b } stay using Data(channel, None) + case Event(ThrottleMsg(target, dir, rate), _) ⇒ + import settings.QueryTimeout + import context.dispatcher + TestConductor().failureInjectors.get(target.copy(system = "")) match { + case null ⇒ log.warning("cannot throttle unknown address {}", target) + case inj ⇒ + Future.sequence(inj.refs(dir) map (_ ? NetworkFailureInjector.SetRate(rate))) map (_ ⇒ Send(Done)) pipeTo self + } + stay + case Event(DisconnectMsg(target, abort), _) ⇒ + import settings.QueryTimeout + TestConductor().failureInjectors.get(target.copy(system = "")) match { + case null ⇒ log.warning("cannot disconnect unknown address {}", target) + case inj ⇒ inj.sender ? NetworkFailureInjector.Disconnect(abort) map (_ ⇒ Send(Done)) pipeTo self + } + stay + case Event(TerminateMsg(exit), _) ⇒ + System.exit(exit) + stay // needed because Java doesn’t have Nothing } onTermination { @@ -122,14 +143,6 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client initialize - private def sendMsg(channel: Channel)(msg: ClientOp) { - msg match { - case EnterBarrier(name) ⇒ - val enter = TestConductorProtocol.EnterBarrier.newBuilder.setName(name).build - channel.write(Wrapper.newBuilder.setBarrier(enter).build) - } - } - } class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { @@ -152,7 +165,7 @@ class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUps val channel = event.getChannel log.debug("message from {}: {}", getAddrString(channel), event.getMessage) event.getMessage match { - case msg: Wrapper if msg.getAllFields.size == 1 ⇒ + case msg: NetworkOp ⇒ fsm ! 
msg case msg ⇒ log.info("server {} sent garbage '{}', disconnecting", getAddrString(channel), msg) diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index a92b6295e2..b2f4baebbb 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -17,7 +17,8 @@ class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends Chan def getPipeline: ChannelPipeline = { val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) - new StaticChannelPipeline(encap ::: proto ::: handler :: Nil: _*) + val msg = List(new MsgEncoder, new MsgDecoder) + new StaticChannelPipeline(encap ::: proto ::: msg ::: handler :: Nil: _*) } } diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/package.scala b/akka-remote/src/main/scala/akka/remote/testconductor/package.scala index 8ebeea90a9..b24279dbf6 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/package.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/package.scala @@ -16,4 +16,16 @@ package object testconductor { implicit def address2scala(addr: TCP.Address): Address = Address(addr.getProtocol, addr.getSystem, addr.getHost, addr.getPort) + implicit def direction2proto(dir: Direction): TCP.Direction = dir match { + case Direction.Send ⇒ TCP.Direction.Send + case Direction.Receive ⇒ TCP.Direction.Receive + case Direction.Both ⇒ TCP.Direction.Both + } + + implicit def direction2scala(dir: TCP.Direction): Direction = dir match { + case TCP.Direction.Send ⇒ Direction.Send + case TCP.Direction.Receive ⇒ Direction.Receive + case TCP.Direction.Both ⇒ Direction.Both + } + } \ No newline at end of file diff --git 
a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala index ab8bdadae6..ca4313b56b 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala @@ -1,6 +1,7 @@ package akka.remote import com.typesafe.config.{Config, ConfigFactory} +import akka.actor.Address trait AbstractRemoteActorMultiJvmSpec { def NrOfNodes: Int @@ -8,7 +9,6 @@ trait AbstractRemoteActorMultiJvmSpec { def PortRangeStart = 1990 def NodeRange = 1 to NrOfNodes - def PortRange = PortRangeStart to NrOfNodes private[this] val remotes: IndexedSeq[String] = { val nodesOpt = Option(AkkaRemoteSpec.testNodes).map(_.split(",").toIndexedSeq) diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index cae2917577..096d4c5a89 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -3,12 +3,24 @@ package akka.remote.testconductor import akka.remote.AkkaRemoteSpec import com.typesafe.config.ConfigFactory import akka.remote.AbstractRemoteActorMultiJvmSpec +import akka.actor.Props +import akka.actor.Actor +import akka.dispatch.Await +import akka.dispatch.Await.Awaitable +import akka.util.Duration +import akka.util.duration._ +import akka.testkit.ImplicitSender object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { override def NrOfNodes = 2 override def commonConfig = ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.actor.provider = akka.remote.RemoteActorRefProvider + akka.remote { + transport = akka.remote.testconductor.TestConductorTransport + log-received-messages = on + log-sent-messages = on + } 
akka.actor.debug { receive = on fsm = on @@ -19,34 +31,87 @@ object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { } """) def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) + + implicit def awaitHelper[T](w: Awaitable[T]) = new AwaitHelper(w) + class AwaitHelper[T](w: Awaitable[T]) { + def await: T = Await.result(w, Duration.Inf) + } } -import TestConductorMultiJvmSpec._ +class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpec.nameConfig(0)) { -class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(nameConfig(0)) { + import TestConductorMultiJvmSpec._ - val nodes = TestConductorMultiJvmSpec.NrOfNodes + val nodes = NrOfNodes - "running a test" in { - val tc = TestConductor(system) - tc.startController() + val tc = TestConductor(system) + + val echo = system.actorOf(Props(new Actor { + def receive = { + case x ⇒ testActor ! x; sender ! x + } + }), "echo") + + "running a test with barrier" in { + tc.startController().await barrier("start") barrier("first") tc.enter("begin") barrier("end") } + + "throttling" in { + expectMsg("start") + tc.throttle("node1", "node0", Direction.Send, 0.016).await + tc.enter("throttled_send") + within(1 second, 2 seconds) { + receiveN(10) must be(0 to 9) + } + tc.enter("throttled_send2") + tc.throttle("node1", "node0", Direction.Send, -1).await + + tc.throttle("node1", "node0", Direction.Receive, 0.016).await + tc.enter("throttled_recv") + receiveN(10, 500 millis) must be(10 to 19) + tc.enter("throttled_recv2") + tc.throttle("node1", "node0", Direction.Receive, -1).await + } } -class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(nameConfig(1)) { +class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(TestConductorMultiJvmSpec.nameConfig(1)) with ImplicitSender { - val nodes = TestConductorMultiJvmSpec.NrOfNodes + import TestConductorMultiJvmSpec._ - "running a test" in { + val nodes = NrOfNodes + + val tc = 
TestConductor(system) + + val echo = system.actorFor("akka://" + akkaSpec(0) + "/user/echo") + + "running a test with barrier" in { barrier("start") - val tc = TestConductor(system) - tc.startClient(4712) + tc.startClient(4712).await barrier("first") tc.enter("begin") barrier("end") } + + "throttling" in { + echo ! "start" + expectMsg("start") + tc.enter("throttled_send") + for (i <- 0 to 9) echo ! i + expectMsg(500 millis, 0) + within(1 second, 2 seconds) { + receiveN(9) must be(1 to 9) + } + tc.enter("throttled_send2", "throttled_recv") + for (i <- 10 to 19) echo ! i + expectMsg(500 millis, 10) + within(1 second, 2 seconds) { + receiveN(9) must be(11 to 19) + } + tc.enter("throttled_recv2") + } + } From c68df0635f2213d397649533db24a0a01ffe17c5 Mon Sep 17 00:00:00 2001 From: Roland Date: Sat, 5 May 2012 15:16:21 +0200 Subject: [PATCH 006/538] add previously forgotten TestConductorTransport --- .../TestConductorTransport.scala | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala new file mode 100644 index 0000000000..d03adebe9a --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala @@ -0,0 +1,21 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.remote.testconductor + +import akka.remote.netty.NettyRemoteTransport +import akka.remote.RemoteSettings +import akka.actor.ActorSystemImpl +import akka.remote.RemoteActorRefProvider +import org.jboss.netty.channel.ChannelHandler +import org.jboss.netty.channel.ChannelPipelineFactory + +class TestConductorTransport(_remoteSettings: RemoteSettings, _system: ActorSystemImpl, _provider: RemoteActorRefProvider) + extends NettyRemoteTransport(_remoteSettings, _system, _provider) { + + override def mkPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory = + new ChannelPipelineFactory { + def getPipeline = PipelineFactory(new NetworkFailureInjector(system) +: PipelineFactory.defaultStack(withTimeout) :+ endpoint) + } + +} \ No newline at end of file From a351e6ad9fb9ca24d69a20da9f6bb2028f901a91 Mon Sep 17 00:00:00 2001 From: Roland Date: Sat, 5 May 2012 22:14:42 +0200 Subject: [PATCH 007/538] implement more precise bandwith throttling - will keep track of theoretical packet boundaries and send on timer tick or send request according to actual time - will split packets if calculated release time is >100ms into the future (configurable) to simulate proper trickling --- akka-remote/src/main/resources/reference.conf | 5 + .../akka/remote/testconductor/Extension.scala | 1 + .../NetworkFailureInjector.scala | 131 +++++++++++++----- .../testconductor/TestConductorSpec.scala | 4 +- .../src/main/scala/akka/testkit/TestKit.scala | 6 +- 5 files changed, 109 insertions(+), 38 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 384d00b55d..f14ee3d87c 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -165,6 +165,11 @@ akka { # Timeout for interrogation of TestConductor’s Controller actor query-timeout = 5s + # Threshold for packet size in time unit above which the failure injector will + # split the 
packet and deliver in smaller portions; do not give value smaller + # than HashedWheelTimer resolution (would not make sense) + packet-split-threshold = 100ms + # Default port to start the conductor on; 0 means port = 0 diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala index bffa84847f..97f5dd7295 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala @@ -24,6 +24,7 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.barrier-timeout"), MILLISECONDS)) implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.query-timeout"), MILLISECONDS)) + val PacketSplitThreshold = Duration(config.getMilliseconds("akka.testconductor.packet-split-threshold"), MILLISECONDS) val name = config.getString("akka.testconductor.name") val host = config.getString("akka.testconductor.host") diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index 30e5308979..5e101dea0c 100644 --- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -23,6 +23,13 @@ import akka.actor.Props import akka.actor.ActorRef import akka.event.Logging import org.jboss.netty.channel.SimpleChannelHandler +import scala.annotation.tailrec +import akka.util.Duration +import akka.actor.LoggingFSM +import org.jboss.netty.channel.Channels +import org.jboss.netty.channel.ChannelFuture +import org.jboss.netty.channel.ChannelFutureListener +import org.jboss.netty.channel.ChannelFuture case class FailureInjector(sender: ActorRef, 
receiver: ActorRef) { def refs(dir: Direction) = dir match { @@ -42,8 +49,10 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { val log = Logging(system, "FailureInjector") // everything goes via these Throttle actors to enable easy steering - private val sender = system.actorOf(Props(new Throttle(_.sendDownstream(_)))) - private val receiver = system.actorOf(Props(new Throttle(_.sendUpstream(_)))) + private val sender = system.actorOf(Props(new Throttle(Direction.Send))) + private val receiver = system.actorOf(Props(new Throttle(Direction.Receive))) + + private val packetSplitThreshold = TestConductor(system).Settings.PacketSplitThreshold /* * State, Data and Messages for the internal Throttle actor @@ -53,47 +62,40 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { private case object Throttle extends State private case object Blackhole extends State - private case class Data(ctx: ChannelHandlerContext, rateMBit: Float, queue: Queue[MessageEvent]) + private case class Data(lastSent: Long, rateMBit: Float, queue: Queue[Send]) - private case class Send(ctx: ChannelHandlerContext, msg: MessageEvent) + private case class Send(ctx: ChannelHandlerContext, future: Option[ChannelFuture], msg: AnyRef) private case class SetContext(ctx: ChannelHandlerContext) private case object Tick - private class Throttle(send: (ChannelHandlerContext, MessageEvent) ⇒ Unit) extends Actor with FSM[State, Data] { + private class Throttle(dir: Direction) extends Actor with LoggingFSM[State, Data] { import FSM._ - startWith(PassThrough, Data(null, -1, Queue())) + var channelContext: ChannelHandlerContext = _ + + startWith(PassThrough, Data(0, -1, Queue())) when(PassThrough) { - case Event(Send(ctx, msg), d) ⇒ + case Event(s @ Send(_, _, msg), _) ⇒ log.debug("sending msg (PassThrough): {}", msg) - send(ctx, msg) + send(s) stay } when(Throttle) { - case Event(Send(ctx, msg), d) ⇒ - if (!timerActive_?("send")) { - 
setTimer("send", Tick, (size(msg) / d.rateMBit) microseconds, false) - } - stay using d.copy(ctx = ctx, queue = d.queue.enqueue(msg)) - case Event(Tick, d) ⇒ - val (msg, queue) = d.queue.dequeue - log.debug("sending msg (Tick, {}/{} left): {}", d.queue.size, queue.size, msg) - send(d.ctx, msg) - if (queue.nonEmpty) { - val time = (size(queue.head) / d.rateMBit).microseconds - log.debug("scheduling next Tick in {}", time) - setTimer("send", Tick, time, false) - } - stay using d.copy(queue = queue) + case Event(s: Send, d @ Data(_, _, Queue())) ⇒ + stay using sendThrottled(d.copy(lastSent = System.nanoTime, queue = Queue(s))) + case Event(s: Send, data) ⇒ + stay using sendThrottled(data.copy(queue = data.queue.enqueue(s))) + case Event(Tick, data) ⇒ + stay using sendThrottled(data) } onTransition { case Throttle -> PassThrough ⇒ - stateData.queue foreach { msg ⇒ - log.debug("sending msg (Transition): {}") - send(stateData.ctx, msg) + for (s ← stateData.queue) { + log.debug("sending msg (Transition): {}", s.msg) + send(s) } cancelTimer("send") case Throttle -> Blackhole ⇒ @@ -101,32 +103,95 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { } when(Blackhole) { - case Event(Send(_, msg), _) ⇒ + case Event(Send(_, _, msg), _) ⇒ log.debug("dropping msg {}", msg) stay } whenUnhandled { - case Event(SetContext(ctx), d) ⇒ stay using d.copy(ctx = ctx) case Event(NetworkFailureInjector.SetRate(rate), d) ⇒ sender ! "ok" if (rate > 0) { - goto(Throttle) using d.copy(rateMBit = rate, queue = Queue()) + goto(Throttle) using d.copy(lastSent = System.nanoTime, rateMBit = rate, queue = Queue()) } else if (rate == 0) { goto(Blackhole) } else { goto(PassThrough) } + case Event(SetContext(ctx), _) ⇒ channelContext = ctx; stay case Event(NetworkFailureInjector.Disconnect(abort), Data(ctx, _, _)) ⇒ sender ! 
"ok" // TODO implement abort - ctx.getChannel.disconnect() + channelContext.getChannel.disconnect() stay } initialize - private def size(msg: MessageEvent) = msg.getMessage() match { + private def sendThrottled(d: Data): Data = { + val (data, toSend, toTick) = schedule(d) + for (s ← toSend) { + log.debug("sending msg (Tick): {}", s.msg) + send(s) + } + for (time ← toTick) { + log.debug("scheduling next Tick in {}", time) + setTimer("send", Tick, time, false) + } + data + } + + private def send(s: Send): Unit = dir match { + case Direction.Send ⇒ Channels.write(s.ctx, s.future getOrElse Channels.future(s.ctx.getChannel), s.msg) + case Direction.Receive ⇒ Channels.fireMessageReceived(s.ctx, s.msg) + case _ ⇒ + } + + private def schedule(d: Data): (Data, Seq[Send], Option[Duration]) = { + val now = System.nanoTime + @tailrec def rec(d: Data, toSend: Seq[Send]): (Data, Seq[Send], Option[Duration]) = { + if (d.queue.isEmpty) (d, toSend, None) + else { + val timeForPacket = d.lastSent + (1000 * size(d.queue.head.msg) / d.rateMBit).toLong + if (timeForPacket <= now) rec(Data(timeForPacket, d.rateMBit, d.queue.tail), toSend :+ d.queue.head) + else { + val deadline = now + packetSplitThreshold.toNanos + if (timeForPacket <= deadline) (d, toSend, Some((timeForPacket - now).nanos)) + else { + val micros = (deadline - d.lastSent) / 1000 + val (s1, s2) = split(d.queue.head, (micros * d.rateMBit / 8).toInt) + (d.copy(queue = s1 +: s2 +: d.queue.tail), toSend, Some(packetSplitThreshold)) + } + } + } + } + rec(d, Seq()) + } + + private def split(s: Send, bytes: Int): (Send, Send) = { + s.msg match { + case buf: ChannelBuffer ⇒ + val f = s.future map { f ⇒ + val newF = Channels.future(s.ctx.getChannel) + newF.addListener(new ChannelFutureListener { + def operationComplete(future: ChannelFuture) { + if (future.isCancelled) f.cancel() + else future.getCause match { + case null ⇒ + case thr ⇒ f.setFailure(thr) + } + } + }) + newF + } + val b = buf.slice() + b.writerIndex(b.readerIndex 
+ bytes) + buf.readerIndex(buf.readerIndex + bytes) + (Send(s.ctx, f, b), Send(s.ctx, s.future, buf)) + } + } + + private def size(msg: AnyRef) = msg match { case b: ChannelBuffer ⇒ b.readableBytes() * 8 case _ ⇒ throw new UnsupportedOperationException("NetworkFailureInjector only supports ChannelBuffer messages") } @@ -136,7 +201,7 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { override def messageReceived(ctx: ChannelHandlerContext, msg: MessageEvent) { log.debug("upstream(queued): {}", msg) - receiver ! Send(ctx, msg) + receiver ! Send(ctx, Option(msg.getFuture), msg.getMessage) } override def channelConnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { @@ -166,7 +231,7 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { override def writeRequested(ctx: ChannelHandlerContext, msg: MessageEvent) { log.debug("downstream(queued): {}", msg) - sender ! Send(ctx, msg) + sender ! Send(ctx, Option(msg.getFuture), msg.getMessage) } } diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 096d4c5a89..c7e848caf3 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -62,7 +62,7 @@ class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpe "throttling" in { expectMsg("start") - tc.throttle("node1", "node0", Direction.Send, 0.016).await + tc.throttle("node1", "node0", Direction.Send, 0.01).await tc.enter("throttled_send") within(1 second, 2 seconds) { receiveN(10) must be(0 to 9) @@ -70,7 +70,7 @@ class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpe tc.enter("throttled_send2") tc.throttle("node1", "node0", Direction.Send, -1).await - tc.throttle("node1", "node0", Direction.Receive, 
0.016).await + tc.throttle("node1", "node0", Direction.Receive, 0.01).await tc.enter("throttled_recv") receiveN(10, 500 millis) must be(10 to 19) tc.enter("throttled_recv2") diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index bcac5c24cf..cbcfc2a77d 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -69,7 +69,7 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor { *
  * class Test extends TestKit(ActorSystem()) {
  *     try {
- *     
+ *
  *       val test = system.actorOf(Props[SomeActor]
  *
  *       within (1 second) {
@@ -77,7 +77,7 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor {
  *         expectMsg(Result1) // bounded to 1 second
  *         expectMsg(Result2) // bounded to the remainder of the 1 second
  *       }
- *     
+ *
  *     } finally {
  *       system.shutdown()
  *     }
@@ -86,7 +86,7 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor {
  *
  * Beware of two points:
  *
- *  - the ActorSystem passed into the constructor needs to be shutdown, 
+ *  - the ActorSystem passed into the constructor needs to be shutdown,
  *    otherwise thread pools and memory will be leaked
  *  - this trait is not thread-safe (only one actor with one queue, one stack
  *    of `within` blocks); it is expected that the code is executed from a

From 0076bddb523a358d0c00ba6fd5725deaffd926e3 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Mon, 7 May 2012 07:36:02 +0200
Subject: [PATCH 008/538] optimize partial message scheduling

- split only right before send (if necessary)
- do not reschedule Tick if that has already been done, because the head
  of the queue does not change so the old data are still correct
- make the test a bit less sensitive with respect to timing
---
 .../NetworkFailureInjector.scala              | 23 ++++++++++---------
 .../testconductor/TestConductorSpec.scala     |  6 ++---
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
index 5e101dea0c..b853523979 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
@@ -83,8 +83,8 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler {
     }
 
     when(Throttle) {
-      case Event(s: Send, d @ Data(_, _, Queue())) ⇒
-        stay using sendThrottled(d.copy(lastSent = System.nanoTime, queue = Queue(s)))
+      case Event(s: Send, data @ Data(_, _, Queue())) ⇒
+        stay using sendThrottled(data.copy(lastSent = System.nanoTime, queue = Queue(s)))
       case Event(s: Send, data) ⇒
         stay using sendThrottled(data.copy(queue = data.queue.enqueue(s)))
       case Event(Tick, data) ⇒
@@ -134,10 +134,11 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler {
         log.debug("sending msg (Tick): {}", s.msg)
         send(s)
       }
-      for (time ← toTick) {
-        log.debug("scheduling next Tick in {}", time)
-        setTimer("send", Tick, time, false)
-      }
+      if (!timerActive_?("send"))
+        for (time ← toTick) {
+          log.debug("scheduling next Tick in {}", time)
+          setTimer("send", Tick, time, false)
+        }
       data
     }
 
@@ -155,12 +156,12 @@ class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler {
           val timeForPacket = d.lastSent + (1000 * size(d.queue.head.msg) / d.rateMBit).toLong
           if (timeForPacket <= now) rec(Data(timeForPacket, d.rateMBit, d.queue.tail), toSend :+ d.queue.head)
           else {
-            val deadline = now + packetSplitThreshold.toNanos
-            if (timeForPacket <= deadline) (d, toSend, Some((timeForPacket - now).nanos))
+            val splitThreshold = d.lastSent + packetSplitThreshold.toNanos
+            if (now < splitThreshold) (d, toSend, Some((timeForPacket - now).nanos min (splitThreshold - now).nanos))
             else {
-              val micros = (deadline - d.lastSent) / 1000
-              val (s1, s2) = split(d.queue.head, (micros * d.rateMBit / 8).toInt)
-              (d.copy(queue = s1 +: s2 +: d.queue.tail), toSend, Some(packetSplitThreshold))
+              val microsToSend = (now - d.lastSent) / 1000
+              val (s1, s2) = split(d.queue.head, (microsToSend * d.rateMBit / 8).toInt)
+              (d.copy(queue = s2 +: d.queue.tail), toSend :+ s1, Some((timeForPacket - now).nanos min packetSplitThreshold))
             }
           }
         }
diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
index c7e848caf3..16193f7bd3 100644
--- a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
+++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
@@ -64,7 +64,7 @@ class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpe
     expectMsg("start")
     tc.throttle("node1", "node0", Direction.Send, 0.01).await
     tc.enter("throttled_send")
-    within(1 second, 2 seconds) {
+    within(0.6 seconds, 2 seconds) {
       receiveN(10) must be(0 to 9)
     }
     tc.enter("throttled_send2")
@@ -102,13 +102,13 @@ class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(TestConductorMultiJvmSpe
     tc.enter("throttled_send")
     for (i <- 0 to 9) echo ! i
     expectMsg(500 millis, 0)
-    within(1 second, 2 seconds) {
+    within(0.6 seconds, 2 seconds) {
       receiveN(9) must be(1 to 9)
     }
     tc.enter("throttled_send2", "throttled_recv")
     for (i <- 10 to 19) echo ! i
     expectMsg(500 millis, 10)
-    within(1 second, 2 seconds) {
+    within(0.6 seconds, 2 seconds) {
       receiveN(9) must be(11 to 19)
     }
     tc.enter("throttled_recv2")

From f81184236fa0748304a26ae133d791d074c95536 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Mon, 7 May 2012 08:04:15 +0200
Subject: [PATCH 009/538] wait for initial crew before starting the party

- the Controller is started with the required initial number of
  participants
- if that is >0, it will hold off sending Done to the clients until that
  number has connected, then set it to zero
- if that is <=0, send Done back immediately upon connect
---
 .../akka/remote/testconductor/Conductor.scala  | 17 +++++++++++------
 .../akka/remote/testconductor/Features.scala   |  2 +-
 .../akka/remote/testconductor/Player.scala     | 18 ++++++++++++++++--
 .../testconductor/TestConductorSpec.scala      |  8 +-------
 4 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
index c9cbeadf83..7e3d315fea 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -33,9 +33,9 @@ trait Conductor extends RunControl with FailureInject { this: TestConductorExt 
     case x    ⇒ x
   }
 
-  override def startController(): Future[Int] = {
+  override def startController(participants: Int): Future[Int] = {
     if (_controller ne null) throw new RuntimeException("TestConductorServer was already started")
-    _controller = system.actorOf(Props[Controller], "controller")
+    _controller = system.actorOf(Props(new Controller(participants)), "controller")
     import Settings.BarrierTimeout
     controller ? GetPort flatMap { case port: Int ⇒ startClient(port) map (_ ⇒ port) }
   }
@@ -162,7 +162,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi
       log.warning("client {} sent unsupported message {}", getAddrString(channel), msg)
       channel.close()
       stop()
-    case Event(Send(msg: EnterBarrier), _) ⇒
+    case Event(Send(msg @ (_: EnterBarrier | _: Done)), _) ⇒
       channel.write(msg)
       stay
     case Event(Send(msg), None) ⇒
@@ -185,9 +185,11 @@ object Controller {
   case class NodeInfo(name: String, addr: Address, fsm: ActorRef)
 }
 
-class Controller extends Actor {
+class Controller(_participants: Int) extends Actor {
   import Controller._
 
+  var initialParticipants = _participants
+
   val settings = TestConductor().Settings
   val connection = RemoteConnection(Server, settings.host, settings.port,
     new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler")))
@@ -199,8 +201,11 @@ class Controller extends Actor {
     case ClientConnected(name, addr) ⇒
       nodes += name -> NodeInfo(name, addr, sender)
       barrier forward ClientConnected
-    case ClientConnected ⇒
-      barrier forward ClientConnected
+      if (initialParticipants <= 0) sender ! Done
+      else if (nodes.size == initialParticipants) {
+        for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done)
+        initialParticipants = 0
+      }
     case ClientDisconnected(name) ⇒
       nodes -= name
       barrier forward ClientDisconnected
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
index b94f205726..336d04c368 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
@@ -57,7 +57,7 @@ trait RunControl {
   /**
    * Start the server port, returns the port number.
    */
-  def startController(): Future[Int]
+  def startController(participants: Int): Future[Int]
 
   /**
    * Get the actual port used by the server.
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
index 72b15922f3..f7d2fbd532 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
@@ -40,7 +40,8 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒
       var waiting: ActorRef = _
       def receive = {
         case fsm: ActorRef                        ⇒ waiting = sender; fsm ! SubscribeTransitionCallBack(self)
-        case Transition(_, Connecting, Connected) ⇒ waiting ! Done
+        case Transition(_, Connecting, AwaitDone) ⇒ // step 1, not there yet
+        case Transition(_, AwaitDone, Connected)  ⇒ waiting ! Done
         case t: Transition[_]                     ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t))
         case CurrentState(_, Connected)           ⇒ waiting ! Done
         case _: CurrentState[_]                   ⇒
@@ -63,6 +64,7 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒
 object ClientFSM {
   sealed trait State
   case object Connecting extends State
+  case object AwaitDone extends State
   case object Connected extends State
 
   case class Data(channel: Channel, barrier: Option[(String, ActorRef)])
@@ -85,7 +87,7 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
       stay replying Status.Failure(new IllegalStateException("not connected yet"))
     case Event(Connected, d @ Data(channel, _)) ⇒
       channel.write(Hello(settings.name, TestConductor().address))
-      goto(Connected)
+      goto(AwaitDone)
     case Event(_: ConnectionFailure, _) ⇒
       // System.exit(1)
       stop
@@ -95,6 +97,18 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
       stop
   }
 
+  when(AwaitDone, stateTimeout = settings.BarrierTimeout.duration) {
+    case Event(Done, _) ⇒
+      log.debug("received Done: starting test")
+      goto(Connected)
+    case Event(msg: ClientOp, _) ⇒
+      stay replying Status.Failure(new IllegalStateException("not connected yet"))
+    case Event(StateTimeout, _) ⇒
+      log.error("connect timeout to TestConductor")
+      // System.exit(1)
+      stop
+  }
+
   when(Connected) {
     case Event(Disconnected, _) ⇒
       log.info("disconnected from TestConductor")
diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
index 16193f7bd3..512757c130 100644
--- a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
+++ b/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
@@ -53,11 +53,8 @@ class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpe
   }), "echo")
 
   "running a test with barrier" in {
-    tc.startController().await
-    barrier("start")
-    barrier("first")
+    tc.startController(2).await
     tc.enter("begin")
-    barrier("end")
   }
 
   "throttling" in {
@@ -89,11 +86,8 @@ class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(TestConductorMultiJvmSpe
   val echo = system.actorFor("akka://" + akkaSpec(0) + "/user/echo")
 
   "running a test with barrier" in {
-    barrier("start")
     tc.startClient(4712).await
-    barrier("first")
     tc.enter("begin")
-    barrier("end")
   }
 
   "throttling" in {

From d8268f8e6fe93f3d7a428c3b171325774c896b8b Mon Sep 17 00:00:00 2001
From: Roland 
Date: Mon, 7 May 2012 18:12:53 +0200
Subject: [PATCH 010/538] fix BuilderParent in generated
 TestConductorProtocol.java

---
 .../remote/testconductor/TestConductorProtocol.java    | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
index f112a1b0c2..4b9da03059 100644
--- a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
+++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
@@ -460,7 +460,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -1242,7 +1242,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -1750,7 +1750,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -2255,7 +2255,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -2861,7 +2861,7 @@ public final class TestConductorProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(BuilderParent parent) {
+      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }

From 33cea733a315bc5c8306a81f7ddbb6853360c319 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Tue, 8 May 2012 10:05:14 +0200
Subject: [PATCH 011/538] rename mkPipeline => createPipeline

---
 akka-remote/src/main/scala/akka/remote/netty/Client.scala   | 2 +-
 .../main/scala/akka/remote/netty/NettyRemoteSupport.scala   | 6 +++---
 akka-remote/src/main/scala/akka/remote/netty/Server.scala   | 2 +-
 .../akka/remote/testconductor/TestConductorTransport.scala  | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala
index cf143650bc..4735132534 100644
--- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala
+++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala
@@ -155,7 +155,7 @@ class ActiveRemoteClient private[akka] (
       openChannels = new DefaultDisposableChannelGroup(classOf[RemoteClient].getName)
 
       val b = new ClientBootstrap(netty.clientChannelFactory)
-      b.setPipelineFactory(netty.mkPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), true))
+      b.setPipelineFactory(netty.createPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), true))
       b.setOption("tcpNoDelay", true)
       b.setOption("keepAlive", true)
       b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis)
diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
index 35ef3bf7fd..60c2ac6097 100644
--- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
@@ -86,7 +86,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor
    * This method is factored out to provide an extension point in case the
    * pipeline shall be changed. It is recommended to use
    */
-  def mkPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
+  def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
     PipelineFactory(Seq(endpoint), withTimeout)
 
   private val remoteClients = new HashMap[Address, RemoteClient]
@@ -98,13 +98,13 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor
 
   /**
    * Override this method to inject a subclass of NettyRemoteServer instead of
-   * the normal one, e.g. for altering the pipeline.
+   * the normal one, e.g. for inserting security hooks.
    */
   protected def createServer(): NettyRemoteServer = new NettyRemoteServer(this)
 
   /**
    * Override this method to inject a subclass of RemoteClient instead of
-   * the normal one, e.g. for altering the pipeline. Get this transport’s
+   * the normal one, e.g. for inserting security hooks. Get this transport’s
    * address from `this.address`.
    */
   protected def createClient(recipient: Address): RemoteClient = new ActiveRemoteClient(this, recipient, address)
diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala
index f9d4ede1d8..87993f783d 100644
--- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala
+++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala
@@ -35,7 +35,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) {
 
   private val bootstrap = {
     val b = new ServerBootstrap(factory)
-    b.setPipelineFactory(netty.mkPipeline(new RemoteServerHandler(openChannels, netty), false))
+    b.setPipelineFactory(netty.createPipeline(new RemoteServerHandler(openChannels, netty), false))
     b.setOption("backlog", settings.Backlog)
     b.setOption("tcpNoDelay", true)
     b.setOption("child.keepAlive", true)
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
index d03adebe9a..2c51c2cf18 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
@@ -13,7 +13,7 @@ import org.jboss.netty.channel.ChannelPipelineFactory
 class TestConductorTransport(_remoteSettings: RemoteSettings, _system: ActorSystemImpl, _provider: RemoteActorRefProvider)
   extends NettyRemoteTransport(_remoteSettings, _system, _provider) {
 
-  override def mkPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
+  override def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory =
     new ChannelPipelineFactory {
       def getPipeline = PipelineFactory(new NetworkFailureInjector(system) +: PipelineFactory.defaultStack(withTimeout) :+ endpoint)
     }

From e950045015e16659b34c9901b7daa34a0e1f185e Mon Sep 17 00:00:00 2001
From: Roland 
Date: Tue, 8 May 2012 11:08:43 +0200
Subject: [PATCH 012/538] handle barrier failures better

---
 .../testconductor/TestConductorProtocol.java  | 81 ++++++++++++++++---
 .../main/protocol/TestConductorProtocol.proto |  1 +
 .../akka/remote/testconductor/Conductor.scala | 22 +++--
 .../akka/remote/testconductor/DataTypes.scala |  7 +-
 .../akka/remote/testconductor/Player.scala    |  3 +
 5 files changed, 96 insertions(+), 18 deletions(-)

diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
index 4b9da03059..3d6c145097 100644
--- a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
+++ b/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
@@ -1543,6 +1543,10 @@ public final class TestConductorProtocol {
     // required string name = 1;
     boolean hasName();
     String getName();
+    
+    // optional bool failed = 2;
+    boolean hasFailed();
+    boolean getFailed();
   }
   public static final class EnterBarrier extends
       com.google.protobuf.GeneratedMessage
@@ -1605,8 +1609,19 @@ public final class TestConductorProtocol {
       }
     }
     
+    // optional bool failed = 2;
+    public static final int FAILED_FIELD_NUMBER = 2;
+    private boolean failed_;
+    public boolean hasFailed() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public boolean getFailed() {
+      return failed_;
+    }
+    
     private void initFields() {
       name_ = "";
+      failed_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -1627,6 +1642,9 @@ public final class TestConductorProtocol {
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         output.writeBytes(1, getNameBytes());
       }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBool(2, failed_);
+      }
       getUnknownFields().writeTo(output);
     }
     
@@ -1640,6 +1658,10 @@ public final class TestConductorProtocol {
         size += com.google.protobuf.CodedOutputStream
           .computeBytesSize(1, getNameBytes());
       }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(2, failed_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -1766,6 +1788,8 @@ public final class TestConductorProtocol {
         super.clear();
         name_ = "";
         bitField0_ = (bitField0_ & ~0x00000001);
+        failed_ = false;
+        bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
       
@@ -1808,6 +1832,10 @@ public final class TestConductorProtocol {
           to_bitField0_ |= 0x00000001;
         }
         result.name_ = name_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.failed_ = failed_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -1827,6 +1855,9 @@ public final class TestConductorProtocol {
         if (other.hasName()) {
           setName(other.getName());
         }
+        if (other.hasFailed()) {
+          setFailed(other.getFailed());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -1867,6 +1898,11 @@ public final class TestConductorProtocol {
               name_ = input.readBytes();
               break;
             }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              failed_ = input.readBool();
+              break;
+            }
           }
         }
       }
@@ -1909,6 +1945,27 @@ public final class TestConductorProtocol {
         onChanged();
       }
       
+      // optional bool failed = 2;
+      private boolean failed_ ;
+      public boolean hasFailed() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public boolean getFailed() {
+        return failed_;
+      }
+      public Builder setFailed(boolean value) {
+        bitField0_ |= 0x00000002;
+        failed_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearFailed() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        failed_ = false;
+        onChanged();
+        return this;
+      }
+      
       // @@protoc_insertion_point(builder_scope:EnterBarrier)
     }
     
@@ -3300,17 +3357,17 @@ public final class TestConductorProtocol {
       "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" +
       "2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" +
       "tFailure\022\014\n\004done\030\004 \001(\t\"0\n\005Hello\022\014\n\004name\030" +
-      "\001 \002(\t\022\031\n\007address\030\002 \002(\0132\010.Address\"\034\n\014Ente" +
-      "rBarrier\022\014\n\004name\030\001 \002(\t\"G\n\007Address\022\020\n\010pro" +
-      "tocol\030\001 \002(\t\022\016\n\006system\030\002 \002(\t\022\014\n\004host\030\003 \002(" +
-      "\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInjectFailure\022\032\n\007fai" +
-      "lure\030\001 \002(\0162\t.FailType\022\035\n\tdirection\030\002 \001(\016" +
-      "2\n.Direction\022\031\n\007address\030\003 \001(\0132\010.Address\022",
-      "\020\n\010rateMBit\030\006 \001(\002\022\021\n\texitValue\030\007 \001(\005*A\n\010" +
-      "FailType\022\014\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t" +
-      "\n\005Abort\020\003\022\014\n\010Shutdown\020\004*,\n\tDirection\022\010\n\004" +
-      "Send\020\001\022\013\n\007Receive\020\002\022\010\n\004Both\020\003B\035\n\031akka.re" +
-      "mote.testconductorH\001"
+      "\001 \002(\t\022\031\n\007address\030\002 \002(\0132\010.Address\",\n\014Ente" +
+      "rBarrier\022\014\n\004name\030\001 \002(\t\022\016\n\006failed\030\002 \001(\010\"G" +
+      "\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006system\030\002 " +
+      "\002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInje" +
+      "ctFailure\022\032\n\007failure\030\001 \002(\0162\t.FailType\022\035\n" +
+      "\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007address\030",
+      "\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(\002\022\021\n\texi" +
+      "tValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Throttle\020\001\022\016" +
+      "\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shutdown\020\004*" +
+      ",\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receive\020\002\022\010\n\004B" +
+      "oth\020\003B\035\n\031akka.remote.testconductorH\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -3338,7 +3395,7 @@ public final class TestConductorProtocol {
           internal_static_EnterBarrier_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_EnterBarrier_descriptor,
-              new java.lang.String[] { "Name", },
+              new java.lang.String[] { "Name", "Failed", },
               akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class,
               akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class);
           internal_static_Address_descriptor =
diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote/src/main/protocol/TestConductorProtocol.proto
index e483bf4f01..007965b2e8 100644
--- a/akka-remote/src/main/protocol/TestConductorProtocol.proto
+++ b/akka-remote/src/main/protocol/TestConductorProtocol.proto
@@ -25,6 +25,7 @@ message Hello {
 
 message EnterBarrier {
   required string name = 1;
+  optional bool failed = 2;
 }
 
 message Address {
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
index 7e3d315fea..2bbae6d28b 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -22,6 +22,8 @@ import akka.event.LoggingReceive
 import akka.actor.Address
 import java.net.InetSocketAddress
 import akka.dispatch.Future
+import akka.actor.OneForOneStrategy
+import akka.actor.SupervisorStrategy
 
 trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒
 
@@ -194,6 +196,15 @@ class Controller(_participants: Int) extends Actor {
   val connection = RemoteConnection(Server, settings.host, settings.port,
     new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler")))
 
+  override def supervisorStrategy = OneForOneStrategy() {
+    case e: BarrierCoordinator.BarrierTimeoutException ⇒ SupervisorStrategy.Resume
+    case e: BarrierCoordinator.WrongBarrierException ⇒
+      // I think we are lacking a means of communication here: this is not correct!
+      for (i ← 1 to e.data.clients) barrier ! ClientConnected
+      for (c ← e.data.arrived) c ! BarrierFailed(e.barrier)
+      SupervisorStrategy.Restart
+  }
+
   val barrier = context.actorOf(Props[BarrierCoordinator], "barriers")
   var nodes = Map[String, NodeInfo]()
 
@@ -240,7 +251,8 @@ object BarrierCoordinator {
   case object Waiting extends State
 
   case class Data(clients: Int, barrier: String, arrived: List[ActorRef])
-  class BarrierTimeoutException(msg: String) extends RuntimeException(msg) with NoStackTrace
+  class BarrierTimeoutException(val data: Data) extends RuntimeException(data.barrier) with NoStackTrace
+  class WrongBarrierException(val barrier: String, val client: ActorRef, val data: Data) extends RuntimeException(barrier) with NoStackTrace
 }
 
 class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] {
@@ -262,13 +274,13 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
   }
 
   onTransition {
-    case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, 30 seconds, false)
+    case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, TestConductor().Settings.BarrierTimeout.duration, false)
     case Waiting -> Idle ⇒ cancelTimer("Timeout")
   }
 
   when(Waiting) {
     case Event(e @ EnterBarrier(name), d @ Data(num, barrier, arrived)) ⇒
-      if (name != barrier) throw new IllegalStateException("trying enter barrier '" + name + "' while barrier '" + barrier + "' is active")
+      if (name != barrier) throw new WrongBarrierException(barrier, sender, d)
       val together = sender :: arrived
       if (together.size == num) {
         together foreach (_ ! Send(e))
@@ -287,8 +299,8 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
       } else {
         stay using d.copy(clients = expected)
       }
-    case Event(StateTimeout, Data(num, barrier, arrived)) ⇒
-      throw new BarrierTimeoutException("only " + arrived.size + " of " + num + " arrived at barrier " + barrier)
+    case Event(StateTimeout, data) ⇒
+      throw new BarrierTimeoutException(data)
   }
 
   initialize
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
index 90d7eeccd5..cadd69f786 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
@@ -19,6 +19,7 @@ sealed trait NetworkOp // messages sent over the wire
 
 case class Hello(name: String, addr: Address) extends NetworkOp
 case class EnterBarrier(name: String) extends ClientOp with ServerOp with NetworkOp
+case class BarrierFailed(name: String) extends NetworkOp
 case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends ServerOp
 case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends NetworkOp
 case class Disconnect(node: String, target: String, abort: Boolean) extends ServerOp
@@ -41,6 +42,8 @@ class MsgEncoder extends OneToOneEncoder {
           w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr))
         case EnterBarrier(name) ⇒
           w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name))
+        case BarrierFailed(name) ⇒
+          w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setFailed(true))
         case ThrottleMsg(target, dir, rate) ⇒
           w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target)
             .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate))
@@ -64,7 +67,9 @@ class MsgDecoder extends OneToOneDecoder {
         val h = w.getHello
         Hello(h.getName, h.getAddress)
       } else if (w.hasBarrier) {
-        EnterBarrier(w.getBarrier.getName)
+        val barrier = w.getBarrier
+        if (barrier.hasFailed && barrier.getFailed) BarrierFailed(barrier.getName)
+        else EnterBarrier(w.getBarrier.getName)
       } else if (w.hasFailure) {
         val f = w.getFailure
         import TCP.{ FailType ⇒ FT }
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
index f7d2fbd532..6e78610cfb 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
@@ -129,6 +129,9 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
         sender ! b
       }
       stay using Data(channel, None)
+    case Event(BarrierFailed(b), Data(channel, Some((_, sender)))) ⇒
+      sender ! Status.Failure(new RuntimeException("barrier failed: " + b))
+      stay using Data(channel, None)
     case Event(ThrottleMsg(target, dir, rate), _) ⇒
       import settings.QueryTimeout
       import context.dispatcher

From 9a33f468c0082ee6735d7f4684945ca06f531fcc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= 
Date: Tue, 8 May 2012 13:56:24 +0200
Subject: [PATCH 013/538] Adding a doc diagram for the Test Conductor Extension

---
 akka-docs/dev/multi-jvm-testing.rst            |   7 +++++++
 akka-docs/images/akka-remote-testconductor.png | Bin 0 -> 18288 bytes
 2 files changed, 7 insertions(+)
 create mode 100644 akka-docs/images/akka-remote-testconductor.png

diff --git a/akka-docs/dev/multi-jvm-testing.rst b/akka-docs/dev/multi-jvm-testing.rst
index 33c7dc7507..d19344e751 100644
--- a/akka-docs/dev/multi-jvm-testing.rst
+++ b/akka-docs/dev/multi-jvm-testing.rst
@@ -380,3 +380,10 @@ same machine at the same time.
 The machines that are used for testing (slaves) should have ssh access to the outside world and be able to talk
 to each other with the internal addresses given. On the master machine ssh client is required. Obviosly git
 and sbt should be installed on both master and slave machines.
+
+The Test Conductor Extension
+============================
+
+The Test Conductor Extension is aimed at enhancing the multi JVM and multi node testing facilities.
+
+.. image:: ../images/akka-remote-testconductor.png
diff --git a/akka-docs/images/akka-remote-testconductor.png b/akka-docs/images/akka-remote-testconductor.png
new file mode 100644
index 0000000000000000000000000000000000000000..b21353832670a12eb8130292416df62c6f1209a7
GIT binary patch
literal 18288
zcmeAS@N?(olHy`uVBq!ia0y~yU|z?-z<8g7je&t7;CZV(1B1J{r;B4q#jUq@t5@ix
z?|lA|&0NYP%fRc41iv(|+ZPQfp*;)Qbpw^OKK4!Q2yAi@IV17$-3;fZioWd&6n$I*
z4dr@VZuVJ99k*y(oWUWr$lX=3Pf&TWkHjBk9;1{6%j@@E-&`BM^w+j;yS_aA+$vwa
z{@trpuU=KXs;ztf`$}{(BLhi5=UBm#-7+BOGcY^``3puR#DGmgrFyi%hC?YZ8xQf|
zz()%i28OVX#nHUx7gxQyHqlS$6oW)K!>9B`U(e2OcgbFG_*MWzk2b?ky{#`w}$dbB4VU}j*LU_bd!a!a|%hU;9I_!$_WK0304iGcy3mtlgvFjRrTIv0?a
zp$cH0d~#l;UaZ1}iT^;+mU_8%W~ecUfcebGde6W9`}Bv~>;A0XS#tV(U44T6nXl%5
zGk%=EUQ@W{&;JGfQ{?}A>VKR(>G5_i^&j8!{=bdc|Fy1OAL>D!`f2vY>x8E^$IPF*
z^mBai!+A1a-W*Pq|5Nb3TK?aa=WYGge}4Ra>m9fI*{;RxWlZ~@%D$I>@BXcJ{kFoN
z?(1)U?LKq!eBHg6{a+W|e{H?}Z`zM_^}ocC3_m@;-2TI*&l4xFxH{9n@>WHxc-)_D
zx$EPWEN$_F)+Q+{SpRfPB`>}%UQ^DnY@&zw0
ze0Y4lu2Pp-UwqD*|C{6gew)V<_kH_Yt>;Jc{+fDZaQp4w
zcfWZhAcp
z`1PCbg11er{~R~1^5|qHgzPGXRQuWKED>OY+Nj=BL6EjhQEIK
zTJubHrM@cP-TMXVPbHDUCoQJj^uwp-5K9-nGS-us{grRFO;#}d^73ls2WL5>IJNiB
zjn7*=zxh~L-#&`3!fy7{PygqnRPL?n7TX)R@0+cy=gqWjbH8r?`uJt*Nu&I_-@A{$
zPrn%~UwZ4u)<3m}&u-6MZF|0a&HGvJC#23Re$sloFOW59d;9WtjVFG;?fq1$QzY|R
zu0>`#OO{@m<;2fX-?Z14*Ux_U-J79ofBe)gm&o|OpNnkWw$?E1h&%oN^R3F~`*(de
zzvH>q?lD(GA*(|DH2c%@C9lWw?wM0^_t%d(Yb-sBio(vj&kf1dbgGKk^zMs+uaw&V
zGvRqM`k!B2kKc2l?)j6|=bx&qt`?a0^Um{p>&jP0&Xi|-SKjMX`B!!Qo(FaJO@D{1
zUz(nIVJBzxy#uE=o;37&u*EiZ>-()S@hcBkum9un{(JSmO8vP%{(hPn!;@_*>hY8B
zrrZ2W$yehlHm%w0F&FYtd4_XD=f1(p+Dks|)|`{lt7d
zci<`AD=(NC7>P|1YET(UZ^J
zKI*ob9$EoKMcZD#=YI3%v}C$nDePxyiYat)No1XEiv>#O*q;S~zE(T>F=tCG1OH{aiff
zkL0ZU{6p{lWt47OR-SaLPOT$bezkgbkbdw~>odX?7uU=@`D(8#v;IMY8H?Ta?S8v2
z>VH`1JA>E1-gzSFTrC)DoXrp^Cbm)3uANqfZd
zGl}!$HqK>-CIaBi6+
zQfce_@MY?W&Pl>6_fE8V|5PP&`HFLn`RT!{4|uKplaL+tYMY6Y>65FUuY40IeJ{A;
zUZ41$A5R}&zkGO$$Ahh#7j0#n@Zex$pJs(w;^J>>4zwQv0Is@#*C&b4LN
zg)8@0E`2|3<`(hAIji@@$3A(+x#vvHwA%SgUaed``{8ZrkFV<*zOj9_$}^3Ye$16H
z|F(VqVMa)1WW87R^vHVVsS0QMXC{7_Ww)!k=~_=p;J^8)x8>JX#j{19Z_8@7e7*j=
z(21{mR=hfy*=+j%Jm;kEMTgR>zwTN7&F!@-lm71j+hEIwS`V_VOM5oG-4&2ByR_)q
z<^{KMpDsCnba|T5>h}@n7p_VVz54x3)uw+d`a-||-g?l&|H0WHCVSh!$ojhV-;^fa
zxBoq_cE#V^e7pHqP9F!=#Ed6EIWhg`=XI=cOltN~zvnF6x9jbf02PnLb8gg@KL0uK
zQ=IN}$*s5U&6ir`f4167dDGi{e@}AGdAV-!n*3X9l3woh^bguLee#0cIZdkf*S}x(
z@s9h2T809@buj^(_N_5*oc#Wlt(&FV&zxzeT^H{2zQ3)AC+J+@an<9SqQB3(H>>7-
zOwX;iTkh+>FJFFb-nYL(0hRgFKK=dl^Uwb1eHZ5`m*y1ryfV+3?|%%%x${`$F4lFU
zwjdsJ!I~-1s?`V7GEn)z04^5Qej0+d4tT``Nto^ns$%|+p(#TMxi074GC(b`WmduuRQ*{
zGt7^__QqjoO>caW?aBG?p&ta-Zt%O_#*`4lcw&C%HS_wqs~*!Y&wHsKt$r-D@94aj
zNpID`t&GatlHVTx?-ow5Pp;pYynn;<%Gjb2J09Y
z7!GVIc=_O;@%?>g5*a1J85kH6B7b=H7w$a><4RUsTJS$|alPzIk#=@)lfI$(*(uIr
zrk~CPHWacp^zO5(mf>U2VK4VLTxVZW!Eg-XO2?vwc
z2VeS&j>9Yr3=9VA4mf^1G4b8(q^S309iR@-F;R<*yje*x>q{ya9FkZ-LBKbQd%47Q
zV;MPy2@;G9G4mUnpFKIak#pv*WnaVE*`OXvh}^O6_>QX@(Ly1M)**S~jP*6SCF
z{#W^T)LtUoVBL`&M(bRJ!aw?}{5!a#sZcd3Ms=Ob>sdSWet&iTGx-PmV=kzg9_`0o
zM|K?gD%qng_*kn@wNv}C)<4fb%stu?;Yl%);Yyv_kG+r$=~?|lJt^klFP>vlI|`@n
zm~kLPzZ6|#bC67q~OS1c^nn+S7*S4^kMx<~Up{xm}rc*S@=;`-?SNIoIv
z;0_^>lP2xxs#gMe9_mDNey8>k`_B3u{|wh9#B>%;-67-|6Rv0|^dIDErXFp>bs&AG
zb_f}+E2w1yxdCLwsU47*;W{=|BK+7?k4L9=2zAyQfQ*m`?*xU#zfOtp9d^x-uv00V
zD$xm!N<11o{}`_WyV`J_i_mpnk2`a`p>D`sQCR!SLtQ@Mj3=u1s_gybU)n1FebgX*
zDd+dQiR)xudcK~uLvHWWZx2J~@2>mtF?+wwpX>F%zdr
zJ74=u+njIxk0)CXpRfCR`!J|~x%c(|bGiE^*DL?`yWKDU_t*L3+qZKa4|#NIhnr=1
zvFwiz%k9kfsXh61Vf!6W7j&D%iXFG#7ysR6{eGt8`@s9(|6DTPXaDDU_CAY0U&QyD
zeXM?F74Z7RwzRFq7nfwWgCgx%X=~|UUwf&2Po|3V_*b7vJ}!QS-R{MbhsNhBzgZOC
zSzqhHym$6%P&f7QeU1BT_a*FW3<>vibarvFNzQ673Ils3~&AeW8
zuHjzg|ySk{sES{c_*LJeikUmFI5h>U<6Al@`8Ht*rk2d!0o2j+8Bi
zoOd7d+1I5_+QxOv^uOA>UsD91nK7RKw7%l5>tn%XM{?W}=hffyZm+$5N9~}r`o+oL
z7OzX~(SE#(YuVOw-%I44@97IXYx?kPe*O0ylhv+wU)xjGo4roCQdfCnb!OD)U$giX4tj4f(o|zq%sdGXIgy
zYp*-yz1jP!t#1F$Gw!(bFkDmzR5~U2_GI^DO7(tu;P+~#Z*!3C{JYg&pUS-|m)&i?
zwl`eeGbP^h{+71w@9JN^OI~Yy`~B_zu07e`G=H0A{*PM9UUA9RLOU(yzl}v$k#&sm
z-JM>Y*`<8GUWbbIee0d~q}<0cQRt4&-f7Ru1g)986q{e^od0Z%QoK+i)5NTpB{QC;i0rU
z{^?}7_QN~gg;>^KR`WM1URrPS-SzRLhr8Bqy`n9Oej1aXb@NDl
zT=w<(*Nb~VLmYka{$i%<4CjLeHH7cipPMM27acBn-ROJmREQv@fa$eVQXqm-f1wn*6Nysxy*`c^_Id
zuWeKHe}Av`TYu*q-*N2EG9Sy|SKh@>%dFo0IodsQN%l^eW>Zbkb1CjKzr1*7>GO~^
z`uYF5>}z#*jW1nQw#~jOblq!%(I$iZ%=mL}=PWPPuR7{9>15f?sC=t=uV$#+`<(BS
zR=!vAQ>u31Ol$>9lv-_Rz**evaaYuHj8J^o@IPQy)T=C#RV6#!Bs-@pM=I`ga#SnJ2ec@+SO?@6Elfkv7}#SmvEm
z6P6vfbTi}3mwIZlDYE=)$ob@$)z>Xf*0=>N?vW^+{JCcR8}Drs+YC>atM^4rJ~{K8
z>`U(E-w|b9o?I_WIAu(-Nm~nb!J;t7ySz_nvtUSJmcK;eJOg%Tw7!2zV6)F
zwY}`fratF$Pmg#CJ?^>mJ@<3w9{#O!%^KGQ&Rn-^
z@sYQCGdq9hXkTCMx%gsg%;^PXr&~{7P+KP%eysGSDD(8cen$`GNnO{Pex4&u`t;Rn
zq3!&|Cqg0*6Vt-r0Tu68JGTbk?ZH*wpZz3WRS75ZA=3D=XkJgWv&
zD|4-xSs0sQvSjb`b(5ZXwrvx-m6a03e53EnvOgJ7D+^2)voB05TS{(%Fo>#oovG8e
z`utbtanyF@FK3e{zILAa+OPZRk-*)(na4^Ep7$L3>akklMILLY)xzCxbF*Vk_iR6X
zCHrMdGPqPsi^+bHBKUaL#ePq=by9n^Y}dIhi=Gsr+L+sHbaKZw;eai|JGMOa&3zQN
zbn<73>#y$SrJKGrJC%Cq^bS9%pOKHB?`U)9@3XCX*2kRVQGEZ^Q{}lE&vj<27Fqre
zdKA4_&~o|dlw&iFm3-?>Q(??Ke|c5U43XoZxf5@ArUz8}o;I80^DS51Gc{?g?fhRp
z8LAf~T|NJln!b(uC~;xNja}|r-BNGe42gbxOILhd;Eh)&l42yUr-rXmExY7sb$W+d
zpD=gr#yPJ&t=iI#7HymOWA^bK%Om#*&)v~?t*&a6S
zyuL+ZO|^gVzH65ErZM|p-lAe8Yd-bM(ipqNnz=J`qxTw||9NG)f?iTg@%bvD^O>hp
zOqTBTE9uR?`(5nt{P$Ph**`wDW8I;@?>cu!e!M$(e$uo;U-i&O+4myu@js4|4Xo{F
zoP8zmp6R;M+%q1frTea~bp-`;!r7keyZW=-axdj9%k)?_2q?{n0viF?pe_#E@
z6XBM1>*B0T&y=ItllINH_O9oMVXdik!TgtNEpF*PjuKw_^=PuS*Bv{%#n~~pf8YBw
zSLiXQP0>~C@AGQT*%!&*Ct7aq$-d{`?swl*ZDY(;q2HM?+F`%*QeWIV7ZMsB`#5TD
zq^5Y@`}FjfgFB4YML0jc^{q$PfM=j@D$*gaYK>ZVfv*Krcpzb$+k
zYIkL>%qlOfbjj;)+-=?0&0V6FwrJVQ^=y+1Q)9k+87^sF%NbbYpFX$yr@7j?w9V^&
zteMz4XY$&WYd-JQl1r1k-nDd=$WrUW`L^MvOHbw2T=^IF{9>Nu^&?TX##XY)Z~s0q
zyeT?4*Xh3Lx=_72@2^agFj^-)bH~~@w(nNjocKB0A|u(xFiLpG*11NTd$LO{j;m~Y
zTNWB{)ig={*w(p6Yc2*oj7nb@`PMj9`1eW6UBZsvB(H0UpEJx>uSo$H0U0s3C*AB>
zw}jEtdBTz{Lbs-HZtu*NT4N%T>2Ntib>bPL=5xg=;)d%=<>UXY{ryHw_OZ!5&%D(O
zPkqwlmMM`tZhZc6!733=&YFGRKKZIA-mcJltW|i|J9WwyHD$@1vn+T1?a$UaZL^G7
z{dtUU=&DnI=tD*e#j%z)KzSf-hRcE@ppwIui
z-7UMvQNQoh9#aOTzOGAw!RP&Db8MwnJzAvC_pGun`>a&l#yQzX6SA3(7#;7HxE`Xq
z_4=9^eo$*-#nyYj@^b$?x4Im6{#Q@-RBrcNchSdO$3!E7A4l14^8Av-v0CtiM@mnO
zoc4G1^K;)~!#~|qN;7w3DuI+vc+N2aQ*6H-QWfKP$?vy$P+9bl*i
zkeSGR_GZvn54g{MEcA~sN^f`7PSDT*BS?qgI-zi-&ecEEh5kePs+jyqJDmPZ{?Yzg
z{xP_>i|HxP7}a%;<{#M)?hisc(7Ud6PW+(`>ZgOc;i)m5pni2nVe4ySiEwbh356dE
z1&uu*jZYk#iq!A#lnDRmk1`gq`iKWX4Ugu73V+aGgzBUnc}|a>#ejnf5z`qK^SUN+_xbnU*$Xa1hHea6-TOYt;ncctVhU>3emK@;xIB8;DW|Efj+IWeVo_xFt
zhHMwk`_KBZ=hVZ57{!0j4c7lZxsX}x*#!^r-V%3V@14`XCO0R4-^LWNsqJG{J9C88
zxfd#nzvq~)0Vz+Pc|AT~^`W}ulmj7IZ4Y+O{_*qjgB!Xf%Pnjgjz}EHS@*`%&6nli
z4yQl<#_Pkq4>az4F2+2R_qH$CmnrLFHJ(4;D}`n5QWnt4n%h
zz#x?DP}5Y{`XgOq!;}{aj0%QqN7N<4JO0NybE=mKtGoIZXx`eWrTXU2~Iya!tBPWN_hW027({b>0nfZ>s_ckA(o
z9shlM>)AZpnIJ|U6Ma;#8ZfD9gT_O)1&J&QcA(^<-{9Hsjel|);{^kz8K-VGCoc^T
z^m?>P=k==#bH
zdpdQhC)io#eP$ag-yc{a+3@3d-o`v(k(o2N4jeP>QMXn(aOuM)HM{l277QQx+w#xy
zS}WRI#(&Q_`g|uJyTZSX%a2!2^~yObz>~08`q*>}nFE&!uRe%)|6j2^N%^CN
z%t!ry-{+^!yK;i(AcNVy#69kZIdoLh^N(tP!X{^(r-`${1}E{p?u_$a=DY8@S$HFO
zub0GQ7K=9mpm4A-;9I40o#)^Iu1~KI{*}M&v?VL*y3xGF(VeEC^+g9i`6X?7b1d}F
zWwoRA_gy@ye@YyT=kd!qr18=7)^p|ro6EPkuOmCWVxUz~m%q?_kl3n36<%e~AoH=Q9w3WAqtcfQy6TmVO)XUY
zcoJl@P`Fb6VZr|@|0Khe-Wr2OnT0YXK=w@q4IpR@D
z4qSRjFtX)``p5G-gfiz#hJy$FrQAT3s?jisVWZ9r*qJ
zF8GlaXn-8*uq+AB!zXrFP457OHz@EyxC7Ks>RJ6m1rkEAxH)3~*lS7cz4XUbr_a~f
z+x#~b|Mn{P{jUoje$B4ivYy@k_TE2>ufI9DYk&Fg{C3bXo#f^z@;^cIBR22^4lgpXH6CY3HPuaiz*PXAA_xIl``aLi2`||z2Pk+ocS|@b?LT~)SYG4
zZ9UqLt>$Z;`!&VJV&e_H`G4%+m&V`kuDx|Wx9hy9`MSt$I~RP>O3UBRSGP^Pc-!sQ
zy)Ua@9G0_-`wv>VkrRh-0xO7a_frZYGuirkh
z_0tqH^AkU3m?f|G{VJ`u^UC|9JJv;qMz8m)JhJj}%+6`+=M?L&w#c6Ut$6mMV&9(Z
z`H^Q%oB$0l|M?f!|7mK{sj}+mm!4k9Gvv3;R#^VA=9uOAd*#jhgdfkUFRuAjwWR#>
zkM!9+tB;hLe{Jlqd9}my?Em6<;1x~#e!hJ#XZJ&qU-EU^?EK3NnIP8%XzBXZUdcS}
zo3)NLbN1)=pU+pn{Fv=J^ZGs;)w$&YtLjh?@ey8idZvL7+YkE5Qhs{Fqwj?{H1EH|F0SES)MF8VsU!eb@RoJH_z{Td#>2$^UcaW*WMgIE3vKov5&3J
zf1j6r^Os%i{nBZLcYEKT*pc_l)pYO9+ULGi71OuAYV_NYSo6z6ef`QpTcv;H%R;_B
ziKzK=OaJ@6|F5pc+tk@}zOku1Ri}FO(fq<#_QzH)|CXpc|2reX@Ys%ZYYKZCPyPli
z$lG=|f6~R1&m4Nco_TIpe>)@7i+Qt;Ti$Ay531|;B>X9l^FK6e)7?Gue*12|x96Vj
z+05yyi`sJRPnU21`g48#_aECX{g^6PeXu*;?yJR;<>sfiz4lo=>+huk
zwlaA}+@l$fwMtKI%y@slyYuwFrxsxgmL1=DCRR`6r}g^dF7Y3Zc^^Bz>aXqX$C1Co
z7oYf9xAm`;U9GDB*4|s^-;|fir2PF_cWq+n^?y(A=PW+8>y>qxHS?#LHPc_MJZCuj
zyPeyy(%Px-|6bXAZsw1=b4%?W&#LfCp7p&buGD&GQQpx`k(L1n5}r$EI4jr?TSg=68o=hxmtK`C{
zJyZAbd^g+Ms{bJmGl^6w616>Yz2@chp@ziD$G-#ou+Uq)|c?(#<~dZjyG
z&*)hdXL9MR?RUSmn+tQlPM)YI`*Px=`b}D4yXU%hKhwNh9V7f&bbe;C_O#1xTXPeS
z7CqJ3>F08_F>a?{Z1JmG8(x0jWwh>fevG#GTEXk@{w@=>?wrh{rpoUVqjIdMZp$;%
zkFncdsIKqPTA%y4@S5$qt!IxE7d1Zldoosd_Fb=2PtI5Bo=)9=D_5^*Z(>aNksW?P
zQVnGyrktshcI~@3d6)T8x0kP2-t?7CD?H12)mP{CG0pc+SU>r0-G>Oa+%
z?aqvuyn@fFI8>$X^0sYn^NtqXww#*n^Hd~PeY=s{u~Oll>|57(cvSr+T@L+ib<*Y7
zmTfx=*^F=TJuLaBvhPygw$o4lpIklpdiXp0^@V56ZZmF?y5iM$;aBUr#&c$$Z+dQL
z>N+oKw$5_qE%%2hCJRrcd|F)I>+${A%RhH@bJq%Q$gVgg^Yv@Ow%%af@b5cIyg5F{
z30*9Uep#3~ef6=NiSGA8cDlsRt>4b*yWok1?=BCOlk0vv{dUV-m9r|}rQ*`ZH&WO4
zoiCIAX>ul3c;17)mv8P#fQH+OE`FCa{x#?6>zWtFmwHwo*_x!z8y&qa?)toMVJ$19
z_S!MeIln}GYj0!T%)-0d@4YoSy{*W0HJk3~EwcKP*D-FLyD$49o7y^kxtFUZ{xg|j
zsQh%&RyWxvW&7TBJN%C4{hR-N^`TYoUoyQ-IKStZ>HOET-hP`EP@GyaJ^T5q
z$mfCKOAqy~3SVsfwuUF%O*`qxuJ60fRn>T#s%+*7w>}m4Y~_@R56W)d3j1Dn>dU@o
zUtQ<%Myvl^5_pTHeCh!|sgpVG$=Tnw?Ry+^`_=CFbz8o!oWOOp8
zbk&rXYunaZ?Jweeou_<#nc22%>$`m$d3|I4C;MG?tG_(=QT=v~>W9gDN}5WRYb?=IsiPqj_!gI47&J>B3@t0!7}Gi~*(^~tdVI_xN@W66wRNfPfloA_pX1=^&RJoV&2xK
zMuyLSl``+ko8?}9yH$_WP7X+&$&zAPGPx&Pa_5&%e#YzGt~oHduQ)ebzc*XzN!1g7
zp8CFYA7{6Puk8-amAd}x?CQe}g{d*(p*>mWPD|V{sVe333ts2BIcWiCDqz`Vwc9Ih
zo?RWBH|y1&^>1@-wfF=}9aGQs^(y%;v+Qlt+8J@?XMf%M-c$0`8y=NRK>ovFvB7T$eQ9h3fafv9z@+~Tw4uf_I0nH4)ZX4ReAxW`u7aj%PC
zE||5|_v22+V_TQ4^FMU`+@;&PUuTBi+O?u^t
zxXI@$Q|jE%B7?OdJG_K9-_x>vv)e-@a9-J*d}6D
zYn7Y+@~N+{pQt^zf0z9pw}kXk3*Kcm`!w5jtWsXJcy0B%^TuE8A6sorv?*VrpELES
z+!Nb%zyDgy+4p`{b#1=H%hc%a46k?0`tW0Q&4^v~(
zrLWIYUNoCqE$;P7<-Rc+d>E2}h)ZB4(j=*-O}Ri91tLhGW`Z*)ps&z!7s
z)g!)lnUB%Bx14jAY<+Jz$<)#NE`Rv#v*qvZor(Q^Cakx9Dsy>t|KytO!s5PG=OXL3
zO%+_SO?iRsJoT&?^}GLPf4#P*GgK*Pzi)I^C9C|_
zhuB>Cn`OG{)?R(<(_w$k8m%)t(s@`aHn%$FwcN~oHyexd(yh1JJhloyJfpKiPv+&a
zwVYjMCwKfSSkmoo<@)D*-Mz}QN9;3&_IF)>_oQudPxh_TM=J0C;SxO^Dz&GhaINHV
z$?#)S3;#yHxzgg18$GLHUCnl{@4t%uc2%i~#7uFcHVGrG?EcvsV8
zW7U&u0#BH5d~IGXxI%MXa>}Q2shK*_o7>(^E6jDzHhML?O@02_i!-l9_!+M=Y%%q%
z^Sj65*AscByjN{``F1ak-M7zvKl5FmdHMsL$ur+xO-}qY^G*Dz`qhc+s_mDT3WV$|
zkUFXI^Jd((=w+W=db7{|5L2FT{+P@YW69oC_t$I-JUBmf^IjDeL+-D&uL|9M*2JB*
z+qO3!)Ro9}nr5Q)HfN9J_Eo~G=Bli;ntUs(`$*NU#3|t=aE+NlJF%cXpJ8ZhpFK`KD;G3BoJqRfl;d+8!>s
zV0`&~r^NMJan_6>euq@%F#9v#^}wTw|&37
z<^1$nrkmy-+cnoflQClP?%jr#!F7|3R+g^MX4)mxQDRr(zW)|?pq0idktKio?iM!}
zPTlcMb7NF*^w9#_cbOZ$_H276@~zZPCG9rn(OaLhrxoU&U%fBrr%B$`%FxMHSITY$
zxg9H2l{)s(EK|NMw@Uwo3=`lB%7Z8X1i{|
zzQ;>`x0YwFDmzx{JN4p!i4&_{d59j~lCHDd@XX$Ok#?ucylaiBX1C?tG7g>0kUsNW
z&*Qt2+k?N&f2(rxt@hvZR^L8XEl*5|F<g3(OuiQ<$Hh2GTH*fW)rz>l3nH_!3CJh<{IqU%)jZcrc9<9|68V3=4>;)Q-01w+2
zszS!gg$&m{0*{^atp1S#8ZY2g&XiF62U_Unax4AVRM4;nj
zTzl1EQrkrE5=iI}3`i~9M;)M%4^_|v3uyV`QFYMx2+Um2tVBA=FdcBs_);q<~f(W=entR>12F!P7X9;ScSJ-)4fWhK%-r0~c)w
z2_+z=8|<^;YbQ8bH4$VacytN01`{4j&=D?hV8J|f{p7K!8ArEXJN8xgeTDtSQ!f|t
zS6hJBcA@gNt?REQku6-|*{{FKzlefeQtmgcKi^{S4t}eTto}UB?
zA%@ilS8tOlS`>QqsJGps%IQDUtuFkqPhPUx{a1E!M7_nO)6;DJfyR*-85kN&pTBHQ
z4z15kJ$P~AyxmH&;h@Espmo)=itc%AH%T*v%uM{=BM*`Q?|v+p$
z;940Xg0H2vC4dqrD+9xUSsU}<#xPuKDV?i00~D2v3=A6{t@#Z%vOzR8816ELH4?MU
zV8$yHvW7MI&Vy;2u!G6F2kz)j?G00AAWL-;DOCd3#qhXm=@EoGNM;|#s+Z5EkoA`V
z%nuC5M7uTR=cgH-&Hi7#q?+w`8sES9&B>cN|KIx<6|tRfTDj2bSi^OO3@)pevCCM6
z9Q*3ytINP}qxZ#?D~f?(gZ8nn(f0pDFP?h2<+x3ma?k4fv%+lb
zB*PgP!j4GX+xq19^E#a({<}ib;rmV3R@4}+1Et6wZO(=`BBJ;sJUcvq>m9*Tn
zwZ2~VrRSr`;LQWarb=8-ee!B;?Vi#N6(=`940lNK>3Kb`_*24j)x%JAorZ?%ejl26
z@SIoMQFU+#g6#j^J9i3CH^fSnV@GzpleX5B6oc63(bHLYH@uHWOdBfFP;1;K*Y12|
zhgdgU(_<||wPU6CBbTH^)CfK10%cW@P3(sz9;|`cePD-IPiLX6!85qg$3j64Y&7(T
zIT#We{8N;hU!v(MST+^rK(Jk356lcR(y{9VrMU?b!jE@}$Vh&f4pRd1gC1`!%nSyQ
zTfCj)Vb*}dC8#iE!(NzkH*j@+1g*9H5N-eO>Icvm^_kRH?{#MHPhUCh|3Ur#7XN;4
zvwlBwM{iV`T&iv0zyFKl|5vW;OaG>Myt^ei&hW2JtfHGbXO_gVZum%D$a_PotG
z*XHiuEO&ek$ca6#%|J6Jz285A_Qw{_yf)`*`Rrv;|3Isnf4m2+3>V%Wc>2uZHR`AS
zgBFYbE}GMKuju!OPyKP5-)@br`}X_c?0Z!=w_V@lZyDF~`qHIoJjUyaS3N4ewezm{
z^Rt#Frt>!EBw5y{+xA&L=3+QtCFR_I_$KS)Uk%RfZSlX3eHS-c2O7G5tWrEl%y3=h
z`rkim)J{fz|C&?3zc;(@{Ll3}3vOq(&;M}b<3Gc7+Q$Wtm8Ksvt$IIoe#D6#c3%?t
z=U2^L@rviz)IwGUP?zz!(K^GV+NXOa_her`p=%^$+`nw*fxl+!YVVtODnG8WpJ!)b
zwyH9qtt4(;T8y~)x}APDKNR_ImT4c*d3>vPq9-WHgA&QH(q)Bfmuz02QgO2vy!iUz
z?R#GrK3pAN|FObELv_#FP4fG{od$1kJ}jNLFSp{Lw7%rN*Yp3S-mWc~|MOJu<4O0!
z_q<#5FjfA~hi%m%pZ|NUm|u8qbAP`6|9PJ}f{&m2zkdHq_u%h8XT9$0+isXCAAV;C
zbSU&ef_uj1x4YeU6>a!ixwdxBnH_q0F~zkf_JdcY-xWW<1C)y!3hx#_o~2^4bbqbt
z;nO)W^Xu<=S#+ZY9fE&l6eZ`vJ6Nw%=RqwfV{NGbsk=Ek7Olc>m{{
z$@9yueQ4yj%3L$!ZjIUL(54v^m;JGS|MY&2`KkZOZNI-+-afI7Jv#Ng=a0?*-guw+
z!XWuF8V{zs2|wYw$yR&!-x%>)d^$_TPLzIdD8M^t|3Db^VY1;V9!KrUUn1>Z_0~h(aGiQu
zO!2v8#Tv2(oy#MSZCxew>q?lp!MYx8P=)*0>U+<@sWr=%d~-d(4+L5jHV&><;(6^7HmIp7$wX56dFK;Q&sLX6r(Y?x;^}{X*+A#>c2+oYLDqJc6|GM(t}iyt+8sUyDm?9GDR*uP}tbCwlMEm42GtN5{0sdt;Y}6rZ)Jg+^{FN9<>wQ*^d)4ZFW$*N
zduu(Y_7SeTw(7`pBkAjTCpT2bfGUp-w~kdk?aAJrQX$)$UHk9t``t;8I$ler+x=|m
z$xaiJ)2X!lz^9&WRlY=@r*GnOyq53}$8%L9i6L(l7f
zQrEYo>_~eZtNwV_#YH_@>BsKBdv07crF4Gzztzj7t{<1J`xEkbR%bff+~2vk1K&>H
z^Qmd$&e(^?AEqo?%0d0@J-pbQ+&^#UV}{=BchP%mmKa}CwF^JEBkbgd1?h)WVg8sWecdea
zZti0%|4F;9A2a1Fmpn0rb-G}$=i6!Dr_Gc#S-Su0irE_f-`{@Tv+LT4(p=@#6V2z8
ztFIl{(RH5JbY1RA14t9I^W!{?laIEz7%KNvp3B_r!xR5+-SdU#f`o4B$RFN)q9gm>
ze3QzcYg519PG39CC^`T4&Ug3kzW)w765>z9T#1i&mQO$auw;5S5cK(FJY?
z94Jzm2d?EITn2Eh3!W%vWMBZb&b$1TKn)mB9S+ljO%Z5z9<(F<$PATZ(8f4;tr>=e
z44_IIYN$sKXm%gr6)&)-Kp`kP-vw0KM9e$#1JPId;U)oTcG-Ccb2A*sQki%9kHwtd
zT9#^2iF5ZYyWao*`F7QhoD2*S*AG4jRgU%Ba%fjY?|OMq_YrjT323J&17u4i1ITm+
zP;my@+z8rz3E2b*;t|Aa6y(~OL8_o;ftZAF@#p`Ht$qdRXIdBLgVcJu`njxgN@xNA
DUVNTH

literal 0
HcmV?d00001


From d931a6e727ee70952136f42250132e28a8aa34b4 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Thu, 10 May 2012 10:24:05 +0200
Subject: [PATCH 014/538] break out TestConductor stuff into akka-remote-tests
 project

---
 .../testconductor/TestConductorProtocol.java  |  0
 .../main/protocol/TestConductorProtocol.proto |  0
 .../src/main/resources/reference.conf         | 34 +++++++++++++++++++
 .../akka/remote/testconductor/Conductor.scala | 12 +++----
 .../akka/remote/testconductor/DataTypes.scala |  0
 .../akka/remote/testconductor/Extension.scala |  0
 .../akka/remote/testconductor/Features.scala  |  0
 .../NetworkFailureInjector.scala              |  0
 .../akka/remote/testconductor/Player.scala    |  0
 .../testconductor/RemoteConnection.scala      |  0
 .../TestConductorTransport.scala              |  0
 .../akka/remote/testconductor/package.scala   |  0
 .../testconductor/TestConductorSpec.scala     |  0
 akka-remote/src/main/resources/reference.conf | 26 --------------
 project/AkkaBuild.scala                       | 20 ++++++++++-
 15 files changed, 59 insertions(+), 33 deletions(-)
 rename {akka-remote => akka-remote-tests}/src/main/java/akka/remote/testconductor/TestConductorProtocol.java (100%)
 rename {akka-remote => akka-remote-tests}/src/main/protocol/TestConductorProtocol.proto (100%)
 create mode 100644 akka-remote-tests/src/main/resources/reference.conf
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/Conductor.scala (97%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/DataTypes.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/Extension.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/Features.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/Player.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/RemoteConnection.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/main/scala/akka/remote/testconductor/package.scala (100%)
 rename {akka-remote => akka-remote-tests}/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala (100%)

diff --git a/akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
similarity index 100%
rename from akka-remote/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
rename to akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java
diff --git a/akka-remote/src/main/protocol/TestConductorProtocol.proto b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto
similarity index 100%
rename from akka-remote/src/main/protocol/TestConductorProtocol.proto
rename to akka-remote-tests/src/main/protocol/TestConductorProtocol.proto
diff --git a/akka-remote-tests/src/main/resources/reference.conf b/akka-remote-tests/src/main/resources/reference.conf
new file mode 100644
index 0000000000..f0d8a9d6ae
--- /dev/null
+++ b/akka-remote-tests/src/main/resources/reference.conf
@@ -0,0 +1,34 @@
+#############################################
+# Akka Remote Testing Reference Config File #
+#############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+akka {
+  testconductor {
+
+    # Timeout for joining a barrier: this is the maximum time any participants
+    # waits for everybody else to join a named barrier.
+    barrier-timeout = 30s
+    
+    # Timeout for interrogation of TestConductor’s Controller actor
+    query-timeout = 5s
+    
+    # Threshold for packet size in time unit above which the failure injector will
+    # split the packet and deliver in smaller portions; do not give value smaller
+    # than HashedWheelTimer resolution (would not make sense)
+    packet-split-threshold = 100ms
+    
+    # Default port to start the conductor on; 0 means 
+    port = 0
+    
+    # Hostname of the TestConductor server, used by the server to bind to the IP
+    # and by the client to connect to it.
+    host = localhost
+    
+    # Name of the TestConductor client (for identification on the server e.g. for
+    # failure injection)
+    name = "noname"
+  }
+}
\ No newline at end of file
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
similarity index 97%
rename from akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
index 2bbae6d28b..b25bd1838c 100644
--- a/akka-remote/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -24,6 +24,7 @@ import java.net.InetSocketAddress
 import akka.dispatch.Future
 import akka.actor.OneForOneStrategy
 import akka.actor.SupervisorStrategy
+import java.util.concurrent.ConcurrentHashMap
 
 trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒
 
@@ -91,22 +92,21 @@ trait Conductor extends RunControl with FailureInject { this: TestConductorExt 
 
 class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler {
 
-  @volatile
-  var clients = Map[Channel, ActorRef]()
+  val clients = new ConcurrentHashMap[Channel, ActorRef]()
 
   override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = {
     val channel = event.getChannel
     log.debug("connection from {}", getAddrString(channel))
     val fsm = system.actorOf(Props(new ServerFSM(controller, channel)))
-    clients += channel -> fsm
+    clients.put(channel, fsm)
   }
 
   override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = {
     val channel = event.getChannel
     log.debug("disconnect from {}", getAddrString(channel))
-    val fsm = clients(channel)
+    val fsm = clients.get(channel)
     fsm ! PoisonPill
-    clients -= channel
+    clients.remove(channel)
   }
 
   override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = {
@@ -114,7 +114,7 @@ class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAd
     log.debug("message from {}: {}", getAddrString(channel), event.getMessage)
     event.getMessage match {
       case msg: NetworkOp ⇒
-        clients(channel) ! msg
+        clients.get(channel) ! msg
       case msg ⇒
         log.info("client {} sent garbage '{}', disconnecting", getAddrString(channel), msg)
         channel.close()
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/DataTypes.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/Extension.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/Features.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/Player.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala
diff --git a/akka-remote/src/main/scala/akka/remote/testconductor/package.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala
similarity index 100%
rename from akka-remote/src/main/scala/akka/remote/testconductor/package.scala
rename to akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala
diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
similarity index 100%
rename from akka-remote/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
rename to akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala
diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf
index f14ee3d87c..1438904fe2 100644
--- a/akka-remote/src/main/resources/reference.conf
+++ b/akka-remote/src/main/resources/reference.conf
@@ -155,30 +155,4 @@ akka {
       type = PinnedDispatcher
     }
   }
-  
-  testconductor {
-
-    # Timeout for joining a barrier: this is the maximum time any participants
-    # waits for everybody else to join a named barrier.
-    barrier-timeout = 30s
-    
-    # Timeout for interrogation of TestConductor’s Controller actor
-    query-timeout = 5s
-    
-    # Threshold for packet size in time unit above which the failure injector will
-    # split the packet and deliver in smaller portions; do not give value smaller
-    # than HashedWheelTimer resolution (would not make sense)
-    packet-split-threshold = 100ms
-    
-    # Default port to start the conductor on; 0 means 
-    port = 0
-    
-    # Hostname of the TestConductor server, used by the server to bind to the IP
-    # and by the client to connect to it.
-    host = localhost
-    
-    # Name of the TestConductor client (for identification on the server e.g. for
-    # failure injection)
-    name = "noname"
-  }
 }
diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala
index f9fbfc6c4b..b899bdec45 100644
--- a/project/AkkaBuild.scala
+++ b/project/AkkaBuild.scala
@@ -87,6 +87,24 @@ object AkkaBuild extends Build {
     )
   ) configs (MultiJvm)
 
+  lazy val remoteTests = Project(
+    id = "akka-remote-tests",
+    base = file("akka-remote-tests"),
+    dependencies = Seq(remote % "compile;test->test;multi-jvm->multi-jvm", actorTests % "test->test", testkit % "test->test"),
+    settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq(
+      // disable parallel tests
+      parallelExecution in Test := false,
+      extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src =>
+        (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq
+      },
+      scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"),
+      jvmOptions in MultiJvm := {
+        if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil
+      },
+      test in Test <<= (test in Test) dependsOn (test in MultiJvm)
+    )
+  ) configs (MultiJvm)
+
   lazy val cluster = Project(
     id = "akka-cluster",
     base = file("akka-cluster"),
@@ -438,7 +456,7 @@ object Dependencies {
     Test.zookeeper, Test.log4j // needed for ZkBarrier in multi-jvm tests
   )
 
- val cluster = Seq(Test.junit, Test.scalatest)
+  val cluster = Seq(Test.junit, Test.scalatest)
 
   val slf4j = Seq(slf4jApi, Test.logback)
 

From 160aa730667a82e4acd1eb6af5c54eb85ee6adc1 Mon Sep 17 00:00:00 2001
From: Roland 
Date: Thu, 10 May 2012 21:08:06 +0200
Subject: [PATCH 015/538] scaladoc for TestConductor

---
 .../akka/remote/testconductor/Conductor.scala | 287 ++++++++++++++----
 .../akka/remote/testconductor/Extension.scala |  16 +
 .../akka/remote/testconductor/Features.scala  |  89 ------
 .../akka/remote/testconductor/Player.scala    |  34 ++-
 4 files changed, 283 insertions(+), 143 deletions(-)
 delete mode 100644 akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala

diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
index b25bd1838c..347973a255 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -25,8 +25,26 @@ import akka.dispatch.Future
 import akka.actor.OneForOneStrategy
 import akka.actor.SupervisorStrategy
 import java.util.concurrent.ConcurrentHashMap
+import akka.actor.Status
 
-trait Conductor extends RunControl with FailureInject { this: TestConductorExt ⇒
+sealed trait Direction
+
+object Direction {
+  case object Send extends Direction
+  case object Receive extends Direction
+  case object Both extends Direction
+}
+
+/**
+ * The conductor is the one orchestrating the test: it governs the
+ * [[akka.remote.testconductor.Controller]]’s port to which all
+ * [[akka.remote.testconductor.Player]]s connect, it issues commands to their
+ * [[akka.remote.testconductor.NetworkFailureInjector]] and provides support
+ * for barriers using the [[akka.remote.testconductor.BarrierCoordinator]].
+ * All of this is bundled inside the [[akka.remote.testconductor.TestConductorExt]]
+ * extension.
+ */
+trait Conductor { this: TestConductorExt ⇒
 
   import Controller._
 
@@ -36,60 +54,154 @@ trait Conductor extends RunControl with FailureInject { this: TestConductorExt 
     case x    ⇒ x
   }
 
-  override def startController(participants: Int): Future[Int] = {
+  /**
+   * Start the [[akka.remote.testconductor.Controller]], which in turn will
+   * bind to a TCP port as specified in the `akka.testconductor.port` config
+   * property, where 0 denotes automatic allocation. Since the latter is
+   * actually preferred, a `Future[Int]` is returned which will be completed
+   * with the port number actually chosen, so that this can then be communicated
+   * to the players for their proper start-up.
+   *
+   * This method also invokes [[akka.remote.testconductor.Player]].startClient,
+   * since it is expected that the conductor participates in barriers for
+   * overall coordination. The returned Future will only be completed once the
+   * client’s start-up finishes, which in fact waits for all other players to
+   * connect.
+   *
+   * @param participants gives the number of participants which shall connect
+   * before any of their startClient() operations complete.
+   */
+  def startController(participants: Int): Future[Int] = {
     if (_controller ne null) throw new RuntimeException("TestConductorServer was already started")
     _controller = system.actorOf(Props(new Controller(participants)), "controller")
     import Settings.BarrierTimeout
     controller ? GetPort flatMap { case port: Int ⇒ startClient(port) map (_ ⇒ port) }
   }
 
-  override def port: Future[Int] = {
+  /**
+   * Obtain the port to which the controller’s socket is actually bound. This
+   * will deviate from the configuration in `akka.testconductor.port` in case
+   * that was given as zero.
+   */
+  def port: Future[Int] = {
     import Settings.QueryTimeout
     controller ? GetPort mapTo
   }
 
-  override def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] = {
+  /**
+   * Make the remoting pipeline on the node throttle data sent to or received
+   * from the given remote peer. Throttling works by delaying packet submission
+   * within the netty pipeline until the packet would have been completely sent
+   * according to the given rate, the previous packet completion and the current
+   * packet length. In case of large packets they are split up if the calculated
+   * send pause would exceed `akka.testconductor.packet-split-threshold`
+   * (roughly). All of this uses the system’s HashedWheelTimer, which is not
+   * terribly precise and will execute tasks later than they are schedule (even
+   * on average), but that is countered by using the actual execution time for
+   * determining how much to send, leading to the correct output rate, but with
+   * increased latency.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param target is the symbolic name of the other node to which connectivity shall be throttled
+   * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both`
+   * @param rateMBit is the maximum data rate in MBit
+   */
+  def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Throttle(node, target, direction, rateMBit.toFloat) mapTo
   }
 
-  override def blackhole(node: String, target: String, direction: Direction): Future[Done] = {
+  /**
+   * Switch the Netty pipeline of the remote support into blackhole mode for
+   * sending and/or receiving: it will just drop all messages right before
+   * submitting them to the Socket or right after receiving them from the
+   * Socket.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param target is the symbolic name of the other node to which connectivity shall be impeded
+   * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both`
+   */
+  def blackhole(node: String, target: String, direction: Direction): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Throttle(node, target, direction, 0f) mapTo
   }
 
-  override def disconnect(node: String, target: String): Future[Done] = {
+  /**
+   * Tell the remote support to shutdown the connection to the given remote
+   * peer. It works regardless of whether the recipient was initiator or
+   * responder.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param target is the symbolic name of the other node to which connectivity shall be impeded
+   */
+  def disconnect(node: String, target: String): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Disconnect(node, target, false) mapTo
   }
 
-  override def abort(node: String, target: String): Future[Done] = {
+  /**
+   * Tell the remote support to TCP_RESET the connection to the given remote
+   * peer. It works regardless of whether the recipient was initiator or
+   * responder.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param target is the symbolic name of the other node to which connectivity shall be impeded
+   */
+  def abort(node: String, target: String): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Disconnect(node, target, true) mapTo
   }
 
-  override def shutdown(node: String, exitValue: Int): Future[Done] = {
+  /**
+   * Tell the remote node to shut itself down using System.exit with the given
+   * exitValue.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   * @param exitValue is the return code which shall be given to System.exit
+   */
+  def shutdown(node: String, exitValue: Int): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Terminate(node, exitValue) mapTo
   }
 
-  override def kill(node: String): Future[Done] = {
+  /**
+   * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy.
+   * 
+   * @param node is the symbolic name of the node which is to be affected
+   */
+  def kill(node: String): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Terminate(node, -1) mapTo
   }
 
-  override def getNodes: Future[List[String]] = {
+  /**
+   * Obtain the list of remote host names currently registered.
+   */
+  def getNodes: Future[List[String]] = {
     import Settings.QueryTimeout
     controller ? GetNodes mapTo
   }
 
-  override def removeNode(node: String): Future[Done] = {
+  /**
+   * Remove a remote host from the list, so that the remaining nodes may still
+   * pass subsequent barriers. This must be done before the client connection
+ * breaks down in order to effect an “orderly” removal (i.e. without failing
+   * present and future barriers).
+   * 
+   * @param node is the symbolic name of the node which is to be removed
+   */
+  def removeNode(node: String): Future[Done] = {
     import Settings.QueryTimeout
     controller ? Remove(node) mapTo
   }
 
 }
 
+/**
+ * This handler is installed at the end of the controller’s netty pipeline. Its only
+ * purpose is to dispatch incoming messages to the right ServerFSM actor. There is
+ * one shared instance of this class for all connections accepted by one Controller.
+ */
 class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler {
 
   val clients = new ConcurrentHashMap[Channel, ActorRef]()
@@ -105,7 +217,7 @@ class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAd
     val channel = event.getChannel
     log.debug("disconnect from {}", getAddrString(channel))
     val fsm = clients.get(channel)
-    fsm ! PoisonPill
+    fsm ! Controller.ClientDisconnected
     clients.remove(channel)
   }
 
@@ -129,6 +241,19 @@ object ServerFSM {
   case object Ready extends State
 }
 
+/**
+ * The server part of each client connection is represented by a ServerFSM.
+ * The Initial state handles reception of the new client’s
+ * [[akka.remote.testconductor.Hello]] message (which is needed for all subsequent
+ * node name translations).
+ *
+ * In the Ready state, messages from the client are forwarded to the controller
+ * and [[akka.remote.testconductor.Send]] requests are sent, but the latter is
+ * treated specially: all client operations are to be confirmed by a
+ * [[akka.remote.testconductor.Done]] message, and there can be only one such
+ * request outstanding at a given time (i.e. a Send fails if the previous has
+ * not yet been acknowledged).
+ */
 class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] {
   import ServerFSM._
   import akka.actor.FSM._
@@ -136,9 +261,20 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi
 
   startWith(Initial, None)
 
+  whenUnhandled {
+    case Event(ClientDisconnected, Some(s)) ⇒
+      s ! Status.Failure(new RuntimeException("client disconnected in state " + stateName + ": " + channel))
+      stop()
+    case Event(ClientDisconnected, None) ⇒ stop()
+  }
+
+  onTermination {
+    case _ ⇒ controller ! ClientDisconnected
+  }
+
   when(Initial, stateTimeout = 10 seconds) {
     case Event(Hello(name, addr), _) ⇒
-      controller ! ClientConnected(name, addr)
+      controller ! NodeInfo(name, addr, self)
       goto(Ready)
     case Event(x: NetworkOp, _) ⇒
       log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x)
@@ -162,7 +298,6 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi
       stay using None
     case Event(msg: NetworkOp, _) ⇒
       log.warning("client {} sent unsupported message {}", getAddrString(channel), msg)
-      channel.close()
       stop()
     case Event(Send(msg @ (_: EnterBarrier | _: Done)), _) ⇒
       channel.write(msg)
@@ -176,10 +311,13 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi
   }
 
   initialize
+
+  onTermination {
+    case _ ⇒ channel.close()
+  }
 }
 
 object Controller {
-  case class ClientConnected(name: String, address: Address)
   case class ClientDisconnected(name: String)
   case object GetNodes
   case object GetPort
@@ -187,6 +325,11 @@ object Controller {
   case class NodeInfo(name: String, addr: Address, fsm: ActorRef)
 }
 
+/**
+ * This controls test execution by managing barriers (delegated to
+ * [[akka.remote.testconductor.BarrierCoordinator]], its child) and allowing
+ * network and other failures to be injected at the test nodes.
+ */
 class Controller(_participants: Int) extends Actor {
   import Controller._
 
@@ -199,8 +342,8 @@ class Controller(_participants: Int) extends Actor {
   override def supervisorStrategy = OneForOneStrategy() {
     case e: BarrierCoordinator.BarrierTimeoutException ⇒ SupervisorStrategy.Resume
     case e: BarrierCoordinator.WrongBarrierException ⇒
-      // I think we are lacking a means of communication here: this is not correct!
-      for (i ← 1 to e.data.clients) barrier ! ClientConnected
+      for (NodeInfo(c, _, _) ← e.data.clients; info ← nodes get c)
+        barrier ! NodeInfo(c, info.addr, info.fsm)
       for (c ← e.data.arrived) c ! BarrierFailed(e.barrier)
       SupervisorStrategy.Restart
   }
@@ -209,17 +352,17 @@ class Controller(_participants: Int) extends Actor {
   var nodes = Map[String, NodeInfo]()
 
   override def receive = LoggingReceive {
-    case ClientConnected(name, addr) ⇒
-      nodes += name -> NodeInfo(name, addr, sender)
-      barrier forward ClientConnected
+    case c @ NodeInfo(name, addr, fsm) ⇒
+      nodes += name -> c
+      barrier forward c
       if (initialParticipants <= 0) sender ! Done
       else if (nodes.size == initialParticipants) {
         for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done)
         initialParticipants = 0
       }
-    case ClientDisconnected(name) ⇒
+    case c @ ClientDisconnected(name) ⇒
       nodes -= name
-      barrier forward ClientDisconnected
+      barrier forward c
     case e @ EnterBarrier(name) ⇒
       barrier forward e
     case Throttle(node, target, direction, rateMBit) ⇒
@@ -234,9 +377,9 @@ class Controller(_participants: Int) extends Actor {
       } else {
         nodes(node).fsm forward Send(TerminateMsg(exitValueOrKill))
       }
-    // TODO: properly remove node from BarrierCoordinator
-    //    case Remove(node) =>
-    //      nodes -= node
+    case Remove(node) ⇒
+      nodes -= node
+      barrier ! BarrierCoordinator.RemoveClient(node)
     case GetNodes ⇒ sender ! nodes.keys
     case GetPort ⇒
       sender ! (connection.getLocalAddress match {
@@ -250,27 +393,60 @@ object BarrierCoordinator {
   case object Idle extends State
   case object Waiting extends State
 
-  case class Data(clients: Int, barrier: String, arrived: List[ActorRef])
+  case class RemoveClient(name: String)
+
+  case class Data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef])
   class BarrierTimeoutException(val data: Data) extends RuntimeException(data.barrier) with NoStackTrace
   class WrongBarrierException(val barrier: String, val client: ActorRef, val data: Data) extends RuntimeException(barrier) with NoStackTrace
 }
 
+/**
+ * This barrier coordinator gets informed of players connecting (NodeInfo),
+ * players being deliberately removed (RemoveClient) or failing (ClientDisconnected)
+ * by the controller. It also receives EnterBarrier requests, where upon the first
+ * one received the name of the current barrier is set and all other known clients
+ * are expected to join the barrier, whereupon all of them will be sent the successful
+ * EnterBarrier return message. In case of planned removals, this may just happen
+ * earlier, in case of failures the current barrier (and all subsequent ones) will
+ * be failed by sending BarrierFailed responses.
+ */
 class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] {
   import BarrierCoordinator._
   import akka.actor.FSM._
   import Controller._
 
-  startWith(Idle, Data(0, "", Nil))
+  // this shall be set to true if all subsequent barriers shall fail
+  var failed = false
+  override def preRestart(reason: Throwable, message: Option[Any]) {}
+  override def postRestart(reason: Throwable) { failed = true }
+
+  // TODO what happens with the other waiting players in case of a test failure?
+
+  startWith(Idle, Data(Set(), "", Nil))
+
+  whenUnhandled {
+    case Event(n: NodeInfo, d @ Data(clients, _, _)) ⇒
+      stay using d.copy(clients = clients + n)
+  }
 
   when(Idle) {
-    case Event(EnterBarrier(name), Data(num, _, _)) ⇒
-      if (num == 0) throw new IllegalStateException("no client expected yet")
-      goto(Waiting) using Data(num, name, sender :: Nil)
-    case Event(ClientConnected, d @ Data(num, _, _)) ⇒
-      stay using d.copy(clients = num + 1)
-    case Event(ClientDisconnected, d @ Data(num, _, _)) ⇒
-      if (num == 0) throw new IllegalStateException("no client to disconnect")
-      stay using d.copy(clients = num - 1)
+    case Event(EnterBarrier(name), d @ Data(clients, _, _)) ⇒
+      if (clients.isEmpty) throw new IllegalStateException("no client expected yet")
+      if (failed)
+        stay replying BarrierFailed(name)
+      else
+        goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil)
+    case Event(ClientDisconnected(name), d @ Data(clients, _, _)) ⇒
+      if (clients.isEmpty) throw new IllegalStateException("no client to disconnect")
+      (clients filterNot (_.name == name)) match {
+        case `clients` ⇒ stay
+        case c ⇒
+          failed = true
+          stay using d.copy(clients = c)
+      }
+    case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒
+      if (clients.isEmpty) throw new IllegalStateException("no client to remove")
+      stay using d.copy(clients = clients filterNot (_.name == name))
   }
 
   onTransition {
@@ -279,30 +455,37 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
   }
 
   when(Waiting) {
-    case Event(e @ EnterBarrier(name), d @ Data(num, barrier, arrived)) ⇒
+    case Event(e @ EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒
       if (name != barrier) throw new WrongBarrierException(barrier, sender, d)
       val together = sender :: arrived
-      if (together.size == num) {
-        together foreach (_ ! Send(e))
-        goto(Idle) using Data(num, "", Nil)
-      } else {
-        stay using d.copy(arrived = together)
-      }
-    case Event(ClientConnected, d @ Data(num, _, _)) ⇒
-      stay using d.copy(clients = num + 1)
-    case Event(ClientDisconnected, d @ Data(num, barrier, arrived)) ⇒
-      val expected = num - 1
-      if (arrived.size == expected) {
-        val e = EnterBarrier(barrier)
-        sender :: arrived foreach (_ ! Send(e))
-        goto(Idle) using Data(expected, "", Nil)
-      } else {
-        stay using d.copy(clients = expected)
+      handleBarrier(d.copy(arrived = together))
+    case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒
+      val newClients = clients filterNot (_.name == name)
+      val newArrived = arrived filterNot (_ == name)
+      handleBarrier(d.copy(clients = newClients, arrived = newArrived))
+    case Event(ClientDisconnected(name), d @ Data(clients, barrier, arrived)) ⇒
+      (clients filterNot (_.name == name)) match {
+        case `clients` ⇒ stay
+        case c ⇒
+          val f = BarrierFailed(barrier)
+          arrived foreach (_ ! Send(f))
+          failed = true
+          goto(Idle) using Data(c, "", Nil)
       }
     case Event(StateTimeout, data) ⇒
       throw new BarrierTimeoutException(data)
   }
 
   initialize
+
+  def handleBarrier(data: Data): State =
+    if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) {
+      val e = EnterBarrier(data.barrier)
+      data.arrived foreach (_ ! Send(e))
+      goto(Idle) using data.copy(barrier = "", arrived = Nil)
+    } else {
+      stay using data
+    }
+
 }
 
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
index 97f5dd7295..ff1d77fb9d 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
@@ -11,12 +11,28 @@ import akka.actor.ActorRef
 import java.util.concurrent.ConcurrentHashMap
 import akka.actor.Address
 
+/**
+ * Access to the [[akka.remote.testconductor.TestConductorExt]] extension:
+ * 
+ * {{{
+ * val tc = TestConductor(system)
+ * tc.startController(numPlayers)
+ * // OR
+ * tc.startClient(conductorPort)
+ * }}}
+ */
 object TestConductor extends ExtensionKey[TestConductorExt] {
 
   def apply()(implicit ctx: ActorContext): TestConductorExt = apply(ctx.system)
 
 }
 
+/**
+ * This binds together the [[akka.remote.testconductor.Conductor]] and
+ * [[akka.remote.testconductor.Player]] roles inside an Akka
+ * [[akka.actor.Extension]]. Please follow the aforementioned links for
+ * more information.
+ */
 class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player {
 
   object Settings {
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala
deleted file mode 100644
index 336d04c368..0000000000
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Features.scala
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- *  Copyright (C) 2009-2011 Typesafe Inc. 
- */
-package akka.remote.testconductor
-
-import akka.dispatch.Future
-
-trait BarrierSync {
-  /**
-   * Enter all given barriers in the order in which they were given.
-   */
-  def enter(name: String*): Unit
-}
-
-sealed trait Direction
-
-object Direction {
-  case object Send extends Direction
-  case object Receive extends Direction
-  case object Both extends Direction
-}
-
-trait FailureInject {
-
-  /**
-   * Make the remoting pipeline on the node throttle data sent to or received
-   * from the given remote peer.
-   */
-  def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done]
-
-  /**
-   * Switch the Netty pipeline of the remote support into blackhole mode for
-   * sending and/or receiving: it will just drop all messages right before
-   * submitting them to the Socket or right after receiving them from the
-   * Socket.
-   */
-  def blackhole(node: String, target: String, direction: Direction): Future[Done]
-
-  /**
-   * Tell the remote support to shutdown the connection to the given remote
-   * peer. It works regardless of whether the recipient was initiator or
-   * responder.
-   */
-  def disconnect(node: String, target: String): Future[Done]
-
-  /**
-   * Tell the remote support to TCP_RESET the connection to the given remote
-   * peer. It works regardless of whether the recipient was initiator or
-   * responder.
-   */
-  def abort(node: String, target: String): Future[Done]
-
-}
-
-trait RunControl {
-
-  /**
-   * Start the server port, returns the port number.
-   */
-  def startController(participants: Int): Future[Int]
-
-  /**
-   * Get the actual port used by the server.
-   */
-  def port: Future[Int]
-
-  /**
-   * Tell the remote node to shut itself down using System.exit with the given
-   * exitValue.
-   */
-  def shutdown(node: String, exitValue: Int): Future[Done]
-
-  /**
-   * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy.
-   */
-  def kill(node: String): Future[Done]
-
-  /**
-   * Obtain the list of remote host names currently registered.
-   */
-  def getNodes: Future[List[String]]
-
-  /**
-   * Remove a remote host from the list, so that the remaining nodes may still
-   * pass subsequent barriers.
-   */
-  def removeNode(node: String): Future[Done]
-
-}
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
index 6e78610cfb..38d0f6ef34 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
@@ -21,7 +21,13 @@ import akka.actor.PoisonPill
 import akka.event.Logging
 import akka.dispatch.Future
 
-trait Player extends BarrierSync { this: TestConductorExt ⇒
+/**
+ * The Player is the client component of the
+ * [[akka.remote.testconductor.TestConductorExt]] extension. It registers with
+ * the [[akka.remote.testconductor.Conductor]]’s [[akka.remote.testconductor.Controller]]
+ * in order to participate in barriers and enable network failure injection.
+ */
+trait Player { this: TestConductorExt ⇒
 
   private var _client: ActorRef = _
   private def client = _client match {
@@ -29,6 +35,14 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒
     case x    ⇒ x
   }
 
+  /**
+   * Connect to the conductor on the given port (the host is taken from setting
+   * `akka.testconductor.host`). The connection is made asynchronously, but you
+   * should await completion of the returned Future because that implies that
+   * all expected participants of this test have successfully connected (i.e.
+   * this is a first barrier in itself). The number of expected participants is
+   * set in [[akka.remote.testconductor.Conductor]]`.startController()`.
+   */
   def startClient(port: Int): Future[Done] = {
     import ClientFSM._
     import akka.actor.FSM._
@@ -51,7 +65,11 @@ trait Player extends BarrierSync { this: TestConductorExt ⇒
     a ? client mapTo
   }
 
-  override def enter(name: String*) {
+  /**
+   * Enter the named barriers, one after the other, in the order given. Will
+   * throw an exception in case of timeouts or other errors.
+   */
+  def enter(name: String*) {
     system.log.debug("entering barriers " + name.mkString("(", ", ", ")"))
     name foreach { b ⇒
       import Settings.BarrierTimeout
@@ -73,6 +91,15 @@ object ClientFSM {
   case object Disconnected
 }
 
+/**
+ * This is the controlling entity on the [[akka.remote.testconductor.Player]]
+ * side: in a first step it registers itself with a symbolic name and its remote
+ * address at the [[akka.remote.testconductor.Controller]], then waits for the
+ * `Done` message which signals that all other expected test participants have
+ * done the same. After that, it will pass barrier requests to and from the
+ * coordinator and react to the [[akka.remote.testconductor.Conductor]]’s
+ * requests for failure injection.
+ */
 class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] {
   import ClientFSM._
 
@@ -162,6 +189,9 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
 
 }
 
+/**
+ * This handler only forwards messages received from the conductor to the [[akka.remote.testconductor.ClientFSM]].
+ */
 class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler {
 
   import ClientFSM._

From 439f653427d4ad26d504b2b35633b2de9d421d8b Mon Sep 17 00:00:00 2001
From: Roland 
Date: Fri, 11 May 2012 11:31:44 +0200
Subject: [PATCH 016/538] add some tests for BarrierCoordinator and Controller

---
 .../akka/remote/testconductor/Conductor.scala | 129 +++--
 .../akka/remote/testconductor/Extension.scala |   2 +-
 .../akka/remote/testconductor/Player.scala    |  27 +-
 .../remote/testconductor/BarrierSpec.scala    | 465 ++++++++++++++++++
 .../remote/testconductor/ControllerSpec.scala |  38 ++
 5 files changed, 599 insertions(+), 62 deletions(-)
 create mode 100644 akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
 create mode 100644 akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala

diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
index 347973a255..09a6faeeb0 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -1,5 +1,5 @@
 /**
- *  Copyright (C) 2009-2011 Typesafe Inc. 
+ *  Copyright (C) 2009-2012 Typesafe Inc. 
  */
 package akka.remote.testconductor
 
@@ -100,7 +100,7 @@ trait Conductor { this: TestConductorExt ⇒
    * on average), but that is countered by using the actual execution time for
    * determining how much to send, leading to the correct output rate, but with
    * increased latency.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param target is the symbolic name of the other node to which connectivity shall be throttled
    * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both`
@@ -116,7 +116,7 @@ trait Conductor { this: TestConductorExt ⇒
    * sending and/or receiving: it will just drop all messages right before
    * submitting them to the Socket or right after receiving them from the
    * Socket.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param target is the symbolic name of the other node to which connectivity shall be impeded
    * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both`
@@ -130,7 +130,7 @@ trait Conductor { this: TestConductorExt ⇒
    * Tell the remote support to shutdown the connection to the given remote
    * peer. It works regardless of whether the recipient was initiator or
    * responder.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param target is the symbolic name of the other node to which connectivity shall be impeded
    */
@@ -143,7 +143,7 @@ trait Conductor { this: TestConductorExt ⇒
    * Tell the remote support to TCP_RESET the connection to the given remote
    * peer. It works regardless of whether the recipient was initiator or
    * responder.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param target is the symbolic name of the other node to which connectivity shall be impeded
    */
@@ -155,7 +155,7 @@ trait Conductor { this: TestConductorExt ⇒
   /**
    * Tell the remote node to shut itself down using System.exit with the given
    * exitValue.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    * @param exitValue is the return code which shall be given to System.exit
    */
@@ -166,7 +166,7 @@ trait Conductor { this: TestConductorExt ⇒
 
   /**
    * Tell the SBT plugin to forcibly terminate the given remote node using Process.destroy.
-   * 
+   *
    * @param node is the symbolic name of the node which is to be affected
    */
   def kill(node: String): Future[Done] = {
@@ -177,7 +177,7 @@ trait Conductor { this: TestConductorExt ⇒
   /**
    * Obtain the list of remote host names currently registered.
    */
-  def getNodes: Future[List[String]] = {
+  def getNodes: Future[Iterable[String]] = {
     import Settings.QueryTimeout
     controller ? GetNodes mapTo
   }
@@ -187,7 +187,7 @@ trait Conductor { this: TestConductorExt ⇒
    * pass subsequent barriers. This must be done before the client connection
    * breaks down in order to affect an “orderly” removal (i.e. without failing
    * present and future barriers).
-   * 
+   *
    * @param node is the symbolic name of the node which is to be removed
    */
   def removeNode(node: String): Future[Done] = {
@@ -330,22 +330,32 @@ object Controller {
  * [[akka.remote.testconductor.BarrierCoordinator]], its child) and allowing
  * network and other failures to be injected at the test nodes.
  */
-class Controller(_participants: Int) extends Actor {
+class Controller(private var initialParticipants: Int) extends Actor {
   import Controller._
-
-  var initialParticipants = _participants
+  import BarrierCoordinator._
 
   val settings = TestConductor().Settings
   val connection = RemoteConnection(Server, settings.host, settings.port,
     new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler")))
 
+  /*
+   * Supervision of the BarrierCoordinator means to catch all his bad emotions
+   * and sometimes console him (BarrierEmpty, BarrierTimeout), sometimes tell
+   * him to hate the world (WrongBarrier, DuplicateNode, ClientLost). The latter shall help
+   * terminate broken tests as quickly as possible (i.e. without awaiting
+   * BarrierTimeouts in the players).
+   */
   override def supervisorStrategy = OneForOneStrategy() {
-    case e: BarrierCoordinator.BarrierTimeoutException ⇒ SupervisorStrategy.Resume
-    case e: BarrierCoordinator.WrongBarrierException ⇒
-      for (NodeInfo(c, _, _) ← e.data.clients; info ← nodes get c)
-        barrier ! NodeInfo(c, info.addr, info.fsm)
-      for (c ← e.data.arrived) c ! BarrierFailed(e.barrier)
-      SupervisorStrategy.Restart
+    case BarrierTimeout(data)             ⇒ SupervisorStrategy.Resume
+    case BarrierEmpty(data, msg)          ⇒ SupervisorStrategy.Resume
+    case WrongBarrier(name, client, data) ⇒ client ! Send(BarrierFailed(name)); failBarrier(data)
+    case ClientLost(data, node)           ⇒ failBarrier(data)
+    case DuplicateNode(data, node)        ⇒ failBarrier(data)
+  }
+
+  def failBarrier(data: Data): SupervisorStrategy.Directive = {
+    for (c ← data.arrived) c ! Send(BarrierFailed(data.barrier))
+    SupervisorStrategy.Restart
   }
 
   val barrier = context.actorOf(Props[BarrierCoordinator], "barriers")
@@ -353,12 +363,20 @@ class Controller(_participants: Int) extends Actor {
 
   override def receive = LoggingReceive {
     case c @ NodeInfo(name, addr, fsm) ⇒
-      nodes += name -> c
       barrier forward c
-      if (initialParticipants <= 0) sender ! Done
-      else if (nodes.size == initialParticipants) {
-        for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done)
-        initialParticipants = 0
+      if (nodes contains name) {
+        if (initialParticipants > 0) {
+          for (NodeInfo(_, _, client) ← nodes.values) client ! Send(BarrierFailed("initial startup"))
+          initialParticipants = 0
+        }
+        fsm ! Send(BarrierFailed("initial startup"))
+      } else {
+        nodes += name -> c
+        if (initialParticipants <= 0) fsm ! Send(Done)
+        else if (nodes.size == initialParticipants) {
+          for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done)
+          initialParticipants = 0
+        }
       }
     case c @ ClientDisconnected(name) ⇒
       nodes -= name
@@ -396,8 +414,16 @@ object BarrierCoordinator {
   case class RemoveClient(name: String)
 
   case class Data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef])
-  class BarrierTimeoutException(val data: Data) extends RuntimeException(data.barrier) with NoStackTrace
-  class WrongBarrierException(val barrier: String, val client: ActorRef, val data: Data) extends RuntimeException(barrier) with NoStackTrace
+
+  trait Printer { this: Product with Throwable with NoStackTrace ⇒
+    override def toString = productPrefix + productIterator.mkString("(", ", ", ")")
+  }
+
+  case class BarrierTimeout(data: Data) extends RuntimeException(data.barrier) with NoStackTrace with Printer
+  case class DuplicateNode(data: Data, node: Controller.NodeInfo) extends RuntimeException with NoStackTrace with Printer
+  case class WrongBarrier(barrier: String, client: ActorRef, data: Data) extends RuntimeException(barrier) with NoStackTrace with Printer
+  case class BarrierEmpty(data: Data, msg: String) extends RuntimeException(msg) with NoStackTrace with Printer
+  case class ClientLost(data: Data, client: String) extends RuntimeException with NoStackTrace with Printer
 }
 
 /**
@@ -426,26 +452,28 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
 
   whenUnhandled {
     case Event(n: NodeInfo, d @ Data(clients, _, _)) ⇒
+      if (clients.find(_.name == n.name).isDefined) throw new DuplicateNode(d, n)
       stay using d.copy(clients = clients + n)
+    case Event(ClientDisconnected(name), d @ Data(clients, _, arrived)) ⇒
+      if (clients.isEmpty) throw BarrierEmpty(d, "no client to disconnect")
+      (clients find (_.name == name)) match {
+        case None    ⇒ stay
+        case Some(c) ⇒ throw ClientLost(d.copy(clients = clients - c, arrived = arrived filterNot (_ == c.fsm)), name)
+      }
   }
 
   when(Idle) {
-    case Event(EnterBarrier(name), d @ Data(clients, _, _)) ⇒
-      if (clients.isEmpty) throw new IllegalStateException("no client expected yet")
+    case Event(e @ EnterBarrier(name), d @ Data(clients, _, _)) ⇒
       if (failed)
-        stay replying BarrierFailed(name)
+        stay replying Send(BarrierFailed(name))
+      else if (clients.map(_.fsm) == Set(sender))
+        stay replying Send(e)
+      else if (clients.find(_.fsm == sender).isEmpty)
+        stay replying Send(BarrierFailed(name))
       else
         goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil)
-    case Event(ClientDisconnected(name), d @ Data(clients, _, _)) ⇒
-      if (clients.isEmpty) throw new IllegalStateException("no client to disconnect")
-      (clients filterNot (_.name == name)) match {
-        case `clients` ⇒ stay
-        case c ⇒
-          failed = true
-          stay using d.copy(clients = c)
-      }
     case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒
-      if (clients.isEmpty) throw new IllegalStateException("no client to remove")
+      if (clients.isEmpty) throw BarrierEmpty(d, "no client to remove")
       stay using d.copy(clients = clients filterNot (_.name == name))
   }
 
@@ -456,36 +484,33 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State,
 
   when(Waiting) {
     case Event(e @ EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒
-      if (name != barrier) throw new WrongBarrierException(barrier, sender, d)
+      if (name != barrier || clients.find(_.fsm == sender).isEmpty) throw WrongBarrier(name, sender, d)
       val together = sender :: arrived
       handleBarrier(d.copy(arrived = together))
     case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒
-      val newClients = clients filterNot (_.name == name)
-      val newArrived = arrived filterNot (_ == name)
-      handleBarrier(d.copy(clients = newClients, arrived = newArrived))
-    case Event(ClientDisconnected(name), d @ Data(clients, barrier, arrived)) ⇒
-      (clients filterNot (_.name == name)) match {
-        case `clients` ⇒ stay
-        case c ⇒
-          val f = BarrierFailed(barrier)
-          arrived foreach (_ ! Send(f))
-          failed = true
-          goto(Idle) using Data(c, "", Nil)
+      clients find (_.name == name) match {
+        case None ⇒ stay
+        case Some(client) ⇒
+          handleBarrier(d.copy(clients = clients - client, arrived = arrived filterNot (_ == client.fsm)))
       }
     case Event(StateTimeout, data) ⇒
-      throw new BarrierTimeoutException(data)
+      throw BarrierTimeout(data)
   }
 
   initialize
 
-  def handleBarrier(data: Data): State =
-    if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) {
+  def handleBarrier(data: Data): State = {
+    log.debug("handleBarrier({})", data)
+    if (data.arrived.isEmpty) {
+      goto(Idle) using data.copy(barrier = "")
+    } else if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) {
       val e = EnterBarrier(data.barrier)
       data.arrived foreach (_ ! Send(e))
       goto(Idle) using data.copy(barrier = "", arrived = Nil)
     } else {
       stay using data
     }
+  }
 
 }
 
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
index ff1d77fb9d..5d7826c60c 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala
@@ -13,7 +13,7 @@ import akka.actor.Address
 
 /**
  * Access to the [[akka.remote.testconductor.TestConductorExt]] extension:
- * 
+ *
  * {{{
  * val tc = TestConductor(system)
  * tc.startController(numPlayers)
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
index 38d0f6ef34..a82a090b23 100644
--- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala
@@ -55,9 +55,9 @@ trait Player { this: TestConductorExt ⇒
       def receive = {
         case fsm: ActorRef                        ⇒ waiting = sender; fsm ! SubscribeTransitionCallBack(self)
         case Transition(_, Connecting, AwaitDone) ⇒ // step 1, not there yet
-        case Transition(_, AwaitDone, Connected)  ⇒ waiting ! Done
-        case t: Transition[_]                     ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t))
-        case CurrentState(_, Connected)           ⇒ waiting ! Done
+        case Transition(_, AwaitDone, Connected)  ⇒ waiting ! Done; context stop self
+        case t: Transition[_]                     ⇒ waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t)); context stop self
+        case CurrentState(_, Connected)           ⇒ waiting ! Done; context stop self
         case _: CurrentState[_]                   ⇒
       }
     }))
@@ -84,6 +84,7 @@ object ClientFSM {
   case object Connecting extends State
   case object AwaitDone extends State
   case object Connected extends State
+  case object Failed extends State
 
   case class Data(channel: Channel, barrier: Option[(String, ActorRef)])
 
@@ -116,24 +117,24 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
       channel.write(Hello(settings.name, TestConductor().address))
       goto(AwaitDone)
     case Event(_: ConnectionFailure, _) ⇒
-      // System.exit(1)
-      stop
+      goto(Failed)
     case Event(StateTimeout, _) ⇒
       log.error("connect timeout to TestConductor")
-      // System.exit(1)
-      stop
+      goto(Failed)
   }
 
   when(AwaitDone, stateTimeout = settings.BarrierTimeout.duration) {
     case Event(Done, _) ⇒
       log.debug("received Done: starting test")
       goto(Connected)
+    case Event(msg: NetworkOp, _) ⇒
+      log.error("received {} instead of Done", msg)
+      goto(Failed)
     case Event(msg: ClientOp, _) ⇒
       stay replying Status.Failure(new IllegalStateException("not connected yet"))
     case Event(StateTimeout, _) ⇒
       log.error("connect timeout to TestConductor")
-      // System.exit(1)
-      stop
+      goto(Failed)
   }
 
   when(Connected) {
@@ -180,6 +181,14 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client
       stay // needed because Java doesn’t have Nothing
   }
 
+  when(Failed) {
+    case Event(msg: ClientOp, _) ⇒
+      stay replying Status.Failure(new RuntimeException("cannot do " + msg + " while Failed"))
+    case Event(msg: NetworkOp, _) ⇒
+      log.warning("ignoring network message {} while Failed", msg)
+      stay
+  }
+
   onTermination {
     case StopEvent(_, _, Data(channel, _)) ⇒
       channel.close()
diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
new file mode 100644
index 0000000000..f0b668d1ed
--- /dev/null
+++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
@@ -0,0 +1,465 @@
+/**
+ *  Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+package akka.remote.testconductor
+
+import akka.testkit.AkkaSpec
+import akka.actor.Props
+import akka.actor.AddressFromURIString
+import akka.actor.ActorRef
+import akka.testkit.ImplicitSender
+import akka.actor.Actor
+import akka.actor.OneForOneStrategy
+import akka.actor.SupervisorStrategy
+import akka.testkit.EventFilter
+import akka.testkit.TestProbe
+import akka.util.duration._
+import akka.event.Logging
+import org.scalatest.BeforeAndAfterEach
+
+object BarrierSpec {
+  case class Failed(ref: ActorRef, thr: Throwable)
+  val config = """
+    akka.testconductor.barrier-timeout = 5s
+    akka.actor.provider = akka.remote.RemoteActorRefProvider
+    akka.remote.netty.port = 0
+    akka.actor.debug.fsm = on
+    akka.actor.debug.lifecycle = on
+    """
+}
+
+class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with BeforeAndAfterEach {
+
+  import BarrierSpec._
+  import Controller._
+  import BarrierCoordinator._
+
+  override def afterEach {
+    system.eventStream.setLogLevel(Logging.WarningLevel)
+  }
+
+  "A BarrierCoordinator" must {
+
+    "register clients and remove them" in {
+      val b = getBarrier()
+      b ! NodeInfo("a", AddressFromURIString("akka://sys"), system.deadLetters)
+      b ! RemoveClient("b")
+      b ! RemoveClient("a")
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        b ! RemoveClient("a")
+      }
+      expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to remove")))
+    }
+
+    "register clients and disconnect them" in {
+      val b = getBarrier()
+      b ! NodeInfo("a", AddressFromURIString("akka://sys"), system.deadLetters)
+      b ! ClientDisconnected("b")
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        b ! ClientDisconnected("a")
+      }
+      expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil), "a")))
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        b ! ClientDisconnected("a")
+      }
+      expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to disconnect")))
+    }
+
+    "fail entering barrier when nobody registered" in {
+      val b = getBarrier()
+      b ! EnterBarrier("b")
+      expectMsg(Send(BarrierFailed("b")))
+    }
+
+    "enter barrier" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      noMsg(a, b)
+      within(1 second) {
+        b.send(barrier, EnterBarrier("bar"))
+        a.expectMsg(Send(EnterBarrier("bar")))
+        b.expectMsg(Send(EnterBarrier("bar")))
+      }
+    }
+
+    "enter barrier with joining node" in {
+      val barrier = getBarrier()
+      val a, b, c = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      b.send(barrier, EnterBarrier("bar"))
+      noMsg(a, b, c)
+      within(1 second) {
+        c.send(barrier, EnterBarrier("bar"))
+        a.expectMsg(Send(EnterBarrier("bar")))
+        b.expectMsg(Send(EnterBarrier("bar")))
+        c.expectMsg(Send(EnterBarrier("bar")))
+      }
+    }
+
+    "enter barrier with leaving node" in {
+      val barrier = getBarrier()
+      val a, b, c = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      b.send(barrier, EnterBarrier("bar"))
+      barrier ! RemoveClient("a")
+      barrier ! ClientDisconnected("a")
+      noMsg(a, b, c)
+      b.within(1 second) {
+        barrier ! RemoveClient("c")
+        b.expectMsg(Send(EnterBarrier("bar")))
+      }
+      barrier ! ClientDisconnected("c")
+      expectNoMsg(1 second)
+    }
+
+    "leave barrier when last “arrived” is removed" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! RemoveClient("a")
+      b.send(barrier, EnterBarrier("foo"))
+      b.expectMsg(Send(EnterBarrier("foo")))
+    }
+
+    "fail barrier with disconnecting node" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! nodeA
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.send(barrier, EnterBarrier("bar"))
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        barrier ! ClientDisconnected("b")
+      }
+      expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), "b")))
+    }
+
+    "fail barrier with disconnecting node who already arrived" in {
+      val barrier = getBarrier()
+      val a, b, c = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeC = NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      barrier ! nodeA
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeC
+      a.send(barrier, EnterBarrier("bar"))
+      b.send(barrier, EnterBarrier("bar"))
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        barrier ! ClientDisconnected("b")
+      }
+      expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), "b")))
+    }
+
+    "fail when entering wrong barrier" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! nodeA
+      val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeB
+      a.send(barrier, EnterBarrier("bar"))
+      EventFilter[WrongBarrier](occurrences = 1) intercept {
+        b.send(barrier, EnterBarrier("foo"))
+      }
+      expectMsg(Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar", a.ref :: Nil))))
+    }
+
+    "fail barrier after first failure" in {
+      val barrier = getBarrier()
+      val a = TestProbe()
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        barrier ! RemoveClient("a")
+      }
+      expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "no client to remove")))
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      a.send(barrier, EnterBarrier("right"))
+      a.expectMsg(Send(BarrierFailed("right")))
+    }
+
+    "fail after barrier timeout" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeA
+      barrier ! nodeB
+      a.send(barrier, EnterBarrier("right"))
+      EventFilter[BarrierTimeout](occurrences = 1) intercept {
+        expectMsg(7 seconds, Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "right", a.ref :: Nil))))
+      }
+    }
+
+    "fail if a node registers twice" in {
+      val barrier = getBarrier()
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeA
+      EventFilter[DuplicateNode](occurrences = 1) intercept {
+        barrier ! nodeB
+      }
+      expectMsg(Failed(barrier, DuplicateNode(Data(Set(nodeA), "", Nil), nodeB)))
+    }
+
+    "finally have no failure messages left" in {
+      expectNoMsg(1 second)
+    }
+
+  }
+
+  "A Controller with BarrierCoordinator" must {
+
+    "register clients and remove them" in {
+      val b = getController(1)
+      b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor)
+      expectMsg(Send(Done))
+      b ! Remove("b")
+      b ! Remove("a")
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        b ! Remove("a")
+      }
+    }
+
+    "register clients and disconnect them" in {
+      val b = getController(1)
+      b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor)
+      expectMsg(Send(Done))
+      b ! ClientDisconnected("b")
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        b ! ClientDisconnected("a")
+      }
+      EventFilter[BarrierEmpty](occurrences = 1) intercept {
+        b ! ClientDisconnected("a")
+      }
+    }
+
+    "fail entering barrier when nobody registered" in {
+      val b = getController(0)
+      b ! EnterBarrier("b")
+      expectMsg(Send(BarrierFailed("b")))
+    }
+
+    "enter barrier" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      noMsg(a, b)
+      within(1 second) {
+        b.send(barrier, EnterBarrier("bar"))
+        a.expectMsg(Send(EnterBarrier("bar")))
+        b.expectMsg(Send(EnterBarrier("bar")))
+      }
+    }
+
+    "enter barrier with joining node" in {
+      val barrier = getController(2)
+      val a, b, c = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      c.expectMsg(Send(Done))
+      b.send(barrier, EnterBarrier("bar"))
+      noMsg(a, b, c)
+      within(1 second) {
+        c.send(barrier, EnterBarrier("bar"))
+        a.expectMsg(Send(EnterBarrier("bar")))
+        b.expectMsg(Send(EnterBarrier("bar")))
+        c.expectMsg(Send(EnterBarrier("bar")))
+      }
+    }
+
+    "enter barrier with leaving node" in {
+      val barrier = getController(3)
+      val a, b, c = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      c.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      b.send(barrier, EnterBarrier("bar"))
+      barrier ! Remove("a")
+      barrier ! ClientDisconnected("a")
+      noMsg(a, b, c)
+      b.within(1 second) {
+        barrier ! Remove("c")
+        b.expectMsg(Send(EnterBarrier("bar")))
+      }
+      barrier ! ClientDisconnected("c")
+      expectNoMsg(1 second)
+    }
+
+    "leave barrier when last “arrived” is removed" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! Remove("a")
+      b.send(barrier, EnterBarrier("foo"))
+      b.expectMsg(Send(EnterBarrier("foo")))
+    }
+
+    "fail barrier with disconnecting node" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! nodeA
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      barrier ! ClientDisconnected("unknown")
+      noMsg(a)
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        barrier ! ClientDisconnected("b")
+      }
+      a.expectMsg(Send(BarrierFailed("bar")))
+    }
+
+    "fail barrier with disconnecting node who already arrived" in {
+      val barrier = getController(3)
+      val a, b, c = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeC = NodeInfo("c", AddressFromURIString("akka://sys"), c.ref)
+      barrier ! nodeA
+      barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeC
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      c.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      b.send(barrier, EnterBarrier("bar"))
+      EventFilter[ClientLost](occurrences = 1) intercept {
+        barrier ! ClientDisconnected("b")
+      }
+      a.expectMsg(Send(BarrierFailed("bar")))
+    }
+
+    "fail when entering wrong barrier" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      barrier ! nodeA
+      val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeB
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("bar"))
+      EventFilter[WrongBarrier](occurrences = 1) intercept {
+        b.send(barrier, EnterBarrier("foo"))
+      }
+      a.expectMsg(Send(BarrierFailed("bar")))
+      b.expectMsg(Send(BarrierFailed("foo")))
+    }
+
+    "not really fail after barrier timeout" in {
+      val barrier = getController(2)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref)
+      barrier ! nodeA
+      barrier ! nodeB
+      a.expectMsg(Send(Done))
+      b.expectMsg(Send(Done))
+      a.send(barrier, EnterBarrier("right"))
+      EventFilter[BarrierTimeout](occurrences = 1) intercept {
+        Thread.sleep(5000)
+      }
+      b.send(barrier, EnterBarrier("right"))
+      a.expectMsg(Send(EnterBarrier("right")))
+      b.expectMsg(Send(EnterBarrier("right")))
+    }
+
+    "fail if a node registers twice" in {
+      val controller = getController(2)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref)
+      controller ! nodeA
+      EventFilter[DuplicateNode](occurrences = 1) intercept {
+        controller ! nodeB
+      }
+      a.expectMsg(Send(BarrierFailed("initial startup")))
+      b.expectMsg(Send(BarrierFailed("initial startup")))
+    }
+
+    "fail subsequent barriers if a node registers twice" in {
+      val controller = getController(1)
+      val a, b = TestProbe()
+      val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref)
+      val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref)
+      controller ! nodeA
+      a.expectMsg(Send(Done))
+      EventFilter[DuplicateNode](occurrences = 1) intercept {
+        controller ! nodeB
+        b.expectMsg(Send(BarrierFailed("initial startup")))
+      }
+      a.send(controller, EnterBarrier("x"))
+      a.expectMsg(Send(BarrierFailed("x")))
+    }
+
+    "finally have no failure messages left" in {
+      expectNoMsg(1 second)
+    }
+
+  }
+
+  private def getController(participants: Int): ActorRef = {
+    system.actorOf(Props(new Actor {
+      val controller = context.actorOf(Props(new Controller(participants)))
+      controller ! GetPort
+      override def supervisorStrategy = OneForOneStrategy() {
+        case x ⇒ testActor ! Failed(controller, x); SupervisorStrategy.Restart
+      }
+      def receive = {
+        case x: Int ⇒ testActor ! controller
+      }
+    }))
+    expectMsgType[ActorRef]
+  }
+
+  /**
+   * Produce a BarrierCoordinator which is supervised with a strategy which
+   * forwards all failures to the testActor.
+   */
+  private def getBarrier(): ActorRef = {
+    system.actorOf(Props(new Actor {
+      val barrier = context.actorOf(Props[BarrierCoordinator])
+      override def supervisorStrategy = OneForOneStrategy() {
+        case x ⇒ testActor ! Failed(barrier, x); SupervisorStrategy.Restart
+      }
+      def receive = {
+        case _ ⇒ sender ! barrier
+      }
+    })) ! ""
+    expectMsgType[ActorRef]
+  }
+
+  private def noMsg(probes: TestProbe*) {
+    expectNoMsg(1 second)
+    probes foreach (_.msgAvailable must be(false))
+  }
+
+}
\ No newline at end of file
diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala
new file mode 100644
index 0000000000..db0e3cfe69
--- /dev/null
+++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala
@@ -0,0 +1,38 @@
+/**
+ *  Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+package akka.remote.testconductor
+
+import akka.testkit.AkkaSpec
+import akka.actor.Props
+import akka.testkit.ImplicitSender
+import akka.remote.testconductor.Controller.NodeInfo
+import akka.actor.AddressFromURIString
+
+object ControllerSpec {
+  val config = """
+    akka.testconductor.barrier-timeout = 5s
+    akka.actor.provider = akka.remote.RemoteActorRefProvider
+    akka.remote.netty.port = 0
+    akka.actor.debug.fsm = on
+    akka.actor.debug.lifecycle = on
+    """
+}
+
+class ControllerSpec extends AkkaSpec(ControllerSpec.config) with ImplicitSender {
+
+  "A Controller" must {
+
+    "publish its nodes" in {
+      val c = system.actorOf(Props(new Controller(1)))
+      c ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor)
+      expectMsg(Send(Done))
+      c ! NodeInfo("b", AddressFromURIString("akka://sys"), testActor)
+      expectMsg(Send(Done))
+      c ! Controller.GetNodes
+      expectMsgType[Iterable[String]].toSet must be(Set("a", "b"))
+    }
+
+  }
+
+}
\ No newline at end of file

From 096025dc6463b11e0d7bea47384196b056cec06a Mon Sep 17 00:00:00 2001
From: Viktor Klang 
Date: Mon, 14 May 2012 11:35:29 +0200
Subject: [PATCH 017/538] Replacing MapBackedSet with newSetFromMap

---
 .../netty/akka/util/HashedWheelTimer.java     |  2 +-
 .../jboss/netty/akka/util/MapBackedSet.java   | 73 -------------------
 2 files changed, 1 insertion(+), 74 deletions(-)
 delete mode 100644 akka-actor/src/main/java/org/jboss/netty/akka/util/MapBackedSet.java

diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java b/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java
index 6e54fa2233..9eba51e53f 100644
--- a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java
@@ -155,7 +155,7 @@ public class HashedWheelTimer implements Timer {
         ticksPerWheel = normalizeTicksPerWheel(ticksPerWheel);
         Set[] wheel = new Set[ticksPerWheel];
         for (int i = 0; i < wheel.length; i ++) {
-            wheel[i] = new MapBackedSet(new ConcurrentIdentityHashMap(16, 0.95f, 4));
+            wheel[i] = Collections.newSetFromMap(new ConcurrentIdentityHashMap(16, 0.95f, 4));
         }
         return wheel;
     }
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/MapBackedSet.java b/akka-actor/src/main/java/org/jboss/netty/akka/util/MapBackedSet.java
deleted file mode 100644
index 2bc1bc25e0..0000000000
--- a/akka-actor/src/main/java/org/jboss/netty/akka/util/MapBackedSet.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2009 Red Hat, Inc.
- *
- * Red Hat licenses this file to you under the Apache License, version 2.0
- * (the "License"); you may not use this file except in compliance with the
- * License.  You may obtain a copy of the License at:
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.jboss.netty.akka.util;
-
-import java.io.Serializable;
-import java.util.AbstractSet;
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * A {@link java.util.Map}-backed {@link java.util.Set}.
- * 
- * @author The Netty Project
- * @author Trustin Lee
- * 
- * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $
- */
-final class MapBackedSet extends AbstractSet implements Serializable {
-
-  private static final long serialVersionUID = -6761513279741915432L;
-
-  private final Map map;
-
-  /**
-   * Creates a new instance which wraps the specified {@code map}.
-   */
-  MapBackedSet(Map map) {
-    this.map = map;
-  }
-
-  @Override
-  public int size() {
-    return map.size();
-  }
-
-  @Override
-  public boolean contains(Object o) {
-    return map.containsKey(o);
-  }
-
-  @Override
-  public boolean add(E o) {
-    return map.put(o, Boolean.TRUE) == null;
-  }
-
-  @Override
-  public boolean remove(Object o) {
-    return map.remove(o) != null;
-  }
-
-  @Override
-  public void clear() {
-    map.clear();
-  }
-
-  @Override
-  public Iterator iterator() {
-    return map.keySet().iterator();
-  }
-}

From 14dc08b75a2c08f83b5357bba285150dcf7896c1 Mon Sep 17 00:00:00 2001
From: Patrik Nordwall 
Date: Mon, 14 May 2012 14:26:32 +0200
Subject: [PATCH 018/538] Added failing DirectRoutedRemoteActorMultiJvmSpec.
 See #2069

---
 .../remote/SimpleRemoteMultiJvmSpec.scala     | 82 +++++++++++++++++
 .../DirectRoutedRemoteActorMultiJvmSpec.scala | 90 +++++++++++++++++++
 2 files changed, 172 insertions(+)
 create mode 100644 akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala
 create mode 100644 akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala

diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala
new file mode 100644
index 0000000000..9209deb9a5
--- /dev/null
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala
@@ -0,0 +1,82 @@
+/**
+ *  Copyright (C) 2009-2011 Typesafe Inc. 
+ */
+package akka.remote
+
+import akka.actor.Actor
+import akka.actor.ActorRef
+import akka.actor.Props
+import akka.dispatch.Await
+import akka.pattern.ask
+import akka.remote.testconductor.TestConductor
+import akka.testkit.DefaultTimeout
+import akka.testkit.ImplicitSender
+import akka.util.Duration
+import com.typesafe.config.ConfigFactory
+
+object SimpleRemoteMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec {
+  override def NrOfNodes = 2
+
+  class SomeActor extends Actor with Serializable {
+    def receive = {
+      case "identify" ⇒ sender ! self
+    }
+  }
+
+  override def commonConfig = ConfigFactory.parseString("""
+      akka {
+        loglevel = INFO
+        actor {
+          provider = akka.remote.RemoteActorRefProvider
+          debug {
+            receive = on
+            fsm = on
+          }
+        }
+        remote {
+          transport = akka.remote.testconductor.TestConductorTransport
+          log-received-messages = on
+          log-sent-messages = on
+        }
+        testconductor {
+          host = localhost
+          port = 4712
+        }
+      }""")
+
+  def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n))
+}
+
+class SimpleRemoteMultiJvmNode1 extends AkkaRemoteSpec(SimpleRemoteMultiJvmSpec.nameConfig(0)) {
+  import SimpleRemoteMultiJvmSpec._
+  val nodes = NrOfNodes
+  val tc = TestConductor(system)
+
+  "lookup remote actor" in {
+    Await.result(tc.startController(2), Duration.Inf)
+    system.actorOf(Props[SomeActor], "service-hello")
+    tc.enter("begin", "done")
+  }
+
+}
+
+class SimpleRemoteMultiJvmNode2 extends AkkaRemoteSpec(SimpleRemoteMultiJvmSpec.nameConfig(1))
+  with ImplicitSender with DefaultTimeout {
+
+  import SimpleRemoteMultiJvmSpec._
+  val nodes = NrOfNodes
+  val tc = TestConductor(system)
+
+  "lookup remote actor" in {
+    Await.result(tc.startClient(4712), Duration.Inf)
+    tc.enter("begin")
+    log.info("### begin ok")
+    val actor = system.actorFor("akka://" + akkaSpec(0) + "/user/service-hello")
+    log.info("### actor lookup " + akkaSpec(0) + "/service-hello")
+    actor.isInstanceOf[RemoteActorRef] must be(true)
+    Await.result(actor ? "identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0))
+    log.info("### actor ok")
+    tc.enter("done")
+  }
+
+}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala
new file mode 100644
index 0000000000..d44beff605
--- /dev/null
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala
@@ -0,0 +1,90 @@
+/**
+ *  Copyright (C) 2009-2011 Typesafe Inc. 
+ */
+package akka.remote.router
+
+import akka.actor.{ Actor, ActorRef, Props }
+import akka.remote.AkkaRemoteSpec
+import akka.remote.AbstractRemoteActorMultiJvmSpec
+import akka.remote.RemoteActorRef
+import akka.remote.testconductor.TestConductor
+import akka.testkit._
+import akka.dispatch.Await
+import akka.pattern.ask
+import akka.util.Duration
+
+object DirectRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec {
+  override def NrOfNodes = 2
+
+  class SomeActor extends Actor with Serializable {
+    def receive = {
+      case "identify" ⇒ sender ! self
+    }
+  }
+
+  import com.typesafe.config.ConfigFactory
+  override def commonConfig = ConfigFactory.parseString("""
+      akka {
+        loglevel = INFO
+        actor {
+          provider = akka.remote.RemoteActorRefProvider
+          deployment {
+            /service-hello.remote = %s
+          }
+          debug {
+            receive = on
+            fsm = on
+          }
+        }
+        remote {
+          transport = akka.remote.testconductor.TestConductorTransport
+          log-received-messages = on
+          log-sent-messages = on
+        }
+        testconductor {
+          host = localhost
+          port = 4712
+        }
+      }""" format akkaURIs(1))
+
+  def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n))
+}
+
+class DirectRoutedRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(DirectRoutedRemoteActorMultiJvmSpec.nameConfig(0)) {
+  import DirectRoutedRemoteActorMultiJvmSpec._
+  val nodes = NrOfNodes
+  val tc = TestConductor(system)
+
+  "A new remote actor configured with a Direct router" must {
+    "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in {
+      Await.result(tc.startController(2), Duration.Inf)
+      tc.enter("begin", "done")
+    }
+  }
+
+}
+
+class DirectRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(DirectRoutedRemoteActorMultiJvmSpec.nameConfig(1))
+  with ImplicitSender with DefaultTimeout {
+
+  import DirectRoutedRemoteActorMultiJvmSpec._
+  val nodes = NrOfNodes
+  val tc = TestConductor(system)
+
+  "A new remote actor configured with a Direct router" must {
+    "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in {
+      Await.result(tc.startClient(4712), Duration.Inf)
+      tc.enter("begin")
+
+      val actor = system.actorOf(Props[SomeActor], "service-hello")
+      actor.isInstanceOf[RemoteActorRef] must be(true)
+
+      Await.result(actor ? "identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0))
+
+      // shut down the actor before we let the other node(s) shut down so we don't try to send
+      // "Terminate" to a shut down node
+      system.stop(actor)
+      tc.enter("done")
+    }
+  }
+}

From 114f1c995b6df7f6a9ccfe1e0cc4b5da9e74cd86 Mon Sep 17 00:00:00 2001
From: Viktor Klang 
Date: Mon, 14 May 2012 18:09:36 +0200
Subject: [PATCH 019/538] Adding docs and privatizing parts of Actor.scala

---
 .../src/main/scala/akka/actor/Actor.scala     | 117 +++++++++++++++---
 1 file changed, 100 insertions(+), 17 deletions(-)

diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index 2499d42f10..3e233a2056 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -12,8 +12,9 @@ import java.util.regex.Pattern
 
 /**
  * Marker trait to show which Messages are automatically handled by Akka
+ * Internal use only
  */
-trait AutoReceivedMessage extends Serializable
+private[akka] trait AutoReceivedMessage extends Serializable
 
 /**
  * Marker trait to indicate that a message might be potentially harmful,
@@ -26,9 +27,16 @@ trait PossiblyHarmful
  */
 trait NoSerializationVerificationNeeded
 
-case class Failed(cause: Throwable) extends AutoReceivedMessage with PossiblyHarmful
+/**
+ * Internal use only
+ */
+private[akka] case class Failed(cause: Throwable) extends AutoReceivedMessage with PossiblyHarmful
 
 abstract class PoisonPill extends AutoReceivedMessage with PossiblyHarmful
+
+/**
+ * A message all Actors will understand, that when processed will terminate the Actor permanently.
+ */
 case object PoisonPill extends PoisonPill {
   /**
    * Java API: get the singleton instance
@@ -37,6 +45,10 @@ case object PoisonPill extends PoisonPill {
 }
 
 abstract class Kill extends AutoReceivedMessage with PossiblyHarmful
+/**
+ * A message all Actors will understand, that when processed will make the Actor throw an ActorKilledException,
+ * which will trigger supervision.
+ */
 case object Kill extends Kill {
   /**
    * Java API: get the singleton instance
@@ -44,9 +56,17 @@ case object Kill extends Kill {
   def getInstance = this
 }
 
+/**
+ * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated.
+ */
 case class Terminated(@BeanProperty actor: ActorRef) extends PossiblyHarmful
 
 abstract class ReceiveTimeout extends PossiblyHarmful
+
+/**
+ * When using ActorContext.setReceiveTimeout, the singleton instance of ReceiveTimeout will be sent
+ * to the Actor when there hasn't been any message for that long.
+ */
 case object ReceiveTimeout extends ReceiveTimeout {
   /**
    * Java API: get the singleton instance
@@ -60,49 +80,85 @@ case object ReceiveTimeout extends ReceiveTimeout {
  * message is delivered by active routing of the various actors involved.
  */
 sealed trait SelectionPath extends AutoReceivedMessage
-case class SelectChildName(name: String, next: Any) extends SelectionPath
-case class SelectChildPattern(pattern: Pattern, next: Any) extends SelectionPath
-case class SelectParent(next: Any) extends SelectionPath
 
-// Exceptions for Actors
+/**
+ * Internal use only
+ */
+private[akka] case class SelectChildName(name: String, next: Any) extends SelectionPath
+
+/**
+ * Internal use only
+ */
+private[akka] case class SelectChildPattern(pattern: Pattern, next: Any) extends SelectionPath
+
+/**
+ * Internal use only
+ */
+private[akka] case class SelectParent(next: Any) extends SelectionPath
+
+/**
+ * IllegalActorStateException is thrown when a core invariant in the Actor implementation has been violated.
+ * For instance, if you try to create an Actor that doesn't extend Actor.
+ */
 class IllegalActorStateException private[akka] (message: String, cause: Throwable = null)
   extends AkkaException(message, cause) {
-  def this(msg: String) = this(msg, null);
+  def this(msg: String) = this(msg, null)
 }
 
+/**
+ * ActorKilledException is thrown when an Actor receives the akka.actor.Kill message
+ */
 class ActorKilledException private[akka] (message: String, cause: Throwable)
   extends AkkaException(message, cause)
   with NoStackTrace {
-  def this(msg: String) = this(msg, null);
+  def this(msg: String) = this(msg, null)
 }
 
+/**
+ * An InvalidActorNameException is thrown when you try to convert something, usually a String, to an Actor name
+ * which doesn't validate.
+ */
 case class InvalidActorNameException(message: String) extends AkkaException(message)
 
+/**
+ * An ActorInitializationException is thrown when the the initialization logic for an Actor fails.
+ */
 case class ActorInitializationException private[akka] (actor: ActorRef, message: String, cause: Throwable = null)
   extends AkkaException(message, cause)
   with NoStackTrace {
-  def this(msg: String) = this(null, msg, null);
+  def this(msg: String) = this(null, msg, null)
 }
 
+//FIXME: Only used by gracefulStop we should remove this if possible
 class ActorTimeoutException private[akka] (message: String, cause: Throwable = null)
   extends AkkaException(message, cause) {
-  def this(msg: String) = this(msg, null);
+  def this(msg: String) = this(msg, null)
 }
 
+/**
+ * InvalidMessageException is thrown when an invalid message is sent to an Actor.
+ * Technically it's only "null" which is an InvalidMessageException but who knows,
+ * there might be more of them in the future, or not.
+ */
 class InvalidMessageException private[akka] (message: String, cause: Throwable = null)
   extends AkkaException(message, cause)
   with NoStackTrace {
-  def this(msg: String) = this(msg, null);
+  def this(msg: String) = this(msg, null)
 }
 
+/**
+ * A DeathPactException is thrown by an Actor that receives a Terminated(someActor) message
+ * that it doesn't handle itself, effectively crashing the Actor and escalating to the supervisor.
+ */
 case class DeathPactException private[akka] (dead: ActorRef)
   extends AkkaException("Monitored actor [" + dead + "] terminated")
   with NoStackTrace
 
-// must not pass InterruptedException to other threads
-case class ActorInterruptedException private[akka] (cause: Throwable)
-  extends AkkaException(cause.getMessage, cause)
-  with NoStackTrace
+/**
+ * When an InterruptedException is thrown inside an Actor, it is wrapped as an ActorInterruptedException as to
+ * avoid cascading interrupts to other threads than the originally interrupted one.
+ */
+case class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaException(cause.getMessage, cause) with NoStackTrace
 
 /**
  * This message is published to the EventStream whenever an Actor receives a message it doesn't understand
@@ -115,18 +171,42 @@ case class UnhandledMessage(@BeanProperty message: Any, @BeanProperty sender: Ac
  */
 object Status {
   sealed trait Status extends Serializable
+
+  /**
+   * This class/message type is preferably used to indicate success of some operation performed.
+   */
   case class Success(status: AnyRef) extends Status
+
+  /**
+   * This class/message type is preferably used to indicate failure of some operation performed.
+   */
   case class Failure(cause: Throwable) extends Status
 }
 
+/**
+ * Mix in ActorLogging into your Actor to easily obtain a reference to a logger, which is available under the name "log".
+ *
+ * {{
+ * class MyActor extends Actor with ActorLogging {
+ *   def receive = {
+ *     case "pigdog" => log.info("We've got yet another pigdog on our hands")
+ *   }
+ * }
+ * }}
+ */
 trait ActorLogging { this: Actor ⇒
   val log = akka.event.Logging(context.system, this)
 }
 
 object Actor {
-
+  /**
+   * Type alias representing a Receive-expression for Akka Actors.
+   */
   type Receive = PartialFunction[Any, Unit]
 
+  /**
+   * emptyBehavior is a Receive-expression that matches no messages at all, ever.
+   */
   object emptyBehavior extends Receive {
     def isDefinedAt(x: Any) = false
     def apply(x: Any) = throw new UnsupportedOperationException("Empty behavior apply()")
@@ -312,7 +392,7 @@ trait Actor {
    * For Akka internal use only.
    */
   private[akka] final def apply(msg: Any) = {
-    // TODO would it be more efficient to assume that most messages are matched and catch MatchError instead of using isDefinedAt?
+    //FIXME replace with behaviorStack.head.applyOrElse(msg, unhandled) + "-optimize"
     val head = behaviorStack.head
     if (head.isDefinedAt(msg)) head.apply(msg) else unhandled(msg)
   }
@@ -339,6 +419,9 @@ trait Actor {
   private[akka] def clearBehaviorStack(): Unit =
     behaviorStack = Stack.empty[Receive].push(behaviorStack.last)
 
+  /**
+   * For Akka internal use only.
+   */
   private var behaviorStack: Stack[Receive] = Stack.empty[Receive].push(receive)
 }
 

From 6dd017d6c19e4975a257fad6cc5ec2150635ab80 Mon Sep 17 00:00:00 2001
From: Patrik Nordwall 
Date: Tue, 15 May 2012 09:40:13 +0200
Subject: [PATCH 020/538] Replace Java serialization of DaemonMsg by protobuf.
 See #1755

* Serializers for DaemonMsgCreate and DaemonMsgWatch
* Protobuf for DaemonMsgCreateProtocol, PropsProtocol,
  DeployProtocol, DaemonMsgWatchProtocol
* Removed unused MailboxProtocol.proto
* Fixed wrong serializeActorRef in DurableMessageSerialization
---
 .../src/main/protocol/MailboxProtocol.proto   |   30 -
 .../akka/actor/mailbox/DurableMailbox.scala   |   10 +-
 .../main/java/akka/remote/RemoteProtocol.java | 2588 +++++++++++++++--
 .../src/main/protocol/RemoteProtocol.proto    |   39 +-
 akka-remote/src/main/resources/reference.conf |    4 +
 .../DaemonMsgCreateSerializer.scala           |  139 +
 .../DaemonMsgWatchSerializer.scala            |   41 +
 .../serialization/ProtobufSerializer.scala    |   16 +
 .../DaemonMsgCreateSerializerSpec.scala       |  104 +
 .../DaemonMsgWatchSerializerSpec.scala        |   49 +
 10 files changed, 2724 insertions(+), 296 deletions(-)
 delete mode 100644 akka-durable-mailboxes/akka-mailboxes-common/src/main/protocol/MailboxProtocol.proto
 create mode 100644 akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala
 create mode 100644 akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala
 create mode 100644 akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala
 create mode 100644 akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala

diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/protocol/MailboxProtocol.proto b/akka-durable-mailboxes/akka-mailboxes-common/src/main/protocol/MailboxProtocol.proto
deleted file mode 100644
index 96fab2bf95..0000000000
--- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/protocol/MailboxProtocol.proto
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc. 
- */
-
-option java_package = "akka.actor.mailbox";
-option optimize_for = SPEED;
-
-/******************************************
-  Compile with:
-  cd ./akka-durable-mailboxes/akka-mailboxes-common/src/main/protocol
-  protoc MailboxProtocol.proto --java_out ../java
-*******************************************/
-
-/**
- * Defines the durable mailbox message.
- */
-message DurableMailboxMessageProtocol {
-  required string ownerAddress = 1;
-  optional string senderAddress = 2;
-  optional UuidProtocol futureUuid = 3;
-  required bytes message = 4;
-}
-
-/**
- * Defines a UUID.
- */
-message UuidProtocol {
-  required uint64 high = 1;
-  required uint64 low = 2;
-}
diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala
index 41ec6d7307..db7b137bf0 100644
--- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala
+++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala
@@ -9,6 +9,7 @@ import akka.remote.MessageSerializer
 import akka.remote.RemoteProtocol.{ ActorRefProtocol, RemoteMessageProtocol }
 import com.typesafe.config.Config
 import akka.actor.ActorSystem
+import akka.serialization.Serialization
 
 private[akka] object DurableExecutableMailboxConfig {
   val Name = "[\\.\\/\\$\\s]".r
@@ -26,9 +27,10 @@ abstract class DurableMessageQueue(val owner: ActorContext) extends MessageQueue
 
 trait DurableMessageSerialization { this: DurableMessageQueue ⇒
 
-  def serialize(durableMessage: Envelope): Array[Byte] = {
+  import akka.serialization.ProtobufSerializer.serializeActorRef
+  import akka.serialization.ProtobufSerializer.deserializeActorRef
 
-    def serializeActorRef(ref: ActorRef): ActorRefProtocol = ActorRefProtocol.newBuilder.setPath(ref.path.toString).build
+  def serialize(durableMessage: Envelope): Array[Byte] = {
 
     val message = MessageSerializer.serialize(system, durableMessage.message.asInstanceOf[AnyRef])
     val builder = RemoteMessageProtocol.newBuilder
@@ -41,11 +43,9 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒
 
   def deserialize(bytes: Array[Byte]): Envelope = {
 
-    def deserializeActorRef(refProtocol: ActorRefProtocol): ActorRef = system.actorFor(refProtocol.getPath)
-
     val durableMessage = RemoteMessageProtocol.parseFrom(bytes)
     val message = MessageSerializer.deserialize(system, durableMessage.getMessage)
-    val sender = deserializeActorRef(durableMessage.getSender)
+    val sender = deserializeActorRef(system, durableMessage.getSender)
 
     new Envelope(message, sender)(system)
   }
diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java
index 21074a44c0..e340a807cf 100644
--- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java
+++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java
@@ -309,7 +309,7 @@ public final class RemoteProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      private Builder(BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -981,7 +981,7 @@ public final class RemoteProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      private Builder(BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -1977,7 +1977,7 @@ public final class RemoteProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      private Builder(BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -2527,7 +2527,7 @@ public final class RemoteProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      private Builder(BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -2936,7 +2936,7 @@ public final class RemoteProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      private Builder(BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -3410,7 +3410,7 @@ public final class RemoteProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      private Builder(BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -3909,7 +3909,7 @@ public final class RemoteProtocol {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      private Builder(BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
@@ -4172,115 +4172,166 @@ public final class RemoteProtocol {
     // @@protoc_insertion_point(class_scope:AddressProtocol)
   }
   
-  public interface DurableMailboxMessageProtocolOrBuilder
+  public interface DaemonMsgCreateProtocolOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required .ActorRefProtocol recipient = 1;
-    boolean hasRecipient();
-    akka.remote.RemoteProtocol.ActorRefProtocol getRecipient();
-    akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder();
+    // required .PropsProtocol props = 1;
+    boolean hasProps();
+    akka.remote.RemoteProtocol.PropsProtocol getProps();
+    akka.remote.RemoteProtocol.PropsProtocolOrBuilder getPropsOrBuilder();
     
-    // optional .ActorRefProtocol sender = 2;
-    boolean hasSender();
-    akka.remote.RemoteProtocol.ActorRefProtocol getSender();
-    akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder();
+    // required .DeployProtocol deploy = 2;
+    boolean hasDeploy();
+    akka.remote.RemoteProtocol.DeployProtocol getDeploy();
+    akka.remote.RemoteProtocol.DeployProtocolOrBuilder getDeployOrBuilder();
     
-    // required bytes message = 3;
-    boolean hasMessage();
-    com.google.protobuf.ByteString getMessage();
+    // required string path = 3;
+    boolean hasPath();
+    String getPath();
+    
+    // required .ActorRefProtocol supervisor = 4;
+    boolean hasSupervisor();
+    akka.remote.RemoteProtocol.ActorRefProtocol getSupervisor();
+    akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSupervisorOrBuilder();
   }
-  public static final class DurableMailboxMessageProtocol extends
+  public static final class DaemonMsgCreateProtocol extends
       com.google.protobuf.GeneratedMessage
-      implements DurableMailboxMessageProtocolOrBuilder {
-    // Use DurableMailboxMessageProtocol.newBuilder() to construct.
-    private DurableMailboxMessageProtocol(Builder builder) {
+      implements DaemonMsgCreateProtocolOrBuilder {
+    // Use DaemonMsgCreateProtocol.newBuilder() to construct.
+    private DaemonMsgCreateProtocol(Builder builder) {
       super(builder);
     }
-    private DurableMailboxMessageProtocol(boolean noInit) {}
+    private DaemonMsgCreateProtocol(boolean noInit) {}
     
-    private static final DurableMailboxMessageProtocol defaultInstance;
-    public static DurableMailboxMessageProtocol getDefaultInstance() {
+    private static final DaemonMsgCreateProtocol defaultInstance;
+    public static DaemonMsgCreateProtocol getDefaultInstance() {
       return defaultInstance;
     }
     
-    public DurableMailboxMessageProtocol getDefaultInstanceForType() {
+    public DaemonMsgCreateProtocol getDefaultInstanceForType() {
       return defaultInstance;
     }
     
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_descriptor;
+      return akka.remote.RemoteProtocol.internal_static_DaemonMsgCreateProtocol_descriptor;
     }
     
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable;
+      return akka.remote.RemoteProtocol.internal_static_DaemonMsgCreateProtocol_fieldAccessorTable;
     }
     
     private int bitField0_;
-    // required .ActorRefProtocol recipient = 1;
-    public static final int RECIPIENT_FIELD_NUMBER = 1;
-    private akka.remote.RemoteProtocol.ActorRefProtocol recipient_;
-    public boolean hasRecipient() {
+    // required .PropsProtocol props = 1;
+    public static final int PROPS_FIELD_NUMBER = 1;
+    private akka.remote.RemoteProtocol.PropsProtocol props_;
+    public boolean hasProps() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
-    public akka.remote.RemoteProtocol.ActorRefProtocol getRecipient() {
-      return recipient_;
+    public akka.remote.RemoteProtocol.PropsProtocol getProps() {
+      return props_;
     }
-    public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder() {
-      return recipient_;
+    public akka.remote.RemoteProtocol.PropsProtocolOrBuilder getPropsOrBuilder() {
+      return props_;
     }
     
-    // optional .ActorRefProtocol sender = 2;
-    public static final int SENDER_FIELD_NUMBER = 2;
-    private akka.remote.RemoteProtocol.ActorRefProtocol sender_;
-    public boolean hasSender() {
+    // required .DeployProtocol deploy = 2;
+    public static final int DEPLOY_FIELD_NUMBER = 2;
+    private akka.remote.RemoteProtocol.DeployProtocol deploy_;
+    public boolean hasDeploy() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
-    public akka.remote.RemoteProtocol.ActorRefProtocol getSender() {
-      return sender_;
+    public akka.remote.RemoteProtocol.DeployProtocol getDeploy() {
+      return deploy_;
     }
-    public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder() {
-      return sender_;
+    public akka.remote.RemoteProtocol.DeployProtocolOrBuilder getDeployOrBuilder() {
+      return deploy_;
     }
     
-    // required bytes message = 3;
-    public static final int MESSAGE_FIELD_NUMBER = 3;
-    private com.google.protobuf.ByteString message_;
-    public boolean hasMessage() {
+    // required string path = 3;
+    public static final int PATH_FIELD_NUMBER = 3;
+    private java.lang.Object path_;
+    public boolean hasPath() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
     }
-    public com.google.protobuf.ByteString getMessage() {
-      return message_;
+    public String getPath() {
+      java.lang.Object ref = path_;
+      if (ref instanceof String) {
+        return (String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        String s = bs.toStringUtf8();
+        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+          path_ = s;
+        }
+        return s;
+      }
+    }
+    private com.google.protobuf.ByteString getPathBytes() {
+      java.lang.Object ref = path_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+        path_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    
+    // required .ActorRefProtocol supervisor = 4;
+    public static final int SUPERVISOR_FIELD_NUMBER = 4;
+    private akka.remote.RemoteProtocol.ActorRefProtocol supervisor_;
+    public boolean hasSupervisor() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    public akka.remote.RemoteProtocol.ActorRefProtocol getSupervisor() {
+      return supervisor_;
+    }
+    public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSupervisorOrBuilder() {
+      return supervisor_;
     }
     
     private void initFields() {
-      recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
-      sender_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
-      message_ = com.google.protobuf.ByteString.EMPTY;
+      props_ = akka.remote.RemoteProtocol.PropsProtocol.getDefaultInstance();
+      deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
+      path_ = "";
+      supervisor_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
       
-      if (!hasRecipient()) {
+      if (!hasProps()) {
         memoizedIsInitialized = 0;
         return false;
       }
-      if (!hasMessage()) {
+      if (!hasDeploy()) {
         memoizedIsInitialized = 0;
         return false;
       }
-      if (!getRecipient().isInitialized()) {
+      if (!hasPath()) {
         memoizedIsInitialized = 0;
         return false;
       }
-      if (hasSender()) {
-        if (!getSender().isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
+      if (!hasSupervisor()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getProps().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getDeploy().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getSupervisor().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
       }
       memoizedIsInitialized = 1;
       return true;
@@ -4290,13 +4341,16 @@ public final class RemoteProtocol {
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, recipient_);
+        output.writeMessage(1, props_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeMessage(2, sender_);
+        output.writeMessage(2, deploy_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeBytes(3, message_);
+        output.writeBytes(3, getPathBytes());
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeMessage(4, supervisor_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -4309,15 +4363,19 @@ public final class RemoteProtocol {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, recipient_);
+          .computeMessageSize(1, props_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, sender_);
+          .computeMessageSize(2, deploy_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(3, message_);
+          .computeBytesSize(3, getPathBytes());
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(4, supervisor_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -4331,41 +4389,41 @@ public final class RemoteProtocol {
       return super.writeReplace();
     }
     
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom(
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return newBuilder().mergeFrom(data).buildParsed();
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom(
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return newBuilder().mergeFrom(data, extensionRegistry)
                .buildParsed();
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom(byte[] data)
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return newBuilder().mergeFrom(data).buildParsed();
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom(
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return newBuilder().mergeFrom(data, extensionRegistry)
                .buildParsed();
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom(java.io.InputStream input)
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return newBuilder().mergeFrom(input).buildParsed();
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom(
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return newBuilder().mergeFrom(input, extensionRegistry)
                .buildParsed();
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseDelimitedFrom(java.io.InputStream input)
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       Builder builder = newBuilder();
       if (builder.mergeDelimitedFrom(input)) {
@@ -4374,7 +4432,7 @@ public final class RemoteProtocol {
         return null;
       }
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseDelimitedFrom(
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -4385,12 +4443,12 @@ public final class RemoteProtocol {
         return null;
       }
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom(
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return newBuilder().mergeFrom(input).buildParsed();
     }
-    public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom(
+    public static akka.remote.RemoteProtocol.DaemonMsgCreateProtocol parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -4400,7 +4458,7 @@ public final class RemoteProtocol {
     
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(akka.remote.RemoteProtocol.DurableMailboxMessageProtocol prototype) {
+    public static Builder newBuilder(akka.remote.RemoteProtocol.DaemonMsgCreateProtocol prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -4413,30 +4471,31 @@ public final class RemoteProtocol {
     }
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder
-       implements akka.remote.RemoteProtocol.DurableMailboxMessageProtocolOrBuilder {
+       implements akka.remote.RemoteProtocol.DaemonMsgCreateProtocolOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_descriptor;
+        return akka.remote.RemoteProtocol.internal_static_DaemonMsgCreateProtocol_descriptor;
       }
       
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable;
+        return akka.remote.RemoteProtocol.internal_static_DaemonMsgCreateProtocol_fieldAccessorTable;
       }
       
-      // Construct using akka.remote.RemoteProtocol.DurableMailboxMessageProtocol.newBuilder()
+      // Construct using akka.remote.RemoteProtocol.DaemonMsgCreateProtocol.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
       
-      private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      private Builder(BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getRecipientFieldBuilder();
-          getSenderFieldBuilder();
+          getPropsFieldBuilder();
+          getDeployFieldBuilder();
+          getSupervisorFieldBuilder();
         }
       }
       private static Builder create() {
@@ -4445,20 +4504,26 @@ public final class RemoteProtocol {
       
       public Builder clear() {
         super.clear();
-        if (recipientBuilder_ == null) {
-          recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+        if (propsBuilder_ == null) {
+          props_ = akka.remote.RemoteProtocol.PropsProtocol.getDefaultInstance();
         } else {
-          recipientBuilder_.clear();
+          propsBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
-        if (senderBuilder_ == null) {
-          sender_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+        if (deployBuilder_ == null) {
+          deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
         } else {
-          senderBuilder_.clear();
+          deployBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000002);
-        message_ = com.google.protobuf.ByteString.EMPTY;
+        path_ = "";
         bitField0_ = (bitField0_ & ~0x00000004);
+        if (supervisorBuilder_ == null) {
+          supervisor_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+        } else {
+          supervisorBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }
       
@@ -4468,24 +4533,24 @@ public final class RemoteProtocol {
       
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return akka.remote.RemoteProtocol.DurableMailboxMessageProtocol.getDescriptor();
+        return akka.remote.RemoteProtocol.DaemonMsgCreateProtocol.getDescriptor();
       }
       
-      public akka.remote.RemoteProtocol.DurableMailboxMessageProtocol getDefaultInstanceForType() {
-        return akka.remote.RemoteProtocol.DurableMailboxMessageProtocol.getDefaultInstance();
+      public akka.remote.RemoteProtocol.DaemonMsgCreateProtocol getDefaultInstanceForType() {
+        return akka.remote.RemoteProtocol.DaemonMsgCreateProtocol.getDefaultInstance();
       }
       
-      public akka.remote.RemoteProtocol.DurableMailboxMessageProtocol build() {
-        akka.remote.RemoteProtocol.DurableMailboxMessageProtocol result = buildPartial();
+      public akka.remote.RemoteProtocol.DaemonMsgCreateProtocol build() {
+        akka.remote.RemoteProtocol.DaemonMsgCreateProtocol result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
       
-      private akka.remote.RemoteProtocol.DurableMailboxMessageProtocol buildParsed()
+      private akka.remote.RemoteProtocol.DaemonMsgCreateProtocol buildParsed()
           throws com.google.protobuf.InvalidProtocolBufferException {
-        akka.remote.RemoteProtocol.DurableMailboxMessageProtocol result = buildPartial();
+        akka.remote.RemoteProtocol.DaemonMsgCreateProtocol result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(
             result).asInvalidProtocolBufferException();
@@ -4493,78 +4558,2071 @@ public final class RemoteProtocol {
         return result;
       }
       
-      public akka.remote.RemoteProtocol.DurableMailboxMessageProtocol buildPartial() {
-        akka.remote.RemoteProtocol.DurableMailboxMessageProtocol result = new akka.remote.RemoteProtocol.DurableMailboxMessageProtocol(this);
+      public akka.remote.RemoteProtocol.DaemonMsgCreateProtocol buildPartial() {
+        akka.remote.RemoteProtocol.DaemonMsgCreateProtocol result = new akka.remote.RemoteProtocol.DaemonMsgCreateProtocol(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        if (recipientBuilder_ == null) {
-          result.recipient_ = recipient_;
+        if (propsBuilder_ == null) {
+          result.props_ = props_;
         } else {
-          result.recipient_ = recipientBuilder_.build();
+          result.props_ = propsBuilder_.build();
         }
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
-        if (senderBuilder_ == null) {
-          result.sender_ = sender_;
+        if (deployBuilder_ == null) {
+          result.deploy_ = deploy_;
         } else {
-          result.sender_ = senderBuilder_.build();
+          result.deploy_ = deployBuilder_.build();
         }
         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
           to_bitField0_ |= 0x00000004;
         }
-        result.message_ = message_;
+        result.path_ = path_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        if (supervisorBuilder_ == null) {
+          result.supervisor_ = supervisor_;
+        } else {
+          result.supervisor_ = supervisorBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
       
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof akka.remote.RemoteProtocol.DurableMailboxMessageProtocol) {
-          return mergeFrom((akka.remote.RemoteProtocol.DurableMailboxMessageProtocol)other);
+        if (other instanceof akka.remote.RemoteProtocol.DaemonMsgCreateProtocol) {
+          return mergeFrom((akka.remote.RemoteProtocol.DaemonMsgCreateProtocol)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
       
-      public Builder mergeFrom(akka.remote.RemoteProtocol.DurableMailboxMessageProtocol other) {
-        if (other == akka.remote.RemoteProtocol.DurableMailboxMessageProtocol.getDefaultInstance()) return this;
-        if (other.hasRecipient()) {
-          mergeRecipient(other.getRecipient());
+      public Builder mergeFrom(akka.remote.RemoteProtocol.DaemonMsgCreateProtocol other) {
+        if (other == akka.remote.RemoteProtocol.DaemonMsgCreateProtocol.getDefaultInstance()) return this;
+        if (other.hasProps()) {
+          mergeProps(other.getProps());
         }
-        if (other.hasSender()) {
-          mergeSender(other.getSender());
+        if (other.hasDeploy()) {
+          mergeDeploy(other.getDeploy());
         }
-        if (other.hasMessage()) {
-          setMessage(other.getMessage());
+        if (other.hasPath()) {
+          setPath(other.getPath());
+        }
+        if (other.hasSupervisor()) {
+          mergeSupervisor(other.getSupervisor());
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
       
       public final boolean isInitialized() {
-        if (!hasRecipient()) {
+        if (!hasProps()) {
           
           return false;
         }
-        if (!hasMessage()) {
+        if (!hasDeploy()) {
           
           return false;
         }
-        if (!getRecipient().isInitialized()) {
+        if (!hasPath()) {
           
           return false;
         }
-        if (hasSender()) {
-          if (!getSender().isInitialized()) {
-            
-            return false;
+        if (!hasSupervisor()) {
+          
+          return false;
+        }
+        if (!getProps().isInitialized()) {
+          
+          return false;
+        }
+        if (!getDeploy().isInitialized()) {
+          
+          return false;
+        }
+        if (!getSupervisor().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              akka.remote.RemoteProtocol.PropsProtocol.Builder subBuilder = akka.remote.RemoteProtocol.PropsProtocol.newBuilder();
+              if (hasProps()) {
+                subBuilder.mergeFrom(getProps());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setProps(subBuilder.buildPartial());
+              break;
+            }
+            case 18: {
+              akka.remote.RemoteProtocol.DeployProtocol.Builder subBuilder = akka.remote.RemoteProtocol.DeployProtocol.newBuilder();
+              if (hasDeploy()) {
+                subBuilder.mergeFrom(getDeploy());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setDeploy(subBuilder.buildPartial());
+              break;
+            }
+            case 26: {
+              bitField0_ |= 0x00000004;
+              path_ = input.readBytes();
+              break;
+            }
+            case 34: {
+              akka.remote.RemoteProtocol.ActorRefProtocol.Builder subBuilder = akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder();
+              if (hasSupervisor()) {
+                subBuilder.mergeFrom(getSupervisor());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setSupervisor(subBuilder.buildPartial());
+              break;
+            }
           }
         }
+      }
+      
+      private int bitField0_;
+      
+      // required .PropsProtocol props = 1;
+      private akka.remote.RemoteProtocol.PropsProtocol props_ = akka.remote.RemoteProtocol.PropsProtocol.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          akka.remote.RemoteProtocol.PropsProtocol, akka.remote.RemoteProtocol.PropsProtocol.Builder, akka.remote.RemoteProtocol.PropsProtocolOrBuilder> propsBuilder_;
+      public boolean hasProps() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public akka.remote.RemoteProtocol.PropsProtocol getProps() {
+        if (propsBuilder_ == null) {
+          return props_;
+        } else {
+          return propsBuilder_.getMessage();
+        }
+      }
+      public Builder setProps(akka.remote.RemoteProtocol.PropsProtocol value) {
+        if (propsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          props_ = value;
+          onChanged();
+        } else {
+          propsBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder setProps(
+          akka.remote.RemoteProtocol.PropsProtocol.Builder builderForValue) {
+        if (propsBuilder_ == null) {
+          props_ = builderForValue.build();
+          onChanged();
+        } else {
+          propsBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder mergeProps(akka.remote.RemoteProtocol.PropsProtocol value) {
+        if (propsBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              props_ != akka.remote.RemoteProtocol.PropsProtocol.getDefaultInstance()) {
+            props_ =
+              akka.remote.RemoteProtocol.PropsProtocol.newBuilder(props_).mergeFrom(value).buildPartial();
+          } else {
+            props_ = value;
+          }
+          onChanged();
+        } else {
+          propsBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder clearProps() {
+        if (propsBuilder_ == null) {
+          props_ = akka.remote.RemoteProtocol.PropsProtocol.getDefaultInstance();
+          onChanged();
+        } else {
+          propsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      public akka.remote.RemoteProtocol.PropsProtocol.Builder getPropsBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getPropsFieldBuilder().getBuilder();
+      }
+      public akka.remote.RemoteProtocol.PropsProtocolOrBuilder getPropsOrBuilder() {
+        if (propsBuilder_ != null) {
+          return propsBuilder_.getMessageOrBuilder();
+        } else {
+          return props_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          akka.remote.RemoteProtocol.PropsProtocol, akka.remote.RemoteProtocol.PropsProtocol.Builder, akka.remote.RemoteProtocol.PropsProtocolOrBuilder> 
+          getPropsFieldBuilder() {
+        if (propsBuilder_ == null) {
+          propsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              akka.remote.RemoteProtocol.PropsProtocol, akka.remote.RemoteProtocol.PropsProtocol.Builder, akka.remote.RemoteProtocol.PropsProtocolOrBuilder>(
+                  props_,
+                  getParentForChildren(),
+                  isClean());
+          props_ = null;
+        }
+        return propsBuilder_;
+      }
+      
+      // required .DeployProtocol deploy = 2;
+      private akka.remote.RemoteProtocol.DeployProtocol deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          akka.remote.RemoteProtocol.DeployProtocol, akka.remote.RemoteProtocol.DeployProtocol.Builder, akka.remote.RemoteProtocol.DeployProtocolOrBuilder> deployBuilder_;
+      public boolean hasDeploy() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public akka.remote.RemoteProtocol.DeployProtocol getDeploy() {
+        if (deployBuilder_ == null) {
+          return deploy_;
+        } else {
+          return deployBuilder_.getMessage();
+        }
+      }
+      public Builder setDeploy(akka.remote.RemoteProtocol.DeployProtocol value) {
+        if (deployBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          deploy_ = value;
+          onChanged();
+        } else {
+          deployBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder setDeploy(
+          akka.remote.RemoteProtocol.DeployProtocol.Builder builderForValue) {
+        if (deployBuilder_ == null) {
+          deploy_ = builderForValue.build();
+          onChanged();
+        } else {
+          deployBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder mergeDeploy(akka.remote.RemoteProtocol.DeployProtocol value) {
+        if (deployBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              deploy_ != akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance()) {
+            deploy_ =
+              akka.remote.RemoteProtocol.DeployProtocol.newBuilder(deploy_).mergeFrom(value).buildPartial();
+          } else {
+            deploy_ = value;
+          }
+          onChanged();
+        } else {
+          deployBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder clearDeploy() {
+        if (deployBuilder_ == null) {
+          deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
+          onChanged();
+        } else {
+          deployBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      public akka.remote.RemoteProtocol.DeployProtocol.Builder getDeployBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getDeployFieldBuilder().getBuilder();
+      }
+      public akka.remote.RemoteProtocol.DeployProtocolOrBuilder getDeployOrBuilder() {
+        if (deployBuilder_ != null) {
+          return deployBuilder_.getMessageOrBuilder();
+        } else {
+          return deploy_;
+        }
+      }
      // Lazily creates the SingleFieldBuilder for `deploy`. Once created, the
      // builder owns the field's state, so the plain `deploy_` reference is
      // nulled out to avoid a stale duplicate.
      private com.google.protobuf.SingleFieldBuilder<
          akka.remote.RemoteProtocol.DeployProtocol, akka.remote.RemoteProtocol.DeployProtocol.Builder, akka.remote.RemoteProtocol.DeployProtocolOrBuilder> 
          getDeployFieldBuilder() {
        if (deployBuilder_ == null) {
          deployBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              akka.remote.RemoteProtocol.DeployProtocol, akka.remote.RemoteProtocol.DeployProtocol.Builder, akka.remote.RemoteProtocol.DeployProtocolOrBuilder>(
                  deploy_,
                  getParentForChildren(),
                  isClean());
          deploy_ = null;
        }
        return deployBuilder_;
      }
+      
      // required string path = 3;
      // Stored as either String or ByteString (lazy UTF-8 decode), per the
      // standard generated-code representation for string fields.
      private java.lang.Object path_ = "";
      public boolean hasPath() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      // Decodes a cached ByteString to String on first access and caches the
      // decoded form.
      public String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof String)) {
          String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
          path_ = s;
          return s;
        } else {
          return (String) ref;
        }
      }
      // Sets `path`; null is rejected (required field). The odd indentation of
      // the null check is exactly what protoc emits - do not "fix" it by hand.
      public Builder setPath(String value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000004;
        path_ = value;
        onChanged();
        return this;
      }
      public Builder clearPath() {
        bitField0_ = (bitField0_ & ~0x00000004);
        path_ = getDefaultInstance().getPath();
        onChanged();
        return this;
      }
      // Package-private fast path used by the wire parser: stores the raw
      // ByteString without UTF-8 validation or decoding.
      void setPath(com.google.protobuf.ByteString value) {
        bitField0_ |= 0x00000004;
        path_ = value;
        onChanged();
      }
+      
      // required .ActorRefProtocol supervisor = 4;
      // Nested-message field: either the plain `supervisor_` value or the
      // SingleFieldBuilder is authoritative, never both (bit 0x00000008 tracks
      // presence).
      private akka.remote.RemoteProtocol.ActorRefProtocol supervisor_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> supervisorBuilder_;
      public boolean hasSupervisor() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      public akka.remote.RemoteProtocol.ActorRefProtocol getSupervisor() {
        if (supervisorBuilder_ == null) {
          return supervisor_;
        } else {
          return supervisorBuilder_.getMessage();
        }
      }
      // Sets `supervisor` to a complete message; null is rejected.
      public Builder setSupervisor(akka.remote.RemoteProtocol.ActorRefProtocol value) {
        if (supervisorBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          supervisor_ = value;
          onChanged();
        } else {
          supervisorBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        return this;
      }
      // Sets `supervisor` from a builder (built eagerly here).
      public Builder setSupervisor(
          akka.remote.RemoteProtocol.ActorRefProtocol.Builder builderForValue) {
        if (supervisorBuilder_ == null) {
          supervisor_ = builderForValue.build();
          onChanged();
        } else {
          supervisorBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        return this;
      }
      // Protobuf merge semantics: field-by-field merge when already set to a
      // non-default instance, plain replacement otherwise.
      public Builder mergeSupervisor(akka.remote.RemoteProtocol.ActorRefProtocol value) {
        if (supervisorBuilder_ == null) {
          if (((bitField0_ & 0x00000008) == 0x00000008) &&
              supervisor_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) {
            supervisor_ =
              akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(supervisor_).mergeFrom(value).buildPartial();
          } else {
            supervisor_ = value;
          }
          onChanged();
        } else {
          supervisorBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000008;
        return this;
      }
      // Resets `supervisor` to its default instance and clears its has-bit.
      public Builder clearSupervisor() {
        if (supervisorBuilder_ == null) {
          supervisor_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
          onChanged();
        } else {
          supervisorBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }
      // Mutable nested builder access; forces the builder representation.
      public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getSupervisorBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getSupervisorFieldBuilder().getBuilder();
      }
      public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSupervisorOrBuilder() {
        if (supervisorBuilder_ != null) {
          return supervisorBuilder_.getMessageOrBuilder();
        } else {
          return supervisor_;
        }
      }
      // Lazily creates the SingleFieldBuilder; after creation the plain field
      // reference is nulled so the builder is the single source of truth.
      private com.google.protobuf.SingleFieldBuilder<
          akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> 
          getSupervisorFieldBuilder() {
        if (supervisorBuilder_ == null) {
          supervisorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder>(
                  supervisor_,
                  getParentForChildren(),
                  isClean());
          supervisor_ = null;
        }
        return supervisorBuilder_;
      }
+      
+      // @@protoc_insertion_point(builder_scope:DaemonMsgCreateProtocol)
+    }
+    
    // Eagerly constructs the shared default instance (noInit constructor skips
    // super(builder) initialization; initFields fills in field defaults).
    static {
      defaultInstance = new DaemonMsgCreateProtocol(true);
      defaultInstance.initFields();
    }
+    
+    // @@protoc_insertion_point(class_scope:DaemonMsgCreateProtocol)
+  }
+  
  /**
   * Accessor interface implemented by both {@code PropsProtocol} and its
   * Builder: presence checks plus getters for the four declared fields.
   * Generated code - do not edit.
   */
  public interface PropsProtocolOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required bytes creator = 1;
    boolean hasCreator();
    com.google.protobuf.ByteString getCreator();
    
    // required string dispatcher = 2;
    boolean hasDispatcher();
    String getDispatcher();
    
    // required .DeployProtocol deploy = 3;
    boolean hasDeploy();
    akka.remote.RemoteProtocol.DeployProtocol getDeploy();
    akka.remote.RemoteProtocol.DeployProtocolOrBuilder getDeployOrBuilder();
    
    // optional bytes routerConfig = 4;
    boolean hasRouterConfig();
    com.google.protobuf.ByteString getRouterConfig();
  }
  /**
   * Generated protobuf message {@code PropsProtocol}: a serialized actor
   * Props with a required {@code creator} (bytes), required {@code dispatcher}
   * (string), required nested {@code DeployProtocol deploy} and an optional
   * {@code routerConfig} (bytes). Generated code - do not edit by hand.
   */
  public static final class PropsProtocol extends
      com.google.protobuf.GeneratedMessage
      implements PropsProtocolOrBuilder {
    // Use PropsProtocol.newBuilder() to construct.
    private PropsProtocol(Builder builder) {
      super(builder);
    }
    // noInit constructor used only for the shared default instance below.
    private PropsProtocol(boolean noInit) {}
    
    private static final PropsProtocol defaultInstance;
    public static PropsProtocol getDefaultInstance() {
      return defaultInstance;
    }
    
    public PropsProtocol getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return akka.remote.RemoteProtocol.internal_static_PropsProtocol_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return akka.remote.RemoteProtocol.internal_static_PropsProtocol_fieldAccessorTable;
    }
    
    // Presence bits: 0x1 creator, 0x2 dispatcher, 0x4 deploy, 0x8 routerConfig.
    private int bitField0_;
    // required bytes creator = 1;
    public static final int CREATOR_FIELD_NUMBER = 1;
    private com.google.protobuf.ByteString creator_;
    public boolean hasCreator() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    public com.google.protobuf.ByteString getCreator() {
      return creator_;
    }
    
    // required string dispatcher = 2;
    public static final int DISPATCHER_FIELD_NUMBER = 2;
    // Holds either String or ByteString; decoded lazily to String on access.
    private java.lang.Object dispatcher_;
    public boolean hasDispatcher() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    // Decodes the cached ByteString form; the decoded String is cached only
    // when the bytes are valid UTF-8.
    public String getDispatcher() {
      java.lang.Object ref = dispatcher_;
      if (ref instanceof String) {
        return (String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        String s = bs.toStringUtf8();
        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
          dispatcher_ = s;
        }
        return s;
      }
    }
    // Encodes (and caches) the UTF-8 bytes for serialization.
    private com.google.protobuf.ByteString getDispatcherBytes() {
      java.lang.Object ref = dispatcher_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
        dispatcher_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    
    // required .DeployProtocol deploy = 3;
    public static final int DEPLOY_FIELD_NUMBER = 3;
    private akka.remote.RemoteProtocol.DeployProtocol deploy_;
    public boolean hasDeploy() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public akka.remote.RemoteProtocol.DeployProtocol getDeploy() {
      return deploy_;
    }
    public akka.remote.RemoteProtocol.DeployProtocolOrBuilder getDeployOrBuilder() {
      return deploy_;
    }
    
    // optional bytes routerConfig = 4;
    public static final int ROUTERCONFIG_FIELD_NUMBER = 4;
    private com.google.protobuf.ByteString routerConfig_;
    public boolean hasRouterConfig() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    public com.google.protobuf.ByteString getRouterConfig() {
      return routerConfig_;
    }
    
    // Field defaults, applied to the shared default instance.
    private void initFields() {
      creator_ = com.google.protobuf.ByteString.EMPTY;
      dispatcher_ = "";
      deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
      routerConfig_ = com.google.protobuf.ByteString.EMPTY;
    }
    // Memoized tri-state: -1 unknown, 0 not initialized, 1 initialized.
    private byte memoizedIsInitialized = -1;
    // A message is initialized when all required fields (creator, dispatcher,
    // deploy) are present and the nested deploy message is itself initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      
      if (!hasCreator()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDispatcher()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDeploy()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getDeploy().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    
    // Serializes only the fields whose presence bits are set, in field order.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, creator_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeBytes(2, getDispatcherBytes());
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeMessage(3, deploy_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeBytes(4, routerConfig_);
      }
      getUnknownFields().writeTo(output);
    }
    
    // Memoized wire size; -1 means not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
    
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, creator_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(2, getDispatcherBytes());
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(3, deploy_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(4, routerConfig_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    
    // Static parse helpers: each delegates to a fresh Builder and buildParsed().
    public static akka.remote.RemoteProtocol.PropsProtocol parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static akka.remote.RemoteProtocol.PropsProtocol parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static akka.remote.RemoteProtocol.PropsProtocol parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data).buildParsed();
    }
    public static akka.remote.RemoteProtocol.PropsProtocol parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return newBuilder().mergeFrom(data, extensionRegistry)
               .buildParsed();
    }
    public static akka.remote.RemoteProtocol.PropsProtocol parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static akka.remote.RemoteProtocol.PropsProtocol parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    // Delimited variants return null on clean EOF (no message present).
    public static akka.remote.RemoteProtocol.PropsProtocol parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static akka.remote.RemoteProtocol.PropsProtocol parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      Builder builder = newBuilder();
      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
        return builder.buildParsed();
      } else {
        return null;
      }
    }
    public static akka.remote.RemoteProtocol.PropsProtocol parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input).buildParsed();
    }
    public static akka.remote.RemoteProtocol.PropsProtocol parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return newBuilder().mergeFrom(input, extensionRegistry)
               .buildParsed();
    }
    
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(akka.remote.RemoteProtocol.PropsProtocol prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Mutable builder for {@code PropsProtocol}; mirrors the message's four
     * fields with the same presence-bit layout.
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder
       implements akka.remote.RemoteProtocol.PropsProtocolOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return akka.remote.RemoteProtocol.internal_static_PropsProtocol_descriptor;
      }
      
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return akka.remote.RemoteProtocol.internal_static_PropsProtocol_fieldAccessorTable;
      }
      
      // Construct using akka.remote.RemoteProtocol.PropsProtocol.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      
      private Builder(BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Pre-creates nested field builders when the runtime is configured to
      // always use field builders (reflection-based access paths).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getDeployFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      
      // Resets every field to its default and clears all presence bits.
      public Builder clear() {
        super.clear();
        creator_ = com.google.protobuf.ByteString.EMPTY;
        bitField0_ = (bitField0_ & ~0x00000001);
        dispatcher_ = "";
        bitField0_ = (bitField0_ & ~0x00000002);
        if (deployBuilder_ == null) {
          deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
        } else {
          deployBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        routerConfig_ = com.google.protobuf.ByteString.EMPTY;
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }
      
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return akka.remote.RemoteProtocol.PropsProtocol.getDescriptor();
      }
      
      public akka.remote.RemoteProtocol.PropsProtocol getDefaultInstanceForType() {
        return akka.remote.RemoteProtocol.PropsProtocol.getDefaultInstance();
      }
      
      // Builds, throwing UninitializedMessageException if a required field is
      // missing.
      public akka.remote.RemoteProtocol.PropsProtocol build() {
        akka.remote.RemoteProtocol.PropsProtocol result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      
      // Like build(), but surfaces missing required fields as an
      // InvalidProtocolBufferException (used by the parse helpers).
      private akka.remote.RemoteProtocol.PropsProtocol buildParsed()
          throws com.google.protobuf.InvalidProtocolBufferException {
        akka.remote.RemoteProtocol.PropsProtocol result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(
            result).asInvalidProtocolBufferException();
        }
        return result;
      }
      
      // Copies the builder state into a new message without validating
      // required fields; presence bits are transferred one by one.
      public akka.remote.RemoteProtocol.PropsProtocol buildPartial() {
        akka.remote.RemoteProtocol.PropsProtocol result = new akka.remote.RemoteProtocol.PropsProtocol(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.creator_ = creator_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.dispatcher_ = dispatcher_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        if (deployBuilder_ == null) {
          result.deploy_ = deploy_;
        } else {
          result.deploy_ = deployBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.routerConfig_ = routerConfig_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof akka.remote.RemoteProtocol.PropsProtocol) {
          return mergeFrom((akka.remote.RemoteProtocol.PropsProtocol)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      
      // Merges only the fields present in `other` (default instance is a
      // no-op).
      public Builder mergeFrom(akka.remote.RemoteProtocol.PropsProtocol other) {
        if (other == akka.remote.RemoteProtocol.PropsProtocol.getDefaultInstance()) return this;
        if (other.hasCreator()) {
          setCreator(other.getCreator());
        }
        if (other.hasDispatcher()) {
          setDispatcher(other.getDispatcher());
        }
        if (other.hasDeploy()) {
          mergeDeploy(other.getDeploy());
        }
        if (other.hasRouterConfig()) {
          setRouterConfig(other.getRouterConfig());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      
      public final boolean isInitialized() {
        if (!hasCreator()) {
          
          return false;
        }
        if (!hasDispatcher()) {
          
          return false;
        }
        if (!hasDeploy()) {
          
          return false;
        }
        if (!getDeploy().isInitialized()) {
          
          return false;
        }
        return true;
      }
      
      // Wire parser: dispatches on the tag value (field number << 3 | wire
      // type); tag 0 marks end of input, unrecognized tags are preserved as
      // unknown fields.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder(
            this.getUnknownFields());
        while (true) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              this.setUnknownFields(unknownFields.build());
              onChanged();
              return this;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              }
              break;
            }
            case 10: {
              bitField0_ |= 0x00000001;
              creator_ = input.readBytes();
              break;
            }
            case 18: {
              bitField0_ |= 0x00000002;
              dispatcher_ = input.readBytes();
              break;
            }
            case 26: {
              akka.remote.RemoteProtocol.DeployProtocol.Builder subBuilder = akka.remote.RemoteProtocol.DeployProtocol.newBuilder();
              if (hasDeploy()) {
                subBuilder.mergeFrom(getDeploy());
              }
              input.readMessage(subBuilder, extensionRegistry);
              setDeploy(subBuilder.buildPartial());
              break;
            }
            case 34: {
              bitField0_ |= 0x00000008;
              routerConfig_ = input.readBytes();
              break;
            }
          }
        }
      }
      
      private int bitField0_;
      
      // required bytes creator = 1;
      private com.google.protobuf.ByteString creator_ = com.google.protobuf.ByteString.EMPTY;
      public boolean hasCreator() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public com.google.protobuf.ByteString getCreator() {
        return creator_;
      }
      // Null is rejected (required field); indentation below is protoc's own.
      public Builder setCreator(com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
        creator_ = value;
        onChanged();
        return this;
      }
      public Builder clearCreator() {
        bitField0_ = (bitField0_ & ~0x00000001);
        creator_ = getDefaultInstance().getCreator();
        onChanged();
        return this;
      }
      
      // required string dispatcher = 2;
      // Stored as String or ByteString; decoded lazily.
      private java.lang.Object dispatcher_ = "";
      public boolean hasDispatcher() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public String getDispatcher() {
        java.lang.Object ref = dispatcher_;
        if (!(ref instanceof String)) {
          String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
          dispatcher_ = s;
          return s;
        } else {
          return (String) ref;
        }
      }
      public Builder setDispatcher(String value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000002;
        dispatcher_ = value;
        onChanged();
        return this;
      }
      public Builder clearDispatcher() {
        bitField0_ = (bitField0_ & ~0x00000002);
        dispatcher_ = getDefaultInstance().getDispatcher();
        onChanged();
        return this;
      }
      // Package-private raw-bytes setter used by the wire parser.
      void setDispatcher(com.google.protobuf.ByteString value) {
        bitField0_ |= 0x00000002;
        dispatcher_ = value;
        onChanged();
      }
      
      // required .DeployProtocol deploy = 3;
      // Nested message: plain field or SingleFieldBuilder, never both live.
      private akka.remote.RemoteProtocol.DeployProtocol deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          akka.remote.RemoteProtocol.DeployProtocol, akka.remote.RemoteProtocol.DeployProtocol.Builder, akka.remote.RemoteProtocol.DeployProtocolOrBuilder> deployBuilder_;
      public boolean hasDeploy() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public akka.remote.RemoteProtocol.DeployProtocol getDeploy() {
        if (deployBuilder_ == null) {
          return deploy_;
        } else {
          return deployBuilder_.getMessage();
        }
      }
      public Builder setDeploy(akka.remote.RemoteProtocol.DeployProtocol value) {
        if (deployBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          deploy_ = value;
          onChanged();
        } else {
          deployBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        return this;
      }
      public Builder setDeploy(
          akka.remote.RemoteProtocol.DeployProtocol.Builder builderForValue) {
        if (deployBuilder_ == null) {
          deploy_ = builderForValue.build();
          onChanged();
        } else {
          deployBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        return this;
      }
      // Protobuf merge semantics for the nested message.
      public Builder mergeDeploy(akka.remote.RemoteProtocol.DeployProtocol value) {
        if (deployBuilder_ == null) {
          if (((bitField0_ & 0x00000004) == 0x00000004) &&
              deploy_ != akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance()) {
            deploy_ =
              akka.remote.RemoteProtocol.DeployProtocol.newBuilder(deploy_).mergeFrom(value).buildPartial();
          } else {
            deploy_ = value;
          }
          onChanged();
        } else {
          deployBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000004;
        return this;
      }
      public Builder clearDeploy() {
        if (deployBuilder_ == null) {
          deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
          onChanged();
        } else {
          deployBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      public akka.remote.RemoteProtocol.DeployProtocol.Builder getDeployBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getDeployFieldBuilder().getBuilder();
      }
      public akka.remote.RemoteProtocol.DeployProtocolOrBuilder getDeployOrBuilder() {
        if (deployBuilder_ != null) {
          return deployBuilder_.getMessageOrBuilder();
        } else {
          return deploy_;
        }
      }
      // Lazily creates the nested field builder and hands it ownership of the
      // field's state (plain reference is nulled).
      private com.google.protobuf.SingleFieldBuilder<
          akka.remote.RemoteProtocol.DeployProtocol, akka.remote.RemoteProtocol.DeployProtocol.Builder, akka.remote.RemoteProtocol.DeployProtocolOrBuilder> 
          getDeployFieldBuilder() {
        if (deployBuilder_ == null) {
          deployBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              akka.remote.RemoteProtocol.DeployProtocol, akka.remote.RemoteProtocol.DeployProtocol.Builder, akka.remote.RemoteProtocol.DeployProtocolOrBuilder>(
                  deploy_,
                  getParentForChildren(),
                  isClean());
          deploy_ = null;
        }
        return deployBuilder_;
      }
      
      // optional bytes routerConfig = 4;
      private com.google.protobuf.ByteString routerConfig_ = com.google.protobuf.ByteString.EMPTY;
      public boolean hasRouterConfig() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      public com.google.protobuf.ByteString getRouterConfig() {
        return routerConfig_;
      }
      public Builder setRouterConfig(com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000008;
        routerConfig_ = value;
        onChanged();
        return this;
      }
      public Builder clearRouterConfig() {
        bitField0_ = (bitField0_ & ~0x00000008);
        routerConfig_ = getDefaultInstance().getRouterConfig();
        onChanged();
        return this;
      }
      
      // @@protoc_insertion_point(builder_scope:PropsProtocol)
    }
    
    // Eagerly constructs the shared default instance.
    static {
      defaultInstance = new PropsProtocol(true);
      defaultInstance.initFields();
    }
    
    // @@protoc_insertion_point(class_scope:PropsProtocol)
  }
+  
  /**
   * Accessor interface implemented by both {@code DeployProtocol} and its
   * Builder: a required path plus three optional serialized-bytes fields.
   * Generated code - do not edit.
   */
  public interface DeployProtocolOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    
    // required string path = 1;
    boolean hasPath();
    String getPath();
    
    // optional bytes config = 2;
    boolean hasConfig();
    com.google.protobuf.ByteString getConfig();
    
    // optional bytes routerConfig = 3;
    boolean hasRouterConfig();
    com.google.protobuf.ByteString getRouterConfig();
    
    // optional bytes scope = 4;
    boolean hasScope();
    com.google.protobuf.ByteString getScope();
  }
  /**
   * Generated protobuf message {@code DeployProtocol}: a required {@code path}
   * (string) and optional {@code config}, {@code routerConfig} and
   * {@code scope} bytes fields. Generated code - do not edit.
   */
  public static final class DeployProtocol extends
      com.google.protobuf.GeneratedMessage
      implements DeployProtocolOrBuilder {
    // Use DeployProtocol.newBuilder() to construct.
    private DeployProtocol(Builder builder) {
      super(builder);
    }
    // noInit constructor used only for the shared default instance.
    private DeployProtocol(boolean noInit) {}
    
    private static final DeployProtocol defaultInstance;
    public static DeployProtocol getDefaultInstance() {
      return defaultInstance;
    }
    
    public DeployProtocol getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return akka.remote.RemoteProtocol.internal_static_DeployProtocol_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return akka.remote.RemoteProtocol.internal_static_DeployProtocol_fieldAccessorTable;
    }
    
    // Presence bits: 0x1 path, 0x2 config, 0x4 routerConfig, 0x8 scope.
    private int bitField0_;
    // required string path = 1;
    public static final int PATH_FIELD_NUMBER = 1;
    // Holds either String or ByteString; decoded lazily on access.
    private java.lang.Object path_;
    public boolean hasPath() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    // Decodes a cached ByteString form; caches the String only when the bytes
    // are valid UTF-8.
    public String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof String) {
        return (String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        String s = bs.toStringUtf8();
        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
          path_ = s;
        }
        return s;
      }
    }
    // Encodes (and caches) the UTF-8 bytes for serialization.
    private com.google.protobuf.ByteString getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
        path_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    
    // optional bytes config = 2;
    public static final int CONFIG_FIELD_NUMBER = 2;
    private com.google.protobuf.ByteString config_;
    public boolean hasConfig() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public com.google.protobuf.ByteString getConfig() {
      return config_;
    }
    
    // optional bytes routerConfig = 3;
    public static final int ROUTERCONFIG_FIELD_NUMBER = 3;
    private com.google.protobuf.ByteString routerConfig_;
    public boolean hasRouterConfig() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    public com.google.protobuf.ByteString getRouterConfig() {
      return routerConfig_;
    }
    
    // optional bytes scope = 4;
    public static final int SCOPE_FIELD_NUMBER = 4;
    private com.google.protobuf.ByteString scope_;
    public boolean hasScope() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    public com.google.protobuf.ByteString getScope() {
      return scope_;
    }
    
    // Field defaults, applied to the shared default instance.
    private void initFields() {
      path_ = "";
      config_ = com.google.protobuf.ByteString.EMPTY;
      routerConfig_ = com.google.protobuf.ByteString.EMPTY;
      scope_ = com.google.protobuf.ByteString.EMPTY;
    }
    // Memoized tri-state: -1 unknown, 0 not initialized, 1 initialized.
    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+      
+      if (!hasPath()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+    
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getPathBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, config_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeBytes(3, routerConfig_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeBytes(4, scope_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getPathBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, config_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(3, routerConfig_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(4, scope_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+    
+    public static akka.remote.RemoteProtocol.DeployProtocol parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DeployProtocol parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(akka.remote.RemoteProtocol.DeployProtocol prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder
+       implements akka.remote.RemoteProtocol.DeployProtocolOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return akka.remote.RemoteProtocol.internal_static_DeployProtocol_descriptor;
+      }
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return akka.remote.RemoteProtocol.internal_static_DeployProtocol_fieldAccessorTable;
+      }
+      
+      // Construct using akka.remote.RemoteProtocol.DeployProtocol.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+      
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+      
+      public Builder clear() {
+        super.clear();
+        path_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        config_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        routerConfig_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        scope_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000008);
+        return this;
+      }
+      
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return akka.remote.RemoteProtocol.DeployProtocol.getDescriptor();
+      }
+      
+      public akka.remote.RemoteProtocol.DeployProtocol getDefaultInstanceForType() {
+        return akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance();
+      }
+      
+      public akka.remote.RemoteProtocol.DeployProtocol build() {
+        akka.remote.RemoteProtocol.DeployProtocol result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+      
+      private akka.remote.RemoteProtocol.DeployProtocol buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        akka.remote.RemoteProtocol.DeployProtocol result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+      
+      public akka.remote.RemoteProtocol.DeployProtocol buildPartial() {
+        akka.remote.RemoteProtocol.DeployProtocol result = new akka.remote.RemoteProtocol.DeployProtocol(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.path_ = path_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.config_ = config_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.routerConfig_ = routerConfig_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.scope_ = scope_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof akka.remote.RemoteProtocol.DeployProtocol) {
+          return mergeFrom((akka.remote.RemoteProtocol.DeployProtocol)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      public Builder mergeFrom(akka.remote.RemoteProtocol.DeployProtocol other) {
+        if (other == akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance()) return this;
+        if (other.hasPath()) {
+          setPath(other.getPath());
+        }
+        if (other.hasConfig()) {
+          setConfig(other.getConfig());
+        }
+        if (other.hasRouterConfig()) {
+          setRouterConfig(other.getRouterConfig());
+        }
+        if (other.hasScope()) {
+          setScope(other.getScope());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      public final boolean isInitialized() {
+        if (!hasPath()) {
+          
+          return false;
+        }
+        return true;
+      }
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              path_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              config_ = input.readBytes();
+              break;
+            }
+            case 26: {
+              bitField0_ |= 0x00000004;
+              routerConfig_ = input.readBytes();
+              break;
+            }
+            case 34: {
+              bitField0_ |= 0x00000008;
+              scope_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      }
+      
+      private int bitField0_;
+      
+      // required string path = 1;
+      private java.lang.Object path_ = "";
+      public boolean hasPath() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public String getPath() {
+        java.lang.Object ref = path_;
+        if (!(ref instanceof String)) {
+          String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+          path_ = s;
+          return s;
+        } else {
+          return (String) ref;
+        }
+      }
+      public Builder setPath(String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        path_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearPath() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        path_ = getDefaultInstance().getPath();
+        onChanged();
+        return this;
+      }
+      void setPath(com.google.protobuf.ByteString value) {
+        bitField0_ |= 0x00000001;
+        path_ = value;
+        onChanged();
+      }
+      
+      // optional bytes config = 2;
+      private com.google.protobuf.ByteString config_ = com.google.protobuf.ByteString.EMPTY;
+      public boolean hasConfig() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public com.google.protobuf.ByteString getConfig() {
+        return config_;
+      }
+      public Builder setConfig(com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        config_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearConfig() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        config_ = getDefaultInstance().getConfig();
+        onChanged();
+        return this;
+      }
+      
+      // optional bytes routerConfig = 3;
+      private com.google.protobuf.ByteString routerConfig_ = com.google.protobuf.ByteString.EMPTY;
+      public boolean hasRouterConfig() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      public com.google.protobuf.ByteString getRouterConfig() {
+        return routerConfig_;
+      }
+      public Builder setRouterConfig(com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000004;
+        routerConfig_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearRouterConfig() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        routerConfig_ = getDefaultInstance().getRouterConfig();
+        onChanged();
+        return this;
+      }
+      
+      // optional bytes scope = 4;
+      private com.google.protobuf.ByteString scope_ = com.google.protobuf.ByteString.EMPTY;
+      public boolean hasScope() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      public com.google.protobuf.ByteString getScope() {
+        return scope_;
+      }
+      public Builder setScope(com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000008;
+        scope_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearScope() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        scope_ = getDefaultInstance().getScope();
+        onChanged();
+        return this;
+      }
+      
+      // @@protoc_insertion_point(builder_scope:DeployProtocol)
+    }
+    
+    static {
+      defaultInstance = new DeployProtocol(true);
+      defaultInstance.initFields();
+    }
+    
+    // @@protoc_insertion_point(class_scope:DeployProtocol)
+  }
+  
+  public interface DaemonMsgWatchProtocolOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+    
+    // required .ActorRefProtocol watcher = 1;
+    boolean hasWatcher();
+    akka.remote.RemoteProtocol.ActorRefProtocol getWatcher();
+    akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder();
+    
+    // required .ActorRefProtocol watched = 2;
+    boolean hasWatched();
+    akka.remote.RemoteProtocol.ActorRefProtocol getWatched();
+    akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder();
+  }
+  public static final class DaemonMsgWatchProtocol extends
+      com.google.protobuf.GeneratedMessage
+      implements DaemonMsgWatchProtocolOrBuilder {
+    // Use DaemonMsgWatchProtocol.newBuilder() to construct.
+    private DaemonMsgWatchProtocol(Builder builder) {
+      super(builder);
+    }
+    private DaemonMsgWatchProtocol(boolean noInit) {}
+    
+    private static final DaemonMsgWatchProtocol defaultInstance;
+    public static DaemonMsgWatchProtocol getDefaultInstance() {
+      return defaultInstance;
+    }
+    
+    public DaemonMsgWatchProtocol getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+    
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_descriptor;
+    }
+    
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_fieldAccessorTable;
+    }
+    
+    private int bitField0_;
+    // required .ActorRefProtocol watcher = 1;
+    public static final int WATCHER_FIELD_NUMBER = 1;
+    private akka.remote.RemoteProtocol.ActorRefProtocol watcher_;
+    public boolean hasWatcher() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public akka.remote.RemoteProtocol.ActorRefProtocol getWatcher() {
+      return watcher_;
+    }
+    public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder() {
+      return watcher_;
+    }
+    
+    // required .ActorRefProtocol watched = 2;
+    public static final int WATCHED_FIELD_NUMBER = 2;
+    private akka.remote.RemoteProtocol.ActorRefProtocol watched_;
+    public boolean hasWatched() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public akka.remote.RemoteProtocol.ActorRefProtocol getWatched() {
+      return watched_;
+    }
+    public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder() {
+      return watched_;
+    }
+    
+    private void initFields() {
+      watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+      watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+      
+      if (!hasWatcher()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasWatched()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getWatcher().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getWatched().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+    
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, watcher_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, watched_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, watcher_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, watched_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+    
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(akka.remote.RemoteProtocol.DaemonMsgWatchProtocol prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder
+       implements akka.remote.RemoteProtocol.DaemonMsgWatchProtocolOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_descriptor;
+      }
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_fieldAccessorTable;
+      }
+      
+      // Construct using akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+      
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getWatcherFieldBuilder();
+          getWatchedFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+      
+      public Builder clear() {
+        super.clear();
+        if (watcherBuilder_ == null) {
+          watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+        } else {
+          watcherBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (watchedBuilder_ == null) {
+          watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+        } else {
+          watchedBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDescriptor();
+      }
+      
+      public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol getDefaultInstanceForType() {
+        return akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDefaultInstance();
+      }
+      
+      public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol build() {
+        akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+      
+      private akka.remote.RemoteProtocol.DaemonMsgWatchProtocol buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+      
+      public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol buildPartial() {
+        akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = new akka.remote.RemoteProtocol.DaemonMsgWatchProtocol(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (watcherBuilder_ == null) {
+          result.watcher_ = watcher_;
+        } else {
+          result.watcher_ = watcherBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (watchedBuilder_ == null) {
+          result.watched_ = watched_;
+        } else {
+          result.watched_ = watchedBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof akka.remote.RemoteProtocol.DaemonMsgWatchProtocol) {
+          return mergeFrom((akka.remote.RemoteProtocol.DaemonMsgWatchProtocol)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      public Builder mergeFrom(akka.remote.RemoteProtocol.DaemonMsgWatchProtocol other) {
+        if (other == akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDefaultInstance()) return this;
+        if (other.hasWatcher()) {
+          mergeWatcher(other.getWatcher());
+        }
+        if (other.hasWatched()) {
+          mergeWatched(other.getWatched());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      public final boolean isInitialized() {
+        if (!hasWatcher()) {
+          
+          return false;
+        }
+        if (!hasWatched()) {
+          
+          return false;
+        }
+        if (!getWatcher().isInitialized()) {
+          
+          return false;
+        }
+        if (!getWatched().isInitialized()) {
+          
+          return false;
+        }
         return true;
       }
       
@@ -4593,25 +6651,20 @@ public final class RemoteProtocol {
             }
             case 10: {
               akka.remote.RemoteProtocol.ActorRefProtocol.Builder subBuilder = akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder();
-              if (hasRecipient()) {
-                subBuilder.mergeFrom(getRecipient());
+              if (hasWatcher()) {
+                subBuilder.mergeFrom(getWatcher());
               }
               input.readMessage(subBuilder, extensionRegistry);
-              setRecipient(subBuilder.buildPartial());
+              setWatcher(subBuilder.buildPartial());
               break;
             }
             case 18: {
               akka.remote.RemoteProtocol.ActorRefProtocol.Builder subBuilder = akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder();
-              if (hasSender()) {
-                subBuilder.mergeFrom(getSender());
+              if (hasWatched()) {
+                subBuilder.mergeFrom(getWatched());
               }
               input.readMessage(subBuilder, extensionRegistry);
-              setSender(subBuilder.buildPartial());
-              break;
-            }
-            case 26: {
-              bitField0_ |= 0x00000004;
-              message_ = input.readBytes();
+              setWatched(subBuilder.buildPartial());
               break;
             }
           }
@@ -4620,219 +6673,195 @@ public final class RemoteProtocol {
       
       private int bitField0_;
       
-      // required .ActorRefProtocol recipient = 1;
-      private akka.remote.RemoteProtocol.ActorRefProtocol recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+      // required .ActorRefProtocol watcher = 1;
+      private akka.remote.RemoteProtocol.ActorRefProtocol watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
       private com.google.protobuf.SingleFieldBuilder<
-          akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> recipientBuilder_;
-      public boolean hasRecipient() {
+          akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> watcherBuilder_;
+      public boolean hasWatcher() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
-      public akka.remote.RemoteProtocol.ActorRefProtocol getRecipient() {
-        if (recipientBuilder_ == null) {
-          return recipient_;
+      public akka.remote.RemoteProtocol.ActorRefProtocol getWatcher() {
+        if (watcherBuilder_ == null) {
+          return watcher_;
         } else {
-          return recipientBuilder_.getMessage();
+          return watcherBuilder_.getMessage();
         }
       }
-      public Builder setRecipient(akka.remote.RemoteProtocol.ActorRefProtocol value) {
-        if (recipientBuilder_ == null) {
+      public Builder setWatcher(akka.remote.RemoteProtocol.ActorRefProtocol value) {
+        if (watcherBuilder_ == null) {
           if (value == null) {
             throw new NullPointerException();
           }
-          recipient_ = value;
+          watcher_ = value;
           onChanged();
         } else {
-          recipientBuilder_.setMessage(value);
+          watcherBuilder_.setMessage(value);
         }
         bitField0_ |= 0x00000001;
         return this;
       }
-      public Builder setRecipient(
+      public Builder setWatcher(
           akka.remote.RemoteProtocol.ActorRefProtocol.Builder builderForValue) {
-        if (recipientBuilder_ == null) {
-          recipient_ = builderForValue.build();
+        if (watcherBuilder_ == null) {
+          watcher_ = builderForValue.build();
           onChanged();
         } else {
-          recipientBuilder_.setMessage(builderForValue.build());
+          watcherBuilder_.setMessage(builderForValue.build());
         }
         bitField0_ |= 0x00000001;
         return this;
       }
-      public Builder mergeRecipient(akka.remote.RemoteProtocol.ActorRefProtocol value) {
-        if (recipientBuilder_ == null) {
+      public Builder mergeWatcher(akka.remote.RemoteProtocol.ActorRefProtocol value) {
+        if (watcherBuilder_ == null) {
           if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              recipient_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) {
-            recipient_ =
-              akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(recipient_).mergeFrom(value).buildPartial();
+              watcher_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) {
+            watcher_ =
+              akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(watcher_).mergeFrom(value).buildPartial();
           } else {
-            recipient_ = value;
+            watcher_ = value;
           }
           onChanged();
         } else {
-          recipientBuilder_.mergeFrom(value);
+          watcherBuilder_.mergeFrom(value);
         }
         bitField0_ |= 0x00000001;
         return this;
       }
-      public Builder clearRecipient() {
-        if (recipientBuilder_ == null) {
-          recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+      public Builder clearWatcher() {
+        if (watcherBuilder_ == null) {
+          watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
           onChanged();
         } else {
-          recipientBuilder_.clear();
+          watcherBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
-      public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getRecipientBuilder() {
+      public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getWatcherBuilder() {
         bitField0_ |= 0x00000001;
         onChanged();
-        return getRecipientFieldBuilder().getBuilder();
+        return getWatcherFieldBuilder().getBuilder();
       }
-      public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder() {
-        if (recipientBuilder_ != null) {
-          return recipientBuilder_.getMessageOrBuilder();
+      public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder() {
+        if (watcherBuilder_ != null) {
+          return watcherBuilder_.getMessageOrBuilder();
         } else {
-          return recipient_;
+          return watcher_;
         }
       }
       private com.google.protobuf.SingleFieldBuilder<
           akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> 
-          getRecipientFieldBuilder() {
-        if (recipientBuilder_ == null) {
-          recipientBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+          getWatcherFieldBuilder() {
+        if (watcherBuilder_ == null) {
+          watcherBuilder_ = new com.google.protobuf.SingleFieldBuilder<
               akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder>(
-                  recipient_,
+                  watcher_,
                   getParentForChildren(),
                   isClean());
-          recipient_ = null;
+          watcher_ = null;
         }
-        return recipientBuilder_;
+        return watcherBuilder_;
       }
       
-      // optional .ActorRefProtocol sender = 2;
-      private akka.remote.RemoteProtocol.ActorRefProtocol sender_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+      // required .ActorRefProtocol watched = 2;
+      private akka.remote.RemoteProtocol.ActorRefProtocol watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
       private com.google.protobuf.SingleFieldBuilder<
-          akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> senderBuilder_;
-      public boolean hasSender() {
+          akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> watchedBuilder_;
+      public boolean hasWatched() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
-      public akka.remote.RemoteProtocol.ActorRefProtocol getSender() {
-        if (senderBuilder_ == null) {
-          return sender_;
+      public akka.remote.RemoteProtocol.ActorRefProtocol getWatched() {
+        if (watchedBuilder_ == null) {
+          return watched_;
         } else {
-          return senderBuilder_.getMessage();
+          return watchedBuilder_.getMessage();
         }
       }
-      public Builder setSender(akka.remote.RemoteProtocol.ActorRefProtocol value) {
-        if (senderBuilder_ == null) {
+      public Builder setWatched(akka.remote.RemoteProtocol.ActorRefProtocol value) {
+        if (watchedBuilder_ == null) {
           if (value == null) {
             throw new NullPointerException();
           }
-          sender_ = value;
+          watched_ = value;
           onChanged();
         } else {
-          senderBuilder_.setMessage(value);
+          watchedBuilder_.setMessage(value);
         }
         bitField0_ |= 0x00000002;
         return this;
       }
-      public Builder setSender(
+      public Builder setWatched(
           akka.remote.RemoteProtocol.ActorRefProtocol.Builder builderForValue) {
-        if (senderBuilder_ == null) {
-          sender_ = builderForValue.build();
+        if (watchedBuilder_ == null) {
+          watched_ = builderForValue.build();
           onChanged();
         } else {
-          senderBuilder_.setMessage(builderForValue.build());
+          watchedBuilder_.setMessage(builderForValue.build());
         }
         bitField0_ |= 0x00000002;
         return this;
       }
-      public Builder mergeSender(akka.remote.RemoteProtocol.ActorRefProtocol value) {
-        if (senderBuilder_ == null) {
+      public Builder mergeWatched(akka.remote.RemoteProtocol.ActorRefProtocol value) {
+        if (watchedBuilder_ == null) {
           if (((bitField0_ & 0x00000002) == 0x00000002) &&
-              sender_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) {
-            sender_ =
-              akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(sender_).mergeFrom(value).buildPartial();
+              watched_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) {
+            watched_ =
+              akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(watched_).mergeFrom(value).buildPartial();
           } else {
-            sender_ = value;
+            watched_ = value;
           }
           onChanged();
         } else {
-          senderBuilder_.mergeFrom(value);
+          watchedBuilder_.mergeFrom(value);
         }
         bitField0_ |= 0x00000002;
         return this;
       }
-      public Builder clearSender() {
-        if (senderBuilder_ == null) {
-          sender_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
+      public Builder clearWatched() {
+        if (watchedBuilder_ == null) {
+          watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance();
           onChanged();
         } else {
-          senderBuilder_.clear();
+          watchedBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
-      public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getSenderBuilder() {
+      public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getWatchedBuilder() {
         bitField0_ |= 0x00000002;
         onChanged();
-        return getSenderFieldBuilder().getBuilder();
+        return getWatchedFieldBuilder().getBuilder();
       }
-      public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder() {
-        if (senderBuilder_ != null) {
-          return senderBuilder_.getMessageOrBuilder();
+      public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder() {
+        if (watchedBuilder_ != null) {
+          return watchedBuilder_.getMessageOrBuilder();
         } else {
-          return sender_;
+          return watched_;
         }
       }
       private com.google.protobuf.SingleFieldBuilder<
           akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> 
-          getSenderFieldBuilder() {
-        if (senderBuilder_ == null) {
-          senderBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+          getWatchedFieldBuilder() {
+        if (watchedBuilder_ == null) {
+          watchedBuilder_ = new com.google.protobuf.SingleFieldBuilder<
               akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder>(
-                  sender_,
+                  watched_,
                   getParentForChildren(),
                   isClean());
-          sender_ = null;
+          watched_ = null;
         }
-        return senderBuilder_;
+        return watchedBuilder_;
       }
       
-      // required bytes message = 3;
-      private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY;
-      public boolean hasMessage() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      public com.google.protobuf.ByteString getMessage() {
-        return message_;
-      }
-      public Builder setMessage(com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000004;
-        message_ = value;
-        onChanged();
-        return this;
-      }
-      public Builder clearMessage() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        message_ = getDefaultInstance().getMessage();
-        onChanged();
-        return this;
-      }
-      
-      // @@protoc_insertion_point(builder_scope:DurableMailboxMessageProtocol)
+      // @@protoc_insertion_point(builder_scope:DaemonMsgWatchProtocol)
     }
     
     static {
-      defaultInstance = new DurableMailboxMessageProtocol(true);
+      defaultInstance = new DaemonMsgWatchProtocol(true);
       defaultInstance.initFields();
     }
     
-    // @@protoc_insertion_point(class_scope:DurableMailboxMessageProtocol)
+    // @@protoc_insertion_point(class_scope:DaemonMsgWatchProtocol)
   }
   
   private static com.google.protobuf.Descriptors.Descriptor
@@ -4871,10 +6900,25 @@ public final class RemoteProtocol {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_AddressProtocol_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_DurableMailboxMessageProtocol_descriptor;
+    internal_static_DaemonMsgCreateProtocol_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_DurableMailboxMessageProtocol_fieldAccessorTable;
+      internal_static_DaemonMsgCreateProtocol_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_PropsProtocol_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_PropsProtocol_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_DeployProtocol_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_DeployProtocol_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_DaemonMsgWatchProtocol_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_DaemonMsgWatchProtocol_fieldAccessorTable;
   
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -4900,12 +6944,20 @@ public final class RemoteProtocol {
       "anifest\030\003 \001(\014\"3\n\025MetadataEntryProtocol\022\013" +
       "\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"A\n\017AddressPro" +
       "tocol\022\016\n\006system\030\001 \002(\t\022\020\n\010hostname\030\002 \002(\t\022" +
-      "\014\n\004port\030\003 \002(\r\"y\n\035DurableMailboxMessagePr" +
-      "otocol\022$\n\trecipient\030\001 \002(\0132\021.ActorRefProt" +
-      "ocol\022!\n\006sender\030\002 \001(\0132\021.ActorRefProtocol\022" +
-      "\017\n\007message\030\003 \002(\014*7\n\013CommandType\022\013\n\007CONNE",
-      "CT\020\001\022\014\n\010SHUTDOWN\020\002\022\r\n\tHEARTBEAT\020\003B\017\n\013akk" +
-      "a.remoteH\001"
+      "\014\n\004port\030\003 \002(\r\"\216\001\n\027DaemonMsgCreateProtoco" +
+      "l\022\035\n\005props\030\001 \002(\0132\016.PropsProtocol\022\037\n\006depl" +
+      "oy\030\002 \002(\0132\017.DeployProtocol\022\014\n\004path\030\003 \002(\t\022" +
+      "%\n\nsupervisor\030\004 \002(\0132\021.ActorRefProtocol\"k",
+      "\n\rPropsProtocol\022\017\n\007creator\030\001 \002(\014\022\022\n\ndisp" +
+      "atcher\030\002 \002(\t\022\037\n\006deploy\030\003 \002(\0132\017.DeployPro" +
+      "tocol\022\024\n\014routerConfig\030\004 \001(\014\"S\n\016DeployPro" +
+      "tocol\022\014\n\004path\030\001 \002(\t\022\016\n\006config\030\002 \001(\014\022\024\n\014r" +
+      "outerConfig\030\003 \001(\014\022\r\n\005scope\030\004 \001(\014\"`\n\026Daem" +
+      "onMsgWatchProtocol\022\"\n\007watcher\030\001 \002(\0132\021.Ac" +
+      "torRefProtocol\022\"\n\007watched\030\002 \002(\0132\021.ActorR" +
+      "efProtocol*7\n\013CommandType\022\013\n\007CONNECT\020\001\022\014" +
+      "\n\010SHUTDOWN\020\002\022\r\n\tHEARTBEAT\020\003B\017\n\013akka.remo" +
+      "teH\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -4968,14 +7020,38 @@ public final class RemoteProtocol {
               new java.lang.String[] { "System", "Hostname", "Port", },
               akka.remote.RemoteProtocol.AddressProtocol.class,
               akka.remote.RemoteProtocol.AddressProtocol.Builder.class);
-          internal_static_DurableMailboxMessageProtocol_descriptor =
+          internal_static_DaemonMsgCreateProtocol_descriptor =
             getDescriptor().getMessageTypes().get(7);
-          internal_static_DurableMailboxMessageProtocol_fieldAccessorTable = new
+          internal_static_DaemonMsgCreateProtocol_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_DurableMailboxMessageProtocol_descriptor,
-              new java.lang.String[] { "Recipient", "Sender", "Message", },
-              akka.remote.RemoteProtocol.DurableMailboxMessageProtocol.class,
-              akka.remote.RemoteProtocol.DurableMailboxMessageProtocol.Builder.class);
+              internal_static_DaemonMsgCreateProtocol_descriptor,
+              new java.lang.String[] { "Props", "Deploy", "Path", "Supervisor", },
+              akka.remote.RemoteProtocol.DaemonMsgCreateProtocol.class,
+              akka.remote.RemoteProtocol.DaemonMsgCreateProtocol.Builder.class);
+          internal_static_PropsProtocol_descriptor =
+            getDescriptor().getMessageTypes().get(8);
+          internal_static_PropsProtocol_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_PropsProtocol_descriptor,
+              new java.lang.String[] { "Creator", "Dispatcher", "Deploy", "RouterConfig", },
+              akka.remote.RemoteProtocol.PropsProtocol.class,
+              akka.remote.RemoteProtocol.PropsProtocol.Builder.class);
+          internal_static_DeployProtocol_descriptor =
+            getDescriptor().getMessageTypes().get(9);
+          internal_static_DeployProtocol_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_DeployProtocol_descriptor,
+              new java.lang.String[] { "Path", "Config", "RouterConfig", "Scope", },
+              akka.remote.RemoteProtocol.DeployProtocol.class,
+              akka.remote.RemoteProtocol.DeployProtocol.Builder.class);
+          internal_static_DaemonMsgWatchProtocol_descriptor =
+            getDescriptor().getMessageTypes().get(10);
+          internal_static_DaemonMsgWatchProtocol_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_DaemonMsgWatchProtocol_descriptor,
+              new java.lang.String[] { "Watcher", "Watched", },
+              akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.class,
+              akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.Builder.class);
           return null;
         }
       };
diff --git a/akka-remote/src/main/protocol/RemoteProtocol.proto b/akka-remote/src/main/protocol/RemoteProtocol.proto
index 730c3a5883..7ae11d9cb7 100644
--- a/akka-remote/src/main/protocol/RemoteProtocol.proto
+++ b/akka-remote/src/main/protocol/RemoteProtocol.proto
@@ -79,10 +79,39 @@ message AddressProtocol {
 }
 
 /**
- * Defines the durable mailbox message.
+ * Defines akka.remote.DaemonMsgCreate
  */
-message DurableMailboxMessageProtocol {
-  required ActorRefProtocol recipient= 1;
-  optional ActorRefProtocol sender = 2;
-  required bytes message = 3;
+message DaemonMsgCreateProtocol {
+  required PropsProtocol props = 1;
+  required DeployProtocol deploy = 2;
+  required string path = 3;
+  required ActorRefProtocol supervisor = 4;
+}
+
+/**
+ * Serialization of akka.actor.Props
+ */
+message PropsProtocol {
+  required bytes creator = 1;
+  required string dispatcher = 2;
+  required DeployProtocol deploy = 3;
+  optional bytes routerConfig = 4;
+}
+
+/**
+ * Serialization of akka.actor.Deploy
+ */
+message DeployProtocol {
+  required string path = 1;
+  optional bytes config = 2;
+  optional bytes routerConfig = 3;
+  optional bytes scope = 4;
+}
+
+/**
+ * Serialization of akka.remote.DaemonMsgWatch
+ */
+message DaemonMsgWatchProtocol {
+  required ActorRefProtocol watcher = 1;
+  required ActorRefProtocol watched = 2;
 }
diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf
index 4512ea3a98..11a4da0711 100644
--- a/akka-remote/src/main/resources/reference.conf
+++ b/akka-remote/src/main/resources/reference.conf
@@ -14,6 +14,8 @@ akka {
 
     serializers {
       proto = "akka.serialization.ProtobufSerializer"
+      daemon-create = "akka.serialization.DaemonMsgCreateSerializer"
+      daemon-watch = "akka.serialization.DaemonMsgWatchSerializer"
     }
 
 
@@ -21,6 +23,8 @@ akka {
       # Since com.google.protobuf.Message does not extend Serializable but GeneratedMessage
       # does, need to use the more specific one here in order to avoid ambiguity
       "com.google.protobuf.GeneratedMessage" = proto
+      "akka.remote.DaemonMsgCreate" = daemon-create
+      "akka.remote.DaemonMsgWatch" = daemon-watch
     }
 
     deployment {
diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala
new file mode 100644
index 0000000000..cc7e3b3851
--- /dev/null
+++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala
@@ -0,0 +1,139 @@
+/**
+ * Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+
+package akka.serialization
+
+import java.io.Serializable
+
+import com.google.protobuf.ByteString
+import com.typesafe.config.Config
+import com.typesafe.config.ConfigFactory
+
+import akka.actor.Actor
+import akka.actor.ActorRef
+import akka.actor.Deploy
+import akka.actor.ExtendedActorSystem
+import akka.actor.NoScopeGiven
+import akka.actor.Props
+import akka.actor.Scope
+import akka.remote.DaemonMsgCreate
+import akka.remote.RemoteProtocol.ActorRefProtocol
+import akka.remote.RemoteProtocol.DaemonMsgCreateProtocol
+import akka.remote.RemoteProtocol.DeployProtocol
+import akka.remote.RemoteProtocol.PropsProtocol
+import akka.routing.NoRouter
+import akka.routing.RouterConfig
+
+/**
+ * Serializes akka's internal DaemonMsgCreate using protobuf
+ * for the core structure of DaemonMsgCreate, Props and Deploy.
+ * Serialization of contained RouterConfig, Config, Scope, and creator (scala.Function0)
+ * is done with configured serializer for those classes, by default java.io.Serializable.
+ */
+class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Serializer {
+  import ProtobufSerializer.serializeActorRef
+  import ProtobufSerializer.deserializeActorRef
+
+  def includeManifest: Boolean = true
+  def identifier = 3
+  lazy val serialization = SerializationExtension(system)
+
+  def toBinary(obj: AnyRef): Array[Byte] = obj match {
+    case DaemonMsgCreate(props, deploy, path, supervisor) ⇒
+
+      def deployProto(d: Deploy): DeployProtocol = {
+        val builder = DeployProtocol.newBuilder.setPath(d.path)
+        if (d.config != ConfigFactory.empty)
+          builder.setConfig(serialize(d.config))
+        if (d.routerConfig != NoRouter)
+          builder.setRouterConfig(serialize(d.routerConfig))
+        if (d.scope != NoScopeGiven)
+          builder.setScope(serialize(d.scope))
+        builder.build
+      }
+
+      def propsProto = {
+        val builder = PropsProtocol.newBuilder.
+          setCreator(serialize(props.creator)).
+          setDispatcher(props.dispatcher).
+          setDeploy(deployProto(props.deploy))
+        if (props.routerConfig != NoRouter)
+          builder.setRouterConfig(serialize(props.routerConfig))
+        builder.build
+      }
+
+      DaemonMsgCreateProtocol.newBuilder.
+        setProps(propsProto).
+        setDeploy(deployProto(deploy)).
+        setPath(path).
+        setSupervisor(serializeActorRef(supervisor)).
+        build.toByteArray
+
+    case _ ⇒
+      throw new IllegalArgumentException(
+        "Can't serialize a non-DaemonMsgCreate message using DaemonMsgCreateSerializer [%s]".format(obj))
+  }
+
+  def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = {
+    val proto = DaemonMsgCreateProtocol.parseFrom(bytes)
+
+    def deploy(protoDeploy: DeployProtocol) = {
+      val config =
+        if (protoDeploy.hasConfig) deserialize(protoDeploy.getConfig, classOf[Config])
+        else ConfigFactory.empty
+      val routerConfig =
+        if (protoDeploy.hasRouterConfig) deserialize(protoDeploy.getRouterConfig, classOf[RouterConfig])
+        else NoRouter
+      val scope =
+        if (protoDeploy.hasScope) deserialize(protoDeploy.getScope, classOf[Scope])
+        else NoScopeGiven
+      Deploy(protoDeploy.getPath, config, routerConfig, scope)
+    }
+
+    def props = {
+      val routerConfig =
+        if (proto.getProps.hasRouterConfig) deserialize(proto.getProps.getRouterConfig, classOf[RouterConfig])
+        else NoRouter
+      Props(
+        creator = deserialize(proto.getProps.getCreator, classOf[() ⇒ Actor]),
+        dispatcher = proto.getProps.getDispatcher,
+        routerConfig = routerConfig,
+        deploy = deploy(proto.getProps.getDeploy))
+    }
+
+    DaemonMsgCreate(
+      props = props,
+      deploy = deploy(proto.getDeploy),
+      path = proto.getPath,
+      supervisor = deserializeActorRef(system, proto.getSupervisor))
+  }
+
+  protected def serialize(any: AnyRef): ByteString =
+    serialization.serialize(any) match {
+      case Right(bytes) ⇒ ByteString.copyFrom(bytes)
+      case Left(e)      ⇒ throw e
+    }
+
+  protected def deserialize[T: ClassManifest](data: ByteString, clazz: Class[T]): T = {
+    val bytes = data.toByteArray
+    serialization.deserialize(bytes, clazz) match {
+      case Right(x) if classManifest[T].erasure.isInstance(x) ⇒ x.asInstanceOf[T]
+      case Right(other) ⇒ throw new IllegalArgumentException("Can't deserialize to [%s], got [%s]".
+        format(clazz.getName, other))
+      case Left(e) ⇒
+        // Fallback to the java serializer, because some interfaces don't implement java.io.Serializable,
+        // but the impl instance does. This could be optimized by adding java serializers in reference.conf:
+        // scala.Function0 (the creator)
+        // com.typesafe.config.Config
+        // akka.routing.RouterConfig
+        // akka.actor.Scope
+        serialization.deserialize(bytes, classOf[java.io.Serializable]) match {
+          case Right(x) if classManifest[T].erasure.isInstance(x) ⇒ x.asInstanceOf[T]
+          case _ ⇒ throw e // the first exception
+        }
+    }
+
+  }
+
+}
\ No newline at end of file
diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala
new file mode 100644
index 0000000000..a564e92088
--- /dev/null
+++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala
@@ -0,0 +1,41 @@
+/**
+ * Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+
+package akka.serialization
+
+import akka.actor.ActorRef
+import akka.remote.DaemonMsgWatch
+import akka.remote.RemoteProtocol.ActorRefProtocol
+import akka.remote.RemoteProtocol.DaemonMsgWatchProtocol
+import akka.actor.ExtendedActorSystem
+
+/**
+ * Serializes akka's internal DaemonMsgWatch using protobuf.
+ */
+class DaemonMsgWatchSerializer(val system: ExtendedActorSystem) extends Serializer {
+  import ProtobufSerializer.serializeActorRef
+  import ProtobufSerializer.deserializeActorRef
+
+  def includeManifest: Boolean = true
+  def identifier = 4
+
+  def toBinary(obj: AnyRef): Array[Byte] = obj match {
+    case DaemonMsgWatch(watcher, watched) ⇒
+      DaemonMsgWatchProtocol.newBuilder.
+        setWatcher(serializeActorRef(watcher)).
+        setWatched(serializeActorRef(watched)).
+        build.toByteArray
+    case _ ⇒
+      throw new IllegalArgumentException(
+        "Can't serialize a non-DaemonMsgWatch message using DaemonMsgWatchSerializer [%s]".format(obj))
+  }
+
+  def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = {
+    val proto = DaemonMsgWatchProtocol.parseFrom(bytes)
+    DaemonMsgWatch(
+      watcher = deserializeActorRef(system, proto.getWatcher),
+      watched = deserializeActorRef(system, proto.getWatched))
+  }
+
+}
\ No newline at end of file
diff --git a/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala
index 813a22fba4..72690b3c91 100644
--- a/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala
+++ b/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala
@@ -6,6 +6,22 @@ package akka.serialization
 
 import com.google.protobuf.Message
 import akka.actor.DynamicAccess
+import akka.remote.RemoteProtocol.ActorRefProtocol
+import akka.actor.ActorSystem
+import akka.actor.ActorRef
+
+object ProtobufSerializer {
+  def serializeActorRef(ref: ActorRef): ActorRefProtocol = {
+    val identifier: String = Serialization.currentTransportAddress.value match {
+      case null    ⇒ ref.path.toString
+      case address ⇒ ref.path.toStringWithAddress(address)
+    }
+    ActorRefProtocol.newBuilder.setPath(identifier).build
+  }
+
+  def deserializeActorRef(system: ActorSystem, refProtocol: ActorRefProtocol): ActorRef =
+    system.actorFor(refProtocol.getPath)
+}
 
 /**
  * This Serializer serializes `com.google.protobuf.Message`s
diff --git a/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala b/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala
new file mode 100644
index 0000000000..eed2c73d2c
--- /dev/null
+++ b/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala
@@ -0,0 +1,104 @@
+/**
+ * Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+
+package akka.serialization
+
+import com.typesafe.config.ConfigFactory
+import akka.testkit.AkkaSpec
+import akka.actor.Actor
+import akka.actor.Address
+import akka.actor.Props
+import akka.actor.Deploy
+import akka.actor.OneForOneStrategy
+import akka.actor.SupervisorStrategy
+import akka.remote.DaemonMsgCreate
+import akka.remote.RemoteScope
+import akka.routing.RoundRobinRouter
+import akka.routing.FromConfig
+import akka.util.duration._
+
+object DaemonMsgCreateSerializerSpec {
+  class MyActor extends Actor {
+    def receive = {
+      case _ ⇒
+    }
+  }
+}
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class DaemonMsgCreateSerializerSpec extends AkkaSpec {
+
+  import DaemonMsgCreateSerializerSpec._
+  val ser = SerializationExtension(system)
+  val supervisor = system.actorOf(Props[MyActor], "supervisor")
+
+  "Serialization" must {
+
+    "resolve DaemonMsgCreateSerializer" in {
+      ser.serializerFor(classOf[DaemonMsgCreate]).getClass must be(classOf[DaemonMsgCreateSerializer])
+    }
+
+    "serialize and de-serialize simple DaemonMsgCreate" in {
+
+      val msg = DaemonMsgCreate(
+        props = Props[MyActor],
+        deploy = Deploy(),
+        path = "foo",
+        supervisor = supervisor)
+
+      val bytes = ser.serialize(msg) match {
+        case Left(exception) ⇒ fail(exception)
+        case Right(bytes)    ⇒ bytes
+      }
+      ser.deserialize(bytes.asInstanceOf[Array[Byte]], classOf[DaemonMsgCreate]) match {
+        case Left(exception)           ⇒ fail(exception)
+        case Right(m: DaemonMsgCreate) ⇒ assertDaemonMsgCreate(msg, m)
+      }
+    }
+
+    "serialize and de-serialize DaemonMsgCreate with Deploy and RouterConfig" in {
+      // Duration.Inf doesn't equal Duration.Inf, so we use another for test
+      val supervisorStrategy = OneForOneStrategy(3, 10 seconds) {
+        case _ ⇒ SupervisorStrategy.Escalate
+      }
+      val deploy1 = Deploy(
+        path = "path1",
+        config = ConfigFactory.parseString("a=1"),
+        routerConfig = RoundRobinRouter(nrOfInstances = 5, supervisorStrategy = supervisorStrategy),
+        scope = RemoteScope(Address("akka", "Test", "host1", 1921)))
+      val deploy2 = Deploy(
+        path = "path2",
+        config = ConfigFactory.parseString("a=2"),
+        routerConfig = FromConfig,
+        scope = RemoteScope(Address("akka", "Test", "host2", 1922)))
+      val msg = DaemonMsgCreate(
+        props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1),
+        deploy = deploy2,
+        path = "foo",
+        supervisor = supervisor)
+
+      val bytes = ser.serialize(msg) match {
+        case Left(exception) ⇒ fail(exception)
+        case Right(bytes)    ⇒ bytes
+      }
+      ser.deserialize(bytes.asInstanceOf[Array[Byte]], classOf[DaemonMsgCreate]) match {
+        case Left(exception)           ⇒ fail(exception)
+        case Right(m: DaemonMsgCreate) ⇒ assertDaemonMsgCreate(msg, m)
+      }
+    }
+
+    def assertDaemonMsgCreate(expected: DaemonMsgCreate, got: DaemonMsgCreate): Unit = {
+      // can't compare props.creator
+      assert(got.props.dispatcher === expected.props.dispatcher)
+      assert(got.props.dispatcher === expected.props.dispatcher)
+      assert(got.props.routerConfig === expected.props.routerConfig)
+      assert(got.props.deploy === expected.props.deploy)
+      assert(got.deploy === expected.deploy)
+      assert(got.path === expected.path)
+      assert(got.supervisor === expected.supervisor)
+    }
+
+  }
+}
+
diff --git a/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala b/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala
new file mode 100644
index 0000000000..a6069beac1
--- /dev/null
+++ b/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala
@@ -0,0 +1,49 @@
+/**
+ * Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+
+package akka.serialization
+
+import akka.testkit.AkkaSpec
+import akka.remote.DaemonMsgWatch
+import akka.actor.Actor
+import akka.actor.Props
+
+object DaemonMsgWatchSerializerSpec {
+  class MyActor extends Actor {
+    def receive = {
+      case _ ⇒
+    }
+  }
+}
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class DaemonMsgWatchSerializerSpec extends AkkaSpec {
+
+  import DaemonMsgWatchSerializerSpec._
+
+  val ser = SerializationExtension(system)
+
+  "Serialization" must {
+
+    "resolve DaemonMsgWatchSerializer" in {
+      ser.serializerFor(classOf[DaemonMsgWatch]).getClass must be(classOf[DaemonMsgWatchSerializer])
+    }
+
+    "serialize and de-serialize DaemonMsgWatch" in {
+      val watcher = system.actorOf(Props[MyActor], "watcher")
+      val watched = system.actorOf(Props[MyActor], "watched")
+      val msg = DaemonMsgWatch(watcher, watched)
+      val bytes = ser.serialize(msg) match {
+        case Left(exception) ⇒ fail(exception)
+        case Right(bytes)    ⇒ bytes
+      }
+      ser.deserialize(bytes.asInstanceOf[Array[Byte]], classOf[DaemonMsgWatch]) match {
+        case Left(exception) ⇒ fail(exception)
+        case Right(m)        ⇒ assert(m === msg)
+      }
+    }
+
+  }
+}
+

From 3e44cda33bf67f6a4440be84ad55c76783cd286d Mon Sep 17 00:00:00 2001
From: Heiko Seeberger 
Date: Tue, 15 May 2012 12:13:23 +0200
Subject: [PATCH 021/538] closes #2076: Streamline usage of parens on arity-0
 methods with the Style Guide (once more)

---
 akka-actor/src/main/scala/akka/actor/Actor.scala        | 2 +-
 akka-actor/src/main/scala/akka/actor/Props.scala        | 7 +------
 akka-actor/src/main/scala/akka/actor/UntypedActor.scala | 2 +-
 3 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index 2499d42f10..ee24b3d892 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -249,7 +249,7 @@ trait Actor {
    * User overridable definition the strategy to use for supervising
    * child actors.
    */
-  def supervisorStrategy(): SupervisorStrategy = SupervisorStrategy.defaultStrategy
+  def supervisorStrategy: SupervisorStrategy = SupervisorStrategy.defaultStrategy
 
   /**
    * User overridable callback.
diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala
index 3751898c5c..13d55996e1 100644
--- a/akka-actor/src/main/scala/akka/actor/Props.scala
+++ b/akka-actor/src/main/scala/akka/actor/Props.scala
@@ -31,18 +31,13 @@ object Props {
    */
   final val default = new Props()
 
-  /**
-   * Returns a cached default implementation of Props.
-   */
-  def apply(): Props = default
-
   /**
    * Returns a Props that has default values except for "creator" which will be a function that creates an instance
    * of the supplied type using the default constructor.
    *
    * Scala API.
    */
-  def apply[T <: Actor: ClassManifest]: Props =
+  def apply[T <: Actor: ClassManifest](): Props =
     default.withCreator(implicitly[ClassManifest[T]].erasure.asInstanceOf[Class[_ <: Actor]])
 
   /**
diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala
index a5ebeb851c..a1f43c3616 100644
--- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala
+++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala
@@ -116,7 +116,7 @@ abstract class UntypedActor extends Actor {
    * User overridable definition the strategy to use for supervising
    * child actors.
    */
-  override def supervisorStrategy(): SupervisorStrategy = super.supervisorStrategy()
+  override def supervisorStrategy: SupervisorStrategy = super.supervisorStrategy
 
   /**
    * User overridable callback.

From 6d430122e951d30410de84e927ef0654adbe1f8a Mon Sep 17 00:00:00 2001
From: Viktor Klang 
Date: Tue, 15 May 2012 12:31:38 +0200
Subject: [PATCH 022/538] Removing com.eaio.Uuid and replacing its usage with
 java.util.UUID

---
 .../scala/akka/actor/ActorLifeCycleSpec.scala |   3 +-
 .../src/main/java/com/eaio/util/lang/Hex.java | 215 ----------
 .../java/com/eaio/uuid/MACAddressParser.java  | 116 ------
 .../src/main/java/com/eaio/uuid/UUID.java     | 308 ---------------
 .../src/main/java/com/eaio/uuid/UUIDGen.java  | 368 ------------------
 .../src/main/scala/akka/AkkaException.scala   |   6 +-
 akka-actor/src/main/scala/akka/actor/IO.scala |   7 +-
 .../src/main/scala/akka/actor/package.scala   |  14 +-
 8 files changed, 8 insertions(+), 1029 deletions(-)
 delete mode 100644 akka-actor/src/main/java/com/eaio/util/lang/Hex.java
 delete mode 100644 akka-actor/src/main/java/com/eaio/uuid/MACAddressParser.java
 delete mode 100644 akka-actor/src/main/java/com/eaio/uuid/UUID.java
 delete mode 100644 akka-actor/src/main/java/com/eaio/uuid/UUIDGen.java

diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
index 16b4055d0e..d87aaaaee6 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
@@ -13,6 +13,7 @@ import akka.util.duration._
 import java.util.concurrent.atomic._
 import akka.dispatch.Await
 import akka.pattern.ask
+import java.util.UUID.{ randomUUID ⇒ newUuid }
 
 object ActorLifeCycleSpec {
 
@@ -35,7 +36,7 @@ class ActorLifeCycleSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitS
 
     "invoke preRestart, preStart, postRestart when using OneForOneStrategy" in {
       filterException[ActorKilledException] {
-        val id = newUuid().toString
+        val id = newUuid.toString
         val supervisor = system.actorOf(Props(new Supervisor(
           OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception])))))
         val gen = new AtomicInteger(0)
diff --git a/akka-actor/src/main/java/com/eaio/util/lang/Hex.java b/akka-actor/src/main/java/com/eaio/util/lang/Hex.java
deleted file mode 100644
index 7794059517..0000000000
--- a/akka-actor/src/main/java/com/eaio/util/lang/Hex.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Hex.java
- *
- * Created 04.07.2003.
- *
- * eaio: UUID - an implementation of the UUID specification Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com)
- * http://eaio.com.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
- * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
- * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
- * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-package com.eaio.util.lang;
-
-import java.io.IOException;
-
-/**
- * Number-to-hexadecimal and hexadecimal-to-number conversions.
- *
- * @see UUID
- * @author Johann Burkard
- * @version $Id: Hex.java 1888 2009-03-15 12:43:24Z johann $
- */
-public final class Hex {
-
-    /**
-     * No instances needed.
-     */
-    private Hex() {
-        super();
-    }
-
-    private static final char[] DIGITS = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e',
-            'f' };
-
-    /**
-     * Turns a short into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the integer
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, short in) {
-        return append(a, (long) in, 4);
-    }
-
-    /**
-     * Turns a short into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the integer
-     * @param length the number of octets to produce
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, short in, int length) {
-        return append(a, (long) in, length);
-    }
-
-    /**
-     * Turns an int into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the integer
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, int in) {
-        return append(a, (long) in, 8);
-    }
-
-    /**
-     * Turns an int into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the integer
-     * @param length the number of octets to produce
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, int in, int length) {
-        return append(a, (long) in, length);
-    }
-
-    /**
-     * Turns a long into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the long
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, long in) {
-        return append(a, in, 16);
-    }
-
-    /**
-     * Turns a long into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param in the long
-     * @param length the number of octets to produce
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, long in, int length) {
-        try {
-            int lim = (length << 2) - 4;
-            while (lim >= 0) {
-                a.append(DIGITS[(byte) (in >> lim) & 0x0f]);
-                lim -= 4;
-            }
-        }
-        catch (IOException ex) {
-            // Bla
-        }
-        return a;
-    }
-
-    /**
-     * Turns a byte array into hex octets.
-     *
-     * @param a the {@link Appendable}, may not be null
-     * @param bytes the byte array
-     * @return {@link Appendable}
-     */
-    public static Appendable append(Appendable a, byte[] bytes) {
-        try {
-            for (byte b : bytes) {
-                a.append(DIGITS[(byte) ((b & 0xF0) >> 4)]);
-                a.append(DIGITS[(byte) (b & 0x0F)]);
-            }
-        }
-        catch (IOException ex) {
-            // Bla
-        }
-        return a;
-    }
-
-    /**
-     * Parses a long from a hex encoded number. This method will skip all characters that are not 0-9,
-     * A-F and a-f.
-     * 

- * Returns 0 if the {@link CharSequence} does not contain any interesting characters. - * - * @param s the {@link CharSequence} to extract a long from, may not be null - * @return a long - * @throws NullPointerException if the {@link CharSequence} is null - */ - public static long parseLong(CharSequence s) { - long out = 0; - byte shifts = 0; - char c; - for (int i = 0; i < s.length() && shifts < 16; i++) { - c = s.charAt(i); - if ((c > 47) && (c < 58)) { - ++shifts; - out <<= 4; - out |= c - 48; - } - else if ((c > 64) && (c < 71)) { - ++shifts; - out <<= 4; - out |= c - 55; - } - else if ((c > 96) && (c < 103)) { - ++shifts; - out <<= 4; - out |= c - 87; - } - } - return out; - } - - /** - * Parses a short from a hex encoded number. This method will skip all characters that are not 0-9, - * A-F and a-f. - *

- * Returns 0 if the {@link CharSequence} does not contain any interesting characters. - * - * @param s the {@link CharSequence} to extract a short from, may not be null - * @return a short - * @throws NullPointerException if the {@link CharSequence} is null - */ - public static short parseShort(String s) { - short out = 0; - byte shifts = 0; - char c; - for (int i = 0; i < s.length() && shifts < 4; i++) { - c = s.charAt(i); - if ((c > 47) && (c < 58)) { - ++shifts; - out <<= 4; - out |= c - 48; - } - else if ((c > 64) && (c < 71)) { - ++shifts; - out <<= 4; - out |= c - 55; - } - else if ((c > 96) && (c < 103)) { - ++shifts; - out <<= 4; - out |= c - 87; - } - } - return out; - } - -} diff --git a/akka-actor/src/main/java/com/eaio/uuid/MACAddressParser.java b/akka-actor/src/main/java/com/eaio/uuid/MACAddressParser.java deleted file mode 100644 index c077147470..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/MACAddressParser.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * MACAddressParserTest.java - * - * Created 30.01.2006. - * - * eaio: UUID - an implementation of the UUID specification - * Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com) http://eaio.com. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN - * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ -package com.eaio.uuid; - -/** - * The MAC address parser attempts to find the following patterns: - *

    - *
  • .{1,2}:.{1,2}:.{1,2}:.{1,2}:.{1,2}:.{1,2}
  • - *
  • .{1,2}-.{1,2}-.{1,2}-.{1,2}-.{1,2}-.{1,2}
  • - *
- * - * @see UUID - * @author Johann Burkard - * @version $Id: MACAddressParser.java 1888 2009-03-15 12:43:24Z johann $ - */ -class MACAddressParser { - - /** - * No instances needed. - */ - private MACAddressParser() { - super(); - } - - /** - * Attempts to find a pattern in the given String. - * - * @param in the String, may not be null - * @return the substring that matches this pattern or null - */ - static String parse(String in) { - - String out = in; - - // lanscan - - int hexStart = out.indexOf("0x"); - if (hexStart != -1 && out.indexOf("ETHER") != -1) { - int hexEnd = out.indexOf(' ', hexStart); - if (hexEnd > hexStart + 2) { - out = out.substring(hexStart, hexEnd); - } - } - - else { - - int octets = 0; - int lastIndex, old, end; - - if (out.indexOf('-') > -1) { - out = out.replace('-', ':'); - } - - lastIndex = out.lastIndexOf(':'); - - if (lastIndex > out.length() - 2) { - out = null; - } - else { - - end = Math.min(out.length(), lastIndex + 3); - - ++octets; - old = lastIndex; - while (octets != 5 && lastIndex != -1 && lastIndex > 1) { - lastIndex = out.lastIndexOf(':', --lastIndex); - if (old - lastIndex == 3 || old - lastIndex == 2) { - ++octets; - old = lastIndex; - } - } - - if (octets == 5 && lastIndex > 1) { - out = out.substring(lastIndex - 2, end).trim(); - } - else { - out = null; - } - - } - - } - - if (out != null && out.startsWith("0x")) { - out = out.substring(2); - } - - return out; - } - -} diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUID.java b/akka-actor/src/main/java/com/eaio/uuid/UUID.java deleted file mode 100644 index a578a68c6d..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/UUID.java +++ /dev/null @@ -1,308 +0,0 @@ -/* - * UUID.java - * - * Created 07.02.2003 - * - * eaio: UUID - an implementation of the UUID specification - * Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com) http://eaio.com. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN - * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ -package com.eaio.uuid; - -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.io.Serializable; - -import com.eaio.util.lang.Hex; - -/** - * Creates UUIDs according to the DCE Universal Token Identifier specification. - *

- * All you need to know: - *

- * UUID u = new UUID();
- * 
- * - * @see - * http://www.opengroup.org/onlinepubs/9629399/apdxa.htm - * - * @see - * http://www.uddi.org/pubs/draft-leach-uuids-guids-01.txt - * - * @see UUID - * @author Johann Burkard - * @version $Id: UUID.java 1888 2009-03-15 12:43:24Z johann $ - */ -public class UUID implements Comparable, Serializable, Cloneable { - - /** - * Hasn't ever changed between versions. - */ - static final long serialVersionUID = 7435962790062944603L; - - /** - * The time field of the UUID. - * - * @serial - */ - public long time; - - /** - * The clock sequence and node field of the UUID. - * - * @serial - */ - public long clockSeqAndNode; - - /** - * Constructor for UUID. Constructs a new, unique UUID. - * - * @see UUIDGen#newTime() - * @see UUIDGen#getClockSeqAndNode() - */ - public UUID() { - this(UUIDGen.newTime(), UUIDGen.getClockSeqAndNode()); - } - - /** - * Constructor for UUID. Constructs a UUID from two long values. - * - * @param time the upper 64 bits - * @param clockSeqAndNode the lower 64 bits - */ - public UUID(long time, long clockSeqAndNode) { - this.time = time; - this.clockSeqAndNode = clockSeqAndNode; - } - - /** - * Copy constructor for UUID. Values of the given UUID are copied. - * - * @param u the UUID, may not be null - */ - public UUID(UUID u) { - this(u.time, u.clockSeqAndNode); - } - - /** - * Parses a textual representation of a UUID. - *

- * No validation is performed. If the {@link CharSequence} is shorter than 36 characters, - * {@link ArrayIndexOutOfBoundsException}s will be thrown. - * - * @param s the {@link CharSequence}, may not be null - */ - public UUID(CharSequence s) { - this(Hex.parseLong(s.subSequence(0, 18)), Hex.parseLong(s.subSequence( - 19, 36))); - } - - /** - * Compares this UUID to another Object. Throws a {@link ClassCastException} if - * the other Object is not an instance of the UUID class. Returns a value - * smaller than zero if the other UUID is "larger" than this UUID and a value - * larger than zero if the other UUID is "smaller" than this UUID. - * - * @param t the other UUID, may not be null - * @return a value < 0, 0 or a value > 0 - * @see java.lang.Comparable#compareTo(java.lang.Object) - * @throws ClassCastException - */ - public int compareTo(UUID t) { - if (this == t) { - return 0; - } - if (time > t.time) { - return 1; - } - if (time < t.time) { - return -1; - } - if (clockSeqAndNode > t.clockSeqAndNode) { - return 1; - } - if (clockSeqAndNode < t.clockSeqAndNode) { - return -1; - } - return 0; - } - - /** - * Tweaked Serialization routine. - * - * @param out the ObjectOutputStream - * @throws IOException - */ - private void writeObject(ObjectOutputStream out) throws IOException { - out.writeLong(time); - out.writeLong(clockSeqAndNode); - } - - /** - * Tweaked Serialization routine. - * - * @param in the ObjectInputStream - * @throws IOException - */ - private void readObject(ObjectInputStream in) throws IOException { - time = in.readLong(); - clockSeqAndNode = in.readLong(); - } - - /** - * Returns this UUID as a String. - * - * @return a String, never null - * @see java.lang.Object#toString() - * @see #toAppendable(Appendable) - */ - @Override - public final String toString() { - return toAppendable(null).toString(); - } - - /** - * Appends a String representation of this to the given {@link StringBuffer} or - * creates a new one if none is given. 
- * - * @param in the StringBuffer to append to, may be null - * @return a StringBuffer, never null - * @see #toAppendable(Appendable) - */ - public StringBuffer toStringBuffer(StringBuffer in) { - StringBuffer out = in; - if (out == null) { - out = new StringBuffer(36); - } - else { - out.ensureCapacity(out.length() + 36); - } - return (StringBuffer) toAppendable(out); - } - - /** - * Appends a String representation of this object to the given {@link Appendable} object. - *

- * For reasons I'll probably never understand, Sun has decided to have a number of I/O classes implement - * Appendable which forced them to destroy an otherwise nice and simple interface with {@link IOException}s. - *

- * I decided to ignore any possible IOExceptions in this method. - * - * @param a the Appendable object, may be null - * @return an Appendable object, defaults to a {@link StringBuilder} if a is null - */ - public Appendable toAppendable(Appendable a) { - Appendable out = a; - if (out == null) { - out = new StringBuilder(36); - } - try { - Hex.append(out, (int) (time >> 32)).append('-'); - Hex.append(out, (short) (time >> 16)).append('-'); - Hex.append(out, (short) time).append('-'); - Hex.append(out, (short) (clockSeqAndNode >> 48)).append('-'); - Hex.append(out, clockSeqAndNode, 12); - } - catch (IOException ex) { - // What were they thinking? - } - return out; - } - - /** - * Returns a hash code of this UUID. The hash code is calculated by XOR'ing the - * upper 32 bits of the time and clockSeqAndNode fields and the lower 32 bits of - * the time and clockSeqAndNode fields. - * - * @return an int representing the hash code - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - return (int) ((time >> 32) ^ time ^ (clockSeqAndNode >> 32) ^ clockSeqAndNode); - } - - /** - * Clones this UUID. - * - * @return a new UUID with identical values, never null - */ - @Override - public Object clone() { - try { - return super.clone(); - } - catch (CloneNotSupportedException ex) { - // One of Sun's most epic fails. - return null; - } - } - - /** - * Returns the time field of the UUID (upper 64 bits). - * - * @return the time field - */ - public final long getTime() { - return time; - } - - /** - * Returns the clock and node field of the UUID (lower 64 bits). - * - * @return the clockSeqAndNode field - */ - public final long getClockSeqAndNode() { - return clockSeqAndNode; - } - - /** - * Compares two Objects for equality. 
- * - * @see java.lang.Object#equals(Object) - * @param obj the Object to compare this UUID with, may be null - * @return true if the other Object is equal to this UUID, - * false if not - */ - @Override - public boolean equals(Object obj) { - if (!(obj instanceof UUID)) { - return false; - } - return compareTo((UUID) obj) == 0; - } - - /** - * Returns the nil UUID (a UUID whose values are both set to zero). - *

- * Starting with version 2.0, this method does return a new UUID instance every - * time it is called. Earlier versions returned one instance. This has now been - * changed because this UUID has public, non-final instance fields. Returning a - * new instance is therefore more safe. - * - * @return a nil UUID, never null - */ - public static UUID nilUUID() { - return new UUID(0, 0); - } - -} diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUIDGen.java b/akka-actor/src/main/java/com/eaio/uuid/UUIDGen.java deleted file mode 100644 index fb60e1727a..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/UUIDGen.java +++ /dev/null @@ -1,368 +0,0 @@ -/* - * UUIDGen.java - * - * Created on 09.08.2003. - * - * eaio: UUID - an implementation of the UUID specification - * Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com) http://eaio.com. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN - * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -package com.eaio.uuid; - -import java.io.BufferedReader; -import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.net.InetAddress; -import java.net.InterfaceAddress; -import java.net.NetworkInterface; -import java.net.SocketException; -import java.net.UnknownHostException; -import java.util.Enumeration; -import java.util.concurrent.atomic.AtomicLong; - -import com.eaio.util.lang.Hex; - -/** - * This class contains methods to generate UUID fields. These methods have been - * refactored out of {@link com.eaio.uuid.UUID}. - *

- * Starting with version 2, this implementation tries to obtain the MAC address - * of the network card. Under Microsoft Windows, the ifconfig - * command is used which may pop up a command window in Java Virtual Machines - * prior to 1.4 once this class is initialized. The command window is closed - * automatically. - *

- * The MAC address code has been tested extensively in Microsoft Windows, - * Linux, Solaris 8, HP-UX 11, but should work in MacOS X and BSDs, too. - *

- * If you use JDK 6 or later, the code in {@link InterfaceAddress} will be used. - * - * @see UUID - * @author Johann Burkard - * @version $Id: UUIDGen.java 2914 2010-04-23 11:35:00Z johann $ - * @see com.eaio.uuid.UUID - */ -public final class UUIDGen { - - /** - * No instances needed. - */ - private UUIDGen() { - super(); - } - - /** - * The last time value. Used to remove duplicate UUIDs. - */ - private final static AtomicLong lastTime = new AtomicLong(Long.MIN_VALUE); - - /** - * The cached MAC address. - */ - private static String macAddress = null; - - /** - * The current clock and node value. - */ - private static long clockSeqAndNode = 0x8000000000000000L; - - static { - - try { - Class.forName("java.net.InterfaceAddress"); - macAddress = Class.forName( - "com.eaio.uuid.UUIDGen$HardwareAddressLookup").newInstance().toString(); - } - catch (ExceptionInInitializerError err) { - // Ignored. - } - catch (ClassNotFoundException ex) { - // Ignored. - } - catch (LinkageError err) { - // Ignored. - } - catch (IllegalAccessException ex) { - // Ignored. - } - catch (InstantiationException ex) { - // Ignored. - } - catch (SecurityException ex) { - // Ignored. 
- } - - if (macAddress == null) { - - Process p = null; - BufferedReader in = null; - - try { - String osname = System.getProperty("os.name", ""); - - if (osname.startsWith("Windows")) { - p = Runtime.getRuntime().exec( - new String[] { "ipconfig", "/all" }, null); - } - // Solaris code must appear before the generic code - else if (osname.startsWith("Solaris") - || osname.startsWith("SunOS")) { - String hostName = getFirstLineOfCommand( - "uname", "-n" ); - if (hostName != null) { - p = Runtime.getRuntime().exec( - new String[] { "/usr/sbin/arp", hostName }, - null); - } - } - else if (new File("/usr/sbin/lanscan").exists()) { - p = Runtime.getRuntime().exec( - new String[] { "/usr/sbin/lanscan" }, null); - } - else if (new File("/sbin/ifconfig").exists()) { - p = Runtime.getRuntime().exec( - new String[] { "/sbin/ifconfig", "-a" }, null); - } - - if (p != null) { - in = new BufferedReader(new InputStreamReader( - p.getInputStream()), 128); - String l = null; - while ((l = in.readLine()) != null) { - macAddress = MACAddressParser.parse(l); - if (macAddress != null - && Hex.parseShort(macAddress) != 0xff) { - break; - } - } - } - - } - catch (SecurityException ex) { - // Ignore it. - } - catch (IOException ex) { - // Ignore it. - } - finally { - if (p != null) { - if (in != null) { - try { - in.close(); - } - catch (IOException ex) { - // Ignore it. - } - } - try { - p.getErrorStream().close(); - } - catch (IOException ex) { - // Ignore it. - } - try { - p.getOutputStream().close(); - } - catch (IOException ex) { - // Ignore it. 
- } - p.destroy(); - } - } - - } - - if (macAddress != null) { - clockSeqAndNode |= Hex.parseLong(macAddress); - } - else { - try { - byte[] local = InetAddress.getLocalHost().getAddress(); - clockSeqAndNode |= (local[0] << 24) & 0xFF000000L; - clockSeqAndNode |= (local[1] << 16) & 0xFF0000; - clockSeqAndNode |= (local[2] << 8) & 0xFF00; - clockSeqAndNode |= local[3] & 0xFF; - } - catch (UnknownHostException ex) { - clockSeqAndNode |= (long) (Math.random() * 0x7FFFFFFF); - } - } - - // Skip the clock sequence generation process and use random instead. - - clockSeqAndNode |= (long) (Math.random() * 0x3FFF) << 48; - - } - - /** - * Returns the current clockSeqAndNode value. - * - * @return the clockSeqAndNode value - * @see UUID#getClockSeqAndNode() - */ - public static long getClockSeqAndNode() { - return clockSeqAndNode; - } - - /** - * Generates a new time field. Each time field is unique and larger than the - * previously generated time field. - * - * @return a new time value - * @see UUID#getTime() - */ - public static long newTime() { - return createTime(System.currentTimeMillis()); - } - - /** - * Creates a new time field from the given timestamp. Note that even identical - * values of currentTimeMillis will produce different time fields. 
- * - * @param currentTimeMillis the timestamp - * @return a new time value - * @see UUID#getTime() - */ - public static long createTime(long currentTimeMillis) { - - long time; - - // UTC time - - long timeMillis = (currentTimeMillis * 10000) + 0x01B21DD213814000L; - - // Make sure our time is unique - - for(;;) { - final long c = lastTime.get(); - if (timeMillis <= c) { - timeMillis = lastTime.incrementAndGet(); - break; - } else if(lastTime.compareAndSet(c, timeMillis)) break; - } - - // time low - - time = timeMillis << 32; - - // time mid - - time |= (timeMillis & 0xFFFF00000000L) >> 16; - - // time hi and version - - time |= 0x1000 | ((timeMillis >> 48) & 0x0FFF); // version 1 - - return time; - - } - - /** - * Returns the MAC address. Not guaranteed to return anything. - * - * @return the MAC address, may be null - */ - public static String getMACAddress() { - return macAddress; - } - - /** - * Returns the first line of the shell command. - * - * @param commands the commands to run - * @return the first line of the command - * @throws IOException - */ - static String getFirstLineOfCommand(String... commands) throws IOException { - - Process p = null; - BufferedReader reader = null; - - try { - p = Runtime.getRuntime().exec(commands); - reader = new BufferedReader(new InputStreamReader( - p.getInputStream()), 128); - - return reader.readLine(); - } - finally { - if (p != null) { - if (reader != null) { - try { - reader.close(); - } - catch (IOException ex) { - // Ignore it. - } - } - try { - p.getErrorStream().close(); - } - catch (IOException ex) { - // Ignore it. - } - try { - p.getOutputStream().close(); - } - catch (IOException ex) { - // Ignore it. - } - p.destroy(); - } - } - - } - - /** - * Scans MAC addresses for good ones. 
- */ - static class HardwareAddressLookup { - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - String out = null; - try { - Enumeration ifs = NetworkInterface.getNetworkInterfaces(); - if (ifs != null) { - while (ifs.hasMoreElements()) { - NetworkInterface iface = ifs.nextElement(); - byte[] hardware = iface.getHardwareAddress(); - if (hardware != null && hardware.length == 6 - && hardware[1] != (byte) 0xff) { - out = Hex.append(new StringBuilder(36), hardware).toString(); - break; - } - } - } - } - catch (SocketException ex) { - // Ignore it. - } - return out; - } - - } - -} diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 85de2504d3..002233ffe5 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -4,8 +4,6 @@ package akka -import akka.actor.newUuid - object AkkaException { def toStringWithStackTrace(throwable: Throwable): String = throwable match { @@ -34,7 +32,7 @@ object AkkaException { */ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed class AkkaException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause) with Serializable { - lazy val uuid = newUuid.toString + lazy val uuid = java.util.UUID.randomUUID().toString override lazy val toString = "%s:%s\n[%s]".format(getClass.getName, message, uuid) @@ -42,7 +40,7 @@ class AkkaException(message: String = "", cause: Throwable = null) extends Runti lazy val toLongString = "%s:%s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) - def this(msg: String) = this(msg, null); + def this(msg: String) = this(msg, null) def stackTraceToString = AkkaException.stackTraceToString(this) } diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index 50ea8f229b..60ee528e45 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala 
+++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -21,8 +21,7 @@ import java.nio.channels.{ import scala.collection.mutable import scala.annotation.tailrec import scala.collection.generic.CanBuildFrom -import com.eaio.uuid.UUID - +import java.util.UUID /** * IO messages and iteratees. * @@ -89,7 +88,7 @@ object IO { * created by [[akka.actor.IOManager]].connect() and * [[akka.actor.IO.ServerHandle]].accept(). */ - case class SocketHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = new UUID()) extends ReadHandle with WriteHandle { + case class SocketHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = UUID.randomUUID()) extends ReadHandle with WriteHandle { override def asSocket = this } @@ -97,7 +96,7 @@ object IO { * A [[akka.actor.IO.Handle]] to a ServerSocketChannel. Instances are * normally created by [[akka.actor.IOManager]].listen(). */ - case class ServerHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = new UUID()) extends Handle { + case class ServerHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = UUID.randomUUID()) extends Handle { override def asServer = this /** diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index 9ec5348fee..617e3fee5c 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -8,19 +8,7 @@ package object actor { implicit def actorRef2Scala(ref: ActorRef): ScalaActorRef = ref.asInstanceOf[ScalaActorRef] implicit def scala2ActorRef(ref: ScalaActorRef): ActorRef = ref.asInstanceOf[ActorRef] - type Uuid = com.eaio.uuid.UUID - - def newUuid(): Uuid = new Uuid() - - def uuidFrom(time: Long, clockSeqAndNode: Long): Uuid = new Uuid(time, clockSeqAndNode) - - def uuidFrom(uuid: String): Uuid = new Uuid(uuid) - - def simpleName(obj: AnyRef): String = { - val n = obj.getClass.getName - val i = n.lastIndexOf('.') - n.substring(i + 1) - } + def simpleName(obj: AnyRef): String = 
simpleName(obj.getClass) def simpleName(clazz: Class[_]): String = { val n = clazz.getName From 1f7d2cef497fbfb60b0164f5e569344aec8ece60 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 15:18:21 +0200 Subject: [PATCH 023/538] Adding some commends and making SuspendReason sealed --- .../src/main/scala/akka/actor/ActorCell.scala | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 8c68ba3315..bd5342fec4 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -134,10 +134,17 @@ trait ActorContext extends ActorRefFactory { */ def unwatch(subject: ActorRef): ActorRef + /** + * ActorContexts shouldn't be Serializable + */ final protected def writeObject(o: ObjectOutputStream): Unit = throw new NotSerializableException("ActorContext is not serializable!") } +/** + * UntypedActorContext is the UntypedActor equivalent of ActorContext, + * containing the Java API + */ trait UntypedActorContext extends ActorContext { /** @@ -178,7 +185,7 @@ private[akka] object ActorCell { final val emptyReceiveTimeoutData: (Long, Cancellable) = (-1, emptyCancellable) - trait SuspendReason + sealed trait SuspendReason case object UserRequest extends SuspendReason case class Recreation(cause: Throwable) extends SuspendReason case object Termination extends SuspendReason @@ -749,13 +756,11 @@ private[akka] class ActorCell( } - final def cancelReceiveTimeout() { - //Only cancel if + final def cancelReceiveTimeout(): Unit = if (receiveTimeoutData._2 ne emptyCancellable) { receiveTimeoutData._2.cancel() receiveTimeoutData = (receiveTimeoutData._1, emptyCancellable) } - } final def clearActorFields(actorInstance: Actor): Unit = { setActorFields(actorInstance, context = null, self = system.deadLetters) From 788d735f8c074b1827b323f7c9ba76eeadb82af4 Mon Sep 17 00:00:00 2001 
From: Viktor Klang Date: Tue, 15 May 2012 15:18:42 +0200 Subject: [PATCH 024/538] Moving ActorPath.split into PathUtils so it's only internally used --- .../src/main/scala/akka/actor/ActorPath.scala | 11 ------- .../src/main/scala/akka/actor/Address.scala | 32 ++++++++++++------- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index 4ebcec0dbb..15e5677775 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -6,17 +6,6 @@ import scala.annotation.tailrec import java.net.MalformedURLException object ActorPath { - def split(s: String): List[String] = { - @tailrec - def rec(pos: Int, acc: List[String]): List[String] = { - val from = s.lastIndexOf('/', pos - 1) - val sub = s.substring(from + 1, pos) - val l = sub :: acc - if (from == -1) l else rec(from, l) - } - rec(s.length, Nil) - } - /** * Parse string as actor path; throws java.net.MalformedURLException if unable to do so. 
*/ diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index 651d970885..53f95e12d0 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -5,6 +5,7 @@ package akka.actor import java.net.URI import java.net.URISyntaxException import java.net.MalformedURLException +import annotation.tailrec /** * The address specifies the physical location under which an Actor can be @@ -44,12 +45,27 @@ object Address { def apply(protocol: String, system: String, host: String, port: Int) = new Address(protocol, system, Some(host), Some(port)) } -object RelativeActorPath { +private[akka] trait PathUtils { + def split(s: String): List[String] = { + @tailrec + def rec(pos: Int, acc: List[String]): List[String] = { + val from = s.lastIndexOf('/', pos - 1) + val sub = s.substring(from + 1, pos) + val l = sub :: acc + if (from == -1) l else rec(from, l) + } + rec(s.length, Nil) + } +} + +object RelativeActorPath extends PathUtils { def unapply(addr: String): Option[Iterable[String]] = { try { val uri = new URI(addr) if (uri.isAbsolute) None - else Some(ActorPath.split(uri.getPath)) + else Some(split(uri.getPath)) + } catch { + case _: URISyntaxException ⇒ None } } } @@ -58,13 +74,7 @@ object RelativeActorPath { * This object serves as extractor for Scala and as address parser for Java. 
*/ object AddressFromURIString { - def unapply(addr: String): Option[Address] = - try { - val uri = new URI(addr) - unapply(uri) - } catch { - case _: URISyntaxException ⇒ None - } + def unapply(addr: String): Option[Address] = try unapply(new URI(addr)) catch { case _: URISyntaxException ⇒ None } def unapply(uri: URI): Option[Address] = if (uri eq null) None @@ -93,14 +103,14 @@ object AddressFromURIString { def parse(addr: String): Address = apply(addr) } -object ActorPathExtractor { +object ActorPathExtractor extends PathUtils { def unapply(addr: String): Option[(Address, Iterable[String])] = try { val uri = new URI(addr) if (uri.getPath == null) None else AddressFromURIString.unapply(uri) match { case None ⇒ None - case Some(addr) ⇒ Some((addr, ActorPath.split(uri.getPath).drop(1))) + case Some(addr) ⇒ Some((addr, split(uri.getPath).drop(1))) } } catch { case _: URISyntaxException ⇒ None From 6c2bee0533b5a4029b1822234b1f44b95fcfcc5d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 16:01:03 +0200 Subject: [PATCH 025/538] Future-proofing ActorRef --- .../src/main/scala/akka/actor/ActorPath.scala | 27 +++--- .../src/main/scala/akka/actor/ActorRef.scala | 84 ++++++++++--------- 2 files changed, 59 insertions(+), 52 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index 15e5677775..e8c277660f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -14,6 +14,9 @@ object ActorPath { case _ ⇒ throw new MalformedURLException("cannot parse as ActorPath: " + s) } + /** + * This Regular Expression is used to validate a path element (Actor Name) + */ val ElementRegex = """[-\w:@&=+,.!~*'_;][-\w:@&=+,.!~*'$_;]*""".r } @@ -101,21 +104,21 @@ sealed trait ActorPath extends Comparable[ActorPath] with Serializable { //TODO add @SerialVersionUID(1L) when SI-4804 is fixed final case class 
RootActorPath(address: Address, name: String = "/") extends ActorPath { - def parent: ActorPath = this + override def parent: ActorPath = this - def root: RootActorPath = this + override def root: RootActorPath = this - def /(child: String): ActorPath = new ChildActorPath(this, child) + override def /(child: String): ActorPath = new ChildActorPath(this, child) - val elements: Iterable[String] = List("") + override val elements: Iterable[String] = List("") - override val toString = address + name + override val toString: String = address + name - def toStringWithAddress(addr: Address): String = + override def toStringWithAddress(addr: Address): String = if (address.host.isDefined) address + name else addr + name - def compareTo(other: ActorPath) = other match { + override def compareTo(other: ActorPath): Int = other match { case r: RootActorPath ⇒ toString compareTo r.toString case c: ChildActorPath ⇒ 1 } @@ -125,11 +128,11 @@ final case class RootActorPath(address: Address, name: String = "/") extends Act final class ChildActorPath(val parent: ActorPath, val name: String) extends ActorPath { if (name.indexOf('/') != -1) throw new IllegalArgumentException("/ is a path separator and is not legal in ActorPath names: [%s]" format name) - def address: Address = root.address + override def address: Address = root.address - def /(child: String): ActorPath = new ChildActorPath(this, child) + override def /(child: String): ActorPath = new ChildActorPath(this, child) - def elements: Iterable[String] = { + override def elements: Iterable[String] = { @tailrec def rec(p: ActorPath, acc: List[String]): Iterable[String] = p match { case r: RootActorPath ⇒ acc @@ -138,7 +141,7 @@ final class ChildActorPath(val parent: ActorPath, val name: String) extends Acto rec(this, Nil) } - def root = { + override def root: RootActorPath = { @tailrec def rec(p: ActorPath): RootActorPath = p match { case r: RootActorPath ⇒ r @@ -198,7 +201,7 @@ final class ChildActorPath(val parent: ActorPath, 
val name: String) extends Acto finalizeHash(rec(this, startHash(42), startMagicA, startMagicB)) } - def compareTo(other: ActorPath) = { + override def compareTo(other: ActorPath): Int = { @tailrec def rec(left: ActorPath, right: ActorPath): Int = if (left eq right) 0 diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index b3d4ad19d1..32bb674865 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -6,7 +6,6 @@ package akka.actor import akka.dispatch._ import akka.util._ -import scala.collection.immutable.Stack import java.lang.{ UnsupportedOperationException, IllegalStateException } import akka.serialization.{ Serialization, JavaSerializer } import akka.event.EventStream @@ -155,6 +154,7 @@ trait ScalaActorRef { ref: ActorRef ⇒ } +//FIXME should ActorScope be private[akka], me thinks so - √ /** * All ActorRefs have a scope which describes where they live. Since it is * often necessary to distinguish between local and non-local references, this @@ -215,18 +215,20 @@ private[akka] abstract class InternalActorRef extends ActorRef with ScalaActorRe * This is an internal look-up failure token, not useful for anything else. */ private[akka] case object Nobody extends MinimalActorRef { - val path = new RootActorPath(Address("akka", "all-systems"), "/Nobody") - def provider = throw new UnsupportedOperationException("Nobody does not provide") + override val path: RootActorPath = new RootActorPath(Address("akka", "all-systems"), "/Nobody") + override def provider = throw new UnsupportedOperationException("Nobody does not provide") } /** * Local (serializable) ActorRef that is used when referencing the Actor on its "home" node. 
+ * + * INTERNAL API */ private[akka] class LocalActorRef private[akka] ( _system: ActorSystemImpl, _props: Props, _supervisor: InternalActorRef, - val path: ActorPath, + override val path: ActorPath, val systemService: Boolean = false, _receiveTimeout: Option[Duration] = None) extends InternalActorRef with LocalRef { @@ -268,21 +270,21 @@ private[akka] class LocalActorRef private[akka] ( * message sends done from the same thread after calling this method will not * be processed until resumed. */ - def suspend(): Unit = actorCell.suspend() + override def suspend(): Unit = actorCell.suspend() /** * Resumes a suspended actor. */ - def resume(): Unit = actorCell.resume() + override def resume(): Unit = actorCell.resume() /** * Shuts down the actor and its message queue */ - def stop(): Unit = actorCell.stop() + override def stop(): Unit = actorCell.stop() - def getParent: InternalActorRef = actorCell.parent + override def getParent: InternalActorRef = actorCell.parent - def provider = actorCell.provider + override def provider: ActorRefProvider = actorCell.provider /** * Method for looking up a single child beneath this actor. Override in order @@ -294,7 +296,7 @@ private[akka] class LocalActorRef private[akka] ( case None ⇒ Nobody } - def getChild(names: Iterator[String]): InternalActorRef = { + override def getChild(names: Iterator[String]): InternalActorRef = { /* * The idea is to recursively descend as far as possible with LocalActor * Refs and hand over to that “foreign” child when we encounter it. 
@@ -313,6 +315,7 @@ private[akka] class LocalActorRef private[akka] ( case _ ⇒ ref.getChild(name) } + if (names.isEmpty) this else rec(this, names) } @@ -321,11 +324,11 @@ private[akka] class LocalActorRef private[akka] ( protected[akka] def underlying: ActorCell = actorCell - def sendSystemMessage(message: SystemMessage) { underlying.dispatcher.systemDispatch(underlying, message) } + override def sendSystemMessage(message: SystemMessage) { underlying.dispatcher.systemDispatch(underlying, message) } - def !(message: Any)(implicit sender: ActorRef = null): Unit = actorCell.tell(message, sender) + override def !(message: Any)(implicit sender: ActorRef = null): Unit = actorCell.tell(message, sender) - def restart(cause: Throwable): Unit = actorCell.restart(cause) + override def restart(cause: Throwable): Unit = actorCell.restart(cause) @throws(classOf[java.io.ObjectStreamException]) protected def writeReplace(): AnyRef = SerializedActorRef(path) @@ -348,7 +351,7 @@ case class SerializedActorRef private (path: String) { someSystem.actorFor(path) } } - +//FIXME: Should SerializedActorRef be private[akka] ? object SerializedActorRef { def apply(path: ActorPath): SerializedActorRef = { Serialization.currentTransportAddress.value match { @@ -360,33 +363,32 @@ object SerializedActorRef { /** * Trait for ActorRef implementations where all methods contain default stubs. 
+ * + * INTERNAL API */ private[akka] trait MinimalActorRef extends InternalActorRef with LocalRef { - def getParent: InternalActorRef = Nobody + override def getParent: InternalActorRef = Nobody + override def getChild(names: Iterator[String]): InternalActorRef = if (names.forall(_.isEmpty)) this else Nobody - def getChild(names: Iterator[String]): InternalActorRef = { - val dropped = names.dropWhile(_.isEmpty) - if (dropped.isEmpty) this - else Nobody - } + override def suspend(): Unit = () + override def resume(): Unit = () + override def stop(): Unit = () + override def isTerminated = false - def suspend(): Unit = () - def resume(): Unit = () + override def !(message: Any)(implicit sender: ActorRef = null): Unit = () - def stop(): Unit = () - - def isTerminated = false - - def !(message: Any)(implicit sender: ActorRef = null): Unit = () - - def sendSystemMessage(message: SystemMessage): Unit = () - def restart(cause: Throwable): Unit = () + override def sendSystemMessage(message: SystemMessage): Unit = () + override def restart(cause: Throwable): Unit = () @throws(classOf[java.io.ObjectStreamException]) protected def writeReplace(): AnyRef = SerializedActorRef(path) } +/** + * When a message is sent to an Actor that is terminated before receiving the message, it will be sent as a DeadLetter + * to the ActorSystem's EventStream + */ case class DeadLetter(message: Any, sender: ActorRef, recipient: ActorRef) private[akka] object DeadLetterActorRef { @@ -402,10 +404,12 @@ private[akka] object DeadLetterActorRef { /** * This special dead letter reference has a name: it is that which is returned * by a local look-up which is unsuccessful. 
+ * + * INTERNAL API */ private[akka] class EmptyLocalActorRef( - val provider: ActorRefProvider, - val path: ActorPath, + override val provider: ActorRefProvider, + override val path: ActorPath, val eventStream: EventStream) extends MinimalActorRef { override def isTerminated(): Boolean = true @@ -419,6 +423,8 @@ private[akka] class EmptyLocalActorRef( /** * Internal implementation of the dead letter destination: will publish any * received message to the eventStream, wrapped as [[akka.actor.DeadLetter]]. + * + * INTERNAL API */ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: ActorPath, _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { @@ -434,10 +440,12 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: Actor /** * Internal implementation detail used for paths like “/temp” + * + * INTERNAL API */ private[akka] class VirtualPathContainer( - val provider: ActorRefProvider, - val path: ActorPath, + override val provider: ActorRefProvider, + override val path: ActorPath, override val getParent: InternalActorRef, val log: LoggingAdapter) extends MinimalActorRef { @@ -450,12 +458,8 @@ private[akka] class VirtualPathContainer( } } - def removeChild(name: String): Unit = { - children.remove(name) match { - case null ⇒ log.warning("{} trying to remove non-child {}", path, name) - case _ ⇒ //okay - } - } + def removeChild(name: String): Unit = + if (children.remove(name) eq null) log.warning("{} trying to remove non-child {}", path, name) def getChild(name: String): InternalActorRef = children.get(name) From 8924080017797973fcebd28464eea045bedede43 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 15 May 2012 16:01:32 +0200 Subject: [PATCH 026/538] Create test-fixture for durable mailboxes. 
See #2061 * Improved DurableMailboxSpec for stand alone usage * Changed build to publish DurableMailboxSpec in akka-mailboxes-common-test * Changed documentation of durable mailboxes and added full example of how to implement a durable mailbox, with test --- akka-docs/intro/deployment-scenarios.rst | 5 +- akka-docs/intro/getting-started.rst | 3 +- akka-docs/java/microkernel.rst | 2 +- .../actor/mailbox/DurableMailboxDocSpec.scala | 88 ++++++++++++++- .../mailbox/DurableMailboxDocTestBase.java | 13 +-- akka-docs/modules/durable-mailbox.rst | 102 ++++++++++-------- akka-docs/scala/microkernel.rst | 2 +- .../actor/mailbox/FileBasedMailboxSpec.scala | 8 +- .../actor/mailbox/DurableMailboxSpec.scala | 79 ++++++++++++-- .../test/scala/akka/testkit/AkkaSpec.scala | 4 +- project/AkkaBuild.scala | 7 +- 11 files changed, 234 insertions(+), 79 deletions(-) diff --git a/akka-docs/intro/deployment-scenarios.rst b/akka-docs/intro/deployment-scenarios.rst index b2d0334c7d..fc3b38cbd2 100644 --- a/akka-docs/intro/deployment-scenarios.rst +++ b/akka-docs/intro/deployment-scenarios.rst @@ -13,7 +13,7 @@ Akka can be used in different ways: be put into ``WEB-INF/lib`` - As a stand alone application by instantiating ActorSystem in a main class or - using the :ref:`microkernel` + using the :ref:`microkernel-scala` / :ref:`microkernel-java` Using Akka as library @@ -27,5 +27,6 @@ modules to the stack. Using Akka as a stand alone microkernel ---------------------------------------- -Akka can also be run as a stand-alone microkernel. See :ref:`microkernel` for +Akka can also be run as a stand-alone microkernel. See +:ref:`microkernel-scala` / :ref:`microkernel-java` for more information. diff --git a/akka-docs/intro/getting-started.rst b/akka-docs/intro/getting-started.rst index b3bdbf70f3..9c76ee8edf 100644 --- a/akka-docs/intro/getting-started.rst +++ b/akka-docs/intro/getting-started.rst @@ -67,7 +67,8 @@ The Akka distribution includes the microkernel. 
To run the microkernel put your application jar in the ``deploy`` directory and use the scripts in the ``bin`` directory. -More information is available in the documentation of the :ref:`microkernel`. +More information is available in the documentation of the +:ref:`microkernel-scala` / :ref:`microkernel-java`. Using a build tool ------------------ diff --git a/akka-docs/java/microkernel.rst b/akka-docs/java/microkernel.rst index 551c118e94..d6652fe316 100644 --- a/akka-docs/java/microkernel.rst +++ b/akka-docs/java/microkernel.rst @@ -1,5 +1,5 @@ -.. _microkernel: +.. _microkernel-java: Microkernel (Java) ================== diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala index 2f67c607ed..25f312cac3 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -33,8 +33,94 @@ class DurableMailboxDocSpec extends AkkaSpec(DurableMailboxDocSpec.config) { "configuration of dispatcher with durable mailbox" in { //#dispatcher-config-use - val myActor = system.actorOf(Props[MyActor].withDispatcher("my-dispatcher"), name = "myactor") + val myActor = system.actorOf(Props[MyActor]. 
+ withDispatcher("my-dispatcher"), name = "myactor") //#dispatcher-config-use } } + +//#custom-mailbox +import com.typesafe.config.Config +import akka.actor.ActorContext +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.dispatch.Envelope +import akka.dispatch.MailboxType +import akka.dispatch.MessageQueue +import akka.actor.mailbox.DurableMessageQueue +import akka.actor.mailbox.DurableMessageSerialization + +class MyMailboxType(systemSettings: ActorSystem.Settings, config: Config) + extends MailboxType { + + override def create(owner: Option[ActorContext]): MessageQueue = owner match { + case Some(o) ⇒ new MyMessageQueue(o) + case None ⇒ throw new IllegalArgumentException( + "requires an owner (i.e. does not work with BalancingDispatcher)") + } +} + +class MyMessageQueue(_owner: ActorContext) + extends DurableMessageQueue(_owner) with DurableMessageSerialization { + + val storage = new QueueStorage + + def enqueue(receiver: ActorRef, envelope: Envelope) { + val data: Array[Byte] = serialize(envelope) + storage.push(data) + } + + def dequeue(): Envelope = { + val data: Option[Array[Byte]] = storage.pull() + data.map(deserialize(_)).getOrElse(null) + } + + def hasMessages: Boolean = !storage.isEmpty + + def numberOfMessages: Int = storage.size + + def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () + +} +//#custom-mailbox + +// dummy +class QueueStorage { + import java.util.concurrent.ConcurrentLinkedQueue + val queue = new ConcurrentLinkedQueue[Array[Byte]] + def push(data: Array[Byte]): Unit = queue.offer(data) + def pull(): Option[Array[Byte]] = Option(queue.poll()) + def isEmpty: Boolean = queue.isEmpty + def size: Int = queue.size +} + +//#custom-mailbox-test +import akka.actor.mailbox.DurableMailboxSpec + +object MyMailboxSpec { + val config = """ + MyStorage-dispatcher { + mailbox-type = akka.docs.actor.mailbox.MyMailboxType + } + """ +} + +class MyMailboxSpec extends DurableMailboxSpec("MyStorage", 
MyMailboxSpec.config) { + override def atStartup() { + } + + override def atTermination() { + } + + "MyMailbox" must { + "deliver a message" in { + val actor = createMailboxTestActor() + implicit val sender = testActor + actor ! "hello" + expectMsg("hello") + } + + // add more tests + } +} \ No newline at end of file diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java index 834dc6f0fb..25158446b6 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java +++ b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java @@ -4,9 +4,8 @@ package akka.docs.actor.mailbox; //#imports -import akka.actor.UntypedActorFactory; -import akka.actor.UntypedActor; import akka.actor.Props; +import akka.actor.ActorRef; //#imports @@ -16,8 +15,8 @@ import org.junit.Test; import akka.testkit.AkkaSpec; import com.typesafe.config.ConfigFactory; -import akka.actor.ActorRef; import akka.actor.ActorSystem; +import akka.actor.UntypedActor; import static org.junit.Assert.*; @@ -39,12 +38,8 @@ public class DurableMailboxDocTestBase { @Test public void configDefinedDispatcher() { //#dispatcher-config-use - ActorRef myActor = system.actorOf( - new Props().withDispatcher("my-dispatcher").withCreator(new UntypedActorFactory() { - public UntypedActor create() { - return new MyUntypedActor(); - } - }), "myactor"); + ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class). + withDispatcher("my-dispatcher"), "myactor"); //#dispatcher-config-use myActor.tell("test"); } diff --git a/akka-docs/modules/durable-mailbox.rst b/akka-docs/modules/durable-mailbox.rst index 74618d978e..4de60ea12b 100644 --- a/akka-docs/modules/durable-mailbox.rst +++ b/akka-docs/modules/durable-mailbox.rst @@ -9,40 +9,45 @@ Overview ======== -Akka supports a set of durable mailboxes. 
A durable mailbox is a replacement for -the standard actor mailbox that is durable. What this means in practice is that -if there are pending messages in the actor's mailbox when the node of the actor -resides on crashes, then when you restart the node, the actor will be able to -continue processing as if nothing had happened; with all pending messages still -in its mailbox. +A durable mailbox is a replacement for the standard actor mailbox that is durable. +What this means in practice is that if there are pending messages in the actor's +mailbox when the node of the actor resides on crashes, then when you restart the +node, the actor will be able to continue processing as if nothing had happened; +with all pending messages still in its mailbox. -None of these mailboxes implements transactions for current message. It's possible +You configure durable mailboxes through the dispatcher. The actor is oblivious +to which type of mailbox it is using. + +This gives you an excellent way of creating bulkheads in your application, where +groups of actors sharing the same dispatcher also share the same backing +storage. Read more about that in the :ref:`dispatchers-scala` documentation. + +One basic file based durable mailbox is provided by Akka out-of-the-box. +Other implementations can easily be added. Some are available as separate community +Open Source projects, such as: + +* `AMQP Durable Mailbox `_ + + +A durable mailbox typically doesn't implements transactions for current message. It's possible if the actor crashes after receiving a message, but before completing processing of it, that the message could be lost. -.. warning:: **IMPORTANT** +.. warning:: - None of these mailboxes work with blocking message send, i.e. the message + A durable mailbox typically doesn't work with blocking message send, i.e. the message send operations that are relying on futures; ``?`` or ``ask``. 
If the node has crashed and then restarted, the thread that was blocked waiting for the reply is gone and there is no way we can deliver the message. -The durable mailboxes supported out-of-the-box are: - - ``FileBasedMailbox`` -- backed by a journaling transaction log on the local file system +File-based durable mailbox +========================== -You can easily implement your own mailbox. Look at the existing implementation for inspiration. - -.. _DurableMailbox.General: - -General Usage -------------- - -The durable mailboxes and their configuration options reside in the -``akka.actor.mailbox`` package. - -You configure durable mailboxes through the dispatcher. The -actor is oblivious to which type of mailbox it is using. +This mailbox is backed by a journaling transaction log on the local file +system. It is the simplest to use since it does not require an extra +infrastructure piece to administer, but it is usually sufficient and just what +you need. In the configuration of the dispatcher you specify the fully qualified class name of the mailbox: @@ -60,32 +65,37 @@ Corresponding example in Java: .. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java :include: imports,dispatcher-config-use -The actor is oblivious to which type of mailbox it is using. - -This gives you an excellent way of creating bulkheads in your application, where -groups of actors sharing the same dispatcher also share the same backing -storage. Read more about that in the :ref:`dispatchers-scala` documentation. - -File-based durable mailbox -========================== - -This mailbox is backed by a journaling transaction log on the local file -system. It is the simplest to use since it does not require an extra -infrastructure piece to administer, but it is usually sufficient and just what -you need. - -You configure durable mailboxes through the dispatcher, as described in -:ref:`DurableMailbox.General` with the following mailbox type. 
- -Config:: - - my-dispatcher { - mailbox-type = akka.actor.mailbox.FileBasedMailboxType - } - You can also configure and tune the file-based durable mailbox. This is done in the ``akka.actor.mailbox.file-based`` section in the :ref:`configuration`. .. literalinclude:: ../../akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf :language: none +How to implement a durable mailbox +================================== + +Here is an example of how to implement a custom durable mailbox. Essentially it consists of +a configurator (MailboxType) and a queue implementation (DurableMessageQueue). + +The envelope contains the message sent to the actor, and information about sender. It is the +envelope that needs to be stored. As a help utility you can mixin DurableMessageSerialization +to serialize and deserialize the envelope using the ordinary :ref:`serialization-scala` +mechanism. This optional and you may store the envelope data in any way you like. + +.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala + :include: custom-mailbox + +To facilitate testing of a durable mailbox you may use ``DurableMailboxSpec`` as base class. +It implements a few basic tests and helps you setup the a fixture. More tests can be +added in concrete subclass like this: + +.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala + :include: custom-mailbox-test + +You find DurableMailboxDocSpec in ``akka-mailboxes-common-test-2.1-SNAPSHOT.jar``. +Add this dependency:: + + "com.typesafe.akka" % "akka-mailboxes-common-test" % "2.1-SNAPSHOT" + +For more inspiration you can look at the old implementations based on Redis, MongoDB, Beanstalk, +and ZooKeeper, which can be found in Akka git repository tag v2.0.1. 
\ No newline at end of file diff --git a/akka-docs/scala/microkernel.rst b/akka-docs/scala/microkernel.rst index 8fb1aec2c2..108a00588a 100644 --- a/akka-docs/scala/microkernel.rst +++ b/akka-docs/scala/microkernel.rst @@ -1,5 +1,5 @@ -.. _microkernel: +.. _microkernel-scala: Microkernel (Scala) =================== diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala b/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala index be82e0fcb3..6c97142068 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala @@ -25,19 +25,17 @@ class FileBasedMailboxSpec extends DurableMailboxSpec("File", FileBasedMailboxSp } } - def isDurableMailbox(m: Mailbox): Boolean = m.messageQueue.isInstanceOf[FileBasedMessageQueue] - - def clean { + def clean() { FileUtils.deleteDirectory(new java.io.File(queuePath)) } override def atStartup() { - clean + clean() super.atStartup() } override def atTermination() { - clean + clean() super.atTermination() } } diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala index ff436c227e..9081a5fcb0 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala @@ -3,14 +3,25 @@ */ package akka.actor.mailbox -import akka.testkit.AkkaSpec -import akka.testkit.TestLatch -import akka.util.duration._ -import java.io.InputStream -import scala.annotation.tailrec +import DurableMailboxSpecActorFactory.AccumulatorActor +import 
DurableMailboxSpecActorFactory.MailboxTestActor +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.LocalActorRef +import akka.actor.Props +import akka.actor.actorRef2Scala +import akka.dispatch.Mailbox +import akka.testkit.TestKit +import akka.util.duration.intToDurationInt import com.typesafe.config.Config -import akka.actor._ -import akka.dispatch.{ Mailbox, Await } +import com.typesafe.config.ConfigFactory +import java.io.InputStream +import java.util.concurrent.TimeoutException +import org.scalatest.BeforeAndAfterAll +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import scala.annotation.tailrec object DurableMailboxSpecActorFactory { @@ -28,13 +39,62 @@ object DurableMailboxSpecActorFactory { } +object DurableMailboxSpec { + def fallbackConfig: Config = ConfigFactory.parseString(""" + akka { + event-handlers = ["akka.testkit.TestEventListener"] + loglevel = "WARNING" + stdout-loglevel = "WARNING" + } + """) +} + /** + * Reusable test fixture for durable mailboxes. Implements a few basic tests. More + * tests can be added in concrete subclass. + * * Subclass must define dispatcher in the supplied config for the specific backend. * The id of the dispatcher must be the same as the `-dispatcher`. */ -abstract class DurableMailboxSpec(val backendName: String, config: String) extends AkkaSpec(config) { +abstract class DurableMailboxSpec(system: ActorSystem, val backendName: String) + extends TestKit(system) with WordSpec with MustMatchers with BeforeAndAfterAll { + import DurableMailboxSpecActorFactory._ + /** + * Subclass must define dispatcher in the supplied config for the specific backend. + * The id of the dispatcher must be the same as the `-dispatcher`. 
+ */ + def this(backendName: String, config: String) = { + this(ActorSystem(backendName + "BasedDurableMailboxSpec", + ConfigFactory.parseString(config).withFallback(DurableMailboxSpec.fallbackConfig)), + backendName) + } + + final override def beforeAll { + atStartup() + } + + /** + * May be implemented in concrete subclass to do additional things once before test + * cases are run. + */ + protected def atStartup() {} + + final override def afterAll { + system.shutdown() + try system.awaitTermination(5 seconds) catch { + case _: TimeoutException ⇒ system.log.warning("Failed to stop [{}] within 5 seconds", system.name) + } + atTermination() + } + + /** + * May be implemented in concrete subclass to do additional things once after all + * test cases have been run. + */ + def atTermination() {} + protected def streamMustContain(in: InputStream, words: String): Unit = { val output = new Array[Byte](8192) @@ -60,7 +120,8 @@ abstract class DurableMailboxSpec(val backendName: String, config: String) exten case some ⇒ system.actorOf(props.withDispatcher(backendName + "-dispatcher"), some) } - def isDurableMailbox(m: Mailbox): Boolean + private def isDurableMailbox(m: Mailbox): Boolean = + m.messageQueue.isInstanceOf[DurableMessageQueue] "A " + backendName + " based mailbox backed actor" must { diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index fd763e6bad..f24ea49b8c 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -5,7 +5,7 @@ package akka.testkit import org.scalatest.{ WordSpec, BeforeAndAfterAll, Tag } import org.scalatest.matchers.MustMatchers -import akka.actor.{ ActorSystem, ActorSystemImpl } +import akka.actor.ActorSystem import akka.actor.{ Actor, ActorRef, Props } import akka.event.{ Logging, LoggingAdapter } import akka.util.duration._ @@ -72,7 +72,7 @@ abstract class AkkaSpec(_system: ActorSystem) 
final override def afterAll { system.shutdown() - try Await.ready(system.asInstanceOf[ActorSystemImpl].terminationFuture, 5 seconds) catch { + try system.awaitTermination(5 seconds) catch { case _: TimeoutException ⇒ system.log.warning("Failed to stop [{}] within 5 seconds", system.name) } atTermination() diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 4804c0f796..a489d57c8b 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -154,7 +154,9 @@ object AkkaBuild extends Build { base = file("akka-durable-mailboxes/akka-mailboxes-common"), dependencies = Seq(remote, testkit % "compile;test->test"), settings = defaultSettings ++ Seq( - libraryDependencies ++= Dependencies.mailboxes + libraryDependencies ++= Dependencies.mailboxes, + // DurableMailboxSpec published in akka-mailboxes-common-test + publishArtifact in Test := true ) ) @@ -257,7 +259,8 @@ object AkkaBuild extends Build { lazy val docs = Project( id = "akka-docs", base = file("akka-docs"), - dependencies = Seq(actor, testkit % "test->test", remote, cluster, slf4j, agent, transactor, fileMailbox, zeroMQ, camel), + dependencies = Seq(actor, testkit % "test->test", mailboxesCommon % "compile;test->test", + remote, cluster, slf4j, agent, transactor, fileMailbox, zeroMQ, camel), settings = defaultSettings ++ Sphinx.settings ++ Seq( unmanagedSourceDirectories in Test <<= baseDirectory { _ ** "code" get }, libraryDependencies ++= Dependencies.docs, From c6d60e1089e940e68d1bb1c0a4de0b632a17db5d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 16:26:08 +0200 Subject: [PATCH 027/538] Future proofing ActorRefProvider --- .../scala/akka/actor/ActorRefProvider.scala | 75 +++++++++++-------- .../akka/remote/RemoteActorRefProvider.scala | 10 +-- 2 files changed, 50 insertions(+), 35 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 536136934a..f1dac8e28d 100644 --- 
a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -49,8 +49,12 @@ trait ActorRefProvider { */ def rootPath: ActorPath + /** + * The Settings associated with this ActorRefProvider + */ def settings: ActorSystem.Settings + //FIXME WHY IS THIS HERE? def dispatcher: MessageDispatcher /** @@ -61,8 +65,12 @@ trait ActorRefProvider { */ def init(system: ActorSystemImpl): Unit + /** + * The Deployer associated with this ActorRefProvider + */ def deployer: Deployer + //FIXME WHY IS THIS HERE? def scheduler: Scheduler /** @@ -131,6 +139,7 @@ trait ActorRefProvider { */ def terminationFuture: Future[Unit] + //FIXME I PROPOSE TO REMOVE THIS IN 2.1 - √ /** * Obtain the address which is to be used within sender references when * sending to the given other address or none if the other address cannot be @@ -141,22 +150,33 @@ trait ActorRefProvider { } /** - * Interface implemented by ActorSystem and AkkaContext, the only two places + * Interface implemented by ActorSystem and ActorContext, the only two places * from which you can get fresh actors. */ trait ActorRefFactory { - + /** + * INTERNAL USE ONLY + */ protected def systemImpl: ActorSystemImpl - + /** + * INTERNAL USE ONLY + */ protected def provider: ActorRefProvider - + /** + * INTERNAL USE ONLY + */ protected def dispatcher: MessageDispatcher /** * Father of all children created by this interface. + * + * INTERNAL USE ONLY */ protected def guardian: InternalActorRef + /** + * INTERNAL USE ONLY + */ protected def lookupRoot: InternalActorRef /** @@ -276,8 +296,6 @@ trait ActorRefFactory { def stop(actor: ActorRef): Unit } -class ActorRefProviderException(message: String) extends AkkaException(message) - /** * Internal Akka use only, used in implementation of system.actorOf. 
*/ @@ -298,10 +316,10 @@ private[akka] case class StopChild(child: ActorRef) */ class LocalActorRefProvider( _systemName: String, - val settings: ActorSystem.Settings, + override val settings: ActorSystem.Settings, val eventStream: EventStream, - val scheduler: Scheduler, - val deployer: Deployer) extends ActorRefProvider { + override val scheduler: Scheduler, + override val deployer: Deployer) extends ActorRefProvider { // this is the constructor needed for reflectively instantiating the provider def this(_systemName: String, @@ -315,13 +333,13 @@ class LocalActorRefProvider( scheduler, new Deployer(settings, dynamicAccess)) - val rootPath: ActorPath = RootActorPath(Address("akka", _systemName)) + override val rootPath: ActorPath = RootActorPath(Address("akka", _systemName)) - val log = Logging(eventStream, "LocalActorRefProvider(" + rootPath.address + ")") + private[akka] val log: LoggingAdapter = Logging(eventStream, "LocalActorRefProvider(" + rootPath.address + ")") - val deadLetters = new DeadLetterActorRef(this, rootPath / "deadLetters", eventStream) + override val deadLetters: InternalActorRef = new DeadLetterActorRef(this, rootPath / "deadLetters", eventStream) - val deathWatch = new LocalDeathWatch(1024) //TODO make configrable + override val deathWatch: DeathWatch = new LocalDeathWatch(1024) //TODO make configrable /* * generate name for temporary actor refs @@ -332,7 +350,7 @@ class LocalActorRefProvider( private val tempNode = rootPath / "temp" - def tempPath() = tempNode / tempName() + override def tempPath() = tempNode / tempName() /** * Top-level anchor for the supervision hierarchy of this actor system. 
Will @@ -348,11 +366,11 @@ class LocalActorRefProvider( def provider: ActorRefProvider = LocalActorRefProvider.this - override def stop() = stopped switchOn { + override def stop(): Unit = stopped switchOn { terminationFuture.complete(causeOfTermination.toLeft(())) } - override def isTerminated = stopped.isOn + override def isTerminated: Boolean = stopped.isOn override def !(message: Any)(implicit sender: ActorRef = null): Unit = stopped.ifOff(message match { case Failed(ex) if sender ne null ⇒ causeOfTermination = Some(ex); sender.asInstanceOf[InternalActorRef].stop() @@ -371,7 +389,7 @@ class LocalActorRefProvider( /** * Overridable supervision strategy to be used by the “/user” guardian. */ - protected def guardianSupervisionStrategy = { + protected def guardianSupervisionStrategy: SupervisorStrategy = { import akka.actor.SupervisorStrategy._ OneForOneStrategy() { case _: ActorKilledException ⇒ Stop @@ -387,12 +405,12 @@ class LocalActorRefProvider( */ private class Guardian extends Actor { - override val supervisorStrategy = guardianSupervisionStrategy + override val supervisorStrategy: SupervisorStrategy = guardianSupervisionStrategy def receive = { case Terminated(_) ⇒ context.stop(self) - case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! DeadLetter(m, sender, self) } @@ -404,12 +422,11 @@ class LocalActorRefProvider( /** * Overridable supervision strategy to be used by the “/system” guardian. 
*/ - protected def systemGuardianSupervisionStrategy = { + protected def systemGuardianSupervisionStrategy: SupervisorStrategy = { import akka.actor.SupervisorStrategy._ OneForOneStrategy() { - case _: ActorKilledException ⇒ Stop - case _: ActorInitializationException ⇒ Stop - case _: Exception ⇒ Restart + case _: ActorKilledException | _: ActorInitializationException ⇒ Stop + case _: Exception ⇒ Restart } } @@ -420,14 +437,12 @@ class LocalActorRefProvider( */ private class SystemGuardian extends Actor { - override val supervisorStrategy = systemGuardianSupervisionStrategy + override val supervisorStrategy: SupervisorStrategy = systemGuardianSupervisionStrategy def receive = { - case Terminated(_) ⇒ - eventStream.stopDefaultLoggers() - context.stop(self) - case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) + case Terminated(_) ⇒ eventStream.stopDefaultLoggers(); context.stop(self) + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! 
DeadLetter(m, sender, self) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index ab53d9e99d..8f1ec6e1b7 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -256,9 +256,9 @@ private[akka] class RemoteActorRef private[akka] ( private def writeReplace(): AnyRef = SerializedActorRef(path) } -class RemoteDeathWatch(val local: LocalDeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { +class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { - def subscribe(watcher: ActorRef, watched: ActorRef): Boolean = watched match { + override def subscribe(watcher: ActorRef, watched: ActorRef): Boolean = watched match { case r: RemoteRef ⇒ val ret = local.subscribe(watcher, watched) provider.actorFor(r.path.root / "remote") ! DaemonMsgWatch(watcher, watched) @@ -270,10 +270,10 @@ class RemoteDeathWatch(val local: LocalDeathWatch, val provider: RemoteActorRefP false } - def unsubscribe(watcher: ActorRef, watched: ActorRef): Boolean = local.unsubscribe(watcher, watched) + override def unsubscribe(watcher: ActorRef, watched: ActorRef): Boolean = local.unsubscribe(watcher, watched) - def unsubscribe(watcher: ActorRef): Unit = local.unsubscribe(watcher) + override def unsubscribe(watcher: ActorRef): Unit = local.unsubscribe(watcher) - def publish(event: Terminated): Unit = local.publish(event) + override def publish(event: Terminated): Unit = local.publish(event) } From 793af8b4ec316166f1cc3a17259443bd343bc054 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 15 May 2012 17:16:46 +0200 Subject: [PATCH 028/538] Imrovements from feedback. 
See #1755 --- .../akka/serialization/DaemonMsgCreateSerializer.scala | 2 +- .../akka/serialization/DaemonMsgWatchSerializer.scala | 2 +- .../scala/akka/serialization/ProtobufSerializer.scala | 10 ++++++++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala index cc7e3b3851..7bd3124792 100644 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala @@ -35,7 +35,7 @@ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Seriali import ProtobufSerializer.serializeActorRef import ProtobufSerializer.deserializeActorRef - def includeManifest: Boolean = true + def includeManifest: Boolean = false def identifier = 3 lazy val serialization = SerializationExtension(system) diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala index a564e92088..0ca5216da0 100644 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala @@ -17,7 +17,7 @@ class DaemonMsgWatchSerializer(val system: ExtendedActorSystem) extends Serializ import ProtobufSerializer.serializeActorRef import ProtobufSerializer.deserializeActorRef - def includeManifest: Boolean = true + def includeManifest: Boolean = false def identifier = 4 def toBinary(obj: AnyRef): Array[Byte] = obj match { diff --git a/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala index 72690b3c91..d9a5c7b0c4 100644 --- a/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala +++ 
b/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala @@ -11,6 +11,11 @@ import akka.actor.ActorSystem import akka.actor.ActorRef object ProtobufSerializer { + + /** + * Helper to serialize an [[akka.actor.ActorRef]] to Akka's + * protobuf representation. + */ def serializeActorRef(ref: ActorRef): ActorRefProtocol = { val identifier: String = Serialization.currentTransportAddress.value match { case null ⇒ ref.path.toString @@ -19,6 +24,11 @@ object ProtobufSerializer { ActorRefProtocol.newBuilder.setPath(identifier).build } + /** + * Helper to materialize (lookup) an [[akka.actor.ActorRef]] + * from Akka's protobuf representation in the supplied + * [[akka.actor.ActorSystem]. + */ def deserializeActorRef(system: ActorSystem, refProtocol: ActorRefProtocol): ActorRef = system.actorFor(refProtocol.getPath) } From d092d17aed72bc69f698446c1147feecfc94f1ba Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 15 May 2012 17:44:56 +0200 Subject: [PATCH 029/538] Adding even more future proofing --- .../src/main/scala/akka/actor/ActorRef.scala | 7 ++-- .../scala/akka/actor/ActorSelection.scala | 21 ++++++++-- .../main/scala/akka/actor/ActorSystem.scala | 38 +++++++++++------- .../src/main/scala/akka/actor/Address.scala | 40 ++++++++++++------- 4 files changed, 70 insertions(+), 36 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 32bb674865..d0ad270957 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -305,11 +305,10 @@ private[akka] class LocalActorRef private[akka] ( def rec(ref: InternalActorRef, name: Iterator[String]): InternalActorRef = ref match { case l: LocalActorRef ⇒ - val n = name.next() - val next = n match { + val next = name.next() match { case ".." 
⇒ l.getParent case "" ⇒ l - case _ ⇒ l.getSingleChild(n) + case any ⇒ l.getSingleChild(any) } if (next == Nobody || name.isEmpty) next else rec(next, name) case _ ⇒ @@ -324,7 +323,7 @@ private[akka] class LocalActorRef private[akka] ( protected[akka] def underlying: ActorCell = actorCell - override def sendSystemMessage(message: SystemMessage) { underlying.dispatcher.systemDispatch(underlying, message) } + override def sendSystemMessage(message: SystemMessage): Unit = underlying.dispatcher.systemDispatch(underlying, message) override def !(message: Any)(implicit sender: ActorRef = null): Unit = actorCell.tell(message, sender) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index b407868270..44767cb0b6 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -5,6 +5,10 @@ package akka.actor import java.util.regex.Pattern import akka.util.Helpers +/** + * An ActorSelection is a logical view of a section of an ActorSystem's tree of Actors, + * allowing for broadcasting of messages to that section. + */ abstract class ActorSelection { this: ScalaActorSelection ⇒ @@ -12,11 +16,11 @@ abstract class ActorSelection { protected def path: Array[AnyRef] - def tell(msg: Any) { target ! toMessage(msg, path) } + def tell(msg: Any): Unit = target ! toMessage(msg, path) - def tell(msg: Any, sender: ActorRef) { target.tell(toMessage(msg, path), sender) } + def tell(msg: Any, sender: ActorRef): Unit = target.tell(toMessage(msg, path), sender) - // this may want to be fast ... 
+ // FIXME make this so that "next" instead is the remaining path private def toMessage(msg: Any, path: Array[AnyRef]): Any = { var acc = msg var index = path.length - 1 @@ -32,7 +36,12 @@ abstract class ActorSelection { } } +/** + * An ActorSelection is a logical view of a section of an ActorSystem's tree of Actors, + * allowing for broadcasting of messages to that section. + */ object ActorSelection { + //This cast is safe because the self-type of ActorSelection requires that it mixes in ScalaActorSelection implicit def toScala(sel: ActorSelection): ScalaActorSelection = sel.asInstanceOf[ScalaActorSelection] /** @@ -43,7 +52,7 @@ object ActorSelection { */ def apply(anchor: ActorRef, path: String): ActorSelection = { val elems = path.split("/+").dropWhile(_.isEmpty) - val compiled: Array[AnyRef] = elems map (x ⇒ if (x.contains("?") || x.contains("*")) Helpers.makePattern(x) else x) + val compiled: Array[AnyRef] = elems map (x ⇒ if (x.contains('?') || x.contains('*')) Helpers.makePattern(x) else x) new ActorSelection with ScalaActorSelection { def target = anchor def path = compiled @@ -51,6 +60,10 @@ object ActorSelection { } } +/** + * Contains the Scala API (!-method) for ActorSelections) which provides automatic tracking of the sender, + * as per the usual implicit ActorRef pattern. 
+ */ trait ScalaActorSelection { this: ActorSelection ⇒ diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index b84057b749..32e221a7a1 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -10,32 +10,30 @@ import akka.dispatch._ import akka.pattern.ask import org.jboss.netty.akka.util.HashedWheelTimer import java.util.concurrent.TimeUnit.MILLISECONDS -import com.typesafe.config.Config -import com.typesafe.config.ConfigFactory +import com.typesafe.config.{ Config, ConfigFactory } import scala.annotation.tailrec import org.jboss.netty.akka.util.internal.ConcurrentIdentityHashMap import java.io.Closeable -import akka.dispatch.Await.Awaitable -import akka.dispatch.Await.CanAwait +import akka.dispatch.Await.{ Awaitable, CanAwait } import akka.util._ import collection.immutable.Stack import java.util.concurrent.{ ThreadFactory, CountDownLatch, TimeoutException, RejectedExecutionException } object ActorSystem { - val Version = "2.1-SNAPSHOT" + val Version: String = "2.1-SNAPSHOT" - val EnvHome = System.getenv("AKKA_HOME") match { + val EnvHome: Option[String] = System.getenv("AKKA_HOME") match { case null | "" | "." ⇒ None case value ⇒ Some(value) } - val SystemHome = System.getProperty("akka.home") match { + val SystemHome: Option[String] = System.getProperty("akka.home") match { case null | "" ⇒ None case value ⇒ Some(value) } - val GlobalHome = SystemHome orElse EnvHome + val GlobalHome: Option[String] = SystemHome orElse EnvHome /** * Creates a new ActorSystem with the name "default", @@ -102,8 +100,16 @@ object ActorSystem { */ def apply(name: String, config: Config, classLoader: ClassLoader): ActorSystem = new ActorSystemImpl(name, config, classLoader).start() + /** + * Settings are the overall ActorSystem Settings which also provides a convenient access to the Config object. 
+ * + * For more detailed information about the different possible configuration options, look in the Akka Documentation under "Configuration" + */ class Settings(classLoader: ClassLoader, cfg: Config, final val name: String) { + /** + * The backing Config of this ActorSystem's Settings + */ final val config: Config = { val config = cfg.withFallback(ConfigFactory.defaultReference(classLoader)) config.checkValid(ConfigFactory.defaultReference(classLoader), "akka") @@ -114,11 +120,9 @@ object ActorSystem { import config._ final val ConfigVersion = getString("akka.version") - final val ProviderClass = getString("akka.actor.provider") - final val CreationTimeout = Timeout(Duration(getMilliseconds("akka.actor.creation-timeout"), MILLISECONDS)) - final val ReaperInterval = Duration(getMilliseconds("akka.actor.reaper-interval"), MILLISECONDS) + final val SerializeAllMessages = getBoolean("akka.actor.serialize-messages") final val SerializeAllCreators = getBoolean("akka.actor.serialize-creators") @@ -148,11 +152,14 @@ object ActorSystem { if (ConfigVersion != Version) throw new ConfigurationException("Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]") + /** + * Returns the String representation of the Config that this Settings is backed by + */ override def toString: String = config.root.render } /** - * INTERNAL + * INTERNAL USE ONLY */ private[akka] def findClassLoader(): ClassLoader = { def findCaller(get: Int ⇒ Class[_]): ClassLoader = @@ -422,6 +429,8 @@ abstract class ExtendedActorSystem extends ActorSystem { def dynamicAccess: DynamicAccess } +//FIXME This should most probably not be protected[akka] right? 
- √ +//FIXME We also need to decide whether this should be supported API or not - √ class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Config, classLoader: ClassLoader) extends ExtendedActorSystem { if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-]*$""")) @@ -475,7 +484,7 @@ class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Conf def logConfiguration(): Unit = log.info(settings.toString) - protected def systemImpl = this + protected def systemImpl: ActorSystemImpl = this private[akka] def systemActorOf(props: Props, name: String): ActorRef = { implicit val timeout = settings.CreationTimeout @@ -539,6 +548,7 @@ class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Conf def deadLetters: ActorRef = provider.deadLetters + //FIXME Why do we need this at all? val deadLetterQueue: MessageQueue = new MessageQueue { def enqueue(receiver: ActorRef, envelope: Envelope) { deadLetters ! DeadLetter(envelope.message, envelope.sender, receiver) } def dequeue() = null @@ -546,7 +556,7 @@ class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Conf def numberOfMessages = 0 def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () } - + //FIXME Why do we need this at all? val deadLetterMailbox: Mailbox = new Mailbox(null, deadLetterQueue) { becomeClosed() def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = deadLetters ! 
DeadLetter(handle, receiver, receiver) diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index 53f95e12d0..67f147b836 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -21,32 +21,43 @@ final case class Address private (protocol: String, system: String, host: Option def this(protocol: String, system: String) = this(protocol, system, None, None) def this(protocol: String, system: String, host: String, port: Int) = this(protocol, system, Option(host), Some(port)) + /** + * Returns the canonical String representation of this Address formatted as: + * + * ://@: + */ @transient override lazy val toString: String = { - val sb = new StringBuilder(protocol) - sb.append("://") - sb.append(system) - if (host.isDefined) { - sb.append('@') - sb.append(host.get) - } - if (port.isDefined) { - sb.append(':') - sb.append(port.get) - } + val sb = (new StringBuilder(protocol)).append("://").append(system) + + if (host.isDefined) sb.append('@').append(host.get) + if (port.isDefined) sb.append(':').append(port.get) + sb.toString } - def hostPort: String = toString.substring(protocol.length() + 3) + /** + * Returns a String representation formatted as: + * + * @: + */ + def hostPort: String = toString.substring(protocol.length + 3) } object Address { + /** + * Constructs a new Address with the specified protocol and system name + */ def apply(protocol: String, system: String) = new Address(protocol, system) + + /** + * Constructs a new Address with the specified protocol, system name, host and port + */ def apply(protocol: String, system: String, host: String, port: Int) = new Address(protocol, system, Some(host), Some(port)) } private[akka] trait PathUtils { - def split(s: String): List[String] = { + protected def split(s: String): List[String] = { @tailrec def rec(pos: Int, acc: List[String]): List[String] = { val from = s.lastIndexOf('/', pos - 
1) @@ -94,7 +105,7 @@ object AddressFromURIString { */ def apply(addr: String): Address = addr match { case AddressFromURIString(address) ⇒ address - case _ ⇒ throw new MalformedURLException + case _ ⇒ throw new MalformedURLException(addr) } /** @@ -103,6 +114,7 @@ object AddressFromURIString { def parse(addr: String): Address = apply(addr) } +//FIXME is this public API? - √ object ActorPathExtractor extends PathUtils { def unapply(addr: String): Option[(Address, Iterable[String])] = try { From 31ace9e83f1a9dc361bc6d1aa4e79c4825e8e07e Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 15 May 2012 18:22:40 +0200 Subject: [PATCH 030/538] Optimize remote serialization for the create from class case. See #1755 * Added FromClassCreator as special Function0 to be able to serialize class name as protobuf string --- .../src/main/scala/akka/actor/Props.scala | 13 +- .../main/java/akka/remote/RemoteProtocol.java | 314 +++++++++++------- .../src/main/protocol/RemoteProtocol.proto | 9 +- .../DaemonMsgCreateSerializer.scala | 27 +- .../DaemonMsgCreateSerializerSpec.scala | 75 +++-- 5 files changed, 278 insertions(+), 160 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 3751898c5c..d66fb6653c 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -127,7 +127,7 @@ case class Props( * Java API. */ def this(actorClass: Class[_ <: Actor]) = this( - creator = () ⇒ actorClass.newInstance, + creator = FromClassCreator(actorClass), dispatcher = Dispatchers.DefaultDispatcherId, routerConfig = Props.defaultRoutedProps) @@ -150,7 +150,7 @@ case class Props( * * Java API. */ - def withCreator(c: Class[_ <: Actor]): Props = copy(creator = () ⇒ c.newInstance) + def withCreator(c: Class[_ <: Actor]): Props = copy(creator = FromClassCreator(c)) /** * Returns a new Props with the specified dispatcher set. 
@@ -166,4 +166,13 @@ case class Props( * Returns a new Props with the specified deployment configuration. */ def withDeploy(d: Deploy): Props = copy(deploy = d) + } + +/** + * Used when creating an Actor from a class. Special Function0 to be + * able to optimize serialization. + */ +private[akka] case class FromClassCreator(clazz: Class[_ <: Actor]) extends Function0[Actor] { + def apply(): Actor = clazz.newInstance +} \ No newline at end of file diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java index e340a807cf..d168e5acc1 100644 --- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java +++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java @@ -5035,20 +5035,24 @@ public final class RemoteProtocol { public interface PropsProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes creator = 1; - boolean hasCreator(); - com.google.protobuf.ByteString getCreator(); - - // required string dispatcher = 2; + // required string dispatcher = 1; boolean hasDispatcher(); String getDispatcher(); - // required .DeployProtocol deploy = 3; + // required .DeployProtocol deploy = 2; boolean hasDeploy(); akka.remote.RemoteProtocol.DeployProtocol getDeploy(); akka.remote.RemoteProtocol.DeployProtocolOrBuilder getDeployOrBuilder(); - // optional bytes routerConfig = 4; + // optional string fromClassCreator = 3; + boolean hasFromClassCreator(); + String getFromClassCreator(); + + // optional bytes creator = 4; + boolean hasCreator(); + com.google.protobuf.ByteString getCreator(); + + // optional bytes routerConfig = 5; boolean hasRouterConfig(); com.google.protobuf.ByteString getRouterConfig(); } @@ -5081,21 +5085,11 @@ public final class RemoteProtocol { } private int bitField0_; - // required bytes creator = 1; - public static final int CREATOR_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString creator_; - public boolean hasCreator() { - return 
((bitField0_ & 0x00000001) == 0x00000001); - } - public com.google.protobuf.ByteString getCreator() { - return creator_; - } - - // required string dispatcher = 2; - public static final int DISPATCHER_FIELD_NUMBER = 2; + // required string dispatcher = 1; + public static final int DISPATCHER_FIELD_NUMBER = 1; private java.lang.Object dispatcher_; public boolean hasDispatcher() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000001) == 0x00000001); } public String getDispatcher() { java.lang.Object ref = dispatcher_; @@ -5123,11 +5117,11 @@ public final class RemoteProtocol { } } - // required .DeployProtocol deploy = 3; - public static final int DEPLOY_FIELD_NUMBER = 3; + // required .DeployProtocol deploy = 2; + public static final int DEPLOY_FIELD_NUMBER = 2; private akka.remote.RemoteProtocol.DeployProtocol deploy_; public boolean hasDeploy() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000002) == 0x00000002); } public akka.remote.RemoteProtocol.DeployProtocol getDeploy() { return deploy_; @@ -5136,20 +5130,63 @@ public final class RemoteProtocol { return deploy_; } - // optional bytes routerConfig = 4; - public static final int ROUTERCONFIG_FIELD_NUMBER = 4; + // optional string fromClassCreator = 3; + public static final int FROMCLASSCREATOR_FIELD_NUMBER = 3; + private java.lang.Object fromClassCreator_; + public boolean hasFromClassCreator() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getFromClassCreator() { + java.lang.Object ref = fromClassCreator_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + fromClassCreator_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getFromClassCreatorBytes() { + java.lang.Object ref = fromClassCreator_; + if (ref instanceof 
String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + fromClassCreator_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional bytes creator = 4; + public static final int CREATOR_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString creator_; + public boolean hasCreator() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public com.google.protobuf.ByteString getCreator() { + return creator_; + } + + // optional bytes routerConfig = 5; + public static final int ROUTERCONFIG_FIELD_NUMBER = 5; private com.google.protobuf.ByteString routerConfig_; public boolean hasRouterConfig() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000010) == 0x00000010); } public com.google.protobuf.ByteString getRouterConfig() { return routerConfig_; } private void initFields() { - creator_ = com.google.protobuf.ByteString.EMPTY; dispatcher_ = ""; deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance(); + fromClassCreator_ = ""; + creator_ = com.google.protobuf.ByteString.EMPTY; routerConfig_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; @@ -5157,10 +5194,6 @@ public final class RemoteProtocol { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasCreator()) { - memoizedIsInitialized = 0; - return false; - } if (!hasDispatcher()) { memoizedIsInitialized = 0; return false; @@ -5181,16 +5214,19 @@ public final class RemoteProtocol { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, creator_); + output.writeBytes(1, getDispatcherBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getDispatcherBytes()); + output.writeMessage(2, deploy_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, deploy_); + 
output.writeBytes(3, getFromClassCreatorBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, routerConfig_); + output.writeBytes(4, creator_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, routerConfig_); } getUnknownFields().writeTo(output); } @@ -5203,19 +5239,23 @@ public final class RemoteProtocol { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, creator_); + .computeBytesSize(1, getDispatcherBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getDispatcherBytes()); + .computeMessageSize(2, deploy_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, deploy_); + .computeBytesSize(3, getFromClassCreatorBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, routerConfig_); + .computeBytesSize(4, creator_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, routerConfig_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -5342,18 +5382,20 @@ public final class RemoteProtocol { public Builder clear() { super.clear(); - creator_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); dispatcher_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000001); if (deployBuilder_ == null) { deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance(); } else { deployBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000002); + fromClassCreator_ = ""; bitField0_ = (bitField0_ & ~0x00000004); - routerConfig_ = com.google.protobuf.ByteString.EMPTY; + creator_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & 
~0x00000008); + routerConfig_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -5395,22 +5437,26 @@ public final class RemoteProtocol { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.creator_ = creator_; + result.dispatcher_ = dispatcher_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.dispatcher_ = dispatcher_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } if (deployBuilder_ == null) { result.deploy_ = deploy_; } else { result.deploy_ = deployBuilder_.build(); } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.fromClassCreator_ = fromClassCreator_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } + result.creator_ = creator_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } result.routerConfig_ = routerConfig_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -5428,15 +5474,18 @@ public final class RemoteProtocol { public Builder mergeFrom(akka.remote.RemoteProtocol.PropsProtocol other) { if (other == akka.remote.RemoteProtocol.PropsProtocol.getDefaultInstance()) return this; - if (other.hasCreator()) { - setCreator(other.getCreator()); - } if (other.hasDispatcher()) { setDispatcher(other.getDispatcher()); } if (other.hasDeploy()) { mergeDeploy(other.getDeploy()); } + if (other.hasFromClassCreator()) { + setFromClassCreator(other.getFromClassCreator()); + } + if (other.hasCreator()) { + setCreator(other.getCreator()); + } if (other.hasRouterConfig()) { setRouterConfig(other.getRouterConfig()); } @@ -5445,10 +5494,6 @@ public final class RemoteProtocol { } public final boolean isInitialized() { - if (!hasCreator()) { - - return false; - } if (!hasDispatcher()) { return false; @@ -5489,15 +5534,10 @@ public final class RemoteProtocol { } case 10: { 
bitField0_ |= 0x00000001; - creator_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; dispatcher_ = input.readBytes(); break; } - case 26: { + case 18: { akka.remote.RemoteProtocol.DeployProtocol.Builder subBuilder = akka.remote.RemoteProtocol.DeployProtocol.newBuilder(); if (hasDeploy()) { subBuilder.mergeFrom(getDeploy()); @@ -5506,8 +5546,18 @@ public final class RemoteProtocol { setDeploy(subBuilder.buildPartial()); break; } + case 26: { + bitField0_ |= 0x00000004; + fromClassCreator_ = input.readBytes(); + break; + } case 34: { bitField0_ |= 0x00000008; + creator_ = input.readBytes(); + break; + } + case 42: { + bitField0_ |= 0x00000010; routerConfig_ = input.readBytes(); break; } @@ -5517,34 +5567,10 @@ public final class RemoteProtocol { private int bitField0_; - // required bytes creator = 1; - private com.google.protobuf.ByteString creator_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasCreator() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public com.google.protobuf.ByteString getCreator() { - return creator_; - } - public Builder setCreator(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - creator_ = value; - onChanged(); - return this; - } - public Builder clearCreator() { - bitField0_ = (bitField0_ & ~0x00000001); - creator_ = getDefaultInstance().getCreator(); - onChanged(); - return this; - } - - // required string dispatcher = 2; + // required string dispatcher = 1; private java.lang.Object dispatcher_ = ""; public boolean hasDispatcher() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000001) == 0x00000001); } public String getDispatcher() { java.lang.Object ref = dispatcher_; @@ -5560,29 +5586,29 @@ public final class RemoteProtocol { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000001; dispatcher_ = value; onChanged(); 
return this; } public Builder clearDispatcher() { - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000001); dispatcher_ = getDefaultInstance().getDispatcher(); onChanged(); return this; } void setDispatcher(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000001; dispatcher_ = value; onChanged(); } - // required .DeployProtocol deploy = 3; + // required .DeployProtocol deploy = 2; private akka.remote.RemoteProtocol.DeployProtocol deploy_ = akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< akka.remote.RemoteProtocol.DeployProtocol, akka.remote.RemoteProtocol.DeployProtocol.Builder, akka.remote.RemoteProtocol.DeployProtocolOrBuilder> deployBuilder_; public boolean hasDeploy() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000002) == 0x00000002); } public akka.remote.RemoteProtocol.DeployProtocol getDeploy() { if (deployBuilder_ == null) { @@ -5601,7 +5627,7 @@ public final class RemoteProtocol { } else { deployBuilder_.setMessage(value); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; return this; } public Builder setDeploy( @@ -5612,12 +5638,12 @@ public final class RemoteProtocol { } else { deployBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; return this; } public Builder mergeDeploy(akka.remote.RemoteProtocol.DeployProtocol value) { if (deployBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && + if (((bitField0_ & 0x00000002) == 0x00000002) && deploy_ != akka.remote.RemoteProtocol.DeployProtocol.getDefaultInstance()) { deploy_ = akka.remote.RemoteProtocol.DeployProtocol.newBuilder(deploy_).mergeFrom(value).buildPartial(); @@ -5628,7 +5654,7 @@ public final class RemoteProtocol { } else { deployBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; return this; } public Builder 
clearDeploy() { @@ -5638,11 +5664,11 @@ public final class RemoteProtocol { } else { deployBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000002); return this; } public akka.remote.RemoteProtocol.DeployProtocol.Builder getDeployBuilder() { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; onChanged(); return getDeployFieldBuilder().getBuilder(); } @@ -5667,10 +5693,70 @@ public final class RemoteProtocol { return deployBuilder_; } - // optional bytes routerConfig = 4; + // optional string fromClassCreator = 3; + private java.lang.Object fromClassCreator_ = ""; + public boolean hasFromClassCreator() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getFromClassCreator() { + java.lang.Object ref = fromClassCreator_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + fromClassCreator_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setFromClassCreator(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + fromClassCreator_ = value; + onChanged(); + return this; + } + public Builder clearFromClassCreator() { + bitField0_ = (bitField0_ & ~0x00000004); + fromClassCreator_ = getDefaultInstance().getFromClassCreator(); + onChanged(); + return this; + } + void setFromClassCreator(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000004; + fromClassCreator_ = value; + onChanged(); + } + + // optional bytes creator = 4; + private com.google.protobuf.ByteString creator_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasCreator() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public com.google.protobuf.ByteString getCreator() { + return creator_; + } + public Builder setCreator(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + creator_ = value; + 
onChanged(); + return this; + } + public Builder clearCreator() { + bitField0_ = (bitField0_ & ~0x00000008); + creator_ = getDefaultInstance().getCreator(); + onChanged(); + return this; + } + + // optional bytes routerConfig = 5; private com.google.protobuf.ByteString routerConfig_ = com.google.protobuf.ByteString.EMPTY; public boolean hasRouterConfig() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000010) == 0x00000010); } public com.google.protobuf.ByteString getRouterConfig() { return routerConfig_; @@ -5679,13 +5765,13 @@ public final class RemoteProtocol { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000010; routerConfig_ = value; onChanged(); return this; } public Builder clearRouterConfig() { - bitField0_ = (bitField0_ & ~0x00000008); + bitField0_ = (bitField0_ & ~0x00000010); routerConfig_ = getDefaultInstance().getRouterConfig(); onChanged(); return this; @@ -6947,17 +7033,17 @@ public final class RemoteProtocol { "\014\n\004port\030\003 \002(\r\"\216\001\n\027DaemonMsgCreateProtoco" + "l\022\035\n\005props\030\001 \002(\0132\016.PropsProtocol\022\037\n\006depl" + "oy\030\002 \002(\0132\017.DeployProtocol\022\014\n\004path\030\003 \002(\t\022" + - "%\n\nsupervisor\030\004 \002(\0132\021.ActorRefProtocol\"k", - "\n\rPropsProtocol\022\017\n\007creator\030\001 \002(\014\022\022\n\ndisp" + - "atcher\030\002 \002(\t\022\037\n\006deploy\030\003 \002(\0132\017.DeployPro" + - "tocol\022\024\n\014routerConfig\030\004 \001(\014\"S\n\016DeployPro" + - "tocol\022\014\n\004path\030\001 \002(\t\022\016\n\006config\030\002 \001(\014\022\024\n\014r" + - "outerConfig\030\003 \001(\014\022\r\n\005scope\030\004 \001(\014\"`\n\026Daem" + - "onMsgWatchProtocol\022\"\n\007watcher\030\001 \002(\0132\021.Ac" + - "torRefProtocol\022\"\n\007watched\030\002 \002(\0132\021.ActorR" + - "efProtocol*7\n\013CommandType\022\013\n\007CONNECT\020\001\022\014" + - 
"\n\010SHUTDOWN\020\002\022\r\n\tHEARTBEAT\020\003B\017\n\013akka.remo" + - "teH\001" + "%\n\nsupervisor\030\004 \002(\0132\021.ActorRefProtocol\"\205", + "\001\n\rPropsProtocol\022\022\n\ndispatcher\030\001 \002(\t\022\037\n\006" + + "deploy\030\002 \002(\0132\017.DeployProtocol\022\030\n\020fromCla" + + "ssCreator\030\003 \001(\t\022\017\n\007creator\030\004 \001(\014\022\024\n\014rout" + + "erConfig\030\005 \001(\014\"S\n\016DeployProtocol\022\014\n\004path" + + "\030\001 \002(\t\022\016\n\006config\030\002 \001(\014\022\024\n\014routerConfig\030\003" + + " \001(\014\022\r\n\005scope\030\004 \001(\014\"`\n\026DaemonMsgWatchPro" + + "tocol\022\"\n\007watcher\030\001 \002(\0132\021.ActorRefProtoco" + + "l\022\"\n\007watched\030\002 \002(\0132\021.ActorRefProtocol*7\n" + + "\013CommandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020\002\022" + + "\r\n\tHEARTBEAT\020\003B\017\n\013akka.remoteH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -7033,7 +7119,7 @@ public final class RemoteProtocol { internal_static_PropsProtocol_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_PropsProtocol_descriptor, - new java.lang.String[] { "Creator", "Dispatcher", "Deploy", "RouterConfig", }, + new java.lang.String[] { "Dispatcher", "Deploy", "FromClassCreator", "Creator", "RouterConfig", }, akka.remote.RemoteProtocol.PropsProtocol.class, akka.remote.RemoteProtocol.PropsProtocol.Builder.class); internal_static_DeployProtocol_descriptor = diff --git a/akka-remote/src/main/protocol/RemoteProtocol.proto b/akka-remote/src/main/protocol/RemoteProtocol.proto index 7ae11d9cb7..72b04caa57 100644 --- a/akka-remote/src/main/protocol/RemoteProtocol.proto +++ b/akka-remote/src/main/protocol/RemoteProtocol.proto @@ -92,10 +92,11 @@ message DaemonMsgCreateProtocol { * Serialization of akka.actor.Props */ 
message PropsProtocol { - required bytes creator = 1; - required string dispatcher = 2; - required DeployProtocol deploy = 3; - optional bytes routerConfig = 4; + required string dispatcher = 1; + required DeployProtocol deploy = 2; + optional string fromClassCreator = 3; + optional bytes creator = 4; + optional bytes routerConfig = 5; } /** diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala index 7bd3124792..ce54ff5adb 100644 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala @@ -5,11 +5,9 @@ package akka.serialization import java.io.Serializable - import com.google.protobuf.ByteString import com.typesafe.config.Config import com.typesafe.config.ConfigFactory - import akka.actor.Actor import akka.actor.ActorRef import akka.actor.Deploy @@ -24,12 +22,14 @@ import akka.remote.RemoteProtocol.DeployProtocol import akka.remote.RemoteProtocol.PropsProtocol import akka.routing.NoRouter import akka.routing.RouterConfig +import akka.actor.FromClassCreator /** * Serializes akka's internal DaemonMsgCreate using protobuf * for the core structure of DaemonMsgCreate, Props and Deploy. - * Serialization of contained RouterConfig, Config, Scope, and creator (scala.Function0) - * is done with configured serializer for those classes, by default java.io.Serializable. + * Serialization of contained RouterConfig, Config, and Scope + * is done with configured serializer for those classes, by + * default java.io.Serializable. */ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Serializer { import ProtobufSerializer.serializeActorRef @@ -55,9 +55,12 @@ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Seriali def propsProto = { val builder = PropsProtocol.newBuilder. - setCreator(serialize(props.creator)). 
setDispatcher(props.dispatcher). setDeploy(deployProto(props.deploy)) + props.creator match { + case FromClassCreator(clazz) ⇒ builder.setFromClassCreator(clazz.getName) + case creator ⇒ builder.setCreator(serialize(creator)) + } if (props.routerConfig != NoRouter) builder.setRouterConfig(serialize(props.routerConfig)) builder.build @@ -92,11 +95,22 @@ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Seriali } def props = { + val creator = + if (proto.getProps.hasFromClassCreator) { + system.dynamicAccess.getClassFor(proto.getProps.getFromClassCreator) match { + case Right(clazz) ⇒ FromClassCreator(clazz) + case Left(e) ⇒ throw e + } + } else { + deserialize(proto.getProps.getCreator, classOf[() ⇒ Actor]) + } + val routerConfig = if (proto.getProps.hasRouterConfig) deserialize(proto.getProps.getRouterConfig, classOf[RouterConfig]) else NoRouter + Props( - creator = deserialize(proto.getProps.getCreator, classOf[() ⇒ Actor]), + creator = creator, dispatcher = proto.getProps.getDispatcher, routerConfig = routerConfig, deploy = deploy(proto.getProps.getDeploy)) @@ -124,7 +138,6 @@ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Seriali case Left(e) ⇒ // Fallback to the java serializer, because some interfaces don't implement java.io.Serializable, // but the impl instance does. 
This could be optimized by adding java serializers in reference.conf: - // scala.Function0 (the creator) // com.typesafe.config.Config // akka.routing.RouterConfig // akka.actor.Scope diff --git a/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala b/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala index eed2c73d2c..e38a3e1d1f 100644 --- a/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala @@ -17,6 +17,7 @@ import akka.remote.RemoteScope import akka.routing.RoundRobinRouter import akka.routing.FromConfig import akka.util.duration._ +import akka.actor.FromClassCreator object DaemonMsgCreateSerializerSpec { class MyActor extends Actor { @@ -39,45 +40,51 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec { ser.serializerFor(classOf[DaemonMsgCreate]).getClass must be(classOf[DaemonMsgCreateSerializer]) } - "serialize and de-serialize simple DaemonMsgCreate" in { - - val msg = DaemonMsgCreate( - props = Props[MyActor], - deploy = Deploy(), - path = "foo", - supervisor = supervisor) - - val bytes = ser.serialize(msg) match { - case Left(exception) ⇒ fail(exception) - case Right(bytes) ⇒ bytes + "serialize and de-serialize DaemonMsgCreate with FromClassCreator" in { + verifySerialization { + DaemonMsgCreate( + props = Props[MyActor], + deploy = Deploy(), + path = "foo", + supervisor = supervisor) } - ser.deserialize(bytes.asInstanceOf[Array[Byte]], classOf[DaemonMsgCreate]) match { - case Left(exception) ⇒ fail(exception) - case Right(m: DaemonMsgCreate) ⇒ assertDaemonMsgCreate(msg, m) + } + + "serialize and de-serialize DaemonMsgCreate with function creator" in { + verifySerialization { + DaemonMsgCreate( + props = Props().withCreator(new MyActor), + deploy = Deploy(), + path = "foo", + supervisor = supervisor) } } "serialize and de-serialize DaemonMsgCreate with Deploy and RouterConfig" 
in { - // Duration.Inf doesn't equal Duration.Inf, so we use another for test - val supervisorStrategy = OneForOneStrategy(3, 10 seconds) { - case _ ⇒ SupervisorStrategy.Escalate + verifySerialization { + // Duration.Inf doesn't equal Duration.Inf, so we use another for test + val supervisorStrategy = OneForOneStrategy(3, 10 seconds) { + case _ ⇒ SupervisorStrategy.Escalate + } + val deploy1 = Deploy( + path = "path1", + config = ConfigFactory.parseString("a=1"), + routerConfig = RoundRobinRouter(nrOfInstances = 5, supervisorStrategy = supervisorStrategy), + scope = RemoteScope(Address("akka", "Test", "host1", 1921))) + val deploy2 = Deploy( + path = "path2", + config = ConfigFactory.parseString("a=2"), + routerConfig = FromConfig, + scope = RemoteScope(Address("akka", "Test", "host2", 1922))) + DaemonMsgCreate( + props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1), + deploy = deploy2, + path = "foo", + supervisor = supervisor) } - val deploy1 = Deploy( - path = "path1", - config = ConfigFactory.parseString("a=1"), - routerConfig = RoundRobinRouter(nrOfInstances = 5, supervisorStrategy = supervisorStrategy), - scope = RemoteScope(Address("akka", "Test", "host1", 1921))) - val deploy2 = Deploy( - path = "path2", - config = ConfigFactory.parseString("a=2"), - routerConfig = FromConfig, - scope = RemoteScope(Address("akka", "Test", "host2", 1922))) - val msg = DaemonMsgCreate( - props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1), - deploy = deploy2, - path = "foo", - supervisor = supervisor) + } + def verifySerialization(msg: DaemonMsgCreate): Unit = { val bytes = ser.serialize(msg) match { case Left(exception) ⇒ fail(exception) case Right(bytes) ⇒ bytes @@ -89,7 +96,9 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec { } def assertDaemonMsgCreate(expected: DaemonMsgCreate, got: DaemonMsgCreate): Unit = { - // can't compare props.creator + // can't compare props.creator when function + if 
(expected.props.creator.isInstanceOf[FromClassCreator]) + assert(got.props.creator === expected.props.creator) assert(got.props.dispatcher === expected.props.dispatcher) assert(got.props.dispatcher === expected.props.dispatcher) assert(got.props.routerConfig === expected.props.routerConfig) From b70d7c6227e7294ae16c33873f3f131bf5af8b71 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 16 May 2012 09:43:23 +0200 Subject: [PATCH 031/538] Reverted the change to serializeActorRef in DurableMessageSerialization. See #1755 * Discussed with Roland after seeing pull/457 --- .../scala/akka/actor/mailbox/DurableMailbox.scala | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala index db7b137bf0..0744215bae 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala @@ -9,7 +9,6 @@ import akka.remote.MessageSerializer import akka.remote.RemoteProtocol.{ ActorRefProtocol, RemoteMessageProtocol } import com.typesafe.config.Config import akka.actor.ActorSystem -import akka.serialization.Serialization private[akka] object DurableExecutableMailboxConfig { val Name = "[\\.\\/\\$\\s]".r @@ -27,11 +26,13 @@ abstract class DurableMessageQueue(val owner: ActorContext) extends MessageQueue trait DurableMessageSerialization { this: DurableMessageQueue ⇒ - import akka.serialization.ProtobufSerializer.serializeActorRef - import akka.serialization.ProtobufSerializer.deserializeActorRef - def serialize(durableMessage: Envelope): Array[Byte] = { + // It's alright to use ref.path.toString here + // When the sender is a LocalActorRef it should be local when deserialized also. 
+ // When the sender is a RemoteActorRef the path.toString already contains remote address information. + def serializeActorRef(ref: ActorRef): ActorRefProtocol = ActorRefProtocol.newBuilder.setPath(ref.path.toString).build + val message = MessageSerializer.serialize(system, durableMessage.message.asInstanceOf[AnyRef]) val builder = RemoteMessageProtocol.newBuilder .setMessage(message) @@ -43,9 +44,11 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ def deserialize(bytes: Array[Byte]): Envelope = { + def deserializeActorRef(refProtocol: ActorRefProtocol): ActorRef = system.actorFor(refProtocol.getPath) + val durableMessage = RemoteMessageProtocol.parseFrom(bytes) val message = MessageSerializer.deserialize(system, durableMessage.getMessage) - val sender = deserializeActorRef(system, durableMessage.getSender) + val sender = deserializeActorRef(durableMessage.getSender) new Envelope(message, sender)(system) } From fccbba0de54633be7beee2834ba4830f534ecf3b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 15:22:21 +0200 Subject: [PATCH 032/538] Docs, comments, cleanup, fairy dust... 
--- .../workbench/PerformanceSpec.scala | 7 +- .../src/main/scala/akka/actor/Deployer.scala | 20 ++- .../main/scala/akka/actor/DynamicAccess.scala | 4 +- .../src/main/scala/akka/actor/FSM.scala | 3 +- .../main/scala/akka/actor/FaultHandling.scala | 46 ++++--- akka-actor/src/main/scala/akka/actor/IO.scala | 123 +++++++++--------- .../src/main/scala/akka/actor/package.scala | 8 -- .../main/scala/akka/event/EventStream.scala | 3 +- .../src/main/scala/akka/event/Logging.scala | 16 ++- .../main/scala/akka/remote/netty/Client.scala | 23 ++-- 10 files changed, 133 insertions(+), 120 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala index ca6e42d67f..ca23dd5a33 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala @@ -3,12 +3,11 @@ package akka.performance.workbench import scala.collection.immutable.TreeMap import org.apache.commons.math.stat.descriptive.DescriptiveStatistics import org.scalatest.BeforeAndAfterEach -import akka.actor.simpleName import akka.testkit.AkkaSpec -import akka.actor.ActorSystem import akka.util.Duration import com.typesafe.config.Config import java.util.concurrent.TimeUnit +import akka.event.Logging abstract class PerformanceSpec(cfg: Config = BenchmarkConfig.config) extends AkkaSpec(cfg) with BeforeAndAfterEach { @@ -36,7 +35,7 @@ abstract class PerformanceSpec(cfg: Config = BenchmarkConfig.config) extends Akk } def logMeasurement(numberOfClients: Int, durationNs: Long, n: Long) { - val name = simpleName(this) + val name = Logging.simpleName(this) val durationS = durationNs.toDouble / 1000000000.0 val stats = Stats( @@ -51,7 +50,7 @@ abstract class PerformanceSpec(cfg: Config = BenchmarkConfig.config) extends Akk } def logMeasurement(numberOfClients: Int, durationNs: Long, stat: 
DescriptiveStatistics) { - val name = simpleName(this) + val name = Logging.simpleName(this) val durationS = durationNs.toDouble / 1000000000.0 val percentiles = TreeMap[Int, Long]( diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 2fd9538d77..47b8bf329c 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -34,8 +34,19 @@ final case class Deploy( routerConfig: RouterConfig = NoRouter, scope: Scope = NoScopeGiven) { + /** + * Java API to create a Deploy with the given RouterConfig + */ def this(routing: RouterConfig) = this("", ConfigFactory.empty, routing) + + /** + * Java API to create a Deploy with the given RouterConfig with Scope + */ def this(routing: RouterConfig, scope: Scope) = this("", ConfigFactory.empty, routing, scope) + + /** + * Java API to create a Deploy with the given Scope + */ def this(scope: Scope) = this("", ConfigFactory.empty, NoRouter, scope) /** @@ -67,13 +78,9 @@ trait Scope { //TODO add @SerialVersionUID(1L) when SI-4804 is fixed abstract class LocalScope extends Scope -case object LocalScope extends LocalScope { - /** - * Java API - */ - @deprecated("use instance() method instead", "2.0.1") - def scope: Scope = this +//FIXME docs +case object LocalScope extends LocalScope { /** * Java API: get the singleton instance */ @@ -162,5 +169,4 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce Some(Deploy(key, deployment, router, NoScopeGiven)) } - } diff --git a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala index 8d3ac68852..72ffbbe76e 100644 --- a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala @@ -14,7 +14,7 @@ import java.lang.reflect.InvocationTargetException * This is an internal facility and users are not expected to 
encounter it * unless they are extending Akka in ways which go beyond simple Extensions. */ -trait DynamicAccess { +abstract class DynamicAccess { /** * Convenience method which given a `Class[_]` object and a constructor description @@ -88,7 +88,7 @@ trait DynamicAccess { * by default. */ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAccess { - + //FIXME switch to Scala Reflection for 2.10 override def getClassFor[T: ClassManifest](fqcn: String): Either[Throwable, Class[_ <: T]] = try { val c = classLoader.loadClass(fqcn).asInstanceOf[Class[_ <: T]] diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 3d1f8930c4..71d1ec7e69 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -6,9 +6,10 @@ package akka.actor import akka.util._ import scala.collection.mutable -import akka.event.Logging import akka.routing.{ Deafen, Listen, Listeners } +//FIXME: Roland, could you go through this file? + object FSM { object NullFunction extends PartialFunction[Any, Nothing] { diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 70246bab30..383010f9de 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -9,8 +9,13 @@ import scala.collection.JavaConversions._ import java.lang.{ Iterable ⇒ JIterable } import akka.util.Duration +/** + * ChildRestartStats is the statistics kept by every parent Actor for every child Actor + * and is used for SupervisorStrategies to know how to deal with problems that occur for the children. + */ case class ChildRestartStats(val child: ActorRef, var maxNrOfRetriesCount: Int = 0, var restartTimeWindowStartNanos: Long = 0L) { + //FIXME How about making ChildRestartStats immutable and then move these methods into the actual supervisor strategies? 
def requestRestartPermission(retriesWindow: (Option[Int], Option[Int])): Boolean = retriesWindow match { case (Some(retries), _) if retries < 1 ⇒ false @@ -160,19 +165,19 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { def makeDecider(flat: Iterable[CauseDirective]): Decider = { val directives = sort(flat) - { - case x ⇒ directives find (_._1 isInstance x) map (_._2) getOrElse Escalate - } + { case x ⇒ directives find (_._1 isInstance x) map (_._2) getOrElse Escalate } } - def makeDecider(func: JDecider): Decider = { - case x ⇒ func(x) - } + /** + * Converts a Java Decider into a Scala Decider + */ + def makeDecider(func: JDecider): Decider = { case x ⇒ func(x) } /** * Sort so that subtypes always precede their supertypes, but without * obeying any order between unrelated subtypes (insert sort). */ + //FIXME Should this really be public API? def sort(in: Iterable[CauseDirective]): Seq[CauseDirective] = (new ArrayBuffer[CauseDirective](in.size) /: in) { (buf, ca) ⇒ buf.indexWhere(_._1 isAssignableFrom ca._1) match { @@ -184,14 +189,21 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { private[akka] def withinTimeRangeOption(withinTimeRange: Duration): Option[Duration] = if (withinTimeRange.isFinite && withinTimeRange >= Duration.Zero) Some(withinTimeRange) else None + private[akka] def maxNrOfRetriesOption(maxNrOfRetries: Int): Option[Int] = if (maxNrOfRetries < 0) None else Some(maxNrOfRetries) } +/** + * An Akka SupervisorStrategy is + */ abstract class SupervisorStrategy { import SupervisorStrategy._ + /** + * Returns the Decider that is associated with this SupervisorStrategy + */ def decider: Decider /** @@ -204,21 +216,19 @@ abstract class SupervisorStrategy { */ def processFailure(context: ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Unit - def handleSupervisorFailing(supervisor: ActorRef, children: 
Iterable[ActorRef]): Unit = { - if (children.nonEmpty) - children.foreach(_.asInstanceOf[InternalActorRef].suspend()) - } + //FIXME docs + def handleSupervisorFailing(supervisor: ActorRef, children: Iterable[ActorRef]): Unit = + if (children.nonEmpty) children.foreach(_.asInstanceOf[InternalActorRef].suspend()) - def handleSupervisorRestarted(cause: Throwable, supervisor: ActorRef, children: Iterable[ActorRef]): Unit = { - if (children.nonEmpty) - children.foreach(_.asInstanceOf[InternalActorRef].restart(cause)) - } + //FIXME docs + def handleSupervisorRestarted(cause: Throwable, supervisor: ActorRef, children: Iterable[ActorRef]): Unit = + if (children.nonEmpty) children.foreach(_.asInstanceOf[InternalActorRef].restart(cause)) /** * Returns whether it processed the failure or not */ def handleFailure(context: ActorContext, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Boolean = { - val directive = if (decider.isDefinedAt(cause)) decider(cause) else Escalate + val directive = if (decider.isDefinedAt(cause)) decider(cause) else Escalate //FIXME applyOrElse in Scala 2.10 directive match { case Resume ⇒ child.asInstanceOf[InternalActorRef].resume(); true case Restart ⇒ processFailure(context, true, child, cause, stats, children); true @@ -242,6 +252,8 @@ abstract class SupervisorStrategy { case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration = Duration.Inf)(val decider: SupervisorStrategy.Decider) extends SupervisorStrategy { + import SupervisorStrategy._ + def this(maxNrOfRetries: Int, withinTimeRange: Duration, decider: SupervisorStrategy.JDecider) = this(maxNrOfRetries, withinTimeRange)(SupervisorStrategy.makeDecider(decider)) @@ -256,9 +268,7 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration * every call to requestRestartPermission, assuming that strategies are shared * across actors and thus this field does not take up much space */ - private val 
retriesWindow = ( - SupervisorStrategy.maxNrOfRetriesOption(maxNrOfRetries), - SupervisorStrategy.withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt)) + private val retriesWindow = (maxNrOfRetriesOption(maxNrOfRetries), withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt)) def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit = {} diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index 72eaf32e83..3ff91c4fa8 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -30,7 +30,7 @@ import java.util.UUID */ object IO { - final class DivergentIterateeException extends Exception("Iteratees should not return a continuation when receiving EOF") + final class DivergentIterateeException extends IllegalStateException("Iteratees should not return a continuation when receiving EOF") /** * An immutable handle to a Java NIO Channel. Contains a reference to the @@ -64,14 +64,14 @@ object IO { * A [[akka.actor.IO.Handle]] to a ReadableByteChannel. */ sealed trait ReadHandle extends Handle with Product { - override def asReadable = this + override def asReadable: ReadHandle = this } /** * A [[akka.actor.IO.Handle]] to a WritableByteChannel. */ sealed trait WriteHandle extends Handle with Product { - override def asWritable = this + override def asWritable: WriteHandle = this /** * Sends a request to the [[akka.actor.IOManager]] to write to the @@ -89,7 +89,7 @@ object IO { * [[akka.actor.IO.ServerHandle]].accept(). */ case class SocketHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = UUID.randomUUID()) extends ReadHandle with WriteHandle { - override def asSocket = this + override def asSocket: SocketHandle = this } /** @@ -97,7 +97,7 @@ object IO { * normally created by [[akka.actor.IOManager]].listen(). 
*/ case class ServerHandle(owner: ActorRef, ioManager: ActorRef, uuid: UUID = UUID.randomUUID()) extends Handle { - override def asServer = this + override def asServer: ServerHandle = this /** * Sends a request to the [[akka.actor.IOManager]] to accept an incoming @@ -320,16 +320,18 @@ object IO { } object Chunk { - val empty = Chunk(ByteString.empty) + val empty = new Chunk(ByteString.empty) } /** * Part of an [[akka.actor.IO.Input]] stream that contains a chunk of bytes. */ case class Chunk(bytes: ByteString) extends Input { - def ++(that: Input) = that match { - case Chunk(more) ⇒ Chunk(bytes ++ more) - case _: EOF ⇒ that + final override def ++(that: Input): Input = that match { + case Chunk(more) if more.isEmpty ⇒ this + case c: Chunk if bytes.isEmpty ⇒ c + case Chunk(more) ⇒ Chunk(bytes ++ more) + case _: EOF ⇒ that } } @@ -342,7 +344,7 @@ object IO { * Iteratee.recover() in order to handle it properly. */ case class EOF(cause: Option[Exception]) extends Input { - def ++(that: Input) = that + final override def ++(that: Input) = that } object Iteratee { @@ -352,7 +354,15 @@ object IO { * inferred as an Iteratee and not as a Done. */ def apply[A](value: A): Iteratee[A] = Done(value) + + /** + * Returns Iteratee.unit + */ def apply(): Iteratee[Unit] = unit + + /** + * The single value representing Done(()) + */ val unit: Iteratee[Unit] = Done(()) } @@ -445,6 +455,7 @@ object IO { */ final case class Cont[+A](f: Input ⇒ (Iteratee[A], Input), error: Option[Exception] = None) extends Iteratee[A] + //FIXME general description of what an IterateeRef is and how it is used, potentially with link to docs object IterateeRef { /** @@ -477,13 +488,14 @@ object IO { * 'refFactory' is used to provide the default value for new keys. 
*/ class Map[K, V] private (refFactory: ⇒ IterateeRef[V], underlying: mutable.Map[K, IterateeRef[V]] = mutable.Map.empty[K, IterateeRef[V]]) extends mutable.Map[K, IterateeRef[V]] { - def get(key: K) = Some(underlying.getOrElseUpdate(key, refFactory)) - def iterator = underlying.iterator - def +=(kv: (K, IterateeRef[V])) = { underlying += kv; this } - def -=(key: K) = { underlying -= key; this } + override def get(key: K) = Some(underlying.getOrElseUpdate(key, refFactory)) + override def iterator = underlying.iterator + override def +=(kv: (K, IterateeRef[V])) = { underlying += kv; this } + override def -=(key: K) = { underlying -= key; this } override def empty = new Map[K, V](refFactory) } + //FIXME general description of what an Map is and how it is used, potentially with link to docs object Map { /** * Uses a factory to create the initial IterateeRef for each new key. @@ -500,7 +512,6 @@ object IO { */ def async[K]()(implicit executor: ExecutionContext): IterateeRef.Map[K, Unit] = new Map(IterateeRef.async()) } - } /** @@ -510,8 +521,11 @@ object IO { * for details. 
*/ trait IterateeRef[A] { + //FIXME Add docs def flatMap(f: A ⇒ Iteratee[A]): Unit + //FIXME Add docs def map(f: A ⇒ A): Unit + //FIXME Add docs def apply(input: Input): Unit } @@ -528,12 +542,16 @@ object IO { */ final class IterateeRefSync[A](initial: Iteratee[A]) extends IterateeRef[A] { private var _value: (Iteratee[A], Input) = (initial, Chunk.empty) - def flatMap(f: A ⇒ Iteratee[A]): Unit = _value = _value match { + override def flatMap(f: A ⇒ Iteratee[A]): Unit = _value = _value match { case (iter, chunk @ Chunk(bytes)) if bytes.nonEmpty ⇒ (iter flatMap f)(chunk) case (iter, input) ⇒ (iter flatMap f, input) } - def map(f: A ⇒ A): Unit = _value = (_value._1 map f, _value._2) - def apply(input: Input): Unit = _value = _value._1(_value._2 ++ input) + override def map(f: A ⇒ A): Unit = _value = (_value._1 map f, _value._2) + override def apply(input: Input): Unit = _value = _value._1(_value._2 ++ input) + + /** + * Returns the current value of this IterateeRefSync + */ def value: (Iteratee[A], Input) = _value } @@ -553,12 +571,16 @@ object IO { */ final class IterateeRefAsync[A](initial: Iteratee[A])(implicit executor: ExecutionContext) extends IterateeRef[A] { private var _value: Future[(Iteratee[A], Input)] = Future((initial, Chunk.empty)) - def flatMap(f: A ⇒ Iteratee[A]): Unit = _value = _value map { + override def flatMap(f: A ⇒ Iteratee[A]): Unit = _value = _value map { case (iter, chunk @ Chunk(bytes)) if bytes.nonEmpty ⇒ (iter flatMap f)(chunk) case (iter, input) ⇒ (iter flatMap f, input) } - def map(f: A ⇒ A): Unit = _value = _value map (v ⇒ (v._1 map f, v._2)) - def apply(input: Input): Unit = _value = _value map (v ⇒ v._1(v._2 ++ input)) + override def map(f: A ⇒ A): Unit = _value = _value map (v ⇒ (v._1 map f, v._2)) + override def apply(input: Input): Unit = _value = _value map (v ⇒ v._1(v._2 ++ input)) + + /** + * Returns a Future which will hold the future value of this IterateeRefAsync + */ def future: Future[(Iteratee[A], Input)] = _value } @@ 
-702,10 +724,9 @@ object IO { /** * An Iteratee that continually repeats an Iteratee. * - * TODO: Should terminate on EOF + * FIXME TODO: Should terminate on EOF */ - def repeat(iter: Iteratee[Unit]): Iteratee[Unit] = - iter flatMap (_ ⇒ repeat(iter)) + def repeat(iter: Iteratee[Unit]): Iteratee[Unit] = iter flatMap (_ ⇒ repeat(iter)) /** * An Iteratee that applies an Iteratee to each element of a Traversable @@ -780,7 +801,7 @@ object IO { * An IOManager does not need to be manually stopped when not in use as it will * automatically enter an idle state when it has no channels to manage. */ -final class IOManager private (system: ActorSystem) extends Extension { +final class IOManager private (system: ActorSystem) extends Extension { //FIXME how about taking an ActorContext /** * A reference to the [[akka.actor.IOManagerActor]] that performs the actual * IO. It communicates with other actors using subclasses of @@ -861,9 +882,10 @@ final class IOManager private (system: ActorSystem) extends Extension { } +//FIXME add docs object IOManager extends ExtensionId[IOManager] with ExtensionIdProvider { - override def lookup = this - override def createExtension(system: ExtendedActorSystem) = new IOManager(system) + override def lookup: IOManager.type = this + override def createExtension(system: ExtendedActorSystem): IOManager = new IOManager(system) } /** @@ -874,7 +896,7 @@ object IOManager extends ExtensionId[IOManager] with ExtensionIdProvider { final class IOManagerActor extends Actor with ActorLogging { import SelectionKey.{ OP_READ, OP_WRITE, OP_ACCEPT, OP_CONNECT } - private val bufferSize = 8192 // TODO: make buffer size configurable + private val bufferSize = 8192 // FIXME TODO: make configurable private type ReadChannel = ReadableByteChannel with SelectableChannel private type WriteChannel = WritableByteChannel with SelectableChannel @@ -897,7 +919,7 @@ final class IOManagerActor extends Actor with ActorLogging { private var lastSelect = 0 /** force a select 
when lastSelect reaches this amount */ - private val selectAt = 100 + private val selectAt = 100 // FIXME TODO: make configurable /** true while the selector is open and channels.nonEmpty */ private var running = false @@ -947,9 +969,7 @@ final class IOManagerActor extends Actor with ActorLogging { lastSelect = 0 } - private def forwardFailure(f: ⇒ Unit): Unit = { - try { f } catch { case NonFatal(e) ⇒ sender ! Status.Failure(e) } - } + private def forwardFailure(f: ⇒ Unit): Unit = try f catch { case NonFatal(e) ⇒ sender ! Status.Failure(e) } private def setSocketOptions(socket: java.net.Socket, options: Seq[IO.SocketOption]) { options foreach { @@ -985,7 +1005,7 @@ final class IOManagerActor extends Actor with ActorLogging { forwardFailure(sock.setPerformancePreferences(connTime, latency, bandwidth)) } - channel.socket bind (address, 1000) // TODO: make backlog configurable + channel.socket bind (address, 1000) // FIXME TODO: make backlog configurable channels update (server, channel) channel register (selector, OP_ACCEPT, server) server.owner ! IO.Listening(server, channel.socket.getLocalSocketAddress()) @@ -1048,29 +1068,13 @@ final class IOManagerActor extends Actor with ActorLogging { private def process(key: SelectionKey) { val handle = key.attachment.asInstanceOf[IO.Handle] try { - if (key.isConnectable) key.channel match { - case channel: SocketChannel ⇒ connect(handle.asSocket, channel) - } - if (key.isAcceptable) key.channel match { - case channel: ServerSocketChannel ⇒ accept(handle.asServer, channel) - } - if (key.isReadable) key.channel match { - case channel: ReadChannel ⇒ read(handle.asReadable, channel) - } - if (key.isWritable) key.channel match { - case channel: WriteChannel ⇒ - try { - write(handle.asWritable, channel) - } catch { - case e: IOException ⇒ - // ignore, let it fail on read to ensure nothing left in read buffer. 
- } - } + if (key.isConnectable) key.channel match { case channel: SocketChannel ⇒ connect(handle.asSocket, channel) } + if (key.isAcceptable) key.channel match { case channel: ServerSocketChannel ⇒ accept(handle.asServer, channel) } + if (key.isReadable) key.channel match { case channel: ReadChannel ⇒ read(handle.asReadable, channel) } + if (key.isWritable) key.channel match { case channel: WriteChannel ⇒ try write(handle.asWritable, channel) catch { case e: IOException ⇒ } } // ignore, let it fail on read to ensure nothing left in read buffer. } catch { - case e: ClassCastException ⇒ cleanup(handle, Some(e)) - case e: CancelledKeyException ⇒ cleanup(handle, Some(e)) - case e: IOException ⇒ cleanup(handle, Some(e)) - case e: ActorInitializationException ⇒ cleanup(handle, Some(e)) + case e @ (_: ClassCastException | _: CancelledKeyException | _: IOException | _: ActorInitializationException) ⇒ + cleanup(handle, Some(e.asInstanceOf[Exception])) //Scala patmat is broken } } @@ -1089,9 +1093,6 @@ final class IOManagerActor extends Actor with ActorLogging { } } - private def setOps(handle: IO.Handle, ops: Int): Unit = - channels(handle) keyFor selector interestOps ops - private def addOps(handle: IO.Handle, ops: Int) { val key = channels(handle) keyFor selector val cur = key.interestOps @@ -1157,9 +1158,9 @@ final class IOManagerActor extends Actor with ActorLogging { } } } - } +//FIXME is this public API? 
final class WriteBuffer(bufferSize: Int) { private val _queue = new java.util.ArrayDeque[ByteString] private val _buffer = ByteBuffer.allocate(bufferSize) @@ -1181,9 +1182,9 @@ final class WriteBuffer(bufferSize: Int) { this } - def length = _length + def length: Int = _length - def isEmpty = _length == 0 + def isEmpty: Boolean = _length == 0 def write(channel: WritableByteChannel with SelectableChannel): Int = { @tailrec diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index 617e3fee5c..3bf56b8bc4 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -7,12 +7,4 @@ package akka package object actor { implicit def actorRef2Scala(ref: ActorRef): ScalaActorRef = ref.asInstanceOf[ScalaActorRef] implicit def scala2ActorRef(ref: ScalaActorRef): ActorRef = ref.asInstanceOf[ActorRef] - - def simpleName(obj: AnyRef): String = simpleName(obj.getClass) - - def simpleName(clazz: Class[_]): String = { - val n = clazz.getName - val i = n.lastIndexOf('.') - n.substring(i + 1) - } } diff --git a/akka-actor/src/main/scala/akka/event/EventStream.scala b/akka-actor/src/main/scala/akka/event/EventStream.scala index 27f0c71515..172cf052ca 100644 --- a/akka-actor/src/main/scala/akka/event/EventStream.scala +++ b/akka-actor/src/main/scala/akka/event/EventStream.scala @@ -3,7 +3,8 @@ */ package akka.event -import akka.actor.{ ActorRef, ActorSystem, simpleName } +import akka.actor.{ ActorRef, ActorSystem } +import akka.event.Logging.simpleName import akka.util.Subclassification object EventStream { diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index bf4fc7996d..2cda6469da 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -275,8 +275,8 @@ object LogSource { // this one unfortunately does not work as implicit, 
because existential types have some weird behavior val fromClass: LogSource[Class[_]] = new LogSource[Class[_]] { - def genString(c: Class[_]) = simpleName(c) - override def genString(c: Class[_], system: ActorSystem) = simpleName(c) + "(" + system + ")" + def genString(c: Class[_]) = Logging.simpleName(c) + override def genString(c: Class[_], system: ActorSystem) = genString(c) + "(" + system + ")" override def getClazz(c: Class[_]) = c } implicit def fromAnyClass[T]: LogSource[Class[T]] = fromClass.asInstanceOf[LogSource[Class[T]]] @@ -310,7 +310,7 @@ object LogSource { case a: Actor ⇒ apply(a) case a: ActorRef ⇒ apply(a) case s: String ⇒ apply(s) - case x ⇒ (simpleName(x), x.getClass) + case x ⇒ (Logging.simpleName(x), x.getClass) } /** @@ -324,7 +324,7 @@ object LogSource { case a: Actor ⇒ apply(a) case a: ActorRef ⇒ apply(a) case s: String ⇒ apply(s) - case x ⇒ (simpleName(x) + "(" + system + ")", x.getClass) + case x ⇒ (Logging.simpleName(x) + "(" + system + ")", x.getClass) } } @@ -363,6 +363,14 @@ object LogSource { */ object Logging { + def simpleName(obj: AnyRef): String = simpleName(obj.getClass) + + def simpleName(clazz: Class[_]): String = { + val n = clazz.getName + val i = n.lastIndexOf('.') + n.substring(i + 1) + } + object Extension extends ExtensionKey[LogExt] class LogExt(system: ExtendedActorSystem) extends Extension { diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 7baf3011ee..84c5764cf5 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -3,27 +3,22 @@ */ package akka.remote.netty -import java.net.InetSocketAddress -import org.jboss.netty.util.HashedWheelTimer +import java.util.concurrent.TimeUnit +import java.net.{ InetAddress, InetSocketAddress } +import org.jboss.netty.util.{ Timeout, TimerTask, HashedWheelTimer } import org.jboss.netty.bootstrap.ClientBootstrap import 
org.jboss.netty.channel.group.DefaultChannelGroup -import org.jboss.netty.channel.{ ChannelHandler, StaticChannelPipeline, SimpleChannelUpstreamHandler, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } +import org.jboss.netty.channel.{ ChannelFutureListener, ChannelHandler, StaticChannelPipeline, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler +import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } + import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } -import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected } -import akka.actor.{ simpleName, Address } +import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected, RemoteClientWriteFailed } +import akka.actor.{ Address, ActorRef } import akka.AkkaException import akka.event.Logging import akka.util.Switch -import akka.actor.ActorRef -import org.jboss.netty.channel.ChannelFutureListener -import akka.remote.RemoteClientWriteFailed -import java.net.InetAddress -import org.jboss.netty.util.TimerTask -import org.jboss.netty.util.Timeout -import java.util.concurrent.TimeUnit -import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } class RemoteClientMessageBufferException(message: String, cause: Throwable) extends AkkaException(message, cause) 
{ def this(msg: String) = this(msg, null) @@ -40,7 +35,7 @@ abstract class RemoteClient private[akka] ( val log = Logging(netty.system, "RemoteClient") - val name = simpleName(this) + "@" + remoteAddress + val name = Logging.simpleName(this) + "@" + remoteAddress private[remote] val runSwitch = new Switch() From cd31b4b1039e07751fc706b39f25203006f7ecd6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 15:38:22 +0200 Subject: [PATCH 033/538] Touch-up of Props --- .../src/main/scala/akka/actor/Props.scala | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 3751898c5c..ffc9574421 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -18,12 +18,24 @@ import akka.routing._ */ object Props { + /** + * The defaultCreator, simply throws an UnsupportedOperationException when applied, which is used when creating a Props + */ final val defaultCreator: () ⇒ Actor = () ⇒ throw new UnsupportedOperationException("No actor creator specified!") + /** + * The defaultRoutedProps is NoRouter which is used when creating a Props + */ final val defaultRoutedProps: RouterConfig = NoRouter + /** + * The default Deploy instance which is used when creating a Props + */ final val defaultDeploy = Deploy() + /** + * A Props instance whose creator will create an actor that doesn't respond to any message + */ final val empty = new Props(() ⇒ new Actor { def receive = Actor.emptyBehavior }) /** @@ -49,8 +61,7 @@ object Props { * Returns a Props that has default values except for "creator" which will be a function that creates an instance * of the supplied class using the default constructor. 
*/ - def apply(actorClass: Class[_ <: Actor]): Props = - default.withCreator(actorClass) + def apply(actorClass: Class[_ <: Actor]): Props = default.withCreator(actorClass) /** * Returns a Props that has default values except for "creator" which will be a function that creates an instance @@ -58,18 +69,18 @@ object Props { * * Scala API. */ - def apply(creator: ⇒ Actor): Props = - default.withCreator(creator) + def apply(creator: ⇒ Actor): Props = default.withCreator(creator) /** * Returns a Props that has default values except for "creator" which will be a function that creates an instance * using the supplied thunk. */ - def apply(creator: Creator[_ <: Actor]): Props = - default.withCreator(creator.create) + def apply(creator: Creator[_ <: Actor]): Props = default.withCreator(creator.create) - def apply(behavior: ActorContext ⇒ Actor.Receive): Props = - apply(new Actor { def receive = behavior(context) }) + /** + * Returns a new Props whose creator will instantiate an Actor that has the behavior specified + */ + def apply(behavior: ActorContext ⇒ Actor.Receive): Props = apply(new Actor { def receive = behavior(context) }) } /** From c0cead3aad13ed178b4e2f0f6fd9ca031068db4f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:16:31 +0200 Subject: [PATCH 034/538] Cleaning up Scheduler, rewriting ContinuousCancellable --- .../src/main/scala/akka/actor/Scheduler.scala | 56 +++++++------------ 1 file changed, 21 insertions(+), 35 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 827e511308..6155cab10c 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -9,6 +9,8 @@ import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTi import akka.event.LoggingAdapter import akka.dispatch.MessageDispatcher import java.io.Closeable +import java.util.concurrent.atomic.AtomicReference 
+import scala.annotation.tailrec //#scheduler /** @@ -188,11 +190,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, private trait ContinuousScheduling { this: TimerTask ⇒ def scheduleNext(timeout: HWTimeout, delay: Duration, delegator: ContinuousCancellable) { - try { - delegator.swap(timeout.getTimer.newTimeout(this, delay)) - } catch { - case _: IllegalStateException ⇒ // stop recurring if timer is stopped - } + try delegator.swap(timeout.getTimer.newTimeout(this, delay)) catch { case _: IllegalStateException ⇒ } // stop recurring if timer is stopped } } @@ -203,7 +201,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, } } - def close() = { + def close(): Unit = { import scala.collection.JavaConverters._ hashedWheelTimer.stop().asScala foreach execDirectly } @@ -214,43 +212,31 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, * methods. Needed to be able to cancel continuous tasks, * since they create new Timeout for each tick. */ -private[akka] class ContinuousCancellable extends Cancellable { - @volatile - private var delegate: HWTimeout = _ - @volatile - private var cancelled = false - +private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout] with Cancellable { private[akka] def init(initialTimeout: HWTimeout): this.type = { - delegate = initialTimeout + assert(compareAndSet(null, initialTimeout)) this } - private[akka] def swap(newTimeout: HWTimeout): Unit = { - val wasCancelled = isCancelled - delegate = newTimeout - if (wasCancelled || isCancelled) cancel() + @tailrec private[akka] final def swap(newTimeout: HWTimeout): Unit = get match { + case null ⇒ newTimeout.cancel() + case some if some.isCancelled ⇒ cancel(); newTimeout.cancel() + case some ⇒ if (!compareAndSet(some, newTimeout)) swap(newTimeout) } - def isCancelled(): Boolean = { - // delegate is initially null, but this object will not be exposed to the world until after init - cancelled || delegate.isCancelled() + def isCancelled(): Boolean 
= get match { + case null ⇒ true + case some ⇒ isCancelled() } - def cancel(): Unit = { - // the underlying Timeout will not become cancelled once the task has been started to run, - // therefore we keep a flag here to make sure that rescheduling doesn't occur when cancelled - cancelled = true - // delegate is initially null, but this object will not be exposed to the world until after init - delegate.cancel() - } + def cancel(): Unit = + getAndSet(null) match { + case null ⇒ + case some ⇒ some.cancel() + } } -class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { - def cancel() { - timeout.cancel() - } - - def isCancelled: Boolean = { - timeout.isCancelled - } +private[akka] class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { + override def cancel(): Unit = timeout.cancel() + override def isCancelled: Boolean = timeout.isCancelled } From 07c9bfe6d7b058cd5883e3b62c00a0fd3cc4babf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:22:59 +0200 Subject: [PATCH 035/538] Making sure that the ContinuousCancellable can never be re-initialized --- .../src/main/scala/akka/actor/Scheduler.scala | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 6155cab10c..3bb524ad92 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -5,7 +5,7 @@ package akka.actor import akka.util.Duration -import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout } +import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout, Timer } import akka.event.LoggingAdapter import akka.dispatch.MessageDispatcher import java.io.Closeable @@ -207,14 +207,23 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, } } +private[akka] object ContinuousCancellable { + val initial: HWTimeout = 
new HWTimeout { + override def getTimer: Timer = null + override def getTask: TimerTask = null + override def isExpired: Boolean = false + override def isCancelled: Boolean = false + override def cancel: Unit = () + } +} /** * Wrapper of a [[org.jboss.netty.akka.util.Timeout]] that delegates all * methods. Needed to be able to cancel continuous tasks, * since they create new Timeout for each tick. */ -private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout] with Cancellable { +private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout](ContinuousCancellable.initial) with Cancellable { private[akka] def init(initialTimeout: HWTimeout): this.type = { - assert(compareAndSet(null, initialTimeout)) + compareAndSet(ContinuousCancellable.initial, initialTimeout) this } From 7da74effe6d0118960b0441b293d2f0f389b5095 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:28:43 +0200 Subject: [PATCH 036/538] Adding overrides on the DefaultScheduler --- .../src/main/scala/akka/actor/Scheduler.scala | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 3bb524ad92..91e54a592d 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -121,7 +121,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, dispatcher: ⇒ MessageDispatcher) extends Scheduler with Closeable { - def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable = { + override def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable = { val continuousCancellable = new ContinuousCancellable continuousCancellable.init( hashedWheelTimer.newTimeout( @@ -136,7 +136,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, initialDelay)) } - 
def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = { + override def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = { val continuousCancellable = new ContinuousCancellable continuousCancellable.init( hashedWheelTimer.newTimeout( @@ -150,7 +150,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, initialDelay)) } - def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = { + override def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = { val continuousCancellable = new ContinuousCancellable continuousCancellable.init( hashedWheelTimer.newTimeout( @@ -163,7 +163,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, initialDelay)) } - def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = + override def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = new DefaultCancellable( hashedWheelTimer.newTimeout( new TimerTask() { @@ -171,7 +171,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, }, delay)) - def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = + override def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = new DefaultCancellable( hashedWheelTimer.newTimeout( new TimerTask { @@ -179,7 +179,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, }, delay)) - def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = + override def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = new DefaultCancellable( hashedWheelTimer.newTimeout( new TimerTask with Runnable { @@ -201,7 +201,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, } } - def close(): Unit = { + override def close(): Unit = { import scala.collection.JavaConverters._ hashedWheelTimer.stop().asScala foreach execDirectly } From a4f990029827aa8798c94655ebf831a3442e524b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 
16:30:28 +0200 Subject: [PATCH 037/538] Adding some docs to Stash --- akka-actor/src/main/scala/akka/actor/Stash.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/Stash.scala b/akka-actor/src/main/scala/akka/actor/Stash.scala index 6672945522..386bc0f070 100644 --- a/akka-actor/src/main/scala/akka/actor/Stash.scala +++ b/akka-actor/src/main/scala/akka/actor/Stash.scala @@ -56,7 +56,7 @@ trait Stash { /* The capacity of the stash. Configured in the actor's dispatcher config. */ - private val capacity = { + private val capacity: Int = { val dispatcher = context.system.settings.config.getConfig(context.props.dispatcher) val config = dispatcher.withFallback(context.system.settings.config.getConfig("akka.actor.default-dispatcher")) config.getInt("stash-capacity") @@ -125,4 +125,7 @@ An (unbounded) deque-based mailbox can be configured as follows: } +/** + * Is thrown when the size of the Stash exceeds the capacity of the Stash + */ class StashOverflowException(message: String, cause: Throwable = null) extends AkkaException(message, cause) From 0527f81c20c868474926a7791596a96ad9ede0e0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 16:45:36 +0200 Subject: [PATCH 038/538] Adding docs and making things private to akka for TypedActors --- .../main/scala/akka/actor/TypedActor.scala | 49 +++++++++++++++---- 1 file changed, 39 insertions(+), 10 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 4d85542d36..9bb560417b 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -6,21 +6,28 @@ package akka.actor import akka.japi.{ Creator, Option ⇒ JOption } import java.lang.reflect.{ InvocationTargetException, Method, InvocationHandler, Proxy } -import akka.util.{ Timeout, NonFatal } +import akka.util.{ Timeout, NonFatal, Duration } import 
java.util.concurrent.atomic.{ AtomicReference ⇒ AtomVar } import akka.dispatch._ import java.util.concurrent.TimeoutException import java.util.concurrent.TimeUnit.MILLISECONDS -import java.lang.IllegalStateException -import akka.util.Duration import akka.actor.TypedActor.TypedActorInvocationHandler -import akka.serialization.{ JavaSerializer, Serialization, SerializationExtension } +import akka.serialization.{ JavaSerializer, SerializationExtension } import java.io.ObjectStreamException +/** + * A TypedActorFactory is something that can created TypedActor instances. + */ trait TypedActorFactory { + /** + * Underlying dependency is to be able to create normal Actors + */ protected def actorFactory: ActorRefFactory + /** + * Underlying dependency to a TypedActorExtension, which can either be contextual or ActorSystem "global" + */ protected def typedActor: TypedActorExtension /** @@ -80,6 +87,9 @@ trait TypedActorFactory { } +/** + * This represents the TypedActor Akka Extension, access to the functionality is done through a given ActorSystem. 
+ */ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvider { override def get(system: ActorSystem): TypedActorExtension = super.get(system) @@ -145,8 +155,10 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi /** * Represents the serialized form of a MethodCall, uses readResolve and writeReplace to marshall the call + * + * INTERNAL USE ONLY */ - case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializedParameters: Array[(Int, Class[_], Array[Byte])]) { + private[akka] case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializedParameters: Array[(Int, Class[_], Array[Byte])]) { //TODO implement writeObject and readObject to serialize //TODO Possible optimization is to special encode the parameter-types to conserve space @@ -213,6 +225,8 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi /** * Implementation of TypedActor as an Actor + * + * INTERNAL USE ONLY */ private[akka] class TypedActor[R <: AnyRef, T <: R](val proxyVar: AtomVar[R], createInstance: ⇒ T) extends Actor { val me = try { @@ -371,6 +385,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi def postRestart(reason: Throwable): Unit } + /** + * INTERNAL USE ONLY + */ private[akka] class TypedActorInvocationHandler(@transient val extension: TypedActorExtension, @transient val actorVar: AtomVar[ActorRef], @transient val timeout: Timeout) extends InvocationHandler with Serializable { def actor = actorVar.get @throws(classOf[Throwable]) @@ -396,6 +413,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi @throws(classOf[ObjectStreamException]) private def writeReplace(): AnyRef = SerializedTypedActorInvocationHandler(actor, timeout.duration) } + /** + * INTERNAL USE ONLY + */ private[akka] case class 
SerializedTypedActorInvocationHandler(val actor: ActorRef, val timeout: Duration) { @throws(classOf[ObjectStreamException]) private def readResolve(): AnyRef = JavaSerializer.currentSystem.value match { case null ⇒ throw new IllegalStateException("SerializedTypedActorInvocationHandler.readResolve requires that JavaSerializer.currentSystem.value is set to a non-null value") @@ -569,12 +589,16 @@ case class TypedProps[T <: AnyRef] protected[TypedProps] ( def withoutInterface(interface: Class[_ >: T]): TypedProps[T] = this.copy(interfaces = interfaces diff TypedProps.extractInterfaces(interface)) - import akka.actor.{ Props ⇒ ActorProps } - def actorProps(): ActorProps = - if (dispatcher == ActorProps().dispatcher) ActorProps() - else ActorProps(dispatcher = dispatcher) + /** + * Returns the akka.actor.Props representation of this TypedProps + */ + def actorProps(): Props = if (dispatcher == Props().dispatcher) Props() else Props(dispatcher = dispatcher) } +/** + * ContextualTypedActorFactory allows TypedActors to create children, effectively forming the same Actor Supervision Hierarchies + * as normal Actors can. 
+ */ case class ContextualTypedActorFactory(typedActor: TypedActorExtension, actorFactory: ActorContext) extends TypedActorFactory { override def getActorRefFor(proxy: AnyRef): ActorRef = typedActor.getActorRefFor(proxy) override def isTypedActor(proxyOrNot: AnyRef): Boolean = typedActor.isTypedActor(proxyOrNot) @@ -607,7 +631,9 @@ class TypedActorExtension(system: ExtendedActorSystem) extends TypedActorFactory def isTypedActor(proxyOrNot: AnyRef): Boolean = invocationHandlerFor(proxyOrNot) ne null // Private API - + /** + * INTERNAL USE ONLY + */ private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T], proxyVar: AtomVar[R], actorRef: ⇒ ActorRef): R = { //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling val actorVar = new AtomVar[ActorRef](null) @@ -631,6 +657,9 @@ class TypedActorExtension(system: ExtendedActorSystem) extends TypedActorFactory } } + /** + * INTERNAL USE ONLY + */ private[akka] def invocationHandlerFor(typedActor_? : AnyRef): TypedActorInvocationHandler = if ((typedActor_? ne null) && Proxy.isProxyClass(typedActor_?.getClass)) typedActor_? 
match { case null ⇒ null From 95db4bfd3744f0d44b91ca90b4adfbf31bffff3f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:04:13 +0200 Subject: [PATCH 039/538] Moving out ConfigurationException from akka.config to akka --- .../routing/ConfiguredLocalRoutingSpec.scala | 2 +- .../test/scala/akka/routing/RoutingSpec.scala | 2 +- .../src/main/scala/akka/AkkaException.scala | 7 ++ .../main/scala/akka/actor/ActorSystem.scala | 3 +- .../akka/config/ConfigurationException.scala | 15 --- .../akka/dispatch/AbstractDispatcher.scala | 116 ++++++++++++++---- .../src/main/scala/akka/event/Logging.scala | 6 +- .../src/main/scala/akka/routing/Routing.scala | 2 +- .../akka/serialization/Serialization.scala | 2 +- .../src/main/scala/akka/cluster/Cluster.scala | 2 +- .../scala/akka/cluster/ClusterSettings.scala | 2 +- .../akka/actor/mailbox/FileBasedMailbox.scala | 2 +- .../akka/remote/RemoteActorRefProvider.scala | 2 +- .../scala/akka/remote/RemoteDeployer.scala | 2 +- .../scala/akka/remote/RemoteSettings.scala | 2 +- .../scala/akka/remote/netty/Settings.scala | 2 +- .../akka/routing/RemoteRouterConfig.scala | 2 +- 17 files changed, 111 insertions(+), 60 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/config/ConfigurationException.scala diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index d01f1cda04..5bedc8fc33 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -8,7 +8,7 @@ import java.util.concurrent.atomic.AtomicInteger import org.junit.runner.RunWith import akka.actor.{ Props, LocalActorRef, Deploy, Actor, ActorRef } -import akka.config.ConfigurationException +import akka.ConfigurationException import akka.dispatch.Await import akka.pattern.{ ask, gracefulStop } import akka.testkit.{ TestLatch, 
ImplicitSender, DefaultTimeout, AkkaSpec } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 2ae32cfcf5..5ad6da271f 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -10,7 +10,7 @@ import akka.testkit._ import akka.util.duration._ import akka.dispatch.Await import akka.util.Duration -import akka.config.ConfigurationException +import akka.ConfigurationException import com.typesafe.config.ConfigFactory import akka.pattern.ask import java.util.concurrent.ConcurrentHashMap diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 002233ffe5..79d78b9d39 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -44,3 +44,10 @@ class AkkaException(message: String = "", cause: Throwable = null) extends Runti def stackTraceToString = AkkaException.stackTraceToString(this) } + +/** + * This exception is thrown when Akka detects a problem with the provided configuration + */ +class ConfigurationException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { + def this(msg: String) = this(msg, null) +} diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 32e221a7a1..a1d30ddbc6 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -4,7 +4,6 @@ package akka.actor -import akka.config.ConfigurationException import akka.event._ import akka.dispatch._ import akka.pattern.ask @@ -150,7 +149,7 @@ object ActorSystem { final val JvmExitOnFatalError = getBoolean("akka.jvm-exit-on-fatal-error") if (ConfigVersion != Version) - throw new ConfigurationException("Akka JAR version [" + Version 
+ "] does not match the provided config version [" + ConfigVersion + "]") + throw new akka.ConfigurationException("Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]") /** * Returns the String representation of the Config that this Settings is backed by diff --git a/akka-actor/src/main/scala/akka/config/ConfigurationException.scala b/akka-actor/src/main/scala/akka/config/ConfigurationException.scala deleted file mode 100644 index ba0a3a2234..0000000000 --- a/akka-actor/src/main/scala/akka/config/ConfigurationException.scala +++ /dev/null @@ -1,15 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.config - -import akka.AkkaException - -class ConfigurationException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null); -} - -class ModuleNotAvailableException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null); -} diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index db5c71167b..9aec23b4c6 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -33,7 +33,10 @@ final case class Envelope(val message: Any, val sender: ActorRef)(system: ActorS } } -object SystemMessage { +/** + * INTERNAL API + */ +private[akka] object SystemMessage { @tailrec final def size(list: SystemMessage, acc: Int = 0): Int = { if (list eq null) acc else size(list.next, acc + 1) @@ -59,33 +62,57 @@ object SystemMessage { * system messages is handled in a single thread only and not ever passed around, * hence no further synchronization is needed. 
* + * INTERNAL API + * * ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ */ -sealed trait SystemMessage extends PossiblyHarmful { +private[akka] sealed trait SystemMessage extends PossiblyHarmful { @transient var next: SystemMessage = _ } -case class Create() extends SystemMessage // send to self from Dispatcher.register -case class Recreate(cause: Throwable) extends SystemMessage // sent to self from ActorCell.restart -case class Suspend() extends SystemMessage // sent to self from ActorCell.suspend -case class Resume() extends SystemMessage // sent to self from ActorCell.resume -case class Terminate() extends SystemMessage // sent to self from ActorCell.stop -case class Supervise(child: ActorRef) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start -case class ChildTerminated(child: ActorRef) extends SystemMessage // sent to supervisor from ActorCell.doTerminate -case class Link(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.watch -case class Unlink(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch + +/** + * INTERNAL API + */ +private[akka] case class Create() extends SystemMessage // send to self from Dispatcher.register +/** + * INTERNAL API + */ +private[akka] case class Recreate(cause: Throwable) extends SystemMessage // sent to self from ActorCell.restart +/** + * INTERNAL API + */ +private[akka] case class Suspend() extends SystemMessage // sent to self from ActorCell.suspend +/** + * INTERNAL API + */ +private[akka] case class Resume() extends SystemMessage // sent to self from ActorCell.resume +/** + * INTERNAL API + */ +private[akka] case class Terminate() extends SystemMessage // sent to self from ActorCell.stop +/** + * INTERNAL API + */ +private[akka] case class Supervise(child: ActorRef) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start +/** + * INTERNAL API + */ +private[akka] case class ChildTerminated(child: ActorRef) extends 
SystemMessage // sent to supervisor from ActorCell.doTerminate +/** + * INTERNAL API + */ +private[akka] case class Link(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.watch +/** + * INTERNAL API + */ +private[akka] case class Unlink(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Runnable { - def run() { - try { - runnable.run() - } catch { - case NonFatal(e) ⇒ - eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage)) - } finally { - cleanup() - } - } + def run(): Unit = + try runnable.run() catch { + case NonFatal(e) ⇒ eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage)) + } finally cleanup() } /** @@ -170,10 +197,16 @@ trait ExecutionContext { def reportFailure(t: Throwable): Unit } +/** + * INTERNAL API + */ private[akka] trait LoadMetrics { self: Executor ⇒ def atFullThrottle(): Boolean } +/** + * INTERNAL API + */ private[akka] object MessageDispatcher { val UNSCHEDULED = 0 //WARNING DO NOT CHANGE THE VALUE OF THIS: It relies on the faster init of 0 in AbstractMessageDispatcher val SCHEDULED = 1 @@ -228,7 +261,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext /** * Creates and returns a mailbox for the given actor. */ - protected[akka] def createMailbox(actor: ActorCell): Mailbox + protected[akka] def createMailbox(actor: ActorCell): Mailbox //FIXME should this really be private[akka]? 
/** * Identifier of this dispatcher, corresponds to the full key @@ -255,7 +288,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext ifSensibleToDoSoThenScheduleShutdown() } - final def execute(runnable: Runnable) { + final def execute(runnable: Runnable): Unit = { val invocation = TaskInvocation(eventStream, runnable, taskCleanup) addInhabitants(+1) try { @@ -300,6 +333,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext /** * If you override it, you must call it. But only ever once. See "attach" for only invocation. + * + * INTERNAL API */ protected[akka] def register(actor: ActorCell) { if (debug) actors.put(this, actor.self) @@ -308,6 +343,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext /** * If you override it, you must call it. But only ever once. See "detach" for the only invocation + * + * INTERNAL API */ protected[akka] def unregister(actor: ActorCell) { if (debug) actors.remove(this, actor.self) @@ -340,6 +377,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext * When the dispatcher no longer has any actors registered, how long will it wait until it shuts itself down, * defaulting to your akka configs "akka.actor.default-dispatcher.shutdown-timeout" or default specified in * reference.conf + * + * INTERNAL API */ protected[akka] def shutdownTimeout: Duration @@ -362,36 +401,59 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext } /** - * Will be called when the dispatcher is to queue an invocation for execution + * Will be called when the dispatcher is to queue an invocation for execution + * + * INTERNAL API */ protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage) /** - * Will be called when the dispatcher is to queue an invocation for execution + * Will be called when the dispatcher is to queue an invocation for execution + * + * INTERNAL API */ 
protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope) /** * Suggest to register the provided mailbox for execution + * + * INTERNAL API */ protected[akka] def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean // TODO check whether this should not actually be a property of the mailbox + /** + * INTERNAL API + */ protected[akka] def throughput: Int + + /** + * INTERNAL API + */ protected[akka] def throughputDeadlineTime: Duration - @inline - protected[akka] final val isThroughputDeadlineTimeDefined = throughputDeadlineTime.toMillis > 0 + /** + * INTERNAL API + */ + @inline protected[akka] final val isThroughputDeadlineTimeDefined = throughputDeadlineTime.toMillis > 0 + /** + * INTERNAL API + */ protected[akka] def executeTask(invocation: TaskInvocation) /** * Called one time every time an actor is detached from this dispatcher and this dispatcher has no actors left attached * Must be idempotent + * + * INTERNAL API */ protected[akka] def shutdown(): Unit } +/** + * An ExecutorServiceConfigurator is a class that given some prerequisites and a configuration can create instances of ExecutorService + */ abstract class ExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceFactoryProvider /** diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 2cda6469da..ac31b133b3 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -4,12 +4,10 @@ package akka.event import akka.actor._ -import akka.AkkaException +import akka.{ ConfigurationException, AkkaException } import akka.actor.ActorSystem.Settings -import akka.config.ConfigurationException -import akka.util.ReentrantGuard +import akka.util.{ Timeout, ReentrantGuard } import akka.util.duration._ -import akka.util.Timeout import java.util.concurrent.atomic.AtomicInteger 
import scala.util.control.NoStackTrace import java.util.concurrent.TimeoutException diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index fdf14a5b96..211ef202f7 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -6,7 +6,7 @@ package akka.routing import akka.actor._ import akka.util.Duration import akka.util.duration._ -import akka.config.ConfigurationException +import akka.ConfigurationException import akka.pattern.pipe import akka.pattern.AskSupport import com.typesafe.config.Config diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index ce0f56a238..03d03dc785 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -85,7 +85,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { /** * Returns the Serializer configured for the given object, returns the NullSerializer if it's null. 
* - * @throws akka.config.ConfigurationException if no `serialization-bindings` is configured for the + * @throws akka.ConfigurationException if no `serialization-bindings` is configured for the * class of the object */ def findSerializerFor(o: AnyRef): Serializer = o match { diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index e3429cfdb3..76e3356143 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -13,7 +13,7 @@ import akka.dispatch.Await import akka.pattern.ask import akka.util._ import akka.util.duration._ -import akka.config.ConfigurationException +import akka.ConfigurationException import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } import java.util.concurrent.TimeUnit._ diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index a24c75b436..8e9b9c770d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -6,7 +6,7 @@ package akka.cluster import com.typesafe.config.Config import akka.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.config.ConfigurationException +import akka.ConfigurationException import scala.collection.JavaConverters._ import akka.actor.Address import akka.actor.AddressFromURIString diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala index ef8a28b2cf..e57bfd57d2 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala @@ -11,7 +11,7 @@ import 
akka.actor.ActorRef import akka.dispatch.MailboxType import com.typesafe.config.Config import akka.util.NonFatal -import akka.config.ConfigurationException +import akka.ConfigurationException import akka.actor.ActorSystem class FileBasedMailboxType(systemSettings: ActorSystem.Settings, config: Config) extends MailboxType { diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 8f1ec6e1b7..bf55edf24c 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -9,7 +9,7 @@ import akka.actor._ import akka.dispatch._ import akka.event.{ DeathWatch, Logging, LoggingAdapter } import akka.event.EventStream -import akka.config.ConfigurationException +import akka.ConfigurationException import java.util.concurrent.{ TimeoutException } import com.typesafe.config.Config import akka.serialization.Serialization diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index 0858c66405..e869c4ef4c 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -6,7 +6,7 @@ package akka.remote import akka.actor._ import akka.routing._ import com.typesafe.config._ -import akka.config.ConfigurationException +import akka.ConfigurationException case class RemoteScope(node: Address) extends Scope { def withFallback(other: Scope): Scope = this diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index ef30206a42..0b26311286 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -7,7 +7,7 @@ import com.typesafe.config.Config import akka.util.Duration import 
java.util.concurrent.TimeUnit.MILLISECONDS import java.net.InetAddress -import akka.config.ConfigurationException +import akka.ConfigurationException import scala.collection.JavaConverters._ import akka.actor.Address import akka.actor.AddressFromURIString diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index e2f69d77b5..95737e7584 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -7,7 +7,7 @@ import com.typesafe.config.Config import akka.util.Duration import java.util.concurrent.TimeUnit._ import java.net.InetAddress -import akka.config.ConfigurationException +import akka.ConfigurationException class NettySettings(config: Config, val systemName: String) { diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala index c9bb6dba0f..714b854a69 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala @@ -10,7 +10,7 @@ import akka.actor.ActorSystemImpl import akka.actor.Deploy import akka.actor.InternalActorRef import akka.actor.Props -import akka.config.ConfigurationException +import akka.ConfigurationException import akka.remote.RemoteScope import akka.actor.AddressFromURIString import akka.actor.SupervisorStrategy From 08ff967b08886af2fc815016546728e0a1219420 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:08:52 +0200 Subject: [PATCH 040/538] Moving in SharingMailbox into BalancingDispatcher and making team & messageQueue private[akka] --- .../akka/dispatch/BalancingDispatcher.scala | 46 ++++++++++--------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala 
b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index e95f54b88b..ee492409ec 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -35,12 +35,35 @@ class BalancingDispatcher( attemptTeamWork: Boolean) extends Dispatcher(_prerequisites, _id, throughput, throughputDeadlineTime, mailboxType, _executorServiceFactoryProvider, _shutdownTimeout) { - val team = new ConcurrentSkipListSet[ActorCell]( + /** + * INTERNAL USE ONLY + */ + private[akka] val team = new ConcurrentSkipListSet[ActorCell]( Helpers.identityHashComparator(new Comparator[ActorCell] { def compare(l: ActorCell, r: ActorCell) = l.self.path compareTo r.self.path })) - val messageQueue: MessageQueue = mailboxType.create(None) + /** + * INTERNAL USE ONLY + */ + private[akka] val messageQueue: MessageQueue = mailboxType.create(None) + + private class SharingMailbox(_actor: ActorCell, _messageQueue: MessageQueue) extends Mailbox(_actor, _messageQueue) with DefaultSystemMessageQueue { + override def cleanUp(): Unit = { + //Don't call the original implementation of this since it scraps all messages, and we don't want to do that + if (hasSystemMessages) { + val dlq = actor.systemImpl.deadLetterMailbox + var message = systemDrain() + while (message ne null) { + // message must be “virgin” before being able to systemEnqueue again + val next = message.next + message.next = null + dlq.systemEnqueue(actor.self, message) + message = next + } + } + } + } protected[akka] override def createMailbox(actor: ActorCell): Mailbox = new SharingMailbox(actor, messageQueue) @@ -74,22 +97,3 @@ class BalancingDispatcher( scheduleOne() } } - -class SharingMailbox(_actor: ActorCell, _messageQueue: MessageQueue) - extends Mailbox(_actor, _messageQueue) with DefaultSystemMessageQueue { - - override def cleanUp(): Unit = { - //Don't call the original implementation of this since it scraps all messages, and we don't want 
to do that - if (hasSystemMessages) { - val dlq = actor.systemImpl.deadLetterMailbox - var message = systemDrain() - while (message ne null) { - // message must be “virgin” before being able to systemEnqueue again - val next = message.next - message.next = null - dlq.systemEnqueue(actor.self, message) - message = next - } - } - } -} From 630c4a0153fc8f96ff1f5d00f371e92f1b26a766 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:14:49 +0200 Subject: [PATCH 041/538] Adding docs and fixing a potential bug in use of getSimpleName in Dispatcher --- .../main/scala/akka/dispatch/Dispatcher.scala | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index fbffd08d7e..3a73bf0718 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -9,6 +9,7 @@ import java.util.concurrent.atomic.AtomicReference import akka.actor.ActorCell import akka.util.Duration import java.util.concurrent._ +import akka.event.Logging /** * The event-based ``Dispatcher`` binds a set of Actors to a thread pool backed up by a @@ -38,18 +39,27 @@ class Dispatcher( protected val executorService = new AtomicReference[ExecutorServiceDelegate]( new ExecutorServiceDelegate { lazy val executor = executorServiceFactory.createExecutorService }) + /** + * INTERNAL USE ONLY + */ protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope) = { val mbox = receiver.mailbox mbox.enqueue(receiver.self, invocation) registerForExecution(mbox, true, false) } + /** + * INTERNAL USE ONLY + */ protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage) = { val mbox = receiver.mailbox mbox.systemEnqueue(receiver.self, invocation) registerForExecution(mbox, false, true) } + /** + * INTERNAL USE ONLY + */ protected[akka] def executeTask(invocation: 
TaskInvocation) { try { executorService.get() execute invocation @@ -65,8 +75,14 @@ class Dispatcher( } } + /** + * INTERNAL USE ONLY + */ protected[akka] def createMailbox(actor: ActorCell): Mailbox = new Mailbox(actor, mailboxType.create(Some(actor))) with DefaultSystemMessageQueue + /** + * INTERNAL USE ONLY + */ protected[akka] def shutdown: Unit = Option(executorService.getAndSet(new ExecutorServiceDelegate { lazy val executor = executorServiceFactory.createExecutorService @@ -74,6 +90,8 @@ class Dispatcher( /** * Returns if it was registered + * + * INTERNAL USE ONLY */ protected[akka] override def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = { if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { //This needs to be here to ensure thread safety and no races @@ -97,7 +115,7 @@ class Dispatcher( } else false } - override val toString = getClass.getSimpleName + "[" + id + "]" + override val toString = Logging.simpleName(this) + "[" + id + "]" } object PriorityGenerator { From 72f12c89cd1a676c7fd2ca4e14c14ca63aedc90c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:21:58 +0200 Subject: [PATCH 042/538] Adding docs and privatizing some fields in Dispatchers.scala --- .../scala/akka/dispatch/Dispatchers.scala | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 93d44e007d..54173b8460 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -5,17 +5,15 @@ package akka.dispatch import java.util.concurrent.{ ConcurrentHashMap, TimeUnit, ThreadFactory } - -import scala.collection.JavaConverters.mapAsJavaMapConverter - import com.typesafe.config.{ ConfigFactory, Config } - -import Dispatchers.DefaultDispatcherId import akka.actor.{ 
Scheduler, DynamicAccess, ActorSystem } import akka.event.Logging.Warning import akka.event.EventStream import akka.util.Duration +/** + * DispatcherPrerequisites represents useful contextual pieces when constructing a MessageDispatcher + */ trait DispatcherPrerequisites { def threadFactory: ThreadFactory def eventStream: EventStream @@ -25,7 +23,10 @@ trait DispatcherPrerequisites { def settings: ActorSystem.Settings } -case class DefaultDispatcherPrerequisites( +/** + * INTERNAL USE ONLY + */ +private[akka] case class DefaultDispatcherPrerequisites( val threadFactory: ThreadFactory, val eventStream: EventStream, val deadLetterMailbox: Mailbox, @@ -110,7 +111,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc ConfigFactory.parseMap(Map("id" -> id).asJava) } - /* + /** * Creates a dispatcher from a Config. Internal test purpose only. * * ex: from(config.getConfig(id)) @@ -119,18 +120,22 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc * * Throws: IllegalArgumentException if the value of "type" is not valid * IllegalArgumentException if it cannot create the MessageDispatcherConfigurator + * + * INTERNAL USE ONLY */ private[akka] def from(cfg: Config): MessageDispatcher = { configuratorFrom(cfg).dispatcher() } - /* + /** * Creates a MessageDispatcherConfigurator from a Config. * * The Config must also contain a `id` property, which is the identifier of the dispatcher. 
* * Throws: IllegalArgumentException if the value of "type" is not valid * IllegalArgumentException if it cannot create the MessageDispatcherConfigurator + * + * INTERNAL USE ONLY */ private def configuratorFrom(cfg: Config): MessageDispatcherConfigurator = { if (!cfg.hasPath("id")) throw new IllegalArgumentException("Missing dispatcher 'id' property in config: " + cfg.root.render) @@ -208,7 +213,7 @@ class BalancingDispatcherConfigurator(config: Config, prerequisites: DispatcherP class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - val threadPoolConfig: ThreadPoolConfig = configureExecutor() match { + private val threadPoolConfig: ThreadPoolConfig = configureExecutor() match { case e: ThreadPoolExecutorConfigurator ⇒ e.threadPoolConfig case other ⇒ prerequisites.eventStream.publish( From 5ca3fe11f87f20b9cd54d3c24df21241c823ec7f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 16 May 2012 17:37:23 +0200 Subject: [PATCH 043/538] Adding tons of ScalaDocs for Mailbox.scala --- .../main/scala/akka/dispatch/Mailbox.scala | 49 +++++++++++++++++-- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 2e3a98e8d9..ba559093d0 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -14,9 +14,15 @@ import akka.actor.ActorContext import com.typesafe.config.Config import akka.actor.ActorSystem +/** + * This exception normally is thrown when a bounded mailbox is over capacity + */ class MessageQueueAppendFailedException(message: String, cause: Throwable = null) extends AkkaException(message, cause) -object Mailbox { +/** + * INTERNAL USE ONLY + */ +private[akka] object Mailbox { type Status = Int @@ -244,6 +250,10 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val 
messageQueue: Mes } } +/** + * A MessageQueue is one of the core components in forming an Akka Mailbox. + * The MessageQueue is where the normal messages that are sent to Actors will be enqueued (and subsequently dequeued) + */ trait MessageQueue { /** * Try to enqueue the message to this queue, or throw an exception. @@ -277,7 +287,7 @@ trait MessageQueue { } /** - * Internal mailbox implementation detail. + * INTERNAL USE ONLY */ private[akka] trait SystemMessageQueue { /** @@ -294,7 +304,7 @@ private[akka] trait SystemMessageQueue { } /** - * Internal mailbox implementation detail. + * INTERNAL USE ONLY */ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ @@ -325,6 +335,9 @@ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ def hasSystemMessages: Boolean = systemQueueGet ne null } +/** + * A QueueBasedMessageQueue is a MessageQueue which is backed by a java.util.Queue + */ trait QueueBasedMessageQueue extends MessageQueue { def queue: Queue[Envelope] def numberOfMessages = queue.size @@ -340,11 +353,18 @@ trait QueueBasedMessageQueue extends MessageQueue { } } +/** + * UnboundedMessageQueueSemantics adds the enqueue/dequeue operations for unbounded java.util.Queues + */ trait UnboundedMessageQueueSemantics extends QueueBasedMessageQueue { def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle def dequeue(): Envelope = queue.poll() } +/** + * BoundedMessageQueueSemantics adds the enqueue/dequeue operations for bounded java.util.Queues, + * and it also forces the java.util.Queue to extend java.util.BlockingQueue + */ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingQueue[Envelope] @@ -360,17 +380,27 @@ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def dequeue(): Envelope = queue.poll() } +/** + * DequeBasedMessageQueue forces the underlying java.util.Queue extend java.util.Deque + */ trait DequeBasedMessageQueue extends 
QueueBasedMessageQueue { def queue: Deque[Envelope] def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit } +/** + * UnboundedMessageQueueSemantics adds the enqueue/dequeue operations for unbounded java.util.Deque + */ trait UnboundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = queue addFirst handle def dequeue(): Envelope = queue.poll() } +/** + * BoundedMessageQueueSemantics adds the enqueue/dequeue operations for bounded java.util.Deque, + * and it also forces the java.util.Queue to extend java.util.BlockingQueue + */ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingDeque[Envelope] @@ -393,14 +423,14 @@ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { } /** - * Mailbox configuration. + * MailboxType is used to construct a Messagequeue given an optional ActorContext owner. 
*/ trait MailboxType { def create(owner: Option[ActorContext]): MessageQueue } /** - * It's a case class for Java (new UnboundedMailbox) + * UnboundedMailbox is the standard issue Akka Mailbox as it is unbounded and has quite good performance */ case class UnboundedMailbox() extends MailboxType { @@ -412,6 +442,9 @@ case class UnboundedMailbox() extends MailboxType { } } +/** + * BoundedMailbox is the default bounded mailbox + */ case class BoundedMailbox( final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"), @@ -452,6 +485,9 @@ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val cap } } +/** + * This is the default mailbox for Deques, which is unbounded + */ case class UnboundedDequeBasedMailbox() extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this() @@ -462,6 +498,9 @@ case class UnboundedDequeBasedMailbox() extends MailboxType { } } +/** + * This is the default mailbox for Deques, which is bounded + */ case class BoundedDequeBasedMailbox( final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"), From 1bd41e948e7e31f145b85df337c1e3d58697e22c Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Wed, 16 May 2012 23:49:42 +0200 Subject: [PATCH 044/538] Add sbtosgi plugin configuration to generate OSGi metadata --- project/AkkaBuild.scala | 74 +++++++++++++++++++++++++++++++++++------ project/plugins.sbt | 2 ++ 2 files changed, 66 insertions(+), 10 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 4804c0f796..a291abae7d 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -11,6 +11,8 @@ import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOpti import com.typesafe.schoir.SchoirPlugin.schoirSettings 
import com.typesafe.sbtscalariform.ScalariformPlugin import com.typesafe.sbtscalariform.ScalariformPlugin.ScalariformKeys +import com.typesafe.sbtosgi.OsgiPlugin.osgiSettings +import com.typesafe.sbtosgi.OsgiKeys import java.lang.Boolean.getBoolean import Sphinx.{ sphinxDocs, sphinxHtml, sphinxLatex, sphinxPdf, sphinxPygments, sphinxTags } @@ -45,7 +47,7 @@ object AkkaBuild extends Build { lazy val actor = Project( id = "akka-actor", base = file("akka-actor"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.actor ++ Seq( autoCompilerPlugins := true, libraryDependencies <+= scalaVersion { v => compilerPlugin("org.scala-lang.plugins" % "continuations" % v) }, scalacOptions += "-P:continuations:enable", @@ -79,7 +81,7 @@ object AkkaBuild extends Build { id = "akka-remote", base = file("akka-remote"), dependencies = Seq(actor, actorTests % "test->test", testkit % "test->test"), - settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( + settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ OSGi.remote ++ Seq( libraryDependencies ++= Dependencies.remote, // disable parallel tests parallelExecution in Test := false, @@ -98,7 +100,7 @@ object AkkaBuild extends Build { id = "akka-cluster", base = file("akka-cluster"), dependencies = Seq(remote, remote % "test->test", testkit % "test->test"), - settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( + settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ OSGi.cluster ++ Seq( libraryDependencies ++= Dependencies.cluster, // disable parallel tests parallelExecution in Test := false, @@ -117,7 +119,7 @@ object AkkaBuild extends Build { id = "akka-slf4j", base = file("akka-slf4j"), dependencies = Seq(actor, testkit % "test->test"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.slf4j ++ Seq( libraryDependencies ++= Dependencies.slf4j ) ) @@ -126,7 +128,7 @@ object AkkaBuild extends Build { id = "akka-agent", base = 
file("akka-agent"), dependencies = Seq(actor, testkit % "test->test"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.agent ++ Seq( libraryDependencies ++= Dependencies.agent ) ) @@ -135,7 +137,7 @@ object AkkaBuild extends Build { id = "akka-transactor", base = file("akka-transactor"), dependencies = Seq(actor, testkit % "test->test"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.transactor ++ Seq( libraryDependencies ++= Dependencies.transactor ) ) @@ -153,7 +155,7 @@ object AkkaBuild extends Build { id = "akka-mailboxes-common", base = file("akka-durable-mailboxes/akka-mailboxes-common"), dependencies = Seq(remote, testkit % "compile;test->test"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.mailboxesCommon ++ Seq( libraryDependencies ++= Dependencies.mailboxes ) ) @@ -162,7 +164,7 @@ object AkkaBuild extends Build { id = "akka-file-mailbox", base = file("akka-durable-mailboxes/akka-file-mailbox"), dependencies = Seq(mailboxesCommon % "compile;test->test", testkit % "test"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.fileMailbox ++ Seq( libraryDependencies ++= Dependencies.fileMailbox ) ) @@ -171,7 +173,7 @@ object AkkaBuild extends Build { id = "akka-zeromq", base = file("akka-zeromq"), dependencies = Seq(actor, testkit % "test;test->test"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.zeroMQ ++ Seq( libraryDependencies ++= Dependencies.zeroMQ ) ) @@ -189,7 +191,7 @@ object AkkaBuild extends Build { id = "akka-camel", base = file("akka-camel"), dependencies = Seq(actor, slf4j, testkit % "test->test"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.camel ++ Seq( libraryDependencies ++= Dependencies.camel ) ) @@ -435,3 +437,55 @@ object Dependency { val log4j = "log4j" % "log4j" % "1.2.14" % "test" // ApacheV2 } } + +// OSGi settings + +object OSGi { + + val actor = osgiSettings ++ Seq( + 
OsgiKeys.exportPackage := Seq("akka*", "com.typesafe.config.*", "com.eaio.*", "org.jboss.netty.akka.util"), + OsgiKeys.importPackage := Seq(scalaImport(), "*;resolution:=optional"), + OsgiKeys.privatePackage := Seq("org.jboss.netty.akka.util.internal") + ) + + val agent = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.agent.*") + ) + + val camel = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.camel.*", "akka.camelexamples"), + OsgiKeys.importPackage := Seq(scalaImport(), akkaImport(), "org.apache.camel.*") + ) + + val cluster = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.cluster.*") + ) + + val fileMailbox = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.actor.mailbox.*") + ) + + val mailboxesCommon = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.actor.mailbox.*") + ) + + val remote = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.remote.*", "akka.routing.*", "akka.serialization.*") + ) + + val slf4j = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.event.slf4j.*") + ) + + val transactor = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.transactor.*") + ) + + val zeroMQ = osgiSettings ++ Seq( + OsgiKeys.exportPackage := Seq("akka.zeromq.*") + ) + + def scalaImport(packageName: String = "scala.*") = "%s;version=\"[2.9.1,2.10)\"".format(packageName) + def akkaImport(packageName: String = "akka.*") = "%s;version=\"[2.1,3)\"".format(packageName) + +} diff --git a/project/plugins.sbt b/project/plugins.sbt index 80ff9db95a..f5355bd1d2 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -7,6 +7,8 @@ addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.2") addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.3.1") +addSbtPlugin("com.typesafe.sbtosgi" % "sbtosgi" % "0.2.0") + resolvers ++= Seq( "less is" at "http://repo.lessis.me", "coda" at "http://repo.codahale.com") From 80ca257f25bd6de74c346c140979e0f5e791daea Mon Sep 17 00:00:00 2001 From: Gert 
Vanthienen Date: Thu, 17 May 2012 08:09:59 +0200 Subject: [PATCH 045/538] sun.misc.Unsafe should be loaded through boot delegation instead --- akka-docs/additional/index.rst | 1 + akka-docs/additional/osgi.rst | 10 ++++++++++ project/AkkaBuild.scala | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 akka-docs/additional/osgi.rst diff --git a/akka-docs/additional/index.rst b/akka-docs/additional/index.rst index b3c89356c9..284586d59d 100644 --- a/akka-docs/additional/index.rst +++ b/akka-docs/additional/index.rst @@ -6,3 +6,4 @@ Additional Information recipes language-bindings + osgi diff --git a/akka-docs/additional/osgi.rst b/akka-docs/additional/osgi.rst new file mode 100644 index 0000000000..aea554ef9c --- /dev/null +++ b/akka-docs/additional/osgi.rst @@ -0,0 +1,10 @@ +Akka in OSGi +============ + +Configuring the OSGi Framework +------------------------------ + +To use Akka in an OSGi environment, the ``org.osgi.framework.bootdelegation`` +property must be set to always delegate the ``sun.misc`` package to the boot classloader +instead of resolving it through the normal OSGi class space. 
+ diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index a291abae7d..59f7c62a04 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -444,7 +444,7 @@ object OSGi { val actor = osgiSettings ++ Seq( OsgiKeys.exportPackage := Seq("akka*", "com.typesafe.config.*", "com.eaio.*", "org.jboss.netty.akka.util"), - OsgiKeys.importPackage := Seq(scalaImport(), "*;resolution:=optional"), + OsgiKeys.importPackage := Seq("!sun.misc", scalaImport()), OsgiKeys.privatePackage := Seq("org.jboss.netty.akka.util.internal") ) From 58b2af8c09219cc2745814a6231600717111fa2c Mon Sep 17 00:00:00 2001 From: Mirko Friedenhagen Date: Thu, 17 May 2012 22:58:57 +0200 Subject: [PATCH 046/538] Add options for creating epub output for the documentation --- akka-docs/Makefile | 6 ++++++ akka-docs/conf.py | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/akka-docs/Makefile b/akka-docs/Makefile index 3c0041537d..c78d5ba317 100644 --- a/akka-docs/Makefile +++ b/akka-docs/Makefile @@ -34,6 +34,7 @@ endif help: @echo "Please use \`make ' where is one of" @echo " pygments to locally install the custom pygments styles" + @echo " epub to make an epub" @echo " html to make standalone HTML files" @echo " singlehtml to make a single large HTML file" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @@ -53,6 +54,11 @@ pygments: $(LOCALPACKAGES): $(MAKE) pygments +epub: $(LOCALPACKAGES) + $(SPHINXBUILD) $(SPHINXFLAGS) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
+ html: $(LOCALPACKAGES) $(SPHINXBUILD) $(SPHINXFLAGS) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo diff --git a/akka-docs/conf.py b/akka-docs/conf.py index 2e66d8b56c..b632430b59 100644 --- a/akka-docs/conf.py +++ b/akka-docs/conf.py @@ -52,6 +52,14 @@ html_context = { 'include_analytics': 'online' in tags } +# -- Options for EPUB output --------------------------------------------------- +epub_author = "Typesafe Inc" +epub_language = "en" +epub_publisher = epub_author +epub_identifier = "http://doc.akka.io/docs/akka/snapshot/" +epub_scheme = "URL" +epub_cover = ("_sphinx/static/akka.png", "") + # -- Options for LaTeX output -------------------------------------------------- def setup(app): From d65f4bbe595960dc69302bb4cd72931ce5b02ba9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 18 May 2012 10:40:18 +0200 Subject: [PATCH 047/538] Added reference to the NetBeans SBT plugin --- akka-docs/intro/getting-started.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/akka-docs/intro/getting-started.rst b/akka-docs/intro/getting-started.rst index b3bdbf70f3..31d6dc72e6 100644 --- a/akka-docs/intro/getting-started.rst +++ b/akka-docs/intro/getting-started.rst @@ -136,12 +136,17 @@ SBT installation instructions on `https://github.com/harrah/xsbt/wiki/Setup `_ to generate Eclipse project. +Setup SBT project and then use `sbteclipse `_ to generate a Eclipse project. Using Akka with IntelliJ IDEA ----------------------------- -Setup SBT project and then use `sbt-idea `_ to generate IntelliJ IDEA project. +Setup SBT project and then use `sbt-idea `_ to generate a IntelliJ IDEA project. + +Using Akka with NetBeans +------------------------ + +Setup SBT project and then use `sbt-netbeans-plugin `_ to generate a NetBeans project. 
Build from sources ------------------ From 2278d1fc8edbf63d2de3307d0469873a40d8629f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 18 May 2012 11:58:16 +0200 Subject: [PATCH 048/538] ActorSystem.dispatcher should be implicit for easier importability. See #2074 * Makes it more consistent with ActorContext.dispatcher --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index b84057b749..20ba0f696e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -295,8 +295,9 @@ abstract class ActorSystem extends ActorRefFactory { * Default dispatcher as configured. This dispatcher is used for all actors * in the actor system which do not have a different dispatcher configured * explicitly. + * Importing this member will place the default MessageDispatcher in scope. */ - def dispatcher: MessageDispatcher + implicit def dispatcher: MessageDispatcher /** * Register a block of code (callback) to run after all actors in this actor system have From 877981caef210731ff7a2b93b9a34e6b8aa7aa8f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 18 May 2012 13:28:53 +0200 Subject: [PATCH 049/538] DOC: TestKit sample is wrong. 
See #2072 * Added code to TestKitUsageSpec.scala * Fixed errors * Fixed race --- .../akka/docs/testkit/TestKitUsageSpec.scala | 156 ++++++++++++++++++ akka-docs/scala/testing.rst | 2 + akka-docs/scala/testkit-example.rst | 139 +--------------- 3 files changed, 159 insertions(+), 138 deletions(-) create mode 100644 akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala diff --git a/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala b/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala new file mode 100644 index 0000000000..4f6c97abf7 --- /dev/null +++ b/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala @@ -0,0 +1,156 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.docs.testkit + +//#testkit-usage +import scala.util.Random + +import org.scalatest.BeforeAndAfterAll +import org.scalatest.WordSpec +import org.scalatest.matchers.ShouldMatchers + +import com.typesafe.config.ConfigFactory + +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.Props +import akka.testkit.DefaultTimeout +import akka.testkit.ImplicitSender +import akka.testkit.TestKit +import akka.util.duration._ + +/** + * a Test to show some TestKit examples + */ +class TestKitUsageSpec + extends TestKit(ActorSystem("TestKitUsageSpec", + ConfigFactory.parseString(TestKitUsageSpec.config))) + with DefaultTimeout with ImplicitSender + with WordSpec with ShouldMatchers with BeforeAndAfterAll { + import TestKitUsageSpec._ + + val echoRef = system.actorOf(Props(new EchoActor)) + val forwardRef = system.actorOf(Props(new ForwardingActor(testActor))) + val filterRef = system.actorOf(Props(new FilteringActor(testActor))) + val randomHead = Random.nextInt(6) + val randomTail = Random.nextInt(10) + val headList = List().padTo(randomHead, "0") + val tailList = List().padTo(randomTail, "1") + val seqRef = system.actorOf(Props(new SequencingActor(testActor, headList, tailList))) + + override def afterAll { + 
system.shutdown() + } + + "An EchoActor" should { + "Respond with the same message it receives" in { + within(500 millis) { + echoRef ! "test" + expectMsg("test") + } + } + } + "A ForwardingActor" should { + "Forward a message it receives" in { + within(500 millis) { + forwardRef ! "test" + expectMsg("test") + } + } + } + "A FilteringActor" should { + "Filter all messages, except expected messagetypes it receives" in { + var messages = List[String]() + within(500 millis) { + filterRef ! "test" + expectMsg("test") + filterRef ! 1 + expectNoMsg + filterRef ! "some" + filterRef ! "more" + filterRef ! 1 + filterRef ! "text" + filterRef ! 1 + + receiveWhile(500 millis) { + case msg: String ⇒ messages = msg :: messages + } + } + messages.length should be(3) + messages.reverse should be(List("some", "more", "text")) + } + } + "A SequencingActor" should { + "receive an interesting message at some point " in { + within(500 millis) { + ignoreMsg { + case msg: String ⇒ msg != "something" + } + seqRef ! "something" + expectMsg("something") + ignoreMsg { + case msg: String ⇒ msg == "1" + } + expectNoMsg + ignoreNoMsg + } + } + } +} + +object TestKitUsageSpec { + // Define your test specific configuration here + val config = """ + akka { + loglevel = "WARNING" + } + """ + + /** + * An Actor that echoes everything you send to it + */ + class EchoActor extends Actor { + def receive = { + case msg ⇒ sender ! msg + } + } + + /** + * An Actor that forwards every message to a next Actor + */ + class ForwardingActor(next: ActorRef) extends Actor { + def receive = { + case msg ⇒ next ! msg + } + } + + /** + * An Actor that only forwards certain messages to a next Actor + */ + class FilteringActor(next: ActorRef) extends Actor { + def receive = { + case msg: String ⇒ next ! msg + case _ ⇒ None + } + } + + /** + * An actor that sends a sequence of messages with a random head list, an + * interesting value and a random tail list. 
The idea is that you would + * like to test that the interesting value is received and that you cant + * be bothered with the rest + */ + class SequencingActor(next: ActorRef, head: List[String], tail: List[String]) + extends Actor { + def receive = { + case msg ⇒ { + head map (next ! _) + next ! msg + tail map (next ! _) + } + } + } +} +//#testkit-usage \ No newline at end of file diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index 15f73f4ef0..abb9e0d115 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -194,6 +194,8 @@ is a whole set of examination methods, e.g. receiving all consecutive messages matching certain criteria, receiving a whole sequence of fixed messages or classes, receiving nothing for some time, etc. +The ActorSystem passed in to the constructor of TestKit is accessible with +the the :obj:`system` member. Remember to shut down the actor system after the test is finished (also in case of failure) so that all actors—including the test actor—are stopped. diff --git a/akka-docs/scala/testkit-example.rst b/akka-docs/scala/testkit-example.rst index 54a848d267..7208de5828 100644 --- a/akka-docs/scala/testkit-example.rst +++ b/akka-docs/scala/testkit-example.rst @@ -6,142 +6,5 @@ TestKit Example (Scala) Ray Roestenburg's example code from `his blog `_ adapted to work with Akka 2.x. -.. code-block:: scala +.. 
includecode:: code/akka/docs/testkit/TestkitUsageSpec.scala#testkit-usage - package unit.akka - - import org.scalatest.matchers.ShouldMatchers - import org.scalatest.{WordSpec, BeforeAndAfterAll} - import akka.actor.Actor._ - import akka.util.duration._ - import akka.testkit.TestKit - import java.util.concurrent.TimeUnit - import akka.actor.{ActorRef, Actor} - import util.Random - - /** - * a Test to show some TestKit examples - */ - - class TestKitUsageSpec extends WordSpec with BeforeAndAfterAll with ShouldMatchers with TestKit { - val system = ActorSystem() - import system._ - val echoRef = actorOf(Props(new EchoActor)) - val forwardRef = actorOf(Props(new ForwardingActor(testActor))) - val filterRef = actorOf(Props(new FilteringActor(testActor))) - val randomHead = Random.nextInt(6) - val randomTail = Random.nextInt(10) - val headList = List().padTo(randomHead, "0") - val tailList = List().padTo(randomTail, "1") - val seqRef = actorOf(Props(new SequencingActor(testActor, headList, tailList))) - - override protected def afterAll(): scala.Unit = { - stopTestActor - echoRef.stop() - forwardRef.stop() - filterRef.stop() - seqRef.stop() - } - - "An EchoActor" should { - "Respond with the same message it receives" in { - within(100 millis) { - echoRef ! "test" - expectMsg("test") - } - } - } - "A ForwardingActor" should { - "Forward a message it receives" in { - within(100 millis) { - forwardRef ! "test" - expectMsg("test") - } - } - } - "A FilteringActor" should { - "Filter all messages, except expected messagetypes it receives" in { - var messages = List[String]() - within(100 millis) { - filterRef ! "test" - expectMsg("test") - filterRef ! 1 - expectNoMsg - filterRef ! "some" - filterRef ! "more" - filterRef ! 1 - filterRef ! "text" - filterRef ! 
1 - - receiveWhile(500 millis) { - case msg: String => messages = msg :: messages - } - } - messages.length should be(3) - messages.reverse should be(List("some", "more", "text")) - } - } - "A SequencingActor" should { - "receive an interesting message at some point " in { - within(100 millis) { - seqRef ! "something" - ignoreMsg { - case msg: String => msg != "something" - } - expectMsg("something") - ignoreMsg { - case msg: String => msg == "1" - } - expectNoMsg - } - } - } - } - - /** - * An Actor that echoes everything you send to it - */ - class EchoActor extends Actor { - def receive = { - case msg => { - self.reply(msg) - } - } - } - - /** - * An Actor that forwards every message to a next Actor - */ - class ForwardingActor(next: ActorRef) extends Actor { - def receive = { - case msg => { - next ! msg - } - } - } - - /** - * An Actor that only forwards certain messages to a next Actor - */ - class FilteringActor(next: ActorRef) extends Actor { - def receive = { - case msg: String => { - next ! msg - } - case _ => None - } - } - - /** - * An actor that sends a sequence of messages with a random head list, an interesting value and a random tail list - * The idea is that you would like to test that the interesting value is received and that you cant be bothered with the rest - */ - class SequencingActor(next: ActorRef, head: List[String], tail: List[String]) extends Actor { - def receive = { - case msg => { - head map (next ! _) - next ! msg - tail map (next ! 
_) - } - } - } From 5eba9fceef884b16c8764a78d0d8bb274dfbc830 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 13:37:26 +0200 Subject: [PATCH 050/538] Saving the planet and shufflin' --- .../scala/akka/event/EventStreamSpec.scala | 2 +- .../main/scala/akka/actor/UntypedActor.scala | 12 +- .../scala/akka/dispatch/Dispatchers.scala | 6 +- .../main/scala/akka/dispatch/Mailbox.scala | 56 +++++- .../main/scala/akka/event/DeathWatch.scala | 2 +- .../src/main/scala/akka/event/EventBus.scala | 15 +- .../src/main/scala/akka/event/Logging.scala | 167 ++++++++++-------- .../scala/akka/event/LoggingReceive.scala | 6 +- .../src/main/scala/akka/japi/JavaAPI.scala | 34 ++-- .../src/main/scala/akka/util/LockUtil.scala | 25 +-- 10 files changed, 177 insertions(+), 148 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index d2497c4a69..a8cd32f5d3 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -19,7 +19,7 @@ object EventStreamSpec { loglevel = INFO event-handlers = ["akka.event.EventStreamSpec$MyLog", "%s"] } - """.format(Logging.StandardOutLoggerName)) + """.format(Logging.StandardOutLogger.getClass.getName)) val configUnhandled = ConfigFactory.parseString(""" akka { diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index a5ebeb851c..c56a2a0167 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -93,11 +93,17 @@ import akka.japi.{ Creator } abstract class UntypedActor extends Actor { /** - * To be implemented by concrete UntypedActor. Defines the message handler. + * To be implemented by concrete UntypedActor, this defines the behavior of the + * UntypedActor. 
*/ @throws(classOf[Exception]) def onReceive(message: Any): Unit + /** + * Returns this UntypedActor's UntypedActorContext + * The UntypedActorContext is not thread safe so do not expose it outside of the + * UntypedActor. + */ def getContext(): UntypedActorContext = context.asInstanceOf[UntypedActorContext] /** @@ -150,9 +156,7 @@ abstract class UntypedActor extends Actor { */ override def postRestart(reason: Throwable): Unit = super.postRestart(reason) - final protected def receive = { - case msg ⇒ onReceive(msg) - } + final protected def receive = { case msg ⇒ onReceive(msg) } } /** diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 54173b8460..9fae624e66 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -97,6 +97,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc } } + //INTERNAL API private def config(id: String): Config = { import scala.collection.JavaConverters._ def simpleName = id.substring(id.lastIndexOf('.') + 1) @@ -106,6 +107,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc .withFallback(defaultDispatcherConfig) } + //INTERNAL API private def idConfig(id: String): Config = { import scala.collection.JavaConverters._ ConfigFactory.parseMap(Map("id" -> id).asJava) @@ -123,9 +125,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc * * INTERNAL USE ONLY */ - private[akka] def from(cfg: Config): MessageDispatcher = { - configuratorFrom(cfg).dispatcher() - } + private[akka] def from(cfg: Config): MessageDispatcher = configuratorFrom(cfg).dispatcher() /** * Creates a MessageDispatcherConfigurator from a Config. 
diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 2e3a98e8d9..1bb882d497 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -16,7 +16,10 @@ import akka.actor.ActorSystem class MessageQueueAppendFailedException(message: String, cause: Throwable = null) extends AkkaException(message, cause) -object Mailbox { +/** + * INTERNAL API + */ +private[akka] object Mailbox { type Status = Int @@ -40,6 +43,7 @@ object Mailbox { * Mailbox and InternalMailbox is separated in two classes because ActorCell is needed for implementation, * but can't be exposed to user defined mailbox subclasses. * + * INTERNAL API */ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: MessageQueue) extends SystemMessageQueue with Runnable { @@ -244,6 +248,10 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } } +/** + * A MessageQueue is the user-message "lane" of an Akka Mailbox. + * It needs to atleast support N producers and 1 consumer thread-safely. + */ trait MessageQueue { /** * Try to enqueue the message to this queue, or throw an exception. @@ -325,6 +333,9 @@ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ def hasSystemMessages: Boolean = systemQueueGet ne null } +/** + * A QueueBasedMessageQueue is a MessageQueue backed by a java.util.Queue + */ trait QueueBasedMessageQueue extends MessageQueue { def queue: Queue[Envelope] def numberOfMessages = queue.size @@ -340,11 +351,19 @@ trait QueueBasedMessageQueue extends MessageQueue { } } +/** + * UnboundedMessageQueueSemantics adds unbounded semantics to a QueueBasedMessageQueue, + * i.e. a non-blocking enqueue and dequeue. 
+ */ trait UnboundedMessageQueueSemantics extends QueueBasedMessageQueue { def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle def dequeue(): Envelope = queue.poll() } +/** + * BoundedMessageQueueSemantics adds bounded semantics to a QueueBasedMessageQueue, + * i.e. blocking enqueue with timeout + */ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingQueue[Envelope] @@ -360,17 +379,28 @@ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def dequeue(): Envelope = queue.poll() } +/** + * DequeBasedMessageQueue refines QueueBasedMessageQueue to be backed by a java.util.Deque + */ trait DequeBasedMessageQueue extends QueueBasedMessageQueue { def queue: Deque[Envelope] def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit } +/** + * UnboundedDequeBasedMessageQueueSemantics adds unbounded semantics to a DequeBasedMessageQueue, + * i.e. a non-blocking enqueue and dequeue. + */ trait UnboundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = queue addFirst handle def dequeue(): Envelope = queue.poll() } +/** + * BoundedMessageQueueSemantics adds bounded semantics to a DequeBasedMessageQueue, + * i.e. blocking enqueue with timeout + */ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingDeque[Envelope] @@ -393,14 +423,14 @@ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { } /** - * Mailbox configuration. 
+ * MailboxType is a factory to create MessageQueues for an optionally provided ActorContext */ trait MailboxType { def create(owner: Option[ActorContext]): MessageQueue } /** - * It's a case class for Java (new UnboundedMailbox) + * UnboundedMailbox is the default unbounded MailboxType used by Akka Actors. */ case class UnboundedMailbox() extends MailboxType { @@ -412,6 +442,9 @@ case class UnboundedMailbox() extends MailboxType { } } +/** + * BoundedMailbox is the default bounded MailboxType used by Akka Actors. + */ case class BoundedMailbox( final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"), @@ -428,17 +461,20 @@ case class BoundedMailbox( final val capacity: Int, final val pushTimeOut: Durat } /** - * Extend me to provide the comparator + * UnboundedPriorityMailbox is an unbounded mailbox that allows for priorization of its contents. + * Extend this class and provide the Comparator in the constructor. */ -class UnboundedPriorityMailbox( final val cmp: Comparator[Envelope]) extends MailboxType { +class UnboundedPriorityMailbox( final val cmp: Comparator[Envelope], final val initialCapacity: Int) extends MailboxType { + def this(cmp: Comparator[Envelope]) = this(cmp, 11) final override def create(owner: Option[ActorContext]): MessageQueue = - new PriorityBlockingQueue[Envelope](11, cmp) with QueueBasedMessageQueue with UnboundedMessageQueueSemantics { + new PriorityBlockingQueue[Envelope](initialCapacity, cmp) with QueueBasedMessageQueue with UnboundedMessageQueueSemantics { final def queue: Queue[Envelope] = this } } /** - * Extend me to provide the comparator + * BoundedPriorityMailbox is a bounded mailbox that allows for priorization of its contents. + * Extend this class and provide the Comparator in the constructor. 
*/ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { @@ -452,6 +488,9 @@ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val cap } } +/** + * UnboundedDequeBasedMailbox is an unbounded MailboxType, backed by a Deque. + */ case class UnboundedDequeBasedMailbox() extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this() @@ -462,6 +501,9 @@ case class UnboundedDequeBasedMailbox() extends MailboxType { } } +/** + * BoundedDequeBasedMailbox is an bounded MailboxType, backed by a Deque. + */ case class BoundedDequeBasedMailbox( final val capacity: Int, final val pushTimeOut: Duration) extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"), diff --git a/akka-actor/src/main/scala/akka/event/DeathWatch.scala b/akka-actor/src/main/scala/akka/event/DeathWatch.scala index 7469f6609f..8bf6935619 100644 --- a/akka-actor/src/main/scala/akka/event/DeathWatch.scala +++ b/akka-actor/src/main/scala/akka/event/DeathWatch.scala @@ -12,7 +12,7 @@ import akka.actor._ * A failed subscribe should also only mean that the Classifier (ActorRef) that is listened to is already shut down * See LocalDeathWatch for semantics */ -trait DeathWatch extends ActorEventBus with ActorClassifier { +abstract class DeathWatch extends ActorEventBus with ActorClassifier { type Event = Terminated protected final def classify(event: Event): Classifier = event.actor diff --git a/akka-actor/src/main/scala/akka/event/EventBus.scala b/akka-actor/src/main/scala/akka/event/EventBus.scala index 2dd22b3b54..6a5cc67cc4 100644 --- a/akka-actor/src/main/scala/akka/event/EventBus.scala +++ b/akka-actor/src/main/scala/akka/event/EventBus.scala @@ -182,10 +182,9 @@ trait SubchannelClassification { this: EventBus ⇒ */ trait ScanningClassification { self: EventBus ⇒ protected final val subscribers = new 
ConcurrentSkipListSet[(Classifier, Subscriber)](new Comparator[(Classifier, Subscriber)] { - def compare(a: (Classifier, Subscriber), b: (Classifier, Subscriber)): Int = { - val cM = compareClassifiers(a._1, b._1) - if (cM != 0) cM - else compareSubscribers(a._2, b._2) + def compare(a: (Classifier, Subscriber), b: (Classifier, Subscriber)): Int = compareClassifiers(a._1, b._1) match { + case 0 ⇒ compareSubscribers(a._2, b._2) + case other ⇒ other } }) @@ -238,7 +237,7 @@ trait ActorClassification { this: ActorEventBus with ActorClassifier ⇒ import java.util.concurrent.ConcurrentHashMap import scala.annotation.tailrec private val empty = TreeSet.empty[ActorRef] - protected val mappings = new ConcurrentHashMap[ActorRef, TreeSet[ActorRef]](mapSize) + private val mappings = new ConcurrentHashMap[ActorRef, TreeSet[ActorRef]](mapSize) @tailrec protected final def associate(monitored: ActorRef, monitor: ActorRef): Boolean = { @@ -320,9 +319,9 @@ trait ActorClassification { this: ActorEventBus with ActorClassifier ⇒ */ protected def mapSize: Int - def publish(event: Event): Unit = { - val receivers = mappings.get(classify(event)) - if (receivers ne null) receivers foreach { _ ! event } + def publish(event: Event): Unit = mappings.get(classify(event)) match { + case null ⇒ () + case some ⇒ some foreach { _ ! 
event } } def subscribe(subscriber: Subscriber, to: Classifier): Boolean = associate(to, subscriber) diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index ac31b133b3..1230756517 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -29,7 +29,7 @@ trait LoggingBus extends ActorEventBus { import Logging._ - private val guard = new ReentrantGuard + private val guard = new ReentrantGuard //Switch to ReentrantReadWrite private var loggers = Seq.empty[ActorRef] private var _logLevel: LogLevel = _ @@ -97,7 +97,7 @@ trait LoggingBus extends ActorEventBus { val myloggers = for { loggerName ← defaultLoggers - if loggerName != StandardOutLoggerName + if loggerName != StandardOutLogger.getClass.getName } yield { try { system.dynamicAccess.getClassFor[Actor](loggerName) match { @@ -129,7 +129,7 @@ trait LoggingBus extends ActorEventBus { case _: InvalidActorNameException ⇒ // ignore if it is already running } publish(Debug(logName, this.getClass, "Default Loggers started")) - if (!(defaultLoggers contains StandardOutLoggerName)) { + if (!(defaultLoggers contains StandardOutLogger.getClass.getName)) { unsubscribe(StandardOutLogger) } } catch { @@ -163,6 +163,9 @@ trait LoggingBus extends ActorEventBus { publish(Debug(simpleName(this), this.getClass, "all default loggers stopped")) } + /** + * INTERNAL API + */ private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel, logName: String): ActorRef = { val name = "log" + Extension(system).id() + "-" + simpleName(clazz) val actor = system.systemActorOf(Props(clazz), name) @@ -361,17 +364,33 @@ object LogSource { */ object Logging { + /** + * Returns a 'safe' getSimpleName for the provided object's Class + * @param obj + * @return the simple name of the given object's Class + */ def simpleName(obj: AnyRef): String = simpleName(obj.getClass) + /** + * Returns a 'safe' 
getSimpleName for the provided Class + * @param obj + * @return the simple name of the given Class + */ def simpleName(clazz: Class[_]): String = { val n = clazz.getName val i = n.lastIndexOf('.') n.substring(i + 1) } - object Extension extends ExtensionKey[LogExt] + /** + * INTERNAL API + */ + private[akka] object Extension extends ExtensionKey[LogExt] - class LogExt(system: ExtendedActorSystem) extends Extension { + /** + * INTERNAL API + */ + private[akka] class LogExt(system: ExtendedActorSystem) extends Extension { private val loggerId = new AtomicInteger def id() = loggerId.incrementAndGet() } @@ -431,12 +450,6 @@ object Logging { // these type ascriptions/casts are necessary to avoid CCEs during construction while retaining correct type val AllLogLevels = Seq(ErrorLevel: AnyRef, WarningLevel, InfoLevel, DebugLevel).asInstanceOf[Seq[LogLevel]] - val errorFormat = "[ERROR] [%s] [%s] [%s] %s\n%s".intern - val errorFormatWithoutCause = "[ERROR] [%s] [%s] [%s] %s".intern - val warningFormat = "[WARN] [%s] [%s] [%s] %s".intern - val infoFormat = "[INFO] [%s] [%s] [%s] %s".intern - val debugFormat = "[DEBUG] [%s] [%s] [%s] %s".intern - /** * Obtain LoggingAdapter for the given actor system and source object. 
This * will use the system’s event stream and include the system’s address in the @@ -624,27 +637,34 @@ object Logging { // weird return type due to binary compatibility def loggerInitialized(): LoggerInitialized.type = LoggerInitialized + /** + * LoggerInitializationException is thrown to indicate that there was a problem initializing a logger + * @param msg + */ class LoggerInitializationException(msg: String) extends AkkaException(msg) trait StdOutLogger { import java.text.SimpleDateFormat import java.util.Date - val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.S") + private val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.S") + private val errorFormat = "[ERROR] [%s] [%s] [%s] %s\n%s".intern + private val errorFormatWithoutCause = "[ERROR] [%s] [%s] [%s] %s".intern + private val warningFormat = "[WARN] [%s] [%s] [%s] %s".intern + private val infoFormat = "[INFO] [%s] [%s] [%s] %s".intern + private val debugFormat = "[DEBUG] [%s] [%s] [%s] %s".intern - def timestamp = dateFormat.format(new Date) + def timestamp(): String = synchronized { dateFormat.format(new Date) } // SDF isn't threadsafe - def print(event: Any) { - event match { - case e: Error ⇒ error(e) - case e: Warning ⇒ warning(e) - case e: Info ⇒ info(e) - case e: Debug ⇒ debug(e) - case e ⇒ warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e)) - } + def print(event: Any): Unit = event match { + case e: Error ⇒ error(e) + case e: Warning ⇒ warning(e) + case e: Info ⇒ info(e) + case e: Debug ⇒ debug(e) + case e ⇒ warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e)) } - def error(event: Error) = { + def error(event: Error): Unit = { val f = if (event.cause == Error.NoCause) errorFormatWithoutCause else errorFormat println(f.format( timestamp, @@ -654,21 +674,21 @@ object Logging { stackTraceFor(event.cause))) } - def warning(event: Warning) = + def warning(event: 
Warning): Unit = println(warningFormat.format( timestamp, event.thread.getName, event.logSource, event.message)) - def info(event: Info) = + def info(event: Info): Unit = println(infoFormat.format( timestamp, event.thread.getName, event.logSource, event.message)) - def debug(event: Debug) = + def debug(event: Debug): Unit = println(debugFormat.format( timestamp, event.thread.getName, @@ -689,8 +709,8 @@ object Logging { override val toString = "StandardOutLogger" override def !(message: Any)(implicit sender: ActorRef = null): Unit = print(message) } + val StandardOutLogger = new StandardOutLogger - val StandardOutLoggerName = StandardOutLogger.getClass.getName /** * Actor wrapper around the standard output logger. If @@ -708,7 +728,7 @@ object Logging { * Returns the StackTrace for the given Throwable as a String */ def stackTraceFor(e: Throwable): String = e match { - case null | Error.NoCause ⇒ "" + case null | Error.NoCause | _: NoStackTrace ⇒ "" case other ⇒ val sw = new java.io.StringWriter val pw = new java.io.PrintWriter(sw) @@ -752,51 +772,51 @@ trait LoggingAdapter { * These actually implement the passing on of the messages to be logged. * Will not be called if is...Enabled returned false. */ - protected def notifyError(message: String) - protected def notifyError(cause: Throwable, message: String) - protected def notifyWarning(message: String) - protected def notifyInfo(message: String) - protected def notifyDebug(message: String) + protected def notifyError(message: String): Unit + protected def notifyError(cause: Throwable, message: String): Unit + protected def notifyWarning(message: String): Unit + protected def notifyInfo(message: String): Unit + protected def notifyDebug(message: String): Unit /* * The rest is just the widening of the API for the user's convenience. 
*/ - def error(cause: Throwable, message: String) { if (isErrorEnabled) notifyError(cause, message) } - def error(cause: Throwable, template: String, arg1: Any) { if (isErrorEnabled) notifyError(cause, format1(template, arg1)) } - def error(cause: Throwable, template: String, arg1: Any, arg2: Any) { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2)) } - def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any) { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3)) } - def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3, arg4)) } + def error(cause: Throwable, message: String): Unit = { if (isErrorEnabled) notifyError(cause, message) } + def error(cause: Throwable, template: String, arg1: Any): Unit = { if (isErrorEnabled) notifyError(cause, format1(template, arg1)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3, arg4)) } - def error(message: String) { if (isErrorEnabled) notifyError(message) } - def error(template: String, arg1: Any) { if (isErrorEnabled) notifyError(format1(template, arg1)) } - def error(template: String, arg1: Any, arg2: Any) { if (isErrorEnabled) notifyError(format(template, arg1, arg2)) } - def error(template: String, arg1: Any, arg2: Any, arg3: Any) { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3)) } - def error(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isErrorEnabled) 
notifyError(format(template, arg1, arg2, arg3, arg4)) } + def error(message: String): Unit = { if (isErrorEnabled) notifyError(message) } + def error(template: String, arg1: Any): Unit = { if (isErrorEnabled) notifyError(format1(template, arg1)) } + def error(template: String, arg1: Any, arg2: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2)) } + def error(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3)) } + def error(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3, arg4)) } - def warning(message: String) { if (isWarningEnabled) notifyWarning(message) } - def warning(template: String, arg1: Any) { if (isWarningEnabled) notifyWarning(format1(template, arg1)) } - def warning(template: String, arg1: Any, arg2: Any) { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2)) } - def warning(template: String, arg1: Any, arg2: Any, arg3: Any) { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3)) } - def warning(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4)) } + def warning(message: String): Unit = { if (isWarningEnabled) notifyWarning(message) } + def warning(template: String, arg1: Any): Unit = { if (isWarningEnabled) notifyWarning(format1(template, arg1)) } + def warning(template: String, arg1: Any, arg2: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2)) } + def warning(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3)) } + def warning(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4)) } def info(message: String) { if (isInfoEnabled) 
notifyInfo(message) } - def info(template: String, arg1: Any) { if (isInfoEnabled) notifyInfo(format1(template, arg1)) } - def info(template: String, arg1: Any, arg2: Any) { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2)) } - def info(template: String, arg1: Any, arg2: Any, arg3: Any) { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3)) } - def info(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3, arg4)) } + def info(template: String, arg1: Any): Unit = { if (isInfoEnabled) notifyInfo(format1(template, arg1)) } + def info(template: String, arg1: Any, arg2: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2)) } + def info(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3)) } + def info(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3, arg4)) } def debug(message: String) { if (isDebugEnabled) notifyDebug(message) } - def debug(template: String, arg1: Any) { if (isDebugEnabled) notifyDebug(format1(template, arg1)) } - def debug(template: String, arg1: Any, arg2: Any) { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2)) } - def debug(template: String, arg1: Any, arg2: Any, arg3: Any) { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3)) } - def debug(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3, arg4)) } + def debug(template: String, arg1: Any): Unit = { if (isDebugEnabled) notifyDebug(format1(template, arg1)) } + def debug(template: String, arg1: Any, arg2: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2)) } + def debug(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3)) 
} + def debug(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3, arg4)) } def log(level: Logging.LogLevel, message: String) { if (isEnabled(level)) notifyLog(level, message) } - def log(level: Logging.LogLevel, template: String, arg1: Any) { if (isEnabled(level)) notifyLog(level, format1(template, arg1)) } - def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any) { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2)) } - def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any) { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3)) } - def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any) { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3, arg4)) } + def log(level: Logging.LogLevel, template: String, arg1: Any): Unit = { if (isEnabled(level)) notifyLog(level, format1(template, arg1)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3, arg4)) } final def isEnabled(level: Logging.LogLevel): Boolean = level match { case Logging.ErrorLevel ⇒ isErrorEnabled @@ -812,14 +832,14 @@ trait LoggingAdapter { case Logging.DebugLevel ⇒ if (isDebugEnabled) notifyDebug(message) } - private def format1(t: String, arg: Any) = arg match { + private def format1(t: String, arg: Any): String = arg match { case a: Array[_] if !a.getClass.getComponentType.isPrimitive ⇒ format(t, a: _*) case a: Array[_] ⇒ 
format(t, (a map (_.asInstanceOf[AnyRef]): _*)) case x ⇒ format(t, x) } - def format(t: String, arg: Any*) = { - val sb = new StringBuilder + def format(t: String, arg: Any*): String = { + val sb = new StringBuilder //FIXME add some decent size hint here var p = 0 var rest = t while (p < arg.length) { @@ -829,17 +849,15 @@ trait LoggingAdapter { rest = "" p = arg.length } else { - sb.append(rest.substring(0, index)) - sb.append(arg(p)) + sb.append(rest.substring(0, index)).append(arg(p)) rest = rest.substring(index + 2) p += 1 } } - sb.append(rest) - sb.toString + sb.append(rest).toString } } - +//FIXME DOCUMENT class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class[_]) extends LoggingAdapter { import Logging._ @@ -849,14 +867,9 @@ class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class def isInfoEnabled = bus.logLevel >= InfoLevel def isDebugEnabled = bus.logLevel >= DebugLevel - protected def notifyError(message: String) { bus.publish(Error(logSource, logClass, message)) } - - protected def notifyError(cause: Throwable, message: String) { bus.publish(Error(cause, logSource, logClass, message)) } - - protected def notifyWarning(message: String) { bus.publish(Warning(logSource, logClass, message)) } - - protected def notifyInfo(message: String) { bus.publish(Info(logSource, logClass, message)) } - - protected def notifyDebug(message: String) { bus.publish(Debug(logSource, logClass, message)) } - + protected def notifyError(message: String): Unit = bus.publish(Error(logSource, logClass, message)) + protected def notifyError(cause: Throwable, message: String): Unit = bus.publish(Error(cause, logSource, logClass, message)) + protected def notifyWarning(message: String): Unit = bus.publish(Warning(logSource, logClass, message)) + protected def notifyInfo(message: String): Unit = bus.publish(Info(logSource, logClass, message)) + protected def notifyDebug(message: String): Unit = bus.publish(Debug(logSource, logClass, 
message)) } diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index 452b2b6b19..337815eed1 100644 --- a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -26,9 +26,7 @@ object LoggingReceive { */ def apply(r: Receive)(implicit context: ActorContext): Receive = r match { case _: LoggingReceive ⇒ r - case _ ⇒ - if (context.system.settings.AddLoggingReceive) new LoggingReceive(None, r) - else r + case _ ⇒ if (context.system.settings.AddLoggingReceive) new LoggingReceive(None, r) else r } } @@ -37,7 +35,7 @@ object LoggingReceive { * @param source the log source, if not defined the actor of the context will be used */ class LoggingReceive(source: Option[AnyRef], r: Receive)(implicit context: ActorContext) extends Receive { - def isDefinedAt(o: Any) = { + def isDefinedAt(o: Any): Boolean = { val handled = r.isDefinedAt(o) val (str, clazz) = LogSource.fromAnyRef(source getOrElse context.asInstanceOf[ActorCell].actor) context.system.eventStream.publish(Debug(str, clazz, "received " + (if (handled) "handled" else "unhandled") + " message " + o)) diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index 47ce667759..5bd38ad52a 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -24,28 +24,14 @@ trait Function2[T1, T2, R] { * A Procedure is like a Function, but it doesn't produce a return value. */ trait Procedure[T] { - def apply(param: T) -} - -/** - * A Procedure is like a Function, but it doesn't produce a return value. - */ -trait Procedure2[T1, T2] { - def apply(param: T1, param2: T2) -} - -/** - * An executable piece of code that takes no parameters and doesn't return any value. 
- */ -trait SideEffect { - def apply() + def apply(param: T): Unit } /** * An executable piece of code that takes no parameters and doesn't return any value. */ trait Effect { - def apply() + def apply(): Unit } /** @@ -67,9 +53,9 @@ sealed abstract class Option[A] extends java.lang.Iterable[A] { def get: A def isEmpty: Boolean - def isDefined = !isEmpty + def isDefined: Boolean = !isEmpty def asScala: scala.Option[A] - def iterator = if (isEmpty) Iterator.empty else Iterator.single(get) + def iterator: java.util.Iterator[A] = if (isEmpty) Iterator.empty else Iterator.single(get) } object Option { @@ -102,18 +88,18 @@ object Option { * A. */ final case class Some[A](v: A) extends Option[A] { - def get = v - def isEmpty = false - def asScala = scala.Some(v) + def get: A = v + def isEmpty: Boolean = false + def asScala: scala.Some[A] = scala.Some(v) } /** * This case object represents non-existent values. */ private case object None extends Option[Nothing] { - def get = throw new NoSuchElementException("None.get") - def isEmpty = true - def asScala = scala.None + def get: Nothing = throw new NoSuchElementException("None.get") + def isEmpty: Boolean = true + def asScala: scala.None.type = scala.None } implicit def java2ScalaOption[A](o: Option[A]): scala.Option[A] = o.asScala diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index c3295d4b52..14c787d3f6 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -7,17 +7,12 @@ package akka.util import java.util.concurrent.locks.{ ReentrantLock } import java.util.concurrent.atomic.{ AtomicBoolean } -final class ReentrantGuard { - final val lock = new ReentrantLock +final class ReentrantGuard extends ReentrantLock { @inline final def withGuard[T](body: ⇒ T): T = { - lock.lock - try { - body - } finally { - lock.unlock - } + lock() + try body finally unlock() } } @@ -104,19 +99,13 @@ class 
Switch(startAsOn: Boolean = false) { * Executes the provided action and returns its value if the switch is on, waiting for any pending changes to happen before (locking) * Be careful of longrunning or blocking within the provided action as it can lead to deadlocks or bad performance */ - def whileOnYield[T](action: ⇒ T): Option[T] = synchronized { - if (switch.get) Some(action) - else None - } + def whileOnYield[T](action: ⇒ T): Option[T] = synchronized { if (switch.get) Some(action) else None } /** * Executes the provided action and returns its value if the switch is off, waiting for any pending changes to happen before (locking) * Be careful of longrunning or blocking within the provided action as it can lead to deadlocks or bad performance */ - def whileOffYield[T](action: ⇒ T): Option[T] = synchronized { - if (!switch.get) Some(action) - else None - } + def whileOffYield[T](action: ⇒ T): Option[T] = synchronized { if (!switch.get) Some(action) else None } /** * Executes the provided action and returns if the action was executed or not, if the switch is on, waiting for any pending changes to happen before (locking) @@ -144,9 +133,7 @@ class Switch(startAsOn: Boolean = false) { * Executes the provided callbacks depending on if the switch is either on or off waiting for any pending changes to happen before (locking) * Be careful of longrunning or blocking within the provided action as it can lead to deadlocks or bad performance */ - def fold[T](on: ⇒ T)(off: ⇒ T) = synchronized { - if (switch.get) on else off - } + def fold[T](on: ⇒ T)(off: ⇒ T): T = synchronized { if (switch.get) on else off } /** * Executes the given code while holding this switch’s lock, i.e. protected from concurrent modification of the switch status. 
From 5afe6601ff3ca7168ee8fe1aeb3d52bd8a3cfbbd Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 14:55:38 +0200 Subject: [PATCH 051/538] Removing ActorTimeoutException since it was only used in GracefulStop, and changed GracefulStop to use PromiseActorRef instead of spawning a toplevel actor --- .../test/scala/akka/pattern/PatternSpec.scala | 9 +---- .../src/main/scala/akka/actor/Actor.scala | 6 --- .../main/scala/akka/pattern/AskSupport.scala | 4 +- .../akka/pattern/GracefulStopSupport.scala | 37 ++++++++----------- 4 files changed, 18 insertions(+), 38 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala index 2776beabce..68e6d40824 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -7,11 +7,9 @@ package akka.pattern import akka.testkit.AkkaSpec import akka.actor.Props import akka.actor.Actor -import akka.actor.ActorTimeoutException import akka.util.Duration import akka.util.duration._ import akka.dispatch.{ Future, Promise, Await } -import java.lang.IllegalStateException object PatternSpec { case class Work(duration: Duration) @@ -41,13 +39,10 @@ class PatternSpec extends AkkaSpec { Await.ready(gracefulStop(target, 1 millis), 1 second) } - "complete Future with ActorTimeoutException when actor not terminated within timeout" in { + "complete Future with AskTimeoutException when actor not terminated within timeout" in { val target = system.actorOf(Props[TargetActor]) target ! 
Work(250 millis) - val result = gracefulStop(target, 10 millis) - intercept[ActorTimeoutException] { - Await.result(result, 200 millis) - } + intercept[AskTimeoutException] { Await.result(gracefulStop(target, 10 millis), 200 millis) } } } diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 3e233a2056..7c020925eb 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -129,12 +129,6 @@ case class ActorInitializationException private[akka] (actor: ActorRef, message: def this(msg: String) = this(null, msg, null) } -//FIXME: Only used by gracefulStop we should remove this if possible -class ActorTimeoutException private[akka] (message: String, cause: Throwable = null) - extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null) -} - /** * InvalidMessageException is thrown when an invalid message is sent to an Actor. * Technically it's only "null" which is an InvalidMessageException but who knows, diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index ef4217039d..ede65b17da 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -272,9 +272,7 @@ private[akka] object PromiseActorRef { val result = Promise[Any]()(provider.dispatcher) val a = new PromiseActorRef(provider, result) val f = provider.scheduler.scheduleOnce(timeout.duration) { result.tryComplete(Left(new AskTimeoutException("Timed out"))) } - result onComplete { _ ⇒ - try a.stop() finally f.cancel() - } + result onComplete { _ ⇒ try a.stop() finally f.cancel() } a } } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index d6fbd31c1e..8b441f3d5b 100644 --- 
a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -4,9 +4,9 @@ package akka.pattern -import akka.actor.{ ActorRef, Actor, ActorSystem, Props, PoisonPill, Terminated, ReceiveTimeout, ActorTimeoutException } import akka.dispatch.{ Promise, Future } -import akka.util.Duration +import akka.actor._ +import akka.util.{ Timeout, Duration } trait GracefulStopSupport { /** @@ -14,7 +14,8 @@ trait GracefulStopSupport { * existing messages of the target actor has been processed and the actor has been * terminated. * - * Useful when you need to wait for termination or compose ordered termination of several actors. + * Useful when you need to wait for termination or compose ordered termination of several actors, + * which should only be done outside of the ActorSystem as blocking inside Actors is discouraged. * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] * is completed with failure [[akka.actor.ActorTimeoutException]]. @@ -22,26 +23,18 @@ trait GracefulStopSupport { def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { if (target.isTerminated) { Promise.successful(true) - } else { - val result = Promise[Boolean]() - system.actorOf(Props(new Actor { - // Terminated will be received when target has been stopped - context watch target + } else system match { + case e: ExtendedActorSystem ⇒ + val ref = PromiseActorRef(e.provider, Timeout(timeout)) + e.deathWatch.subscribe(ref, target) + ref.result onComplete { case x ⇒ println(x) } + ref.result onComplete { + case Right(Terminated(`target`)) ⇒ () // Ignore + case _ ⇒ e.deathWatch.unsubscribe(ref, target) + } // Just making sure we're not leaking here target ! 
PoisonPill - // ReceiveTimeout will be received if nothing else is received within the timeout - context setReceiveTimeout timeout - - def receive = { - case Terminated(a) if a == target ⇒ - result success true - context stop self - case ReceiveTimeout ⇒ - result failure new ActorTimeoutException( - "Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)) - context stop self - } - })) - result + ref.result map { case Terminated(`target`) ⇒ true } + case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") } } } \ No newline at end of file From 82a29bbd3b94a2d7f141996b8543b30c427c6df0 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Fri, 18 May 2012 15:09:52 +0200 Subject: [PATCH 052/538] Tighten akka import version range and make some more packages private --- project/AkkaBuild.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 59f7c62a04..5066d3929a 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -443,9 +443,9 @@ object Dependency { object OSGi { val actor = osgiSettings ++ Seq( - OsgiKeys.exportPackage := Seq("akka*", "com.typesafe.config.*", "com.eaio.*", "org.jboss.netty.akka.util"), + OsgiKeys.exportPackage := Seq("akka*", "com.typesafe.config.*"), OsgiKeys.importPackage := Seq("!sun.misc", scalaImport()), - OsgiKeys.privatePackage := Seq("org.jboss.netty.akka.util.internal") + OsgiKeys.privatePackage := Seq("org.jboss.netty.akka.util.*", "com.eaio.*") ) val agent = osgiSettings ++ Seq( @@ -486,6 +486,6 @@ object OSGi { ) def scalaImport(packageName: String = "scala.*") = "%s;version=\"[2.9.1,2.10)\"".format(packageName) - def akkaImport(packageName: String = "akka.*") = "%s;version=\"[2.1,3)\"".format(packageName) + def akkaImport(packageName: String = "akka.*") = "%s;version=\"[2.1,2.2)\"".format(packageName) } From 2bb255b480d64aa60b5540705d4b0d62114595a3 Mon Sep 17 00:00:00 2001 From: Viktor 
Klang Date: Fri, 18 May 2012 15:04:08 +0200 Subject: [PATCH 053/538] Removing ActorTimeoutException everywhere... I swear it... --- akka-actor/src/main/scala/akka/pattern/AskSupport.scala | 6 +++--- .../src/main/scala/akka/pattern/GracefulStopSupport.scala | 2 +- akka-actor/src/main/scala/akka/pattern/Patterns.scala | 6 +++--- .../java/code/akka/docs/actor/UntypedActorDocTestBase.java | 4 ++-- akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala | 3 +-- .../scala/akka/transactor/CoordinatedIncrementSpec.scala | 4 ++-- .../src/test/scala/akka/transactor/FickleFriendsSpec.scala | 4 ++-- .../src/test/scala/akka/transactor/TransactorSpec.scala | 4 ++-- 8 files changed, 16 insertions(+), 17 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index ede65b17da..cfaa0a182b 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -46,7 +46,7 @@ trait AskSupport { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). @@ -96,7 +96,7 @@ trait AskSupport { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. 
The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). @@ -126,7 +126,7 @@ trait AskSupport { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 8b441f3d5b..9c8b6ae5ff 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -18,7 +18,7 @@ trait GracefulStopSupport { * which should only be done outside of the ActorSystem as blocking inside Actors is discouraged. * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] - * is completed with failure [[akka.actor.ActorTimeoutException]]. + * is completed with failure [[akka.pattern.AskTimeoutException]]. 
*/ def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { if (target.isTerminated) { diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index b58e9a8fc1..853b46e318 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -18,7 +18,7 @@ object Patterns { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). @@ -49,7 +49,7 @@ object Patterns { * Sends a message asynchronously and returns a [[akka.dispatch.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the + * will be completed with an [[akka.pattern.AskTimeoutException]] after the * given timeout has expired; this is independent from any timeout applied * while awaiting a result for this future (i.e. in * `Await.result(..., timeout)`). @@ -100,7 +100,7 @@ object Patterns { * Useful when you need to wait for termination or compose ordered termination of several actors. * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] - * is completed with failure [[akka.actor.ActorTimeoutException]]. + * is completed with failure [[akka.pattern.AskTimeoutException]]. 
*/ def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = scalaGracefulStop(target, timeout)(system).asInstanceOf[Future[java.lang.Boolean]] diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java index 65ff37c10e..146131f61e 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java @@ -36,7 +36,7 @@ import static akka.pattern.Patterns.gracefulStop; import akka.dispatch.Future; import akka.dispatch.Await; import akka.util.Duration; -import akka.actor.ActorTimeoutException; +import akka.pattern.AskTimeoutException; //#import-gracefulStop //#import-askPipe @@ -207,7 +207,7 @@ public class UntypedActorDocTestBase { Future stopped = gracefulStop(actorRef, Duration.create(5, TimeUnit.SECONDS), system); Await.result(stopped, Duration.create(6, TimeUnit.SECONDS)); // the actor has been stopped - } catch (ActorTimeoutException e) { + } catch (AskTimeoutException e) { // the actor wasn't stopped within 5 seconds } //#gracefulStop diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index 0bc540f970..8aed17605c 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -326,14 +326,13 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { //#gracefulStop import akka.pattern.gracefulStop import akka.dispatch.Await - import akka.actor.ActorTimeoutException try { val stopped: Future[Boolean] = gracefulStop(actorRef, 5 seconds)(system) Await.result(stopped, 6 seconds) // the actor has been stopped } catch { - case e: ActorTimeoutException ⇒ // the actor wasn't stopped within 5 seconds + case e: akka.pattern.AskTimeoutException ⇒ // the actor wasn't stopped within 5 seconds } //#gracefulStop 
} diff --git a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala index 9c019a56a5..c76a5a701c 100644 --- a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala @@ -12,7 +12,7 @@ import akka.util.duration._ import akka.util.Timeout import akka.testkit._ import scala.concurrent.stm._ -import akka.pattern.ask +import akka.pattern.{ AskTimeoutException, ask } object CoordinatedIncrement { @@ -96,7 +96,7 @@ class CoordinatedIncrementSpec extends AkkaSpec(CoordinatedIncrement.config) wit val ignoreExceptions = Seq( EventFilter[ExpectedFailureException](), EventFilter[CoordinatedTransactionException](), - EventFilter[ActorTimeoutException]()) + EventFilter[AskTimeoutException]()) filterEvents(ignoreExceptions) { val (counters, failer) = actorOfs val coordinated = Coordinated() diff --git a/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala b/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala index 4f7fc89c14..9deee7b9cc 100644 --- a/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala @@ -15,7 +15,7 @@ import akka.testkit.TestEvent.Mute import scala.concurrent.stm._ import scala.util.Random.{ nextInt ⇒ random } import java.util.concurrent.CountDownLatch -import akka.pattern.ask +import akka.pattern.{ AskTimeoutException, ask } object FickleFriends { case class FriendlyIncrement(friends: Seq[ActorRef], timeout: Timeout, latch: CountDownLatch) @@ -120,7 +120,7 @@ class FickleFriendsSpec extends AkkaSpec with BeforeAndAfterAll { val ignoreExceptions = Seq( EventFilter[ExpectedFailureException](), EventFilter[CoordinatedTransactionException](), - EventFilter[ActorTimeoutException]()) + EventFilter[AskTimeoutException]()) 
system.eventStream.publish(Mute(ignoreExceptions)) val (counters, coordinator) = actorOfs val latch = new CountDownLatch(1) diff --git a/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala b/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala index 1954c9a13b..df9723ffd2 100644 --- a/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala @@ -10,7 +10,7 @@ import akka.util.duration._ import akka.util.Timeout import akka.testkit._ import scala.concurrent.stm._ -import akka.pattern.ask +import akka.pattern.{ AskTimeoutException, ask } object TransactorIncrement { case class Increment(friends: Seq[ActorRef], latch: TestLatch) @@ -105,7 +105,7 @@ class TransactorSpec extends AkkaSpec { val ignoreExceptions = Seq( EventFilter[ExpectedFailureException](), EventFilter[CoordinatedTransactionException](), - EventFilter[ActorTimeoutException]()) + EventFilter[AskTimeoutException]()) filterEvents(ignoreExceptions) { val (counters, failer) = createTransactors val failLatch = TestLatch(numCounters) From 07bf11d326132745fa1215570b26c12f7e49dee7 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 15:40:51 +0200 Subject: [PATCH 054/538] Removing debug equipment left inside the patient. 
--- akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 9c8b6ae5ff..5f78e8ba27 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -27,7 +27,6 @@ trait GracefulStopSupport { case e: ExtendedActorSystem ⇒ val ref = PromiseActorRef(e.provider, Timeout(timeout)) e.deathWatch.subscribe(ref, target) - ref.result onComplete { case x ⇒ println(x) } ref.result onComplete { case Right(Terminated(`target`)) ⇒ () // Ignore case _ ⇒ e.deathWatch.unsubscribe(ref, target) From c86051505b3c78629ec416f6a26b60552ac93c80 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 18 May 2012 15:55:04 +0200 Subject: [PATCH 055/538] wrap up MultiNodeSpec, see #1934 and #2063 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - restructure message classes in sealed traits according to message flow direction and include confirmed/unconfirmed status in the type - add GetAddress query for obtaining the remote transport address of another test participant - add reconnects to Player - add small DSL with runOn(node...), ifNode(node...)()() and node():ActorPath - rewrite TestConductorSpec to use that DSL and run within a single test procedure instead of separate NodeX classes - hook up that test into current multi-jvm infrastructure temporarily for testing (will use Björn’s new remote-multi-jvm stuff later) --- .../testconductor/TestConductorProtocol.java | 809 ++++++++++++++++-- .../main/protocol/TestConductorProtocol.proto | 8 +- .../src/main/resources/reference.conf | 15 +- .../akka/remote/testconductor/Conductor.scala | 109 +-- .../akka/remote/testconductor/DataTypes.scala | 55 +- .../akka/remote/testconductor/Extension.scala | 8 +- 
.../akka/remote/testconductor/Player.scala | 172 ++-- .../testconductor/RemoteConnection.scala | 3 +- .../testconductor/TestConductorSpec.scala | 140 +-- .../remote/testconductor/BarrierSpec.scala | 106 +-- .../remote/testconductor/ControllerSpec.scala | 8 +- .../akka/remote/testkit/MultiNodeSpec.scala | 157 ++++ scripts/fix-protobuf.sh | 3 + 13 files changed, 1291 insertions(+), 302 deletions(-) create mode 100644 akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala create mode 100755 scripts/fix-protobuf.sh diff --git a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index 3d6c145097..4ae1aae07a 100644 --- a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -176,6 +176,11 @@ public final class TestConductorProtocol { // optional string done = 4; boolean hasDone(); String getDone(); + + // optional .AddressRequest addr = 5; + boolean hasAddr(); + akka.remote.testconductor.TestConductorProtocol.AddressRequest getAddr(); + akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder getAddrOrBuilder(); } public static final class Wrapper extends com.google.protobuf.GeneratedMessage @@ -277,11 +282,25 @@ public final class TestConductorProtocol { } } + // optional .AddressRequest addr = 5; + public static final int ADDR_FIELD_NUMBER = 5; + private akka.remote.testconductor.TestConductorProtocol.AddressRequest addr_; + public boolean hasAddr() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequest getAddr() { + return addr_; + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder getAddrOrBuilder() { + return addr_; + } + private void initFields() { hello_ = 
akka.remote.testconductor.TestConductorProtocol.Hello.getDefaultInstance(); barrier_ = akka.remote.testconductor.TestConductorProtocol.EnterBarrier.getDefaultInstance(); failure_ = akka.remote.testconductor.TestConductorProtocol.InjectFailure.getDefaultInstance(); done_ = ""; + addr_ = akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -306,6 +325,12 @@ public final class TestConductorProtocol { return false; } } + if (hasAddr()) { + if (!getAddr().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -325,6 +350,9 @@ public final class TestConductorProtocol { if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, getDoneBytes()); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, addr_); + } getUnknownFields().writeTo(output); } @@ -350,6 +378,10 @@ public final class TestConductorProtocol { size += com.google.protobuf.CodedOutputStream .computeBytesSize(4, getDoneBytes()); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, addr_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -460,7 +492,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -469,6 +501,7 @@ public final class TestConductorProtocol { getHelloFieldBuilder(); getBarrierFieldBuilder(); getFailureFieldBuilder(); + getAddrFieldBuilder(); } } private static Builder create() { @@ -497,6 +530,12 @@ public final class TestConductorProtocol { bitField0_ = (bitField0_ & ~0x00000004); done_ = ""; bitField0_ = (bitField0_ & ~0x00000008); + if (addrBuilder_ == 
null) { + addr_ = akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); + } else { + addrBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -563,6 +602,14 @@ public final class TestConductorProtocol { to_bitField0_ |= 0x00000008; } result.done_ = done_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (addrBuilder_ == null) { + result.addr_ = addr_; + } else { + result.addr_ = addrBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -591,6 +638,9 @@ public final class TestConductorProtocol { if (other.hasDone()) { setDone(other.getDone()); } + if (other.hasAddr()) { + mergeAddr(other.getAddr()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -614,6 +664,12 @@ public final class TestConductorProtocol { return false; } } + if (hasAddr()) { + if (!getAddr().isInitialized()) { + + return false; + } + } return true; } @@ -672,6 +728,15 @@ public final class TestConductorProtocol { done_ = input.readBytes(); break; } + case 42: { + akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.AddressRequest.newBuilder(); + if (hasAddr()) { + subBuilder.mergeFrom(getAddr()); + } + input.readMessage(subBuilder, extensionRegistry); + setAddr(subBuilder.buildPartial()); + break; + } } } } @@ -984,6 +1049,96 @@ public final class TestConductorProtocol { onChanged(); } + // optional .AddressRequest addr = 5; + private akka.remote.testconductor.TestConductorProtocol.AddressRequest addr_ = akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.AddressRequest, akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder, akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder> addrBuilder_; + 
public boolean hasAddr() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequest getAddr() { + if (addrBuilder_ == null) { + return addr_; + } else { + return addrBuilder_.getMessage(); + } + } + public Builder setAddr(akka.remote.testconductor.TestConductorProtocol.AddressRequest value) { + if (addrBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + addr_ = value; + onChanged(); + } else { + addrBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + public Builder setAddr( + akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder builderForValue) { + if (addrBuilder_ == null) { + addr_ = builderForValue.build(); + onChanged(); + } else { + addrBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + public Builder mergeAddr(akka.remote.testconductor.TestConductorProtocol.AddressRequest value) { + if (addrBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + addr_ != akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance()) { + addr_ = + akka.remote.testconductor.TestConductorProtocol.AddressRequest.newBuilder(addr_).mergeFrom(value).buildPartial(); + } else { + addr_ = value; + } + onChanged(); + } else { + addrBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + public Builder clearAddr() { + if (addrBuilder_ == null) { + addr_ = akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); + onChanged(); + } else { + addrBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder getAddrBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getAddrFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder 
getAddrOrBuilder() { + if (addrBuilder_ != null) { + return addrBuilder_.getMessageOrBuilder(); + } else { + return addr_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.AddressRequest, akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder, akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder> + getAddrFieldBuilder() { + if (addrBuilder_ == null) { + addrBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.AddressRequest, akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder, akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder>( + addr_, + getParentForChildren(), + isClean()); + addr_ = null; + } + return addrBuilder_; + } + // @@protoc_insertion_point(builder_scope:Wrapper) } @@ -1242,7 +1397,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1544,9 +1699,9 @@ public final class TestConductorProtocol { boolean hasName(); String getName(); - // optional bool failed = 2; - boolean hasFailed(); - boolean getFailed(); + // optional bool status = 2; + boolean hasStatus(); + boolean getStatus(); } public static final class EnterBarrier extends com.google.protobuf.GeneratedMessage @@ -1609,19 +1764,19 @@ public final class TestConductorProtocol { } } - // optional bool failed = 2; - public static final int FAILED_FIELD_NUMBER = 2; - private boolean failed_; - public boolean hasFailed() { + // optional bool status = 2; + public static final int STATUS_FIELD_NUMBER = 2; + private boolean status_; + public boolean hasStatus() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public boolean getFailed() { - return failed_; + public boolean getStatus() { + return status_; } private 
void initFields() { name_ = ""; - failed_ = false; + status_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -1643,7 +1798,7 @@ public final class TestConductorProtocol { output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, failed_); + output.writeBool(2, status_); } getUnknownFields().writeTo(output); } @@ -1660,7 +1815,7 @@ public final class TestConductorProtocol { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, failed_); + .computeBoolSize(2, status_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -1772,7 +1927,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1788,7 +1943,7 @@ public final class TestConductorProtocol { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - failed_ = false; + status_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -1835,7 +1990,7 @@ public final class TestConductorProtocol { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.failed_ = failed_; + result.status_ = status_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1855,8 +2010,8 @@ public final class TestConductorProtocol { if (other.hasName()) { setName(other.getName()); } - if (other.hasFailed()) { - setFailed(other.getFailed()); + if (other.hasStatus()) { + setStatus(other.getStatus()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -1900,7 +2055,7 @@ public final class TestConductorProtocol { } case 16: { bitField0_ |= 0x00000002; - failed_ = input.readBool(); + status_ = input.readBool(); break; } } @@ -1945,23 +2100,23 @@ public final 
class TestConductorProtocol { onChanged(); } - // optional bool failed = 2; - private boolean failed_ ; - public boolean hasFailed() { + // optional bool status = 2; + private boolean status_ ; + public boolean hasStatus() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public boolean getFailed() { - return failed_; + public boolean getStatus() { + return status_; } - public Builder setFailed(boolean value) { + public Builder setStatus(boolean value) { bitField0_ |= 0x00000002; - failed_ = value; + status_ = value; onChanged(); return this; } - public Builder clearFailed() { + public Builder clearStatus() { bitField0_ = (bitField0_ & ~0x00000002); - failed_ = false; + status_ = false; onChanged(); return this; } @@ -1977,6 +2132,544 @@ public final class TestConductorProtocol { // @@protoc_insertion_point(class_scope:EnterBarrier) } + public interface AddressRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string node = 1; + boolean hasNode(); + String getNode(); + + // optional .Address addr = 2; + boolean hasAddr(); + akka.remote.testconductor.TestConductorProtocol.Address getAddr(); + akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddrOrBuilder(); + } + public static final class AddressRequest extends + com.google.protobuf.GeneratedMessage + implements AddressRequestOrBuilder { + // Use AddressRequest.newBuilder() to construct. 
+ private AddressRequest(Builder builder) { + super(builder); + } + private AddressRequest(boolean noInit) {} + + private static final AddressRequest defaultInstance; + public static AddressRequest getDefaultInstance() { + return defaultInstance; + } + + public AddressRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_AddressRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_AddressRequest_fieldAccessorTable; + } + + private int bitField0_; + // required string node = 1; + public static final int NODE_FIELD_NUMBER = 1; + private java.lang.Object node_; + public boolean hasNode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNode() { + java.lang.Object ref = node_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + node_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNodeBytes() { + java.lang.Object ref = node_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + node_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .Address addr = 2; + public static final int ADDR_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.Address addr_; + public boolean hasAddr() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.Address getAddr() { + return addr_; + } + public 
akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddrOrBuilder() { + return addr_; + } + + private void initFields() { + node_ = ""; + addr_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNode()) { + memoizedIsInitialized = 0; + return false; + } + if (hasAddr()) { + if (!getAddr().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNodeBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, addr_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNodeBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, addr_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + 
public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return 
builder.buildParsed(); + } else { + return null; + } + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.remote.testconductor.TestConductorProtocol.AddressRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.remote.testconductor.TestConductorProtocol.AddressRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.remote.testconductor.TestConductorProtocol.AddressRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_AddressRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.remote.testconductor.TestConductorProtocol.internal_static_AddressRequest_fieldAccessorTable; + } + + // Construct using akka.remote.testconductor.TestConductorProtocol.AddressRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void 
maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getAddrFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + node_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (addrBuilder_ == null) { + addr_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + } else { + addrBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDescriptor(); + } + + public akka.remote.testconductor.TestConductorProtocol.AddressRequest getDefaultInstanceForType() { + return akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance(); + } + + public akka.remote.testconductor.TestConductorProtocol.AddressRequest build() { + akka.remote.testconductor.TestConductorProtocol.AddressRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private akka.remote.testconductor.TestConductorProtocol.AddressRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + akka.remote.testconductor.TestConductorProtocol.AddressRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public akka.remote.testconductor.TestConductorProtocol.AddressRequest buildPartial() { + akka.remote.testconductor.TestConductorProtocol.AddressRequest result = new akka.remote.testconductor.TestConductorProtocol.AddressRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 
0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.node_ = node_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (addrBuilder_ == null) { + result.addr_ = addr_; + } else { + result.addr_ = addrBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.remote.testconductor.TestConductorProtocol.AddressRequest) { + return mergeFrom((akka.remote.testconductor.TestConductorProtocol.AddressRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.remote.testconductor.TestConductorProtocol.AddressRequest other) { + if (other == akka.remote.testconductor.TestConductorProtocol.AddressRequest.getDefaultInstance()) return this; + if (other.hasNode()) { + setNode(other.getNode()); + } + if (other.hasAddr()) { + mergeAddr(other.getAddr()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNode()) { + + return false; + } + if (hasAddr()) { + if (!getAddr().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + node_ = input.readBytes(); + break; + } + case 18: { + 
akka.remote.testconductor.TestConductorProtocol.Address.Builder subBuilder = akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(); + if (hasAddr()) { + subBuilder.mergeFrom(getAddr()); + } + input.readMessage(subBuilder, extensionRegistry); + setAddr(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required string node = 1; + private java.lang.Object node_ = ""; + public boolean hasNode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNode() { + java.lang.Object ref = node_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + node_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setNode(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + node_ = value; + onChanged(); + return this; + } + public Builder clearNode() { + bitField0_ = (bitField0_ & ~0x00000001); + node_ = getDefaultInstance().getNode(); + onChanged(); + return this; + } + void setNode(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + node_ = value; + onChanged(); + } + + // optional .Address addr = 2; + private akka.remote.testconductor.TestConductorProtocol.Address addr_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> addrBuilder_; + public boolean hasAddr() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public akka.remote.testconductor.TestConductorProtocol.Address getAddr() { + if (addrBuilder_ == null) { + return addr_; + } else { + return addrBuilder_.getMessage(); + } + } + public Builder setAddr(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addrBuilder_ 
== null) { + if (value == null) { + throw new NullPointerException(); + } + addr_ = value; + onChanged(); + } else { + addrBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setAddr( + akka.remote.testconductor.TestConductorProtocol.Address.Builder builderForValue) { + if (addrBuilder_ == null) { + addr_ = builderForValue.build(); + onChanged(); + } else { + addrBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeAddr(akka.remote.testconductor.TestConductorProtocol.Address value) { + if (addrBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + addr_ != akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance()) { + addr_ = + akka.remote.testconductor.TestConductorProtocol.Address.newBuilder(addr_).mergeFrom(value).buildPartial(); + } else { + addr_ = value; + } + onChanged(); + } else { + addrBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearAddr() { + if (addrBuilder_ == null) { + addr_ = akka.remote.testconductor.TestConductorProtocol.Address.getDefaultInstance(); + onChanged(); + } else { + addrBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public akka.remote.testconductor.TestConductorProtocol.Address.Builder getAddrBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getAddrFieldBuilder().getBuilder(); + } + public akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder getAddrOrBuilder() { + if (addrBuilder_ != null) { + return addrBuilder_.getMessageOrBuilder(); + } else { + return addr_; + } + } + private com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder> + getAddrFieldBuilder() { + if (addrBuilder_ == null) { + addrBuilder_ = 
new com.google.protobuf.SingleFieldBuilder< + akka.remote.testconductor.TestConductorProtocol.Address, akka.remote.testconductor.TestConductorProtocol.Address.Builder, akka.remote.testconductor.TestConductorProtocol.AddressOrBuilder>( + addr_, + getParentForChildren(), + isClean()); + addr_ = null; + } + return addrBuilder_; + } + + // @@protoc_insertion_point(builder_scope:AddressRequest) + } + + static { + defaultInstance = new AddressRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AddressRequest) + } + public interface AddressOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -2312,7 +3005,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2918,7 +3611,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3334,6 +4027,11 @@ public final class TestConductorProtocol { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_EnterBarrier_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AddressRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AddressRequest_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_Address_descriptor; private static @@ -3353,21 +4051,24 @@ public final class TestConductorProtocol { descriptor; static { java.lang.String[] descriptorData = { - "\n\033TestConductorProtocol.proto\"o\n\007Wrapper" + - "\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(\013" + - 
"2\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Injec" + - "tFailure\022\014\n\004done\030\004 \001(\t\"0\n\005Hello\022\014\n\004name\030" + - "\001 \002(\t\022\031\n\007address\030\002 \002(\0132\010.Address\",\n\014Ente" + - "rBarrier\022\014\n\004name\030\001 \002(\t\022\016\n\006failed\030\002 \001(\010\"G" + - "\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006system\030\002 " + - "\002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInje" + - "ctFailure\022\032\n\007failure\030\001 \002(\0162\t.FailType\022\035\n" + - "\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007address\030", - "\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(\002\022\021\n\texi" + - "tValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Throttle\020\001\022\016" + - "\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shutdown\020\004*" + - ",\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receive\020\002\022\010\n\004B" + - "oth\020\003B\035\n\031akka.remote.testconductorH\001" + "\n\033TestConductorProtocol.proto\"\216\001\n\007Wrappe" + + "r\022\025\n\005hello\030\001 \001(\0132\006.Hello\022\036\n\007barrier\030\002 \001(" + + "\0132\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Inje" + + "ctFailure\022\014\n\004done\030\004 \001(\t\022\035\n\004addr\030\005 \001(\0132\017." 
+ + "AddressRequest\"0\n\005Hello\022\014\n\004name\030\001 \002(\t\022\031\n" + + "\007address\030\002 \002(\0132\010.Address\",\n\014EnterBarrier" + + "\022\014\n\004name\030\001 \002(\t\022\016\n\006status\030\002 \001(\010\"6\n\016Addres" + + "sRequest\022\014\n\004node\030\001 \002(\t\022\026\n\004addr\030\002 \001(\0132\010.A" + + "ddress\"G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006s" + + "ystem\030\002 \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"", + "\212\001\n\rInjectFailure\022\032\n\007failure\030\001 \002(\0162\t.Fai" + + "lType\022\035\n\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007" + + "address\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(" + + "\002\022\021\n\texitValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Thro" + + "ttle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shu" + + "tdown\020\004*,\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receiv" + + "e\020\002\022\010\n\004Both\020\003B\035\n\031akka.remote.testconduct" + + "orH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -3379,7 +4080,7 @@ public final class TestConductorProtocol { internal_static_Wrapper_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Wrapper_descriptor, - new java.lang.String[] { "Hello", "Barrier", "Failure", "Done", }, + new java.lang.String[] { "Hello", "Barrier", "Failure", "Done", "Addr", }, akka.remote.testconductor.TestConductorProtocol.Wrapper.class, akka.remote.testconductor.TestConductorProtocol.Wrapper.Builder.class); internal_static_Hello_descriptor = @@ -3395,11 +4096,19 @@ public final class TestConductorProtocol { internal_static_EnterBarrier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_EnterBarrier_descriptor, - new java.lang.String[] { "Name", "Failed", }, + new java.lang.String[] { "Name", "Status", }, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class); - internal_static_Address_descriptor = + internal_static_AddressRequest_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_AddressRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AddressRequest_descriptor, + new java.lang.String[] { "Node", "Addr", }, + akka.remote.testconductor.TestConductorProtocol.AddressRequest.class, + akka.remote.testconductor.TestConductorProtocol.AddressRequest.Builder.class); + internal_static_Address_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_Address_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Address_descriptor, @@ -3407,7 +4116,7 @@ public final class TestConductorProtocol { akka.remote.testconductor.TestConductorProtocol.Address.class, akka.remote.testconductor.TestConductorProtocol.Address.Builder.class); internal_static_InjectFailure_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_InjectFailure_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_InjectFailure_descriptor, diff --git a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto index 007965b2e8..648234614e 100644 --- a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto @@ -16,6 +16,7 @@ message Wrapper { optional EnterBarrier barrier = 2; optional InjectFailure failure = 3; optional string done = 4; + optional AddressRequest addr = 5; } message Hello { @@ -25,7 
+26,12 @@ message Hello { message EnterBarrier { required string name = 1; - optional bool failed = 2; + optional bool status = 2; +} + +message AddressRequest { + required string node = 1; + optional Address addr = 2; } message Address { diff --git a/akka-remote-tests/src/main/resources/reference.conf b/akka-remote-tests/src/main/resources/reference.conf index f0d8a9d6ae..40c16c4ccd 100644 --- a/akka-remote-tests/src/main/resources/reference.conf +++ b/akka-remote-tests/src/main/resources/reference.conf @@ -20,15 +20,14 @@ akka { # than HashedWheelTimer resolution (would not make sense) packet-split-threshold = 100ms - # Default port to start the conductor on; 0 means - port = 0 + # amount of time for the ClientFSM to wait for the connection to the conductor + # to be successful + connect-timeout = 20s - # Hostname of the TestConductor server, used by the server to bind to the IP - # and by the client to connect to it. - host = localhost + # Number of connect attempts to be made to the conductor controller + client-reconnects = 10 - # Name of the TestConductor client (for identification on the server e.g. for - # failure injection) - name = "noname" + # minimum time interval which is to be inserted between reconnect attempts + reconnect-backoff = 1s } } \ No newline at end of file diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 09a6faeeb0..d4fa3152e6 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -71,11 +71,11 @@ trait Conductor { this: TestConductorExt ⇒ * @param participants gives the number of participants which shall connect * before any of their startClient() operations complete. 
*/ - def startController(participants: Int): Future[Int] = { + def startController(participants: Int, name: String, controllerPort: InetSocketAddress): Future[InetSocketAddress] = { if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") - _controller = system.actorOf(Props(new Controller(participants)), "controller") + _controller = system.actorOf(Props(new Controller(participants, controllerPort)), "controller") import Settings.BarrierTimeout - controller ? GetPort flatMap { case port: Int ⇒ startClient(port) map (_ ⇒ port) } + controller ? GetSockAddr flatMap { case sockAddr: InetSocketAddress ⇒ startClient(name, sockAddr) map (_ ⇒ sockAddr) } } /** @@ -83,9 +83,9 @@ trait Conductor { this: TestConductorExt ⇒ * will deviate from the configuration in `akka.testconductor.port` in case * that was given as zero. */ - def port: Future[Int] = { + def sockAddr: Future[InetSocketAddress] = { import Settings.QueryTimeout - controller ? GetPort mapTo + controller ? GetSockAddr mapTo } /** @@ -280,7 +280,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x) channel.close() stop() - case Event(Send(msg), _) ⇒ + case Event(ToClient(msg), _) ⇒ log.warning("cannot send {} in state Initial", msg) stay case Event(StateTimeout, _) ⇒ @@ -290,22 +290,22 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } when(Ready) { - case Event(msg: EnterBarrier, _) ⇒ - controller ! msg - stay case Event(d: Done, Some(s)) ⇒ s ! d stay using None + case Event(op: ServerOp, _) ⇒ + controller ! 
op + stay case Event(msg: NetworkOp, _) ⇒ log.warning("client {} sent unsupported message {}", getAddrString(channel), msg) stop() - case Event(Send(msg @ (_: EnterBarrier | _: Done)), _) ⇒ + case Event(ToClient(msg: UnconfirmedClientOp), _) ⇒ channel.write(msg) stay - case Event(Send(msg), None) ⇒ + case Event(ToClient(msg), None) ⇒ channel.write(msg) stay using Some(sender) - case Event(Send(msg), _) ⇒ + case Event(ToClient(msg), _) ⇒ log.warning("cannot send {} while waiting for previous ACK", msg) stay } @@ -320,7 +320,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi object Controller { case class ClientDisconnected(name: String) case object GetNodes - case object GetPort + case object GetSockAddr case class NodeInfo(name: String, addr: Address, fsm: ActorRef) } @@ -330,12 +330,12 @@ object Controller { * [[akka.remote.testconductor.BarrierCoordinator]], its child) and allowing * network and other failures to be injected at the test nodes. */ -class Controller(private var initialParticipants: Int) extends Actor { +class Controller(private var initialParticipants: Int, controllerPort: InetSocketAddress) extends Actor { import Controller._ import BarrierCoordinator._ val settings = TestConductor().Settings - val connection = RemoteConnection(Server, settings.host, settings.port, + val connection = RemoteConnection(Server, controllerPort, new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler"))) /* @@ -348,61 +348,73 @@ class Controller(private var initialParticipants: Int) extends Actor { override def supervisorStrategy = OneForOneStrategy() { case BarrierTimeout(data) ⇒ SupervisorStrategy.Resume case BarrierEmpty(data, msg) ⇒ SupervisorStrategy.Resume - case WrongBarrier(name, client, data) ⇒ client ! Send(BarrierFailed(name)); failBarrier(data) + case WrongBarrier(name, client, data) ⇒ client ! 
ToClient(BarrierResult(name, false)); failBarrier(data) case ClientLost(data, node) ⇒ failBarrier(data) case DuplicateNode(data, node) ⇒ failBarrier(data) } def failBarrier(data: Data): SupervisorStrategy.Directive = { - for (c ← data.arrived) c ! Send(BarrierFailed(data.barrier)) + for (c ← data.arrived) c ! ToClient(BarrierResult(data.barrier, false)) SupervisorStrategy.Restart } val barrier = context.actorOf(Props[BarrierCoordinator], "barriers") var nodes = Map[String, NodeInfo]() + // map keeping unanswered queries for node addresses (enqueued upon GetAddress, serviced upon NodeInfo) + var addrInterest = Map[String, Set[ActorRef]]() + override def receive = LoggingReceive { case c @ NodeInfo(name, addr, fsm) ⇒ barrier forward c if (nodes contains name) { if (initialParticipants > 0) { - for (NodeInfo(_, _, client) ← nodes.values) client ! Send(BarrierFailed("initial startup")) + for (NodeInfo(_, _, client) ← nodes.values) client ! ToClient(BarrierResult("initial startup", false)) initialParticipants = 0 } - fsm ! Send(BarrierFailed("initial startup")) + fsm ! ToClient(BarrierResult("initial startup", false)) } else { nodes += name -> c - if (initialParticipants <= 0) fsm ! Send(Done) + if (initialParticipants <= 0) fsm ! ToClient(Done) else if (nodes.size == initialParticipants) { - for (NodeInfo(_, _, client) ← nodes.values) client ! Send(Done) + for (NodeInfo(_, _, client) ← nodes.values) client ! ToClient(Done) initialParticipants = 0 } + if (addrInterest contains name) { + addrInterest(name) foreach (_ ! 
ToClient(AddressReply(name, addr))) + addrInterest -= name + } } case c @ ClientDisconnected(name) ⇒ nodes -= name barrier forward c - case e @ EnterBarrier(name) ⇒ - barrier forward e - case Throttle(node, target, direction, rateMBit) ⇒ - val t = nodes(target) - nodes(node).fsm forward Send(ThrottleMsg(t.addr, direction, rateMBit)) - case Disconnect(node, target, abort) ⇒ - val t = nodes(target) - nodes(node).fsm forward Send(DisconnectMsg(t.addr, abort)) - case Terminate(node, exitValueOrKill) ⇒ - if (exitValueOrKill < 0) { - // TODO: kill via SBT - } else { - nodes(node).fsm forward Send(TerminateMsg(exitValueOrKill)) + case op: ServerOp ⇒ + op match { + case _: EnterBarrier ⇒ barrier forward op + case GetAddress(node) ⇒ + if (nodes contains node) sender ! ToClient(AddressReply(node, nodes(node).addr)) + else addrInterest += node -> ((addrInterest get node getOrElse Set()) + sender) } - case Remove(node) ⇒ - nodes -= node - barrier ! BarrierCoordinator.RemoveClient(node) - case GetNodes ⇒ sender ! nodes.keys - case GetPort ⇒ - sender ! (connection.getLocalAddress match { - case inet: InetSocketAddress ⇒ inet.getPort - }) + case op: CommandOp ⇒ + op match { + case Throttle(node, target, direction, rateMBit) ⇒ + val t = nodes(target) + nodes(node).fsm forward ToClient(ThrottleMsg(t.addr, direction, rateMBit)) + case Disconnect(node, target, abort) ⇒ + val t = nodes(target) + nodes(node).fsm forward ToClient(DisconnectMsg(t.addr, abort)) + case Terminate(node, exitValueOrKill) ⇒ + if (exitValueOrKill < 0) { + // TODO: kill via SBT + } else { + nodes(node).fsm forward ToClient(TerminateMsg(exitValueOrKill)) + } + case Remove(node) ⇒ + nodes -= node + barrier ! BarrierCoordinator.RemoveClient(node) + } + case GetNodes ⇒ sender ! nodes.keys + case GetSockAddr ⇒ sender ! 
connection.getLocalAddress } } @@ -463,13 +475,13 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, } when(Idle) { - case Event(e @ EnterBarrier(name), d @ Data(clients, _, _)) ⇒ + case Event(EnterBarrier(name), d @ Data(clients, _, _)) ⇒ if (failed) - stay replying Send(BarrierFailed(name)) + stay replying ToClient(BarrierResult(name, false)) else if (clients.map(_.fsm) == Set(sender)) - stay replying Send(e) + stay replying ToClient(BarrierResult(name, true)) else if (clients.find(_.fsm == sender).isEmpty) - stay replying Send(BarrierFailed(name)) + stay replying ToClient(BarrierResult(name, false)) else goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil) case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒ @@ -483,7 +495,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, } when(Waiting) { - case Event(e @ EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ + case Event(EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ if (name != barrier || clients.find(_.fsm == sender).isEmpty) throw WrongBarrier(name, sender, d) val together = sender :: arrived handleBarrier(d.copy(arrived = together)) @@ -504,8 +516,7 @@ class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, if (data.arrived.isEmpty) { goto(Idle) using data.copy(barrier = "") } else if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) { - val e = EnterBarrier(data.barrier) - data.arrived foreach (_ ! Send(e)) + data.arrived foreach (_ ! 
ToClient(BarrierResult(data.barrier, true))) goto(Idle) using data.copy(barrier = "", arrived = Nil) } else { stay using data diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index cadd69f786..0273055469 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -11,27 +11,42 @@ import com.google.protobuf.Message import akka.actor.Address import org.jboss.netty.handler.codec.oneone.OneToOneDecoder -case class Send(msg: NetworkOp) +case class ToClient(msg: ClientOp with NetworkOp) +case class ToServer(msg: ServerOp with NetworkOp) -sealed trait ClientOp // messages sent to Player FSM -sealed trait ServerOp // messages sent to Conductor FSM +sealed trait ClientOp // messages sent to from Conductor to Player +sealed trait ServerOp // messages sent to from Player to Conductor +sealed trait CommandOp // messages sent from TestConductorExt to Conductor sealed trait NetworkOp // messages sent over the wire +sealed trait UnconfirmedClientOp extends ClientOp // unconfirmed messages going to the Player +sealed trait ConfirmedClientOp extends ClientOp +/** + * First message of connection sets names straight. 
+ */ case class Hello(name: String, addr: Address) extends NetworkOp -case class EnterBarrier(name: String) extends ClientOp with ServerOp with NetworkOp -case class BarrierFailed(name: String) extends NetworkOp -case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends ServerOp -case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends NetworkOp -case class Disconnect(node: String, target: String, abort: Boolean) extends ServerOp -case class DisconnectMsg(target: Address, abort: Boolean) extends NetworkOp -case class Terminate(node: String, exitValueOrKill: Int) extends ServerOp -case class TerminateMsg(exitValue: Int) extends NetworkOp -abstract class Done extends NetworkOp + +case class EnterBarrier(name: String) extends ServerOp with NetworkOp +case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp + +case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends CommandOp +case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp + +case class Disconnect(node: String, target: String, abort: Boolean) extends CommandOp +case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with NetworkOp + +case class Terminate(node: String, exitValueOrKill: Int) extends CommandOp +case class TerminateMsg(exitValue: Int) extends ConfirmedClientOp with NetworkOp + +case class GetAddress(node: String) extends ServerOp with NetworkOp +case class AddressReply(node: String, addr: Address) extends UnconfirmedClientOp with NetworkOp + +abstract class Done extends ServerOp with UnconfirmedClientOp with NetworkOp case object Done extends Done { def getInstance: Done = this } -case class Remove(node: String) extends ServerOp +case class Remove(node: String) extends CommandOp class MsgEncoder extends OneToOneEncoder { def encode(ctx: 
ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { @@ -42,8 +57,8 @@ class MsgEncoder extends OneToOneEncoder { w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr)) case EnterBarrier(name) ⇒ w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name)) - case BarrierFailed(name) ⇒ - w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setFailed(true)) + case BarrierResult(name, success) ⇒ + w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setStatus(success)) case ThrottleMsg(target, dir, rate) ⇒ w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate)) @@ -52,6 +67,10 @@ class MsgEncoder extends OneToOneEncoder { .setFailure(if (abort) TCP.FailType.Abort else TCP.FailType.Disconnect)) case TerminateMsg(exitValue) ⇒ w.setFailure(TCP.InjectFailure.newBuilder.setFailure(TCP.FailType.Shutdown).setExitValue(exitValue)) + case GetAddress(node) ⇒ + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node)) + case AddressReply(node, addr) ⇒ + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node).setAddr(addr)) case _: Done ⇒ w.setDone("") } @@ -68,7 +87,7 @@ class MsgDecoder extends OneToOneDecoder { Hello(h.getName, h.getAddress) } else if (w.hasBarrier) { val barrier = w.getBarrier - if (barrier.hasFailed && barrier.getFailed) BarrierFailed(barrier.getName) + if (barrier.hasStatus) BarrierResult(barrier.getName, barrier.getStatus) else EnterBarrier(w.getBarrier.getName) } else if (w.hasFailure) { val f = w.getFailure @@ -79,6 +98,10 @@ class MsgDecoder extends OneToOneDecoder { case FT.Disconnect ⇒ DisconnectMsg(f.getAddress, false) case FT.Shutdown ⇒ TerminateMsg(f.getExitValue) } + } else if (w.hasAddr) { + val a = w.getAddr + if (a.hasAddr) AddressReply(a.getNode, a.getAddr) + else GetAddress(a.getNode) } else if (w.hasDone) { Done } else { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala 
b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala index 5d7826c60c..7f6b576128 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala @@ -38,13 +38,13 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C object Settings { val config = system.settings.config + val ConnectTimeout = Duration(config.getMilliseconds("akka.testconductor.connect-timeout"), MILLISECONDS) + val ClientReconnects = config.getInt("akka.testconductor.client-reconnects") + val ReconnectBackoff = Duration(config.getMilliseconds("akka.testconductor.reconnect-backoff"), MILLISECONDS) + implicit val BarrierTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.barrier-timeout"), MILLISECONDS)) implicit val QueryTimeout = Timeout(Duration(config.getMilliseconds("akka.testconductor.query-timeout"), MILLISECONDS)) val PacketSplitThreshold = Duration(config.getMilliseconds("akka.testconductor.packet-split-threshold"), MILLISECONDS) - - val name = config.getString("akka.testconductor.name") - val host = config.getString("akka.testconductor.host") - val port = config.getInt("akka.testconductor.port") } val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index a82a090b23..27a2487364 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -20,6 +20,13 @@ import akka.event.LoggingAdapter import akka.actor.PoisonPill import akka.event.Logging import akka.dispatch.Future +import java.net.InetSocketAddress +import akka.actor.Address +import org.jboss.netty.channel.ExceptionEvent +import org.jboss.netty.channel.WriteCompletionEvent 
+import java.net.ConnectException +import akka.util.Deadline +import akka.actor.Scheduler /** * The Player is the client component of the @@ -43,13 +50,13 @@ trait Player { this: TestConductorExt ⇒ * this is a first barrier in itself). The number of expected participants is * set in [[akka.remote.testconductor.Conductor]]`.startController()`. */ - def startClient(port: Int): Future[Done] = { + def startClient(name: String, controllerAddr: InetSocketAddress): Future[Done] = { import ClientFSM._ import akka.actor.FSM._ import Settings.BarrierTimeout if (_client ne null) throw new IllegalStateException("TestConductorClient already started") - _client = system.actorOf(Props(new ClientFSM(port)), "TestConductorClient") + _client = system.actorOf(Props(new ClientFSM(name, controllerAddr)), "TestConductorClient") val a = system.actorOf(Props(new Actor { var waiting: ActorRef = _ def receive = { @@ -73,10 +80,18 @@ trait Player { this: TestConductorExt ⇒ system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) name foreach { b ⇒ import Settings.BarrierTimeout - Await.result(client ? Send(EnterBarrier(b)), Duration.Inf) + Await.result(client ? ToServer(EnterBarrier(b)), Duration.Inf) system.log.debug("passed barrier {}", b) } } + + /** + * Query remote transport address of named node. + */ + def getAddressFor(name: String): Future[Address] = { + import Settings.BarrierTimeout + client ? 
ToServer(GetAddress(name)) mapTo + } } object ClientFSM { @@ -86,9 +101,10 @@ object ClientFSM { case object Connected extends State case object Failed extends State - case class Data(channel: Channel, barrier: Option[(String, ActorRef)]) + case class Data(channel: Option[Channel], runningOp: Option[(String, ActorRef)]) - class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace + case class Connected(channel: Channel) + case class ConnectionFailure(msg: String) extends RuntimeException(msg) with NoStackTrace case object Disconnected } @@ -101,21 +117,22 @@ object ClientFSM { * coordinator and react to the [[akka.remote.testconductor.Conductor]]’s * requests for failure injection. */ -class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { +class ClientFSM(name: String, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ val settings = TestConductor().Settings - val handler = new PlayerHandler(self, Logging(context.system, "PlayerHandler")) + val handler = new PlayerHandler(controllerAddr, settings.ClientReconnects, settings.ReconnectBackoff, + self, Logging(context.system, "PlayerHandler"), context.system.scheduler) - startWith(Connecting, Data(RemoteConnection(Client, settings.host, port, handler), None)) + startWith(Connecting, Data(None, None)) - when(Connecting, stateTimeout = 10 seconds) { + when(Connecting, stateTimeout = settings.ConnectTimeout) { case Event(msg: ClientOp, _) ⇒ stay replying Status.Failure(new IllegalStateException("not connected yet")) - case Event(Connected, d @ Data(channel, _)) ⇒ - channel.write(Hello(settings.name, TestConductor().address)) - goto(AwaitDone) + case Event(Connected(channel), _) ⇒ + channel.write(Hello(name, TestConductor().address)) + goto(AwaitDone) using Data(Some(channel), None) case Event(_: ConnectionFailure, _) ⇒ goto(Failed) case Event(StateTimeout, _) ⇒ @@ -130,7 +147,7 @@ class 
ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client case Event(msg: NetworkOp, _) ⇒ log.error("received {} instead of Done", msg) goto(Failed) - case Event(msg: ClientOp, _) ⇒ + case Event(msg: ServerOp, _) ⇒ stay replying Status.Failure(new IllegalStateException("not connected yet")) case Event(StateTimeout, _) ⇒ log.error("connect timeout to TestConductor") @@ -141,44 +158,63 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client case Event(Disconnected, _) ⇒ log.info("disconnected from TestConductor") throw new ConnectionFailure("disconnect") - case Event(Send(msg: EnterBarrier), Data(channel, None)) ⇒ + case Event(ToServer(Done), Data(Some(channel), _)) ⇒ + channel.write(Done) + stay + case Event(ToServer(msg), d @ Data(Some(channel), None)) ⇒ channel.write(msg) - stay using Data(channel, Some(msg.name, sender)) - case Event(Send(d: Done), Data(channel, _)) ⇒ - channel.write(d) - stay - case Event(Send(x), _) ⇒ - log.warning("cannot send message {}", x) - stay - case Event(EnterBarrier(b), Data(channel, Some((barrier, sender)))) ⇒ - if (b != barrier) { - sender ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) - } else { - sender ! b - } - stay using Data(channel, None) - case Event(BarrierFailed(b), Data(channel, Some((_, sender)))) ⇒ - sender ! Status.Failure(new RuntimeException("barrier failed: " + b)) - stay using Data(channel, None) - case Event(ThrottleMsg(target, dir, rate), _) ⇒ - import settings.QueryTimeout - import context.dispatcher - TestConductor().failureInjectors.get(target.copy(system = "")) match { - case null ⇒ log.warning("cannot throttle unknown address {}", target) - case inj ⇒ - Future.sequence(inj.refs(dir) map (_ ? 
NetworkFailureInjector.SetRate(rate))) map (_ ⇒ Send(Done)) pipeTo self + val token = msg match { + case EnterBarrier(barrier) ⇒ barrier + case GetAddress(node) ⇒ node } + stay using d.copy(runningOp = Some(token, sender)) + case Event(ToServer(op), Data(channel, Some((token, _)))) ⇒ + log.error("cannot write {} while waiting for {}", op, token) stay - case Event(DisconnectMsg(target, abort), _) ⇒ - import settings.QueryTimeout - TestConductor().failureInjectors.get(target.copy(system = "")) match { - case null ⇒ log.warning("cannot disconnect unknown address {}", target) - case inj ⇒ inj.sender ? NetworkFailureInjector.Disconnect(abort) map (_ ⇒ Send(Done)) pipeTo self + case Event(op: ClientOp, d @ Data(Some(channel), runningOp)) ⇒ + op match { + case BarrierResult(b, success) ⇒ + runningOp match { + case Some((barrier, requester)) ⇒ + if (b != barrier) { + requester ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + } else if (!success) { + requester ! Status.Failure(new RuntimeException("barrier failed: " + b)) + } else { + requester ! b + } + case None ⇒ + log.warning("did not expect {}", op) + } + stay using d.copy(runningOp = None) + case AddressReply(node, addr) ⇒ + runningOp match { + case Some((_, requester)) ⇒ + requester ! addr + case None ⇒ + log.warning("did not expect {}", op) + } + stay using d.copy(runningOp = None) + case ThrottleMsg(target, dir, rate) ⇒ + import settings.QueryTimeout + import context.dispatcher + TestConductor().failureInjectors.get(target.copy(system = "")) match { + case null ⇒ log.warning("cannot throttle unknown address {}", target) + case inj ⇒ + Future.sequence(inj.refs(dir) map (_ ? 
NetworkFailureInjector.SetRate(rate))) map (_ ⇒ ToServer(Done)) pipeTo self + } + stay + case DisconnectMsg(target, abort) ⇒ + import settings.QueryTimeout + TestConductor().failureInjectors.get(target.copy(system = "")) match { + case null ⇒ log.warning("cannot disconnect unknown address {}", target) + case inj ⇒ inj.sender ? NetworkFailureInjector.Disconnect(abort) map (_ ⇒ ToServer(Done)) pipeTo self + } + stay + case TerminateMsg(exit) ⇒ + System.exit(exit) + stay // needed because Java doesn’t have Nothing } - stay - case Event(TerminateMsg(exit), _) ⇒ - System.exit(exit) - stay // needed because Java doesn’t have Nothing } when(Failed) { @@ -190,7 +226,7 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client } onTermination { - case StopEvent(_, _, Data(channel, _)) ⇒ + case StopEvent(_, _, Data(Some(channel), _)) ⇒ channel.close() } @@ -201,14 +237,46 @@ class ClientFSM(port: Int) extends Actor with LoggingFSM[ClientFSM.State, Client /** * This handler only forwards messages received from the conductor to the [[akka.remote.testconductor.ClientFSM]]. 
*/ -class PlayerHandler(fsm: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { +class PlayerHandler( + server: InetSocketAddress, + private var reconnects: Int, + backoff: Duration, + fsm: ActorRef, + log: LoggingAdapter, + scheduler: Scheduler) + extends SimpleChannelUpstreamHandler { import ClientFSM._ + reconnect() + + var nextAttempt: Deadline = _ + + override def channelOpen(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} open", event.getChannel) + override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} closed", event.getChannel) + override def channelBound(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} bound", event.getChannel) + override def channelUnbound(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} unbound", event.getChannel) + override def writeComplete(ctx: ChannelHandlerContext, event: WriteCompletionEvent) = log.debug("channel {} written {}", event.getChannel, event.getWrittenAmount) + + override def exceptionCaught(ctx: ChannelHandlerContext, event: ExceptionEvent) = { + log.debug("channel {} exception {}", event.getChannel, event.getCause) + event.getCause match { + case c: ConnectException if reconnects > 0 ⇒ + reconnects -= 1 + scheduler.scheduleOnce(nextAttempt.timeLeft)(reconnect()) + case e ⇒ fsm ! ConnectionFailure(e.getMessage) + } + } + + private def reconnect(): Unit = { + nextAttempt = Deadline.now + backoff + RemoteConnection(Client, server, this) + } + override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { - val channel = event.getChannel - log.debug("connected to {}", getAddrString(channel)) - fsm ! Connected + val ch = event.getChannel + log.debug("connected to {}", getAddrString(ch)) + fsm ! 
Connected(ch) } override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index b2f4baebbb..5b1c454b0c 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -27,8 +27,7 @@ case object Client extends Role case object Server extends Role object RemoteConnection { - def apply(role: Role, host: String, port: Int, handler: ChannelUpstreamHandler): Channel = { - val sockaddr = new InetSocketAddress(host, port) + def apply(role: Role, sockaddr: InetSocketAddress, handler: ChannelUpstreamHandler): Channel = { role match { case Client ⇒ val socketfactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 512757c130..39d25981aa 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -10,14 +10,15 @@ import akka.dispatch.Await.Awaitable import akka.util.Duration import akka.util.duration._ import akka.testkit.ImplicitSender +import java.net.InetSocketAddress +import java.net.InetAddress +import akka.remote.testkit.MultiNodeSpec object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { override def NrOfNodes = 2 override def commonConfig = ConfigFactory.parseString(""" akka.loglevel = DEBUG - akka.actor.provider = akka.remote.RemoteActorRefProvider akka.remote { - transport = akka.remote.testconductor.TestConductorTransport 
log-received-messages = on log-sent-messages = on } @@ -25,87 +26,96 @@ object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { receive = on fsm = on } - akka.testconductor { - host = localhost - port = 4712 - } """) - def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) +} - implicit def awaitHelper[T](w: Awaitable[T]) = new AwaitHelper(w) - class AwaitHelper[T](w: Awaitable[T]) { - def await: T = Await.result(w, Duration.Inf) +object H { + def apply(x: Int) = { + System.setProperty("multinode.hosts", "localhost,localhost") + System.setProperty("multinode.index", x.toString) } } -class TestConductorMultiJvmNode1 extends AkkaRemoteSpec(TestConductorMultiJvmSpec.nameConfig(0)) { +class TestConductorMultiJvmNode1 extends { val dummy = H(0) } with TestConductorSpec +class TestConductorMultiJvmNode2 extends { val dummy = H(1) } with TestConductorSpec - import TestConductorMultiJvmSpec._ +class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonConfig) with ImplicitSender { - val nodes = NrOfNodes + def initialParticipants = 2 + lazy val roles = Seq("master", "slave") - val tc = TestConductor(system) - - val echo = system.actorOf(Props(new Actor { - def receive = { - case x ⇒ testActor ! x; sender ! x - } - }), "echo") - - "running a test with barrier" in { - tc.startController(2).await - tc.enter("begin") + runOn("master") { + system.actorOf(Props(new Actor { + def receive = { + case x ⇒ testActor ! x; sender ! 
x + } + }), "echo") } - "throttling" in { - expectMsg("start") - tc.throttle("node1", "node0", Direction.Send, 0.01).await - tc.enter("throttled_send") - within(0.6 seconds, 2 seconds) { - receiveN(10) must be(0 to 9) + val echo = system.actorFor(node("master") / "user" / "echo") + + "A TestConductor" must { + + "enter a barrier" in { + testConductor.enter("name") } - tc.enter("throttled_send2") - tc.throttle("node1", "node0", Direction.Send, -1).await - - tc.throttle("node1", "node0", Direction.Receive, 0.01).await - tc.enter("throttled_recv") - receiveN(10, 500 millis) must be(10 to 19) - tc.enter("throttled_recv2") - tc.throttle("node1", "node0", Direction.Receive, -1).await - } -} -class TestConductorMultiJvmNode2 extends AkkaRemoteSpec(TestConductorMultiJvmSpec.nameConfig(1)) with ImplicitSender { + "support throttling of network connections" in { - import TestConductorMultiJvmSpec._ + runOn("slave") { + // start remote network connection so that it can be throttled + echo ! "start" + } - val nodes = NrOfNodes + expectMsg("start") - val tc = TestConductor(system) - - val echo = system.actorFor("akka://" + akkaSpec(0) + "/user/echo") + runOn("master") { + testConductor.throttle("slave", "master", Direction.Send, rateMBit = 0.01).await + } - "running a test with barrier" in { - tc.startClient(4712).await - tc.enter("begin") - } + testConductor.enter("throttled_send") - "throttling" in { - echo ! "start" - expectMsg("start") - tc.enter("throttled_send") - for (i <- 0 to 9) echo ! i - expectMsg(500 millis, 0) - within(0.6 seconds, 2 seconds) { - receiveN(9) must be(1 to 9) + runOn("slave") { + for (i ← 0 to 9) echo ! 
i + } + + within(0.6 seconds, 2 seconds) { + expectMsg(500 millis, 0) + receiveN(9) must be(1 to 9) + } + + testConductor.enter("throttled_send2") + + runOn("master") { + testConductor.throttle("slave", "master", Direction.Send, -1).await + testConductor.throttle("slave", "master", Direction.Receive, rateMBit = 0.01).await + } + + testConductor.enter("throttled_recv") + + runOn("slave") { + for (i ← 10 to 19) echo ! i + } + + val (min, max) = + ifNode("master") { + (0 seconds, 500 millis) + } { + (0.6 seconds, 2 seconds) + } + + within(min, max) { + expectMsg(500 millis, 10) + receiveN(9) must be(11 to 19) + } + + testConductor.enter("throttled_recv2") + + runOn("master") { + testConductor.throttle("slave", "master", Direction.Receive, -1).await + } } - tc.enter("throttled_send2", "throttled_recv") - for (i <- 10 to 19) echo ! i - expectMsg(500 millis, 10) - within(0.6 seconds, 2 seconds) { - receiveN(9) must be(11 to 19) - } - tc.enter("throttled_recv2") + } } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index f0b668d1ed..aa14b93f9d 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -16,6 +16,8 @@ import akka.testkit.TestProbe import akka.util.duration._ import akka.event.Logging import org.scalatest.BeforeAndAfterEach +import java.net.InetSocketAddress +import java.net.InetAddress object BarrierSpec { case class Failed(ref: ActorRef, thr: Throwable) @@ -68,7 +70,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail entering barrier when nobody registered" in { val b = getBarrier() b ! 
EnterBarrier("b") - expectMsg(Send(BarrierFailed("b"))) + expectMsg(ToClient(BarrierResult("b", false))) } "enter barrier" in { @@ -80,8 +82,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with noMsg(a, b) within(1 second) { b.send(barrier, EnterBarrier("bar")) - a.expectMsg(Send(EnterBarrier("bar"))) - b.expectMsg(Send(EnterBarrier("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar", true))) } } @@ -96,9 +98,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with noMsg(a, b, c) within(1 second) { c.send(barrier, EnterBarrier("bar")) - a.expectMsg(Send(EnterBarrier("bar"))) - b.expectMsg(Send(EnterBarrier("bar"))) - c.expectMsg(Send(EnterBarrier("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar", true))) + c.expectMsg(ToClient(BarrierResult("bar", true))) } } @@ -115,7 +117,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with noMsg(a, b, c) b.within(1 second) { barrier ! RemoveClient("c") - b.expectMsg(Send(EnterBarrier("bar"))) + b.expectMsg(ToClient(BarrierResult("bar", true))) } barrier ! ClientDisconnected("c") expectNoMsg(1 second) @@ -129,7 +131,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.send(barrier, EnterBarrier("bar")) barrier ! RemoveClient("a") b.send(barrier, EnterBarrier("foo")) - b.expectMsg(Send(EnterBarrier("foo"))) + b.expectMsg(ToClient(BarrierResult("foo", true))) } "fail barrier with disconnecing node" in { @@ -184,7 +186,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "no client to remove"))) barrier ! 
NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("right")) - a.expectMsg(Send(BarrierFailed("right"))) + a.expectMsg(ToClient(BarrierResult("right", false))) } "fail after barrier timeout" in { @@ -223,7 +225,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "register clients and remove them" in { val b = getController(1) b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) - expectMsg(Send(Done)) + expectMsg(ToClient(Done)) b ! Remove("b") b ! Remove("a") EventFilter[BarrierEmpty](occurrences = 1) intercept { @@ -234,7 +236,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "register clients and disconnect them" in { val b = getController(1) b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) - expectMsg(Send(Done)) + expectMsg(ToClient(Done)) b ! ClientDisconnected("b") EventFilter[ClientLost](occurrences = 1) intercept { b ! ClientDisconnected("a") @@ -247,7 +249,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail entering barrier when nobody registered" in { val b = getController(0) b ! EnterBarrier("b") - expectMsg(Send(BarrierFailed("b"))) + expectMsg(ToClient(BarrierResult("b", false))) } "enter barrier" in { @@ -255,14 +257,14 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b = TestProbe() barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! 
NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) noMsg(a, b) within(1 second) { b.send(barrier, EnterBarrier("bar")) - a.expectMsg(Send(EnterBarrier("bar"))) - b.expectMsg(Send(EnterBarrier("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar", true))) } } @@ -271,18 +273,18 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b, c = TestProbe() barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) - c.expectMsg(Send(Done)) + c.expectMsg(ToClient(Done)) b.send(barrier, EnterBarrier("bar")) noMsg(a, b, c) within(1 second) { c.send(barrier, EnterBarrier("bar")) - a.expectMsg(Send(EnterBarrier("bar"))) - b.expectMsg(Send(EnterBarrier("bar"))) - c.expectMsg(Send(EnterBarrier("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar", true))) + c.expectMsg(ToClient(BarrierResult("bar", true))) } } @@ -292,9 +294,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) - c.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) + c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) barrier ! 
Remove("a") @@ -302,7 +304,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with noMsg(a, b, c) b.within(1 second) { barrier ! Remove("c") - b.expectMsg(Send(EnterBarrier("bar"))) + b.expectMsg(ToClient(BarrierResult("bar", true))) } barrier ! ClientDisconnected("c") expectNoMsg(1 second) @@ -313,12 +315,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b = TestProbe() barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) barrier ! Remove("a") b.send(barrier, EnterBarrier("foo")) - b.expectMsg(Send(EnterBarrier("foo"))) + b.expectMsg(ToClient(BarrierResult("foo", true))) } "fail barrier with disconnecing node" in { @@ -327,15 +329,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) barrier ! ClientDisconnected("unknown") noMsg(a) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected("b") } - a.expectMsg(Send(BarrierFailed("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", false))) } "fail barrier with disconnecing node who already arrived" in { @@ -346,15 +348,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeA barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) barrier ! 
nodeC - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) - c.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) + c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected("b") } - a.expectMsg(Send(BarrierFailed("bar"))) + a.expectMsg(ToClient(BarrierResult("bar", false))) } "fail when entering wrong barrier" in { @@ -364,14 +366,14 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeA val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) barrier ! nodeB - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) EventFilter[WrongBarrier](occurrences = 1) intercept { b.send(barrier, EnterBarrier("foo")) } - a.expectMsg(Send(BarrierFailed("bar"))) - b.expectMsg(Send(BarrierFailed("foo"))) + a.expectMsg(ToClient(BarrierResult("bar", false))) + b.expectMsg(ToClient(BarrierResult("foo", false))) } "not really fail after barrier timeout" in { @@ -381,15 +383,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA barrier ! 
nodeB - a.expectMsg(Send(Done)) - b.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("right")) EventFilter[BarrierTimeout](occurrences = 1) intercept { Thread.sleep(5000) } b.send(barrier, EnterBarrier("right")) - a.expectMsg(Send(EnterBarrier("right"))) - b.expectMsg(Send(EnterBarrier("right"))) + a.expectMsg(ToClient(BarrierResult("right", true))) + b.expectMsg(ToClient(BarrierResult("right", true))) } "fail if a node registers twice" in { @@ -401,8 +403,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[DuplicateNode](occurrences = 1) intercept { controller ! nodeB } - a.expectMsg(Send(BarrierFailed("initial startup"))) - b.expectMsg(Send(BarrierFailed("initial startup"))) + a.expectMsg(ToClient(BarrierResult("initial startup", false))) + b.expectMsg(ToClient(BarrierResult("initial startup", false))) } "fail subsequent barriers if a node registers twice" in { @@ -411,13 +413,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref) controller ! nodeA - a.expectMsg(Send(Done)) + a.expectMsg(ToClient(Done)) EventFilter[DuplicateNode](occurrences = 1) intercept { controller ! nodeB - b.expectMsg(Send(BarrierFailed("initial startup"))) + b.expectMsg(ToClient(BarrierResult("initial startup", false))) } a.send(controller, EnterBarrier("x")) - a.expectMsg(Send(BarrierFailed("x"))) + a.expectMsg(ToClient(BarrierResult("x", false))) } "finally have no failure messages left" in { @@ -428,13 +430,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with private def getController(participants: Int): ActorRef = { system.actorOf(Props(new Actor { - val controller = context.actorOf(Props(new Controller(participants))) - controller ! 
GetPort + val controller = context.actorOf(Props(new Controller(participants, new InetSocketAddress(InetAddress.getLocalHost, 0)))) + controller ! GetSockAddr override def supervisorStrategy = OneForOneStrategy() { case x ⇒ testActor ! Failed(controller, x); SupervisorStrategy.Restart } def receive = { - case x: Int ⇒ testActor ! controller + case x: InetSocketAddress ⇒ testActor ! controller } })) expectMsgType[ActorRef] diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala index db0e3cfe69..c4e0ca6cd0 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala @@ -8,6 +8,8 @@ import akka.actor.Props import akka.testkit.ImplicitSender import akka.remote.testconductor.Controller.NodeInfo import akka.actor.AddressFromURIString +import java.net.InetSocketAddress +import java.net.InetAddress object ControllerSpec { val config = """ @@ -24,11 +26,11 @@ class ControllerSpec extends AkkaSpec(ControllerSpec.config) with ImplicitSender "A Controller" must { "publish its nodes" in { - val c = system.actorOf(Props(new Controller(1))) + val c = system.actorOf(Props(new Controller(1, new InetSocketAddress(InetAddress.getLocalHost, 0)))) c ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) - expectMsg(Send(Done)) + expectMsg(ToClient(Done)) c ! NodeInfo("b", AddressFromURIString("akka://sys"), testActor) - expectMsg(Send(Done)) + expectMsg(ToClient(Done)) c ! 
Controller.GetNodes expectMsgType[Iterable[String]].toSet must be(Set("a", "b")) } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala new file mode 100644 index 0000000000..7acde4eac9 --- /dev/null +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -0,0 +1,157 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.remote.testkit + +import akka.testkit.AkkaSpec +import akka.actor.ActorSystem +import akka.remote.testconductor.TestConductor +import java.net.InetAddress +import java.net.InetSocketAddress +import akka.remote.testconductor.TestConductorExt +import com.typesafe.config.Config +import com.typesafe.config.ConfigFactory +import akka.dispatch.Await.Awaitable +import akka.dispatch.Await +import akka.util.Duration +import akka.actor.ActorPath +import akka.actor.RootActorPath + +object MultiNodeSpec { + + /** + * Names (or IP addresses; must be resolvable using InetAddress.getByName) + * of all nodes taking part in this test, including symbolic name and host + * definition: + * + * {{{ + * -D"multinode.hosts=host1@workerA.example.com,host2@workerB.example.com" + * }}} + */ + val nodeNames: Seq[String] = Vector.empty ++ ( + Option(System.getProperty("multinode.hosts")) getOrElse + (throw new IllegalStateException("need system property multinode.hosts to be set")) split ",") + + require(nodeNames != List(""), "multinode.hosts must not be empty") + + /** + * Index of this node in the nodeNames / nodeAddresses lists. The TestConductor + * is started in “controller” mode on selfIndex 0, i.e. there you can inject + * failures and shutdown other nodes etc. 
+ */ + val selfIndex = Option(Integer.getInteger("multinode.index")) getOrElse + (throw new IllegalStateException("need system property multinode.index to be set")) + + require(selfIndex >= 0 && selfIndex < nodeNames.size, "selfIndex out of bounds: " + selfIndex) + + val nodeConfig = AkkaSpec.mapToConfig(Map( + "akka.actor.provider" -> "akka.remote.RemoteActorRefProvider", + "akka.remote.transport" -> "akka.remote.testconductor.TestConductorTransport", + "akka.remote.netty.hostname" -> nodeNames(selfIndex), + "akka.remote.netty.port" -> 0)) + +} + +abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { + + import MultiNodeSpec._ + + def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, + MultiNodeSpec.nodeConfig.withFallback(config.withFallback(AkkaSpec.testConf)))) + + def this(s: String) = this(ConfigFactory.parseString(s)) + + def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) + + def this() = this(AkkaSpec.testConf) + + /* + * Test Class Interface + */ + + /** + * TO BE DEFINED BY USER: Defines the number of participants required for starting the test. This + * might not be equals to the number of nodes available to the test. + * + * Must be a `def`: + * {{{ + * def initialParticipants = 5 + * }}} + */ + def initialParticipants: Int + require(initialParticipants > 0, "initialParticipants must be a 'def' or early initializer, and it must be greater zero") + require(initialParticipants <= nodeNames.size, "not enough nodes to run this test") + + /** + * Access to the barriers, failure injection, etc. The extension will have + * been started either in Conductor or Player mode when the constructor of + * MultiNodeSpec finishes, i.e. do not call the start*() methods yourself! + */ + val testConductor: TestConductorExt = TestConductor(system) + + /** + * TO BE DEFINED BY USER: The test class must define a set of role names to + * be used throughout the run, e.g. in naming nodes in failure injections. 
+ * These will be mapped to the available nodes such that the first name will + * be the Controller, i.e. on this one you can do failure injection. + * + * Should be a lazy val due to initialization order: + * {{{ + * lazy val roles = Seq("master", "slave") + * }}} + */ + def roles: Seq[String] + + require(roles.size >= initialParticipants, "not enough roles for initialParticipants") + require(roles.size <= nodeNames.size, "not enough nodes for number of roles") + require(roles.distinct.size == roles.size, "role names must be distinct") + + val mySelf = { + if (selfIndex >= roles.size) System.exit(0) + roles(selfIndex) + } + + /** + * Execute the given block of code only on the given nodes (names according + * to the `roleMap`). + */ + def runOn(nodes: String*)(thunk: ⇒ Unit): Unit = { + if (nodes exists (_ == mySelf)) { + thunk + } + } + + def ifNode[T](nodes: String*)(yes: ⇒ T)(no: ⇒ T): T = { + if (nodes exists (_ == mySelf)) yes else no + } + + /** + * Query the controller for the transport address of the given node (by role name) and + * return that as an ActorPath for easy composition: + * + * {{{ + * val serviceA = system.actorFor(node("master") / "user" / "serviceA") + * }}} + */ + def node(name: String): ActorPath = RootActorPath(testConductor.getAddressFor(name).await) + + /** + * Enrich `.await()` onto all Awaitables, using BarrierTimeout. + */ + implicit def awaitHelper[T](w: Awaitable[T]) = new AwaitHelper(w) + class AwaitHelper[T](w: Awaitable[T]) { + def await: T = Await.result(w, testConductor.Settings.BarrierTimeout.duration) + } + + /* + * Implementation (i.e. wait for start etc.) 
+ */ + + private val controllerAddr = new InetSocketAddress(nodeNames(0), 4711) + if (selfIndex == 0) { + testConductor.startController(initialParticipants, roles(0), controllerAddr).await + } else { + testConductor.startClient(roles(selfIndex), controllerAddr).await + } + +} \ No newline at end of file diff --git a/scripts/fix-protobuf.sh b/scripts/fix-protobuf.sh new file mode 100755 index 0000000000..b0c8831091 --- /dev/null +++ b/scripts/fix-protobuf.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +find . -name \*.proto -print0 | xargs -0 perl -pi -e 's/\Qprivate Builder(BuilderParent parent)/private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent)/' From 4217d639f96e25d900e40d8c2e33b5130d35f32a Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 18 May 2012 16:00:33 +0200 Subject: [PATCH 056/538] add utility for fixing up broken PROTOC code (and apply it) --- .../remote/testconductor/TestConductorProtocol.java | 12 ++++++------ scripts/fix-protobuf.sh | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index 4ae1aae07a..99c33e6728 100644 --- a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -492,7 +492,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1397,7 +1397,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1927,7 +1927,7 
@@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2377,7 +2377,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3005,7 +3005,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3611,7 +3611,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } diff --git a/scripts/fix-protobuf.sh b/scripts/fix-protobuf.sh index b0c8831091..e53ce297ab 100755 --- a/scripts/fix-protobuf.sh +++ b/scripts/fix-protobuf.sh @@ -1,3 +1,3 @@ #!/bin/bash -find . -name \*.proto -print0 | xargs -0 perl -pi -e 's/\Qprivate Builder(BuilderParent parent)/private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent)/' +find . -name \*.java -print0 | xargs -0 perl -pi -e 's/\Qprivate Builder(BuilderParent parent)/private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent)/' From 483083708e0be1dcc226842c96bad2849024712e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 18 May 2012 16:05:38 +0200 Subject: [PATCH 057/538] Added verification that a BalancingDispatcher can not be used with any kind of Router (impl + test). 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also had to 'ignore' one test that violates this principle. Should be looked into later. Signed-off-by: Jonas Bonér --- .../ActorConfigurationVerificationSpec.scala | 80 +++++++++++++++++++ .../test/scala/akka/routing/ResizerSpec.scala | 3 +- .../src/main/scala/akka/actor/ActorCell.scala | 12 +++ 3 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala new file mode 100644 index 0000000000..cdaa421a59 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala @@ -0,0 +1,80 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.actor + +import akka.testkit._ +import akka.testkit.DefaultTimeout +import akka.testkit.TestEvent._ +import akka.dispatch.Await +import akka.util.duration._ +import akka.routing._ +import akka.config.ConfigurationException +import com.typesafe.config.{ Config, ConfigFactory } +import org.scalatest.BeforeAndAfterEach +import org.scalatest.junit.JUnitSuite + +object ActorConfigurationVerificationSpec { + + class TestActor extends Actor { + def receive: Receive = { + case _ ⇒ + } + } + + val config = """ + balancing-dispatcher { + type = BalancingDispatcher + throughput = 1 + } + pinned-dispatcher { + executor = "thread-pool-executor" + type = PinnedDispatcher + } + """ +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class ActorConfigurationVerificationSpec extends AkkaSpec(ActorConfigurationVerificationSpec.config) with DefaultTimeout with BeforeAndAfterEach { + import ActorConfigurationVerificationSpec._ + + override def atStartup { + 
system.eventStream.publish(Mute(EventFilter[ConfigurationException](""))) + } + + "An Actor configured with a BalancingDispatcher" must { + "fail verification with a ConfigurationException if also configured with a RoundRobinRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(RoundRobinRouter(2))) + } + } + "fail verification with a ConfigurationException if also configured with a BroadcastRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(BroadcastRouter(2))) + } + } + "fail verification with a ConfigurationException if also configured with a RandomRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(RandomRouter(2))) + } + } + "fail verification with a ConfigurationException if also configured with a SmallestMailboxRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(SmallestMailboxRouter(2))) + } + } + "fail verification with a ConfigurationException if also configured with a ScatterGatherFirstCompletedRouter" in { + intercept[ConfigurationException] { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(ScatterGatherFirstCompletedRouter(nrOfInstances = 2, within = 2 seconds))) + } + } + "not fail verification with a ConfigurationException also not configured with a Router" in { + system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher")) + } + } + "An Actor configured with a non-balancing dispatcher" must { + "not fail verification with a ConfigurationException if also configured with a Router" in { + system.actorOf(Props[TestActor].withDispatcher("pinned-dispatcher").withRouter(RoundRobinRouter(2))) + } + } +} diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala 
b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 457c4ab411..111460e3ac 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -128,7 +128,8 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with current.routees.size must be(2) } - "resize when busy" in { + // FIXME this test violates the rule that you can not use a BalancingDispatcher with any kind of Router - now throws a ConfigurationException in verification process + "resize when busy" ignore { val busy = new TestLatch(1) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 8c68ba3315..9cc993062f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -357,7 +357,19 @@ private[akka] class ActorCell( case _ ⇒ true } + private def verifyActorConfiguration(system: ActorSystem, props: Props, actorName: String): Unit = { + import akka.config.ConfigurationException + import akka.routing.NoRouter + // verify that a BalancingDispatcher is not used with a Router + if (system.dispatchers.lookup(props.dispatcher).isInstanceOf[BalancingDispatcher] && props.routerConfig != NoRouter) + throw new ConfigurationException( + "Configuration for actor [" + actorName + + "] is invalid - you can not use a 'BalancingDispatcher' together with any type of 'Router'") + } + private def _actorOf(props: Props, name: String): ActorRef = { + verifyActorConfiguration(systemImpl, props, name) + if (system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { val ser = SerializationExtension(system) ser.serialize(props.creator) match { From 6d962174fedb48dc3d494f1f6ae47e9996885f8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 18 May 2012 16:16:41 +0200 Subject: [PATCH 058/538] Added 
documentation about the verification of BalancingDispatcher + Router (added to both Dispatcher and Routing docs) --- akka-docs/java/dispatchers.rst | 6 ++++-- akka-docs/java/routing.rst | 3 ++- akka-docs/scala/dispatchers.rst | 10 ++++++---- akka-docs/scala/routing.rst | 4 +++- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index 6ef0d44d7e..90a0e9cb6a 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -70,7 +70,7 @@ There are 4 different types of message dispatchers: * BalancingDispatcher - - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. + - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. - It is assumed that all actors using the same instance of this dispatcher can process all messages that have been sent to one of the actors; i.e. the actors belong to a pool of actors, and to the client there is no guarantee about which actor instance actually processes a given message. @@ -85,9 +85,11 @@ There are 4 different types of message dispatchers: "thread-pool-executor" or the FQCN of an ``akka.dispatcher.ExecutorServiceConfigurator`` + - Note that you can **not** use a ``BalancingDispatcher`` together with any kind of ``Router``, trying to do so will make your actor fail verification. + * CallingThreadDispatcher - - This dispatcher runs invocations on the current thread only. This dispatcher does not create any new threads, + - This dispatcher runs invocations on the current thread only. This dispatcher does not create any new threads, but it can be used from different threads concurrently for the same actor. See :ref:`TestCallingThreadDispatcherRef` for details and restrictions. 
diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 4d01642a72..e006c7db63 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -375,7 +375,8 @@ The dispatcher for created children of the router will be taken from makes sense to configure the :class:`BalancingDispatcher` if the precise routing is not so important (i.e. no consistent hashing or round-robin is required); this enables newly created routees to pick up work immediately by -stealing it from their siblings. +stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` +together with any kind of ``Router``, trying to do so will make your actor fail verification. The “head” router, of course, cannot run on the same balancing dispatcher, because it does not process the same messages, hence this special actor does diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index 7d6a1f6334..a1cc431643 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -71,7 +71,7 @@ There are 4 different types of message dispatchers: * BalancingDispatcher - - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. + - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. - It is assumed that all actors using the same instance of this dispatcher can process all messages that have been sent to one of the actors; i.e. the actors belong to a pool of actors, and to the client there is no guarantee about which actor instance actually processes a given message. @@ -86,9 +86,11 @@ There are 4 different types of message dispatchers: "thread-pool-executor" or the FQCN of an ``akka.dispatcher.ExecutorServiceConfigurator`` + - Note that you can **not** use a ``BalancingDispatcher`` together with any kind of ``Router``, trying to do so will make your actor fail verification. 
+ * CallingThreadDispatcher - - This dispatcher runs invocations on the current thread only. This dispatcher does not create any new threads, + - This dispatcher runs invocations on the current thread only. This dispatcher does not create any new threads, but it can be used from different threads concurrently for the same actor. See :ref:`TestCallingThreadDispatcherRef` for details and restrictions. @@ -112,8 +114,8 @@ And then using it: .. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#defining-pinned-dispatcher -Note that ``thread-pool-executor`` configuration as per the above ``my-thread-pool-dispatcher`` exmaple is -NOT applicable. This is because every actor will have its own thread pool when using ``PinnedDispatcher``, +Note that ``thread-pool-executor`` configuration as per the above ``my-thread-pool-dispatcher`` exmaple is +NOT applicable. This is because every actor will have its own thread pool when using ``PinnedDispatcher``, and that pool will have only one thread. Mailboxes diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 737c9e31e7..0d0625be36 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -375,7 +375,9 @@ The dispatcher for created children of the router will be taken from makes sense to configure the :class:`BalancingDispatcher` if the precise routing is not so important (i.e. no consistent hashing or round-robin is required); this enables newly created routees to pick up work immediately by -stealing it from their siblings. +stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` +together with any kind of ``Router``, trying to do so will make your actor fail verification. + .. 
note:: From e99c9385283256a6254b78dd4c6fe24eed464fb0 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 18 May 2012 16:26:48 +0200 Subject: [PATCH 059/538] =?UTF-8?q?switch=20to=20Bj=C3=B6rn=E2=80=99s=20ne?= =?UTF-8?q?w=20multi-jvm=20setup=20(i.e.=20remove=20system=20properties)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../remote/testconductor/TestConductorSpec.scala | 16 ++++------------ project/plugins.sbt | 2 +- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 39d25981aa..7f3763fcc1 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -14,9 +14,8 @@ import java.net.InetSocketAddress import java.net.InetAddress import akka.remote.testkit.MultiNodeSpec -object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 2 - override def commonConfig = ConfigFactory.parseString(""" +object TestConductorMultiJvmSpec { + def commonConfig = ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.remote { log-received-messages = on @@ -29,15 +28,8 @@ object TestConductorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { """) } -object H { - def apply(x: Int) = { - System.setProperty("multinode.hosts", "localhost,localhost") - System.setProperty("multinode.index", x.toString) - } -} - -class TestConductorMultiJvmNode1 extends { val dummy = H(0) } with TestConductorSpec -class TestConductorMultiJvmNode2 extends { val dummy = H(1) } with TestConductorSpec +class TestConductorMultiJvmNode1 extends TestConductorSpec +class TestConductorMultiJvmNode2 extends TestConductorSpec class TestConductorSpec extends 
MultiNodeSpec(TestConductorMultiJvmSpec.commonConfig) with ImplicitSender { diff --git a/project/plugins.sbt b/project/plugins.sbt index 80ff9db95a..f49cfb688d 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,7 +1,7 @@ resolvers += Classpaths.typesafeResolver -addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9") +addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-SNAPSHOT") addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.2") From 4fb4903225e9b7f0d770d8812fb0a12f63c1bd77 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 16:41:19 +0200 Subject: [PATCH 060/538] Further work on binary compatibility --- .../main/scala/akka/pattern/AskSupport.scala | 15 +++-- .../akka/routing/ConnectionManager.scala | 8 +-- .../scala/akka/routing/ConsistentHash.scala | 60 +++++++++---------- .../src/main/scala/akka/routing/Routing.scala | 2 - 4 files changed, 42 insertions(+), 43 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index cfaa0a182b..a20baaf533 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -157,6 +157,8 @@ trait AskSupport { /** * Akka private optimized representation of the temporary actor spawned to * receive the reply to an "ask" operation. 
+ * + * INTERNAL API */ private[akka] final class PromiseActorRef private (val provider: ActorRefProvider, val result: Promise[Any]) extends MinimalActorRef { @@ -182,14 +184,12 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide private def state: AnyRef = Unsafe.instance.getObjectVolatile(this, stateOffset) @inline - private def updateState(oldState: AnyRef, newState: AnyRef): Boolean = - Unsafe.instance.compareAndSwapObject(this, stateOffset, oldState, newState) + private def updateState(oldState: AnyRef, newState: AnyRef): Boolean = Unsafe.instance.compareAndSwapObject(this, stateOffset, oldState, newState) @inline - private def setState(newState: AnyRef): Unit = - Unsafe.instance.putObjectVolatile(this, stateOffset, newState) + private def setState(newState: AnyRef): Unit = Unsafe.instance.putObjectVolatile(this, stateOffset, newState) - override def getParent = provider.tempContainer + override def getParent: InternalActorRef = provider.tempContainer /** * Contract of this method: @@ -234,7 +234,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide case _ ⇒ } - override def isTerminated = state match { + override def isTerminated: Boolean = state match { case Stopped | _: StoppedWithPath ⇒ true case _ ⇒ false } @@ -263,6 +263,9 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide } } +/** + * INTERNAL API + */ private[akka] object PromiseActorRef { private case object Registering private case object Stopped diff --git a/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala b/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala index 3136a2342d..9029c1f78b 100644 --- a/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala +++ b/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala @@ -10,10 +10,8 @@ import akka.actor._ * An Iterable that also contains a version. 
*/ trait VersionedIterable[A] { - val version: Long - + def version: Long def iterable: Iterable[A] - def apply(): Iterable[A] = iterable } @@ -42,7 +40,7 @@ trait ConnectionManager { /** * Shuts the connection manager down, which stops all managed actors */ - def shutdown() + def shutdown(): Unit /** * Returns a VersionedIterator containing all connected ActorRefs at some moment in time. Since there is @@ -59,5 +57,5 @@ trait ConnectionManager { * * @param ref the dead */ - def remove(deadRef: ActorRef) + def remove(deadRef: ActorRef): Unit } diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala index 130db2be3e..afa321d07d 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala @@ -23,7 +23,7 @@ class ConsistentHash[T](nodes: Seq[T], replicas: Int) { nodes.foreach(this += _) - def +=(node: T) { + def +=(node: T): Unit = { cluster += node (1 to replicas) foreach { replica ⇒ val key = hashFor((node + ":" + replica).getBytes("UTF-8")) @@ -32,7 +32,7 @@ class ConsistentHash[T](nodes: Seq[T], replicas: Int) { } } - def -=(node: T) { + def -=(node: T): Unit = { cluster -= node (1 to replicas) foreach { replica ⇒ val key = hashFor((node + ":" + replica).getBytes("UTF-8")) @@ -96,7 +96,7 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T private var hashvalue = h /** Begin a new hash using the same seed. */ - def reset() { + def reset(): Unit = { h = startHash(seed) c = hiddenMagicA k = hiddenMagicB @@ -104,7 +104,7 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T } /** Incorporate the hash value of one item. 
*/ - def apply(t: T) { + def apply(t: T): Unit = { h = extendHash(h, t.##, c, k) c = nextMagicA(c) k = nextMagicB(k) @@ -112,7 +112,7 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T } /** Incorporate a known hash value. */ - def append(i: Int) { + def append(i: Int): Unit = { h = extendHash(h, i, c, k) c = nextMagicA(c) k = nextMagicB(k) @@ -120,14 +120,15 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T } /** Retrieve the hash value */ - def hash = { + def hash: Int = { if (!hashed) { hashvalue = finalizeHash(h) hashed = true } hashvalue } - override def hashCode = hash + + override def hashCode: Int = hash } /** @@ -143,35 +144,35 @@ class MurmurHash[@specialized(Int, Long, Float, Double) T](seed: Int) extends (T object MurmurHash { // Magic values used for MurmurHash's 32 bit hash. // Don't change these without consulting a hashing expert! - final private val visibleMagic = 0x971e137b - final private val hiddenMagicA = 0x95543787 - final private val hiddenMagicB = 0x2ad7eb25 - final private val visibleMixer = 0x52dce729 - final private val hiddenMixerA = 0x7b7d159c - final private val hiddenMixerB = 0x6bce6396 - final private val finalMixer1 = 0x85ebca6b - final private val finalMixer2 = 0xc2b2ae35 + final private val visibleMagic: Int = 0x971e137b + final private val hiddenMagicA: Int = 0x95543787 + final private val hiddenMagicB: Int = 0x2ad7eb25 + final private val visibleMixer: Int = 0x52dce729 + final private val hiddenMixerA: Int = 0x7b7d159c + final private val hiddenMixerB: Int = 0x6bce6396 + final private val finalMixer1: Int = 0x85ebca6b + final private val finalMixer2: Int = 0xc2b2ae35 // Arbitrary values used for hashing certain classes - final private val seedString = 0xf7ca7fd2 - final private val seedArray = 0x3c074a61 + final private val seedString: Int = 0xf7ca7fd2 + final private val seedArray: Int = 0x3c074a61 /** The first 23 magic integers from the first stream are stored 
here */ - val storedMagicA = + val storedMagicA: Array[Int] = Iterator.iterate(hiddenMagicA)(nextMagicA).take(23).toArray /** The first 23 magic integers from the second stream are stored here */ - val storedMagicB = + val storedMagicB: Array[Int] = Iterator.iterate(hiddenMagicB)(nextMagicB).take(23).toArray /** Begin a new hash with a seed value. */ - def startHash(seed: Int) = seed ^ visibleMagic + def startHash(seed: Int): Int = seed ^ visibleMagic /** The initial magic integers in the first stream. */ - def startMagicA = hiddenMagicA + def startMagicA: Int = hiddenMagicA /** The initial magic integer in the second stream. */ - def startMagicB = hiddenMagicB + def startMagicB: Int = hiddenMagicB /** * Incorporates a new value into an existing hash. @@ -182,18 +183,17 @@ object MurmurHash { * @param magicB a magic integer from a different stream * @return the updated hash value */ - def extendHash(hash: Int, value: Int, magicA: Int, magicB: Int) = { + def extendHash(hash: Int, value: Int, magicA: Int, magicB: Int): Int = (hash ^ rotl(value * magicA, 11) * magicB) * 3 + visibleMixer - } /** Given a magic integer from the first stream, compute the next */ - def nextMagicA(magicA: Int) = magicA * 5 + hiddenMixerA + def nextMagicA(magicA: Int): Int = magicA * 5 + hiddenMixerA /** Given a magic integer from the second stream, compute the next */ - def nextMagicB(magicB: Int) = magicB * 5 + hiddenMixerB + def nextMagicB(magicB: Int): Int = magicB * 5 + hiddenMixerB /** Once all hashes have been incorporated, this performs a final mixing */ - def finalizeHash(hash: Int) = { + def finalizeHash(hash: Int): Int = { var i = (hash ^ (hash >>> 16)) i *= finalMixer1 i ^= (i >>> 13) @@ -203,7 +203,7 @@ object MurmurHash { } /** Compute a high-quality hash of an array */ - def arrayHash[@specialized T](a: Array[T]) = { + def arrayHash[@specialized T](a: Array[T]): Int = { var h = startHash(a.length * seedArray) var c = hiddenMagicA var k = hiddenMagicB @@ -218,7 +218,7 @@ object 
MurmurHash { } /** Compute a high-quality hash of a string */ - def stringHash(s: String) = { + def stringHash(s: String): Int = { var h = startHash(s.length * seedString) var c = hiddenMagicA var k = hiddenMagicB @@ -239,7 +239,7 @@ object MurmurHash { * where the order of appearance of elements does not matter. * This is useful for hashing sets, for example. */ - def symmetricHash[T](xs: TraversableOnce[T], seed: Int) = { + def symmetricHash[T](xs: TraversableOnce[T], seed: Int): Int = { var a, b, n = 0 var c = 1 xs.foreach(i ⇒ { diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 211ef202f7..c3db8293d2 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -8,12 +8,10 @@ import akka.util.Duration import akka.util.duration._ import akka.ConfigurationException import akka.pattern.pipe -import akka.pattern.AskSupport import com.typesafe.config.Config import scala.collection.JavaConversions.iterableAsScalaIterable import java.util.concurrent.atomic.{ AtomicLong, AtomicBoolean } import java.util.concurrent.TimeUnit -import java.util.concurrent.locks.ReentrantLock import akka.jsr166y.ThreadLocalRandom import akka.util.Unsafe import akka.dispatch.Dispatchers From 134f1a19a50331acabac6b84eb3599d6edc24303 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 16:41:39 +0200 Subject: [PATCH 061/538] Reworking Listeners so that senders can be supplied --- .../main/scala/akka/routing/Listeners.scala | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Listeners.scala b/akka-actor/src/main/scala/akka/routing/Listeners.scala index 39fbf6355d..5ac02e2945 100644 --- a/akka-actor/src/main/scala/akka/routing/Listeners.scala +++ b/akka-actor/src/main/scala/akka/routing/Listeners.scala @@ -5,8 +5,7 @@ package akka.routing import akka.actor.{ Actor, 
ActorRef } -import java.util.concurrent.ConcurrentSkipListSet -import scala.collection.JavaConversions._ +import java.util.{ Set, TreeSet } sealed trait ListenerMessage case class Listen(listener: ActorRef) extends ListenerMessage @@ -25,13 +24,29 @@ case class WithListeners(f: (ActorRef) ⇒ Unit) extends ListenerMessage * Send WithListeners(fun) to traverse the current listeners. */ trait Listeners { self: Actor ⇒ - protected val listeners = new ConcurrentSkipListSet[ActorRef] + protected val listeners: Set[ActorRef] = new TreeSet[ActorRef] + /** + * Chain this into the receive function. + * + * {{ def receive = listenerManagement orElse … }} + */ protected def listenerManagement: Actor.Receive = { - case Listen(l) ⇒ listeners add l - case Deafen(l) ⇒ listeners remove l - case WithListeners(f) ⇒ listeners foreach f + case Listen(l) ⇒ listeners add l + case Deafen(l) ⇒ listeners remove l + case WithListeners(f) ⇒ + val i = listeners.iterator + while (i.hasNext) f(i.next) } - protected def gossip(msg: Any) = listeners foreach (_ ! msg) + /** + * Sends the supplied message to all current listeners using the provided sender as sender. + * + * @param msg + * @param sender + */ + protected def gossip(msg: Any)(implicit sender: ActorRef = null): Unit = { + val i = listeners.iterator + while (i.hasNext) i.next ! msg + } } From 6c327649908e6d127340076732d7c800f68cee83 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 18 May 2012 16:46:31 +0200 Subject: [PATCH 062/538] Embed the source of jmxclient into akka-kernel. 
See #2079 * Found source code here https://archive-crawler.svn.sourceforge.net * Included org.archive.jmx.Client in akka-kernel * Modified akka-cluster script * Removed dependency to jmxclient from build * Tested from dist with kernel app with cluster settings --- akka-kernel/src/main/dist/bin/akka-cluster | 18 +- .../src/main/java/org/archive/jmx/Client.java | 781 ++++++++++++++++++ project/AkkaBuild.scala | 3 +- 3 files changed, 792 insertions(+), 10 deletions(-) create mode 100644 akka-kernel/src/main/java/org/archive/jmx/Client.java diff --git a/akka-kernel/src/main/dist/bin/akka-cluster b/akka-kernel/src/main/dist/bin/akka-cluster index ecca52fa9b..7bb3a670da 100755 --- a/akka-kernel/src/main/dist/bin/akka-cluster +++ b/akka-kernel/src/main/dist/bin/akka-cluster @@ -3,7 +3,6 @@ # ============== Akka Cluster Administration Tool ============== # # This script is meant to be used from within the Akka distribution. -# Requires setting $AKKA_HOME to the root of the distribution. # # Add these options to the sbt or startup script: # java \ @@ -15,9 +14,12 @@ # FIXME support authentication? if so add: -Dcom.sun.management.jmxremote.password.file= AND tweak this script to support it (arg need 'user:passwd' instead of '-') -# NOTE: The 'cmdline-jmxclient' JAR is available as part of the Akka distribution. -# Provided by Typesafe Maven Repository: http://repo.typesafe.com/typesafe/releases/cmdline-jmxclient. -JMX_CLIENT="java -jar $AKKA_HOME/lib/akka/cmdline-jmxclient-0.10.3.jar -" +declare AKKA_HOME="$(cd "$(cd "$(dirname "$0")"; pwd -P)"/..; pwd)" + +[ -n "$JMX_CLIENT_CLASSPATH" ] || JMX_CLIENT_CLASSPATH="$AKKA_HOME/lib/akka/akka-kernel-*" + +# NOTE: The 'cmdline-jmxclient' is available as part of the Akka distribution. 
+JMX_CLIENT="java -cp $JMX_CLIENT_CLASSPATH org.archive.jmx.Client -" SELF=`basename $0` # script name HOST=$1 # cluster node:port to talk to through JMX @@ -168,7 +170,7 @@ case "$2" in ;; *) - printf "Usage: $SELF ...\n" + printf "Usage: bin/$SELF ...\n" printf "\n" printf "Supported commands are:\n" printf "%26s - %s\n" "join " "Sends request a JOIN node with the specified URL" @@ -183,9 +185,9 @@ case "$2" in printf "%26s - %s\n" has-convergence "Checks if there is a cluster convergence" printf "Where the should be on the format of 'akka://actor-system-name@hostname:port'\n" printf "\n" - printf "Examples: $SELF localhost:9999 is-available\n" - printf " $SELF localhost:9999 join akka://MySystem@darkstar:2552\n" - printf " $SELF localhost:9999 cluster-status\n" + printf "Examples: bin/$SELF localhost:9999 is-available\n" + printf " bin/$SELF localhost:9999 join akka://MySystem@darkstar:2552\n" + printf " bin/$SELF localhost:9999 cluster-status\n" exit 1 ;; esac diff --git a/akka-kernel/src/main/java/org/archive/jmx/Client.java b/akka-kernel/src/main/java/org/archive/jmx/Client.java new file mode 100644 index 0000000000..136de87ec3 --- /dev/null +++ b/akka-kernel/src/main/java/org/archive/jmx/Client.java @@ -0,0 +1,781 @@ +/* + * Client + * + * $Id$ + * + * Created on Nov 12, 2004 + * + * Copyright (C) 2004 Internet Archive. + * + * This file is part of the Heritrix web crawler (crawler.archive.org). + * + * Heritrix is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * any later version. + * + * Heritrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser Public License + * along with Heritrix; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package org.archive.jmx; + +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.text.FieldPosition; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.logging.ConsoleHandler; +import java.util.logging.Handler; +import java.util.logging.LogRecord; +import java.util.logging.Logger; +import java.util.logging.SimpleFormatter; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanFeatureInfo; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanParameterInfo; +import javax.management.MBeanServerConnection; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectInstance; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.TabularData; +import javax.management.remote.JMXConnector; +import javax.management.remote.JMXConnectorFactory; +import javax.management.remote.JMXServiceURL; + + +/** + * A Simple Command-Line JMX Client. + * Tested against the JDK 1.5.0 JMX Agent. + * See Monitoring + * and Management Using JMX. + *

Can supply credentials and do primitive string representation of tabular + * and composite openmbeans. + * @author stack + */ +public class Client { + private static final Logger logger = + Logger.getLogger(Client.class.getName()); + + /** + * Usage string. + */ + private static final String USAGE = "Usage: java -jar" + + " cmdline-jmxclient.jar USER:PASS HOST:PORT [BEAN] [COMMAND]\n" + + "Options:\n" + + " USER:PASS Username and password. Required. If none, pass '-'.\n" + + " E.g. 'controlRole:secret'\n" + + " HOST:PORT Hostname and port to connect to. Required." + + " E.g. localhost:8081.\n" + + " Lists registered beans if only USER:PASS and this" + + " argument.\n" + + " BEAN Optional target bean name. If present we list" + + " available operations\n" + + " and attributes.\n" + + " COMMAND Optional operation to run or attribute to fetch. If" + + " none supplied,\n" + + " all operations and attributes are listed. Attributes" + + " begin with a\n" + + " capital letter: e.g. 'Status' or 'Started'." + + " Operations do not.\n" + + " Operations can take arguments by adding an '=' " + + "followed by\n" + + " comma-delimited params. Pass multiple " + + "attributes/operations to run\n" + + " more than one per invocation. Use commands 'create' and " + + "'destroy'\n" + + " to instantiate and unregister beans ('create' takes name " + + "of class).\n" + + " Pass 'Attributes' to get listing of all attributes and " + + "and their\n" + + " values.\n" + + "Requirements:\n" + + " JDK1.5.0. If connecting to a SUN 1.5.0 JDK JMX Agent, remote side" + + " must be\n" + + " started with system properties such as the following:\n" + + " -Dcom.sun.management.jmxremote.port=PORT\n" + + " -Dcom.sun.management.jmxremote.authenticate=false\n" + + " -Dcom.sun.management.jmxremote.ssl=false\n" + + " The above will start the remote server with no password. 
See\n" + + " http://java.sun.com/j2se/1.5.0/docs/guide/management/agent.html" + + " for more on\n" + + " 'Monitoring and Management via JMX'.\n" + + "Client Use Examples:\n" + + " To list MBeans on a non-password protected remote agent:\n" + + " % java -jar cmdline-jmxclient-X.X.jar - localhost:8081 \\\n" + + " org.archive.crawler:name=Heritrix,type=Service\n" + + " To list attributes and attributes of the Heritrix MBean:\n" + + " % java -jar cmdline-jmxclient-X.X.jar - localhost:8081 \\\n" + + " org.archive.crawler:name=Heritrix,type=Service \\\n" + + " schedule=http://www.archive.org\n" + + " To set set logging level to FINE on a password protected JVM:\n" + + " % java -jar cmdline-jmxclient-X.X.jar controlRole:secret" + + " localhost:8081 \\\n" + + " java.util.logging:type=Logging \\\n" + + " setLoggerLevel=org.archive.crawler.Heritrix,FINE"; + + /** + * Pattern that matches a command name followed by + * an optional equals and optional comma-delimited list + * of arguments. + */ + protected static final Pattern CMD_LINE_ARGS_PATTERN = + Pattern.compile("^([^=]+)(?:(?:\\=)(.+))?$"); + + private static final String CREATE_CMD_PREFIX = "create="; + + public static void main(String[] args) throws Exception { + Client client = new Client(); + // Set the logger to use our all-on-one-line formatter. + Logger l = Logger.getLogger(""); + Handler [] hs = l.getHandlers(); + for (int i = 0; i < hs.length; i++) { + Handler h = hs[0]; + if (h instanceof ConsoleHandler) { + h.setFormatter(client.new OneLineSimpleLogger()); + } + } + client.execute(args); + } + + protected static void usage() { + usage(0, null); + } + + protected static void usage(int exitCode, String message) { + if (message != null && message.length() > 0) { + System.out.println(message); + } + System.out.println(USAGE); + System.exit(exitCode); + } + + /** + * Constructor. + */ + public Client() { + super(); + } + + /** + * Parse a 'login:password' string. Assumption is that no + * colon in the login name. 
+ * @param userpass + * @return Array of strings with login in first position. + */ + protected String [] parseUserpass(final String userpass) { + if (userpass == null || userpass.equals("-")) { + return null; + } + int index = userpass.indexOf(':'); + if (index <= 0) { + throw new RuntimeException("Unable to parse: " +userpass); + } + return new String [] {userpass.substring(0, index), + userpass.substring(index + 1)}; + } + + /** + * @param login + * @param password + * @return Credentials as map for RMI. + */ + protected Map formatCredentials(final String login, + final String password) { + Map env = null; + String[] creds = new String[] {login, password}; + env = new HashMap(1); + env.put(JMXConnector.CREDENTIALS, creds); + return env; + } + + protected JMXConnector getJMXConnector(final String hostport, + final String login, final String password) + throws IOException { + // Make up the jmx rmi URL and get a connector. + JMXServiceURL rmiurl = new JMXServiceURL("service:jmx:rmi://" + + hostport + "/jndi/rmi://" + hostport + "/jmxrmi"); + return JMXConnectorFactory.connect(rmiurl, + formatCredentials(login, password)); + } + + protected ObjectName getObjectName(final String beanname) + throws MalformedObjectNameException, NullPointerException { + return notEmpty(beanname)? new ObjectName(beanname): null; + } + + /** + * Version of execute called from the cmdline. + * Prints out result of execution on stdout. + * Parses cmdline args. Then calls {@link #execute(String, String, + * String, String, String[], boolean)}. + * @param args Cmdline args. + * @throws Exception + */ + protected void execute(final String [] args) + throws Exception { + // Process command-line. 
+ if (args.length == 0 || args.length == 1) { + usage(); + } + String userpass = args[0]; + String hostport = args[1]; + String beanname = null; + String [] command = null; + if (args.length > 2) { + beanname = args[2]; + } + if (args.length > 3) { + command = new String [args.length - 3]; + for (int i = 3; i < args.length; i++) { + command[i - 3] = args[i]; + } + } + String [] loginPassword = parseUserpass(userpass); + Object [] result = execute(hostport, + ((loginPassword == null)? null: loginPassword[0]), + ((loginPassword == null)? null: loginPassword[1]), beanname, + command); + // Print out results on stdout. Only log if a result. + if (result != null) { + for (int i = 0; i < result.length; i++) { + if (result[i] != null && result[i].toString().length() > 0) { + if (command != null) { + logger.info(command[i] + ": " + result[i]); + } else { + logger.info("\n" + result[i].toString()); + } + } + } + } + } + + protected Object [] execute(final String hostport, final String login, + final String password, final String beanname, + final String [] command) + throws Exception { + return execute(hostport, login, password, beanname, command, false); + } + + public Object [] executeOneCmd(final String hostport, final String login, + final String password, final String beanname, + final String command) + throws Exception { + return execute(hostport, login, password, beanname, + new String[] {command}, true); + } + + /** + * Execute command against remote JMX agent. + * @param hostport 'host:port' combination. + * @param login RMI login to use. + * @param password RMI password to use. + * @param beanname Name of remote bean to run command against. + * @param command Array of commands to run. + * @param oneBeanOnly Set true if passed beanname is + * an exact name and the query for a bean is only supposed to return + * one bean instance. 
If not, we raise an exception (Otherwise, if false, + * then we deal with possibility of multiple bean instances coming back + * from query). Set to true when want to get an attribute or run an + * operation. + * @return Array of results -- one per command. + * @throws Exception + */ + protected Object [] execute(final String hostport, final String login, + final String password, final String beanname, + final String [] command, final boolean oneBeanOnly) + throws Exception { + JMXConnector jmxc = getJMXConnector(hostport, login, password); + Object [] result = null; + try { + result = doBeans(jmxc.getMBeanServerConnection(), + getObjectName(beanname), command, oneBeanOnly); + } finally { + jmxc.close(); + } + return result; + } + + protected boolean notEmpty(String s) { + return s != null && s.length() > 0; + } + + protected Object [] doBeans(final MBeanServerConnection mbsc, + final ObjectName objName, final String[] command, + final boolean oneBeanOnly) + throws Exception { + Object [] result = null; + Set beans = mbsc.queryMBeans(objName, null); + if (beans.size() == 0) { + // No bean found. Check if we are to create a bean? + if (command.length == 1 && notEmpty(command[0]) + && command[0].startsWith(CREATE_CMD_PREFIX)) { + String className = + command[0].substring(CREATE_CMD_PREFIX.length()); + mbsc.createMBean(className, objName); + } else { + // TODO: Is there a better JMX exception that RE for this + // scenario? + throw new RuntimeException(objName.getCanonicalName() + + " not registered."); + } + } else if (beans.size() == 1) { + result = doBean(mbsc, (ObjectInstance) beans.iterator().next(), + command); + } else { + if (oneBeanOnly) { + throw new RuntimeException("Only supposed to be one bean " + + "query result"); + } + // This is case of multiple beans in query results. + // Print name of each into a StringBuffer. Return as one + // result. 
+ StringBuffer buffer = new StringBuffer(); + for (Iterator i = beans.iterator(); i.hasNext();) { + Object obj = i.next(); + if (obj instanceof ObjectName) { + buffer.append((((ObjectName) obj).getCanonicalName())); + } else if (obj instanceof ObjectInstance) { + buffer.append((((ObjectInstance) obj).getObjectName() + .getCanonicalName())); + } else { + throw new RuntimeException("Unexpected object type: " + obj); + } + buffer.append("\n"); + } + result = new String [] {buffer.toString()}; + } + return result; + } + + /** + * Get attribute or run operation against passed bean instance. + * + * @param mbsc Server connection. + * @param instance Bean instance we're to get attributes from or run + * operation against. + * @param command Command to run (May be null). + * @return Result. If multiple commands, multiple results. + * @throws Exception + */ + protected Object [] doBean(MBeanServerConnection mbsc, + ObjectInstance instance, String [] command) + throws Exception { + // If no command, then print out list of attributes and operations. + if (command == null || command.length <= 0) { + return new String [] {listOptions(mbsc, instance)}; + } + + // Maybe multiple attributes/operations listed on one command line. + Object [] result = new Object[command.length]; + for (int i = 0; i < command.length; i++) { + result[i] = doSubCommand(mbsc, instance, command[i]); + } + return result; + } + + public Object doSubCommand(MBeanServerConnection mbsc, + ObjectInstance instance, String subCommand) + throws Exception { + // First, handle special case of our being asked to destroy a bean. + if (subCommand.equals("destroy")) { + mbsc.unregisterMBean(instance.getObjectName()); + return null; + } else if (subCommand.startsWith(CREATE_CMD_PREFIX)) { + throw new IllegalArgumentException("You cannot call create " + + "on an already existing bean."); + } + + // Get attribute and operation info. 
+ MBeanAttributeInfo [] attributeInfo = + mbsc.getMBeanInfo(instance.getObjectName()).getAttributes(); + MBeanOperationInfo [] operationInfo = + mbsc.getMBeanInfo(instance.getObjectName()).getOperations(); + // Now, bdbje JMX bean doesn't follow the convention of attributes + // having uppercase first letter and operations having lowercase + // first letter. But most beans do. Be prepared to handle the bdbje + // case. + Object result = null; + if (Character.isUpperCase(subCommand.charAt(0))) { + // Probably an attribute. + if (!isFeatureInfo(attributeInfo, subCommand) && + isFeatureInfo(operationInfo, subCommand)) { + // Its not an attribute name. Looks like its name of an + // operation. Try it. + result = + doBeanOperation(mbsc, instance, subCommand, operationInfo); + } else { + // Then it is an attribute OR its not an attribute name nor + // operation name and the below invocation will throw a + // AttributeNotFoundException. + result = doAttributeOperation(mbsc, instance, subCommand, + attributeInfo); + } + } else { + // Must be an operation. + if (!isFeatureInfo(operationInfo, subCommand) && + isFeatureInfo(attributeInfo, subCommand)) { + // Its not an operation name but looks like it could be an + // attribute name. Try it. + result = doAttributeOperation(mbsc, instance, subCommand, + attributeInfo); + } else { + // Its an operation name OR its neither operation nor attribute + // name and the below will throw a NoSuchMethodException. + result = + doBeanOperation(mbsc, instance, subCommand, operationInfo); + } + } + + // Look at the result. Is it of composite or tabular type? + // If so, convert to a String representation. 
+ if (result instanceof CompositeData) { + result = recurseCompositeData(new StringBuffer("\n"), "", "", + (CompositeData)result); + } else if (result instanceof TabularData) { + result = recurseTabularData(new StringBuffer("\n"), "", "", + (TabularData)result); + } else if (result instanceof String []) { + String [] strs = (String [])result; + StringBuffer buffer = new StringBuffer("\n"); + for (int i = 0; i < strs.length; i++) { + buffer.append(strs[i]); + buffer.append("\n"); + } + result = buffer; + } else if (result instanceof AttributeList) { + AttributeList list = (AttributeList)result; + if (list.size() <= 0) { + result = null; + } else { + StringBuffer buffer = new StringBuffer("\n"); + for (Iterator ii = list.iterator(); ii.hasNext();) { + Attribute a = (Attribute)ii.next(); + buffer.append(a.getName()); + buffer.append(": "); + buffer.append(a.getValue()); + buffer.append("\n"); + } + result = buffer; + } + } + return result; + } + + protected boolean isFeatureInfo(MBeanFeatureInfo [] infos, String cmd) { + return getFeatureInfo(infos, cmd) != null; + } + + protected MBeanFeatureInfo getFeatureInfo(MBeanFeatureInfo [] infos, + String cmd) { + // Cmd may be carrying arguments. Don't count them in the compare. + int index = cmd.indexOf('='); + String name = (index > 0)? 
cmd.substring(0, index): cmd; + for (int i = 0; i < infos.length; i++) { + if (infos[i].getName().equals(name)) { + return infos[i]; + } + } + return null; + } + + protected StringBuffer recurseTabularData(StringBuffer buffer, + String indent, String name, TabularData data) { + addNameToBuffer(buffer, indent, name); + java.util.Collection c = data.values(); + for (Iterator i = c.iterator(); i.hasNext();) { + Object obj = i.next(); + if (obj instanceof CompositeData) { + recurseCompositeData(buffer, indent + " ", "", + (CompositeData)obj); + } else if (obj instanceof TabularData) { + recurseTabularData(buffer, indent, "", + (TabularData)obj); + } else { + buffer.append(obj); + } + } + return buffer; + } + + protected StringBuffer recurseCompositeData(StringBuffer buffer, + String indent, String name, CompositeData data) { + indent = addNameToBuffer(buffer, indent, name); + for (Iterator i = data.getCompositeType().keySet().iterator(); + i.hasNext();) { + String key = (String)i.next(); + Object o = data.get(key); + if (o instanceof CompositeData) { + recurseCompositeData(buffer, indent + " ", key, + (CompositeData)o); + } else if (o instanceof TabularData) { + recurseTabularData(buffer, indent, key, (TabularData)o); + } else { + buffer.append(indent); + buffer.append(key); + buffer.append(": "); + buffer.append(o); + buffer.append("\n"); + } + } + return buffer; + } + + protected String addNameToBuffer(StringBuffer buffer, String indent, + String name) { + if (name == null || name.length() == 0) { + return indent; + } + buffer.append(indent); + buffer.append(name); + buffer.append(":\n"); + // Move all that comes under this 'name' over by one space. + return indent + " "; + } + + /** + * Class that parses commandline arguments. + * Expected format is 'operationName=arg0,arg1,arg2...'. We are assuming no + * spaces nor comma's in argument values. 
+ */ + protected class CommandParse { + private String cmd; + private String [] args; + + protected CommandParse(String command) throws ParseException { + parse(command); + } + + private void parse(String command) throws ParseException { + Matcher m = CMD_LINE_ARGS_PATTERN.matcher(command); + if (m == null || !m.matches()) { + throw new ParseException("Failed parse of " + command, 0); + } + + this.cmd = m.group(1); + if (m.group(2) != null && m.group(2).length() > 0) { + this.args = m.group(2).split(","); + } else { + this.args = null; + } + } + + protected String getCmd() { + return this.cmd; + } + + protected String [] getArgs() { + return this.args; + } + } + + protected Object doAttributeOperation(MBeanServerConnection mbsc, + ObjectInstance instance, String command, MBeanAttributeInfo [] infos) + throws Exception { + // Usually we get attributes. If an argument, then we're being asked + // to set attribute. + CommandParse parse = new CommandParse(command); + if (parse.getArgs() == null || parse.getArgs().length == 0) { + // Special-casing. If the subCommand is 'Attributes', then return + // list of all attributes. + if (command.equals("Attributes")) { + String [] names = new String[infos.length]; + for (int i = 0; i < infos.length; i++) { + names[i] = infos[i].getName(); + } + return mbsc.getAttributes(instance.getObjectName(), names); + } + return mbsc.getAttribute(instance.getObjectName(), parse.getCmd()); + } + if (parse.getArgs().length != 1) { + throw new IllegalArgumentException("One only argument setting " + + "attribute values: " + parse.getArgs()); + } + // Get first attribute of name 'cmd'. Assumption is no method + // overrides. Then, look at the attribute and use its type. 
+ MBeanAttributeInfo info = + (MBeanAttributeInfo)getFeatureInfo(infos, parse.getCmd()); + java.lang.reflect.Constructor c = Class.forName( + info.getType()).getConstructor(new Class[] {String.class}); + Attribute a = new Attribute(parse.getCmd(), + c.newInstance(new Object[] {parse.getArgs()[0]})); + mbsc.setAttribute(instance.getObjectName(), a); + return null; + } + + protected Object doBeanOperation(MBeanServerConnection mbsc, + ObjectInstance instance, String command, MBeanOperationInfo [] infos) + throws Exception { + // Parse command line. + CommandParse parse = new CommandParse(command); + + // Get first method of name 'cmd'. Assumption is no method + // overrides. Then, look at the method and use its signature + // to make sure client sends over parameters of the correct type. + MBeanOperationInfo op = + (MBeanOperationInfo)getFeatureInfo(infos, parse.getCmd()); + Object result = null; + if (op == null) { + result = "Operation " + parse.getCmd() + " not found."; + } else { + MBeanParameterInfo [] paraminfos = op.getSignature(); + int paraminfosLength = (paraminfos == null)? 0: paraminfos.length; + int objsLength = (parse.getArgs() == null)? + 0: parse.getArgs().length; + if (paraminfosLength != objsLength) { + result = "Passed param count does not match signature count"; + } else { + String [] signature = new String[paraminfosLength]; + Object [] params = (paraminfosLength == 0)? 
null + : new Object[paraminfosLength]; + for (int i = 0; i < paraminfosLength; i++) { + MBeanParameterInfo paraminfo = paraminfos[i]; + java.lang.reflect.Constructor c = Class.forName( + paraminfo.getType()).getConstructor( + new Class[] {String.class}); + params[i] = + c.newInstance(new Object[] {parse.getArgs()[i]}); + signature[i] = paraminfo.getType(); + } + result = mbsc.invoke(instance.getObjectName(), parse.getCmd(), + params, signature); + } + } + return result; + } + + protected String listOptions(MBeanServerConnection mbsc, + ObjectInstance instance) + throws InstanceNotFoundException, IntrospectionException, + ReflectionException, IOException { + StringBuffer result = new StringBuffer(); + MBeanInfo info = mbsc.getMBeanInfo(instance.getObjectName()); + MBeanAttributeInfo [] attributes = info.getAttributes(); + if (attributes.length > 0) { + result.append("Attributes:"); + result.append("\n"); + for (int i = 0; i < attributes.length; i++) { + result.append(' ' + attributes[i].getName() + + ": " + attributes[i].getDescription() + + " (type=" + attributes[i].getType() + + ")"); + result.append("\n"); + } + } + MBeanOperationInfo [] operations = info.getOperations(); + if (operations.length > 0) { + result.append("Operations:"); + result.append("\n"); + for (int i = 0; i < operations.length; i++) { + MBeanParameterInfo [] params = operations[i].getSignature(); + StringBuffer paramsStrBuffer = new StringBuffer(); + if (params != null) { + for (int j = 0; j < params.length; j++) { + paramsStrBuffer.append("\n name="); + paramsStrBuffer.append(params[j].getName()); + paramsStrBuffer.append(" type="); + paramsStrBuffer.append(params[j].getType()); + paramsStrBuffer.append(" "); + paramsStrBuffer.append(params[j].getDescription()); + } + } + result.append(' ' + operations[i].getName() + + ": " + operations[i].getDescription() + + "\n Parameters " + params.length + + ", return type=" + operations[i].getReturnType() + + paramsStrBuffer.toString()); + 
result.append("\n"); + } + } + return result.toString(); + } + + /** + * Logger that writes entry on one line with less verbose date. + * Modelled on the OneLineSimpleLogger from Heritrix. + * + * @author stack + * @version $Revision$, $Date$ + */ + private class OneLineSimpleLogger extends SimpleFormatter { + /** + * Date instance. + * + * Keep around instance of date. + */ + private Date date = new Date(); + + /** + * Field position instance. + * + * Keep around this instance. + */ + private FieldPosition position = new FieldPosition(0); + + /** + * MessageFormatter for date. + */ + private SimpleDateFormat formatter = + new SimpleDateFormat("MM/dd/yyyy HH:mm:ss Z"); + + /** + * Persistent buffer in which we conjure the log. + */ + private StringBuffer buffer = new StringBuffer(); + + + public OneLineSimpleLogger() { + super(); + } + + public synchronized String format(LogRecord record) { + this.buffer.setLength(0); + this.date.setTime(record.getMillis()); + this.position.setBeginIndex(0); + this.formatter.format(this.date, this.buffer, this.position); + this.buffer.append(' '); + if (record.getSourceClassName() != null) { + this.buffer.append(record.getSourceClassName()); + } else { + this.buffer.append(record.getLoggerName()); + } + this.buffer.append(' '); + this.buffer.append(formatMessage(record)); + this.buffer.append(System.getProperty("line.separator")); + if (record.getThrown() != null) { + try { + StringWriter writer = new StringWriter(); + PrintWriter printer = new PrintWriter(writer); + record.getThrown().printStackTrace(printer); + writer.close(); + this.buffer.append(writer.toString()); + } catch (Exception e) { + this.buffer.append("Failed to get stack trace: " + + e.getMessage()); + } + } + return this.buffer.toString(); + } + } +} \ No newline at end of file diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 4804c0f796..3b1b84ef77 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -380,7 +380,7 @@ object 
Dependencies { val fileMailbox = Seq(Test.commonsIo, Test.scalatest, Test.junit) - val kernel = Seq(jmxClient, Test.scalatest, Test.junit) + val kernel = Seq(Test.scalatest, Test.junit) val camel = Seq(camelCore, Test.scalatest, Test.junit, Test.mockito) @@ -408,7 +408,6 @@ object Dependency { // Compile val camelCore = "org.apache.camel" % "camel-core" % V.Camel // ApacheV2 - val jmxClient = "cmdline-jmxclient" % "cmdline-jmxclient" % "0.10.3" // LGPL val netty = "io.netty" % "netty" % V.Netty // ApacheV2 val protobuf = "com.google.protobuf" % "protobuf-java" % V.Protobuf // New BSD val scalaStm = "org.scala-tools" % "scala-stm_2.9.1" % V.ScalaStm // Modified BSD (Scala) From 66600f9c52dfefb577fd1ef4bd89a0fde685d724 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 18 May 2012 16:51:12 +0200 Subject: [PATCH 063/538] Moved the dispatcher/router verification to RoutedActorRef, also checks dispatcher only through the config so we don't trigger creation of dispatcher. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 12 ------------ .../src/main/scala/akka/dispatch/Dispatchers.scala | 6 +++--- akka-actor/src/main/scala/akka/routing/Routing.scala | 10 ++++++++-- 3 files changed, 11 insertions(+), 17 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 9cc993062f..8c68ba3315 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -357,19 +357,7 @@ private[akka] class ActorCell( case _ ⇒ true } - private def verifyActorConfiguration(system: ActorSystem, props: Props, actorName: String): Unit = { - import akka.config.ConfigurationException - import akka.routing.NoRouter - // verify that a BalancingDispatcher is not used with a Router - if (system.dispatchers.lookup(props.dispatcher).isInstanceOf[BalancingDispatcher] && props.routerConfig != NoRouter) - throw new ConfigurationException( - "Configuration for actor [" + actorName + - "] is invalid - you can not use a 'BalancingDispatcher' together with any type of 'Router'") - } - private def _actorOf(props: Props, name: String): ActorRef = { - verifyActorConfiguration(systemImpl, props, name) - if (system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { val ser = SerializationExtension(system) ser.serialize(props.creator) match { diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 93d44e007d..a81a8e6c2b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -120,9 +120,9 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc * Throws: IllegalArgumentException if 
the value of "type" is not valid * IllegalArgumentException if it cannot create the MessageDispatcherConfigurator */ - private[akka] def from(cfg: Config): MessageDispatcher = { - configuratorFrom(cfg).dispatcher() - } + private[akka] def from(cfg: Config): MessageDispatcher = configuratorFrom(cfg).dispatcher() + + private[akka] def isBalancingDispatcher(id: String): Boolean = settings.config.hasPath(id) && config(id).getString("type") == "BalancingDispatcher" /* * Creates a MessageDispatcherConfigurator from a Config. diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index fdf14a5b96..58ecbfcdc5 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -31,11 +31,17 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _supervisor, _path) { + // verify that a BalancingDispatcher is not used with a Router + if (_system.dispatchers.isBalancingDispatcher(_props.dispatcher) && _props.routerConfig != NoRouter) + throw new ConfigurationException( + "Configuration for actor [" + _path.toString + + "] is invalid - you can not use a 'BalancingDispatcher' together with any type of 'Router'") + /* * CAUTION: RoutedActorRef is PROBLEMATIC * ====================================== - * - * We are constructing/assembling the children outside of the scope of the + * + * We are constructing/assembling the children outside of the scope of the * Router actor, inserting them in its childrenRef list, which is not at all * synchronized. This is done exactly once at start-up, all other accesses * are done from the Router actor. 
This means that the only thing which is From e825a8ac4f0dd84297fb7d93f01f1f755e573631 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 18 May 2012 18:44:53 +0200 Subject: [PATCH 064/538] switch MultiNodeSpec to use RoleName type when referring to participants - also add MultiNodeConfig base class for conveniently declaring a test setup including roles and (node specific) config settings --- .../akka/remote/testconductor/Conductor.scala | 32 ++-- .../akka/remote/testconductor/DataTypes.scala | 22 +-- .../akka/remote/testconductor/Player.scala | 10 +- .../testconductor/TestConductorSpec.scala | 41 +++-- .../remote/testconductor/BarrierSpec.scala | 164 +++++++++--------- .../remote/testconductor/ControllerSpec.scala | 9 +- .../akka/remote/testkit/MultiNodeSpec.scala | 87 ++++++---- 7 files changed, 197 insertions(+), 168 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index d4fa3152e6..6c26fcaae2 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -71,7 +71,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param participants gives the number of participants which shall connect * before any of their startClient() operations complete. 
*/ - def startController(participants: Int, name: String, controllerPort: InetSocketAddress): Future[InetSocketAddress] = { + def startController(participants: Int, name: RoleName, controllerPort: InetSocketAddress): Future[InetSocketAddress] = { if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") _controller = system.actorOf(Props(new Controller(participants, controllerPort)), "controller") import Settings.BarrierTimeout @@ -106,7 +106,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both` * @param rateMBit is the maximum data rate in MBit */ - def throttle(node: String, target: String, direction: Direction, rateMBit: Double): Future[Done] = { + def throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Double): Future[Done] = { import Settings.QueryTimeout controller ? Throttle(node, target, direction, rateMBit.toFloat) mapTo } @@ -121,7 +121,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param target is the symbolic name of the other node to which connectivity shall be impeded * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both` */ - def blackhole(node: String, target: String, direction: Direction): Future[Done] = { + def blackhole(node: RoleName, target: RoleName, direction: Direction): Future[Done] = { import Settings.QueryTimeout controller ? Throttle(node, target, direction, 0f) mapTo } @@ -134,7 +134,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param node is the symbolic name of the node which is to be affected * @param target is the symbolic name of the other node to which connectivity shall be impeded */ - def disconnect(node: String, target: String): Future[Done] = { + def disconnect(node: RoleName, target: RoleName): Future[Done] = { import Settings.QueryTimeout controller ? 
Disconnect(node, target, false) mapTo } @@ -147,7 +147,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param node is the symbolic name of the node which is to be affected * @param target is the symbolic name of the other node to which connectivity shall be impeded */ - def abort(node: String, target: String): Future[Done] = { + def abort(node: RoleName, target: RoleName): Future[Done] = { import Settings.QueryTimeout controller ? Disconnect(node, target, true) mapTo } @@ -159,7 +159,7 @@ trait Conductor { this: TestConductorExt ⇒ * @param node is the symbolic name of the node which is to be affected * @param exitValue is the return code which shall be given to System.exit */ - def shutdown(node: String, exitValue: Int): Future[Done] = { + def shutdown(node: RoleName, exitValue: Int): Future[Done] = { import Settings.QueryTimeout controller ? Terminate(node, exitValue) mapTo } @@ -169,7 +169,7 @@ trait Conductor { this: TestConductorExt ⇒ * * @param node is the symbolic name of the node which is to be affected */ - def kill(node: String): Future[Done] = { + def kill(node: RoleName): Future[Done] = { import Settings.QueryTimeout controller ? Terminate(node, -1) mapTo } @@ -177,7 +177,7 @@ trait Conductor { this: TestConductorExt ⇒ /** * Obtain the list of remote host names currently registered. */ - def getNodes: Future[Iterable[String]] = { + def getNodes: Future[Iterable[RoleName]] = { import Settings.QueryTimeout controller ? GetNodes mapTo } @@ -190,7 +190,7 @@ trait Conductor { this: TestConductorExt ⇒ * * @param node is the symbolic name of the node which is to be removed */ - def removeNode(node: String): Future[Done] = { + def removeNode(node: RoleName): Future[Done] = { import Settings.QueryTimeout controller ? Remove(node) mapTo } @@ -274,7 +274,7 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi when(Initial, stateTimeout = 10 seconds) { case Event(Hello(name, addr), _) ⇒ - controller ! 
NodeInfo(name, addr, self) + controller ! NodeInfo(RoleName(name), addr, self) goto(Ready) case Event(x: NetworkOp, _) ⇒ log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x) @@ -318,11 +318,11 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } object Controller { - case class ClientDisconnected(name: String) + case class ClientDisconnected(name: RoleName) case object GetNodes case object GetSockAddr - case class NodeInfo(name: String, addr: Address, fsm: ActorRef) + case class NodeInfo(name: RoleName, addr: Address, fsm: ActorRef) } /** @@ -359,10 +359,10 @@ class Controller(private var initialParticipants: Int, controllerPort: InetSocke } val barrier = context.actorOf(Props[BarrierCoordinator], "barriers") - var nodes = Map[String, NodeInfo]() + var nodes = Map[RoleName, NodeInfo]() // map keeping unanswered queries for node addresses (enqueued upon GetAddress, serviced upon NodeInfo) - var addrInterest = Map[String, Set[ActorRef]]() + var addrInterest = Map[RoleName, Set[ActorRef]]() override def receive = LoggingReceive { case c @ NodeInfo(name, addr, fsm) ⇒ @@ -423,7 +423,7 @@ object BarrierCoordinator { case object Idle extends State case object Waiting extends State - case class RemoveClient(name: String) + case class RemoveClient(name: RoleName) case class Data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef]) @@ -435,7 +435,7 @@ object BarrierCoordinator { case class DuplicateNode(data: Data, node: Controller.NodeInfo) extends RuntimeException with NoStackTrace with Printer case class WrongBarrier(barrier: String, client: ActorRef, data: Data) extends RuntimeException(barrier) with NoStackTrace with Printer case class BarrierEmpty(data: Data, msg: String) extends RuntimeException(msg) with NoStackTrace with Printer - case class ClientLost(data: Data, client: String) extends RuntimeException with NoStackTrace with Printer + case class 
ClientLost(data: Data, client: RoleName) extends RuntimeException with NoStackTrace with Printer } /** diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index 0273055469..2bb7d50c37 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -11,6 +11,8 @@ import com.google.protobuf.Message import akka.actor.Address import org.jboss.netty.handler.codec.oneone.OneToOneDecoder +case class RoleName(name: String) + case class ToClient(msg: ClientOp with NetworkOp) case class ToServer(msg: ServerOp with NetworkOp) @@ -29,24 +31,24 @@ case class Hello(name: String, addr: Address) extends NetworkOp case class EnterBarrier(name: String) extends ServerOp with NetworkOp case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp -case class Throttle(node: String, target: String, direction: Direction, rateMBit: Float) extends CommandOp +case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) extends CommandOp case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp -case class Disconnect(node: String, target: String, abort: Boolean) extends CommandOp +case class Disconnect(node: RoleName, target: RoleName, abort: Boolean) extends CommandOp case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with NetworkOp -case class Terminate(node: String, exitValueOrKill: Int) extends CommandOp +case class Terminate(node: RoleName, exitValueOrKill: Int) extends CommandOp case class TerminateMsg(exitValue: Int) extends ConfirmedClientOp with NetworkOp -case class GetAddress(node: String) extends ServerOp with NetworkOp -case class AddressReply(node: String, addr: Address) extends UnconfirmedClientOp 
with NetworkOp +case class GetAddress(node: RoleName) extends ServerOp with NetworkOp +case class AddressReply(node: RoleName, addr: Address) extends UnconfirmedClientOp with NetworkOp abstract class Done extends ServerOp with UnconfirmedClientOp with NetworkOp case object Done extends Done { def getInstance: Done = this } -case class Remove(node: String) extends CommandOp +case class Remove(node: RoleName) extends CommandOp class MsgEncoder extends OneToOneEncoder { def encode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { @@ -68,9 +70,9 @@ class MsgEncoder extends OneToOneEncoder { case TerminateMsg(exitValue) ⇒ w.setFailure(TCP.InjectFailure.newBuilder.setFailure(TCP.FailType.Shutdown).setExitValue(exitValue)) case GetAddress(node) ⇒ - w.setAddr(TCP.AddressRequest.newBuilder.setNode(node)) + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node.name)) case AddressReply(node, addr) ⇒ - w.setAddr(TCP.AddressRequest.newBuilder.setNode(node).setAddr(addr)) + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node.name).setAddr(addr)) case _: Done ⇒ w.setDone("") } @@ -100,8 +102,8 @@ class MsgDecoder extends OneToOneDecoder { } } else if (w.hasAddr) { val a = w.getAddr - if (a.hasAddr) AddressReply(a.getNode, a.getAddr) - else GetAddress(a.getNode) + if (a.hasAddr) AddressReply(RoleName(a.getNode), a.getAddr) + else GetAddress(RoleName(a.getNode)) } else if (w.hasDone) { Done } else { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index 27a2487364..10434007e1 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -50,7 +50,7 @@ trait Player { this: TestConductorExt ⇒ * this is a first barrier in itself). The number of expected participants is * set in [[akka.remote.testconductor.Conductor]]`.startController()`. 
*/ - def startClient(name: String, controllerAddr: InetSocketAddress): Future[Done] = { + def startClient(name: RoleName, controllerAddr: InetSocketAddress): Future[Done] = { import ClientFSM._ import akka.actor.FSM._ import Settings.BarrierTimeout @@ -88,7 +88,7 @@ trait Player { this: TestConductorExt ⇒ /** * Query remote transport address of named node. */ - def getAddressFor(name: String): Future[Address] = { + def getAddressFor(name: RoleName): Future[Address] = { import Settings.BarrierTimeout client ? ToServer(GetAddress(name)) mapTo } @@ -117,7 +117,7 @@ object ClientFSM { * coordinator and react to the [[akka.remote.testconductor.Conductor]]’s * requests for failure injection. */ -class ClientFSM(name: String, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { +class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ val settings = TestConductor().Settings @@ -131,7 +131,7 @@ class ClientFSM(name: String, controllerAddr: InetSocketAddress) extends Actor w case Event(msg: ClientOp, _) ⇒ stay replying Status.Failure(new IllegalStateException("not connected yet")) case Event(Connected(channel), _) ⇒ - channel.write(Hello(name, TestConductor().address)) + channel.write(Hello(name.name, TestConductor().address)) goto(AwaitDone) using Data(Some(channel), None) case Event(_: ConnectionFailure, _) ⇒ goto(Failed) @@ -165,7 +165,7 @@ class ClientFSM(name: String, controllerAddr: InetSocketAddress) extends Actor w channel.write(msg) val token = msg match { case EnterBarrier(barrier) ⇒ barrier - case GetAddress(node) ⇒ node + case GetAddress(node) ⇒ node.name } stay using d.copy(runningOp = Some(token, sender)) case Event(ToServer(op), Data(channel, Some((token, _)))) ⇒ diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 7f3763fcc1..087aac55c7 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -13,9 +13,10 @@ import akka.testkit.ImplicitSender import java.net.InetSocketAddress import java.net.InetAddress import akka.remote.testkit.MultiNodeSpec +import akka.remote.testkit.MultiNodeConfig -object TestConductorMultiJvmSpec { - def commonConfig = ConfigFactory.parseString(""" +object TestConductorMultiJvmSpec extends MultiNodeConfig { + commonConfig(ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.remote { log-received-messages = on @@ -25,18 +26,22 @@ object TestConductorMultiJvmSpec { receive = on fsm = on } - """) + """)) + + val master = role("master") + val slave = role("slave") } class TestConductorMultiJvmNode1 extends TestConductorSpec class TestConductorMultiJvmNode2 extends TestConductorSpec -class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonConfig) with ImplicitSender { +class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with ImplicitSender { + + import TestConductorMultiJvmSpec._ def initialParticipants = 2 - lazy val roles = Seq("master", "slave") - runOn("master") { + runOn(master) { system.actorOf(Props(new Actor { def receive = { case x ⇒ testActor ! x; sender ! x @@ -44,7 +49,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonCo }), "echo") } - val echo = system.actorFor(node("master") / "user" / "echo") + val echo = system.actorFor(node(master) / "user" / "echo") "A TestConductor" must { @@ -54,20 +59,20 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonCo "support throttling of network connections" in { - runOn("slave") { + runOn(slave) { // start remote network connection so that it can be throttled echo ! 
"start" } expectMsg("start") - runOn("master") { - testConductor.throttle("slave", "master", Direction.Send, rateMBit = 0.01).await + runOn(master) { + testConductor.throttle(slave, master, Direction.Send, rateMBit = 0.01).await } testConductor.enter("throttled_send") - runOn("slave") { + runOn(slave) { for (i ← 0 to 9) echo ! i } @@ -78,19 +83,19 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonCo testConductor.enter("throttled_send2") - runOn("master") { - testConductor.throttle("slave", "master", Direction.Send, -1).await - testConductor.throttle("slave", "master", Direction.Receive, rateMBit = 0.01).await + runOn(master) { + testConductor.throttle(slave, master, Direction.Send, -1).await + testConductor.throttle(slave, master, Direction.Receive, rateMBit = 0.01).await } testConductor.enter("throttled_recv") - runOn("slave") { + runOn(slave) { for (i ← 10 to 19) echo ! i } val (min, max) = - ifNode("master") { + ifNode(master) { (0 seconds, 500 millis) } { (0.6 seconds, 2 seconds) @@ -103,8 +108,8 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec.commonCo testConductor.enter("throttled_recv2") - runOn("master") { - testConductor.throttle("slave", "master", Direction.Receive, -1).await + runOn(master) { + testConductor.throttle(slave, master, Direction.Receive, -1).await } } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index aa14b93f9d..e0fd5dfb97 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -36,6 +36,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with import Controller._ import BarrierCoordinator._ + val A = RoleName("a") + val B = RoleName("b") + val C = RoleName("c") + override def afterEach { 
system.eventStream.setLogLevel(Logging.WarningLevel) } @@ -44,25 +48,25 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "register clients and remove them" in { val b = getBarrier() - b ! NodeInfo("a", AddressFromURIString("akka://sys"), system.deadLetters) - b ! RemoveClient("b") - b ! RemoveClient("a") + b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters) + b ! RemoveClient(B) + b ! RemoveClient(A) EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! RemoveClient("a") + b ! RemoveClient(A) } expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to remove"))) } "register clients and disconnect them" in { val b = getBarrier() - b ! NodeInfo("a", AddressFromURIString("akka://sys"), system.deadLetters) - b ! ClientDisconnected("b") + b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters) + b ! ClientDisconnected(B) EventFilter[ClientLost](occurrences = 1) intercept { - b ! ClientDisconnected("a") + b ! ClientDisconnected(A) } - expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil), "a"))) + expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil), A))) EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! ClientDisconnected("a") + b ! ClientDisconnected(A) } expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to disconnect"))) } @@ -76,8 +80,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier" in { val barrier = getBarrier() val a, b = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! 
NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) noMsg(a, b) within(1 second) { @@ -90,10 +94,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier with joining node" in { val barrier = getBarrier() val a, b, c = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) - barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) b.send(barrier, EnterBarrier("bar")) noMsg(a, b, c) within(1 second) { @@ -107,29 +111,29 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier with leaving node" in { val barrier = getBarrier() val a, b, c = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) - barrier ! RemoveClient("a") - barrier ! ClientDisconnected("a") + barrier ! RemoveClient(A) + barrier ! ClientDisconnected(A) noMsg(a, b, c) b.within(1 second) { - barrier ! RemoveClient("c") + barrier ! RemoveClient(C) b.expectMsg(ToClient(BarrierResult("bar", true))) } - barrier ! ClientDisconnected("c") + barrier ! ClientDisconnected(C) expectNoMsg(1 second) } "leave barrier when last “arrived” is removed" in { val barrier = getBarrier() val a, b = TestProbe() - barrier ! 
NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) - barrier ! RemoveClient("a") + barrier ! RemoveClient(A) b.send(barrier, EnterBarrier("foo")) b.expectMsg(ToClient(BarrierResult("foo", true))) } @@ -137,38 +141,38 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail barrier with disconnecing node" in { val barrier = getBarrier() val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) EventFilter[ClientLost](occurrences = 1) intercept { - barrier ! ClientDisconnected("b") + barrier ! ClientDisconnected(B) } - expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), "b"))) + expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), B))) } "fail barrier with disconnecing node who already arrived" in { val barrier = getBarrier() val a, b, c = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeC = NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeC = NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) barrier ! nodeA - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeC a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) EventFilter[ClientLost](occurrences = 1) intercept { - barrier ! 
ClientDisconnected("b") + barrier ! ClientDisconnected(B) } - expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), "b"))) + expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), B))) } "fail when entering wrong barrier" in { val barrier = getBarrier() val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA - val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeB a.send(barrier, EnterBarrier("bar")) EventFilter[WrongBarrier](occurrences = 1) intercept { @@ -181,10 +185,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val barrier = getBarrier() val a = TestProbe() EventFilter[BarrierEmpty](occurrences = 1) intercept { - barrier ! RemoveClient("a") + barrier ! RemoveClient(A) } expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "no client to remove"))) - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("right")) a.expectMsg(ToClient(BarrierResult("right", false))) } @@ -192,8 +196,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail after barrier timeout" in { val barrier = getBarrier() val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA barrier ! 
nodeB a.send(barrier, EnterBarrier("right")) @@ -205,8 +209,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail if a node registers twice" in { val barrier = getBarrier() val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA EventFilter[DuplicateNode](occurrences = 1) intercept { barrier ! nodeB @@ -224,25 +228,25 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "register clients and remove them" in { val b = getController(1) - b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) + b ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) - b ! Remove("b") - b ! Remove("a") + b ! Remove(B) + b ! Remove(A) EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! Remove("a") + b ! Remove(A) } } "register clients and disconnect them" in { val b = getController(1) - b ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) + b ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) - b ! ClientDisconnected("b") + b ! ClientDisconnected(B) EventFilter[ClientLost](occurrences = 1) intercept { - b ! ClientDisconnected("a") + b ! ClientDisconnected(A) } EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! ClientDisconnected("a") + b ! ClientDisconnected(A) } } @@ -255,8 +259,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier" in { val barrier = getController(2) val a, b = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! 
NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) @@ -271,12 +275,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier with joining node" in { val barrier = getController(2) val a, b, c = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) - barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) c.expectMsg(ToClient(Done)) b.send(barrier, EnterBarrier("bar")) noMsg(a, b, c) @@ -291,34 +295,34 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "enter barrier with leaving node" in { val barrier = getController(3) val a, b, c = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) - barrier ! NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) - barrier ! Remove("a") - barrier ! ClientDisconnected("a") + barrier ! Remove(A) + barrier ! ClientDisconnected(A) noMsg(a, b, c) b.within(1 second) { - barrier ! Remove("c") + barrier ! Remove(C) b.expectMsg(ToClient(BarrierResult("bar", true))) } - barrier ! ClientDisconnected("c") + barrier ! 
ClientDisconnected(C) expectNoMsg(1 second) } "leave barrier when last “arrived” is removed" in { val barrier = getController(2) val a, b = TestProbe() - barrier ! NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) - barrier ! Remove("a") + barrier ! Remove(A) b.send(barrier, EnterBarrier("foo")) b.expectMsg(ToClient(BarrierResult("foo", true))) } @@ -326,16 +330,16 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail barrier with disconnecing node" in { val barrier = getController(2) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) - barrier ! ClientDisconnected("unknown") + barrier ! ClientDisconnected(RoleName("unknown")) noMsg(a) EventFilter[ClientLost](occurrences = 1) intercept { - barrier ! ClientDisconnected("b") + barrier ! 
ClientDisconnected(B) } a.expectMsg(ToClient(BarrierResult("bar", false))) } @@ -343,10 +347,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail barrier with disconnecing node who already arrived" in { val barrier = getController(3) val a, b, c = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeC = NodeInfo("c", AddressFromURIString("akka://sys"), c.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeC = NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) barrier ! nodeA - barrier ! NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeC a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) @@ -354,7 +358,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.send(barrier, EnterBarrier("bar")) b.send(barrier, EnterBarrier("bar")) EventFilter[ClientLost](occurrences = 1) intercept { - barrier ! ClientDisconnected("b") + barrier ! ClientDisconnected(B) } a.expectMsg(ToClient(BarrierResult("bar", false))) } @@ -362,9 +366,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail when entering wrong barrier" in { val barrier = getController(2) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA - val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! 
nodeB a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) @@ -379,8 +383,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "not really fail after barrier timeout" in { val barrier = getController(2) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("b", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA barrier ! nodeB a.expectMsg(ToClient(Done)) @@ -397,8 +401,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail if a node registers twice" in { val controller = getController(2) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) controller ! nodeA EventFilter[DuplicateNode](occurrences = 1) intercept { controller ! nodeB @@ -410,8 +414,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail subsequent barriers if a node registers twice" in { val controller = getController(1) val a, b = TestProbe() - val nodeA = NodeInfo("a", AddressFromURIString("akka://sys"), a.ref) - val nodeB = NodeInfo("a", AddressFromURIString("akka://sys"), b.ref) + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) controller ! 
nodeA a.expectMsg(ToClient(Done)) EventFilter[DuplicateNode](occurrences = 1) intercept { diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala index c4e0ca6cd0..13140adfb5 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala @@ -23,16 +23,19 @@ object ControllerSpec { class ControllerSpec extends AkkaSpec(ControllerSpec.config) with ImplicitSender { + val A = RoleName("a") + val B = RoleName("b") + "A Controller" must { "publish its nodes" in { val c = system.actorOf(Props(new Controller(1, new InetSocketAddress(InetAddress.getLocalHost, 0)))) - c ! NodeInfo("a", AddressFromURIString("akka://sys"), testActor) + c ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) - c ! NodeInfo("b", AddressFromURIString("akka://sys"), testActor) + c ! NodeInfo(B, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) c ! Controller.GetNodes - expectMsgType[Iterable[String]].toSet must be(Set("a", "b")) + expectMsgType[Iterable[RoleName]].toSet must be(Set(A, B)) } } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 7acde4eac9..92e65247fb 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -16,6 +16,50 @@ import akka.dispatch.Await import akka.util.Duration import akka.actor.ActorPath import akka.actor.RootActorPath +import akka.remote.testconductor.RoleName + +/** + * Configure the role names and participants of the test, including configuration settings. 
+ */ +abstract class MultiNodeConfig { + + private var _commonConf: Option[Config] = None + private var _nodeConf = Map[RoleName, Config]() + private var _roles = Seq[RoleName]() + + /** + * Register a common base config for all test participants, if so desired. + */ + def commonConfig(config: Config): Unit = _commonConf = Some(config) + + /** + * Register a config override for a specific participant. + */ + def nodeConfig(role: RoleName, config: Config): Unit = _nodeConf += role -> config + + /** + * Construct a RoleName and return it, to be used as an identifier in the + * test. Registration of a role name creates a role which then needs to be + * filled. + */ + def role(name: String): RoleName = { + if (_roles exists (_.name == name)) throw new IllegalArgumentException("non-unique role name " + name) + val r = RoleName(name) + _roles :+= r + r + } + + private[testkit] lazy val mySelf: RoleName = { + require(_roles.size > MultiNodeSpec.selfIndex, "not enough roles declared for this test") + _roles(MultiNodeSpec.selfIndex) + } + + private[testkit] def config: Config = { + val configs = (_nodeConf get mySelf).toList ::: _commonConf.toList ::: MultiNodeSpec.nodeConfig :: AkkaSpec.testConf :: Nil + configs reduce (_ withFallback _) + } + +} object MultiNodeSpec { @@ -52,18 +96,11 @@ object MultiNodeSpec { } -abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { +abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem) extends AkkaSpec(_system) { import MultiNodeSpec._ - def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, - MultiNodeSpec.nodeConfig.withFallback(config.withFallback(AkkaSpec.testConf)))) - - def this(s: String) = this(ConfigFactory.parseString(s)) - - def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) - - def this() = this(AkkaSpec.testConf) + def this(config: MultiNodeConfig) = this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config)) /* * Test Class 
Interface @@ -89,39 +126,17 @@ abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { */ val testConductor: TestConductorExt = TestConductor(system) - /** - * TO BE DEFINED BY USER: The test class must define a set of role names to - * be used throughout the run, e.g. in naming nodes in failure injections. - * These will be mapped to the available nodes such that the first name will - * be the Controller, i.e. on this one you can do failure injection. - * - * Should be a lazy val due to initialization order: - * {{{ - * lazy val roles = Seq("master", "slave") - * }}} - */ - def roles: Seq[String] - - require(roles.size >= initialParticipants, "not enough roles for initialParticipants") - require(roles.size <= nodeNames.size, "not enough nodes for number of roles") - require(roles.distinct.size == roles.size, "role names must be distinct") - - val mySelf = { - if (selfIndex >= roles.size) System.exit(0) - roles(selfIndex) - } - /** * Execute the given block of code only on the given nodes (names according * to the `roleMap`). */ - def runOn(nodes: String*)(thunk: ⇒ Unit): Unit = { + def runOn(nodes: RoleName*)(thunk: ⇒ Unit): Unit = { if (nodes exists (_ == mySelf)) { thunk } } - def ifNode[T](nodes: String*)(yes: ⇒ T)(no: ⇒ T): T = { + def ifNode[T](nodes: RoleName*)(yes: ⇒ T)(no: ⇒ T): T = { if (nodes exists (_ == mySelf)) yes else no } @@ -133,7 +148,7 @@ abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { * val serviceA = system.actorFor(node("master") / "user" / "serviceA") * }}} */ - def node(name: String): ActorPath = RootActorPath(testConductor.getAddressFor(name).await) + def node(role: RoleName): ActorPath = RootActorPath(testConductor.getAddressFor(role).await) /** * Enrich `.await()` onto all Awaitables, using BarrierTimeout. 
@@ -149,9 +164,9 @@ abstract class MultiNodeSpec(_system: ActorSystem) extends AkkaSpec(_system) { private val controllerAddr = new InetSocketAddress(nodeNames(0), 4711) if (selfIndex == 0) { - testConductor.startController(initialParticipants, roles(0), controllerAddr).await + testConductor.startController(initialParticipants, mySelf, controllerAddr).await } else { - testConductor.startClient(roles(selfIndex), controllerAddr).await + testConductor.startClient(mySelf, controllerAddr).await } } \ No newline at end of file From 94e71b7a18f05eb85bbe0a30f3364d999c3575b6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 18 May 2012 19:25:43 +0200 Subject: [PATCH 065/538] Huge refactor + preparing for binary compatibility, last stretch for akka-actor.jar... --- .../src/main/scala/akka/AkkaException.scala | 37 ++- .../src/main/scala/akka/experimental.scala | 19 -- .../src/main/scala/akka/routing/Routing.scala | 127 +++----- .../akka/serialization/Serialization.scala | 12 +- .../scala/akka/serialization/Serializer.scala | 1 - .../akka/util/BoundedBlockingQueue.scala | 8 +- .../src/main/scala/akka/util/ByteString.scala | 48 ++- .../util/ClassLoaderObjectInputStream.scala | 7 + .../src/main/scala/akka/util/Convert.scala | 2 +- .../src/main/scala/akka/util/Crypt.scala | 4 +- .../src/main/scala/akka/util/Duration.scala | 276 ++++++------------ .../src/main/scala/akka/util/Helpers.scala | 17 +- .../src/main/scala/akka/util/Index.scala | 4 +- .../src/main/scala/akka/util/LockUtil.scala | 20 +- .../src/main/scala/akka/util/Reflect.scala | 4 +- .../src/main/scala/akka/util/Unsafe.java | 3 + .../main/scala/akka/util/cps/package.scala | 1 + .../scala/akka/util/duration/package.scala | 36 +-- 18 files changed, 218 insertions(+), 408 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/experimental.scala diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 79d78b9d39..e5b0cb6c80 100644 --- 
a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -5,19 +5,26 @@ package akka object AkkaException { - + //FIXME DOC def toStringWithStackTrace(throwable: Throwable): String = throwable match { case null ⇒ "Unknown Throwable: was 'null'" case ae: AkkaException ⇒ ae.toLongString case e ⇒ "%s:%s\n%s" format (e.getClass.getName, e.getMessage, stackTraceToString(e)) } - def stackTraceToString(throwable: Throwable): String = { - val trace = throwable.getStackTrace - val sb = new StringBuilder - for (i ← 0 until trace.length) - sb.append("\tat %s\n" format trace(i)) - sb.toString + /** + * Returns the given Throwables stack trace as a String, or the empty String if no trace is found + * @param throwable + * @return + */ + def stackTraceToString(throwable: Throwable): String = throwable.getStackTrace match { + case null ⇒ "" + case x if x.length == 0 ⇒ "" + case trace ⇒ + val sb = new StringBuilder + for (i ← 0 until trace.length) + sb.append("\tat %s\n" format trace(i)) + sb.toString } } @@ -32,17 +39,15 @@ object AkkaException { */ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed class AkkaException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause) with Serializable { - lazy val uuid = java.util.UUID.randomUUID().toString - - override lazy val toString = - "%s:%s\n[%s]".format(getClass.getName, message, uuid) - - lazy val toLongString = - "%s:%s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) - def this(msg: String) = this(msg, null) - def stackTraceToString = AkkaException.stackTraceToString(this) + lazy val uuid = java.util.UUID.randomUUID().toString + + override def toString: String = "%s:%s\n[%s]".format(getClass.getName, message, uuid) + + def toLongString: String = "%s:%s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) + + def stackTraceToString: String = AkkaException.stackTraceToString(this) } /** diff --git 
a/akka-actor/src/main/scala/akka/experimental.scala b/akka-actor/src/main/scala/akka/experimental.scala deleted file mode 100644 index aef3cb5c85..0000000000 --- a/akka-actor/src/main/scala/akka/experimental.scala +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka - -import annotation.target._ - -/** - * This annotation marks a feature which is not yet considered stable and may - * change or be removed in a future release. - * - * @since 1.2 - */ -@getter -@setter -@beanGetter -@beanSetter -final class experimental(since: String) extends annotation.StaticAnnotation diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index c3db8293d2..f27919d316 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -47,12 +47,11 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup ref: InternalActorRef, props: Props, supervisor: InternalActorRef, - receiveTimeout: Option[Duration]): ActorCell = - { - val cell = super.newActorCell(system, ref, props, supervisor, receiveTimeout) - Unsafe.instance.monitorEnter(cell) - cell - } + receiveTimeout: Option[Duration]): ActorCell = { + val cell = super.newActorCell(system, ref, props, supervisor, receiveTimeout) + Unsafe.instance.monitorEnter(cell) + cell + } private[akka] val routerConfig = _props.routerConfig private[akka] val routeeProps = _props.copy(routerConfig = NoRouter) @@ -303,8 +302,8 @@ trait Router extends Actor { final def receive = ({ case Router.Resize ⇒ - try ref.routerConfig.resizer foreach (_.resize(ref.routeeProps, ref.routeeProvider)) - finally assert(ref.resizeInProgress.getAndSet(false)) + val ab = ref.resizeInProgress + if (ab.get) try ref.routerConfig.resizer foreach (_.resize(ref.routeeProps, ref.routeeProvider)) finally ab.set(false) case Terminated(child) ⇒ ref.removeRoutees(IndexedSeq(child)) @@ 
-319,6 +318,9 @@ trait Router extends Actor { } } +/** + * INTERNAL API + */ private object Router { case object Resize @@ -372,9 +374,9 @@ case class Destination(sender: ActorRef, recipient: ActorRef) //TODO add @SerialVersionUID(1L) when SI-4804 is fixed abstract class NoRouter extends RouterConfig case object NoRouter extends NoRouter { - def createRoute(props: Props, routeeProvider: RouteeProvider): Route = null + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = null // FIXME, null, really?? def routerDispatcher: String = "" - def supervisorStrategy = null + def supervisorStrategy = null // FIXME null, really?? override def withFallback(other: RouterConfig): RouterConfig = other /** @@ -404,9 +406,7 @@ case object FromConfig extends FromConfig { //TODO add @SerialVersionUID(1L) when SI-4804 is fixed class FromConfig(val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends RouterConfig - with Product - with Serializable - with Equals { + with Serializable { def this() = this(Dispatchers.DefaultDispatcherId) @@ -414,38 +414,6 @@ class FromConfig(val routerDispatcher: String = Dispatchers.DefaultDispatcherId) throw new ConfigurationException("router " + routeeProvider.context.self + " needs external configuration from file (e.g. 
application.conf)") def supervisorStrategy: SupervisorStrategy = Router.defaultSupervisorStrategy - - // open-coded case class to preserve binary compatibility, all deprecated for 2.1 - @deprecated("FromConfig does not make sense as case class", "2.0.1") - override def productPrefix = "FromConfig" - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - def productArity = 1 - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - def productElement(x: Int) = x match { - case 0 ⇒ routerDispatcher - case _ ⇒ throw new IndexOutOfBoundsException(x.toString) - } - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - def copy(d: String = Dispatchers.DefaultDispatcherId): FromConfig = new FromConfig(d) - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - def canEqual(o: Any) = o.isInstanceOf[FromConfig] - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - override def hashCode = ScalaRunTime._hashCode(this) - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - override def toString = "FromConfig(" + routerDispatcher + ")" - - @deprecated("FromConfig does not make sense as case class", "2.0.1") - override def equals(other: Any): Boolean = other match { - case FromConfig(x) ⇒ x == routerDispatcher - case _ ⇒ false - } - } object RoundRobinRouter { @@ -510,9 +478,7 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int) = { - this(nrOfInstances = nr) - } + def this(nr: Int) = this(nrOfInstances = nr) /** * Constructor that sets the routees to be used. 
@@ -520,9 +486,7 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(routeePaths)) - } + def this(routeePaths: java.lang.Iterable[String]) = this(routees = iterableAsScalaIterable(routeePaths)) /** * Constructor that sets the resizer to be used. @@ -533,13 +497,13 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = /** * Java API for setting routerDispatcher */ - def withDispatcher(dispatcherId: String) = copy(routerDispatcher = dispatcherId) + def withDispatcher(dispatcherId: String): RoundRobinRouter = copy(routerDispatcher = dispatcherId) /** * Java API for setting the supervisor strategy to be used for the “head” * Router actor. */ - def withSupervisorStrategy(strategy: SupervisorStrategy) = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): RoundRobinRouter = copy(supervisorStrategy = strategy) } trait RoundRobinLike { this: RouterConfig ⇒ @@ -630,9 +594,7 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int) = { - this(nrOfInstances = nr) - } + def this(nr: Int) = this(nrOfInstances = nr) /** * Constructor that sets the routees to be used. 
@@ -640,9 +602,7 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(routeePaths)) - } + def this(routeePaths: java.lang.Iterable[String]) = this(routees = iterableAsScalaIterable(routeePaths)) /** * Constructor that sets the resizer to be used. @@ -653,13 +613,13 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, /** * Java API for setting routerDispatcher */ - def withDispatcher(dispatcherId: String) = copy(routerDispatcher = dispatcherId) + def withDispatcher(dispatcherId: String): RandomRouter = copy(routerDispatcher = dispatcherId) /** * Java API for setting the supervisor strategy to be used for the “head” * Router actor. */ - def withSupervisorStrategy(strategy: SupervisorStrategy) = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): RandomRouter = copy(supervisorStrategy = strategy) } trait RandomLike { this: RouterConfig ⇒ @@ -756,9 +716,7 @@ case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[Strin * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int) = { - this(nrOfInstances = nr) - } + def this(nr: Int) = this(nrOfInstances = nr) /** * Constructor that sets the routees to be used. 
@@ -766,9 +724,7 @@ case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[Strin * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(routeePaths)) - } + def this(routeePaths: java.lang.Iterable[String]) = this(routees = iterableAsScalaIterable(routeePaths)) /** * Constructor that sets the resizer to be used. @@ -779,19 +735,16 @@ case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[Strin /** * Java API for setting routerDispatcher */ - def withDispatcher(dispatcherId: String) = copy(routerDispatcher = dispatcherId) + def withDispatcher(dispatcherId: String): SmallestMailboxRouter = copy(routerDispatcher = dispatcherId) /** * Java API for setting the supervisor strategy to be used for the “head” * Router actor. */ - def withSupervisorStrategy(strategy: SupervisorStrategy) = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): SmallestMailboxRouter = copy(supervisorStrategy = strategy) } trait SmallestMailboxLike { this: RouterConfig ⇒ - - import java.security.SecureRandom - def nrOfInstances: Int def routees: Iterable[String] @@ -954,9 +907,7 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int) = { - this(nrOfInstances = nr) - } + def this(nr: Int) = this(nrOfInstances = nr) /** * Constructor that sets the routees to be used. 
@@ -964,9 +915,7 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(routeePaths)) - } + def this(routeePaths: java.lang.Iterable[String]) = this(routees = iterableAsScalaIterable(routeePaths)) /** * Constructor that sets the resizer to be used. @@ -977,13 +926,13 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N /** * Java API for setting routerDispatcher */ - def withDispatcher(dispatcherId: String) = copy(routerDispatcher = dispatcherId) + def withDispatcher(dispatcherId: String): BroadcastRouter = copy(routerDispatcher = dispatcherId) /** * Java API for setting the supervisor strategy to be used for the “head” * Router actor. */ - def withSupervisorStrategy(strategy: SupervisorStrategy) = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): BroadcastRouter = copy(supervisorStrategy = strategy) } trait BroadcastLike { this: RouterConfig ⇒ @@ -1069,9 +1018,7 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It * Constructor that sets nrOfInstances to be created. * Java API */ - def this(nr: Int, w: Duration) = { - this(nrOfInstances = nr, within = w) - } + def this(nr: Int, w: Duration) = this(nrOfInstances = nr, within = w) /** * Constructor that sets the routees to be used. 
@@ -1079,9 +1026,8 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It * @param routeePaths string representation of the actor paths of the routees that will be looked up * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(routeePaths: java.lang.Iterable[String], w: Duration) = { + def this(routeePaths: java.lang.Iterable[String], w: Duration) = this(routees = iterableAsScalaIterable(routeePaths), within = w) - } /** * Constructor that sets the resizer to be used. @@ -1150,7 +1096,7 @@ trait Resizer { * This method is invoked only in the context of the Router actor in order to safely * create/stop children. */ - def resize(props: Props, routeeProvider: RouteeProvider) + def resize(props: Props, routeeProvider: RouteeProvider): Unit } case object DefaultResizer { @@ -1166,6 +1112,7 @@ case object DefaultResizer { messagesPerResize = resizerConfig.getInt("messages-per-resize")) } +//FIXME DOCUMENT ME case class DefaultResizer( /** * The fewest number of routees the router should ever have. @@ -1240,7 +1187,7 @@ case class DefaultResizer( def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % messagesPerResize == 0) - def resize(props: Props, routeeProvider: RouteeProvider) { + def resize(props: Props, routeeProvider: RouteeProvider): Unit = { val currentRoutees = routeeProvider.routees val requestedCapacity = capacity(currentRoutees) @@ -1258,7 +1205,7 @@ case class DefaultResizer( * Give concurrent messages a chance to be placed in mailbox before * sending PoisonPill. */ - protected def delayedStop(scheduler: Scheduler, abandon: IndexedSeq[ActorRef]) { + protected def delayedStop(scheduler: Scheduler, abandon: IndexedSeq[ActorRef]): Unit = { if (abandon.nonEmpty) { if (stopDelay <= Duration.Zero) { abandon foreach (_ ! 
PoisonPill) @@ -1327,9 +1274,7 @@ case class DefaultResizer( * @param capacity current number of routees * @return proposed change in the capacity */ - def filter(pressure: Int, capacity: Int): Int = { - rampup(pressure, capacity) + backoff(pressure, capacity) - } + def filter(pressure: Int, capacity: Int): Int = rampup(pressure, capacity) + backoff(pressure, capacity) /** * Computes a proposed positive (or zero) capacity delta using diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index 03d03dc785..7355e4f7fb 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -14,8 +14,6 @@ import akka.util.NonFatal import scala.collection.mutable.ArrayBuffer import java.io.NotSerializableException -case class NoSerializerFoundException(m: String) extends AkkaException(m) - object Serialization { /** @@ -120,9 +118,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { possibilities(0)._2 } serializerMap.putIfAbsent(clazz, ser) match { - case null ⇒ - log.debug("Using serializer[{}] for message [{}]", ser.getClass.getName, clazz.getName) - ser + case null ⇒ log.debug("Using serializer[{}] for message [{}]", ser.getClass.getName, clazz.getName); ser case some ⇒ some } case ser ⇒ ser @@ -140,10 +136,8 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * A Map of serializer from alias to implementation (class implementing akka.serialization.Serializer) * By default always contains the following mapping: "java" -> akka.serialization.JavaSerializer */ - private val serializers: Map[String, Serializer] = { - for ((k: String, v: String) ← settings.Serializers) - yield k -> serializerOf(v).fold(throw _, identity) - } + private val serializers: Map[String, Serializer] = + for ((k: String, v: String) ← settings.Serializers) yield k -> 
serializerOf(v).fold(throw _, identity) /** * bindings is a Seq of tuple representing the mapping from Class to Serializer. diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index 5696201f62..f6300ca998 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -6,7 +6,6 @@ package akka.serialization import java.io.{ ObjectOutputStream, ByteArrayOutputStream, ObjectInputStream, ByteArrayInputStream } import akka.util.ClassLoaderObjectInputStream -import akka.actor.DynamicAccess import akka.actor.ExtendedActorSystem import scala.util.DynamicVariable diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index 7eb90b8ef0..c7c8308de0 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -8,6 +8,12 @@ import java.util.concurrent.locks.ReentrantLock import java.util.concurrent.{ TimeUnit, BlockingQueue } import java.util.{ AbstractQueue, Queue, Collection, Iterator } +/** + * BoundedBlockingQueue wraps any Queue and turns the result into a BlockingQueue with a limited capacity + * @param maxCapacity - the maximum capacity of this Queue, needs to be > 0 + * @param backing - the backing Queue + * @tparam E - The type of the contents of this Queue + */ class BoundedBlockingQueue[E <: AnyRef]( val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] { @@ -22,7 +28,7 @@ class BoundedBlockingQueue[E <: AnyRef]( require(maxCapacity > 0) } - protected val lock = new ReentrantLock(false) + protected val lock = new ReentrantLock(false) // TODO might want to switch to ReentrantReadWriteLock private val notEmpty = lock.newCondition() private val notFull = lock.newCondition() diff --git 
a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala index 6d869826a8..ac074d5b28 100644 --- a/akka-actor/src/main/scala/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala/akka/util/ByteString.scala @@ -11,6 +11,7 @@ import scala.collection.mutable.{ Builder, WrappedArray } import scala.collection.immutable.{ IndexedSeq, VectorBuilder } import scala.collection.generic.CanBuildFrom +//FIXME MORE DOCS object ByteString { /** @@ -53,15 +54,16 @@ object ByteString { val empty: ByteString = CompactByteString(Array.empty[Byte]) - def newBuilder = new ByteStringBuilder + def newBuilder: ByteStringBuilder = new ByteStringBuilder - implicit def canBuildFrom = new CanBuildFrom[TraversableOnce[Byte], Byte, ByteString] { - def apply(from: TraversableOnce[Byte]) = newBuilder - def apply() = newBuilder - } + implicit val canBuildFrom: CanBuildFrom[TraversableOnce[Byte], Byte, ByteString] = + new CanBuildFrom[TraversableOnce[Byte], Byte, ByteString] { + def apply(ignore: TraversableOnce[Byte]): ByteStringBuilder = newBuilder + def apply(): ByteStringBuilder = newBuilder + } private[akka] object ByteString1C { - def apply(bytes: Array[Byte]) = new ByteString1C(bytes) + def apply(bytes: Array[Byte]): ByteString1C = new ByteString1C(bytes) } /** @@ -71,7 +73,7 @@ object ByteString { final class ByteString1C private (private val bytes: Array[Byte]) extends CompactByteString { def apply(idx: Int): Byte = bytes(idx) - override def length = bytes.length + override def length: Int = bytes.length def toArray: Array[Byte] = bytes.clone @@ -81,13 +83,11 @@ object ByteString { def compact: ByteString1C = this - def asByteBuffer: ByteBuffer = - toByteString1.asByteBuffer + def asByteBuffer: ByteBuffer = toByteString1.asByteBuffer def decodeString(charset: String): String = new String(bytes, charset) - def ++(that: ByteString): ByteString = - if (!that.isEmpty) toByteString1 ++ that else this + def ++(that: ByteString): 
ByteString = if (!that.isEmpty) toByteString1 ++ that else this override def slice(from: Int, until: Int): ByteString = if ((from != 0) || (until != length)) toByteString1.slice(from, until) @@ -96,12 +96,11 @@ object ByteString { override def copyToArray[A >: Byte](xs: Array[A], start: Int, len: Int): Unit = toByteString1.copyToArray(xs, start, len) - def copyToBuffer(buffer: ByteBuffer): Int = - toByteString1.copyToBuffer(buffer) + def copyToBuffer(buffer: ByteBuffer): Int = toByteString1.copyToBuffer(buffer) } private[akka] object ByteString1 { - def apply(bytes: Array[Byte]) = new ByteString1(bytes) + def apply(bytes: Array[Byte]): ByteString1 = new ByteString1(bytes) } /** @@ -113,7 +112,7 @@ object ByteString { def apply(idx: Int): Byte = bytes(checkRangeConvert(idx)) - private def checkRangeConvert(index: Int) = { + private def checkRangeConvert(index: Int): Int = { if (0 <= index && length > index) index + startIndex else @@ -128,8 +127,7 @@ object ByteString { override def clone: CompactByteString = ByteString1C(toArray) - def compact: CompactByteString = - if (length == bytes.length) ByteString1C(bytes) else clone + def compact: CompactByteString = if (length == bytes.length) ByteString1C(bytes) else clone def asByteBuffer: ByteBuffer = { val buffer = ByteBuffer.wrap(bytes, startIndex, length).asReadOnlyBuffer @@ -161,7 +159,6 @@ object ByteString { if (copyLength > 0) buffer.put(bytes, startIndex, copyLength) copyLength } - } private[akka] object ByteStrings { @@ -198,10 +195,11 @@ object ByteString { } // 0: both empty, 1: 2nd empty, 2: 1st empty, 3: neither empty + // Using length to check emptiness is prohibited by law def compare(b1: ByteString, b2: ByteString): Int = - if (b1.length == 0) - if (b2.length == 0) 0 else 2 - else if (b2.length == 0) 1 else 3 + if (b1.isEmpty) + if (b2.isEmpty) 0 else 2 + else if (b2.isEmpty) 1 else 3 } @@ -439,7 +437,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { private var _tempLength = 0 
private var _tempCapacity = 0 - private def clearTemp() { + private def clearTemp(): Unit = { if (_tempLength > 0) { val arr = new Array[Byte](_tempLength) Array.copy(_temp, 0, arr, 0, _tempLength) @@ -448,14 +446,14 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { } } - private def resizeTemp(size: Int) { + private def resizeTemp(size: Int): Unit = { val newtemp = new Array[Byte](size) if (_tempLength > 0) Array.copy(_temp, 0, newtemp, 0, _tempLength) _temp = newtemp _tempCapacity = _temp.length } - private def ensureTempSize(size: Int) { + private def ensureTempSize(size: Int): Unit = { if (_tempCapacity < size || _tempCapacity == 0) { var newSize = if (_tempCapacity == 0) 16 else _tempCapacity * 2 while (newSize < size) newSize *= 2 @@ -498,7 +496,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { this } - def clear() { + def clear(): Unit = { _builder.clear _length = 0 _tempLength = 0 diff --git a/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala b/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala index 3ad55d69eb..ab2514861e 100644 --- a/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala +++ b/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala @@ -6,6 +6,13 @@ package akka.util import java.io.{ InputStream, ObjectInputStream, ObjectStreamClass } +/** + * ClassLoaderObjectInputStream tries to utilize the provided ClassLoader to load Classes and falls + * back to ObjectInputStreams resolver. 
+ * + * @param classLoader - the ClassLoader which is to be used primarily + * @param is - the InputStream that is wrapped + */ class ClassLoaderObjectInputStream(classLoader: ClassLoader, is: InputStream) extends ObjectInputStream(is) { override protected def resolveClass(objectStreamClass: ObjectStreamClass): Class[_] = try Class.forName(objectStreamClass.getName, false, classLoader) catch { diff --git a/akka-actor/src/main/scala/akka/util/Convert.scala b/akka-actor/src/main/scala/akka/util/Convert.scala index a805b17fb2..3fead7aef7 100644 --- a/akka-actor/src/main/scala/akka/util/Convert.scala +++ b/akka-actor/src/main/scala/akka/util/Convert.scala @@ -3,7 +3,7 @@ */ package akka.util - +//FIXME DOCS! object Convert { def intToBytes(value: Int): Array[Byte] = { diff --git a/akka-actor/src/main/scala/akka/util/Crypt.scala b/akka-actor/src/main/scala/akka/util/Crypt.scala index 7dd678e748..280cd90768 100644 --- a/akka-actor/src/main/scala/akka/util/Crypt.scala +++ b/akka-actor/src/main/scala/akka/util/Crypt.scala @@ -5,7 +5,7 @@ package akka.util import java.security.{ MessageDigest, SecureRandom } - +//FIXME DOCS object Crypt { val hex = "0123456789ABCDEF" val lineSeparator = System.getProperty("line.separator") @@ -32,7 +32,7 @@ object Crypt { } def hexify(bytes: Array[Byte]): String = { - val builder = new StringBuilder + val builder = new StringBuilder(bytes.length * 2) bytes.foreach { byte ⇒ builder.append(hex.charAt((byte & 0xF0) >> 4)).append(hex.charAt(byte & 0xF)) } builder.toString } diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index a213fe1869..b37cf24c3b 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -110,6 +110,7 @@ object Duration { } val Zero: FiniteDuration = new FiniteDuration(0, NANOSECONDS) + val Undefined: Duration = new Duration with Infinite { override def toString = "Duration.Undefined" override 
def equals(other: Any) = other.asInstanceOf[AnyRef] eq this @@ -166,8 +167,8 @@ object Duration { * including itself. */ val Inf: Duration = new Duration with Infinite { - override def toString = "Duration.Inf" - def compare(other: Duration) = if (other eq this) 0 else 1 + override def toString: String = "Duration.Inf" + def compare(other: Duration): Int = if (other eq this) 0 else 1 def unary_- : Duration = MinusInf } @@ -177,7 +178,7 @@ object Duration { */ val MinusInf: Duration = new Duration with Infinite { override def toString = "Duration.MinusInf" - def compare(other: Duration) = if (other eq this) 0 else -1 + def compare(other: Duration): Int = if (other eq this) 0 else -1 def unary_- : Duration = Inf } @@ -188,7 +189,7 @@ object Duration { def parse(s: String): Duration = unapply(s).get implicit object DurationIsOrdered extends Ordering[Duration] { - def compare(a: Duration, b: Duration) = a compare b + def compare(a: Duration, b: Duration): Int = a compare b } } @@ -263,17 +264,17 @@ abstract class Duration extends Serializable with Ordered[Duration] { def fromNow: Deadline = Deadline.now + this // Java API - def lt(other: Duration) = this < other - def lteq(other: Duration) = this <= other - def gt(other: Duration) = this > other - def gteq(other: Duration) = this >= other - def plus(other: Duration) = this + other - def minus(other: Duration) = this - other - def mul(factor: Double) = this * factor - def div(factor: Double) = this / factor - def div(other: Duration) = this / other - def neg() = -this - def isFinite() = finite_? 
+ def lt(other: Duration): Boolean = this < other + def lteq(other: Duration): Boolean = this <= other + def gt(other: Duration): Boolean = this > other + def gteq(other: Duration): Boolean = this >= other + def plus(other: Duration): Duration = this + other + def minus(other: Duration): Duration = this - other + def mul(factor: Double): Duration = this * factor + def div(factor: Double): Duration = this / factor + def div(other: Duration): Double = this / other + def neg(): Duration = -this + def isFinite(): Boolean = finite_? } object FiniteDuration { @@ -349,31 +350,19 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { else c } - def +(other: Duration) = { - if (!other.finite_?) { - other - } else { - fromNanos(add(toNanos, other.toNanos)) - } - } + def +(other: Duration): Duration = if (!other.finite_?) other else fromNanos(add(toNanos, other.toNanos)) - def -(other: Duration) = { - if (!other.finite_?) { - other - } else { - fromNanos(add(toNanos, -other.toNanos)) - } - } + def -(other: Duration): Duration = if (!other.finite_?) other else fromNanos(add(toNanos, -other.toNanos)) - def *(factor: Double) = fromNanos(long2double(toNanos) * factor) + def *(factor: Double): FiniteDuration = fromNanos(long2double(toNanos) * factor) - def /(factor: Double) = fromNanos(long2double(toNanos) / factor) + def /(factor: Double): FiniteDuration = fromNanos(long2double(toNanos) / factor) - def /(other: Duration) = if (other.finite_?) long2double(toNanos) / other.toNanos else 0 + def /(other: Duration): Double = if (other.finite_?) long2double(toNanos) / other.toNanos else 0 - def unary_- = Duration(-length, unit) + def unary_- : FiniteDuration = Duration(-length, unit) - def finite_? = true + def finite_? 
: Boolean = true override def equals(other: Any) = (other.asInstanceOf[AnyRef] eq this) || other.isInstanceOf[FiniteDuration] && @@ -385,178 +374,74 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { } } -class DurationInt(n: Int) { +private[akka] trait DurationOps { import duration.Classifier + protected def from(timeUnit: TimeUnit): FiniteDuration + def nanoseconds: FiniteDuration = from(NANOSECONDS) + def nanos: FiniteDuration = from(NANOSECONDS) + def nanosecond: FiniteDuration = from(NANOSECONDS) + def nano: FiniteDuration = from(NANOSECONDS) - def nanoseconds = Duration(n, NANOSECONDS) - def nanos = Duration(n, NANOSECONDS) - def nanosecond = Duration(n, NANOSECONDS) - def nano = Duration(n, NANOSECONDS) + def microseconds: FiniteDuration = from(MICROSECONDS) + def micros: FiniteDuration = from(MICROSECONDS) + def microsecond: FiniteDuration = from(MICROSECONDS) + def micro: FiniteDuration = from(MICROSECONDS) - def microseconds = Duration(n, MICROSECONDS) - def micros = Duration(n, MICROSECONDS) - def microsecond = Duration(n, MICROSECONDS) - def micro = Duration(n, MICROSECONDS) + def milliseconds: FiniteDuration = from(MILLISECONDS) + def millis: FiniteDuration = from(MILLISECONDS) + def millisecond: FiniteDuration = from(MILLISECONDS) + def milli: FiniteDuration = from(MILLISECONDS) - def milliseconds = Duration(n, MILLISECONDS) - def millis = Duration(n, MILLISECONDS) - def millisecond = Duration(n, MILLISECONDS) - def milli = Duration(n, MILLISECONDS) + def seconds: FiniteDuration = from(SECONDS) + def second: FiniteDuration = from(SECONDS) - def seconds = Duration(n, SECONDS) - def second = Duration(n, SECONDS) + def minutes: FiniteDuration = from(MINUTES) + def minute: FiniteDuration = from(MINUTES) - def minutes = Duration(n, MINUTES) - def minute = Duration(n, MINUTES) + def hours: FiniteDuration = from(HOURS) + def hour: FiniteDuration = from(HOURS) - def hours = Duration(n, HOURS) - def hour = Duration(n, HOURS) + def 
days: FiniteDuration = from(DAYS) + def day: FiniteDuration = from(DAYS) - def days = Duration(n, DAYS) - def day = Duration(n, DAYS) + def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) + def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) + def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) + def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) - def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) + def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) + def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) + def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) - def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) + def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = 
ev.convert(from(MILLISECONDS)) + def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) + def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) - def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(SECONDS)) + def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(SECONDS)) - def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) - def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) + def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MINUTES)) + def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MINUTES)) - def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) - def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) + def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(HOURS)) + def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(HOURS)) - def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) - def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) - - def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) - def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = 
ev.convert(Duration(n, DAYS)) + def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(DAYS)) + def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(DAYS)) } -class DurationLong(n: Long) { - import duration.Classifier - - def nanoseconds = Duration(n, NANOSECONDS) - def nanos = Duration(n, NANOSECONDS) - def nanosecond = Duration(n, NANOSECONDS) - def nano = Duration(n, NANOSECONDS) - - def microseconds = Duration(n, MICROSECONDS) - def micros = Duration(n, MICROSECONDS) - def microsecond = Duration(n, MICROSECONDS) - def micro = Duration(n, MICROSECONDS) - - def milliseconds = Duration(n, MILLISECONDS) - def millis = Duration(n, MILLISECONDS) - def millisecond = Duration(n, MILLISECONDS) - def milli = Duration(n, MILLISECONDS) - - def seconds = Duration(n, SECONDS) - def second = Duration(n, SECONDS) - - def minutes = Duration(n, MINUTES) - def minute = Duration(n, MINUTES) - - def hours = Duration(n, HOURS) - def hour = Duration(n, HOURS) - - def days = Duration(n, DAYS) - def day = Duration(n, DAYS) - - def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) - - def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) - - def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: 
CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) - - def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) - def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) - - def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) - def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) - - def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) - def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) - - def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) - def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) +class DurationInt(n: Int) extends DurationOps { + override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(n, timeUnit) } -class DurationDouble(d: Double) { - import duration.Classifier +class DurationLong(n: Long) extends DurationOps { + override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(n, timeUnit) +} - def nanoseconds = Duration(d, NANOSECONDS) - def nanos = Duration(d, NANOSECONDS) - def nanosecond = Duration(d, NANOSECONDS) - def nano = Duration(d, NANOSECONDS) - - def microseconds = Duration(d, MICROSECONDS) - def micros = Duration(d, MICROSECONDS) - def microsecond = Duration(d, MICROSECONDS) - def micro = Duration(d, MICROSECONDS) - - def milliseconds = Duration(d, MILLISECONDS) - def millis = Duration(d, MILLISECONDS) - def millisecond = Duration(d, MILLISECONDS) 
- def milli = Duration(d, MILLISECONDS) - - def seconds = Duration(d, SECONDS) - def second = Duration(d, SECONDS) - - def minutes = Duration(d, MINUTES) - def minute = Duration(d, MINUTES) - - def hours = Duration(d, HOURS) - def hour = Duration(d, HOURS) - - def days = Duration(d, DAYS) - def day = Duration(d, DAYS) - - def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) - def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) - def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) - def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) - - def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) - def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) - def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) - def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) - - def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) - def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) - def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) - def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) - - def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, SECONDS)) - def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, SECONDS)) - - def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MINUTES)) - def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = 
ev.convert(Duration(d, MINUTES)) - - def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, HOURS)) - def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, HOURS)) - - def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, DAYS)) - def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, DAYS)) +class DurationDouble(d: Double) extends DurationOps { + override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(d, timeUnit) } //TODO add @SerialVersionUID(1L) when SI-4804 is fixed @@ -565,24 +450,27 @@ case class Timeout(duration: Duration) { def this(length: Long, unit: TimeUnit) = this(Duration(length, unit)) } +/** + * A Timeout is a wrapper on top of Duration to be more specific about what the duration means. + */ object Timeout { /** * A timeout with zero duration, will cause most requests to always timeout. */ - val zero = new Timeout(Duration.Zero) + val zero: Timeout = new Timeout(Duration.Zero) /** * A Timeout with infinite duration. Will never timeout. Use extreme caution with this * as it may cause memory leaks, blocked threads, or may not even be supported by * the receiver, which would result in an exception. 
*/ - val never = new Timeout(Duration.Inf) + val never: Timeout = new Timeout(Duration.Inf) - def apply(timeout: Long) = new Timeout(timeout) - def apply(length: Long, unit: TimeUnit) = new Timeout(length, unit) + def apply(timeout: Long): Timeout = new Timeout(timeout) + def apply(length: Long, unit: TimeUnit): Timeout = new Timeout(length, unit) - implicit def durationToTimeout(duration: Duration) = new Timeout(duration) - implicit def intToTimeout(timeout: Int) = new Timeout(timeout) - implicit def longToTimeout(timeout: Long) = new Timeout(timeout) + implicit def durationToTimeout(duration: Duration): Timeout = new Timeout(duration) + implicit def intToTimeout(timeout: Int): Timeout = new Timeout(timeout) + implicit def longToTimeout(timeout: Long): Timeout = new Timeout(timeout) } diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index 25cb279f2e..a3618359ac 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -45,18 +45,13 @@ object Helpers { else base64(next, sb) } - def ignore[E: Manifest](body: ⇒ Unit) { - try { - body - } catch { - case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ () - } - } + //FIXME docs + def ignore[E: Manifest](body: ⇒ Unit): Unit = + try body catch { case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ () } - def withPrintStackTraceOnError(body: ⇒ Unit) { - try { - body - } catch { + //FIXME docs + def withPrintStackTraceOnError(body: ⇒ Unit): Unit = { + try body catch { case e: Throwable ⇒ val sw = new java.io.StringWriter() var root = e diff --git a/akka-actor/src/main/scala/akka/util/Index.scala b/akka-actor/src/main/scala/akka/util/Index.scala index 1153c9e045..3289ed8f13 100644 --- a/akka-actor/src/main/scala/akka/util/Index.scala +++ b/akka-actor/src/main/scala/akka/util/Index.scala @@ -91,7 +91,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { /** 
* Applies the supplied function to all keys and their values */ - def foreach(fun: (K, V) ⇒ Unit) { + def foreach(fun: (K, V) ⇒ Unit): Unit = { import scala.collection.JavaConversions._ container.entrySet foreach { e ⇒ e.getValue.foreach(fun(e.getKey, _)) } } @@ -112,7 +112,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { /** * Returns the key set. */ - def keys = scala.collection.JavaConversions.collectionAsScalaIterable(container.keySet) + def keys: Iterable[K] = scala.collection.JavaConversions.collectionAsScalaIterable(container.keySet) /** * Disassociates the value of type V from the key of type K diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index 14c787d3f6..da93170019 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -24,9 +24,7 @@ class Switch(startAsOn: Boolean = false) { protected def transcend(from: Boolean, action: ⇒ Unit): Boolean = synchronized { if (switch.compareAndSet(from, !from)) { - try { - action - } catch { + try action catch { case e ⇒ switch.compareAndSet(!from, from) // revert status throw e @@ -62,18 +60,12 @@ class Switch(startAsOn: Boolean = false) { /** * Executes the provided action and returns its value if the switch is IMMEDIATELY on (i.e. no lock involved) */ - def ifOnYield[T](action: ⇒ T): Option[T] = { - if (switch.get) Some(action) - else None - } + def ifOnYield[T](action: ⇒ T): Option[T] = if (switch.get) Some(action) else None /** * Executes the provided action and returns its value if the switch is IMMEDIATELY off (i.e. no lock involved) */ - def ifOffYield[T](action: ⇒ T): Option[T] = { - if (!switch.get) Some(action) - else None - } + def ifOffYield[T](action: ⇒ T): Option[T] = if (!switch.get) Some(action) else None /** * Executes the provided action and returns if the action was executed or not, if the switch is IMMEDIATELY on (i.e. 
no lock involved) @@ -138,15 +130,15 @@ class Switch(startAsOn: Boolean = false) { /** * Executes the given code while holding this switch’s lock, i.e. protected from concurrent modification of the switch status. */ - def locked[T](code: ⇒ T) = synchronized { code } + def locked[T](code: ⇒ T): T = synchronized { code } /** * Returns whether the switch is IMMEDIATELY on (no locking) */ - def isOn = switch.get + def isOn: Boolean = switch.get /** * Returns whether the switch is IMMEDDIATELY off (no locking) */ - def isOff = !isOn + def isOff: Boolean = !isOn } diff --git a/akka-actor/src/main/scala/akka/util/Reflect.scala b/akka-actor/src/main/scala/akka/util/Reflect.scala index 25c56a983f..3a46edeab1 100644 --- a/akka-actor/src/main/scala/akka/util/Reflect.scala +++ b/akka-actor/src/main/scala/akka/util/Reflect.scala @@ -6,8 +6,10 @@ package akka.util /** * Collection of internal reflection utilities which may or may not be * available (most services specific to HotSpot, but fails gracefully). 
+ * + * INTERNAL API */ -object Reflect { +private[akka] object Reflect { /** * This optionally holds a function which looks N levels above itself diff --git a/akka-actor/src/main/scala/akka/util/Unsafe.java b/akka-actor/src/main/scala/akka/util/Unsafe.java index 608cb3d46e..ace3c1baac 100644 --- a/akka-actor/src/main/scala/akka/util/Unsafe.java +++ b/akka-actor/src/main/scala/akka/util/Unsafe.java @@ -7,6 +7,9 @@ package akka.util; import java.lang.reflect.Field; +/** + * INTERNAL API + */ public final class Unsafe { public final static sun.misc.Unsafe instance; static { diff --git a/akka-actor/src/main/scala/akka/util/cps/package.scala b/akka-actor/src/main/scala/akka/util/cps/package.scala index 198c2beacd..a1b4bc39eb 100644 --- a/akka-actor/src/main/scala/akka/util/cps/package.scala +++ b/akka-actor/src/main/scala/akka/util/cps/package.scala @@ -7,6 +7,7 @@ package akka.util import scala.util.continuations._ import akka.dispatch.MessageDispatcher +//FIXME Needs docs package object cps { def matchC[A, B, C, D](in: A)(pf: PartialFunction[A, B @cpsParam[C, D]]): B @cpsParam[C, D] = pf(in) diff --git a/akka-actor/src/main/scala/akka/util/duration/package.scala b/akka-actor/src/main/scala/akka/util/duration/package.scala index 7f14a0be48..6a7d28a6e6 100644 --- a/akka-actor/src/main/scala/akka/util/duration/package.scala +++ b/akka-actor/src/main/scala/akka/util/duration/package.scala @@ -5,7 +5,7 @@ package akka.util import java.util.concurrent.TimeUnit - +//FIXME Needs docs package object duration { trait Classifier[C] { type R @@ -15,38 +15,32 @@ package object duration { object span implicit object spanConvert extends Classifier[span.type] { type R = FiniteDuration - def convert(d: FiniteDuration) = d + def convert(d: FiniteDuration): FiniteDuration = d } object fromNow implicit object fromNowConvert extends Classifier[fromNow.type] { type R = Deadline - def convert(d: FiniteDuration) = Deadline.now + d + def convert(d: FiniteDuration): Deadline = Deadline.now + 
d } - implicit def intToDurationInt(n: Int) = new DurationInt(n) - implicit def longToDurationLong(n: Long) = new DurationLong(n) - implicit def doubleToDurationDouble(d: Double) = new DurationDouble(d) + implicit def intToDurationInt(n: Int): DurationInt = new DurationInt(n) + implicit def longToDurationLong(n: Long): DurationLong = new DurationLong(n) + implicit def doubleToDurationDouble(d: Double): DurationDouble = new DurationDouble(d) - implicit def pairIntToDuration(p: (Int, TimeUnit)) = Duration(p._1, p._2) - implicit def pairLongToDuration(p: (Long, TimeUnit)) = Duration(p._1, p._2) - implicit def durationToPair(d: Duration) = (d.length, d.unit) + implicit def pairIntToDuration(p: (Int, TimeUnit)): FiniteDuration = Duration(p._1, p._2) + implicit def pairLongToDuration(p: (Long, TimeUnit)): FiniteDuration = Duration(p._1, p._2) + implicit def durationToPair(d: Duration): (Long, TimeUnit) = (d.length, d.unit) /* * avoid reflection based invocation by using non-duck type */ - class IntMult(i: Int) { - def *(d: Duration) = d * i - } - implicit def intMult(i: Int) = new IntMult(i) + class IntMult(i: Int) { def *(d: Duration): Duration = d * i } + implicit def intMult(i: Int): IntMult = new IntMult(i) - class LongMult(l: Long) { - def *(d: Duration) = d * l - } - implicit def longMult(l: Long) = new LongMult(l) + class LongMult(l: Long) { def *(d: Duration): Duration = d * l } + implicit def longMult(l: Long): LongMult = new LongMult(l) - class DoubleMult(f: Double) { - def *(d: Duration) = d * f - } - implicit def doubleMult(f: Double) = new DoubleMult(f) + class DoubleMult(f: Double) { def *(d: Duration): Duration = d * f } + implicit def doubleMult(f: Double): DoubleMult = new DoubleMult(f) } From 5cbcb612b2469d5b140798380d8016be5a2642f2 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 20 May 2012 15:56:52 +0200 Subject: [PATCH 066/538] Moving the HWT stuff from org.jboss.netty.akka.util to akka.util.internal --- 
.../util/internal/ConcurrentIdentityHashMap.java | 2 +- .../util/internal}/HashedWheelTimer.java | 12 +++++------- .../akka/util/internal/ReusableIterator.java | 2 +- .../akka/util/internal/SystemPropertyUtil.java | 2 +- .../akka/util => akka/util/internal}/Timeout.java | 2 +- .../akka/util => akka/util/internal}/Timer.java | 4 ++-- .../akka/util => akka/util/internal}/TimerTask.java | 6 +++--- .../src/main/scala/akka/actor/ActorSystem.scala | 5 ++--- akka-actor/src/main/scala/akka/actor/Scheduler.scala | 3 ++- 9 files changed, 18 insertions(+), 20 deletions(-) rename akka-actor/src/main/java/{org/jboss/netty => }/akka/util/internal/ConcurrentIdentityHashMap.java (99%) rename akka-actor/src/main/java/{org/jboss/netty/akka/util => akka/util/internal}/HashedWheelTimer.java (97%) rename akka-actor/src/main/java/{org/jboss/netty => }/akka/util/internal/ReusableIterator.java (95%) rename akka-actor/src/main/java/{org/jboss/netty => }/akka/util/internal/SystemPropertyUtil.java (98%) rename akka-actor/src/main/java/{org/jboss/netty/akka/util => akka/util/internal}/Timeout.java (97%) rename akka-actor/src/main/java/{org/jboss/netty/akka/util => akka/util/internal}/Timer.java (92%) rename akka-actor/src/main/java/{org/jboss/netty/akka/util => akka/util/internal}/TimerTask.java (82%) diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ConcurrentIdentityHashMap.java b/akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java similarity index 99% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ConcurrentIdentityHashMap.java rename to akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java index ff8a568d02..eb83c98f35 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ConcurrentIdentityHashMap.java +++ b/akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java @@ -18,7 +18,7 @@ * Expert Group and released to the public domain, as explained at * 
http://creativecommons.org/licenses/publicdomain */ -package org.jboss.netty.akka.util.internal; +package akka.util.internal; import java.util.AbstractCollection; import java.util.AbstractMap; diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java similarity index 97% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java rename to akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java index 9eba51e53f..25841861c5 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java +++ b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java @@ -13,12 +13,10 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util; +package akka.util.internal; import akka.event.LoggingAdapter; import akka.util.Duration; -import org.jboss.netty.akka.util.internal.ConcurrentIdentityHashMap; -import org.jboss.netty.akka.util.internal.ReusableIterator; import java.util.*; import java.util.concurrent.ThreadFactory; @@ -34,7 +32,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; *

Tick Duration

* * As described with 'approximated', this timer does not execute the scheduled - * {@link TimerTask} on time. {@link org.jboss.netty.akka.util.HashedWheelTimer}, on every tick, will + * {@link TimerTask} on time. {@link HashedWheelTimer}, on every tick, will * check if there are any {@link TimerTask}s behind the schedule and execute * them. *

@@ -46,7 +44,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; * *

Ticks per Wheel (Wheel Size)

* - * {@link org.jboss.netty.akka.util.HashedWheelTimer} maintains a data structure called 'wheel'. + * {@link HashedWheelTimer} maintains a data structure called 'wheel'. * To put simply, a wheel is a hash table of {@link TimerTask}s whose hash * function is 'dead line of the task'. The default number of ticks per wheel * (i.e. the size of the wheel) is 512. You could specify a larger value @@ -54,7 +52,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; * *

Do not create many instances.

* - * {@link org.jboss.netty.akka.util.HashedWheelTimer} creates a new thread whenever it is instantiated and + * {@link HashedWheelTimer} creates a new thread whenever it is instantiated and * started. Therefore, you should make sure to create only one instance and * share it across your application. One of the common mistakes, that makes * your application unresponsive, is to create a new instance in @@ -63,7 +61,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; * *

Implementation Details

* - * {@link org.jboss.netty.akka.util.HashedWheelTimer} is based on + * {@link HashedWheelTimer} is based on * George Varghese and * Tony Lauck's paper, * 'Hashed diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ReusableIterator.java b/akka-actor/src/main/java/akka/util/internal/ReusableIterator.java similarity index 95% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ReusableIterator.java rename to akka-actor/src/main/java/akka/util/internal/ReusableIterator.java index 210edbe65d..8c8e5e50e5 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/ReusableIterator.java +++ b/akka-actor/src/main/java/akka/util/internal/ReusableIterator.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util.internal; +package akka.util.internal; import java.util.Iterator; diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/SystemPropertyUtil.java b/akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java similarity index 98% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/internal/SystemPropertyUtil.java rename to akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java index bf3e2ac571..affef54bfc 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/internal/SystemPropertyUtil.java +++ b/akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package org.jboss.netty.akka.util.internal; +package akka.util.internal; import java.util.regex.Pattern; diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/Timeout.java b/akka-actor/src/main/java/akka/util/internal/Timeout.java similarity index 97% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/Timeout.java rename to akka-actor/src/main/java/akka/util/internal/Timeout.java index dbda2110d3..a03534bb8d 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/Timeout.java +++ b/akka-actor/src/main/java/akka/util/internal/Timeout.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util; +package akka.util.internal; /** * A handle associated with a {@link TimerTask} that is returned by a diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/Timer.java b/akka-actor/src/main/java/akka/util/internal/Timer.java similarity index 92% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/Timer.java rename to akka-actor/src/main/java/akka/util/internal/Timer.java index b5bd8c6a7c..9cb02794de 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/Timer.java +++ b/akka-actor/src/main/java/akka/util/internal/Timer.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util; +package akka.util.internal; import akka.util.Duration; import java.util.Set; @@ -45,7 +45,7 @@ public interface Timer { Timeout newTimeout(TimerTask task, Duration delay); /** - * Releases all resources acquired by this {@link org.jboss.netty.akka.util.Timer} and cancels all + * Releases all resources acquired by this {@link Timer} and cancels all * tasks which were scheduled but not executed yet. 
* * @return the handles associated with the tasks which were canceled by diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/TimerTask.java b/akka-actor/src/main/java/akka/util/internal/TimerTask.java similarity index 82% rename from akka-actor/src/main/java/org/jboss/netty/akka/util/TimerTask.java rename to akka-actor/src/main/java/akka/util/internal/TimerTask.java index 3d0190d8f5..673dde67c7 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/TimerTask.java +++ b/akka-actor/src/main/java/akka/util/internal/TimerTask.java @@ -13,11 +13,11 @@ * License for the specific language governing permissions and limitations * under the License. */ -package org.jboss.netty.akka.util; +package akka.util.internal; /** * A task which is executed after the delay specified with - * {@link Timer#newTimeout(org.jboss.netty.akka.util.TimerTask, long, java.util.concurrent.TimeUnit)} + * {@link Timer#newTimeout(TimerTask, long, java.util.concurrent.TimeUnit)} * . * * @author The Netty Project @@ -28,7 +28,7 @@ public interface TimerTask { /** * Executed after the delay specified with - * {@link Timer#newTimeout(org.jboss.netty.akka.util.TimerTask, long, java.util.concurrent.TimeUnit)} + * {@link Timer#newTimeout(TimerTask, long, java.util.concurrent.TimeUnit)} * . 
* * @param timeout diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index a1d30ddbc6..ab2996f0a7 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -7,16 +7,15 @@ package akka.actor import akka.event._ import akka.dispatch._ import akka.pattern.ask -import org.jboss.netty.akka.util.HashedWheelTimer -import java.util.concurrent.TimeUnit.MILLISECONDS import com.typesafe.config.{ Config, ConfigFactory } import scala.annotation.tailrec -import org.jboss.netty.akka.util.internal.ConcurrentIdentityHashMap import java.io.Closeable import akka.dispatch.Await.{ Awaitable, CanAwait } import akka.util._ +import akka.util.internal.{ HashedWheelTimer, ConcurrentIdentityHashMap } import collection.immutable.Stack import java.util.concurrent.{ ThreadFactory, CountDownLatch, TimeoutException, RejectedExecutionException } +import java.util.concurrent.TimeUnit.MILLISECONDS object ActorSystem { diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 91e54a592d..8ad3d8ee98 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -5,12 +5,13 @@ package akka.actor import akka.util.Duration -import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout, Timer } +import akka.util.internal.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout, Timer } import akka.event.LoggingAdapter import akka.dispatch.MessageDispatcher import java.io.Closeable import java.util.concurrent.atomic.AtomicReference import scala.annotation.tailrec +import akka.util.internal._ //#scheduler /** From 1a3329baa2c9376288d2b534c6935ae870df26a4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 20 May 2012 16:00:24 +0200 Subject: [PATCH 067/538] #2091 - Adding a small intro to the Microkernel docs to 
state what the purpose of it is. --- akka-docs/java/microkernel.rst | 4 ++++ akka-docs/scala/microkernel.rst | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/akka-docs/java/microkernel.rst b/akka-docs/java/microkernel.rst index 551c118e94..12afbc93c9 100644 --- a/akka-docs/java/microkernel.rst +++ b/akka-docs/java/microkernel.rst @@ -4,6 +4,10 @@ Microkernel (Java) ================== +The purpose of the Akka Microkernel is to offer a bundling mechanism so that you can distribute +an Akka application as a single payload, without the need to run in a Java Application Server or manually +having to create a launcher script. + The Akka Microkernel is included in the Akka download found at `downloads`_. .. _downloads: http://akka.io/downloads diff --git a/akka-docs/scala/microkernel.rst b/akka-docs/scala/microkernel.rst index 8fb1aec2c2..df0e623eee 100644 --- a/akka-docs/scala/microkernel.rst +++ b/akka-docs/scala/microkernel.rst @@ -4,6 +4,10 @@ Microkernel (Scala) =================== +The purpose of the Akka Microkernel is to offer a bundling mechanism so that you can distribute +an Akka application as a single payload, without the need to run in a Java Application Server or manually +having to create a launcher script. + The Akka Microkernel is included in the Akka download found at `downloads`_. .. 
_downloads: http://akka.io/downloads From 4a2227fc95314610577fd27eb75c669da1e98ad6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 20 May 2012 19:03:20 +0200 Subject: [PATCH 068/538] Removing the AtomicReference from Dispatcher and restructured the code a bit --- .../akka/dispatch/BalancingDispatcher.scala | 2 +- .../main/scala/akka/dispatch/Dispatcher.scala | 33 ++++++++++++------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index ee492409ec..dea29643c7 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -87,7 +87,7 @@ class BalancingDispatcher( @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = if (messageQueue.hasMessages && i.hasNext - && (executorService.get().executor match { + && (executorService match { case lm: LoadMetrics ⇒ lm.atFullThrottle == false case other ⇒ true }) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 3a73bf0718..8dd7ecf8a2 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -33,11 +33,15 @@ class Dispatcher( val shutdownTimeout: Duration) extends MessageDispatcher(_prerequisites) { - protected val executorServiceFactory: ExecutorServiceFactory = - executorServiceFactoryProvider.createExecutorServiceFactory(id, prerequisites.threadFactory) + private class LazyExecutorServiceDelegate(factory: ExecutorServiceFactory) extends ExecutorServiceDelegate { + lazy val executor: ExecutorService = factory.createExecutorService + def copy(): LazyExecutorServiceDelegate = new LazyExecutorServiceDelegate(factory) + } - protected val executorService = new AtomicReference[ExecutorServiceDelegate]( - new 
ExecutorServiceDelegate { lazy val executor = executorServiceFactory.createExecutorService }) + @volatile private var executorServiceDelegate: LazyExecutorServiceDelegate = + new LazyExecutorServiceDelegate(executorServiceFactoryProvider.createExecutorServiceFactory(id, prerequisites.threadFactory)) + + protected final def executorService: ExecutorService = executorServiceDelegate /** * INTERNAL USE ONLY @@ -62,11 +66,11 @@ class Dispatcher( */ protected[akka] def executeTask(invocation: TaskInvocation) { try { - executorService.get() execute invocation + executorService execute invocation } catch { case e: RejectedExecutionException ⇒ try { - executorService.get() execute invocation + executorService execute invocation } catch { case e2: RejectedExecutionException ⇒ prerequisites.eventStream.publish(Error(e, getClass.getName, getClass, "executeTask was rejected twice!")) @@ -83,10 +87,15 @@ class Dispatcher( /** * INTERNAL USE ONLY */ - protected[akka] def shutdown: Unit = - Option(executorService.getAndSet(new ExecutorServiceDelegate { - lazy val executor = executorServiceFactory.createExecutorService - })) foreach { _.shutdown() } + protected[akka] def shutdown: Unit = { + val newDelegate = executorServiceDelegate.copy() // Doesn't matter which one we copy + val es = synchronized { // FIXME getAndSet using ARFU or Unsafe + val service = executorServiceDelegate + executorServiceDelegate = newDelegate // just a quick getAndSet + service + } + es.shutdown() + } /** * Returns if it was registered @@ -97,12 +106,12 @@ class Dispatcher( if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { //This needs to be here to ensure thread safety and no races if (mbox.setAsScheduled()) { try { - executorService.get() execute mbox + executorService execute mbox true } catch { case e: RejectedExecutionException ⇒ try { - executorService.get() execute mbox + executorService execute mbox true } catch { //Retry once case e: RejectedExecutionException ⇒ From 
e357b9825b11617679a1ba44e1e5fdbb44e45f4c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 20 May 2012 19:06:31 +0200 Subject: [PATCH 069/538] Adding return types in Dispatcher --- akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 8dd7ecf8a2..c8ae187c66 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -46,7 +46,7 @@ class Dispatcher( /** * INTERNAL USE ONLY */ - protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope) = { + protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = { val mbox = receiver.mailbox mbox.enqueue(receiver.self, invocation) registerForExecution(mbox, true, false) @@ -55,7 +55,7 @@ class Dispatcher( /** * INTERNAL USE ONLY */ - protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage) = { + protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage): Unit = { val mbox = receiver.mailbox mbox.systemEnqueue(receiver.self, invocation) registerForExecution(mbox, false, true) @@ -124,7 +124,7 @@ class Dispatcher( } else false } - override val toString = Logging.simpleName(this) + "[" + id + "]" + override val toString: String = Logging.simpleName(this) + "[" + id + "]" } object PriorityGenerator { From 4e2c4955b34095781729b733cf3db296eb41643e Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 20 May 2012 20:16:06 +0200 Subject: [PATCH 070/538] Changed package to akka.jmx, for OSGi purposes. 
See #2079 --- akka-kernel/src/main/dist/bin/akka-cluster | 2 +- akka-kernel/src/main/java/{org/archive => akka}/jmx/Client.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename akka-kernel/src/main/java/{org/archive => akka}/jmx/Client.java (99%) diff --git a/akka-kernel/src/main/dist/bin/akka-cluster b/akka-kernel/src/main/dist/bin/akka-cluster index 7bb3a670da..3e76cdbb11 100755 --- a/akka-kernel/src/main/dist/bin/akka-cluster +++ b/akka-kernel/src/main/dist/bin/akka-cluster @@ -19,7 +19,7 @@ declare AKKA_HOME="$(cd "$(cd "$(dirname "$0")"; pwd -P)"/..; pwd)" [ -n "$JMX_CLIENT_CLASSPATH" ] || JMX_CLIENT_CLASSPATH="$AKKA_HOME/lib/akka/akka-kernel-*" # NOTE: The 'cmdline-jmxclient' is available as part of the Akka distribution. -JMX_CLIENT="java -cp $JMX_CLIENT_CLASSPATH org.archive.jmx.Client -" +JMX_CLIENT="java -cp $JMX_CLIENT_CLASSPATH akka.jmx.Client -" SELF=`basename $0` # script name HOST=$1 # cluster node:port to talk to through JMX diff --git a/akka-kernel/src/main/java/org/archive/jmx/Client.java b/akka-kernel/src/main/java/akka/jmx/Client.java similarity index 99% rename from akka-kernel/src/main/java/org/archive/jmx/Client.java rename to akka-kernel/src/main/java/akka/jmx/Client.java index 136de87ec3..9ebf63e9eb 100644 --- a/akka-kernel/src/main/java/org/archive/jmx/Client.java +++ b/akka-kernel/src/main/java/akka/jmx/Client.java @@ -23,7 +23,7 @@ * along with Heritrix; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -package org.archive.jmx; +package akka.jmx; import java.io.IOException; import java.io.PrintWriter; From 6eb8da6eb54c62e05920829089c29f966bfcbef1 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 20 May 2012 21:00:13 +0200 Subject: [PATCH 071/538] Minor adjustments from feedback. 
See #2072 --- .../akka/docs/testkit/TestKitUsageSpec.scala | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala b/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala index 4f6c97abf7..d2b2f9367d 100644 --- a/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala +++ b/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala @@ -36,8 +36,8 @@ class TestKitUsageSpec val filterRef = system.actorOf(Props(new FilteringActor(testActor))) val randomHead = Random.nextInt(6) val randomTail = Random.nextInt(10) - val headList = List().padTo(randomHead, "0") - val tailList = List().padTo(randomTail, "1") + val headList = Seq().padTo(randomHead, "0") + val tailList = Seq().padTo(randomTail, "1") val seqRef = system.actorOf(Props(new SequencingActor(testActor, headList, tailList))) override def afterAll { @@ -62,7 +62,7 @@ class TestKitUsageSpec } "A FilteringActor" should { "Filter all messages, except expected messagetypes it receives" in { - var messages = List[String]() + var messages = Seq[String]() within(500 millis) { filterRef ! "test" expectMsg("test") @@ -75,11 +75,11 @@ class TestKitUsageSpec filterRef ! 1 receiveWhile(500 millis) { - case msg: String ⇒ messages = msg :: messages + case msg: String ⇒ messages = msg +: messages } } messages.length should be(3) - messages.reverse should be(List("some", "more", "text")) + messages.reverse should be(Seq("some", "more", "text")) } } "A SequencingActor" should { @@ -142,13 +142,13 @@ object TestKitUsageSpec { * like to test that the interesting value is received and that you cant * be bothered with the rest */ - class SequencingActor(next: ActorRef, head: List[String], tail: List[String]) + class SequencingActor(next: ActorRef, head: Seq[String], tail: Seq[String]) extends Actor { def receive = { case msg ⇒ { - head map (next ! _) + head foreach { next ! _ } next ! msg - tail map (next ! 
_) + tail foreach { next ! _ } } } } From 1e82a231c9afd22a758b858d534981eccd6a280d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 21 May 2012 07:46:48 +0200 Subject: [PATCH 072/538] Incorporate review feedback. See #2061 --- .../akka/docs/actor/mailbox/DurableMailboxDocSpec.scala | 8 +++++++- akka-docs/modules/durable-mailbox.rst | 7 ++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala index 25f312cac3..c81f16e896 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -73,13 +73,19 @@ class MyMessageQueue(_owner: ActorContext) def dequeue(): Envelope = { val data: Option[Array[Byte]] = storage.pull() - data.map(deserialize(_)).getOrElse(null) + data.map(deserialize).orNull } def hasMessages: Boolean = !storage.isEmpty def numberOfMessages: Int = storage.size + /** + * Called when the mailbox is disposed. + * An ordinary mailbox would send remaining messages to deadLetters, + * but the purpose of a durable mailbox is to continue + * with the same message queue when the actor is started again. + */ def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () } diff --git a/akka-docs/modules/durable-mailbox.rst b/akka-docs/modules/durable-mailbox.rst index 4de60ea12b..aca9d51eb5 100644 --- a/akka-docs/modules/durable-mailbox.rst +++ b/akka-docs/modules/durable-mailbox.rst @@ -9,7 +9,7 @@ Overview ======== -A durable mailbox is a replacement for the standard actor mailbox that is durable. +A durable mailbox is a mailbox which stores the messages on durable storage. 
What this means in practice is that if there are pending messages in the actor's mailbox when the node of the actor resides on crashes, then when you restart the node, the actor will be able to continue processing as if nothing had happened; @@ -29,7 +29,7 @@ Open Source projects, such as: * `AMQP Durable Mailbox `_ -A durable mailbox typically doesn't implements transactions for current message. It's possible +A durable mailbox is like any other mailbox not likely to be transactional. It's possible if the actor crashes after receiving a message, but before completing processing of it, that the message could be lost. @@ -98,4 +98,5 @@ Add this dependency:: "com.typesafe.akka" % "akka-mailboxes-common-test" % "2.1-SNAPSHOT" For more inspiration you can look at the old implementations based on Redis, MongoDB, Beanstalk, -and ZooKeeper, which can be found in Akka git repository tag v2.0.1. \ No newline at end of file +and ZooKeeper, which can be found in Akka git repository tag +`v2.0.1 `_. 
\ No newline at end of file From 162d59db35b680594bcbdae56934807697371845 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 21 May 2012 08:08:51 +0200 Subject: [PATCH 073/538] Removed ResizerSpec test violating routing rules --- .../test/scala/akka/routing/ResizerSpec.scala | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 111460e3ac..ede4a69d7c 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -128,36 +128,6 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with current.routees.size must be(2) } - // FIXME this test violates the rule that you can not use a BalancingDispatcher with any kind of Router - now throws a ConfigurationException in verification process - "resize when busy" ignore { - - val busy = new TestLatch(1) - - val resizer = DefaultResizer( - lowerBound = 1, - upperBound = 3, - pressureThreshold = 0, - messagesPerResize = 1) - - val router = system.actorOf(Props[BusyActor].withRouter(RoundRobinRouter(resizer = Some(resizer))).withDispatcher("bal-disp")) - - val latch1 = new TestLatch(1) - router ! (latch1, busy) - Await.ready(latch1, 2 seconds) - - val latch2 = new TestLatch(1) - router ! (latch2, busy) - Await.ready(latch2, 2 seconds) - - val latch3 = new TestLatch(1) - router ! (latch3, busy) - Await.ready(latch3, 2 seconds) - - Await.result(router ? 
CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3) - - busy.countDown() - } - "grow as needed under pressure" in { // make sure the pool starts at the expected lower limit and grows to the upper as needed // as influenced by the backlog of blocking pooled actors From 7784513537df669f048484feea900963fc1a9cb1 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 13:39:39 +0200 Subject: [PATCH 074/538] Fixing logic bug introduced in refactor --- .../src/main/scala/akka/dispatch/BalancingDispatcher.scala | 2 +- akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index dea29643c7..43e8944105 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -87,7 +87,7 @@ class BalancingDispatcher( @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = if (messageQueue.hasMessages && i.hasNext - && (executorService match { + && (executorService.executor match { case lm: LoadMetrics ⇒ lm.atFullThrottle == false case other ⇒ true }) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index c8ae187c66..3c17ab8db4 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -41,7 +41,7 @@ class Dispatcher( @volatile private var executorServiceDelegate: LazyExecutorServiceDelegate = new LazyExecutorServiceDelegate(executorServiceFactoryProvider.createExecutorServiceFactory(id, prerequisites.threadFactory)) - protected final def executorService: ExecutorService = executorServiceDelegate + protected final def executorService: ExecutorServiceDelegate = executorServiceDelegate /** * 
INTERNAL USE ONLY From e14f9d01304db0fc42a96f641d6107ab48ee57ee Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 13:40:14 +0200 Subject: [PATCH 075/538] Switching to indexOf instead of Seq.contains --- akka-actor/src/main/scala/akka/actor/ActorSelection.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index 44767cb0b6..93a26c6289 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -52,7 +52,7 @@ object ActorSelection { */ def apply(anchor: ActorRef, path: String): ActorSelection = { val elems = path.split("/+").dropWhile(_.isEmpty) - val compiled: Array[AnyRef] = elems map (x ⇒ if (x.contains('?') || x.contains('*')) Helpers.makePattern(x) else x) + val compiled: Array[AnyRef] = elems map (x ⇒ if ((x.indexOf('?') != -1) || (x.indexOf('*') != -1)) Helpers.makePattern(x) else x) new ActorSelection with ScalaActorSelection { def target = anchor def path = compiled From 26f6c48ae1c5642b8fb4752829211e16bc0e7762 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 13:47:48 +0200 Subject: [PATCH 076/538] HUGE CHANGE - moving behaviorStack into ActorCell --- .../test/scala/akka/actor/ActorRefSpec.scala | 2 +- .../scala/akka/actor/ActorWithStashSpec.scala | 2 +- .../test/scala/akka/actor/FSMActorSpec.scala | 2 +- .../scala/akka/actor/ReceiveTimeoutSpec.scala | 8 ++-- .../akka/actor/RestartStrategySpec.scala | 10 ++-- .../akka/actor/SupervisorHierarchySpec.scala | 4 +- .../scala/akka/actor/SupervisorMiscSpec.scala | 2 +- .../akka/actor/dispatch/ActorModelSpec.scala | 4 +- .../src/main/scala/akka/actor/Actor.scala | 45 ++---------------- .../src/main/scala/akka/actor/ActorCell.scala | 29 +++++++---- .../src/main/scala/akka/actor/ActorPath.scala | 4 +- .../src/main/scala/akka/actor/ActorRef.scala | 10 ++-- 
.../src/main/scala/akka/actor/FSM.scala | 2 +- .../main/scala/akka/actor/FaultHandling.scala | 7 +-- akka-actor/src/main/scala/akka/actor/IO.scala | 2 +- .../src/main/scala/akka/actor/Scheduler.scala | 2 +- .../main/scala/akka/actor/UntypedActor.scala | 2 +- .../src/main/scala/akka/camel/Producer.scala | 2 +- .../akka/camelexamples/ExamplesSupport.scala | 6 +-- .../_2_SupervisedConsumers.scala | 4 +- .../akka/camel/ProducerFeatureTest.scala | 4 +- .../component/ActorProducerTest.scala | 2 +- akka-docs/scala/actors.rst | 2 +- .../scala/akka/testkit/TestActorRef.scala | 2 +- .../zeromq/ConcurrentSocketActorSpec.scala | 2 +- file-based/mailbox_user__a | 0 file-based/mailbox_user__b | 0 file-based/mailbox_user__c | Bin 12892 -> 0 bytes file-based/mailbox_user__d | Bin 703 -> 0 bytes file-based/mailbox_user__e | Bin 703 -> 0 bytes file-based/mailbox_user__f | Bin 703 -> 0 bytes 31 files changed, 71 insertions(+), 90 deletions(-) delete mode 100644 file-based/mailbox_user__a delete mode 100644 file-based/mailbox_user__b delete mode 100644 file-based/mailbox_user__c delete mode 100644 file-based/mailbox_user__d delete mode 100644 file-based/mailbox_user__e delete mode 100644 file-based/mailbox_user__f diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala index e8c667bc7e..bec066d97a 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala @@ -393,7 +393,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { override def postRestart(reason: Throwable) = latch.countDown() })) - protected def receive = { case "sendKill" ⇒ ref ! Kill } + def receive = { case "sendKill" ⇒ ref ! Kill } })) boss ! 
"sendKill" diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala index c516a17a42..524913b01d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala @@ -131,7 +131,7 @@ class ActorWithStashSpec extends AkkaSpec(ActorWithStashSpec.testConf) with Defa val hasMsgLatch = new TestLatch val slaveProps = myProps(new Actor with Stash { - protected def receive = { + def receive = { case "crash" ⇒ throw new Exception("Crashing...") diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index ef49cbc18d..cc98a23f1f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -147,7 +147,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im object Hello object Bye val tester = system.actorOf(Props(new Actor { - protected def receive = { + def receive = { case Hello ⇒ lock ! "hello" case "world" ⇒ answerLatch.open case Bye ⇒ lock ! 
"bye" diff --git a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala index dc08df1c98..09fe9c103f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala @@ -22,7 +22,7 @@ class ReceiveTimeoutSpec extends AkkaSpec { val timeoutActor = system.actorOf(Props(new Actor { context.setReceiveTimeout(500 milliseconds) - protected def receive = { + def receive = { case ReceiveTimeout ⇒ timeoutLatch.open } })) @@ -38,7 +38,7 @@ class ReceiveTimeoutSpec extends AkkaSpec { val timeoutActor = system.actorOf(Props(new Actor { context.setReceiveTimeout(500 milliseconds) - protected def receive = { + def receive = { case Tick ⇒ () case ReceiveTimeout ⇒ timeoutLatch.open } @@ -58,7 +58,7 @@ class ReceiveTimeoutSpec extends AkkaSpec { val timeoutActor = system.actorOf(Props(new Actor { context.setReceiveTimeout(500 milliseconds) - protected def receive = { + def receive = { case Tick ⇒ () case ReceiveTimeout ⇒ count.incrementAndGet @@ -78,7 +78,7 @@ class ReceiveTimeoutSpec extends AkkaSpec { val timeoutLatch = TestLatch() val timeoutActor = system.actorOf(Props(new Actor { - protected def receive = { + def receive = { case ReceiveTimeout ⇒ timeoutLatch.open } })) diff --git a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala index 829ab081e0..8d114bc396 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala @@ -40,7 +40,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Ping ⇒ countDownLatch.countDown() case Crash ⇒ throw new Exception("Crashing...") } @@ -83,7 +83,7 @@ class RestartStrategySpec extends 
AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Crash ⇒ throw new Exception("Crashing...") } @@ -110,7 +110,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Ping ⇒ if (!pingLatch.isOpen) pingLatch.open else secondPingLatch.open case Crash ⇒ throw new Exception("Crashing...") @@ -166,7 +166,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Ping ⇒ countDownLatch.countDown() case Crash ⇒ throw new Exception("Crashing...") } @@ -221,7 +221,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val slaveProps = Props(new Actor { - protected def receive = { + def receive = { case Ping ⇒ countDownLatch.countDown() case Crash ⇒ throw new Exception("Crashing...") } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala index a04e83f39b..62752d8052 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala @@ -20,7 +20,7 @@ object SupervisorHierarchySpec { */ class CountDownActor(countDown: CountDownLatch, override val supervisorStrategy: SupervisorStrategy) extends Actor { - protected def receive = { + def receive = { case p: Props ⇒ sender ! context.actorOf(p) } // test relies on keeping children around during restart @@ -67,7 +67,7 @@ class SupervisorHierarchySpec extends AkkaSpec with DefaultTimeout { val crasher = context.watch(context.actorOf(Props(new CountDownActor(countDownMessages, SupervisorStrategy.defaultStrategy)))) - protected def receive = { + def receive = { case "killCrasher" ⇒ crasher ! 
Kill case Terminated(_) ⇒ countDownMax.countDown() } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala index 92af540a9a..197e749d2e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala @@ -37,7 +37,7 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul val workerProps = Props(new Actor { override def postRestart(cause: Throwable) { countDownLatch.countDown() } - protected def receive = { + def receive = { case "status" ⇒ this.sender ! "OK" case _ ⇒ this.context.stop(self) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 88358e9f16..854a562745 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -400,9 +400,9 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa val a = newTestActor(dispatcher.id) val f1 = a ? Reply("foo") val f2 = a ? Reply("bar") - val f3 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(ActorInterruptedException(ie)) } + val f3 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)) } val f4 = a ? Reply("foo2") - val f5 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(ActorInterruptedException(ie)) } + val f5 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)) } val f6 = a ? 
Reply("bar2") assert(Await.result(f1, timeout.duration) === "foo") diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 7c020925eb..b26da76ff2 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -152,7 +152,7 @@ case class DeathPactException private[akka] (dead: ActorRef) * When an InterruptedException is thrown inside an Actor, it is wrapped as an ActorInterruptedException as to * avoid cascading interrupts to other threads than the originally interrupted one. */ -case class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaException(cause.getMessage, cause) with NoStackTrace +class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaException(cause.getMessage, cause) with NoStackTrace /** * This message is published to the EventStream whenever an Actor receives a message it doesn't understand @@ -173,6 +173,7 @@ object Status { /** * This class/message type is preferably used to indicate failure of some operation performed. + * As an example, it is used to signal failure with AskSupport is used (ask/?). */ case class Failure(cause: Throwable) extends Status } @@ -317,7 +318,7 @@ trait Actor { * This defines the initial actor behavior, it must return a partial function * with the actor logic. */ - protected def receive: Receive + def receive: Receive /** * User overridable definition the strategy to use for supervising @@ -377,45 +378,5 @@ trait Actor { case _ ⇒ context.system.eventStream.publish(UnhandledMessage(message, sender, self)) } } - - // ========================================= - // ==== INTERNAL IMPLEMENTATION DETAILS ==== - // ========================================= - - /** - * For Akka internal use only. 
- */ - private[akka] final def apply(msg: Any) = { - //FIXME replace with behaviorStack.head.applyOrElse(msg, unhandled) + "-optimize" - val head = behaviorStack.head - if (head.isDefinedAt(msg)) head.apply(msg) else unhandled(msg) - } - - /** - * For Akka internal use only. - */ - private[akka] def pushBehavior(behavior: Receive): Unit = { - behaviorStack = behaviorStack.push(behavior) - } - - /** - * For Akka internal use only. - */ - private[akka] def popBehavior(): Unit = { - val original = behaviorStack - val popped = original.pop - behaviorStack = if (popped.isEmpty) original else popped - } - - /** - * For Akka internal use only. - */ - private[akka] def clearBehaviorStack(): Unit = - behaviorStack = Stack.empty[Receive].push(behaviorStack.last) - - /** - * For Akka internal use only. - */ - private var behaviorStack: Stack[Receive] = Stack.empty[Receive].push(receive) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index bd5342fec4..6a25ac04a9 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -409,6 +409,8 @@ private[akka] class ActorCell( var actor: Actor = _ + private var behaviorStack: Stack[Actor.Receive] = Stack.empty + @volatile //This must be volatile since it isn't protected by the mailbox status var mailbox: Mailbox = _ @@ -489,8 +491,7 @@ private[akka] class ActorCell( //This method is in charge of setting up the contextStack and create a new instance of the Actor protected def newActor(): Actor = { - val stackBefore = contextStack.get - contextStack.set(stackBefore.push(this)) + contextStack.set(contextStack.get.push(this)) try { val instance = props.creator() @@ -511,6 +512,7 @@ private[akka] class ActorCell( def create(): Unit = if (isNormal) { try { val created = newActor() + behaviorStack = Stack.empty.push(created.receive) actor = created created.preStart() checkReceiveTimeout @@ -612,7 
+614,7 @@ private[akka] class ActorCell( cancelReceiveTimeout() // FIXME: leave this here??? messageHandle.message match { case msg: AutoReceivedMessage ⇒ autoReceiveMessage(messageHandle) - case msg ⇒ actor(msg) + case msg ⇒ receiveMessage(msg) } currentMessage = null // reset current message after successful invocation } catch { @@ -628,14 +630,14 @@ private[akka] class ActorCell( if (actor ne null) actor.supervisorStrategy.handleSupervisorFailing(self, children) } finally { t match { // Wrap InterruptedExceptions and rethrow - case _: InterruptedException ⇒ parent.tell(Failed(ActorInterruptedException(t)), self); throw t + case _: InterruptedException ⇒ parent.tell(Failed(new ActorInterruptedException(t)), self); throw t case _ ⇒ parent.tell(Failed(t), self) } } def become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = { if (discardOld) unbecome() - actor.pushBehavior(behavior) + behaviorStack = behaviorStack.push(behavior) } /** @@ -651,9 +653,13 @@ private[akka] class ActorCell( become(newReceive, discardOld) } - def unbecome(): Unit = actor.popBehavior() + def unbecome(): Unit = { + val original = behaviorStack + val popped = original.pop + behaviorStack = if (popped.isEmpty) original else popped + } - def autoReceiveMessage(msg: Envelope) { + def autoReceiveMessage(msg: Envelope): Unit = { if (system.settings.DebugAutoReceive) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "received AutoReceiveMessage " + msg)) @@ -667,6 +673,12 @@ private[akka] class ActorCell( } } + final def receiveMessage(msg: Any): Unit = { + //FIXME replace with behaviorStack.head.applyOrElse(msg, unhandled) + "-optimize" + val head = behaviorStack.head + if (head.isDefinedAt(msg)) head.apply(msg) else actor.unhandled(msg) + } + private def doTerminate() { val a = actor try { @@ -682,7 +694,7 @@ private[akka] class ActorCell( if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) } finally { - 
if (a ne null) a.clearBehaviorStack() + behaviorStack = Stack.empty clearActorFields(a) actor = null } @@ -694,6 +706,7 @@ private[akka] class ActorCell( actor.supervisorStrategy.handleSupervisorRestarted(cause, self, children) val freshActor = newActor() + behaviorStack = Stack.empty.push(freshActor.receive) actor = freshActor // this must happen before postRestart has a chance to fail if (freshActor eq failedActor) setActorFields(freshActor, this, self) // If the creator returns the same instance, we need to restore our nulled out fields. diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index e8c277660f..aa93dbcc47 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -15,7 +15,9 @@ object ActorPath { } /** - * This Regular Expression is used to validate a path element (Actor Name) + * This Regular Expression is used to validate a path element (Actor Name). 
+ * Since Actors form a tree, it is addressable using an URL, therefor an Actor Name has to conform to: + * http://www.ietf.org/rfc/rfc2396.txt */ val ElementRegex = """[-\w:@&=+,.!~*'_;][-\w:@&=+,.!~*'$_;]*""".r } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index d0ad270957..46e3440f95 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -335,9 +335,10 @@ private[akka] class LocalActorRef private[akka] ( /** * Memento pattern for serializing ActorRefs transparently + * INTERNAL API */ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed -case class SerializedActorRef private (path: String) { +private[akka] case class SerializedActorRef private (path: String) { import akka.serialization.JavaSerializer.currentSystem @throws(classOf[java.io.ObjectStreamException]) @@ -350,8 +351,11 @@ case class SerializedActorRef private (path: String) { someSystem.actorFor(path) } } -//FIXME: Should SerializedActorRef be private[akka] ? 
-object SerializedActorRef { + +/** + * INTERNAL API + */ +private[akka] object SerializedActorRef { def apply(path: ActorPath): SerializedActorRef = { Serialization.currentTransportAddress.value match { case null ⇒ new SerializedActorRef(path.toString) diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 71d1ec7e69..762f23b16b 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -437,7 +437,7 @@ trait FSM[S, D] extends Listeners with ActorLogging { * Main actor receive() method * ******************************************* */ - override final protected def receive: Receive = { + override final def receive: Receive = { case TimeoutMarker(gen) ⇒ if (generation == gen) { processMsg(StateTimeout, "state timeout") diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 383010f9de..8641153476 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -176,9 +176,10 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { /** * Sort so that subtypes always precede their supertypes, but without * obeying any order between unrelated subtypes (insert sort). + * + * INTERNAL API */ - //FIXME Should this really be public API? 
- def sort(in: Iterable[CauseDirective]): Seq[CauseDirective] = + private[akka] def sort(in: Iterable[CauseDirective]): Seq[CauseDirective] = (new ArrayBuffer[CauseDirective](in.size) /: in) { (buf, ca) ⇒ buf.indexWhere(_._1 isAssignableFrom ca._1) match { case -1 ⇒ buf append ca @@ -195,7 +196,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { } /** - * An Akka SupervisorStrategy is + * An Akka SupervisorStrategy is the policy to apply for crashing children */ abstract class SupervisorStrategy { diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index 3ff91c4fa8..63eb2e4b3a 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -987,7 +987,7 @@ final class IOManagerActor extends Actor with ActorLogging { } } - protected def receive = { + def receive = { case Select ⇒ select() if (running) self ! Select diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 8ad3d8ee98..850cb02048 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -236,7 +236,7 @@ private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout](Con def isCancelled(): Boolean = get match { case null ⇒ true - case some ⇒ isCancelled() + case some ⇒ some.isCancelled() } def cancel(): Unit = diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index c56a2a0167..9420ab84cc 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -156,7 +156,7 @@ abstract class UntypedActor extends Actor { */ override def postRestart(reason: Throwable): Unit = super.postRestart(reason) - final protected def receive = { case msg ⇒ onReceive(msg) } + final def receive = { case 
msg ⇒ onReceive(msg) } } /** diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala index 80537fda12..33541d4611 100644 --- a/akka-camel/src/main/scala/akka/camel/Producer.scala +++ b/akka-camel/src/main/scala/akka/camel/Producer.scala @@ -134,7 +134,7 @@ trait Producer extends ProducerSupport { this: Actor ⇒ * Default implementation of Actor.receive. Any messages received by this actors * will be produced to the endpoint specified by endpointUri. */ - protected def receive = produce + def receive = produce } /** diff --git a/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala b/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala index 3e35b8c7c9..df5b0e5508 100644 --- a/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala +++ b/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala @@ -19,7 +19,7 @@ private[camelexamples] class SysOutConsumer extends Consumer { override def activationTimeout = 10 seconds def endpointUri = "file://data/input/CamelConsumer" - protected def receive = { + def receive = { case msg: CamelMessage ⇒ { printf("Received '%s'\n", msg.bodyAs[String]) } @@ -30,12 +30,12 @@ private[camelexamples] class TroubleMaker extends Consumer { def endpointUri = "WRONG URI" println("Trying to instantiate conumer with uri: " + endpointUri) - protected def receive = { case _ ⇒ } + def receive = { case _ ⇒ } } private[camelexamples] class SysOutActor(implicit camel: Camel) extends Actor { implicit val camelContext = camel.context - protected def receive = { + def receive = { case msg: CamelMessage ⇒ { printf("Received '%s'\n", msg.bodyAs[String]) } diff --git a/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala b/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala index 5d321b28db..cdf46f012f 100644 --- a/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala +++ 
b/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala @@ -14,7 +14,7 @@ private[camelexamples] object SupervisedConsumersExample extends App { system.actorOf(Props(new Actor { context.watch(context.actorOf(Props[EndpointManager])) - protected def receive = { + def receive = { case Terminated(ref) ⇒ system.shutdown() } })) @@ -30,7 +30,7 @@ private[camelexamples] class EndpointManager extends Actor { watch(actorOf(Props[SysOutConsumer])) watch(actorOf(Props[TroubleMaker])) - protected def receive = { + def receive = { case Terminated(ref) ⇒ { printf("Hey! One of the endpoints has died: %s. I am doing sepuku...\n", ref) self ! PoisonPill diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala index cef098b8fe..a7e5b9e4cb 100644 --- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala @@ -244,7 +244,7 @@ object ProducerFeatureTest { } class TestResponder extends Actor { - protected def receive = { + def receive = { case msg: CamelMessage ⇒ msg.body match { case "fail" ⇒ context.sender ! akka.actor.Status.Failure(new AkkaCamelException(new Exception("failure"), msg.headers)) case _ ⇒ @@ -256,7 +256,7 @@ object ProducerFeatureTest { } class ReplyingForwardTarget extends Actor { - protected def receive = { + def receive = { case msg: CamelMessage ⇒ context.sender ! 
(msg.addHeader("test" -> "result")) case msg: akka.actor.Status.Failure ⇒ diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala index 8146b17399..a0e153fd54 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala @@ -334,7 +334,7 @@ trait ActorProducerFixture extends MockitoSugar with BeforeAndAfterAll with Befo } def echoActor = system.actorOf(Props(new Actor { - protected def receive = { + def receive = { case msg ⇒ sender ! "received " + msg } })) diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 5374c8a37c..9269c841f5 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -443,7 +443,7 @@ An Actor has to implement the ``receive`` method to receive messages: .. code-block:: scala - protected def receive: PartialFunction[Any, Unit] + def receive: PartialFunction[Any, Unit] Note: Akka has an alias to the ``PartialFunction[Any, Unit]`` type called ``Receive`` (``akka.actor.Actor.Receive``), so you can use this type instead for diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 8a2f61bf76..0a5d6163e8 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -56,7 +56,7 @@ class TestActorRef[T <: Actor]( * thrown will be available to you, while still being able to use * become/unbecome. 
*/ - def receive(o: Any) { underlyingActor.apply(o) } + def receive(o: Any): Unit = underlying.receiveMessage(o) /** * Retrieve reference to the underlying actor, where the static type matches the factory used inside the diff --git a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala index dcc456b544..fe0d715dba 100644 --- a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala +++ b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala @@ -138,7 +138,7 @@ class ConcurrentSocketActorSpec extends AkkaSpec { } } - protected def receive = { + def receive = { case _ ⇒ val payload = "%s".format(messageNumber) messageNumber += 1 diff --git a/file-based/mailbox_user__a b/file-based/mailbox_user__a deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/file-based/mailbox_user__b b/file-based/mailbox_user__b deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/file-based/mailbox_user__c b/file-based/mailbox_user__c deleted file mode 100644 index e84f2a5e4f42e4329047049aad7186418b4bb478..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12892 zcmZQ7Wnf_NcU_QY&d2}E-d`)^;Mm4L+D95PfYcdKlYFSN2Nk%QJ$tcRGWi=UP8MUk?qcEeE)dN?W z84X-%W<+Lb7D9O$ZbW8jMh&aUEX}B4HJPOuHLNDHG^2*qWR_;su$s)$j2c#xS(;J9 zYBEbRDp+kyW@$zZs|T($GbXb%3#GguH72t(qlVRFmS)tjn#|IS8dj58no+}QGD|aR zSWRYWMh&aUEX}B3wF#M}88xgXvoxcI)dN?WnUGnUg;8FRnvhwVQNwC7OEYR%O=f9E z4Xep4&8T5DnWY&utR}NGqk`3@WR_;(lxHqeGD|aRSWRYWMh&aUEX}B4HJPOuHLNDH zG^2*qWR_;su$s)$j2c#xS(-&qAzzbOno+@OGcrpvYFJHXX+{mJ$t=yNVKteh88xgX zvoxcI)nt}t)UcY&(kznld~HT%X+{mJ$t=yNVKteh85OKHC$lu8hSg-2X4J5n%+ib+ zR+CwpMNyu)%*iaxs9`mkrCBr;d`)I)Mh&aUEX}B4HJPOuHLNDHG^2vm7G#!Y)UcY& z(u^8blUbTk!)h{1vslXWwFQ}_88xgXvoxcI)nt}t)UcY&(u^8blUbTk!)h{1vpC9A zxFwmT88xgXvoxcI)nt}t)UcY&(u^8blUbTk!)h{1Giq2(W@$zZtH~_Qs9`mkrCB@` oiWD+RvvLLo2J);H_o@E2Co+Z6FV+5~?A0N{bSca#DR0Gjo#iD}oDBll4oBQ;YOf 
zQiMXe0@u7{U@b0U5X?#}OVrCr%uCnv%qvMvPc0I9v|!2i#&!!P21ZW?*0RK$($tC~ z1_6XRztY^K)S|XEQ)RiQJm_X(V5lfyU|?VrU{q4)QpIg|N@`+?Pijd?YEd!c;Bq<> z2~KANh4kQZJ~IiN{bSca#DR0Gjo#iD}oDBll4oBQ;YOf zQiVdf0@u7{U@b0U5X?#}OVrCr%uCnv%qvMvPc0I9v|!2i#&!!P21ZW?*0RK$($tC~ z1_6XRztY^K)S|XEQ)RiQJm_X(V5lfyU|?VrU{q4)QpIg|N@`+?Pijd?YEd!c;Bq<> z2~KANh4kQZJ~IiX)PzmpCSuN{bSca#DR0Gjo#iD}oDBll4oBQ;YOf z(u6{}0@u7{U@b0U5X?#}OVrCr%uCnv%qvMvPc0I9v|!2i#&!!P21ZW?*0RK$($tC~ z1_6XRztY^K)S|XEQ)RiQJm_X(V5lfyU|?VrU{q4)QpIg|N@`+?Pijd?YEdyG8_2z2 z_uCB?_cM{;{=wyPW)fTu33&*~RKmc(U{6-~^Ko&3(oYEkb8%^|0HcyFmp1N*FRm;u UNzK(SNi8mMOfJbUGK5$U024mYO#lD@ From a98827c150e7df63aa88bb4708e472a1d262d901 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 21 May 2012 14:35:23 +0200 Subject: [PATCH 077/538] unb0rkening the akka-docs --- .../main/java/akka/remote/RemoteProtocol.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java index d168e5acc1..0794e54364 100644 --- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java +++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java @@ -309,7 +309,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -981,7 +981,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1977,7 +1977,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); 
maybeForceBuilderInitialization(); } @@ -2527,7 +2527,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2936,7 +2936,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3410,7 +3410,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3909,7 +3909,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -4487,7 +4487,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -5367,7 +5367,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6067,7 +6067,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6584,7 +6584,7 @@ public final class RemoteProtocol { 
maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } From 4a61a14d8b02a98ff263cb2bd5e0f2321432fddf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 21 May 2012 14:40:18 +0200 Subject: [PATCH 078/538] Added Akka Mist port and pointers to other community HTTP/REST projects to the HTTP docs --- akka-docs/modules/http.rst | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/akka-docs/modules/http.rst b/akka-docs/modules/http.rst index c2c01d3ebc..b0f54948d3 100644 --- a/akka-docs/modules/http.rst +++ b/akka-docs/modules/http.rst @@ -1,18 +1,30 @@ .. _http-module: HTTP -==== +#### -Play2-mini ----------- +Play2 Mini +========== -The Akka team recommends the `Play2-mini `_ framework when building RESTful +The Akka team recommends the `Play2 Mini `_ framework when building RESTful service applications that integrates with Akka. It provides a REST API on top of `Play2 `_. Getting started --------------- First you must make your application aware of play-mini. -In SBT you just have to add the following to your _libraryDependencies_:: +In SBT you just have to add the following to your ``libraryDependencies``:: libraryDependencies += "com.typesafe" %% "play-mini" % "" + +Akka Mist +========= + +If you are using Akka Mist (Akka's old HTTP/REST module) with Akka 1.x and wish to upgrade to 2.x +there is now a port of Akka Mist to Akka 2.x. You can find it `here `_. + +Other Alternatives +================== + +There are a bunch of other alternatives for using Akka with HTTP/REST. You can find some of them +among the `Community Projects `_. 
From 2a370520fd5fd7f8799d35c23950692056c94717 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 14:50:49 +0200 Subject: [PATCH 079/538] Cleaning up the cancellables, null-object pattern ftw --- .../src/main/scala/akka/actor/Scheduler.scala | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 850cb02048..30cadc5a3b 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -216,6 +216,14 @@ private[akka] object ContinuousCancellable { override def isCancelled: Boolean = false override def cancel: Unit = () } + + val cancelled: HWTimeout = new HWTimeout { + override def getTimer: Timer = null + override def getTask: TimerTask = null + override def isExpired: Boolean = false + override def isCancelled: Boolean = true + override def cancel: Unit = () + } } /** * Wrapper of a [[org.jboss.netty.akka.util.Timeout]] that delegates all @@ -229,24 +237,15 @@ private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout](Con } @tailrec private[akka] final def swap(newTimeout: HWTimeout): Unit = get match { - case null ⇒ newTimeout.cancel() - case some if some.isCancelled ⇒ cancel(); newTimeout.cancel() + case some if some.isCancelled ⇒ try cancel() finally newTimeout.cancel() case some ⇒ if (!compareAndSet(some, newTimeout)) swap(newTimeout) } - def isCancelled(): Boolean = get match { - case null ⇒ true - case some ⇒ some.isCancelled() - } - - def cancel(): Unit = - getAndSet(null) match { - case null ⇒ - case some ⇒ some.cancel() - } + def isCancelled(): Boolean = get().isCancelled() + def cancel(): Unit = getAndSet(ContinuousCancellable.cancelled).cancel() } -private[akka] class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { - override def cancel(): Unit = timeout.cancel() - override def isCancelled: Boolean = 
timeout.isCancelled +private[akka] class DefaultCancellable(timeout: HWTimeout) extends AtomicReference[HWTimeout](timeout) with Cancellable { + override def cancel(): Unit = getAndSet(ContinuousCancellable.cancelled).cancel() + override def isCancelled: Boolean = get().isCancelled } From c7ca6af9274dfb2b92f44f17062a831717fbc279 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 14:53:07 +0200 Subject: [PATCH 080/538] Adding a more robust test for the SchedulerSpec --- .../src/test/scala/akka/actor/SchedulerSpec.scala | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index 3b87af2aad..beeb2a4c3b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -18,7 +18,12 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout } override def afterEach { - while (cancellables.peek() ne null) { Option(cancellables.poll()).foreach(_.cancel()) } + while (cancellables.peek() ne null) { + for (c ← Option(cancellables.poll())) { + c.cancel() + c.isCancelled must be === true + } + } } "A Scheduler" must { From 6e46b089ff44617980fe8ce807a3000cbc088137 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 16:42:33 +0200 Subject: [PATCH 081/538] Adding the arguably more correct behavior stacking --- .../src/main/scala/akka/actor/ActorCell.scala | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 6a25ac04a9..c73e6fc4b2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -185,6 +185,8 @@ private[akka] object ActorCell { final val emptyReceiveTimeoutData: (Long, 
Cancellable) = (-1, emptyCancellable) + final val behaviorStackPlaceHolder: Stack[Actor.Receive] = Stack.empty.push(Actor.emptyBehavior) + sealed trait SuspendReason case object UserRequest extends SuspendReason case class Recreation(cause: Throwable) extends SuspendReason @@ -493,11 +495,18 @@ private[akka] class ActorCell( protected def newActor(): Actor = { contextStack.set(contextStack.get.push(this)) try { + import ActorCell.behaviorStackPlaceHolder + + behaviorStack = behaviorStackPlaceHolder val instance = props.creator() if (instance eq null) throw ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") + behaviorStack = behaviorStack match { + case `behaviorStackPlaceHolder` ⇒ Stack.empty.push(instance.receive) + case newBehaviors ⇒ Stack.empty.push(instance.receive).pushAll(newBehaviors.reverse.drop(1)) + } instance } finally { val stackAfter = contextStack.get @@ -512,7 +521,6 @@ private[akka] class ActorCell( def create(): Unit = if (isNormal) { try { val created = newActor() - behaviorStack = Stack.empty.push(created.receive) actor = created created.preStart() checkReceiveTimeout @@ -648,10 +656,8 @@ private[akka] class ActorCell( /* * UntypedActorContext impl */ - def become(behavior: Procedure[Any], discardOld: Boolean): Unit = { - def newReceive: Actor.Receive = { case msg ⇒ behavior.apply(msg) } - become(newReceive, discardOld) - } + def become(behavior: Procedure[Any], discardOld: Boolean): Unit = + become({ case msg ⇒ behavior.apply(msg) }: Actor.Receive, discardOld) def unbecome(): Unit = { val original = behaviorStack @@ -694,7 +700,7 @@ private[akka] class ActorCell( if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) } finally { - behaviorStack = Stack.empty + behaviorStack = ActorCell.behaviorStackPlaceHolder clearActorFields(a) actor = null } @@ -704,9 +710,7 @@ private[akka] class ActorCell( private def doRecreate(cause: Throwable, failedActor: 
Actor): Unit = try { // after all killed children have terminated, recreate the rest, then go on to start the new instance actor.supervisorStrategy.handleSupervisorRestarted(cause, self, children) - val freshActor = newActor() - behaviorStack = Stack.empty.push(freshActor.receive) actor = freshActor // this must happen before postRestart has a chance to fail if (freshActor eq failedActor) setActorFields(freshActor, this, self) // If the creator returns the same instance, we need to restore our nulled out fields. From ae84ee0a5765a88c9b0a37ae7d15aacac7cf1773 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 21 May 2012 16:45:15 +0200 Subject: [PATCH 082/538] Updated copyright to 2012 --- akka-actor/src/main/scala/akka/actor/Scheduler.scala | 2 +- .../src/test/scala/akka/cluster/NodeStartupSpec.scala | 2 +- akka-docs/project/licenses.rst | 2 +- akka-docs/scala/code/akka/docs/io/HTTPServer.scala | 2 +- akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala | 2 +- akka-remote/src/main/scala/akka/remote/netty/Client.scala | 2 +- .../src/main/scala/akka/remote/netty/NettyRemoteSupport.scala | 2 +- akka-remote/src/main/scala/akka/remote/netty/Server.scala | 2 +- akka-remote/src/main/scala/akka/remote/netty/Settings.scala | 4 ++-- .../test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 827e511308..8b02734cc8 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala index 33f069e3bb..711a0552b4 100644 --- a/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.cluster diff --git a/akka-docs/project/licenses.rst b/akka-docs/project/licenses.rst index d7d9865b31..b83b6a5f46 100644 --- a/akka-docs/project/licenses.rst +++ b/akka-docs/project/licenses.rst @@ -12,7 +12,7 @@ Akka License This software is licensed under the Apache 2 license, quoted below. - Copyright 2009-2011 Typesafe Inc. + Copyright 2009-2012 Typesafe Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of diff --git a/akka-docs/scala/code/akka/docs/io/HTTPServer.scala b/akka-docs/scala/code/akka/docs/io/HTTPServer.scala index 837dbf4264..01bb53023b 100644 --- a/akka-docs/scala/code/akka/docs/io/HTTPServer.scala +++ b/akka-docs/scala/code/akka/docs/io/HTTPServer.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.docs.io diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index 295227081a..7e4beecc7d 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.remote diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 7baf3011ee..0d689ef30d 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.remote.netty diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index c45fa986f2..4fd70b822f 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.remote.netty diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 7e4d1eaaa9..674023dd52 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.remote.netty diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index e2f69d77b5..f73ba52ef6 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.remote.netty @@ -73,4 +73,4 @@ class NettySettings(config: Config, val systemName: String) { case sz ⇒ sz } -} \ No newline at end of file +} diff --git a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala index dcc456b544..ea4da04f5a 100644 --- a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala +++ b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.zeromq From 803db7b5ac749083efc6f521ce23ab1be67fe396 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 21 May 2012 17:01:21 +0200 Subject: [PATCH 083/538] Added Typesafe to the 'sponsors' doc page --- akka-docs/project/sponsors.rst | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/akka-docs/project/sponsors.rst b/akka-docs/project/sponsors.rst index 085d35cc0d..630493f8c5 100644 --- a/akka-docs/project/sponsors.rst +++ b/akka-docs/project/sponsors.rst @@ -1,11 +1,23 @@ .. _sponsors: Sponsors -============ +======== + +Typesafe +-------- + +Typesafe is the company behind the Akka Project, Scala Programming Language, +Play Web Framework, Scala IDE, Simple Build Tool and many other open source +projects. It also provides the Typesafe Stack, a full-featured development +stack consisting of AKka, Play and Scala. Learn more at +`typesafe.com `_. YourKit ------- YourKit is kindly supporting open source projects with its full-featured Java Profiler. -YourKit, LLC is the creator of innovative and intelligent tools for profiling Java and .NET applications. -Take a look at YourKit’s leading software products: `YourKit Java Profiler `_ and `YourKit .NET Profiler `_ + +YourKit, LLC is the creator of innovative and intelligent tools for profiling Java +and .NET applications. 
Take a look at YourKit’s leading software products: +`YourKit Java Profiler `_ +and `YourKit .NET Profiler `_ From 22692eb95de6c3706c23d164190d7a5c4c2b3caf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 19:33:50 +0200 Subject: [PATCH 084/538] Switching to logging InvocationException's getCause --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 2 +- akka-actor/src/main/scala/akka/util/LockUtil.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 8c68ba3315..68c9097d83 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -514,7 +514,7 @@ private[akka] class ActorCell( """exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either, a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new UntypedActorFactory ... ) or is missing an appropriate, reachable no-args constructor. 
- """, i) + """, i.getCause) case NonFatal(e) ⇒ throw ActorInitializationException(self, "exception during creation", e) } diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index c3295d4b52..739232c3c9 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -25,7 +25,7 @@ final class ReentrantGuard { * An atomic switch that can be either on or off */ class Switch(startAsOn: Boolean = false) { - private val switch = new AtomicBoolean(startAsOn) + private val switch = new AtomicBoolean(startAsOn) // FIXME switch to AQS protected def transcend(from: Boolean, action: ⇒ Unit): Boolean = synchronized { if (switch.compareAndSet(from, !from)) { From ac71d404422c9699be84987c9071884964879144 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 19:59:42 +0200 Subject: [PATCH 085/538] Removing most of AkkaException since it's to be considered outdated residue by now --- .../src/main/scala/akka/AkkaException.scala | 35 ++----------------- .../src/main/scala/akka/event/Logging.scala | 2 +- 2 files changed, 4 insertions(+), 33 deletions(-) diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index e5b0cb6c80..5f5910ae44 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -4,31 +4,6 @@ package akka -object AkkaException { - //FIXME DOC - def toStringWithStackTrace(throwable: Throwable): String = throwable match { - case null ⇒ "Unknown Throwable: was 'null'" - case ae: AkkaException ⇒ ae.toLongString - case e ⇒ "%s:%s\n%s" format (e.getClass.getName, e.getMessage, stackTraceToString(e)) - } - - /** - * Returns the given Throwables stack trace as a String, or the empty String if no trace is found - * @param throwable - * @return - */ - def stackTraceToString(throwable: Throwable): String = 
throwable.getStackTrace match { - case null ⇒ "" - case x if x.length == 0 ⇒ "" - case trace ⇒ - val sb = new StringBuilder - for (i ← 0 until trace.length) - sb.append("\tat %s\n" format trace(i)) - sb.toString - } - -} - /** * Akka base Exception. Each Exception gets: *
    @@ -38,21 +13,17 @@ object AkkaException { *
*/ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed -class AkkaException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause) with Serializable { +class AkkaException(message: String, cause: Throwable) extends RuntimeException(message, cause) with Serializable { def this(msg: String) = this(msg, null) lazy val uuid = java.util.UUID.randomUUID().toString - override def toString: String = "%s:%s\n[%s]".format(getClass.getName, message, uuid) - - def toLongString: String = "%s:%s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) - - def stackTraceToString: String = AkkaException.stackTraceToString(this) + override def getMessage(): String = "[" + uuid + "] " + super.getMessage } /** * This exception is thrown when Akka detects a problem with the provided configuration */ -class ConfigurationException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { +class ConfigurationException(message: String, cause: Throwable) extends AkkaException(message, cause) { def this(msg: String) = this(msg, null) } diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 1230756517..8cb79fd299 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -530,7 +530,7 @@ object Logging { * Artificial exception injected into Error events if no Throwable is * supplied; used for getting a stack dump of error locations. */ - class EventHandlerException extends AkkaException + class EventHandlerException extends AkkaException("") /** * Exception that wraps a LogEvent. 
From 64cda1f3e75455c59f5e2cd4c2b397efb5fc730a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 20:21:12 +0200 Subject: [PATCH 086/538] Fixing AkkaException once and for all --- .../src/test/scala/akka/actor/dispatch/ActorModelSpec.scala | 4 ++-- akka-actor/src/main/scala/akka/AkkaException.scala | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 854a562745..acc416f04f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -408,9 +408,9 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa assert(Await.result(f1, timeout.duration) === "foo") assert(Await.result(f2, timeout.duration) === "bar") assert(Await.result(f4, timeout.duration) === "foo2") - assert(intercept[ActorInterruptedException](Await.result(f3, timeout.duration)).getMessage === "Ping!") + assert(intercept[ActorInterruptedException](Await.result(f3, timeout.duration)).getCause.getMessage === "Ping!") assert(Await.result(f6, timeout.duration) === "bar2") - assert(intercept[ActorInterruptedException](Await.result(f5, timeout.duration)).getMessage === "Ping!") + assert(intercept[ActorInterruptedException](Await.result(f5, timeout.duration)).getCause.getMessage === "Ping!") } } diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 7c58972d8c..04e820419f 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -18,7 +18,7 @@ class AkkaException(message: String, cause: Throwable) extends RuntimeException( lazy val uuid: String = java.util.UUID.randomUUID().toString - override def getMessage(): String = "[" + uuid + "] " + super.getMessage + override 
def toString(): String = uuid + super.toString() } /** From 8b3dbc2f7c7a0685fb03265a15ef6fb57d7c0c61 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 21 May 2012 21:17:28 +0200 Subject: [PATCH 087/538] Removing NoStackTrace from AkkaInitializationException --- akka-actor/src/main/scala/akka/actor/Actor.scala | 6 +++--- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index b26da76ff2..6fea72a5cf 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -123,10 +123,10 @@ case class InvalidActorNameException(message: String) extends AkkaException(mess /** * An ActorInitializationException is thrown when the the initialization logic for an Actor fails. */ -case class ActorInitializationException private[akka] (actor: ActorRef, message: String, cause: Throwable = null) - extends AkkaException(message, cause) - with NoStackTrace { +class ActorInitializationException private[akka] (actor: ActorRef, message: String, cause: Throwable) + extends AkkaException(message, cause) /*with NoStackTrace*/ { def this(msg: String) = this(null, msg, null) + def this(actor: ActorRef, msg: String) = this(actor, msg, null) } /** diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index a670af5ca6..3380d51de0 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -501,7 +501,7 @@ private[akka] class ActorCell( val instance = props.creator() if (instance eq null) - throw ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") + throw new ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") behaviorStack = behaviorStack match { case 
`behaviorStackPlaceHolder` ⇒ Stack.empty.push(instance.receive) @@ -527,13 +527,13 @@ private[akka] class ActorCell( if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(created), "started (" + created + ")")) } catch { case NonFatal(i: InstantiationException) ⇒ - throw ActorInitializationException(self, + throw new ActorInitializationException(self, """exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either, a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new UntypedActorFactory ... ) or is missing an appropriate, reachable no-args constructor. """, i.getCause) case NonFatal(e) ⇒ - throw ActorInitializationException(self, "exception during creation", e) + throw new ActorInitializationException(self, "exception during creation", e) } } @@ -557,7 +557,7 @@ private[akka] class ActorCell( doRecreate(cause, failedActor) } } catch { - case NonFatal(e) ⇒ throw ActorInitializationException(self, "exception during creation", e) + case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e) } } @@ -726,7 +726,7 @@ private[akka] class ActorCell( actor.supervisorStrategy.handleSupervisorFailing(self, children) clearActorFields(actor) // If this fails, we need to ensure that preRestart isn't called. } finally { - parent.tell(Failed(ActorInitializationException(self, "exception during re-creation", e)), self) + parent.tell(Failed(new ActorInitializationException(self, "exception during re-creation", e)), self) } } From 623d0f070327754939a0f9b4825a43a38b1794e5 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 May 2012 10:44:28 +0200 Subject: [PATCH 088/538] Use sbt 0.11.3. 
See #2087 * Temporarily disabled sbt-ls * Removed unused schoir plugin --- ls.sbt | 14 +++++++------- project/AkkaBuild.scala | 5 ++--- project/build.properties | 2 +- project/plugins.sbt | 4 +--- 4 files changed, 11 insertions(+), 14 deletions(-) diff --git a/ls.sbt b/ls.sbt index 83e5babc79..87e68ed303 100644 --- a/ls.sbt +++ b/ls.sbt @@ -1,13 +1,13 @@ -seq(lsSettings:_*) +// seq(lsSettings:_*) -(description in LsKeys.lsync) := "Akka is the platform for the next generation of event-driven, scalable and fault-tolerant architectures on the JVM." +// (description in LsKeys.lsync) := "Akka is the platform for the next generation of event-driven, scalable and fault-tolerant architectures on the JVM." -(homepage in LsKeys.lsync) := Some(url("http://akka.io")) +// (homepage in LsKeys.lsync) := Some(url("http://akka.io")) -(LsKeys.tags in LsKeys.lsync) := Seq("actors", "stm", "concurrency", "distributed", "fault-tolerance", "scala", "java", "futures", "dataflow", "remoting") +// (LsKeys.tags in LsKeys.lsync) := Seq("actors", "stm", "concurrency", "distributed", "fault-tolerance", "scala", "java", "futures", "dataflow", "remoting") -(LsKeys.docsUrl in LsKeys.lsync) := Some(url("http://akka.io/docs")) +// (LsKeys.docsUrl in LsKeys.lsync) := Some(url("http://akka.io/docs")) -(licenses in LsKeys.lsync) := Seq(("Apache 2", url("http://www.apache.org/licenses/LICENSE-2.0.html"))) +// (licenses in LsKeys.lsync) := Seq(("Apache 2", url("http://www.apache.org/licenses/LICENSE-2.0.html"))) -(externalResolvers in LsKeys.lsync) := Seq("Typesafe Releases" at "http://repo.typesafe.com/typesafe/releases") +// (externalResolvers in LsKeys.lsync) := Seq("Typesafe Releases" at "http://repo.typesafe.com/typesafe/releases") diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index d216464fde..0dedada9f8 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -8,7 +8,6 @@ import sbt._ import sbt.Keys._ import com.typesafe.sbtmultijvm.MultiJvmPlugin import 
com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions } -import com.typesafe.schoir.SchoirPlugin.schoirSettings import com.typesafe.sbtscalariform.ScalariformPlugin import com.typesafe.sbtscalariform.ScalariformPlugin.ScalariformKeys import java.lang.Boolean.getBoolean @@ -79,7 +78,7 @@ object AkkaBuild extends Build { id = "akka-remote", base = file("akka-remote"), dependencies = Seq(actor, actorTests % "test->test", testkit % "test->test"), - settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( + settings = defaultSettings ++ multiJvmSettings ++ Seq( libraryDependencies ++= Dependencies.remote, // disable parallel tests parallelExecution in Test := false, @@ -98,7 +97,7 @@ object AkkaBuild extends Build { id = "akka-cluster", base = file("akka-cluster"), dependencies = Seq(remote, remote % "test->test", testkit % "test->test"), - settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( + settings = defaultSettings ++ multiJvmSettings ++ Seq( libraryDependencies ++= Dependencies.cluster, // disable parallel tests parallelExecution in Test := false, diff --git a/project/build.properties b/project/build.properties index f4ff7a5afa..d4287112c6 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=0.11.2 +sbt.version=0.11.3 diff --git a/project/plugins.sbt b/project/plugins.sbt index 80ff9db95a..cb2b285a8a 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -3,12 +3,10 @@ resolvers += Classpaths.typesafeResolver addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9") -addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.2") - addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.3.1") resolvers ++= Seq( "less is" at "http://repo.lessis.me", "coda" at "http://repo.codahale.com") -addSbtPlugin("me.lessis" % "ls-sbt" % "0.1.1") +// addSbtPlugin("me.lessis" % "ls-sbt" % "0.1.1") From 
8ddd0ed262720263ca1a93a30a490eaa7cfa0a04 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 May 2012 10:50:42 +0200 Subject: [PATCH 089/538] sbtscalariform 0.4.0 --- project/plugins.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/plugins.sbt b/project/plugins.sbt index cb2b285a8a..768904eacb 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -3,7 +3,7 @@ resolvers += Classpaths.typesafeResolver addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9") -addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.3.1") +addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.4.0") resolvers ++= Seq( "less is" at "http://repo.lessis.me", From 1f38866b5b744bb6ef52a5b9c822312d17cf5d93 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 10:51:38 +0200 Subject: [PATCH 090/538] Adding more robust tests to HotSwapSpec --- .../test/scala/akka/actor/HotSwapSpec.scala | 61 ++++++++++++++++--- 1 file changed, 54 insertions(+), 7 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala index 236d3bd014..120caa3e93 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala @@ -6,10 +6,60 @@ package akka.actor import akka.testkit._ +object HotSwapSpec { + abstract class Becomer extends Actor { + + } +} + @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class HotSwapSpec extends AkkaSpec with ImplicitSender { + import HotSwapSpec.Becomer "An Actor" must { + "be able to become in its constructor" in { + val a = system.actorOf(Props(new Becomer { + context.become { case always ⇒ sender ! always } + def receive = { case always ⇒ sender ! "FAILURE" } + })) + a ! 
"pigdog" + expectMsg("pigdog") + } + + "be able to become multiple times in its constructor" in { + val a = system.actorOf(Props(new Becomer { + for (i ← 1 to 4) context.become({ case always ⇒ sender ! i + ":" + always }) + def receive = { case always ⇒ sender ! "FAILURE" } + })) + a ! "pigdog" + expectMsg("4:pigdog") + } + + "be able to become with stacking in its constructor" in { + val a = system.actorOf(Props(new Becomer { + context.become({ case always ⇒ sender ! "pigdog:" + always; context.unbecome() }, false) + def receive = { case always ⇒ sender ! "badass:" + always } + })) + a ! "pigdog" + expectMsg("pigdog:pigdog") + a ! "badass" + expectMsg("badass:badass") + } + + "be able to become, with stacking, multiple times in its constructor" in { + val a = system.actorOf(Props(new Becomer { + for (i ← 1 to 4) context.become({ case always ⇒ sender ! i + ":" + always; context.unbecome() }, false) + def receive = { case always ⇒ sender ! "FAILURE" } + })) + a ! "pigdog" + a ! "pigdog" + a ! "pigdog" + a ! "pigdog" + expectMsg("4:pigdog") + expectMsg("3:pigdog") + expectMsg("2:pigdog") + expectMsg("1:pigdog") + } "be able to hotswap its behavior with become(..)" in { val a = system.actorOf(Props(new Actor { @@ -30,13 +80,10 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { val a = system.actorOf(Props(new Actor { def receive = { case "init" ⇒ sender ! "init" - case "swap" ⇒ - context.become({ - case "swapped" ⇒ - sender ! "swapped" - case "revert" ⇒ - context.unbecome() - }) + case "swap" ⇒ context.become({ + case "swapped" ⇒ sender ! 
"swapped" + case "revert" ⇒ context.unbecome() + }) } })) From 2e53513718fa4a6400b5649eb56e5171363f7fcd Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 22 May 2012 10:54:13 +0200 Subject: [PATCH 091/538] harden up FSM.scala, improve gracefulStop ScalaDoc --- .../src/main/scala/akka/actor/FSM.scala | 190 +++++++++++++----- .../akka/pattern/GracefulStopSupport.scala | 13 ++ 2 files changed, 154 insertions(+), 49 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 762f23b16b..50c769e156 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -8,29 +8,84 @@ import akka.util._ import scala.collection.mutable import akka.routing.{ Deafen, Listen, Listeners } -//FIXME: Roland, could you go through this file? - object FSM { + /** + * A partial function value which does not match anything and can be used to + * “reset” `whenUnhandled` and `onTermination` handlers. + * + * {{{ + * onTermination(FSM.NullFunction) + * }}} + */ object NullFunction extends PartialFunction[Any, Nothing] { def isDefinedAt(o: Any) = false def apply(o: Any) = sys.error("undefined") } + /** + * Message type which is sent directly to the subscribed actor in + * [[akka.actor.FSM.SubscribeTransitionCallback]] before sending any + * [[akka.actor.FSM.Transition]] messages. + */ case class CurrentState[S](fsmRef: ActorRef, state: S) + + /** + * Message type which is used to communicate transitions between states to + * all subscribed listeners (use [[akka.actor.FSM.SubscribeTransitionCallback]]). + */ case class Transition[S](fsmRef: ActorRef, from: S, to: S) + + /** + * Send this to an [[akka.actor.FSM]] to request first the [[akka.actor.CurrentState]] + * and then a series of [[akka.actor.Transition]] updates. Cancel the subscription + * using [[akka.actor.FSM.UnsubscribeTransitionCallback]]. 
+ */ case class SubscribeTransitionCallBack(actorRef: ActorRef) + + /** + * Unsubscribe from [[akka.actor.FSM.Transition]] notifications which was + * effected by sending the corresponding [[akka.actor.FSM.SubscribeTransitionCallback]]. + */ case class UnsubscribeTransitionCallBack(actorRef: ActorRef) + /** + * Reason why this [[akka.actor.FSM]] is shutting down. + */ sealed trait Reason + + /** + * Default reason if calling `stop()`. + */ case object Normal extends Reason + + /** + * Reason given when someone was calling `system.stop(fsm)` from outside; + * also applies to `Stop` supervision directive. + */ case object Shutdown extends Reason + + /** + * Signifies that the [[akka.actor.FSM]] is shutting itself down because of + * an error, e.g. if the state to transition into does not exist. You can use + * this to communicate a more precise cause to the [[akka.actor.FSM$onTermination]] block. + */ case class Failure(cause: Any) extends Reason + /** + * This case object is received in case of a state timeout. + */ case object StateTimeout - case class TimeoutMarker(generation: Long) - case class Timer(name: String, msg: Any, repeat: Boolean, generation: Int)(implicit system: ActorSystem) { + /** + * Internal API + */ + private case class TimeoutMarker(generation: Long) + + /** + * Internal API + */ + private[akka] case class Timer(name: String, msg: Any, repeat: Boolean, generation: Int)(implicit system: ActorSystem) { private var ref: Option[Cancellable] = _ def schedule(actor: ActorRef, timeout: Duration) { @@ -57,8 +112,16 @@ object FSM { def unapply[S](in: (S, S)) = Some(in) } + /** + * Log Entry of the [[akka.actor.LoggingFSM]], can be obtained by calling `getLog`. + */ case class LogEntry[S, D](stateName: S, stateData: D, event: Any) + /** + * This captures all of the managed state of the [[akka.actor.FSM]]: the state + * name, the state data, possibly custom timeout, stop reason and replies + * accumulated while processing the last message. 
+ */ case class State[S, D](stateName: S, stateData: D, timeout: Option[Duration] = None, stopReason: Option[Reason] = None, replies: List[Any] = Nil) { /** @@ -87,6 +150,9 @@ object FSM { copy(stateData = nextStateDate) } + /** + * Internal API. + */ private[akka] def withStopReason(reason: Reason): State[S, D] = { copy(stopReason = Some(reason)) } @@ -183,8 +249,19 @@ trait FSM[S, D] extends Listeners with ActorLogging { type Timeout = Option[Duration] type TransitionHandler = PartialFunction[(S, S), Unit] - // “import” so that it is visible without an import + /* + * “import” so that these are visible without an import + */ + + /** + * This extractor is just convenience for matching a (S, S) pair, including a + * reminder what the new state is. + */ val -> = FSM.-> + + /** + * This case object is received in case of a state timeout. + */ val StateTimeout = FSM.StateTimeout /** @@ -203,13 +280,9 @@ trait FSM[S, D] extends Listeners with ActorLogging { * @param stateTimeout default state timeout for this state * @param stateFunction partial function describing response to input */ - protected final def when(stateName: S, stateTimeout: Duration = null)(stateFunction: StateFunction): Unit = + final def when(stateName: S, stateTimeout: Duration = null)(stateFunction: StateFunction): Unit = register(stateName, stateFunction, Option(stateTimeout)) - @deprecated("use the more import-friendly variant taking a Duration", "2.0") - protected final def when(stateName: S, stateTimeout: Timeout)(stateFunction: StateFunction): Unit = - register(stateName, stateFunction, stateTimeout) - /** * Set initial state. Call this method from the constructor before the #initialize method. 
* @@ -217,9 +290,7 @@ trait FSM[S, D] extends Listeners with ActorLogging { * @param stateData initial state data * @param timeout state timeout for the initial state, overriding the default timeout for that state */ - protected final def startWith(stateName: S, - stateData: D, - timeout: Timeout = None): Unit = + final def startWith(stateName: S, stateData: D, timeout: Timeout = None): Unit = currentState = FSM.State(stateName, stateData, timeout) /** @@ -229,7 +300,7 @@ trait FSM[S, D] extends Listeners with ActorLogging { * @param nextStateName state designator for the next state * @return state transition descriptor */ - protected final def goto(nextStateName: S): State = FSM.State(nextStateName, currentState.stateData) + final def goto(nextStateName: S): State = FSM.State(nextStateName, currentState.stateData) /** * Produce "empty" transition descriptor. Return this from a state function @@ -237,29 +308,29 @@ trait FSM[S, D] extends Listeners with ActorLogging { * * @return descriptor for staying in current state */ - protected final def stay(): State = goto(currentState.stateName) // cannot directly use currentState because of the timeout field + final def stay(): State = goto(currentState.stateName) // cannot directly use currentState because of the timeout field /** * Produce change descriptor to stop this FSM actor with reason "Normal". */ - protected final def stop(): State = stop(Normal) + final def stop(): State = stop(Normal) /** * Produce change descriptor to stop this FSM actor including specified reason. */ - protected final def stop(reason: Reason): State = stop(reason, currentState.stateData) + final def stop(reason: Reason): State = stop(reason, currentState.stateData) /** * Produce change descriptor to stop this FSM actor including specified reason. 
*/ - protected final def stop(reason: Reason, stateData: D): State = stay using stateData withStopReason (reason) + final def stop(reason: Reason, stateData: D): State = stay using stateData withStopReason (reason) - protected final class TransformHelper(func: StateFunction) { + final class TransformHelper(func: StateFunction) { def using(andThen: PartialFunction[State, State]): StateFunction = func andThen (andThen orElse { case x ⇒ x }) } - protected final def transform(func: StateFunction): TransformHelper = new TransformHelper(func) + final def transform(func: StateFunction): TransformHelper = new TransformHelper(func) /** * Schedule named timer to deliver message after given delay, possibly repeating. @@ -269,7 +340,9 @@ trait FSM[S, D] extends Listeners with ActorLogging { * @param repeat send once if false, scheduleAtFixedRate if true * @return current state descriptor */ - protected[akka] def setTimer(name: String, msg: Any, timeout: Duration, repeat: Boolean): State = { + final def setTimer(name: String, msg: Any, timeout: Duration, repeat: Boolean): State = { + if (debugEvent) + log.debug("setting " + (if (repeat) "repeating " else "") + "timer '" + name + "'/" + timeout + ": " + msg) if (timers contains name) { timers(name).cancel } @@ -283,24 +356,27 @@ trait FSM[S, D] extends Listeners with ActorLogging { * Cancel named timer, ensuring that the message is not subsequently delivered (no race). * @param name of the timer to cancel */ - protected[akka] def cancelTimer(name: String): Unit = + final def cancelTimer(name: String): Unit = { + if (debugEvent) + log.debug("canceling timer '" + name + "'") if (timers contains name) { timers(name).cancel timers -= name } + } /** * Inquire whether the named timer is still active. Returns true unless the * timer does not exist, has previously been canceled or if it was a * single-shot timer whose message was already received. 
*/ - protected[akka] final def timerActive_?(name: String) = timers contains name + final def timerActive_?(name: String) = timers contains name /** * Set state timeout explicitly. This method can safely be used from within a * state handler. */ - protected final def setStateTimeout(state: S, timeout: Timeout): Unit = stateTimeouts(state) = timeout + final def setStateTimeout(state: S, timeout: Timeout): Unit = stateTimeouts(state) = timeout /** * Set handler which is called upon each state transition, i.e. not when @@ -327,50 +403,52 @@ trait FSM[S, D] extends Listeners with ActorLogging { * Multiple handlers may be installed, and every one of them will be * called, not only the first one matching. */ - protected final def onTransition(transitionHandler: TransitionHandler): Unit = transitionEvent :+= transitionHandler + final def onTransition(transitionHandler: TransitionHandler): Unit = transitionEvent :+= transitionHandler /** * Convenience wrapper for using a total function instead of a partial * function literal. To be used with onTransition. */ - implicit protected final def total2pf(transitionHandler: (S, S) ⇒ Unit): TransitionHandler = + implicit final def total2pf(transitionHandler: (S, S) ⇒ Unit): TransitionHandler = new TransitionHandler { def isDefinedAt(in: (S, S)) = true def apply(in: (S, S)) { transitionHandler(in._1, in._2) } } /** - * Set handler which is called upon termination of this FSM actor. + * Set handler which is called upon termination of this FSM actor. Calling + * this method again will overwrite the previous contents. */ - protected final def onTermination(terminationHandler: PartialFunction[StopEvent, Unit]): Unit = + final def onTermination(terminationHandler: PartialFunction[StopEvent, Unit]): Unit = terminateEvent = terminationHandler /** - * Set handler which is called upon reception of unhandled messages. + * Set handler which is called upon reception of unhandled messages. 
Calling + * this method again will overwrite the previous contents. */ - protected final def whenUnhandled(stateFunction: StateFunction): Unit = + final def whenUnhandled(stateFunction: StateFunction): Unit = handleEvent = stateFunction orElse handleEventDefault /** * Verify existence of initial state and setup timers. This should be the * last call within the constructor. */ - protected final def initialize: Unit = makeTransition(currentState) + final def initialize: Unit = makeTransition(currentState) /** * Return current state name (i.e. object of type S) */ - protected[akka] def stateName: S = currentState.stateName + final def stateName: S = currentState.stateName /** * Return current state data (i.e. object of type D) */ - protected[akka] def stateData: D = currentState.stateData + final def stateData: D = currentState.stateData /** * Return next state data (available in onTransition handlers) */ - protected[akka] def nextStateData = nextState.stateData + final def nextStateData = nextState.stateData /* * **************************************************************** @@ -378,6 +456,8 @@ trait FSM[S, D] extends Listeners with ActorLogging { * **************************************************************** */ + private[akka] def debugEvent: Boolean = false + /* * FSM State data and current timeout handling */ @@ -525,7 +605,21 @@ trait FSM[S, D] extends Listeners with ActorLogging { } } - override def postStop(): Unit = { terminate(stay withStopReason Shutdown) } + /** + * Call `onTermination` hook; if you want to retain this behavior when + * overriding make sure to call `super.postStop()`. + * + * Please note that this method is called by default from `preRestart()`, + * so override that one if `onTermination` shall not be called during + * restart. 
+ */ + override def postStop(): Unit = { + /* + * setting this instance’s state to terminated does no harm during restart + * since the new instance will initialize fresh using startWith() + */ + terminate(stay withStopReason Shutdown) + } private def terminate(nextState: State): Unit = { if (!currentState.stopReason.isDefined) { @@ -542,13 +636,22 @@ trait FSM[S, D] extends Listeners with ActorLogging { } } + /** + * All messages sent to the [[akka.actor.FSM]] will be wrapped inside an + * `Event`, which allows pattern matching to extract both state and data. + */ case class Event(event: Any, stateData: D) + /** + * Case class representing the state of the [[akka.actor.FSM]] whithin the + * `onTermination` block. + */ case class StopEvent(reason: Reason, currentState: S, stateData: D) } /** - * Stackable trait for FSM which adds a rolling event log. + * Stackable trait for [[akka.actor.FSM]] which adds a rolling event log and + * debug logging capabilities (analogous to [[akka.event.LoggingReceive]]). 
* * @since 1.2 */ @@ -558,7 +661,7 @@ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ def logDepth: Int = 0 - private val debugEvent = context.system.settings.FsmDebugEvent + private[akka] override val debugEvent = context.system.settings.FsmDebugEvent private val events = new Array[Event](logDepth) private val states = new Array[AnyRef](logDepth) @@ -575,18 +678,6 @@ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ } } - protected[akka] abstract override def setTimer(name: String, msg: Any, timeout: Duration, repeat: Boolean): State = { - if (debugEvent) - log.debug("setting " + (if (repeat) "repeating " else "") + "timer '" + name + "'/" + timeout + ": " + msg) - super.setTimer(name, msg, timeout, repeat) - } - - protected[akka] abstract override def cancelTimer(name: String): Unit = { - if (debugEvent) - log.debug("canceling timer '" + name + "'") - super.cancelTimer(name) - } - private[akka] abstract override def processEvent(event: Event, source: AnyRef): Unit = { if (debugEvent) { val srcstr = source match { @@ -615,6 +706,7 @@ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ /** * Retrieve current rolling log in oldest-first order. The log is filled with * each incoming event before processing by the user supplied state handler. + * The log entries are lost when this actor is restarted. 
*/ protected def getLog: IndexedSeq[LogEntry[S, D]] = { val log = events zip states filter (_._1 ne null) map (x ⇒ LogEntry(x._2.asInstanceOf[S], x._1.stateData, x._1.event)) diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 5f78e8ba27..adcbe53f0b 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -17,6 +17,19 @@ trait GracefulStopSupport { * Useful when you need to wait for termination or compose ordered termination of several actors, * which should only be done outside of the ActorSystem as blocking inside Actors is discouraged. * + * IMPORTANT NOTICE: the actor being terminated and its supervisor + * being informed of the availability of the deceased actor’s name are two + * distinct operations, which do not obey any reliable ordering. Especially + * the following will NOT work: + * + * {{{ + * def receive = { + * case msg => + * Await.result(gracefulStop(someChild, timeout), timeout) + * context.actorOf(Props(...), "someChild") // assuming that that was someChild’s name, this will NOT work + * } + * }}} + * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] * is completed with failure [[akka.pattern.AskTimeoutException]]. 
*/ From d66d642b8fd5a21cdc24003a9d23f45d38ab4bf0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 10:57:26 +0200 Subject: [PATCH 092/538] Fixing bad ScalaDoc --- akka-actor/src/main/scala/akka/actor/Actor.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 6fea72a5cf..b611d96842 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -181,13 +181,13 @@ object Status { /** * Mix in ActorLogging into your Actor to easily obtain a reference to a logger, which is available under the name "log". * - * {{ + * {{{ * class MyActor extends Actor with ActorLogging { * def receive = { * case "pigdog" => log.info("We've got yet another pigdog on our hands") * } * } - * }} + * }}} */ trait ActorLogging { this: Actor ⇒ val log = akka.event.Logging(context.system, this) From 09469b73e1ad45c286e0c39bfa60a001c4c36e80 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 11:08:29 +0200 Subject: [PATCH 093/538] Cleaning up some horribly written Java code --- akka-kernel/src/main/java/akka/jmx/Client.java | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/akka-kernel/src/main/java/akka/jmx/Client.java b/akka-kernel/src/main/java/akka/jmx/Client.java index 9ebf63e9eb..83a8f9246f 100644 --- a/akka-kernel/src/main/java/akka/jmx/Client.java +++ b/akka-kernel/src/main/java/akka/jmx/Client.java @@ -201,12 +201,10 @@ public class Client { * @param password * @return Credentials as map for RMI. 
*/ - protected Map formatCredentials(final String login, + protected Map formatCredentials(final String login, final String password) { - Map env = null; - String[] creds = new String[] {login, password}; - env = new HashMap(1); - env.put(JMXConnector.CREDENTIALS, creds); + Map env = new HashMap(1); + env.put(JMXConnector.CREDENTIALS, new String[] {login, password}); return env; } @@ -214,10 +212,8 @@ public class Client { final String login, final String password) throws IOException { // Make up the jmx rmi URL and get a connector. - JMXServiceURL rmiurl = new JMXServiceURL("service:jmx:rmi://" - + hostport + "/jndi/rmi://" + hostport + "/jmxrmi"); - return JMXConnectorFactory.connect(rmiurl, - formatCredentials(login, password)); + JMXServiceURL rmiurl = new JMXServiceURL("service:jmx:rmi://"+hostport+"/jndi/rmi://"+hostport+"/jmxrmi"); + return JMXConnectorFactory.connect(rmiurl,formatCredentials(login, password)); } protected ObjectName getObjectName(final String beanname) From 0eae9d8d2289d1e4db4594e8cc149b662e609e41 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 May 2012 11:37:09 +0200 Subject: [PATCH 094/538] Move all doc examples out of the akka-package to avoid use of private APIs. 
See #2092 --- .../general/code/{akka => }/docs/config/ConfigDoc.java | 2 +- .../code/{akka => }/docs/config/ConfigDocSpec.scala | 2 +- .../java/code/{akka => }/docs/actor/FSMDocTest.scala | 2 +- .../code/{akka => }/docs/actor/FSMDocTestBase.java | 2 +- .../code/{akka => }/docs/actor/FaultHandlingTest.scala | 2 +- .../{akka => }/docs/actor/FaultHandlingTestBase.java | 2 +- .../code/{akka => }/docs/actor/FirstUntypedActor.java | 2 +- .../code/{akka => }/docs/actor/ImmutableMessage.java | 2 +- .../docs/actor/MyReceivedTimeoutUntypedActor.java | 2 +- .../code/{akka => }/docs/actor/MyUntypedActor.java | 2 +- .../code/{akka => }/docs/actor/SchedulerDocTest.scala | 2 +- .../{akka => }/docs/actor/SchedulerDocTestBase.java | 2 +- .../code/{akka => }/docs/actor/TypedActorDocTest.scala | 2 +- .../{akka => }/docs/actor/TypedActorDocTestBase.java | 2 +- .../{akka => }/docs/actor/UntypedActorDocTest.scala | 2 +- .../{akka => }/docs/actor/UntypedActorDocTestBase.java | 2 +- .../{akka => }/docs/actor/UntypedActorSwapper.java | 4 ++-- .../docs/actor/japi/FaultHandlingDocSample.java | 10 +++++----- .../code/{akka => }/docs/agent/AgentDocJavaSpec.scala | 4 ++-- .../java/code/{akka => }/docs/agent/AgentDocTest.java | 2 +- .../{akka => }/docs/dispatcher/DispatcherDocTest.scala | 2 +- .../docs/dispatcher/DispatcherDocTestBase.java | 6 +++--- .../code/{akka => }/docs/event/LoggingDocTest.scala | 2 +- .../code/{akka => }/docs/event/LoggingDocTestBase.java | 2 +- .../{akka => }/docs/extension/ExtensionDocTest.scala | 2 +- .../docs/extension/ExtensionDocTestBase.java | 2 +- .../docs/extension/SettingsExtensionDocTest.scala | 2 +- .../docs/extension/SettingsExtensionDocTestBase.java | 2 +- .../code/{akka => }/docs/future/FutureDocTest.scala | 2 +- .../code/{akka => }/docs/future/FutureDocTestBase.java | 2 +- .../{akka => }/docs/jrouting/CustomRouterDocTest.scala | 2 +- .../docs/jrouting/CustomRouterDocTestBase.java | 8 ++++---- .../code/{akka => }/docs/jrouting/FibonacciActor.java | 2 +- 
.../code/{akka => }/docs/jrouting/ParentActor.java | 2 +- .../code/{akka => }/docs/jrouting/PrintlnActor.java | 2 +- .../docs/jrouting/RouterViaConfigExample.java | 2 +- .../docs/jrouting/RouterViaProgramExample.java | 2 +- .../{akka => }/docs/remoting/RemoteActorExample.java | 2 +- .../docs/remoting/RemoteDeploymentDocTest.scala | 2 +- .../docs/remoting/RemoteDeploymentDocTestBase.java | 2 +- .../docs/serialization/SerializationDocTest.scala | 2 +- .../docs/serialization/SerializationDocTestBase.java | 2 +- .../{akka => }/docs/transactor/CoordinatedCounter.java | 2 +- .../code/{akka => }/docs/transactor/Coordinator.java | 2 +- .../java/code/{akka => }/docs/transactor/Counter.java | 2 +- .../{akka => }/docs/transactor/FriendlyCounter.java | 2 +- .../code/{akka => }/docs/transactor/Increment.java | 2 +- .../java/code/{akka => }/docs/transactor/Message.java | 2 +- .../docs/transactor/TransactorDocJavaSpec.scala | 4 ++-- .../{akka => }/docs/transactor/TransactorDocTest.java | 2 +- .../code/{akka => }/docs/zeromq/ZeromqDocTest.scala | 2 +- .../code/{akka => }/docs/zeromq/ZeromqDocTestBase.java | 2 +- akka-docs/java/extending-akka.rst | 2 +- akka-docs/java/serialization.rst | 2 +- .../docs/actor/mailbox/DurableMailboxDocSpec.scala | 4 ++-- .../docs/actor/mailbox/DurableMailboxDocTest.scala | 2 +- .../docs/actor/mailbox/DurableMailboxDocTestBase.java | 2 +- .../code/{akka => }/docs/actor/ActorDocSpec.scala | 2 +- .../scala/code/{akka => }/docs/actor/FSMDocSpec.scala | 2 +- .../{akka => }/docs/actor/FaultHandlingDocSample.scala | 2 +- .../{akka => }/docs/actor/FaultHandlingDocSpec.scala | 2 +- .../code/{akka => }/docs/actor/SchedulerDocSpec.scala | 2 +- .../code/{akka => }/docs/actor/TypedActorDocSpec.scala | 2 +- .../code/{akka => }/docs/actor/UnnestedReceives.scala | 2 +- .../code/{akka => }/docs/agent/AgentDocSpec.scala | 2 +- .../scala/code/{akka => }/docs/camel/Consumers.scala | 2 +- .../code/{akka => }/docs/camel/Introduction.scala | 2 +- .../{akka => 
}/docs/dispatcher/DispatcherDocSpec.scala | 6 +++--- .../code/{akka => }/docs/event/LoggingDocSpec.scala | 2 +- .../{akka => }/docs/extension/ExtensionDocSpec.scala | 4 ++-- .../docs/extension/SettingsExtensionDocSpec.scala | 2 +- .../code/{akka => }/docs/future/FutureDocSpec.scala | 2 +- .../scala/code/{akka => }/docs/io/HTTPServer.scala | 2 +- .../docs/remoting/RemoteDeploymentDocSpec.scala | 2 +- .../code/{akka => }/docs/routing/RouterDocSpec.scala | 2 +- .../{akka => }/docs/routing/RouterTypeExample.scala | 2 +- .../docs/routing/RouterViaConfigExample.scala | 2 +- .../docs/routing/RouterViaProgramExample.scala | 2 +- .../docs/serialization/SerializationDocSpec.scala | 10 +++++----- .../code/{akka => }/docs/testkit/PlainWordSpec.scala | 2 +- .../{akka => }/docs/testkit/TestKitUsageSpec.scala | 2 +- .../code/{akka => }/docs/testkit/TestkitDocSpec.scala | 2 +- .../{akka => }/docs/transactor/TransactorDocSpec.scala | 2 +- .../code/{akka => }/docs/zeromq/ZeromqDocSpec.scala | 2 +- akka-docs/scala/serialization.rst | 2 +- 85 files changed, 105 insertions(+), 105 deletions(-) rename akka-docs/general/code/{akka => }/docs/config/ConfigDoc.java (97%) rename akka-docs/general/code/{akka => }/docs/config/ConfigDocSpec.scala (97%) rename akka-docs/java/code/{akka => }/docs/actor/FSMDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/actor/FSMDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/actor/FaultHandlingTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/actor/FaultHandlingTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/actor/FirstUntypedActor.java (95%) rename akka-docs/java/code/{akka => }/docs/actor/ImmutableMessage.java (96%) rename akka-docs/java/code/{akka => }/docs/actor/MyReceivedTimeoutUntypedActor.java (96%) rename akka-docs/java/code/{akka => }/docs/actor/MyUntypedActor.java (95%) rename akka-docs/java/code/{akka => }/docs/actor/SchedulerDocTest.scala (88%) rename akka-docs/java/code/{akka => 
}/docs/actor/SchedulerDocTestBase.java (98%) rename akka-docs/java/code/{akka => }/docs/actor/TypedActorDocTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/actor/TypedActorDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/actor/UntypedActorDocTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/actor/UntypedActorDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/actor/UntypedActorSwapper.java (93%) rename akka-docs/java/code/{akka => }/docs/actor/japi/FaultHandlingDocSample.java (97%) rename akka-docs/java/code/{akka => }/docs/agent/AgentDocJavaSpec.scala (78%) rename akka-docs/java/code/{akka => }/docs/agent/AgentDocTest.java (98%) rename akka-docs/java/code/{akka => }/docs/dispatcher/DispatcherDocTest.scala (86%) rename akka-docs/java/code/{akka => }/docs/dispatcher/DispatcherDocTestBase.java (96%) rename akka-docs/java/code/{akka => }/docs/event/LoggingDocTest.scala (88%) rename akka-docs/java/code/{akka => }/docs/event/LoggingDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/extension/ExtensionDocTest.scala (86%) rename akka-docs/java/code/{akka => }/docs/extension/ExtensionDocTestBase.java (98%) rename akka-docs/java/code/{akka => }/docs/extension/SettingsExtensionDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/extension/SettingsExtensionDocTestBase.java (98%) rename akka-docs/java/code/{akka => }/docs/future/FutureDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/future/FutureDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/jrouting/CustomRouterDocTest.scala (80%) rename akka-docs/java/code/{akka => }/docs/jrouting/CustomRouterDocTestBase.java (95%) rename akka-docs/java/code/{akka => }/docs/jrouting/FibonacciActor.java (97%) rename akka-docs/java/code/{akka => }/docs/jrouting/ParentActor.java (98%) rename akka-docs/java/code/{akka => }/docs/jrouting/PrintlnActor.java (92%) rename akka-docs/java/code/{akka => 
}/docs/jrouting/RouterViaConfigExample.java (98%) rename akka-docs/java/code/{akka => }/docs/jrouting/RouterViaProgramExample.java (99%) rename akka-docs/java/code/{akka => }/docs/remoting/RemoteActorExample.java (95%) rename akka-docs/java/code/{akka => }/docs/remoting/RemoteDeploymentDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/remoting/RemoteDeploymentDocTestBase.java (97%) rename akka-docs/java/code/{akka => }/docs/serialization/SerializationDocTest.scala (85%) rename akka-docs/java/code/{akka => }/docs/serialization/SerializationDocTestBase.java (99%) rename akka-docs/java/code/{akka => }/docs/transactor/CoordinatedCounter.java (97%) rename akka-docs/java/code/{akka => }/docs/transactor/Coordinator.java (96%) rename akka-docs/java/code/{akka => }/docs/transactor/Counter.java (95%) rename akka-docs/java/code/{akka => }/docs/transactor/FriendlyCounter.java (97%) rename akka-docs/java/code/{akka => }/docs/transactor/Increment.java (93%) rename akka-docs/java/code/{akka => }/docs/transactor/Message.java (77%) rename akka-docs/java/code/{akka => }/docs/transactor/TransactorDocJavaSpec.scala (75%) rename akka-docs/java/code/{akka => }/docs/transactor/TransactorDocTest.java (99%) rename akka-docs/java/code/{akka => }/docs/zeromq/ZeromqDocTest.scala (87%) rename akka-docs/java/code/{akka => }/docs/zeromq/ZeromqDocTestBase.java (99%) rename akka-docs/modules/code/{akka => }/docs/actor/mailbox/DurableMailboxDocSpec.scala (97%) rename akka-docs/modules/code/{akka => }/docs/actor/mailbox/DurableMailboxDocTest.scala (85%) rename akka-docs/modules/code/{akka => }/docs/actor/mailbox/DurableMailboxDocTestBase.java (97%) rename akka-docs/scala/code/{akka => }/docs/actor/ActorDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/FSMDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/FaultHandlingDocSample.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/FaultHandlingDocSpec.scala (99%) rename 
akka-docs/scala/code/{akka => }/docs/actor/SchedulerDocSpec.scala (98%) rename akka-docs/scala/code/{akka => }/docs/actor/TypedActorDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/actor/UnnestedReceives.scala (98%) rename akka-docs/scala/code/{akka => }/docs/agent/AgentDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/camel/Consumers.scala (96%) rename akka-docs/scala/code/{akka => }/docs/camel/Introduction.scala (98%) rename akka-docs/scala/code/{akka => }/docs/dispatcher/DispatcherDocSpec.scala (97%) rename akka-docs/scala/code/{akka => }/docs/event/LoggingDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/extension/ExtensionDocSpec.scala (96%) rename akka-docs/scala/code/{akka => }/docs/extension/SettingsExtensionDocSpec.scala (98%) rename akka-docs/scala/code/{akka => }/docs/future/FutureDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/io/HTTPServer.scala (99%) rename akka-docs/scala/code/{akka => }/docs/remoting/RemoteDeploymentDocSpec.scala (98%) rename akka-docs/scala/code/{akka => }/docs/routing/RouterDocSpec.scala (96%) rename akka-docs/scala/code/{akka => }/docs/routing/RouterTypeExample.scala (99%) rename akka-docs/scala/code/{akka => }/docs/routing/RouterViaConfigExample.scala (98%) rename akka-docs/scala/code/{akka => }/docs/routing/RouterViaProgramExample.scala (98%) rename akka-docs/scala/code/{akka => }/docs/serialization/SerializationDocSpec.scala (94%) rename akka-docs/scala/code/{akka => }/docs/testkit/PlainWordSpec.scala (97%) rename akka-docs/scala/code/{akka => }/docs/testkit/TestKitUsageSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/testkit/TestkitDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/transactor/TransactorDocSpec.scala (99%) rename akka-docs/scala/code/{akka => }/docs/zeromq/ZeromqDocSpec.scala (99%) diff --git a/akka-docs/general/code/akka/docs/config/ConfigDoc.java b/akka-docs/general/code/docs/config/ConfigDoc.java similarity index 97% rename 
from akka-docs/general/code/akka/docs/config/ConfigDoc.java rename to akka-docs/general/code/docs/config/ConfigDoc.java index 69d856947f..ee6393fb1a 100644 --- a/akka-docs/general/code/akka/docs/config/ConfigDoc.java +++ b/akka-docs/general/code/docs/config/ConfigDoc.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.config; +package docs.config; import akka.actor.ActorSystem; import com.typesafe.config.*; diff --git a/akka-docs/general/code/akka/docs/config/ConfigDocSpec.scala b/akka-docs/general/code/docs/config/ConfigDocSpec.scala similarity index 97% rename from akka-docs/general/code/akka/docs/config/ConfigDocSpec.scala rename to akka-docs/general/code/docs/config/ConfigDocSpec.scala index 3b7cb10ed2..643116e14f 100644 --- a/akka-docs/general/code/akka/docs/config/ConfigDocSpec.scala +++ b/akka-docs/general/code/docs/config/ConfigDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.config +package docs.config import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers diff --git a/akka-docs/java/code/akka/docs/actor/FSMDocTest.scala b/akka-docs/java/code/docs/actor/FSMDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/actor/FSMDocTest.scala rename to akka-docs/java/code/docs/actor/FSMDocTest.scala index 11bb542808..7077365d6d 100644 --- a/akka-docs/java/code/akka/docs/actor/FSMDocTest.scala +++ b/akka-docs/java/code/docs/actor/FSMDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/FSMDocTestBase.java b/akka-docs/java/code/docs/actor/FSMDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/actor/FSMDocTestBase.java rename to akka-docs/java/code/docs/actor/FSMDocTestBase.java index aeaca63f92..9064833cb0 100644 --- a/akka-docs/java/code/akka/docs/actor/FSMDocTestBase.java +++ b/akka-docs/java/code/docs/actor/FSMDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#imports-data import java.util.ArrayList; diff --git a/akka-docs/java/code/akka/docs/actor/FaultHandlingTest.scala b/akka-docs/java/code/docs/actor/FaultHandlingTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/actor/FaultHandlingTest.scala rename to akka-docs/java/code/docs/actor/FaultHandlingTest.scala index 03802d6968..9b6fad0609 100644 --- a/akka-docs/java/code/akka/docs/actor/FaultHandlingTest.scala +++ b/akka-docs/java/code/docs/actor/FaultHandlingTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite class FaultHandlingTest extends FaultHandlingTestBase with JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java rename to akka-docs/java/code/docs/actor/FaultHandlingTestBase.java index dc2ce9bae7..bcc4705948 100644 --- a/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java +++ b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor; +package docs.actor; //#testkit import akka.actor.ActorRef; diff --git a/akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java b/akka-docs/java/code/docs/actor/FirstUntypedActor.java similarity index 95% rename from akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java rename to akka-docs/java/code/docs/actor/FirstUntypedActor.java index 6cfbe75b99..fa5d3d35a0 100644 --- a/akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java +++ b/akka-docs/java/code/docs/actor/FirstUntypedActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; import akka.actor.ActorRef; import akka.actor.Props; diff --git a/akka-docs/java/code/akka/docs/actor/ImmutableMessage.java b/akka-docs/java/code/docs/actor/ImmutableMessage.java similarity index 96% rename from akka-docs/java/code/akka/docs/actor/ImmutableMessage.java rename to akka-docs/java/code/docs/actor/ImmutableMessage.java index 41bc4eb0e5..60e72ecfb5 100644 --- a/akka-docs/java/code/akka/docs/actor/ImmutableMessage.java +++ b/akka-docs/java/code/docs/actor/ImmutableMessage.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; import java.util.ArrayList; import java.util.Collections; diff --git a/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java b/akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java similarity index 96% rename from akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java rename to akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java index 97742d9bd1..025d634b09 100644 --- a/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java +++ b/akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor; +package docs.actor; //#receive-timeout import akka.actor.ReceiveTimeout; diff --git a/akka-docs/java/code/akka/docs/actor/MyUntypedActor.java b/akka-docs/java/code/docs/actor/MyUntypedActor.java similarity index 95% rename from akka-docs/java/code/akka/docs/actor/MyUntypedActor.java rename to akka-docs/java/code/docs/actor/MyUntypedActor.java index 93a817ef2c..f31fc402c7 100644 --- a/akka-docs/java/code/akka/docs/actor/MyUntypedActor.java +++ b/akka-docs/java/code/docs/actor/MyUntypedActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#my-untyped-actor import akka.actor.UntypedActor; diff --git a/akka-docs/java/code/akka/docs/actor/SchedulerDocTest.scala b/akka-docs/java/code/docs/actor/SchedulerDocTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/actor/SchedulerDocTest.scala rename to akka-docs/java/code/docs/actor/SchedulerDocTest.scala index ecad03213e..9e6b4c9613 100644 --- a/akka-docs/java/code/akka/docs/actor/SchedulerDocTest.scala +++ b/akka-docs/java/code/docs/actor/SchedulerDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/SchedulerDocTestBase.java b/akka-docs/java/code/docs/actor/SchedulerDocTestBase.java similarity index 98% rename from akka-docs/java/code/akka/docs/actor/SchedulerDocTestBase.java rename to akka-docs/java/code/docs/actor/SchedulerDocTestBase.java index 7a58da0f5e..d7e8fa644f 100644 --- a/akka-docs/java/code/akka/docs/actor/SchedulerDocTestBase.java +++ b/akka-docs/java/code/docs/actor/SchedulerDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor; +package docs.actor; //#imports1 import akka.actor.Props; diff --git a/akka-docs/java/code/akka/docs/actor/TypedActorDocTest.scala b/akka-docs/java/code/docs/actor/TypedActorDocTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/actor/TypedActorDocTest.scala rename to akka-docs/java/code/docs/actor/TypedActorDocTest.scala index 476d570b4a..0d9796ca56 100644 --- a/akka-docs/java/code/akka/docs/actor/TypedActorDocTest.scala +++ b/akka-docs/java/code/docs/actor/TypedActorDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java b/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java rename to akka-docs/java/code/docs/actor/TypedActorDocTestBase.java index 30db92ee0f..99dda513ab 100644 --- a/akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java +++ b/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#imports diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTest.scala b/akka-docs/java/code/docs/actor/UntypedActorDocTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/actor/UntypedActorDocTest.scala rename to akka-docs/java/code/docs/actor/UntypedActorDocTest.scala index e341914c8c..8047b94df9 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTest.scala +++ b/akka-docs/java/code/docs/actor/UntypedActorDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor +package docs.actor import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java rename to akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java index 146131f61e..c82ce30661 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor; +package docs.actor; //#imports import akka.actor.ActorRef; diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java b/akka-docs/java/code/docs/actor/UntypedActorSwapper.java similarity index 93% rename from akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java rename to akka-docs/java/code/docs/actor/UntypedActorSwapper.java index b2fb98c305..985c75bfd7 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java +++ b/akka-docs/java/code/docs/actor/UntypedActorSwapper.java @@ -1,9 +1,9 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor; +package docs.actor; -import static akka.docs.actor.UntypedActorSwapper.Swap.SWAP; +import static docs.actor.UntypedActorSwapper.Swap.SWAP; import akka.actor.ActorRef; import akka.actor.Props; import akka.actor.ActorSystem; diff --git a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java similarity index 97% rename from akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java rename to akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java index 4ba8358174..1213ab5949 100644 --- a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java +++ b/akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor.japi; +package docs.actor.japi; //#all //#imports @@ -26,10 +26,10 @@ import static akka.actor.SupervisorStrategy.*; import static akka.pattern.Patterns.ask; import static akka.pattern.Patterns.pipe; -import static akka.docs.actor.japi.FaultHandlingDocSample.WorkerApi.*; -import static akka.docs.actor.japi.FaultHandlingDocSample.CounterServiceApi.*; -import static akka.docs.actor.japi.FaultHandlingDocSample.CounterApi.*; -import static akka.docs.actor.japi.FaultHandlingDocSample.StorageApi.*; +import static docs.actor.japi.FaultHandlingDocSample.WorkerApi.*; +import static docs.actor.japi.FaultHandlingDocSample.CounterServiceApi.*; +import static docs.actor.japi.FaultHandlingDocSample.CounterApi.*; +import static docs.actor.japi.FaultHandlingDocSample.StorageApi.*; //#imports diff --git a/akka-docs/java/code/akka/docs/agent/AgentDocJavaSpec.scala b/akka-docs/java/code/docs/agent/AgentDocJavaSpec.scala similarity index 78% rename from akka-docs/java/code/akka/docs/agent/AgentDocJavaSpec.scala rename to akka-docs/java/code/docs/agent/AgentDocJavaSpec.scala index c3c0c296ed..566a439c62 100644 --- 
a/akka-docs/java/code/akka/docs/agent/AgentDocJavaSpec.scala +++ b/akka-docs/java/code/docs/agent/AgentDocJavaSpec.scala @@ -1,10 +1,10 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.agent +package docs.agent import org.scalatest.junit.JUnitWrapperSuite class AgentDocJavaSpec extends JUnitWrapperSuite( - "akka.docs.agent.AgentDocTest", + "docs.agent.AgentDocTest", Thread.currentThread.getContextClassLoader) \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/agent/AgentDocTest.java b/akka-docs/java/code/docs/agent/AgentDocTest.java similarity index 98% rename from akka-docs/java/code/akka/docs/agent/AgentDocTest.java rename to akka-docs/java/code/docs/agent/AgentDocTest.java index 553d64eee5..0da96ebfc9 100644 --- a/akka-docs/java/code/akka/docs/agent/AgentDocTest.java +++ b/akka-docs/java/code/docs/agent/AgentDocTest.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.agent; +package docs.agent; import static org.junit.Assert.*; diff --git a/akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTest.scala b/akka-docs/java/code/docs/dispatcher/DispatcherDocTest.scala similarity index 86% rename from akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTest.scala rename to akka-docs/java/code/docs/dispatcher/DispatcherDocTest.scala index 8216c36757..62c9e37051 100644 --- a/akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTest.scala +++ b/akka-docs/java/code/docs/dispatcher/DispatcherDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.dispatcher +package docs.dispatcher import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTestBase.java b/akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java similarity index 96% rename from akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTestBase.java rename to akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java index f080dd52b9..94e4b38121 100644 --- a/akka-docs/java/code/akka/docs/dispatcher/DispatcherDocTestBase.java +++ b/akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.dispatcher; +package docs.dispatcher; //#imports import akka.actor.*; @@ -32,8 +32,8 @@ import static org.junit.Assert.*; import com.typesafe.config.ConfigFactory; -import akka.docs.actor.MyUntypedActor; -import akka.docs.actor.UntypedActorDocTestBase.MyActor; +import docs.actor.MyUntypedActor; +import docs.actor.UntypedActorDocTestBase.MyActor; import akka.testkit.AkkaSpec; public class DispatcherDocTestBase { diff --git a/akka-docs/java/code/akka/docs/event/LoggingDocTest.scala b/akka-docs/java/code/docs/event/LoggingDocTest.scala similarity index 88% rename from akka-docs/java/code/akka/docs/event/LoggingDocTest.scala rename to akka-docs/java/code/docs/event/LoggingDocTest.scala index ee44f502a4..1d7f34827f 100644 --- a/akka-docs/java/code/akka/docs/event/LoggingDocTest.scala +++ b/akka-docs/java/code/docs/event/LoggingDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.event +package docs.event import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/event/LoggingDocTestBase.java b/akka-docs/java/code/docs/event/LoggingDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/event/LoggingDocTestBase.java rename to akka-docs/java/code/docs/event/LoggingDocTestBase.java index 8f7b63d8a8..77e46b3f92 100644 --- a/akka-docs/java/code/akka/docs/event/LoggingDocTestBase.java +++ b/akka-docs/java/code/docs/event/LoggingDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.event; +package docs.event; //#imports import akka.event.Logging; diff --git a/akka-docs/java/code/akka/docs/extension/ExtensionDocTest.scala b/akka-docs/java/code/docs/extension/ExtensionDocTest.scala similarity index 86% rename from akka-docs/java/code/akka/docs/extension/ExtensionDocTest.scala rename to akka-docs/java/code/docs/extension/ExtensionDocTest.scala index 7b1b43b6ca..f22e300cfc 100644 --- a/akka-docs/java/code/akka/docs/extension/ExtensionDocTest.scala +++ b/akka-docs/java/code/docs/extension/ExtensionDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension +package docs.extension import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/extension/ExtensionDocTestBase.java b/akka-docs/java/code/docs/extension/ExtensionDocTestBase.java similarity index 98% rename from akka-docs/java/code/akka/docs/extension/ExtensionDocTestBase.java rename to akka-docs/java/code/docs/extension/ExtensionDocTestBase.java index 11dfe4c198..7623d1cc0a 100644 --- a/akka-docs/java/code/akka/docs/extension/ExtensionDocTestBase.java +++ b/akka-docs/java/code/docs/extension/ExtensionDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.extension; +package docs.extension; //#imports import akka.actor.*; diff --git a/akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTest.scala b/akka-docs/java/code/docs/extension/SettingsExtensionDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTest.scala rename to akka-docs/java/code/docs/extension/SettingsExtensionDocTest.scala index 0979c00d4f..60289bfdca 100644 --- a/akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTest.scala +++ b/akka-docs/java/code/docs/extension/SettingsExtensionDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension +package docs.extension import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTestBase.java b/akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java similarity index 98% rename from akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTestBase.java rename to akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java index 9aef290ecb..265c91b206 100644 --- a/akka-docs/java/code/akka/docs/extension/SettingsExtensionDocTestBase.java +++ b/akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension; +package docs.extension; //#imports import akka.actor.Extension; diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTest.scala b/akka-docs/java/code/docs/future/FutureDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/future/FutureDocTest.scala rename to akka-docs/java/code/docs/future/FutureDocTest.scala index 8716beeced..fef5f3d967 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTest.scala +++ b/akka-docs/java/code/docs/future/FutureDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.future +package docs.future import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/docs/future/FutureDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/future/FutureDocTestBase.java rename to akka-docs/java/code/docs/future/FutureDocTestBase.java index d8e59f5d30..2fe2220223 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java +++ b/akka-docs/java/code/docs/future/FutureDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.future; +package docs.future; //#imports1 import akka.dispatch.*; diff --git a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala b/akka-docs/java/code/docs/jrouting/CustomRouterDocTest.scala similarity index 80% rename from akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala rename to akka-docs/java/code/docs/jrouting/CustomRouterDocTest.scala index 48e323c634..d11b07f22a 100644 --- a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala +++ b/akka-docs/java/code/docs/jrouting/CustomRouterDocTest.scala @@ -1,4 +1,4 @@ -package akka.docs.jrouting; +package docs.jrouting; import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java b/akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java similarity index 95% rename from akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java rename to akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java index dc4d140ec3..74e7759b62 100644 --- a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java +++ b/akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.jrouting; +package docs.jrouting; import java.util.List; import java.util.Arrays; @@ -22,9 +22,9 @@ import akka.testkit.AkkaSpec; import com.typesafe.config.ConfigFactory; import static akka.pattern.Patterns.ask; -import static akka.docs.jrouting.CustomRouterDocTestBase.DemocratActor; -import static akka.docs.jrouting.CustomRouterDocTestBase.RepublicanActor; -import static akka.docs.jrouting.CustomRouterDocTestBase.Message.*; +import static docs.jrouting.CustomRouterDocTestBase.DemocratActor; +import static docs.jrouting.CustomRouterDocTestBase.RepublicanActor; +import static docs.jrouting.CustomRouterDocTestBase.Message.*; public class CustomRouterDocTestBase { diff --git a/akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java b/akka-docs/java/code/docs/jrouting/FibonacciActor.java similarity index 97% rename from akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java rename to akka-docs/java/code/docs/jrouting/FibonacciActor.java index 8e426cf8fe..e316f27bce 100644 --- a/akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java +++ b/akka-docs/java/code/docs/jrouting/FibonacciActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.jrouting; +package docs.jrouting; import java.io.Serializable; diff --git a/akka-docs/java/code/akka/docs/jrouting/ParentActor.java b/akka-docs/java/code/docs/jrouting/ParentActor.java similarity index 98% rename from akka-docs/java/code/akka/docs/jrouting/ParentActor.java rename to akka-docs/java/code/docs/jrouting/ParentActor.java index 32a33b3a1b..ada9e92138 100644 --- a/akka-docs/java/code/akka/docs/jrouting/ParentActor.java +++ b/akka-docs/java/code/docs/jrouting/ParentActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.jrouting; +package docs.jrouting; import akka.routing.ScatterGatherFirstCompletedRouter; import akka.routing.BroadcastRouter; diff --git a/akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java b/akka-docs/java/code/docs/jrouting/PrintlnActor.java similarity index 92% rename from akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java rename to akka-docs/java/code/docs/jrouting/PrintlnActor.java index d6ad652ebe..adf56fe863 100644 --- a/akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java +++ b/akka-docs/java/code/docs/jrouting/PrintlnActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.jrouting; +package docs.jrouting; import akka.actor.UntypedActor; diff --git a/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java b/akka-docs/java/code/docs/jrouting/RouterViaConfigExample.java similarity index 98% rename from akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java rename to akka-docs/java/code/docs/jrouting/RouterViaConfigExample.java index 61b9a573d7..1505766196 100644 --- a/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java +++ b/akka-docs/java/code/docs/jrouting/RouterViaConfigExample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.jrouting; +package docs.jrouting; import akka.routing.FromConfig; import akka.actor.ActorRef; diff --git a/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java b/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java similarity index 99% rename from akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java rename to akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java index 44984c3ec7..ce46307eb7 100644 --- a/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java +++ b/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.jrouting; +package docs.jrouting; import akka.routing.RoundRobinRouter; import akka.routing.DefaultResizer; diff --git a/akka-docs/java/code/akka/docs/remoting/RemoteActorExample.java b/akka-docs/java/code/docs/remoting/RemoteActorExample.java similarity index 95% rename from akka-docs/java/code/akka/docs/remoting/RemoteActorExample.java rename to akka-docs/java/code/docs/remoting/RemoteActorExample.java index f7686e744a..3ca25bd153 100644 --- a/akka-docs/java/code/akka/docs/remoting/RemoteActorExample.java +++ b/akka-docs/java/code/docs/remoting/RemoteActorExample.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.remoting; +package docs.remoting; import akka.actor.ActorRef; import akka.actor.UntypedActor; diff --git a/akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTest.scala b/akka-docs/java/code/docs/remoting/RemoteDeploymentDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTest.scala rename to akka-docs/java/code/docs/remoting/RemoteDeploymentDocTest.scala index 9290b7c897..4ac46c4504 100644 --- a/akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTest.scala +++ b/akka-docs/java/code/docs/remoting/RemoteDeploymentDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.remoting +package docs.remoting import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTestBase.java b/akka-docs/java/code/docs/remoting/RemoteDeploymentDocTestBase.java similarity index 97% rename from akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTestBase.java rename to akka-docs/java/code/docs/remoting/RemoteDeploymentDocTestBase.java index b105e2b42a..cfb12ac7c4 100644 --- a/akka-docs/java/code/akka/docs/remoting/RemoteDeploymentDocTestBase.java +++ b/akka-docs/java/code/docs/remoting/RemoteDeploymentDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.remoting; +package docs.remoting; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/akka-docs/java/code/akka/docs/serialization/SerializationDocTest.scala b/akka-docs/java/code/docs/serialization/SerializationDocTest.scala similarity index 85% rename from akka-docs/java/code/akka/docs/serialization/SerializationDocTest.scala rename to akka-docs/java/code/docs/serialization/SerializationDocTest.scala index 26685dea80..ffac606928 100644 --- a/akka-docs/java/code/akka/docs/serialization/SerializationDocTest.scala +++ b/akka-docs/java/code/docs/serialization/SerializationDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.serialization +package docs.serialization import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/serialization/SerializationDocTestBase.java b/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/serialization/SerializationDocTestBase.java rename to akka-docs/java/code/docs/serialization/SerializationDocTestBase.java index aa24c92249..a62827fc98 100644 --- a/akka-docs/java/code/akka/docs/serialization/SerializationDocTestBase.java +++ b/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.serialization; +package docs.serialization; import org.junit.Test; import static org.junit.Assert.*; diff --git a/akka-docs/java/code/akka/docs/transactor/CoordinatedCounter.java b/akka-docs/java/code/docs/transactor/CoordinatedCounter.java similarity index 97% rename from akka-docs/java/code/akka/docs/transactor/CoordinatedCounter.java rename to akka-docs/java/code/docs/transactor/CoordinatedCounter.java index dd7f119005..4bd679f1eb 100644 --- a/akka-docs/java/code/akka/docs/transactor/CoordinatedCounter.java +++ b/akka-docs/java/code/docs/transactor/CoordinatedCounter.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; //#class import akka.actor.*; diff --git a/akka-docs/java/code/akka/docs/transactor/Coordinator.java b/akka-docs/java/code/docs/transactor/Coordinator.java similarity index 96% rename from akka-docs/java/code/akka/docs/transactor/Coordinator.java rename to akka-docs/java/code/docs/transactor/Coordinator.java index f1f04761cd..644eb4312e 100644 --- a/akka-docs/java/code/akka/docs/transactor/Coordinator.java +++ b/akka-docs/java/code/docs/transactor/Coordinator.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.transactor; +package docs.transactor; import akka.actor.*; import akka.transactor.*; diff --git a/akka-docs/java/code/akka/docs/transactor/Counter.java b/akka-docs/java/code/docs/transactor/Counter.java similarity index 95% rename from akka-docs/java/code/akka/docs/transactor/Counter.java rename to akka-docs/java/code/docs/transactor/Counter.java index ea2291afeb..06092c5db0 100644 --- a/akka-docs/java/code/akka/docs/transactor/Counter.java +++ b/akka-docs/java/code/docs/transactor/Counter.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; //#class import akka.transactor.*; diff --git a/akka-docs/java/code/akka/docs/transactor/FriendlyCounter.java b/akka-docs/java/code/docs/transactor/FriendlyCounter.java similarity index 97% rename from akka-docs/java/code/akka/docs/transactor/FriendlyCounter.java rename to akka-docs/java/code/docs/transactor/FriendlyCounter.java index 18f2137ea4..f24c044750 100644 --- a/akka-docs/java/code/akka/docs/transactor/FriendlyCounter.java +++ b/akka-docs/java/code/docs/transactor/FriendlyCounter.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; //#class import akka.actor.*; diff --git a/akka-docs/java/code/akka/docs/transactor/Increment.java b/akka-docs/java/code/docs/transactor/Increment.java similarity index 93% rename from akka-docs/java/code/akka/docs/transactor/Increment.java rename to akka-docs/java/code/docs/transactor/Increment.java index 1d789c99e2..3794ce631d 100644 --- a/akka-docs/java/code/akka/docs/transactor/Increment.java +++ b/akka-docs/java/code/docs/transactor/Increment.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.transactor; +package docs.transactor; //#class import akka.actor.ActorRef; diff --git a/akka-docs/java/code/akka/docs/transactor/Message.java b/akka-docs/java/code/docs/transactor/Message.java similarity index 77% rename from akka-docs/java/code/akka/docs/transactor/Message.java rename to akka-docs/java/code/docs/transactor/Message.java index 6a8da72070..0f1edfc51f 100644 --- a/akka-docs/java/code/akka/docs/transactor/Message.java +++ b/akka-docs/java/code/docs/transactor/Message.java @@ -2,6 +2,6 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; public class Message {} diff --git a/akka-docs/java/code/akka/docs/transactor/TransactorDocJavaSpec.scala b/akka-docs/java/code/docs/transactor/TransactorDocJavaSpec.scala similarity index 75% rename from akka-docs/java/code/akka/docs/transactor/TransactorDocJavaSpec.scala rename to akka-docs/java/code/docs/transactor/TransactorDocJavaSpec.scala index 4c61a156e8..6293b2effa 100644 --- a/akka-docs/java/code/akka/docs/transactor/TransactorDocJavaSpec.scala +++ b/akka-docs/java/code/docs/transactor/TransactorDocJavaSpec.scala @@ -2,10 +2,10 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.transactor +package docs.transactor import org.scalatest.junit.JUnitWrapperSuite class TransactorDocJavaSpec extends JUnitWrapperSuite( - "akka.docs.transactor.TransactorDocTest", + "docs.transactor.TransactorDocTest", Thread.currentThread.getContextClassLoader) \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/transactor/TransactorDocTest.java b/akka-docs/java/code/docs/transactor/TransactorDocTest.java similarity index 99% rename from akka-docs/java/code/akka/docs/transactor/TransactorDocTest.java rename to akka-docs/java/code/docs/transactor/TransactorDocTest.java index bb1d38651b..4eaaa0bb31 100644 --- a/akka-docs/java/code/akka/docs/transactor/TransactorDocTest.java +++ b/akka-docs/java/code/docs/transactor/TransactorDocTest.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.transactor; +package docs.transactor; import static org.junit.Assert.*; import org.junit.Test; diff --git a/akka-docs/java/code/akka/docs/zeromq/ZeromqDocTest.scala b/akka-docs/java/code/docs/zeromq/ZeromqDocTest.scala similarity index 87% rename from akka-docs/java/code/akka/docs/zeromq/ZeromqDocTest.scala rename to akka-docs/java/code/docs/zeromq/ZeromqDocTest.scala index a9747959e3..c5e6f224da 100644 --- a/akka-docs/java/code/akka/docs/zeromq/ZeromqDocTest.scala +++ b/akka-docs/java/code/docs/zeromq/ZeromqDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.zeromq +package docs.zeromq import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/java/code/akka/docs/zeromq/ZeromqDocTestBase.java b/akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java similarity index 99% rename from akka-docs/java/code/akka/docs/zeromq/ZeromqDocTestBase.java rename to akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java index ee8252a6ad..1a311c9529 100644 --- a/akka-docs/java/code/akka/docs/zeromq/ZeromqDocTestBase.java +++ b/akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.zeromq; +package docs.zeromq; //#pub-socket import akka.zeromq.Bind; diff --git a/akka-docs/java/extending-akka.rst b/akka-docs/java/extending-akka.rst index 6e8bfca4cb..38fedf575a 100644 --- a/akka-docs/java/extending-akka.rst +++ b/akka-docs/java/extending-akka.rst @@ -54,7 +54,7 @@ in the "akka.extensions" section of the config you provide to your ``ActorSystem :: akka { - extensions = ["akka.docs.extension.ExtensionDocTestBase.CountExtension"] + extensions = ["docs.extension.ExtensionDocTestBase.CountExtension"] } Applicability diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index 4c7b023959..b57a6494e5 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -85,7 +85,7 @@ Customization ============= So, lets say that you want to create your own ``Serializer``, -you saw the ``akka.docs.serialization.MyOwnSerializer`` in the config example above? +you saw the ``docs.serialization.MyOwnSerializer`` in the config example above? 
Creating new Serializers ------------------------ diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala similarity index 97% rename from akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala rename to akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala index c81f16e896..ac6c58ad08 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor.mailbox +package docs.actor.mailbox //#imports import akka.actor.Props @@ -107,7 +107,7 @@ import akka.actor.mailbox.DurableMailboxSpec object MyMailboxSpec { val config = """ MyStorage-dispatcher { - mailbox-type = akka.docs.actor.mailbox.MyMailboxType + mailbox-type = docs.actor.mailbox.MyMailboxType } """ } diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTest.scala similarity index 85% rename from akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala rename to akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTest.scala index eba732e6a7..6b156ef791 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTest.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor.mailbox +package docs.actor.mailbox import org.scalatest.junit.JUnitSuite diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTestBase.java similarity index 97% rename from akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java rename to akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTestBase.java index 25158446b6..06e867c786 100644 --- a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocTestBase.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor.mailbox; +package docs.actor.mailbox; //#imports import akka.actor.Props; diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/docs/actor/ActorDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala rename to akka-docs/scala/code/docs/actor/ActorDocSpec.scala index 8aed17605c..ee05e95d42 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/ActorDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#imports1 import akka.actor.Actor diff --git a/akka-docs/scala/code/akka/docs/actor/FSMDocSpec.scala b/akka-docs/scala/code/docs/actor/FSMDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/FSMDocSpec.scala rename to akka-docs/scala/code/docs/actor/FSMDocSpec.scala index 158f8979a0..75b0309a42 100644 --- a/akka-docs/scala/code/akka/docs/actor/FSMDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FSMDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor +package docs.actor //#test-code import akka.testkit.AkkaSpec diff --git a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala b/akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala rename to akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala index d08bcb53b2..79f5a5d084 100644 --- a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#all //#imports diff --git a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSpec.scala rename to akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala index ca1eccb73a..8ce16f1021 100644 --- a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#testkit import akka.testkit.{ AkkaSpec, ImplicitSender, EventFilter } diff --git a/akka-docs/scala/code/akka/docs/actor/SchedulerDocSpec.scala b/akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/actor/SchedulerDocSpec.scala rename to akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala index b6bffecb46..f711d85129 100644 --- a/akka-docs/scala/code/akka/docs/actor/SchedulerDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.actor +package docs.actor //#imports1 import akka.actor.Actor diff --git a/akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala b/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala rename to akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala index f7c5fa9bf7..e2c8db16a4 100644 --- a/akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor //#imports import akka.dispatch.{ Promise, Future, Await } diff --git a/akka-docs/scala/code/akka/docs/actor/UnnestedReceives.scala b/akka-docs/scala/code/docs/actor/UnnestedReceives.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/actor/UnnestedReceives.scala rename to akka-docs/scala/code/docs/actor/UnnestedReceives.scala index 194a958cce..bb77fe4d1d 100644 --- a/akka-docs/scala/code/akka/docs/actor/UnnestedReceives.scala +++ b/akka-docs/scala/code/docs/actor/UnnestedReceives.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.actor +package docs.actor import akka.actor._ import scala.collection.mutable.ListBuffer diff --git a/akka-docs/scala/code/akka/docs/agent/AgentDocSpec.scala b/akka-docs/scala/code/docs/agent/AgentDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/agent/AgentDocSpec.scala rename to akka-docs/scala/code/docs/agent/AgentDocSpec.scala index 1e9ec1fd69..418159f638 100644 --- a/akka-docs/scala/code/akka/docs/agent/AgentDocSpec.scala +++ b/akka-docs/scala/code/docs/agent/AgentDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.agent +package docs.agent import akka.agent.Agent import akka.util.duration._ diff --git a/akka-docs/scala/code/akka/docs/camel/Consumers.scala b/akka-docs/scala/code/docs/camel/Consumers.scala similarity index 96% rename from akka-docs/scala/code/akka/docs/camel/Consumers.scala rename to akka-docs/scala/code/docs/camel/Consumers.scala index 90f181df3f..df7161b9e6 100644 --- a/akka-docs/scala/code/akka/docs/camel/Consumers.scala +++ b/akka-docs/scala/code/docs/camel/Consumers.scala @@ -1,4 +1,4 @@ -package akka.docs.camel +package docs.camel object Consumers { { diff --git a/akka-docs/scala/code/akka/docs/camel/Introduction.scala b/akka-docs/scala/code/docs/camel/Introduction.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/camel/Introduction.scala rename to akka-docs/scala/code/docs/camel/Introduction.scala index 4899843a27..eaf4c400f6 100644 --- a/akka-docs/scala/code/akka/docs/camel/Introduction.scala +++ b/akka-docs/scala/code/docs/camel/Introduction.scala @@ -1,4 +1,4 @@ -package akka.docs.camel +package docs.camel object Introduction { { diff --git a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala similarity index 97% rename from akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala rename to akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala index 1452d72088..3ff8d9c1ea 100644 --- a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.dispatcher +package docs.dispatcher import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.MustMatchers @@ -91,13 +91,13 @@ object DispatcherDocSpec { //#prio-dispatcher-config prio-dispatcher { - mailbox-type = "akka.docs.dispatcher.DispatcherDocSpec$MyPrioMailbox" + mailbox-type = "docs.dispatcher.DispatcherDocSpec$MyPrioMailbox" } //#prio-dispatcher-config //#prio-dispatcher-config-java prio-dispatcher-java { - mailbox-type = "akka.docs.dispatcher.DispatcherDocTestBase$MyPrioMailbox" + mailbox-type = "docs.dispatcher.DispatcherDocTestBase$MyPrioMailbox" //Other dispatcher configuration goes here } //#prio-dispatcher-config-java diff --git a/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala b/akka-docs/scala/code/docs/event/LoggingDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala rename to akka-docs/scala/code/docs/event/LoggingDocSpec.scala index 0aa29549c9..7e2fccb876 100644 --- a/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala +++ b/akka-docs/scala/code/docs/event/LoggingDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.event +package docs.event import akka.testkit.AkkaSpec import akka.actor.Actor diff --git a/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala b/akka-docs/scala/code/docs/extension/ExtensionDocSpec.scala similarity index 96% rename from akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala rename to akka-docs/scala/code/docs/extension/ExtensionDocSpec.scala index 05baa28ecb..c2558fb4f1 100644 --- a/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala +++ b/akka-docs/scala/code/docs/extension/ExtensionDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.extension +package docs.extension import java.util.concurrent.atomic.AtomicLong import akka.actor.Actor @@ -45,7 +45,7 @@ object ExtensionDocSpec { val config = """ //#config akka { - extensions = ["akka.docs.extension.CountExtension$"] + extensions = ["docs.extension.CountExtension$"] } //#config """ diff --git a/akka-docs/scala/code/akka/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/extension/SettingsExtensionDocSpec.scala rename to akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala index 05765d27a5..a1e033e386 100644 --- a/akka-docs/scala/code/akka/docs/extension/SettingsExtensionDocSpec.scala +++ b/akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.extension +package docs.extension //#imports import akka.actor.Extension diff --git a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala b/akka-docs/scala/code/docs/future/FutureDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala rename to akka-docs/scala/code/docs/future/FutureDocSpec.scala index cee2eaeef8..66e80578fd 100644 --- a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala +++ b/akka-docs/scala/code/docs/future/FutureDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.future +package docs.future import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.MustMatchers diff --git a/akka-docs/scala/code/akka/docs/io/HTTPServer.scala b/akka-docs/scala/code/docs/io/HTTPServer.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/io/HTTPServer.scala rename to akka-docs/scala/code/docs/io/HTTPServer.scala index 01bb53023b..b6b80aa27f 100644 --- a/akka-docs/scala/code/akka/docs/io/HTTPServer.scala +++ b/akka-docs/scala/code/docs/io/HTTPServer.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.io +package docs.io //#imports import akka.actor._ diff --git a/akka-docs/scala/code/akka/docs/remoting/RemoteDeploymentDocSpec.scala b/akka-docs/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/remoting/RemoteDeploymentDocSpec.scala rename to akka-docs/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala index 0c65b3dc76..b391494a3b 100644 --- a/akka-docs/scala/code/akka/docs/remoting/RemoteDeploymentDocSpec.scala +++ b/akka-docs/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.remoting +package docs.remoting import akka.actor.{ ExtendedActorSystem, ActorSystem, Actor, ActorRef } import akka.testkit.{ AkkaSpec, ImplicitSender } diff --git a/akka-docs/scala/code/akka/docs/routing/RouterDocSpec.scala b/akka-docs/scala/code/docs/routing/RouterDocSpec.scala similarity index 96% rename from akka-docs/scala/code/akka/docs/routing/RouterDocSpec.scala rename to akka-docs/scala/code/docs/routing/RouterDocSpec.scala index 229c66f13e..c71228d06c 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterDocSpec.scala +++ b/akka-docs/scala/code/docs/routing/RouterDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.routing +package docs.routing import RouterDocSpec.MyActor import akka.actor.{ Props, Actor } diff --git a/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala b/akka-docs/scala/code/docs/routing/RouterTypeExample.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala rename to akka-docs/scala/code/docs/routing/RouterTypeExample.scala index 6ec475a874..421c7af3bb 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterTypeExample.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.routing +package docs.routing import akka.routing.{ ScatterGatherFirstCompletedRouter, BroadcastRouter, RandomRouter, RoundRobinRouter } import annotation.tailrec diff --git a/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala b/akka-docs/scala/code/docs/routing/RouterViaConfigExample.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala rename to akka-docs/scala/code/docs/routing/RouterViaConfigExample.scala index cc840eedc5..5d34e429bb 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterViaConfigExample.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.routing +package docs.routing import akka.actor.{ Actor, Props, ActorSystem } import com.typesafe.config.ConfigFactory diff --git a/akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala b/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala similarity index 98% rename from akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala rename to akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala index 50b141e7b7..195fc20445 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.routing +package docs.routing import akka.routing.RoundRobinRouter import akka.actor.{ ActorRef, Props, Actor, ActorSystem } diff --git a/akka-docs/scala/code/akka/docs/serialization/SerializationDocSpec.scala b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala similarity index 94% rename from akka-docs/scala/code/akka/docs/serialization/SerializationDocSpec.scala rename to akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala index e614cc9903..61086b78a6 100644 --- a/akka-docs/scala/code/akka/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.serialization +package docs.serialization import org.scalatest.matchers.MustMatchers import akka.testkit._ @@ -87,7 +87,7 @@ class SerializationDocSpec extends AkkaSpec { serializers { java = "akka.serialization.JavaSerializer" proto = "akka.serialization.ProtobufSerializer" - myown = "akka.docs.serialization.MyOwnSerializer" + myown = "docs.serialization.MyOwnSerializer" } } } @@ -105,14 +105,14 @@ class SerializationDocSpec extends AkkaSpec { serializers { java = "akka.serialization.JavaSerializer" proto = "akka.serialization.ProtobufSerializer" - myown = "akka.docs.serialization.MyOwnSerializer" + myown = "docs.serialization.MyOwnSerializer" } serialization-bindings { "java.lang.String" = java - "akka.docs.serialization.Customer" = java + "docs.serialization.Customer" = java "com.google.protobuf.Message" = proto - "akka.docs.serialization.MyOwnSerializable" = myown + "docs.serialization.MyOwnSerializable" = myown "java.lang.Boolean" = myown } } diff --git a/akka-docs/scala/code/akka/docs/testkit/PlainWordSpec.scala b/akka-docs/scala/code/docs/testkit/PlainWordSpec.scala similarity index 97% rename from akka-docs/scala/code/akka/docs/testkit/PlainWordSpec.scala rename to akka-docs/scala/code/docs/testkit/PlainWordSpec.scala index 8df13da2ca..2da67c9156 100644 --- a/akka-docs/scala/code/akka/docs/testkit/PlainWordSpec.scala +++ b/akka-docs/scala/code/docs/testkit/PlainWordSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.testkit +package docs.testkit //#plain-spec import akka.actor.ActorSystem diff --git a/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala b/akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala rename to akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala index d2b2f9367d..2ca1dbcef8 100644 --- a/akka-docs/scala/code/akka/docs/testkit/TestKitUsageSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.testkit +package docs.testkit //#testkit-usage import scala.util.Random diff --git a/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala rename to akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala index 2b2cb003a9..ddb3eeaf1d 100644 --- a/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.testkit +package docs.testkit //#imports-test-probe import akka.testkit.TestProbe diff --git a/akka-docs/scala/code/akka/docs/transactor/TransactorDocSpec.scala b/akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/transactor/TransactorDocSpec.scala rename to akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala index fa76f54744..c1556b837d 100644 --- a/akka-docs/scala/code/akka/docs/transactor/TransactorDocSpec.scala +++ b/akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.docs.transactor +package docs.transactor import akka.actor._ import akka.transactor._ diff --git a/akka-docs/scala/code/akka/docs/zeromq/ZeromqDocSpec.scala b/akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala similarity index 99% rename from akka-docs/scala/code/akka/docs/zeromq/ZeromqDocSpec.scala rename to akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala index dba4989d87..812e0edaaa 100644 --- a/akka-docs/scala/code/akka/docs/zeromq/ZeromqDocSpec.scala +++ b/akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.docs.zeromq +package docs.zeromq import akka.actor.Actor import akka.actor.Props diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index 2ab0a7b633..15525a29f2 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -84,7 +84,7 @@ Customization ============= So, lets say that you want to create your own ``Serializer``, -you saw the ``akka.docs.serialization.MyOwnSerializer`` in the config example above? +you saw the ``docs.serialization.MyOwnSerializer`` in the config example above? 
Creating new Serializers ------------------------ From 5c48cbb1451764da682cfbc302eb9fe846e460d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 22 May 2012 12:08:49 +0200 Subject: [PATCH 095/538] Added section to remote docs about remote security as well as improved doc in reference config --- akka-docs/java/remoting.rst | 60 ++++++++++++++++++ akka-docs/scala/remoting.rst | 61 +++++++++++++++++++ akka-remote/src/main/resources/reference.conf | 4 +- 3 files changed, 123 insertions(+), 2 deletions(-) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index 376eab2584..c4c5edee5f 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -294,3 +294,63 @@ which holds the transport used (RemoteTransport) and optionally the address that To intercept when an inbound remote client has been closed you listen to ``RemoteServerClientClosed`` which holds the transport used (RemoteTransport) and optionally the address of the remote client that was closed (Option
). +Remote Security +^^^^^^^^^^^^^^^ + +Akka provides a couple of ways to enhance security between remote nodes (client/server): + +* Untrusted Mode +* Security Cookie Handshake + +Untrusted Mode +-------------- + +You can enable untrusted mode for preventing system messages to be send by clients, e.g. messages like. +This will prevent the client to send these messages to the server: + +* ``Create`` +* ``Recreate`` +* ``Suspend`` +* ``Resume`` +* ``Terminate`` +* ``Supervise`` +* ``ChildTerminated`` +* ``Link`` +* ``Unlink`` + +Here is how to turn it on in the config:: + + akka { + actor { + remote { + untrusted-mode = on + } + } + } + +Secure Cookie Handshake +----------------------- + +Akka remoting also allows you to specify a secure cookie that will be exchanged and ensured to be identical +in the connection handshake between the client and the server. If they are not identical then the client +will be refused to connect to the server. + +The secure cookie can be any kind of string. But the recommended approach is to generate a cryptographically +secure cookie using this script ``$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh`` or from code +using the ``akka.util.Crypt.generateSecureCookie()`` utility method. + +You have to ensure that both the connecting client and the server have the same secure cookie as well +as the ``require-cookie`` option turned on. 
+ +Here is an example config:: + + akka { + actor { + remote { + netty { + secure-cookie = "090A030E0F0A05010900000A0C0E0C0B03050D05" + require-cookie = on + } + } + } + } diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 88096d90d1..5b36226b24 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -301,3 +301,64 @@ which holds the transport used (RemoteTransport) and optionally the address that To intercept when an inbound remote client has been closed you listen to ``RemoteServerClientClosed`` which holds the transport used (RemoteTransport) and optionally the address of the remote client that was closed (Option[Address]). + +Remote Security +^^^^^^^^^^^^^^^ + +Akka provides a couple of ways to enhance security between remote nodes (client/server): + +* Untrusted Mode +* Security Cookie Handshake + +Untrusted Mode +-------------- + +You can enable untrusted mode for preventing system messages to be send by clients, e.g. messages like. +This will prevent the client to send these messages to the server: + +* ``Create`` +* ``Recreate`` +* ``Suspend`` +* ``Resume`` +* ``Terminate`` +* ``Supervise`` +* ``ChildTerminated`` +* ``Link`` +* ``Unlink`` + +Here is how to turn it on in the config:: + + akka { + actor { + remote { + untrusted-mode = on + } + } + } + +Secure Cookie Handshake +----------------------- + +Akka remoting also allows you to specify a secure cookie that will be exchanged and ensured to be identical +in the connection handshake between the client and the server. If they are not identical then the client +will be refused to connect to the server. + +The secure cookie can be any kind of string. But the recommended approach is to generate a cryptographically +secure cookie using this script ``$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh`` or from code +using the ``akka.util.Crypt.generateSecureCookie()`` utility method. 
+ +You have to ensure that both the connecting client and the server have the same secure cookie as well +as the ``require-cookie`` option turned on. + +Here is an example config:: + + akka { + actor { + remote { + netty { + secure-cookie = "090A030E0F0A05010900000A0C0E0C0B03050D05" + require-cookie = on + } + } + } + } diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 11a4da0711..97b85895ed 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -60,8 +60,8 @@ akka { # default is a TCP-based remote transport based on Netty transport = "akka.remote.netty.NettyRemoteTransport" - # Enable untrusted mode for full security of server managed actors, allows - # untrusted clients to connect. + # Enable untrusted mode for full security of server managed actors, prevents system messages to be send + # by clients, e.g. messages like 'Create', 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc. untrusted-mode = off # Timeout for ACK of cluster operations, like checking actor out etc. 
From 916c2d4d11f4701dbf0cf11f94b5dd5f20180cdf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 12:14:33 +0200 Subject: [PATCH 096/538] Switching to checking InstantiationException for both create and recreate --- akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index b611d96842..3d93e52a54 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -118,7 +118,7 @@ class ActorKilledException private[akka] (message: String, cause: Throwable) * An InvalidActorNameException is thrown when you try to convert something, usually a String, to an Actor name * which doesn't validate. */ -case class InvalidActorNameException(message: String) extends AkkaException(message) +class InvalidActorNameException(message: String) extends AkkaException(message) /** * An ActorInitializationException is thrown when the the initialization logic for an Actor fails. diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 3380d51de0..3b2c743a6b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -498,7 +498,7 @@ private[akka] class ActorCell( import ActorCell.behaviorStackPlaceHolder behaviorStack = behaviorStackPlaceHolder - val instance = props.creator() + val instance = props.creator.apply() if (instance eq null) throw new ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") @@ -532,8 +532,7 @@ private[akka] class ActorCell( a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new UntypedActorFactory ... 
) or is missing an appropriate, reachable no-args constructor. """, i.getCause) - case NonFatal(e) ⇒ - throw new ActorInitializationException(self, "exception during creation", e) + case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e) } } @@ -557,7 +556,10 @@ private[akka] class ActorCell( doRecreate(cause, failedActor) } } catch { - case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e) + case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e match { + case i: InstantiationException => i.getCause + case other => other + }) } } From 9ac11a643201a27eb65c4963da794e3a042ed326 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 22 May 2012 13:07:05 +0200 Subject: [PATCH 097/538] improve documentation of explicitly given routees --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++-- .../java/code/docs/jrouting/RouterViaProgramExample.java | 2 +- akka-docs/java/routing.rst | 7 ++++++- .../scala/code/docs/routing/RouterViaProgramExample.scala | 2 +- akka-docs/scala/routing.rst | 7 ++++++- 5 files changed, 16 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 3b2c743a6b..0955595640 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -557,8 +557,8 @@ private[akka] class ActorCell( } } catch { case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e match { - case i: InstantiationException => i.getCause - case other => other + case i: InstantiationException ⇒ i.getCause + case other ⇒ other }) } } diff --git a/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java b/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java index ce46307eb7..72843b44c6 100644 --- 
a/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java +++ b/akka-docs/java/code/docs/jrouting/RouterViaProgramExample.java @@ -55,7 +55,7 @@ public class RouterViaProgramExample { ActorRef actor2 = system.actorOf(new Props(ExampleActor.class)); ActorRef actor3 = system.actorOf(new Props(ExampleActor.class)); Iterable routees = Arrays.asList(new ActorRef[] { actor1, actor2, actor3 }); - ActorRef router2 = system.actorOf(new Props(ExampleActor.class).withRouter(RoundRobinRouter.create(routees))); + ActorRef router2 = system.actorOf(new Props().withRouter(RoundRobinRouter.create(routees))); //#programmaticRoutingRoutees for (int i = 1; i <= 6; i++) { router2.tell(new ExampleActor.Message(i)); diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 4d01642a72..9bd770f9f6 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -33,6 +33,11 @@ You can also give the router already created routees as in: .. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingRoutees +It should be noted that no actor factory or class needs to be provided in this +case, as the ``Router`` will not create any children on its own (which is not +true anymore when using a resizer). The routees can also be specified by giving +their path strings. + When you create a router programmatically you define the number of routees *or* you pass already created routees to it. If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. @@ -48,7 +53,7 @@ Once you have the router actor it is just to send messages to it as you would to router.tell(new MyMsg()); -The router will apply its behavior to the message it receives and forward it to the routees. +The router will forward the message to its routees according to its routing policy. 
Remotely Deploying Routees ************************** diff --git a/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala b/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala index 195fc20445..79219b742b 100644 --- a/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterViaProgramExample.scala @@ -29,7 +29,7 @@ object RoutingProgrammaticallyExample extends App { val actor2 = system.actorOf(Props[ExampleActor1]) val actor3 = system.actorOf(Props[ExampleActor1]) val routees = Vector[ActorRef](actor1, actor2, actor3) - val router2 = system.actorOf(Props[ExampleActor1].withRouter( + val router2 = system.actorOf(Props().withRouter( RoundRobinRouter(routees = routees))) //#programmaticRoutingRoutees 1 to 6 foreach { i ⇒ router2 ! Message1(i) } diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 737c9e31e7..a66e7f890d 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -33,6 +33,11 @@ You can also give the router already created routees as in: .. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingRoutees +It should be noted that no actor factory or class needs to be provided in this +case, as the ``Router`` will not create any children on its own (which is not +true anymore when using a resizer). The routees can also be specified by giving +their path strings. + When you create a router programmatically you define the number of routees *or* you pass already created routees to it. If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. @@ -48,7 +53,7 @@ Once you have the router actor it is just to send messages to it as you would to router ! MyMsg -The router will apply its behavior to the message it receives and forward it to the routees. +The router will forward the message to its routees according to its routing policy. 
Remotely Deploying Routees ************************** From 4dc4cdde26538a43a16e90deaffc368b7d7de403 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 13:49:04 +0200 Subject: [PATCH 098/538] Arrow-formatting ;-) --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 3b2c743a6b..0955595640 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -557,8 +557,8 @@ private[akka] class ActorCell( } } catch { case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e match { - case i: InstantiationException => i.getCause - case other => other + case i: InstantiationException ⇒ i.getCause + case other ⇒ other }) } } From f92f7431dd0f53532e5bd99389aedf5315024331 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 14:57:27 +0200 Subject: [PATCH 099/538] Adding references to Roman Levensteins akka-protostuff and akka-quickser in the serialization docs --- akka-docs/java/serialization.rst | 7 +++++++ akka-docs/scala/serialization.rst | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index 4c7b023959..3e8c8162e7 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -137,3 +137,10 @@ representation into a real reference. :class:`DynamicVariable` is a thread-local variable, so be sure to have it set while deserializing anything which might contain actor references. 
+External Akka Serializers +========================= + +`Akka-protostuff by Roman Levenstein`_ + + +`Akka-quickser by Roman Levenstein`_ diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index 2ab0a7b633..296afa7068 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -135,3 +135,11 @@ representation into a real reference. :class:`DynamicVariable` is a thread-local variable, so be sure to have it set while deserializing anything which might contain actor references. + +External Akka Serializers +========================= + +`Akka-protostuff by Roman Levenstein`_ + + +`Akka-quickser by Roman Levenstein`_ From 508d8f70a5a24e02e5462f58f747e109fef2daf7 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 22 May 2012 15:19:45 +0200 Subject: [PATCH 100/538] incorporate review comments into TestConductor work - protect all internal API using private[akka] and ScalaDoc - remove package object which was after a previous refactoring only used from a single place anyway - document all public API methods, add brief description how failure injector works - include remoteTests in the top-level aggregate project --- .../akka/remote/testconductor/Conductor.scala | 42 ++++++++--- .../akka/remote/testconductor/DataTypes.scala | 73 +++++++++++++------ .../akka/remote/testconductor/Extension.scala | 18 ++++- .../NetworkFailureInjector.scala | 24 +++++- .../akka/remote/testconductor/Player.scala | 13 +++- .../testconductor/RemoteConnection.scala | 25 +++++-- .../TestConductorTransport.scala | 5 +- .../akka/remote/testconductor/package.scala | 31 -------- .../remote/netty/NettyRemoteSupport.scala | 37 +++++++++- project/AkkaBuild.scala | 2 +- 10 files changed, 188 insertions(+), 82 deletions(-) delete mode 100644 akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala 
b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 6c26fcaae2..1ec172e9ce 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -50,7 +50,7 @@ trait Conductor { this: TestConductorExt ⇒ private var _controller: ActorRef = _ private def controller: ActorRef = _controller match { - case null ⇒ throw new RuntimeException("TestConductorServer was not started") + case null ⇒ throw new IllegalStateException("TestConductorServer was not started") case x ⇒ x } @@ -169,10 +169,11 @@ trait Conductor { this: TestConductorExt ⇒ * * @param node is the symbolic name of the node which is to be affected */ - def kill(node: RoleName): Future[Done] = { - import Settings.QueryTimeout - controller ? Terminate(node, -1) mapTo - } + // TODO: uncomment (and implement in Controller) if really needed + // def kill(node: RoleName): Future[Done] = { + // import Settings.QueryTimeout + // controller ? Terminate(node, -1) mapTo + // } /** * Obtain the list of remote host names currently registered. @@ -201,8 +202,10 @@ trait Conductor { this: TestConductorExt ⇒ * This handler is installed at the end of the controller’s netty pipeline. Its only * purpose is to dispatch incoming messages to the right ServerFSM actor. There is * one shared instance of this class for all connections accepted by one Controller. + * + * INTERNAL API. */ -class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { +private[akka] class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { val clients = new ConcurrentHashMap[Channel, ActorRef]() @@ -235,7 +238,10 @@ class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAd } -object ServerFSM { +/** + * INTERNAL API. 
+ */ +private[akka] object ServerFSM { sealed trait State case object Initial extends State case object Ready extends State @@ -253,8 +259,10 @@ object ServerFSM { * [[akka.remote.testconductor.Done]] message, and there can be only one such * request outstanding at a given time (i.e. a Send fails if the previous has * not yet been acknowledged). + * + * INTERNAL API. */ -class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] { +private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] { import ServerFSM._ import akka.actor.FSM._ import Controller._ @@ -317,7 +325,10 @@ class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor wi } } -object Controller { +/** + * INTERNAL API. + */ +private[akka] object Controller { case class ClientDisconnected(name: RoleName) case object GetNodes case object GetSockAddr @@ -329,8 +340,10 @@ object Controller { * This controls test execution by managing barriers (delegated to * [[akka.remote.testconductor.BarrierCoordinator]], its child) and allowing * network and other failures to be injected at the test nodes. + * + * INTERNAL API. */ -class Controller(private var initialParticipants: Int, controllerPort: InetSocketAddress) extends Actor { +private[akka] class Controller(private var initialParticipants: Int, controllerPort: InetSocketAddress) extends Actor { import Controller._ import BarrierCoordinator._ @@ -418,7 +431,10 @@ class Controller(private var initialParticipants: Int, controllerPort: InetSocke } } -object BarrierCoordinator { +/** + * INTERNAL API. + */ +private[akka] object BarrierCoordinator { sealed trait State case object Idle extends State case object Waiting extends State @@ -447,8 +463,10 @@ object BarrierCoordinator { * EnterBarrier return message. 
In case of planned removals, this may just happen * earlier, in case of failures the current barrier (and all subsequent ones) will * be failed by sending BarrierFailed responses. + * + * INTERNAL API. */ -class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] { +private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] { import BarrierCoordinator._ import akka.actor.FSM._ import Controller._ diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index 2bb7d50c37..022ae2d89b 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -13,44 +13,59 @@ import org.jboss.netty.handler.codec.oneone.OneToOneDecoder case class RoleName(name: String) -case class ToClient(msg: ClientOp with NetworkOp) -case class ToServer(msg: ServerOp with NetworkOp) +private[akka] case class ToClient(msg: ClientOp with NetworkOp) +private[akka] case class ToServer(msg: ServerOp with NetworkOp) -sealed trait ClientOp // messages sent to from Conductor to Player -sealed trait ServerOp // messages sent to from Player to Conductor -sealed trait CommandOp // messages sent from TestConductorExt to Conductor -sealed trait NetworkOp // messages sent over the wire -sealed trait UnconfirmedClientOp extends ClientOp // unconfirmed messages going to the Player -sealed trait ConfirmedClientOp extends ClientOp +private[akka] sealed trait ClientOp // messages sent to from Conductor to Player +private[akka] sealed trait ServerOp // messages sent to from Player to Conductor +private[akka] sealed trait CommandOp // messages sent from TestConductorExt to Conductor +private[akka] sealed trait NetworkOp // messages sent over the wire +private[akka] sealed trait UnconfirmedClientOp 
extends ClientOp // unconfirmed messages going to the Player +private[akka] sealed trait ConfirmedClientOp extends ClientOp /** * First message of connection sets names straight. */ -case class Hello(name: String, addr: Address) extends NetworkOp +private[akka] case class Hello(name: String, addr: Address) extends NetworkOp -case class EnterBarrier(name: String) extends ServerOp with NetworkOp -case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp +private[akka] case class EnterBarrier(name: String) extends ServerOp with NetworkOp +private[akka] case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp -case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) extends CommandOp -case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp +private[akka] case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) extends CommandOp +private[akka] case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp -case class Disconnect(node: RoleName, target: RoleName, abort: Boolean) extends CommandOp -case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with NetworkOp +private[akka] case class Disconnect(node: RoleName, target: RoleName, abort: Boolean) extends CommandOp +private[akka] case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with NetworkOp -case class Terminate(node: RoleName, exitValueOrKill: Int) extends CommandOp -case class TerminateMsg(exitValue: Int) extends ConfirmedClientOp with NetworkOp +private[akka] case class Terminate(node: RoleName, exitValueOrKill: Int) extends CommandOp +private[akka] case class TerminateMsg(exitValue: Int) extends ConfirmedClientOp with NetworkOp -case class GetAddress(node: RoleName) extends 
ServerOp with NetworkOp -case class AddressReply(node: RoleName, addr: Address) extends UnconfirmedClientOp with NetworkOp +private[akka] case class GetAddress(node: RoleName) extends ServerOp with NetworkOp +private[akka] case class AddressReply(node: RoleName, addr: Address) extends UnconfirmedClientOp with NetworkOp -abstract class Done extends ServerOp with UnconfirmedClientOp with NetworkOp -case object Done extends Done { +private[akka] abstract class Done extends ServerOp with UnconfirmedClientOp with NetworkOp +private[akka] case object Done extends Done { def getInstance: Done = this } -case class Remove(node: RoleName) extends CommandOp +private[akka] case class Remove(node: RoleName) extends CommandOp + +private[akka] class MsgEncoder extends OneToOneEncoder { + + implicit def address2proto(addr: Address): TCP.Address = + TCP.Address.newBuilder + .setProtocol(addr.protocol) + .setSystem(addr.system) + .setHost(addr.host.get) + .setPort(addr.port.get) + .build + + implicit def direction2proto(dir: Direction): TCP.Direction = dir match { + case Direction.Send ⇒ TCP.Direction.Send + case Direction.Receive ⇒ TCP.Direction.Receive + case Direction.Both ⇒ TCP.Direction.Both + } -class MsgEncoder extends OneToOneEncoder { def encode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { case x: NetworkOp ⇒ val w = TCP.Wrapper.newBuilder @@ -81,7 +96,17 @@ class MsgEncoder extends OneToOneEncoder { } } -class MsgDecoder extends OneToOneDecoder { +private[akka] class MsgDecoder extends OneToOneDecoder { + + implicit def address2scala(addr: TCP.Address): Address = + Address(addr.getProtocol, addr.getSystem, addr.getHost, addr.getPort) + + implicit def direction2scala(dir: TCP.Direction): Direction = dir match { + case TCP.Direction.Send ⇒ Direction.Send + case TCP.Direction.Receive ⇒ Direction.Receive + case TCP.Direction.Both ⇒ Direction.Both + } + def decode(ctx: ChannelHandlerContext, ch: Channel, msg: AnyRef): AnyRef = msg match { case w: 
TCP.Wrapper if w.getAllFields.size == 1 ⇒ if (w.hasHello) { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala index 7f6b576128..6800253ae0 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala @@ -32,6 +32,9 @@ object TestConductor extends ExtensionKey[TestConductorExt] { * [[akka.remote.testconductor.Player]] roles inside an Akka * [[akka.actor.Extension]]. Please follow the aforementioned links for * more information. + * + * This extension requires the `akka.actor.provider` + * to be a [[akka.remote.RemoteActorRefProvider]]. */ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player { @@ -47,9 +50,22 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C val PacketSplitThreshold = Duration(config.getMilliseconds("akka.testconductor.packet-split-threshold"), MILLISECONDS) } + /** + * Remote transport used by the actor ref provider. + */ val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport + + /** + * Transport address of this Netty-like remote transport. + */ val address = transport.address - val failureInjectors = new ConcurrentHashMap[Address, FailureInjector] + /** + * INTERNAL API. + * + * [[akka.remote.testconductor.FailureInjector]]s register themselves here so that + * failures can be injected. 
+ */ + private[akka] val failureInjectors = new ConcurrentHashMap[Address, FailureInjector] } \ No newline at end of file diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index b853523979..629a15d51f 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -31,7 +31,10 @@ import org.jboss.netty.channel.ChannelFuture import org.jboss.netty.channel.ChannelFutureListener import org.jboss.netty.channel.ChannelFuture -case class FailureInjector(sender: ActorRef, receiver: ActorRef) { +/** + * INTERNAL API. + */ +private[akka] case class FailureInjector(sender: ActorRef, receiver: ActorRef) { def refs(dir: Direction) = dir match { case Direction.Send ⇒ Seq(sender) case Direction.Receive ⇒ Seq(receiver) @@ -39,12 +42,27 @@ case class FailureInjector(sender: ActorRef, receiver: ActorRef) { } } -object NetworkFailureInjector { +/** + * INTERNAL API. + */ +private[akka] object NetworkFailureInjector { case class SetRate(rateMBit: Float) case class Disconnect(abort: Boolean) } -class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { +/** + * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs, which can + * pass through requests immediately, drop them or throttle to a desired rate. The FSMs are + * registered in the TestConductorExt.failureInjectors so that settings can be applied from + * the ClientFSMs. + * + * I found that simply forwarding events using ctx.sendUpstream/sendDownstream does not work, + * it deadlocks and gives strange errors; in the end I just trusted the Netty docs which + * recommend to prefer `Channels.write()` and `Channels.fireMessageReceived()`. + * + * INTERNAL API. 
+ */ +private[akka] class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { val log = Logging(system, "FailureInjector") diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index 10434007e1..2a4eeb6ad1 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -94,7 +94,10 @@ trait Player { this: TestConductorExt ⇒ } } -object ClientFSM { +/** + * INTERNAL API. + */ +private[akka] object ClientFSM { sealed trait State case object Connecting extends State case object AwaitDone extends State @@ -116,8 +119,10 @@ object ClientFSM { * done the same. After that, it will pass barrier requests to and from the * coordinator and react to the [[akka.remote.testconductor.Conductor]]’s * requests for failure injection. + * + * INTERNAL API. */ -class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { +private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor with LoggingFSM[ClientFSM.State, ClientFSM.Data] { import ClientFSM._ val settings = TestConductor().Settings @@ -236,8 +241,10 @@ class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor /** * This handler only forwards messages received from the conductor to the [[akka.remote.testconductor.ClientFSM]]. + * + * INTERNAL API. 
*/ -class PlayerHandler( +private[akka] class PlayerHandler( server: InetSocketAddress, private var reconnects: Int, backoff: Duration, diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index 5b1c454b0c..5aeb484c42 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -13,7 +13,10 @@ import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutExceptio import java.net.InetSocketAddress import java.util.concurrent.Executors -class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends ChannelPipelineFactory { +/** + * INTERNAL API. + */ +private[akka] class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends ChannelPipelineFactory { def getPipeline: ChannelPipeline = { val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) @@ -22,11 +25,23 @@ class TestConductorPipelineFactory(handler: ChannelUpstreamHandler) extends Chan } } -sealed trait Role -case object Client extends Role -case object Server extends Role +/** + * INTERNAL API. + */ +private[akka] sealed trait Role +/** + * INTERNAL API. + */ +private[akka] case object Client extends Role +/** + * INTERNAL API. + */ +private[akka] case object Server extends Role -object RemoteConnection { +/** + * INTERNAL API. 
+ */ +private[akka] object RemoteConnection { def apply(role: Role, sockaddr: InetSocketAddress, handler: ChannelUpstreamHandler): Channel = { role match { case Client ⇒ diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala index 2c51c2cf18..a036bcfff0 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala @@ -10,7 +10,10 @@ import akka.remote.RemoteActorRefProvider import org.jboss.netty.channel.ChannelHandler import org.jboss.netty.channel.ChannelPipelineFactory -class TestConductorTransport(_remoteSettings: RemoteSettings, _system: ActorSystemImpl, _provider: RemoteActorRefProvider) +/** + * INTERNAL API. + */ +private[akka] class TestConductorTransport(_remoteSettings: RemoteSettings, _system: ActorSystemImpl, _provider: RemoteActorRefProvider) extends NettyRemoteTransport(_remoteSettings, _system, _provider) { override def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory = diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala deleted file mode 100644 index b24279dbf6..0000000000 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/package.scala +++ /dev/null @@ -1,31 +0,0 @@ -package akka.remote - -import akka.actor.Address -import testconductor.{ TestConductorProtocol ⇒ TCP } - -package object testconductor { - - implicit def address2proto(addr: Address): TCP.Address = - TCP.Address.newBuilder - .setProtocol(addr.protocol) - .setSystem(addr.system) - .setHost(addr.host.get) - .setPort(addr.port.get) - .build - - implicit def address2scala(addr: TCP.Address): Address = - Address(addr.getProtocol, addr.getSystem, addr.getHost, 
addr.getPort) - - implicit def direction2proto(dir: Direction): TCP.Direction = dir match { - case Direction.Send ⇒ TCP.Direction.Send - case Direction.Receive ⇒ TCP.Direction.Receive - case Direction.Both ⇒ TCP.Direction.Both - } - - implicit def direction2scala(dir: TCP.Direction): Direction = dir match { - case TCP.Direction.Send ⇒ Direction.Send - case TCP.Direction.Receive ⇒ Direction.Receive - case TCP.Direction.Both ⇒ Direction.Both - } - -} \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 60c2ac6097..f0b6cd1870 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -46,13 +46,31 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor Executors.newCachedThreadPool(system.threadFactory), Executors.newCachedThreadPool(system.threadFactory)) + /** + * Backing scaffolding for the default implementation of NettyRemoteSupport.createPipeline. + */ object PipelineFactory { + /** + * Construct a StaticChannelPipeline from a sequence of handlers; to be used + * in implementations of ChannelPipelineFactory. + */ def apply(handlers: Seq[ChannelHandler]): StaticChannelPipeline = new StaticChannelPipeline(handlers: _*) + + /** + * Constructs the NettyRemoteTransport default pipeline with the give “head” handler, which + * is taken by-name to allow it not to be shared across pipelines. + * + * @param withTimeout determines whether an IdleStateHandler shall be included + */ def apply(endpoint: ⇒ Seq[ChannelHandler], withTimeout: Boolean): ChannelPipelineFactory = new ChannelPipelineFactory { def getPipeline = apply(defaultStack(withTimeout) ++ endpoint) } + /** + * Construct a default protocol stack, excluding the “head” handler (i.e. 
the one which + * actually dispatches the received messages to the local target actors). + */ def defaultStack(withTimeout: Boolean): Seq[ChannelHandler] = (if (withTimeout) timeout :: Nil else Nil) ::: msgFormat ::: @@ -60,17 +78,28 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor executionHandler :: Nil + /** + * Construct an IdleStateHandler which uses [[akka.remote.netty.NettyRemoteTransport]].timer. + */ def timeout = new IdleStateHandler(timer, settings.ReadTimeout.toSeconds.toInt, settings.WriteTimeout.toSeconds.toInt, settings.AllTimeout.toSeconds.toInt) + /** + * Construct frame&protobuf encoder/decoder. + */ def msgFormat = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) :: new LengthFieldPrepender(4) :: new RemoteMessageDecoder :: new RemoteMessageEncoder(NettyRemoteTransport.this) :: Nil + /** + * Construct an ExecutionHandler which is used to ensure that message dispatch does not + * happen on a netty thread (that could be bad if re-sending over the network for + * remote-deployed actors). + */ val executionHandler = new ExecutionHandler(new OrderedMemoryAwareThreadPoolExecutor( settings.ExecutionPoolSize, settings.MaxChannelMemorySize, @@ -79,6 +108,11 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor settings.ExecutionPoolKeepalive.unit, system.threadFactory)) + /** + * Construct and authentication handler which uses the SecureCookie to somewhat + * protect the TCP port from unauthorized use (don’t rely on it too much, though, + * as this is NOT a cryptographic feature). + */ def authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil } @@ -98,7 +132,8 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor /** * Override this method to inject a subclass of NettyRemoteServer instead of - * the normal one, e.g. for inserting security hooks. + * the normal one, e.g. 
for inserting security hooks. If this method throws + * an exception, the transport will shut itself down and re-throw. */ protected def createServer(): NettyRemoteServer = new NettyRemoteServer(this) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index b899bdec45..26eab59037 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -32,7 +32,7 @@ object AkkaBuild extends Build { Unidoc.unidocExclude := Seq(samples.id, tutorials.id), Dist.distExclude := Seq(actorTests.id, akkaSbtPlugin.id, docs.id) ), - aggregate = Seq(actor, testkit, actorTests, remote, camel, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) + aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) ) lazy val actor = Project( From 5a8f79b619f5a5f43dfa2929211f11de75093130 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 22 May 2012 15:27:19 +0200 Subject: [PATCH 101/538] Preparing Agent for binary compat --- .../src/main/scala/akka/agent/Agent.scala | 30 +++++++++++-------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/akka-agent/src/main/scala/akka/agent/Agent.scala b/akka-agent/src/main/scala/akka/agent/Agent.scala index af551d00c8..64834178a8 100644 --- a/akka-agent/src/main/scala/akka/agent/Agent.scala +++ b/akka-agent/src/main/scala/akka/agent/Agent.scala @@ -96,18 +96,18 @@ object Agent { * }}} */ class Agent[T](initialValue: T, system: ActorSystem) { - private[akka] val ref = Ref(initialValue) - private[akka] val updater = system.actorOf(Props(new AgentUpdater(this))).asInstanceOf[LocalActorRef] //TODO can we avoid this somehow? + private val ref = Ref(initialValue) + private val updater = system.actorOf(Props(new AgentUpdater(this, ref))).asInstanceOf[LocalActorRef] //TODO can we avoid this somehow? 
/** * Read the internal state of the agent. */ - def get() = ref.single.get + def get(): T = ref.single.get /** * Read the internal state of the agent. */ - def apply() = get + def apply(): T = get /** * Dispatch a function to update the internal state. @@ -154,7 +154,7 @@ class Agent[T](initialValue: T, system: ActorSystem) { def sendOff(f: T ⇒ T): Unit = { send((value: T) ⇒ { suspend() - val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this)).withDispatcher("akka.agent.send-off-dispatcher")) + val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this, ref)).withDispatcher("akka.agent.send-off-dispatcher")) threadBased ! Update(f) value }) @@ -171,7 +171,7 @@ class Agent[T](initialValue: T, system: ActorSystem) { val result = Promise[T]()(system.dispatcher) send((value: T) ⇒ { suspend() - val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this)).withDispatcher("akka.agent.alter-off-dispatcher")) + val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this, ref)).withDispatcher("akka.agent.alter-off-dispatcher")) result completeWith ask(threadBased, Alter(f))(timeout).asInstanceOf[Future[T]] value }) @@ -209,18 +209,18 @@ class Agent[T](initialValue: T, system: ActorSystem) { /** * Suspends processing of `send` actions for the agent. */ - def suspend() = updater.suspend() + def suspend(): Unit = updater.suspend() /** * Resumes processing of `send` actions for the agent. */ - def resume() = updater.resume() + def resume(): Unit = updater.resume() /** * Closes the agents and makes it eligible for garbage collection. * A closed agent cannot accept any `send` actions. */ - def close() = updater.stop() + def close(): Unit = updater.stop() // --------------------------------------------- // Support for Java API Functions and Procedures @@ -281,8 +281,10 @@ class Agent[T](initialValue: T, system: ActorSystem) { /** * Agent updater actor. Used internally for `send` actions. 
+ * + * INTERNAL API */ -class AgentUpdater[T](agent: Agent[T]) extends Actor { +private[akka] class AgentUpdater[T](agent: Agent[T], ref: Ref[T]) extends Actor { def receive = { case u: Update[_] ⇒ update(u.function.asInstanceOf[T ⇒ T]) case a: Alter[_] ⇒ sender ! update(a.function.asInstanceOf[T ⇒ T]) @@ -290,13 +292,15 @@ class AgentUpdater[T](agent: Agent[T]) extends Actor { case _ ⇒ } - def update(function: T ⇒ T): T = agent.ref.single.transformAndGet(function) + def update(function: T ⇒ T): T = ref.single.transformAndGet(function) } /** * Thread-based agent updater actor. Used internally for `sendOff` actions. + * + * INTERNAL API */ -class ThreadBasedAgentUpdater[T](agent: Agent[T]) extends Actor { +private[akka] class ThreadBasedAgentUpdater[T](agent: Agent[T], ref: Ref[T]) extends Actor { def receive = { case u: Update[_] ⇒ try { update(u.function.asInstanceOf[T ⇒ T]) @@ -313,5 +317,5 @@ class ThreadBasedAgentUpdater[T](agent: Agent[T]) extends Actor { case _ ⇒ context.stop(self) } - def update(function: T ⇒ T): T = agent.ref.single.transformAndGet(function) + def update(function: T ⇒ T): T = ref.single.transformAndGet(function) } From a211e4daf6a2e866546813a3303cd866b1fc9d63 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 May 2012 16:35:27 +0200 Subject: [PATCH 102/538] Adjusted tests to latest testconductor, remote deploy still fails. 
See #2069 --- .../{SimpleRemoteMultiJvmSpec.scala => SimpleRemoteSpec.scala} | 0 project/plugins.sbt | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename akka-remote-tests/src/multi-jvm/scala/akka/remote/{SimpleRemoteMultiJvmSpec.scala => SimpleRemoteSpec.scala} (100%) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala similarity index 100% rename from akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteMultiJvmSpec.scala rename to akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala diff --git a/project/plugins.sbt b/project/plugins.sbt index f49cfb688d..0a7f9999a7 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,7 +1,7 @@ resolvers += Classpaths.typesafeResolver -addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-SNAPSHOT") +addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-M1") addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.2") From 1577bffe17db53da665c843a71de5d1a2689b012 Mon Sep 17 00:00:00 2001 From: Roland Date: Wed, 23 May 2012 09:25:12 +0200 Subject: [PATCH 103/538] =?UTF-8?q?make=20too=20verbose=20logging=20in=20T?= =?UTF-8?q?estConductorSpec=20go=20away=20(d=E2=80=99oh)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../scala/akka/remote/testconductor/TestConductorSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 087aac55c7..e311fa0023 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -17,7 +17,7 @@ import akka.remote.testkit.MultiNodeConfig 
object TestConductorMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + # akka.loglevel = DEBUG akka.remote { log-received-messages = on log-sent-messages = on From 12ff07f0251525fc7232f25570432241b938f966 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 23 May 2012 09:26:20 +0200 Subject: [PATCH 104/538] Adjusted tests to latest testconductor, remote deploy still fails. See #2069 --- .../scala/akka/remote/SimpleRemoteSpec.scala | 97 ++++++-------- .../DirectRoutedRemoteActorMultiJvmSpec.scala | 118 +++++++++--------- 2 files changed, 97 insertions(+), 118 deletions(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala index 9209deb9a5..70cca7c34b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala @@ -1,21 +1,19 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.remote +import com.typesafe.config.ConfigFactory + import akka.actor.Actor import akka.actor.ActorRef import akka.actor.Props -import akka.dispatch.Await import akka.pattern.ask -import akka.remote.testconductor.TestConductor -import akka.testkit.DefaultTimeout -import akka.testkit.ImplicitSender -import akka.util.Duration -import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ -object SimpleRemoteMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 2 +object SimpleRemoteMultiJvmSpec extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { @@ -23,60 +21,47 @@ object SimpleRemoteMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { } } - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = INFO - actor { - provider = akka.remote.RemoteActorRefProvider - debug { - receive = on - fsm = on - } - } - remote { - transport = akka.remote.testconductor.TestConductorTransport - log-received-messages = on - log-sent-messages = on - } - testconductor { - host = localhost - port = 4712 - } - }""") + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.remote { + log-received-messages = on + log-sent-messages = on + } + akka.actor.debug { + receive = on + fsm = on + } + """)) - def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) -} - -class SimpleRemoteMultiJvmNode1 extends AkkaRemoteSpec(SimpleRemoteMultiJvmSpec.nameConfig(0)) { - import SimpleRemoteMultiJvmSpec._ - val nodes = NrOfNodes - val tc = TestConductor(system) - - "lookup remote actor" in { - Await.result(tc.startController(2), Duration.Inf) - system.actorOf(Props[SomeActor], "service-hello") - tc.enter("begin", "done") - } + val master = role("master") + val slave = role("slave") } -class SimpleRemoteMultiJvmNode2 extends 
AkkaRemoteSpec(SimpleRemoteMultiJvmSpec.nameConfig(1)) +class SimpleRemoteMultiJvmNode1 extends SimpleRemoteSpec +class SimpleRemoteMultiJvmNode2 extends SimpleRemoteSpec + +class SimpleRemoteSpec extends MultiNodeSpec(SimpleRemoteMultiJvmSpec) with ImplicitSender with DefaultTimeout { - import SimpleRemoteMultiJvmSpec._ - val nodes = NrOfNodes - val tc = TestConductor(system) - "lookup remote actor" in { - Await.result(tc.startClient(4712), Duration.Inf) - tc.enter("begin") - log.info("### begin ok") - val actor = system.actorFor("akka://" + akkaSpec(0) + "/user/service-hello") - log.info("### actor lookup " + akkaSpec(0) + "/service-hello") - actor.isInstanceOf[RemoteActorRef] must be(true) - Await.result(actor ? "identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0)) - log.info("### actor ok") - tc.enter("done") + def initialParticipants = 2 + + runOn(master) { + system.actorOf(Props[SomeActor], "service-hello") + } + + "Remoting" must { + "lookup remote actor" in { + runOn(slave) { + val hello = system.actorFor(node(master) / "user" / "service-hello") + hello.isInstanceOf[RemoteActorRef] must be(true) + val masterAddress = testConductor.getAddressFor(master).await + (hello ? "identify").await.asInstanceOf[ActorRef].path.address must equal(masterAddress) + } + testConductor.enter("done") + } } } + diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala index d44beff605..2690378ef1 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala @@ -1,20 +1,20 @@ /** - * Copyright (C) 2009-2011 Typesafe Inc. + * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.remote.router -import akka.actor.{ Actor, ActorRef, Props } -import akka.remote.AkkaRemoteSpec -import akka.remote.AbstractRemoteActorMultiJvmSpec -import akka.remote.RemoteActorRef -import akka.remote.testconductor.TestConductor -import akka.testkit._ -import akka.dispatch.Await -import akka.pattern.ask -import akka.util.Duration +import com.typesafe.config.ConfigFactory -object DirectRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 2 +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.pattern.ask +import akka.remote.RemoteActorRef +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object DirectRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { @@ -23,68 +23,62 @@ object DirectRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSp } import com.typesafe.config.ConfigFactory - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = INFO - actor { - provider = akka.remote.RemoteActorRefProvider - deployment { - /service-hello.remote = %s - } - debug { - receive = on - fsm = on - } - } - remote { - transport = akka.remote.testconductor.TestConductorTransport - log-received-messages = on - log-sent-messages = on - } - testconductor { - host = localhost - port = 4712 - } - }""" format akkaURIs(1)) - - def nameConfig(n: Int) = ConfigFactory.parseString("akka.testconductor.name = node" + n).withFallback(nodeConfigs(n)) -} - -class DirectRoutedRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(DirectRoutedRemoteActorMultiJvmSpec.nameConfig(0)) { - import DirectRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - val tc = TestConductor(system) - - "A new remote actor configured with a Direct router" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" 
in { - Await.result(tc.startController(2), Duration.Inf) - tc.enter("begin", "done") + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.remote { + log-received-messages = on + log-sent-messages = on } - } + akka.actor.debug { + receive = on + fsm = on + } + """)) + + val master = role("master") + val slave = role("slave") + + nodeConfig(master, ConfigFactory.parseString(""" + akka.actor { + deployment { + /service-hello.remote = "akka://MultiNodeSpec@%s" + } + } + # FIXME When using NettyRemoteTransport instead of TestConductorTransport it works + # akka.remote.transport = "akka.remote.netty.NettyRemoteTransport" + """.format("localhost:2553"))) // FIXME is there a way to avoid hardcoding the host:port here? + + nodeConfig(slave, ConfigFactory.parseString(""" + akka.remote.netty.port = 2553 + """)) } -class DirectRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(DirectRoutedRemoteActorMultiJvmSpec.nameConfig(1)) +class DirectRoutedRemoteActorMultiJvmNode1 extends DirectRoutedRemoteActorSpec +class DirectRoutedRemoteActorMultiJvmNode2 extends DirectRoutedRemoteActorSpec + +class DirectRoutedRemoteActorSpec extends MultiNodeSpec(DirectRoutedRemoteActorMultiJvmSpec) with ImplicitSender with DefaultTimeout { - import DirectRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - val tc = TestConductor(system) + + def initialParticipants = 2 "A new remote actor configured with a Direct router" must { "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { - Await.result(tc.startClient(4712), Duration.Inf) - tc.enter("begin") - val actor = system.actorOf(Props[SomeActor], "service-hello") - actor.isInstanceOf[RemoteActorRef] must be(true) + runOn(master) { + val actor = system.actorOf(Props[SomeActor], "service-hello") + actor.isInstanceOf[RemoteActorRef] must be(true) - Await.result(actor ? 
"identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0)) + val slaveAddress = testConductor.getAddressFor(slave).await + (actor ? "identify").await.asInstanceOf[ActorRef].path.address must equal(slaveAddress) - // shut down the actor before we let the other node(s) shut down so we don't try to send - // "Terminate" to a shut down node - system.stop(actor) - tc.enter("done") + // shut down the actor before we let the other node(s) shut down so we don't try to send + // "Terminate" to a shut down node + system.stop(actor) + } + + testConductor.enter("done") } } } From b45cec3da4011311f2d01a95a066b227325818a6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 15:17:49 +0200 Subject: [PATCH 105/538] Preparing Akka Camel for bin compat --- .../akka/dispatch/AbstractDispatcher.scala | 2 +- .../main/scala/akka/dispatch/Mailbox.scala | 10 ++-- .../main/scala/akka/camel/Activation.scala | 28 ++++------- .../camel/ActorNotRegisteredException.scala | 2 +- .../akka/camel/ActorRouteDefinition.scala | 6 ++- .../src/main/scala/akka/camel/Camel.scala | 4 +- .../main/scala/akka/camel/CamelMessage.scala | 23 ++++----- .../src/main/scala/akka/camel/Consumer.scala | 6 +-- .../src/main/scala/akka/camel/Producer.scala | 40 ++++++++-------- .../camel/internal/ActivationMessage.scala | 2 +- .../camel/internal/ActivationTracker.scala | 10 ++-- .../camel/internal/CamelExchangeAdapter.scala | 14 +++--- .../akka/camel/internal/DefaultCamel.scala | 10 ++-- .../camel/internal/ProducerRegistry.scala | 16 ++++--- .../internal/component/ActorComponent.scala | 48 ++++++++----------- .../akka/camel/javaapi/UntypedConsumer.scala | 2 +- .../camel/javaapi/UntypedProducerActor.scala | 10 ++-- akka-camel/src/main/scala/akka/package.scala | 1 + 18 files changed, 109 insertions(+), 125 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 
9aec23b4c6..682e6ba4bf 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -214,7 +214,7 @@ private[akka] object MessageDispatcher { // dispatcher debugging helper using println (see below) // since this is a compile-time constant, scalac will elide code behind if (MessageDispatcher.debug) (RK checked with 2.9.1) - final val debug = false + final val debug = false // Deliberately without type ascription to make it a compile-time constant lazy val actors = new Index[MessageDispatcher, ActorRef](16, _ compareTo _) def printActors: Unit = if (debug) { for { diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 0f0bbad1ee..35b1e35012 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -31,15 +31,15 @@ private[akka] object Mailbox { */ // primary status: only first three - final val Open = 0 // _status is not initialized in AbstractMailbox, so default must be zero! - final val Suspended = 1 - final val Closed = 2 + final val Open = 0 // _status is not initialized in AbstractMailbox, so default must be zero! 
Deliberately without type ascription to make it a compile-time constant + final val Suspended = 1 // Deliberately without type ascription to make it a compile-time constant + final val Closed = 2 // Deliberately without type ascription to make it a compile-time constant // secondary status: Scheduled bit may be added to Open/Suspended - final val Scheduled = 4 + final val Scheduled = 4 // Deliberately without type ascription to make it a compile-time constant // mailbox debugging helper using println (see below) // since this is a compile-time constant, scalac will elide code behind if (Mailbox.debug) (RK checked with 2.9.1) - final val debug = false + final val debug = false // Deliberately without type ascription to make it a compile-time constant } /** diff --git a/akka-camel/src/main/scala/akka/camel/Activation.scala b/akka-camel/src/main/scala/akka/camel/Activation.scala index 56d116dca8..d01c990136 100644 --- a/akka-camel/src/main/scala/akka/camel/Activation.scala +++ b/akka-camel/src/main/scala/akka/camel/Activation.scala @@ -18,9 +18,9 @@ import akka.pattern._ trait Activation { import akka.dispatch.Await - def system: ActorSystem + def system: ActorSystem //FIXME Why is this here, what's it needed for and who should use it? - private val activationTracker = system.actorOf(Props[ActivationTracker], "camelActivationTracker") + private val activationTracker = system.actorOf(Props[ActivationTracker], "camelActivationTracker") //FIXME Why is this also top level? /** * Awaits for endpoint to be activated. It blocks until the endpoint is registered in camel context or timeout expires. @@ -29,13 +29,10 @@ trait Activation { * @throws akka.camel.ActivationTimeoutException if endpoint is not activated within timeout. 
* @return the activated ActorRef */ - def awaitActivation(endpoint: ActorRef, timeout: Duration): ActorRef = { - try { - Await.result(activationFutureFor(endpoint, timeout), timeout) - } catch { + def awaitActivation(endpoint: ActorRef, timeout: Duration): ActorRef = + try Await.result(activationFutureFor(endpoint, timeout), timeout) catch { case e: TimeoutException ⇒ throw new ActivationTimeoutException(endpoint, timeout) } - } /** * Awaits for endpoint to be de-activated. It is blocking until endpoint is unregistered in camel context or timeout expires. @@ -43,37 +40,32 @@ trait Activation { * @param timeout the timeout for the wait * @throws akka.camel.DeActivationTimeoutException if endpoint is not de-activated within timeout. */ - def awaitDeactivation(endpoint: ActorRef, timeout: Duration) { - try { - Await.result(deactivationFutureFor(endpoint, timeout), timeout) - } catch { + def awaitDeactivation(endpoint: ActorRef, timeout: Duration): Unit = + try Await.result(deactivationFutureFor(endpoint, timeout), timeout) catch { case e: TimeoutException ⇒ throw new DeActivationTimeoutException(endpoint, timeout) } - } /** * Similar to `awaitActivation` but returns a future instead. * @param endpoint the endpoint to be activated * @param timeout the timeout for the Future */ - def activationFutureFor(endpoint: ActorRef, timeout: Duration): Future[ActorRef] = { + def activationFutureFor(endpoint: ActorRef, timeout: Duration): Future[ActorRef] = (activationTracker.ask(AwaitActivation(endpoint))(Timeout(timeout))).map[ActorRef] { case EndpointActivated(_) ⇒ endpoint case EndpointFailedToActivate(_, cause) ⇒ throw cause } - } /** * Similar to awaitDeactivation but returns a future instead. 
* @param endpoint the endpoint to be deactivated * @param timeout the timeout of the Future */ - def deactivationFutureFor(endpoint: ActorRef, timeout: Duration): Future[Unit] = { + def deactivationFutureFor(endpoint: ActorRef, timeout: Duration): Future[Unit] = (activationTracker.ask(AwaitDeActivation(endpoint))(Timeout(timeout))).map[Unit] { case EndpointDeActivated(_) ⇒ () case EndpointFailedToDeActivate(_, cause) ⇒ throw cause } - } } /** @@ -82,7 +74,7 @@ trait Activation { * @param timeout the timeout */ class DeActivationTimeoutException(endpoint: ActorRef, timeout: Duration) extends TimeoutException { - override def getMessage = "Timed out after %s, while waiting for de-activation of %s" format (timeout, endpoint.path) + override def getMessage: String = "Timed out after %s, while waiting for de-activation of %s" format (timeout, endpoint.path) } /** @@ -91,5 +83,5 @@ class DeActivationTimeoutException(endpoint: ActorRef, timeout: Duration) extend * @param timeout the timeout */ class ActivationTimeoutException(endpoint: ActorRef, timeout: Duration) extends TimeoutException { - override def getMessage = "Timed out after %s, while waiting for activation of %s" format (timeout, endpoint.path) + override def getMessage: String = "Timed out after %s, while waiting for activation of %s" format (timeout, endpoint.path) } \ No newline at end of file diff --git a/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala b/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala index a468eeace5..7a303e47b3 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala @@ -6,5 +6,5 @@ package akka.camel * @author Martin Krasser */ class ActorNotRegisteredException(uri: String) extends RuntimeException { - override def getMessage = "Actor [%s] doesn't exist" format uri + override def getMessage: String = "Actor [%s] doesn't exist" format uri } 
diff --git a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala index f5175b90eb..6286edad87 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala @@ -29,7 +29,8 @@ class ActorRouteDefinition(definition: ProcessorDefinition[_]) { * @param actorRef the consumer with a default configuration. * @return the path to the actor, as a camel uri String */ - def to(actorRef: ActorRef) = definition.to(ActorEndpointPath(actorRef).toCamelPath()) + def to(actorRef: ActorRef) = //FIXME What is the return type of this? + definition.to(ActorEndpointPath(actorRef).toCamelPath()) /** * Sends the message to an ActorRef endpoint @@ -37,6 +38,7 @@ class ActorRouteDefinition(definition: ProcessorDefinition[_]) { * @param consumerConfig the configuration for the consumer * @return the path to the actor, as a camel uri String */ - def to(actorRef: ActorRef, consumerConfig: ConsumerConfig) = definition.to(ActorEndpointPath(actorRef).toCamelPath(consumerConfig)) + def to(actorRef: ActorRef, consumerConfig: ConsumerConfig) = //FIXME What is the return type of this? + definition.to(ActorEndpointPath(actorRef).toCamelPath(consumerConfig)) } diff --git a/akka-camel/src/main/scala/akka/camel/Camel.scala b/akka-camel/src/main/scala/akka/camel/Camel.scala index 4e96f038e5..72252212cf 100644 --- a/akka-camel/src/main/scala/akka/camel/Camel.scala +++ b/akka-camel/src/main/scala/akka/camel/Camel.scala @@ -50,13 +50,13 @@ object CamelExtension extends ExtensionId[Camel] with ExtensionIdProvider { /** * Creates a new instance of Camel and makes sure it gets stopped when the actor system is shutdown. 
*/ - def createExtension(system: ExtendedActorSystem) = { + override def createExtension(system: ExtendedActorSystem): Camel = { val camel = new DefaultCamel(system).start system.registerOnTermination(camel.shutdown()) camel } - def lookup(): ExtensionId[Camel] = CamelExtension + override def lookup(): ExtensionId[Camel] = CamelExtension override def get(system: ActorSystem): Camel = super.get(system) } \ No newline at end of file diff --git a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala index 2ea046b856..4f617c83a4 100644 --- a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala @@ -21,12 +21,12 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { def this(body: Any, headers: JMap[String, Any]) = this(body, headers.toMap) //for Java - override def toString = "CamelMessage(%s, %s)" format (body, headers) + override def toString: String = "CamelMessage(%s, %s)" format (body, headers) /** * Returns those headers from this message whose name is contained in names. */ - def headers(names: Set[String]): Map[String, Any] = headers.filterKeys(names contains _) + def headers(names: Set[String]): Map[String, Any] = headers filterKeys names /** * Returns those headers from this message whose name is contained in names. @@ -75,7 +75,7 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { /** * Creates a CamelMessage with a given body. */ - def withBody(body: Any) = CamelMessage(body, this.headers) + def withBody(body: Any): CamelMessage = CamelMessage(body, this.headers) /** * Creates a new CamelMessage with given headers. @@ -119,9 +119,9 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { * Creates a new CamelMessage where the header with given headerName is removed from * the existing headers. 
*/ - def withoutHeader(headerName: String) = copy(this.body, this.headers - headerName) + def withoutHeader(headerName: String): CamelMessage = copy(this.body, this.headers - headerName) - def copyContentTo(to: JCamelMessage) = { + def copyContentTo(to: JCamelMessage): Unit = { to.setBody(this.body) for ((name, value) ← this.headers) to.getHeaders.put(name, value.asInstanceOf[AnyRef]) } @@ -145,8 +145,7 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { * Java API * */ - def getBodyAs[T](clazz: Class[T], camelContext: CamelContext): T = - camelContext.getTypeConverter.mandatoryConvertTo[T](clazz, body) + def getBodyAs[T](clazz: Class[T], camelContext: CamelContext): T = camelContext.getTypeConverter.mandatoryConvertTo[T](clazz, body) /** * Creates a CamelMessage with current body converted to type T. @@ -184,7 +183,7 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { *

* Java API */ - def getHeaderAs[T](name: String, clazz: Class[T], camelContext: CamelContext) = headerAs[T](name)(Manifest.classType(clazz), camelContext).get + def getHeaderAs[T](name: String, clazz: Class[T], camelContext: CamelContext): T = headerAs[T](name)(Manifest.classType(clazz), camelContext).get } @@ -201,7 +200,7 @@ object CamelMessage { * so that it can be correlated with an asynchronous response. Messages send to Consumer * actors have this header already set. */ - val MessageExchangeId = "MessageExchangeId".intern + val MessageExchangeId = "MessageExchangeId".intern //Deliberately without type ascription to make it a constant /** * Creates a canonical form of the given message msg. If msg of type @@ -244,5 +243,7 @@ case object Ack { * message or Exchange.getOut message, depending on the exchange pattern. * */ -class AkkaCamelException private[akka] (cause: Throwable, val headers: Map[String, Any] = Map.empty) - extends AkkaException(cause.getMessage, cause) +class AkkaCamelException private[akka] (cause: Throwable, val headers: Map[String, Any]) + extends AkkaException(cause.getMessage, cause) { + def this(cause: Throwable) = this(cause, Map.empty) +} diff --git a/akka-camel/src/main/scala/akka/camel/Consumer.scala b/akka-camel/src/main/scala/akka/camel/Consumer.scala index 1d21ffbec7..0351ce39cb 100644 --- a/akka-camel/src/main/scala/akka/camel/Consumer.scala +++ b/akka-camel/src/main/scala/akka/camel/Consumer.scala @@ -31,7 +31,7 @@ trait ConsumerConfig { /** * How long the actor should wait for activation before it fails. */ - def activationTimeout: Duration = 10 seconds + def activationTimeout: Duration = 10 seconds // FIXME Should be configured in reference.conf /** * When endpoint is out-capable (can produce responses) replyTimeout is the maximum time @@ -39,14 +39,14 @@ trait ConsumerConfig { * This setting is used for out-capable, in-only, manually acknowledged communication. * When the blocking is set to Blocking replyTimeout is ignored. 
*/ - def replyTimeout: Duration = 1 minute + def replyTimeout: Duration = 1 minute // FIXME Should be configured in reference.conf /** * Determines whether one-way communications between an endpoint and this consumer actor * should be auto-acknowledged or application-acknowledged. * This flag has only effect when exchange is in-only. */ - def autoack: Boolean = true + def autoack: Boolean = true // FIXME Should be configured in reference.conf /** * The route definition handler for creating a custom route to this consumer instance. diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala index 33541d4611..5a7262a133 100644 --- a/akka-camel/src/main/scala/akka/camel/Producer.scala +++ b/akka-camel/src/main/scala/akka/camel/Producer.scala @@ -6,8 +6,9 @@ package akka.camel import akka.actor.Actor import internal.CamelExchangeAdapter -import org.apache.camel.{ Exchange, ExchangePattern, AsyncCallback } import akka.actor.Status.Failure +import org.apache.camel.{ Endpoint, Exchange, ExchangePattern, AsyncCallback } +import org.apache.camel.processor.SendProcessor /** * Support trait for producing messages to Camel endpoints. @@ -15,19 +16,19 @@ import akka.actor.Status.Failure * @author Martin Krasser */ trait ProducerSupport { this: Actor ⇒ - protected[this] implicit def camel = CamelExtension(context.system) + protected[this] implicit def camel = CamelExtension(context.system) // FIXME This is duplicated from Consumer, create a common base-trait? /** * camelContext implicit is useful when using advanced methods of CamelMessage. */ - protected[this] implicit def camelContext = camel.context + protected[this] implicit def camelContext = camel.context // FIXME This is duplicated from Consumer, create a common base-trait? 
- protected[this] lazy val (endpoint, processor) = camel.registerProducer(self, endpointUri) + protected[this] lazy val (endpoint: Endpoint, processor: SendProcessor) = camel.registerProducer(self, endpointUri) /** * CamelMessage headers to copy by default from request message to response-message. */ - private val headersToCopyDefault = Set(CamelMessage.MessageExchangeId) + private val headersToCopyDefault: Set[String] = Set(CamelMessage.MessageExchangeId) /** * If set to false (default), this producer expects a response message from the Camel endpoint. @@ -64,20 +65,21 @@ trait ProducerSupport { this: Actor ⇒ * @param pattern exchange pattern */ protected def produce(msg: Any, pattern: ExchangePattern): Unit = { - implicit def toExchangeAdapter(exchange: Exchange): CamelExchangeAdapter = new CamelExchangeAdapter(exchange) + // Need copies of sender reference here since the callback could be done + // later by another thread. + val producer = self + val originalSender = sender val cmsg = CamelMessage.canonicalize(msg) - val exchange = endpoint.createExchange(pattern) - exchange.setRequest(cmsg) - processor.process(exchange, new AsyncCallback { - val producer = self - // Need copies of sender reference here since the callback could be done - // later by another thread. - val originalSender = sender + val xchg = new CamelExchangeAdapter(endpoint.createExchange(pattern)) + + xchg.setRequest(cmsg) + + processor.process(xchg.exchange, new AsyncCallback { // Ignoring doneSync, sending back async uniformly. 
def done(doneSync: Boolean): Unit = producer.tell( - if (exchange.isFailed) exchange.toFailureResult(cmsg.headers(headersToCopy)) - else MessageResult(exchange.toResponseMessage(cmsg.headers(headersToCopy))), originalSender) + if (xchg.exchange.isFailed) xchg.toFailureResult(cmsg.headers(headersToCopy)) + else MessageResult(xchg.toResponseMessage(cmsg.headers(headersToCopy))), originalSender) }) } @@ -94,9 +96,7 @@ trait ProducerSupport { this: Actor ⇒ val e = new AkkaCamelException(res.cause, res.headers) routeResponse(Failure(e)) throw e - case msg ⇒ - val exchangePattern = if (oneway) ExchangePattern.InOnly else ExchangePattern.InOut - produce(transformOutgoingMessage(msg), exchangePattern) + case msg ⇒ produce(transformOutgoingMessage(msg), if (oneway) ExchangePattern.InOnly else ExchangePattern.InOut) } /** @@ -134,7 +134,7 @@ trait Producer extends ProducerSupport { this: Actor ⇒ * Default implementation of Actor.receive. Any messages received by this actors * will be produced to the endpoint specified by endpointUri. 
*/ - def receive = produce + def receive: Actor.Receive = produce } /** @@ -153,6 +153,6 @@ private case class FailureResult(cause: Throwable, headers: Map[String, Any] = M * @author Martin Krasser */ trait Oneway extends Producer { this: Actor ⇒ - override def oneway = true + override def oneway: Boolean = true } diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala index b8c3f42a47..bdd915ff70 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala @@ -20,7 +20,7 @@ private[camel] abstract class ActivationMessage(val actor: ActorRef) * */ private[camel] object ActivationMessage { - def unapply(msg: ActivationMessage): Option[ActorRef] = Some(msg.actor) + def unapply(msg: ActivationMessage): Option[ActorRef] = Option(msg.actor) } /** diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala index 0b93460be0..f5a87eff25 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala @@ -96,17 +96,15 @@ private[akka] final class ActivationTracker extends Actor with ActorLogging { /** * Subscribes self to messages of type ActivationMessage */ - override def preStart() { - context.system.eventStream.subscribe(self, classOf[ActivationMessage]) - } + override def preStart(): Unit = context.system.eventStream.subscribe(self, classOf[ActivationMessage]) override def receive = { case msg @ ActivationMessage(ref) ⇒ - val state = activations.getOrElseUpdate(ref, new ActivationStateMachine) - (state.receive orElse logStateWarning(ref))(msg) + (activations.getOrElseUpdate(ref, new ActivationStateMachine).receive orElse logStateWarning(ref))(msg) } - private[this] def 
logStateWarning(actorRef: ActorRef): Receive = { case msg ⇒ log.warning("Message [{}] not expected in current state of actor [{}]", msg, actorRef) } + private[this] def logStateWarning(actorRef: ActorRef): Receive = + { case msg ⇒ log.warning("Message [{}] not expected in current state of actor [{}]", msg, actorRef) } } /** diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala index 1f2d80e6df..5de9eb447d 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala @@ -16,34 +16,34 @@ import akka.camel.{ FailureResult, AkkaCamelException, CamelMessage } * * @author Martin Krasser */ -private[camel] class CamelExchangeAdapter(exchange: Exchange) { +private[camel] class CamelExchangeAdapter(val exchange: Exchange) { /** * Returns the exchange id */ - def getExchangeId = exchange.getExchangeId + def getExchangeId: String = exchange.getExchangeId /** * Returns if the exchange is out capable. */ - def isOutCapable = exchange.getPattern.isOutCapable + def isOutCapable: Boolean = exchange.getPattern.isOutCapable /** * Sets Exchange.getIn from the given CamelMessage object. */ - def setRequest(msg: CamelMessage) { msg.copyContentTo(request) } + def setRequest(msg: CamelMessage): Unit = msg.copyContentTo(request) /** * Depending on the exchange pattern, sets Exchange.getIn or Exchange.getOut from the given * CamelMessage object. If the exchange is out-capable then the Exchange.getOut is set, otherwise * Exchange.getIn. */ - def setResponse(msg: CamelMessage) { msg.copyContentTo(response) } + def setResponse(msg: CamelMessage): Unit = msg.copyContentTo(response) /** * Sets Exchange.getException from the given FailureResult message. Headers of the FailureResult message * are ignored. 
*/ - def setFailure(msg: FailureResult) { exchange.setException(msg.cause) } + def setFailure(msg: FailureResult): Unit = exchange.setException(msg.cause) /** * Creates an immutable CamelMessage object from Exchange.getIn so it can be used with Actors. @@ -120,7 +120,7 @@ private[camel] class CamelExchangeAdapter(exchange: Exchange) { */ def toResponseMessage(headers: Map[String, Any]): CamelMessage = CamelMessage.from(response, headers) - private def request = exchange.getIn + private def request: JCamelMessage = exchange.getIn private def response: JCamelMessage = ExchangeHelper.getResultMessage(exchange) diff --git a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala index 1754bb0073..2ac35fdec2 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala @@ -2,12 +2,12 @@ package akka.camel.internal import akka.actor.ActorSystem import component.{ DurationTypeConverter, ActorComponent } -import org.apache.camel.CamelContext import org.apache.camel.impl.DefaultCamelContext import scala.Predef._ import akka.event.Logging import akka.camel.Camel import akka.util.{ NonFatal, Duration } +import org.apache.camel.{ ProducerTemplate, CamelContext } /** * For internal use only. @@ -33,14 +33,14 @@ private[camel] class DefaultCamel(val system: ActorSystem) extends Camel { ctx } - lazy val template = context.createProducerTemplate() + lazy val template: ProducerTemplate = context.createProducerTemplate() /** * Starts camel and underlying camel context and template. * Only the creator of Camel should start and stop it. 
* @see akka.camel.DefaultCamel#stop() */ - def start = { + def start(): this.type = { context.start() try template.start() catch { case NonFatal(e) ⇒ context.stop(); throw e } log.debug("Started CamelContext[{}] for ActorSystem[{}]", context.getName, system.name) @@ -54,9 +54,9 @@ private[camel] class DefaultCamel(val system: ActorSystem) extends Camel { * * @see akka.camel.DefaultCamel#start() */ - def shutdown() { + def shutdown(): Unit = { try context.stop() finally { - try { template.stop() } catch { case NonFatal(e) ⇒ log.debug("Swallowing non-fatal exception [{}] on stopping Camel producer template", e) } + try template.stop() catch { case NonFatal(e) ⇒ log.debug("Swallowing non-fatal exception [{}] on stopping Camel producer template", e) } } log.debug("Stopped CamelContext[{}] for ActorSystem[{}]", context.getName, system.name) } diff --git a/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala b/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala index 03d130efe2..d338dbfdea 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala @@ -11,6 +11,8 @@ import akka.util.NonFatal * Watches the end of life of Producers. * Removes a Producer from the ProducerRegistry when it is Terminated, * which in turn stops the SendProcessor. + * + * INTERNAL API */ private class ProducerWatcher(registry: ProducerRegistry) extends Actor { override def receive = { @@ -19,6 +21,9 @@ private class ProducerWatcher(registry: ProducerRegistry) extends Actor { } } +/** + * INTERNAL API + */ private case class RegisterProducer(actorRef: ActorRef) /** @@ -27,14 +32,11 @@ private case class RegisterProducer(actorRef: ActorRef) * Every Producer needs an Endpoint and a SendProcessor * to produce messages over an Exchange. 
*/ -private[camel] trait ProducerRegistry { - this: Camel ⇒ +private[camel] trait ProducerRegistry { this: Camel ⇒ private val camelObjects = new ConcurrentHashMap[ActorRef, (Endpoint, SendProcessor)]() - private val watcher = system.actorOf(Props(new ProducerWatcher(this))) + private val watcher = system.actorOf(Props(new ProducerWatcher(this))) //FIXME should this really be top level? - private def registerWatch(actorRef: ActorRef) { - watcher ! RegisterProducer(actorRef) - } + private def registerWatch(actorRef: ActorRef): Unit = watcher ! RegisterProducer(actorRef) /** * For internal use only. @@ -77,7 +79,7 @@ private[camel] trait ProducerRegistry { case NonFatal(e) ⇒ { system.eventStream.publish(EndpointFailedToActivate(actorRef, e)) // can't return null to the producer actor, so blow up actor in initialization. - throw e + throw e //FIXME I'm not a huge fan of log-rethrow, either log or rethrow } } } diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index 7ec5919dc9..a8d7a59b61 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -35,10 +35,8 @@ private[camel] class ActorComponent(camel: Camel) extends DefaultComponent { /** * @see org.apache.camel.Component */ - def createEndpoint(uri: String, remaining: String, parameters: JMap[String, Object]): ActorEndpoint = { - val path = ActorEndpointPath.fromCamelPath(remaining) - new ActorEndpoint(uri, this, path, camel) - } + def createEndpoint(uri: String, remaining: String, parameters: JMap[String, Object]): ActorEndpoint = + new ActorEndpoint(uri, this, ActorEndpointPath.fromCamelPath(remaining), camel) } /** @@ -92,7 +90,7 @@ private[camel] class ActorEndpoint(uri: String, private[camel] trait ActorEndpointConfig { def path: ActorEndpointPath - @BeanProperty var 
replyTimeout: Duration = 1 minute + @BeanProperty var replyTimeout: Duration = 1 minute // FIXME default should be in config, not code /** * Whether to auto-acknowledge one-way message exchanges with (untyped) actors. This is @@ -117,7 +115,7 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex * Calls the asynchronous version of the method and waits for the result (blocking). * @param exchange the exchange to process */ - def process(exchange: Exchange) { processExchangeAdapter(new CamelExchangeAdapter(exchange)) } + def process(exchange: Exchange): Unit = processExchangeAdapter(new CamelExchangeAdapter(exchange)) /** * Processes the message exchange. the caller supports having the exchange asynchronously processed. @@ -129,13 +127,15 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex * The callback should therefore be careful of starting recursive loop. * @return (doneSync) true to continue execute synchronously, false to continue being executed asynchronously */ - def process(exchange: Exchange, callback: AsyncCallback): Boolean = { processExchangeAdapter(new CamelExchangeAdapter(exchange), callback) } + def process(exchange: Exchange, callback: AsyncCallback): Boolean = processExchangeAdapter(new CamelExchangeAdapter(exchange), callback) /** * For internal use only. Processes the [[akka.camel.internal.CamelExchangeAdapter]] * @param exchange the [[akka.camel.internal.CamelExchangeAdapter]] + * + * WARNING UNBOUNDED BLOCKING AWAITS */ - private[camel] def processExchangeAdapter(exchange: CamelExchangeAdapter) { + private[camel] def processExchangeAdapter(exchange: CamelExchangeAdapter): Unit = { val isDone = new CountDownLatch(1) processExchangeAdapter(exchange, new AsyncCallback { def done(doneSync: Boolean) { isDone.countDown() } }) isDone.await() // this should never wait forever as the process(exchange, callback) method guarantees that. 
@@ -151,10 +151,10 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex private[camel] def processExchangeAdapter(exchange: CamelExchangeAdapter, callback: AsyncCallback): Boolean = { // these notify methods are just a syntax sugar - def notifyDoneSynchronously[A](a: A = null) = callback.done(true) - def notifyDoneAsynchronously[A](a: A = null) = callback.done(false) + def notifyDoneSynchronously[A](a: A = null): Unit = callback.done(true) + def notifyDoneAsynchronously[A](a: A = null): Unit = callback.done(false) - def message = messageFor(exchange) + def message: CamelMessage = messageFor(exchange) if (exchange.isOutCapable) { //InOut sendAsync(message, onComplete = forwardResponseTo(exchange) andThen notifyDoneAsynchronously) @@ -186,39 +186,29 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex private def sendAsync(message: CamelMessage, onComplete: PartialFunction[Either[Throwable, Any], Unit]): Boolean = { try { - val actor = actorFor(endpoint.path) - val future = actor.ask(message)(new Timeout(endpoint.replyTimeout)) - future.onComplete(onComplete) + actorFor(endpoint.path).ask(message)(Timeout(endpoint.replyTimeout)).onComplete(onComplete) } catch { case NonFatal(e) ⇒ onComplete(Left(e)) } false // Done async } - private def fireAndForget(message: CamelMessage, exchange: CamelExchangeAdapter) { - try { - actorFor(endpoint.path) ! message - } catch { - case e ⇒ exchange.setFailure(new FailureResult(e)) - } - } + private def fireAndForget(message: CamelMessage, exchange: CamelExchangeAdapter): Unit = + try { actorFor(endpoint.path) ! 
message } catch { case NonFatal(e) ⇒ exchange.setFailure(new FailureResult(e)) } private[this] def actorFor(path: ActorEndpointPath): ActorRef = path.findActorIn(camel.system) getOrElse (throw new ActorNotRegisteredException(path.actorPath)) private[this] def messageFor(exchange: CamelExchangeAdapter) = exchange.toRequestMessage(Map(CamelMessage.MessageExchangeId -> exchange.getExchangeId)) - } /** * For internal use only. Converts Strings to [[akka.util.Duration]]s */ private[camel] object DurationTypeConverter extends TypeConverter { - def convertTo[T](`type`: Class[T], value: AnyRef) = { - Duration(value.toString).asInstanceOf[T] - } - def convertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef) = convertTo(`type`, value) + def convertTo[T](`type`: Class[T], value: AnyRef) = Duration(value.toString).asInstanceOf[T] //FIXME WTF + def convertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef): T = convertTo(`type`, value) def mandatoryConvertTo[T](`type`: Class[T], value: AnyRef) = convertTo(`type`, value) def mandatoryConvertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef) = convertTo(`type`, value) def toString(duration: Duration) = duration.toNanos + " nanos" @@ -243,15 +233,15 @@ private[camel] case class ActorEndpointPath private (actorPath: String) { * For internal use only. Companion of `ActorEndpointPath` */ private[camel] object ActorEndpointPath { - private val consumerConfig = new ConsumerConfig {} + private val consumerConfig: ConsumerConfig = new ConsumerConfig {} - def apply(actorRef: ActorRef) = new ActorEndpointPath(actorRef.path.toString) + def apply(actorRef: ActorRef): ActorEndpointPath = new ActorEndpointPath(actorRef.path.toString) /** * Creates an [[akka.camel.internal.component.ActorEndpointPath]] from the remaining part of the endpoint URI (the part after the scheme, without the parameters of the URI). 
* Expects the remaining part of the URI (the actor path) in a format: path:%s */ - def fromCamelPath(camelPath: String) = camelPath match { + def fromCamelPath(camelPath: String): ActorEndpointPath = camelPath match { case id if id startsWith "path:" ⇒ new ActorEndpointPath(id substring 5) case _ ⇒ throw new IllegalArgumentException("Invalid path: [%s] - should be path:" format camelPath) } diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala index 56f11831d0..a4671583bb 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala @@ -13,7 +13,7 @@ import org.apache.camel.{ ProducerTemplate, CamelContext } * class is meant to be used from Java. */ abstract class UntypedConsumerActor extends UntypedActor with Consumer { - final def endpointUri = getEndpointUri + final def endpointUri: String = getEndpointUri /** * Returns the Camel endpoint URI to consume messages from. 
diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala index c4d0a9c1a0..f44daf0725 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala @@ -40,16 +40,14 @@ abstract class UntypedProducerActor extends UntypedActor with ProducerSupport { final override def transformResponse(msg: Any): AnyRef = onTransformResponse(msg.asInstanceOf[AnyRef]) final override def routeResponse(msg: Any): Unit = onRouteResponse(msg.asInstanceOf[AnyRef]) - final override def endpointUri = getEndpointUri + final override def endpointUri: String = getEndpointUri - final override def oneway = isOneway + final override def oneway: Boolean = isOneway /** * Default implementation of UntypedActor.onReceive */ - def onReceive(message: Any) { - produce(message) - } + def onReceive(message: Any): Unit = produce(message) /** * Returns the Camel endpoint URI to produce messages to. @@ -61,7 +59,7 @@ abstract class UntypedProducerActor extends UntypedActor with ProducerSupport { * If set to true, this producer communicates with the Camel endpoint with an in-only message * exchange pattern (fire and forget). */ - def isOneway() = super.oneway + def isOneway(): Boolean = super.oneway /** * Returns the CamelContext. diff --git a/akka-camel/src/main/scala/akka/package.scala b/akka-camel/src/main/scala/akka/package.scala index 436d2fc1b3..10382d96ee 100644 --- a/akka-camel/src/main/scala/akka/package.scala +++ b/akka-camel/src/main/scala/akka/package.scala @@ -7,5 +7,6 @@ package akka import org.apache.camel.model.ProcessorDefinition package object camel { + //TODO Why do I exist? 
implicit def toActorRouteDefinition(definition: ProcessorDefinition[_]) = new ActorRouteDefinition(definition) } \ No newline at end of file From 975d73dea642df31592c54381175668658183520 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 15:46:29 +0200 Subject: [PATCH 106/538] Adding some docs to the Camel package object --- akka-camel/src/main/scala/akka/package.scala | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/akka-camel/src/main/scala/akka/package.scala b/akka-camel/src/main/scala/akka/package.scala index 10382d96ee..d3e60ae24f 100644 --- a/akka-camel/src/main/scala/akka/package.scala +++ b/akka-camel/src/main/scala/akka/package.scala @@ -7,6 +7,12 @@ package akka import org.apache.camel.model.ProcessorDefinition package object camel { - //TODO Why do I exist? + /** + * To allow using Actors with the Camel Route DSL: + * + * {{{ + * from("file://data/input/CamelConsumer").to(actor) + * }}} + */ implicit def toActorRouteDefinition(definition: ProcessorDefinition[_]) = new ActorRouteDefinition(definition) } \ No newline at end of file From 96c5c9392b0baf9ed9feefd9c885eb0dd6e4649e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 16:18:01 +0200 Subject: [PATCH 107/538] Preparing the FileBasedMailbox for binary compatibility --- .../akka/actor/mailbox/FileBasedMailbox.scala | 31 ++++++------------- .../mailbox/FileBasedMailboxSettings.scala | 28 ++++++++--------- 2 files changed, 23 insertions(+), 36 deletions(-) diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala index e57bfd57d2..c595fdcdd3 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala @@ -23,47 +23,37 @@ class 
FileBasedMailboxType(systemSettings: ActorSystem.Settings, config: Config) } class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailboxSettings) extends DurableMessageQueue(_owner) with DurableMessageSerialization { - - val log = Logging(system, "FileBasedMessageQueue") - - val queuePath = settings.QueuePath + // TODO Is it reasonable for all FileBasedMailboxes to have their own logger? + private val log = Logging(system, "FileBasedMessageQueue") private val queue = try { - (new java.io.File(queuePath)) match { + (new java.io.File(settings.QueuePath)) match { case dir if dir.exists && !dir.isDirectory ⇒ throw new IllegalStateException("Path already occupied by non-directory " + dir) case dir if !dir.exists ⇒ if (!dir.mkdirs() && !dir.isDirectory) throw new IllegalStateException("Creation of directory failed " + dir) case _ ⇒ //All good } - val queue = new filequeue.PersistentQueue(queuePath, name, settings, log) + val queue = new filequeue.PersistentQueue(settings.QueuePath, name, settings, log) queue.setup // replays journal queue.discardExpired queue } catch { - case e: Exception ⇒ + case NonFatal(e) ⇒ log.error(e, "Could not create a file-based mailbox") throw e } - def enqueue(receiver: ActorRef, envelope: Envelope) { - queue.add(serialize(envelope)) - } + def enqueue(receiver: ActorRef, envelope: Envelope): Unit = queue.add(serialize(envelope)) def dequeue(): Envelope = try { - val item = queue.remove - if (item.isDefined) { - queue.confirmRemove(item.get.xid) - deserialize(item.get.data) - } else null + queue.remove.map(item ⇒ { queue.confirmRemove(item.xid); deserialize(item.data) }).orNull } catch { - case e: java.util.NoSuchElementException ⇒ null - case e: Exception ⇒ + case _: java.util.NoSuchElementException ⇒ null + case NonFatal(e) ⇒ log.error(e, "Couldn't dequeue from file-based mailbox") throw e } - def numberOfMessages: Int = { - queue.length.toInt - } + def numberOfMessages: Int = queue.length.toInt def hasMessages: Boolean = 
numberOfMessages > 0 @@ -78,5 +68,4 @@ class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailbox } def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () - } diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala index 6511bf9e00..87dc25840f 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala @@ -11,24 +11,22 @@ import akka.actor.ActorSystem class FileBasedMailboxSettings(val systemSettings: ActorSystem.Settings, val userConfig: Config) extends DurableMailboxSettings { - def name = "file-based" + def name: String = "file-based" val config = initialize - import config._ - val QueuePath = getString("directory-path") - - val MaxItems = getInt("max-items") - val MaxSize = getBytes("max-size") - val MaxItemSize = getBytes("max-item-size") - val MaxAge = Duration(getMilliseconds("max-age"), MILLISECONDS) - val MaxJournalSize = getBytes("max-journal-size") - val MaxMemorySize = getBytes("max-memory-size") - val MaxJournalOverflow = getInt("max-journal-overflow") - val MaxJournalSizeAbsolute = getBytes("max-journal-size-absolute") - val DiscardOldWhenFull = getBoolean("discard-old-when-full") - val KeepJournal = getBoolean("keep-journal") - val SyncJournal = getBoolean("sync-journal") + val QueuePath: String = getString("directory-path") + val MaxItems: Int = getInt("max-items") + val MaxSize: Long = getBytes("max-size") + val MaxItemSize: Long = getBytes("max-item-size") + val MaxAge: Duration = Duration(getMilliseconds("max-age"), MILLISECONDS) + val MaxJournalSize: Long = getBytes("max-journal-size") + val MaxMemorySize: Long = getBytes("max-memory-size") + val MaxJournalOverflow: Int = 
getInt("max-journal-overflow") + val MaxJournalSizeAbsolute: Long = getBytes("max-journal-size-absolute") + val DiscardOldWhenFull: Boolean = getBoolean("discard-old-when-full") + val KeepJournal: Boolean = getBoolean("keep-journal") + val SyncJournal: Boolean = getBoolean("sync-journal") } \ No newline at end of file From ee4a7ce76a4cf245c2f86ef33951568fa7f5be33 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 16:27:30 +0200 Subject: [PATCH 108/538] Preparing mailbox-commons for bin compat --- .../akka/actor/mailbox/DurableMailbox.scala | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala index 0744215bae..b21878d00e 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala @@ -3,12 +3,11 @@ */ package akka.actor.mailbox -import akka.actor.{ ActorContext, ActorRef, ExtendedActorSystem } import akka.dispatch.{ Envelope, MessageQueue } import akka.remote.MessageSerializer import akka.remote.RemoteProtocol.{ ActorRefProtocol, RemoteMessageProtocol } import com.typesafe.config.Config -import akka.actor.ActorSystem +import akka.actor._ private[akka] object DurableExecutableMailboxConfig { val Name = "[\\.\\/\\$\\s]".r @@ -18,14 +17,21 @@ abstract class DurableMessageQueue(val owner: ActorContext) extends MessageQueue import DurableExecutableMailboxConfig._ def system: ExtendedActorSystem = owner.system.asInstanceOf[ExtendedActorSystem] - def ownerPath = owner.self.path - val ownerPathString = ownerPath.elements.mkString("/") - val name = "mailbox_" + Name.replaceAllIn(ownerPathString, "_") + def ownerPath: ActorPath = owner.self.path + val ownerPathString: 
String = ownerPath.elements.mkString("/") + val name: String = "mailbox_" + Name.replaceAllIn(ownerPathString, "_") } +/** + * DurableMessageSerialization can be mixed into a DurableMessageQueue and adds functionality + * to serialize and deserialize Envelopes (messages) + */ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ + /** + * Serializes the given Envelope into an Array of Bytes using an efficient serialization/deserialization strategy + */ def serialize(durableMessage: Envelope): Array[Byte] = { // It's alright to use ref.path.toString here @@ -42,6 +48,10 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ builder.build.toByteArray } + /** + * Deserializes an array of Bytes that were serialized using the DurableMessageSerialization.serialize method, + * into an Envelope. + */ def deserialize(bytes: Array[Byte]): Envelope = { def deserializeActorRef(refProtocol: ActorRefProtocol): ActorRef = system.actorFor(refProtocol.getPath) @@ -50,7 +60,7 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ val message = MessageSerializer.deserialize(system, durableMessage.getMessage) val sender = deserializeActorRef(durableMessage.getSender) - new Envelope(message, sender)(system) + Envelope(message, sender)(system) } } From 25ff921b42f607a00de842017245574cfabbcef8 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 16:31:13 +0200 Subject: [PATCH 109/538] Making most of the innards of microkernel.Main private --- akka-kernel/src/main/scala/akka/kernel/Main.scala | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/akka-kernel/src/main/scala/akka/kernel/Main.scala b/akka-kernel/src/main/scala/akka/kernel/Main.scala index ead2c28121..97ff625ab8 100644 --- a/akka-kernel/src/main/scala/akka/kernel/Main.scala +++ b/akka-kernel/src/main/scala/akka/kernel/Main.scala @@ -59,9 +59,9 @@ trait Bootable { * Main class for running the microkernel. 
*/ object Main { - val quiet = getBoolean("akka.kernel.quiet") + private val quiet = getBoolean("akka.kernel.quiet") - def log(s: String) = if (!quiet) println(s) + private def log(s: String) = if (!quiet) println(s) def main(args: Array[String]) = { if (args.isEmpty) { @@ -90,7 +90,7 @@ object Main { log("Successfully started Akka") } - def createClassLoader(): ClassLoader = { + private def createClassLoader(): ClassLoader = { if (ActorSystem.GlobalHome.isDefined) { val home = ActorSystem.GlobalHome.get val deploy = new File(home, "deploy") @@ -106,7 +106,7 @@ object Main { } } - def loadDeployJars(deploy: File): ClassLoader = { + private def loadDeployJars(deploy: File): ClassLoader = { val jars = deploy.listFiles.filter(_.getName.endsWith(".jar")) val nestedJars = jars flatMap { jar ⇒ @@ -122,7 +122,7 @@ object Main { new URLClassLoader(urls, Thread.currentThread.getContextClassLoader) } - def addShutdownHook(bootables: Seq[Bootable]): Unit = { + private def addShutdownHook(bootables: Seq[Bootable]): Unit = { Runtime.getRuntime.addShutdownHook(new Thread(new Runnable { def run = { log("") @@ -138,7 +138,7 @@ object Main { })) } - def banner = """ + private def banner = """ ============================================================================== ZZ: From 3ab02e95199cb3b386e8f25a9b50138500fefc8d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 23 May 2012 16:35:42 +0200 Subject: [PATCH 110/538] Convert NodeStartupSpec to MultiNodeSpec. 
See #1948 --- .../scala/akka/cluster/NodeStartupSpec.scala | 87 +++++++++++++++++++ .../scala/akka/cluster/NodeStartupSpec.scala | 84 ------------------ .../testconductor/TestConductorSpec.scala | 19 ++-- .../akka/remote/testkit/MultiNodeSpec.scala | 19 ++++ project/AkkaBuild.scala | 2 +- 5 files changed, 113 insertions(+), 98 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala new file mode 100644 index 0000000000..a0e0e19943 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -0,0 +1,87 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory + +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object NodeStartupMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded host:port + akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2601" + """))) + + nodeConfig(first, ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded port + akka.remote.netty.port=2601 + """)) + +} + +class NodeStartupMultiJvmNode1 extends NodeStartupSpec +class NodeStartupMultiJvmNode2 extends NodeStartupSpec + +class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender { + import NodeStartupMultiJvmSpec._ + + override def initialParticipants = 2 + + var firstNode: Cluster = _ + + runOn(first) { + firstNode = Cluster(system) + } + + "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { + + "be a singleton 
cluster when started up" taggedAs LongRunningTest in { + runOn(first) { + awaitCond(firstNode.isSingletonCluster) + } + + testConductor.enter("done") + } + + "be in 'Joining' phase when started up" taggedAs LongRunningTest in { + runOn(first) { + val members = firstNode.latestGossip.members + members.size must be(1) + val firstAddress = testConductor.getAddressFor(first).await + val joiningMember = members find (_.address == firstAddress) + joiningMember must not be (None) + joiningMember.get.status must be(MemberStatus.Joining) + } + + testConductor.enter("done") + } + } + + "A second cluster node with a 'node-to-join' config defined" must { + "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { + runOn(second) { + // start cluster on second node, and join + Cluster(system) + } + + runOn(first) { + val secondAddress = testConductor.getAddressFor(second).await + awaitCond { + firstNode.latestGossip.members.exists { member ⇒ + member.address == secondAddress && member.status == MemberStatus.Up + } + } + firstNode.latestGossip.members.size must be(2) + } + + testConductor.enter("done") + } + } + +} diff --git a/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala deleted file mode 100644 index 711a0552b4..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/NodeStartupSpec.scala +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.cluster - -import java.net.InetSocketAddress - -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ -import akka.util.duration._ - -import com.typesafe.config._ - -class NodeStartupSpec extends ClusterSpec with ImplicitSender { - val portPrefix = 8 - - var node0: Cluster = _ - var node1: Cluster = _ - var system0: ActorSystemImpl = _ - var system1: ActorSystemImpl = _ - - try { - "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - system0 = ActorSystem("system0", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d550 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote0 = system0.provider.asInstanceOf[RemoteActorRefProvider] - node0 = Cluster(system0) - - "be a singleton cluster when started up" taggedAs LongRunningTest in { - Thread.sleep(1.seconds.dilated.toMillis) - node0.isSingletonCluster must be(true) - } - - "be in 'Joining' phase when started up" taggedAs LongRunningTest in { - val members = node0.latestGossip.members - val joiningMember = members find (_.address.port.get == 550.withPortPrefix) - joiningMember must be('defined) - joiningMember.get.status must be(MemberStatus.Joining) - } - } - - "A second cluster node with a 'node-to-join' config defined" must { - "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { - system1 = ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d551 - cluster.node-to-join = "akka://system0@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider] - node1 = Cluster(system1) - - Thread.sleep(10.seconds.dilated.toMillis) // give 
enough time for node1 to JOIN node0 and leader to move him to UP - val members = node0.latestGossip.members - val joiningMember = members find (_.address.port.get == 551.withPortPrefix) - joiningMember must be('defined) - joiningMember.get.status must be(MemberStatus.Up) - } - } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) - } - - override def atTermination() { - if (node0 ne null) node0.shutdown() - if (system0 ne null) system0.shutdown() - - if (node1 ne null) node1.shutdown() - if (system1 ne null) system1.shutdown() - } -} diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index e311fa0023..5ff19a806b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ package akka.remote.testconductor import akka.remote.AkkaRemoteSpec @@ -16,18 +19,8 @@ import akka.remote.testkit.MultiNodeSpec import akka.remote.testkit.MultiNodeConfig object TestConductorMultiJvmSpec extends MultiNodeConfig { - commonConfig(ConfigFactory.parseString(""" - # akka.loglevel = DEBUG - akka.remote { - log-received-messages = on - log-sent-messages = on - } - akka.actor.debug { - receive = on - fsm = on - } - """)) - + commonConfig(debugConfig(on = true)) + val master = role("master") val slave = role("slave") } @@ -36,7 +29,7 @@ class TestConductorMultiJvmNode1 extends TestConductorSpec class TestConductorMultiJvmNode2 extends TestConductorSpec class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with ImplicitSender { - + import TestConductorMultiJvmSpec._ def initialParticipants = 2 diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 92e65247fb..3822a1f529 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -37,6 +37,25 @@ abstract class MultiNodeConfig { */ def nodeConfig(role: RoleName, config: Config): Unit = _nodeConf += role -> config + /** + * Include for verbose debug logging + * @param on when `true` debug Config is returned, otherwise empty Config + */ + def debugConfig(on: Boolean): Config = + if (on) + ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.remote { + log-received-messages = on + log-sent-messages = on + } + akka.actor.debug { + receive = on + fsm = on + } + """) + else ConfigFactory.empty + /** * Construct a RoleName and return it, to be used as an identifier in the * test. 
Registration of a role name creates a role which then needs to be diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 206e32e1f3..f884894d52 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -114,7 +114,7 @@ object AkkaBuild extends Build { lazy val cluster = Project( id = "akka-cluster", base = file("akka-cluster"), - dependencies = Seq(remote, remote % "test->test", testkit % "test->test"), + dependencies = Seq(remote, remoteTests % "compile;test->test;multi-jvm->multi-jvm", testkit % "test->test"), settings = defaultSettings ++ multiJvmSettings ++ Seq( libraryDependencies ++= Dependencies.cluster, // disable parallel tests From 3e416a4b2c2b9a3dcc3d8723f62b38cf95142166 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 23 May 2012 17:39:34 +0200 Subject: [PATCH 111/538] Add convergence verification to NodeStartupSpec. See #1948 --- .../src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index a0e0e19943..10b5945ee5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -67,7 +67,8 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { runOn(second) { // start cluster on second node, and join - Cluster(system) + val secondNode = Cluster(system) + awaitCond(secondNode.convergence.isDefined) } runOn(first) { @@ -78,6 +79,7 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic } } firstNode.latestGossip.members.size must be(2) + awaitCond(firstNode.convergence.isDefined) } testConductor.enter("done") From a934d7f29d92c2ff0268b20c1c07e0b519e213e7 Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Wed, 23 May 2012 22:52:43 +0200 Subject: [PATCH 112/538] Make sure normal tests are run even if multi-jvm tests fail --- project/AkkaBuild.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 206e32e1f3..62c81bd3eb 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -89,7 +89,7 @@ object AkkaBuild extends Build { jvmOptions in MultiJvm := { if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil }, - test in Test <<= (test in Test) dependsOn (test in MultiJvm) + test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -107,7 +107,7 @@ object AkkaBuild extends Build { jvmOptions in MultiJvm := { if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil }, - test in Test <<= (test in Test) dependsOn (test in MultiJvm) + test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -126,7 +126,7 @@ object AkkaBuild extends Build { jvmOptions in MultiJvm := { if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil }, - test in Test <<= (test in Test) dependsOn (test in MultiJvm) + test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) From b9bfcaf0563896b1b6c212ae3219a7f359fad9c0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 23 May 2012 23:59:55 +0200 Subject: [PATCH 113/538] Fixing the DurationConverterTest and the DurationConverter to conform to Camel Spec --- .../internal/component/ActorComponent.scala | 17 +++++++++++---- .../component/DurationConverterTest.scala | 21 ++++++++++++------- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index a8d7a59b61..d52f74f2f6 
100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -207,11 +207,20 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex * For internal use only. Converts Strings to [[akka.util.Duration]]s */ private[camel] object DurationTypeConverter extends TypeConverter { - def convertTo[T](`type`: Class[T], value: AnyRef) = Duration(value.toString).asInstanceOf[T] //FIXME WTF + override def convertTo[T](`type`: Class[T], value: AnyRef): T = `type`.cast(try { + val d = Duration(value.toString) + if (`type`.isInstance(d)) d else null + } catch { + case NonFatal(_) ⇒ null + }) + def convertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef): T = convertTo(`type`, value) - def mandatoryConvertTo[T](`type`: Class[T], value: AnyRef) = convertTo(`type`, value) - def mandatoryConvertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef) = convertTo(`type`, value) - def toString(duration: Duration) = duration.toNanos + " nanos" + def mandatoryConvertTo[T](`type`: Class[T], value: AnyRef): T = convertTo(`type`, value) match { + case null ⇒ throw new NoTypeConversionAvailableException(value, `type`) + case some ⇒ some + } + def mandatoryConvertTo[T](`type`: Class[T], exchange: Exchange, value: AnyRef): T = mandatoryConvertTo(`type`, value) + def toString(duration: Duration): String = duration.toNanos + " nanos" } /** diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala index 3787a9f46f..53729a0b6f 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala @@ -8,8 +8,9 @@ import org.scalatest.matchers.MustMatchers import akka.util.duration._ import akka.util.Duration 
import org.scalatest.WordSpec +import org.apache.camel.NoTypeConversionAvailableException -class DurationConverterTest extends WordSpec with MustMatchers { +class DurationConverterSpec extends WordSpec with MustMatchers { import DurationTypeConverter._ "DurationTypeConverter must convert '10 nanos'" in { @@ -21,15 +22,19 @@ class DurationConverterTest extends WordSpec with MustMatchers { } "DurationTypeConverter must throw if invalid format" in { - intercept[Exception] { - convertTo(classOf[Duration], "abc nanos") must be(10 nanos) - } + convertTo(classOf[Duration], "abc nanos") must be === null + + intercept[NoTypeConversionAvailableException] { + mandatoryConvertTo(classOf[Duration], "abc nanos") must be(10 nanos) + }.getValue must be === "abc nanos" } - "DurationTypeConverter must throw if doesn't end with nanos" in { - intercept[Exception] { - convertTo(classOf[Duration], "10233") must be(10 nanos) - } + "DurationTypeConverter must throw if doesn't end with time unit" in { + convertTo(classOf[Duration], "10233") must be === null + + intercept[NoTypeConversionAvailableException] { + mandatoryConvertTo(classOf[Duration], "10233") must be(10 nanos) + }.getValue must be === "10233" } } From 2198462ed2b4afadaf0514196c0120bb1d8d491c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 01:16:50 +0200 Subject: [PATCH 114/538] Binary compat for SLF4J module --- .../akka/event/slf4j/Slf4jEventHandler.scala | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jEventHandler.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jEventHandler.scala index 966f57b938..9e2fefffd9 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jEventHandler.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jEventHandler.scala @@ -18,12 +18,29 @@ trait SLF4JLogging { lazy val log = Logger(this.getClass.getName) } +/** + * Logger is a factory for obtaining SLF4J-Loggers + */ object Logger { + /** + 
* @param logger - which logger + * @return a Logger that corresponds for the given logger name + */ def apply(logger: String): SLFLogger = SLFLoggerFactory getLogger logger + + /** + * @param logClass - the class to log for + * @param logSource - the textual representation of the source of this log stream + * @return a Logger for the specified parameters + */ def apply(logClass: Class[_], logSource: String): SLFLogger = logClass match { case c if c == classOf[DummyClassForStringSources] ⇒ apply(logSource) case _ ⇒ SLFLoggerFactory getLogger logClass } + + /** + * Returns the SLF4J Root Logger + */ def root: SLFLogger = apply(SLFLogger.ROOT_LOGGER_NAME) } From 568c02d1580b1ee707bacbb2f49fbfbba5710ffb Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 01:38:41 +0200 Subject: [PATCH 115/538] Preparing transactors for binary compat --- .../scala/akka/transactor/Coordinated.scala | 27 ++++++++++++------- .../scala/akka/transactor/Transactor.scala | 10 ++++--- .../akka/transactor/TransactorExtension.scala | 4 +-- 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/akka-transactor/src/main/scala/akka/transactor/Coordinated.scala b/akka-transactor/src/main/scala/akka/transactor/Coordinated.scala index 2463f0e436..792824be24 100644 --- a/akka-transactor/src/main/scala/akka/transactor/Coordinated.scala +++ b/akka-transactor/src/main/scala/akka/transactor/Coordinated.scala @@ -12,19 +12,29 @@ import java.util.concurrent.Callable /** * Akka-specific exception for coordinated transactions. */ -class CoordinatedTransactionException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null); +class CoordinatedTransactionException(message: String, cause: Throwable) extends AkkaException(message, cause) { + def this(msg: String) = this(msg, null) } /** * Coordinated transactions across actors. 
*/ object Coordinated { - def apply(message: Any = null)(implicit timeout: Timeout) = new Coordinated(message, createInitialMember(timeout)) + /** + * Creates a new Coordinated with the given message and Timeout + * @param message - the message which will be coordinated + * @param timeout - the timeout for the coordination + * @return a new Coordinated + */ + def apply(message: Any = null)(implicit timeout: Timeout): Coordinated = + new Coordinated(message, CommitBarrier(timeout.duration.toMillis).addMember()) + + /** + * @param c - a Coordinated to be unapplied + * @return the message associated with the given Coordinated + */ def unapply(c: Coordinated): Option[Any] = Some(c.message) - - def createInitialMember(timeout: Timeout) = CommitBarrier(timeout.duration.toMillis).addMember() } /** @@ -91,16 +101,15 @@ class Coordinated(val message: Any, member: CommitBarrier.Member) { // Java API constructors - def this(message: Any, timeout: Timeout) = this(message, Coordinated.createInitialMember(timeout)) + def this(message: Any, timeout: Timeout) = this(message, CommitBarrier(timeout.duration.toMillis).addMember()) - def this(timeout: Timeout) = this(null, Coordinated.createInitialMember(timeout)) + def this(timeout: Timeout) = this(null, timeout) /** * Create a new Coordinated object and increment the number of members by one. * Use this method to ''pass on'' the coordination. */ - def apply(msg: Any): Coordinated = - new Coordinated(msg, member.commitBarrier.addMember()) + def apply(msg: Any): Coordinated = new Coordinated(msg, member.commitBarrier.addMember()) /** * Create a new Coordinated object but *do not* increment the number of members by one. 
diff --git a/akka-transactor/src/main/scala/akka/transactor/Transactor.scala b/akka-transactor/src/main/scala/akka/transactor/Transactor.scala index 6e390a6623..fd802e1f21 100644 --- a/akka-transactor/src/main/scala/akka/transactor/Transactor.scala +++ b/akka-transactor/src/main/scala/akka/transactor/Transactor.scala @@ -176,8 +176,10 @@ trait Transactor extends Actor { /** * Default catch-all for the different Receive methods. */ - def doNothing: Receive = new Receive { - def apply(any: Any) = {} - def isDefinedAt(any: Any) = false - } + def doNothing: Receive = EmptyReceive +} + +private[akka] object EmptyReceive extends PartialFunction[Any, Unit] { + def apply(any: Any): Unit = () + def isDefinedAt(any: Any): Boolean = false } diff --git a/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala b/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala index 96aea8904c..85cb8c46fd 100644 --- a/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala +++ b/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala @@ -15,11 +15,11 @@ import java.util.concurrent.TimeUnit.MILLISECONDS */ object TransactorExtension extends ExtensionId[TransactorSettings] with ExtensionIdProvider { override def get(system: ActorSystem): TransactorSettings = super.get(system) - override def lookup = TransactorExtension + override def lookup: TransactorExtension.type = TransactorExtension override def createExtension(system: ExtendedActorSystem): TransactorSettings = new TransactorSettings(system.settings.config) } class TransactorSettings(val config: Config) extends Extension { import config._ - val CoordinatedTimeout = Timeout(Duration(getMilliseconds("akka.transactor.coordinated-timeout"), MILLISECONDS)) + val CoordinatedTimeout: Timeout = Timeout(Duration(getMilliseconds("akka.transactor.coordinated-timeout"), MILLISECONDS)) } \ No newline at end of file From d3511d25a497e7b0df7a945ef86e18fa9a18b965 Mon Sep 17 00:00:00 
2001 From: Patrik Nordwall Date: Thu, 24 May 2012 08:32:50 +0200 Subject: [PATCH 116/538] Placed the barrier in after instead. See #1948 --- .../scala/akka/cluster/NodeStartupSpec.scala | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 10b5945ee5..6807d7032a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -4,7 +4,7 @@ package akka.cluster import com.typesafe.config.ConfigFactory - +import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -13,43 +13,48 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - # FIXME get rid of this hardcoded host:port - akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2601" - """))) + commonConfig(debugConfig(on = false)) nodeConfig(first, ConfigFactory.parseString(""" # FIXME get rid of this hardcoded port akka.remote.netty.port=2601 """)) + nodeConfig(second, ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded host:port + akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2601" + """)) + } class NodeStartupMultiJvmNode1 extends NodeStartupSpec class NodeStartupMultiJvmNode2 extends NodeStartupSpec -class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender { +class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender with BeforeAndAfter { import NodeStartupMultiJvmSpec._ override def initialParticipants = 2 var firstNode: Cluster = _ + after { + testConductor.enter("after") + } + runOn(first) { firstNode = Cluster(system) } "A 
first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - "be a singleton cluster when started up" taggedAs LongRunningTest in { + "be a singleton cluster when started up" in { runOn(first) { awaitCond(firstNode.isSingletonCluster) + firstNode.convergence must be(None) } - - testConductor.enter("done") } - "be in 'Joining' phase when started up" taggedAs LongRunningTest in { + "be in 'Joining' phase when started up" in { runOn(first) { val members = firstNode.latestGossip.members members.size must be(1) @@ -58,13 +63,11 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic joiningMember must not be (None) joiningMember.get.status must be(MemberStatus.Joining) } - - testConductor.enter("done") } } "A second cluster node with a 'node-to-join' config defined" must { - "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { + "join the other node cluster when sending a Join command" in { runOn(second) { // start cluster on second node, and join val secondNode = Cluster(system) @@ -81,8 +84,6 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic firstNode.latestGossip.members.size must be(2) awaitCond(firstNode.convergence.isDefined) } - - testConductor.enter("done") } } From db4730978f77a0a22c8c2b1a5f8cd8d954bfddb8 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 08:51:36 +0200 Subject: [PATCH 117/538] FIXME singletonCluster should reach convergence. 
See #2117 --- .../src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 6807d7032a..694d4ac57d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -50,7 +50,8 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic "be a singleton cluster when started up" in { runOn(first) { awaitCond(firstNode.isSingletonCluster) - firstNode.convergence must be(None) + // FIXME #2117 singletonCluster should reach convergence + //awaitCond(firstNode.convergence.isDefined) } } From be87215fc6c9b11de736c803c9b90d9c1b8c5d23 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 10:00:35 +0200 Subject: [PATCH 118/538] Pass timefactor to multi-jvm process --- project/AkkaBuild.scala | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 9e0a32ebce..fd18e931c7 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -86,9 +86,7 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), - jvmOptions in MultiJvm := { - if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil - }, + jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -104,9 +102,7 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := Seq("-r", 
"org.scalatest.akka.QuietReporter"), - jvmOptions in MultiJvm := { - if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil - }, + jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -123,9 +119,7 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), - jvmOptions in MultiJvm := { - if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil - }, + jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) ) configs (MultiJvm) @@ -304,6 +298,14 @@ object AkkaBuild extends Build { val defaultExcludedTags = Seq("timing", "long-running") + val defaultMultiJvmOptions: Seq[String] = { + (System.getProperty("akka.test.timefactor") match { + case null => Nil + case x => List("-Dakka.test.timefactor=" + x) + }) ::: + (if (getBoolean("sbt.log.noformat")) List("-Dakka.test.nocolor=true") else Nil) + } + lazy val defaultSettings = baseSettings ++ formatSettings ++ Seq( resolvers += "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/", From e05481604734ea287c9185c1c499673f91a86d72 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 10:56:32 +0200 Subject: [PATCH 119/538] make failure injection idempotent - instead of creating local top-level actors per pipeline, just create one system actor through which everything is sent - this enables storing settings (like what to throttle how) within this actor and applying settings when connections come up later - it also gets rid of the blocking actor creation from NetworkFailureInjector, fixing the dead-lock - moved also the ServerFSMs to be children of the Controller - all actors have proper names now for easier debugging --- 
.../src/main/scala/akka/event/Logging.scala | 2 +- .../akka/remote/testconductor/Conductor.scala | 37 +- .../akka/remote/testconductor/Extension.scala | 4 +- .../NetworkFailureInjector.scala | 475 ++++++++++-------- .../akka/remote/testconductor/Player.scala | 16 +- .../scala/akka/remote/SimpleRemoteSpec.scala | 2 +- .../DirectRoutedRemoteActorMultiJvmSpec.scala | 2 +- .../testconductor/TestConductorSpec.scala | 2 +- .../main/scala/akka/remote/netty/Client.scala | 5 +- .../remote/netty/NettyRemoteSupport.scala | 5 + .../main/scala/akka/remote/netty/Server.scala | 16 +- 11 files changed, 325 insertions(+), 241 deletions(-) diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 93019318dd..b044fd09ab 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -601,7 +601,7 @@ object Logging { import java.text.SimpleDateFormat import java.util.Date - val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.S") + val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.SSS") def timestamp = dateFormat.format(new Date) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 6c26fcaae2..5e467fde19 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -27,12 +27,26 @@ import akka.actor.SupervisorStrategy import java.util.concurrent.ConcurrentHashMap import akka.actor.Status -sealed trait Direction +sealed trait Direction { + def includes(other: Direction): Boolean +} object Direction { - case object Send extends Direction - case object Receive extends Direction - case object Both extends Direction + case object Send extends Direction { + override def includes(other: Direction): Boolean = other match { + case 
Send ⇒ true + case _ ⇒ false + } + } + case object Receive extends Direction { + override def includes(other: Direction): Boolean = other match { + case Receive ⇒ true + case _ ⇒ false + } + } + case object Both extends Direction { + override def includes(other: Direction): Boolean = true + } } /** @@ -202,14 +216,15 @@ trait Conductor { this: TestConductorExt ⇒ * purpose is to dispatch incoming messages to the right ServerFSM actor. There is * one shared instance of this class for all connections accepted by one Controller. */ -class ConductorHandler(system: ActorSystem, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { +class ConductorHandler(_createTimeout: Timeout, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { + implicit val createTimeout = _createTimeout val clients = new ConcurrentHashMap[Channel, ActorRef]() override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel log.debug("connection from {}", getAddrString(channel)) - val fsm = system.actorOf(Props(new ServerFSM(controller, channel))) + val fsm: ActorRef = Await.result(controller ? 
Controller.CreateServerFSM(channel) mapTo, Duration.Inf) clients.put(channel, fsm) } @@ -321,6 +336,7 @@ object Controller { case class ClientDisconnected(name: RoleName) case object GetNodes case object GetSockAddr + case class CreateServerFSM(channel: Channel) case class NodeInfo(name: RoleName, addr: Address, fsm: ActorRef) } @@ -336,7 +352,7 @@ class Controller(private var initialParticipants: Int, controllerPort: InetSocke val settings = TestConductor().Settings val connection = RemoteConnection(Server, controllerPort, - new ConductorHandler(context.system, self, Logging(context.system, "ConductorHandler"))) + new ConductorHandler(settings.QueryTimeout, self, Logging(context.system, "ConductorHandler"))) /* * Supervision of the BarrierCoordinator means to catch all his bad emotions @@ -363,8 +379,15 @@ class Controller(private var initialParticipants: Int, controllerPort: InetSocke // map keeping unanswered queries for node addresses (enqueued upon GetAddress, serviced upon NodeInfo) var addrInterest = Map[RoleName, Set[ActorRef]]() + val generation = Iterator from 1 override def receive = LoggingReceive { + case CreateServerFSM(channel) ⇒ + val (ip, port) = channel.getRemoteAddress match { + case s: InetSocketAddress ⇒ (s.getHostString, s.getPort) + } + val name = ip + ":" + port + "-server" + generation.next + sender ! 
context.actorOf(Props(new ServerFSM(self, channel)), name) case c @ NodeInfo(name, addr, fsm) ⇒ barrier forward c if (nodes contains name) { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala index 7f6b576128..09ffd7319f 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala @@ -10,6 +10,8 @@ import java.util.concurrent.TimeUnit.MILLISECONDS import akka.actor.ActorRef import java.util.concurrent.ConcurrentHashMap import akka.actor.Address +import akka.actor.ActorSystemImpl +import akka.actor.Props /** * Access to the [[akka.remote.testconductor.TestConductorExt]] extension: @@ -50,6 +52,6 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport val address = transport.address - val failureInjectors = new ConcurrentHashMap[Address, FailureInjector] + val failureInjector = system.asInstanceOf[ActorSystemImpl].systemActorOf(Props[FailureInjector], "FailureInjector") } \ No newline at end of file diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index b853523979..1fcb1a7bf9 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -4,236 +4,303 @@ package akka.remote.testconductor import java.net.InetSocketAddress -import scala.collection.immutable.Queue -import org.jboss.netty.buffer.ChannelBuffer -import org.jboss.netty.channel.ChannelState.BOUND -import org.jboss.netty.channel.ChannelState.OPEN -import org.jboss.netty.channel.Channel -import 
org.jboss.netty.channel.ChannelEvent -import org.jboss.netty.channel.ChannelHandlerContext -import org.jboss.netty.channel.ChannelStateEvent -import org.jboss.netty.channel.MessageEvent -import akka.actor.FSM -import akka.actor.Actor -import akka.util.duration.doubleToDurationDouble -import akka.util.Index -import akka.actor.Address -import akka.actor.ActorSystem -import akka.actor.Props -import akka.actor.ActorRef -import akka.event.Logging -import org.jboss.netty.channel.SimpleChannelHandler -import scala.annotation.tailrec -import akka.util.Duration -import akka.actor.LoggingFSM -import org.jboss.netty.channel.Channels -import org.jboss.netty.channel.ChannelFuture -import org.jboss.netty.channel.ChannelFutureListener -import org.jboss.netty.channel.ChannelFuture -case class FailureInjector(sender: ActorRef, receiver: ActorRef) { - def refs(dir: Direction) = dir match { - case Direction.Send ⇒ Seq(sender) - case Direction.Receive ⇒ Seq(receiver) - case Direction.Both ⇒ Seq(sender, receiver) +import scala.annotation.tailrec +import scala.collection.immutable.Queue + +import org.jboss.netty.buffer.ChannelBuffer +import org.jboss.netty.channel.{ SimpleChannelHandler, MessageEvent, Channels, ChannelStateEvent, ChannelHandlerContext, ChannelFutureListener, ChannelFuture } + +import akka.actor.{ Props, LoggingFSM, Address, ActorSystem, ActorRef, ActorLogging, Actor, FSM } +import akka.event.Logging +import akka.remote.netty.ChannelAddress +import akka.util.Duration +import akka.util.duration._ + +class FailureInjector extends Actor with ActorLogging { + import ThrottleActor._ + import NetworkFailureInjector._ + + case class ChannelSettings( + ctx: Option[ChannelHandlerContext] = None, + throttleSend: Option[SetRate] = None, + throttleReceive: Option[SetRate] = None) + case class Injectors(sender: ActorRef, receiver: ActorRef) + + var channels = Map[ChannelHandlerContext, Injectors]() + var settings = Map[Address, ChannelSettings]() + var generation = Iterator from 1 + 
+ /** + * Only for a NEW ctx, start ThrottleActors, prime them and update all maps. + */ + def ingestContextAddress(ctx: ChannelHandlerContext, addr: Address): Injectors = { + val gen = generation.next + val name = addr.host.get + ":" + addr.port.get + val thrSend = context.actorOf(Props(new ThrottleActor(ctx)), name + "-snd" + gen) + val thrRecv = context.actorOf(Props(new ThrottleActor(ctx)), name + "-rcv" + gen) + val injectors = Injectors(thrSend, thrRecv) + channels += ctx -> injectors + settings += addr -> (settings get addr map { + case c @ ChannelSettings(prevCtx, ts, tr) ⇒ + ts foreach (thrSend ! _) + tr foreach (thrRecv ! _) + prevCtx match { + case Some(p) ⇒ log.warning("installing context {} instead of {} for address {}", ctx, p, addr) + case None ⇒ // okay + } + c.copy(ctx = Some(ctx)) + } getOrElse ChannelSettings(Some(ctx))) + injectors + } + + /** + * Retrieve target settings, also if they were sketchy before (i.e. no system name) + */ + def retrieveTargetSettings(target: Address): Option[ChannelSettings] = { + settings get target orElse { + val host = target.host + val port = target.port + settings find { + case (Address("akka", "", `host`, `port`), s) ⇒ true + case _ ⇒ false + } map { + case (_, s) ⇒ settings += target -> s; s + } + } + } + + def receive = { + case RemoveContext(ctx) ⇒ + channels get ctx foreach { inj ⇒ + context stop inj.sender + context stop inj.receiver + } + channels -= ctx + settings ++= settings collect { case (addr, c @ ChannelSettings(Some(`ctx`), _, _)) ⇒ (addr, c.copy(ctx = None)) } + case ThrottleMsg(target, dir, rateMBit) ⇒ + val setting = retrieveTargetSettings(target) + settings += target -> ((setting getOrElse ChannelSettings() match { + case cs @ ChannelSettings(ctx, _, _) if dir includes Direction.Send ⇒ + ctx foreach (c ⇒ channels get c foreach (_.sender ! 
SetRate(rateMBit))) + cs.copy(throttleSend = Some(SetRate(rateMBit))) + case x ⇒ x + }) match { + case cs @ ChannelSettings(ctx, _, _) if dir includes Direction.Receive ⇒ + ctx foreach (c ⇒ channels get c foreach (_.receiver ! SetRate(rateMBit))) + cs.copy(throttleReceive = Some(SetRate(rateMBit))) + case x ⇒ x + }) + sender ! "ok" + case DisconnectMsg(target, abort) ⇒ + retrieveTargetSettings(target) foreach { + case ChannelSettings(Some(ctx), _, _) ⇒ + val ch = ctx.getChannel + if (abort) { + ch.getConfig.setOption("soLinger", 0) + log.info("aborting connection {}", ch) + } else log.info("closing connection {}", ch) + ch.close + case _ ⇒ log.debug("no connection to {} to close or abort", target) + } + sender ! "ok" + case s @ Send(ctx, direction, future, msg) ⇒ + channels get ctx match { + case Some(Injectors(snd, rcv)) ⇒ + if (direction includes Direction.Send) snd ! s + if (direction includes Direction.Receive) rcv ! s + case None ⇒ + val (ipaddr, ip, port) = ctx.getChannel.getRemoteAddress match { + case s: InetSocketAddress ⇒ (s.getAddress, s.getAddress.getHostAddress, s.getPort) + } + val addr = ChannelAddress.get(ctx.getChannel) orElse { + settings collect { case (a @ Address("akka", _, Some(`ip`), Some(`port`)), _) ⇒ a } headOption + } orElse { + val name = ipaddr.getHostName + if (name == ip) None + else settings collect { case (a @ Address("akka", _, Some(`name`), Some(`port`)), _) ⇒ a } headOption + } getOrElse Address("akka", "", ip, port) // this will not match later requests directly, but be picked up by retrieveTargetSettings + val inj = ingestContextAddress(ctx, addr) + if (direction includes Direction.Send) inj.sender ! s + if (direction includes Direction.Receive) inj.receiver ! 
s + } } } object NetworkFailureInjector { - case class SetRate(rateMBit: Float) - case class Disconnect(abort: Boolean) + case class RemoveContext(ctx: ChannelHandlerContext) } class NetworkFailureInjector(system: ActorSystem) extends SimpleChannelHandler { + import NetworkFailureInjector._ - val log = Logging(system, "FailureInjector") + private val log = Logging(system, "FailureInjector") - // everything goes via these Throttle actors to enable easy steering - private val sender = system.actorOf(Props(new Throttle(Direction.Send))) - private val receiver = system.actorOf(Props(new Throttle(Direction.Receive))) - - private val packetSplitThreshold = TestConductor(system).Settings.PacketSplitThreshold - - /* - * State, Data and Messages for the internal Throttle actor - */ - sealed private trait State - private case object PassThrough extends State - private case object Throttle extends State - private case object Blackhole extends State - - private case class Data(lastSent: Long, rateMBit: Float, queue: Queue[Send]) - - private case class Send(ctx: ChannelHandlerContext, future: Option[ChannelFuture], msg: AnyRef) - private case class SetContext(ctx: ChannelHandlerContext) - private case object Tick - - private class Throttle(dir: Direction) extends Actor with LoggingFSM[State, Data] { - import FSM._ - - var channelContext: ChannelHandlerContext = _ - - startWith(PassThrough, Data(0, -1, Queue())) - - when(PassThrough) { - case Event(s @ Send(_, _, msg), _) ⇒ - log.debug("sending msg (PassThrough): {}", msg) - send(s) - stay - } - - when(Throttle) { - case Event(s: Send, data @ Data(_, _, Queue())) ⇒ - stay using sendThrottled(data.copy(lastSent = System.nanoTime, queue = Queue(s))) - case Event(s: Send, data) ⇒ - stay using sendThrottled(data.copy(queue = data.queue.enqueue(s))) - case Event(Tick, data) ⇒ - stay using sendThrottled(data) - } - - onTransition { - case Throttle -> PassThrough ⇒ - for (s ← stateData.queue) { - log.debug("sending msg (Transition): 
{}", s.msg) - send(s) - } - cancelTimer("send") - case Throttle -> Blackhole ⇒ - cancelTimer("send") - } - - when(Blackhole) { - case Event(Send(_, _, msg), _) ⇒ - log.debug("dropping msg {}", msg) - stay - } - - whenUnhandled { - case Event(NetworkFailureInjector.SetRate(rate), d) ⇒ - sender ! "ok" - if (rate > 0) { - goto(Throttle) using d.copy(lastSent = System.nanoTime, rateMBit = rate, queue = Queue()) - } else if (rate == 0) { - goto(Blackhole) - } else { - goto(PassThrough) - } - case Event(SetContext(ctx), _) ⇒ channelContext = ctx; stay - case Event(NetworkFailureInjector.Disconnect(abort), Data(ctx, _, _)) ⇒ - sender ! "ok" - // TODO implement abort - channelContext.getChannel.disconnect() - stay - } - - initialize - - private def sendThrottled(d: Data): Data = { - val (data, toSend, toTick) = schedule(d) - for (s ← toSend) { - log.debug("sending msg (Tick): {}", s.msg) - send(s) - } - if (!timerActive_?("send")) - for (time ← toTick) { - log.debug("scheduling next Tick in {}", time) - setTimer("send", Tick, time, false) - } - data - } - - private def send(s: Send): Unit = dir match { - case Direction.Send ⇒ Channels.write(s.ctx, s.future getOrElse Channels.future(s.ctx.getChannel), s.msg) - case Direction.Receive ⇒ Channels.fireMessageReceived(s.ctx, s.msg) - case _ ⇒ - } - - private def schedule(d: Data): (Data, Seq[Send], Option[Duration]) = { - val now = System.nanoTime - @tailrec def rec(d: Data, toSend: Seq[Send]): (Data, Seq[Send], Option[Duration]) = { - if (d.queue.isEmpty) (d, toSend, None) - else { - val timeForPacket = d.lastSent + (1000 * size(d.queue.head.msg) / d.rateMBit).toLong - if (timeForPacket <= now) rec(Data(timeForPacket, d.rateMBit, d.queue.tail), toSend :+ d.queue.head) - else { - val splitThreshold = d.lastSent + packetSplitThreshold.toNanos - if (now < splitThreshold) (d, toSend, Some((timeForPacket - now).nanos min (splitThreshold - now).nanos)) - else { - val microsToSend = (now - d.lastSent) / 1000 - val (s1, s2) = 
split(d.queue.head, (microsToSend * d.rateMBit / 8).toInt) - (d.copy(queue = s2 +: d.queue.tail), toSend :+ s1, Some((timeForPacket - now).nanos min packetSplitThreshold)) - } - } - } - } - rec(d, Seq()) - } - - private def split(s: Send, bytes: Int): (Send, Send) = { - s.msg match { - case buf: ChannelBuffer ⇒ - val f = s.future map { f ⇒ - val newF = Channels.future(s.ctx.getChannel) - newF.addListener(new ChannelFutureListener { - def operationComplete(future: ChannelFuture) { - if (future.isCancelled) f.cancel() - else future.getCause match { - case null ⇒ - case thr ⇒ f.setFailure(thr) - } - } - }) - newF - } - val b = buf.slice() - b.writerIndex(b.readerIndex + bytes) - buf.readerIndex(buf.readerIndex + bytes) - (Send(s.ctx, f, b), Send(s.ctx, s.future, buf)) - } - } - - private def size(msg: AnyRef) = msg match { - case b: ChannelBuffer ⇒ b.readableBytes() * 8 - case _ ⇒ throw new UnsupportedOperationException("NetworkFailureInjector only supports ChannelBuffer messages") - } - } - - private var remote: Option[Address] = None - - override def messageReceived(ctx: ChannelHandlerContext, msg: MessageEvent) { - log.debug("upstream(queued): {}", msg) - receiver ! Send(ctx, Option(msg.getFuture), msg.getMessage) - } + private val conductor = TestConductor(system) + private var announced = false override def channelConnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { state.getValue match { case a: InetSocketAddress ⇒ val addr = Address("akka", "", a.getHostName, a.getPort) log.debug("connected to {}", addr) - TestConductor(system).failureInjectors.put(addr, FailureInjector(sender, receiver)) match { - case null ⇒ // okay - case fi ⇒ system.log.error("{} already registered for address {}", fi, addr) - } - remote = Some(addr) - sender ! 
SetContext(ctx) case x ⇒ throw new IllegalArgumentException("unknown address type: " + x) } } override def channelDisconnected(ctx: ChannelHandlerContext, state: ChannelStateEvent) { - log.debug("disconnected from {}", remote) - remote = remote flatMap { addr ⇒ - TestConductor(system).failureInjectors.remove(addr) - system.stop(sender) - system.stop(receiver) - None - } + log.debug("disconnected from {}", state.getChannel) + conductor.failureInjector ! RemoveContext(ctx) + } + + override def messageReceived(ctx: ChannelHandlerContext, msg: MessageEvent) { + log.debug("upstream(queued): {}", msg) + conductor.failureInjector ! ThrottleActor.Send(ctx, Direction.Receive, Option(msg.getFuture), msg.getMessage) } override def writeRequested(ctx: ChannelHandlerContext, msg: MessageEvent) { log.debug("downstream(queued): {}", msg) - sender ! Send(ctx, Option(msg.getFuture), msg.getMessage) + conductor.failureInjector ! ThrottleActor.Send(ctx, Direction.Send, Option(msg.getFuture), msg.getMessage) } } +private[akka] object ThrottleActor { + sealed trait State + case object PassThrough extends State + case object Throttle extends State + case object Blackhole extends State + + case class Data(lastSent: Long, rateMBit: Float, queue: Queue[Send]) + + case class Send(ctx: ChannelHandlerContext, direction: Direction, future: Option[ChannelFuture], msg: AnyRef) + case class SetRate(rateMBit: Float) + case object Tick +} + +private[akka] class ThrottleActor(channelContext: ChannelHandlerContext) + extends Actor with LoggingFSM[ThrottleActor.State, ThrottleActor.Data] { + + import ThrottleActor._ + import FSM._ + + private val packetSplitThreshold = TestConductor(context.system).Settings.PacketSplitThreshold + + startWith(PassThrough, Data(0, -1, Queue())) + + when(PassThrough) { + case Event(s @ Send(_, _, _, msg), _) ⇒ + log.debug("sending msg (PassThrough): {}", msg) + send(s) + stay + } + + when(Throttle) { + case Event(s: Send, data @ Data(_, _, Queue())) ⇒ + stay using 
sendThrottled(data.copy(lastSent = System.nanoTime, queue = Queue(s))) + case Event(s: Send, data) ⇒ + stay using sendThrottled(data.copy(queue = data.queue.enqueue(s))) + case Event(Tick, data) ⇒ + stay using sendThrottled(data) + } + + onTransition { + case Throttle -> PassThrough ⇒ + for (s ← stateData.queue) { + log.debug("sending msg (Transition): {}", s.msg) + send(s) + } + cancelTimer("send") + case Throttle -> Blackhole ⇒ + cancelTimer("send") + } + + when(Blackhole) { + case Event(Send(_, _, _, msg), _) ⇒ + log.debug("dropping msg {}", msg) + stay + } + + whenUnhandled { + case Event(SetRate(rate), d) ⇒ + if (rate > 0) { + goto(Throttle) using d.copy(lastSent = System.nanoTime, rateMBit = rate, queue = Queue()) + } else if (rate == 0) { + goto(Blackhole) + } else { + goto(PassThrough) + } + } + + initialize + + private def sendThrottled(d: Data): Data = { + val (data, toSend, toTick) = schedule(d) + for (s ← toSend) { + log.debug("sending msg (Tick): {}", s.msg) + send(s) + } + if (!timerActive_?("send")) + for (time ← toTick) { + log.debug("scheduling next Tick in {}", time) + setTimer("send", Tick, time, false) + } + data + } + + private def send(s: Send): Unit = s.direction match { + case Direction.Send ⇒ Channels.write(s.ctx, s.future getOrElse Channels.future(s.ctx.getChannel), s.msg) + case Direction.Receive ⇒ Channels.fireMessageReceived(s.ctx, s.msg) + case _ ⇒ + } + + private def schedule(d: Data): (Data, Seq[Send], Option[Duration]) = { + val now = System.nanoTime + @tailrec def rec(d: Data, toSend: Seq[Send]): (Data, Seq[Send], Option[Duration]) = { + if (d.queue.isEmpty) (d, toSend, None) + else { + val timeForPacket = d.lastSent + (1000 * size(d.queue.head.msg) / d.rateMBit).toLong + if (timeForPacket <= now) rec(Data(timeForPacket, d.rateMBit, d.queue.tail), toSend :+ d.queue.head) + else { + val splitThreshold = d.lastSent + packetSplitThreshold.toNanos + if (now < splitThreshold) (d, toSend, Some((timeForPacket - now).nanos min 
(splitThreshold - now).nanos)) + else { + val microsToSend = (now - d.lastSent) / 1000 + val (s1, s2) = split(d.queue.head, (microsToSend * d.rateMBit / 8).toInt) + (d.copy(queue = s2 +: d.queue.tail), toSend :+ s1, Some((timeForPacket - now).nanos min packetSplitThreshold)) + } + } + } + } + rec(d, Seq()) + } + + private def split(s: Send, bytes: Int): (Send, Send) = { + s.msg match { + case buf: ChannelBuffer ⇒ + val f = s.future map { f ⇒ + val newF = Channels.future(s.ctx.getChannel) + newF.addListener(new ChannelFutureListener { + def operationComplete(future: ChannelFuture) { + if (future.isCancelled) f.cancel() + else future.getCause match { + case null ⇒ + case thr ⇒ f.setFailure(thr) + } + } + }) + newF + } + val b = buf.slice() + b.writerIndex(b.readerIndex + bytes) + buf.readerIndex(buf.readerIndex + bytes) + (Send(s.ctx, s.direction, f, b), Send(s.ctx, s.direction, s.future, buf)) + } + } + + private def size(msg: AnyRef) = msg match { + case b: ChannelBuffer ⇒ b.readableBytes() * 8 + case _ ⇒ throw new UnsupportedOperationException("NetworkFailureInjector only supports ChannelBuffer messages") + } +} + diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index 10434007e1..254b1a7d45 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -195,21 +195,13 @@ class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor log.warning("did not expect {}", op) } stay using d.copy(runningOp = None) - case ThrottleMsg(target, dir, rate) ⇒ + case t: ThrottleMsg ⇒ import settings.QueryTimeout - import context.dispatcher - TestConductor().failureInjectors.get(target.copy(system = "")) match { - case null ⇒ log.warning("cannot throttle unknown address {}", target) - case inj ⇒ - Future.sequence(inj.refs(dir) map (_ ? 
NetworkFailureInjector.SetRate(rate))) map (_ ⇒ ToServer(Done)) pipeTo self - } + TestConductor().failureInjector ? t map (_ ⇒ ToServer(Done)) pipeTo self stay - case DisconnectMsg(target, abort) ⇒ + case d: DisconnectMsg ⇒ import settings.QueryTimeout - TestConductor().failureInjectors.get(target.copy(system = "")) match { - case null ⇒ log.warning("cannot disconnect unknown address {}", target) - case inj ⇒ inj.sender ? NetworkFailureInjector.Disconnect(abort) map (_ ⇒ ToServer(Done)) pipeTo self - } + TestConductor().failureInjector ? d map (_ ⇒ ToServer(Done)) pipeTo self stay case TerminateMsg(exit) ⇒ System.exit(exit) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala index 70cca7c34b..9f9257c69b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala @@ -22,7 +22,7 @@ object SimpleRemoteMultiJvmSpec extends MultiNodeConfig { } commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + # akka.loglevel = DEBUG akka.remote { log-received-messages = on log-sent-messages = on diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala index 2690378ef1..e15027cc73 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala @@ -24,7 +24,7 @@ object DirectRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { import com.typesafe.config.ConfigFactory commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + # akka.loglevel = DEBUG akka.remote { log-received-messages = on log-sent-messages = on diff --git 
a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 087aac55c7..e311fa0023 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -17,7 +17,7 @@ import akka.remote.testkit.MultiNodeConfig object TestConductorMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + # akka.loglevel = DEBUG akka.remote { log-received-messages = on log-sent-messages = on diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 4735132534..9091864348 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -173,6 +173,7 @@ class ActiveRemoteClient private[akka] ( notifyListeners(RemoteClientError(connection.getCause, netty, remoteAddress)) false } else { + ChannelAddress.set(connection.getChannel, Some(remoteAddress)) sendSecureCookie(connection) notifyListeners(RemoteClientStarted(netty, remoteAddress)) true @@ -196,8 +197,10 @@ class ActiveRemoteClient private[akka] ( notifyListeners(RemoteClientShutdown(netty, remoteAddress)) try { - if ((connection ne null) && (connection.getChannel ne null)) + if ((connection ne null) && (connection.getChannel ne null)) { + ChannelAddress.remove(connection.getChannel) connection.getChannel.close() + } } finally { try { if (openChannels ne null) openChannels.close.awaitUninterruptibly() diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 60c2ac6097..84ee77bbbe 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ 
b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -29,6 +29,11 @@ import org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder import org.jboss.netty.handler.timeout.IdleStateHandler import org.jboss.netty.channel.ChannelPipelineFactory import org.jboss.netty.handler.execution.ExecutionHandler +import org.jboss.netty.channel.ChannelLocal + +object ChannelAddress extends ChannelLocal[Option[Address]] { + override def initialValue(ch: Channel): Option[Address] = None +} /** * Provides the implementation of the Netty remote support diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 87993f783d..5903dacd83 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -102,19 +102,11 @@ class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends Si } } -object ChannelLocalSystem extends ChannelLocal[ActorSystemImpl] { - override def initialValue(ch: Channel): ActorSystemImpl = null -} - @ChannelHandler.Sharable class RemoteServerHandler( val openChannels: ChannelGroup, val netty: NettyRemoteTransport) extends SimpleChannelUpstreamHandler { - val channelAddress = new ChannelLocal[Option[Address]](false) { - override def initialValue(channel: Channel) = None - } - import netty.settings private var addressToSet = true @@ -138,16 +130,16 @@ class RemoteServerHandler( override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = () override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { - netty.notifyListeners(RemoteServerClientDisconnected(netty, channelAddress.get(ctx.getChannel))) + netty.notifyListeners(RemoteServerClientDisconnected(netty, ChannelAddress.get(ctx.getChannel))) } override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { - val address = 
channelAddress.get(ctx.getChannel) + val address = ChannelAddress.get(ctx.getChannel) if (address.isDefined && settings.UsePassiveConnections) netty.unbindClient(address.get) netty.notifyListeners(RemoteServerClientClosed(netty, address)) - channelAddress.remove(ctx.getChannel) + ChannelAddress.remove(ctx.getChannel) } override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = try { @@ -161,7 +153,7 @@ class RemoteServerHandler( case CommandType.CONNECT ⇒ val origin = instruction.getOrigin val inbound = Address("akka", origin.getSystem, origin.getHostname, origin.getPort) - channelAddress.set(event.getChannel, Option(inbound)) + ChannelAddress.set(event.getChannel, Option(inbound)) //If we want to reuse the inbound connections as outbound we need to get busy if (settings.UsePassiveConnections) From 6bed19c6c9bc0e3581dfa077247cbfb21c3bcafd Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 11:44:39 +0200 Subject: [PATCH 120/538] Binary compat work for Akka Remote --- .../src/main/scala/akka/actor/ActorRef.scala | 5 +- .../scala/akka/remote/MessageSerializer.scala | 11 +- .../akka/remote/RemoteActorRefProvider.scala | 51 ++-- .../scala/akka/remote/RemoteDeployer.scala | 4 +- .../scala/akka/remote/RemoteSettings.scala | 15 +- .../scala/akka/remote/RemoteTransport.scala | 232 +++++++++++------- .../main/scala/akka/remote/netty/Client.scala | 21 +- .../remote/netty/NettyRemoteSupport.scala | 11 +- .../main/scala/akka/remote/netty/Server.scala | 8 +- .../scala/akka/remote/netty/Settings.scala | 38 +-- 10 files changed, 219 insertions(+), 177 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 46e3440f95..460bd02076 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -154,17 +154,16 @@ trait ScalaActorRef { ref: ActorRef ⇒ } -//FIXME should ActorScope be private[akka], me thinks 
so - √ /** * All ActorRefs have a scope which describes where they live. Since it is * often necessary to distinguish between local and non-local references, this * is the only method provided on the scope. */ -trait ActorRefScope { +private[akka] trait ActorRefScope { def isLocal: Boolean } -trait LocalRef extends ActorRefScope { +private[akka] trait LocalRef extends ActorRefScope { final def isLocal = true } diff --git a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala index 65777d49ca..6bd61dd812 100644 --- a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala @@ -9,8 +9,14 @@ import com.google.protobuf.ByteString import akka.actor.ExtendedActorSystem import akka.serialization.SerializationExtension -object MessageSerializer { +/** + * MessageSerializer is a helper for serialize and deserialize messages + */ +private[akka] object MessageSerializer { + /** + * Uses Akka Serialization for the specified ActorSystem to transform the given MessageProtocol to a message + */ def deserialize(system: ExtendedActorSystem, messageProtocol: MessageProtocol): AnyRef = { val clazz = if (messageProtocol.hasMessageManifest) { @@ -24,6 +30,9 @@ object MessageSerializer { } } + /** + * Uses Akka Serialization for the specified ActorSystem to transform the given message to a MessageProtocol + */ def serialize(system: ExtendedActorSystem, message: AnyRef): MessageProtocol = { val s = SerializationExtension(system) val serializer = s.findSerializerFor(message) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index bf55edf24c..a12c5f5578 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -4,34 +4,26 @@ package akka.remote 
-import akka.AkkaException import akka.actor._ import akka.dispatch._ import akka.event.{ DeathWatch, Logging, LoggingAdapter } import akka.event.EventStream -import akka.ConfigurationException -import java.util.concurrent.{ TimeoutException } -import com.typesafe.config.Config import akka.serialization.Serialization import akka.serialization.SerializationExtension -class RemoteException(msg: String) extends AkkaException(msg) -class RemoteCommunicationException(msg: String) extends RemoteException(msg) -class RemoteConnectionException(msg: String) extends RemoteException(msg) - /** * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. */ -class RemoteActorRefProvider( +private[akka] class RemoteActorRefProvider( val systemName: String, val settings: ActorSystem.Settings, val eventStream: EventStream, val scheduler: Scheduler, val dynamicAccess: DynamicAccess) extends ActorRefProvider { - val remoteSettings = new RemoteSettings(settings.config, systemName) + val remoteSettings: RemoteSettings = new RemoteSettings(settings.config, systemName) - val deployer = new RemoteDeployer(settings, dynamicAccess) + val deployer: RemoteDeployer = new RemoteDeployer(settings, dynamicAccess) private val local = new LocalActorRefProvider(systemName, settings, eventStream, scheduler, deployer) @@ -39,21 +31,21 @@ class RemoteActorRefProvider( private var _log = local.log def log: LoggingAdapter = _log - def rootPath = local.rootPath - def deadLetters = local.deadLetters + override def rootPath: ActorPath = local.rootPath + override def deadLetters: InternalActorRef = local.deadLetters - val deathWatch = new RemoteDeathWatch(local.deathWatch, this) + override val deathWatch: DeathWatch = new RemoteDeathWatch(local.deathWatch, this) // these are only available after init() - def rootGuardian = local.rootGuardian - def guardian = local.guardian - def systemGuardian = local.systemGuardian - def terminationFuture = 
local.terminationFuture - def dispatcher = local.dispatcher - def registerTempActor(actorRef: InternalActorRef, path: ActorPath) = local.registerTempActor(actorRef, path) - def unregisterTempActor(path: ActorPath) = local.unregisterTempActor(path) - def tempPath() = local.tempPath() - def tempContainer = local.tempContainer + override def rootGuardian: InternalActorRef = local.rootGuardian + override def guardian: InternalActorRef = local.guardian + override def systemGuardian: InternalActorRef = local.systemGuardian + override def terminationFuture: Promise[Unit] = local.terminationFuture + override def dispatcher: MessageDispatcher = local.dispatcher + override def registerTempActor(actorRef: InternalActorRef, path: ActorPath): Unit = local.registerTempActor(actorRef, path) + override def unregisterTempActor(path: ActorPath): Unit = local.unregisterTempActor(path) + override def tempPath(): ActorPath = local.tempPath() + override def tempContainer: VirtualPathContainer = local.tempContainer @volatile private var _transport: RemoteTransport = _ @@ -61,13 +53,13 @@ class RemoteActorRefProvider( @volatile private var _serialization: Serialization = _ - def serialization = _serialization + def serialization: Serialization = _serialization @volatile private var _remoteDaemon: InternalActorRef = _ - def remoteDaemon = _remoteDaemon + def remoteDaemon: InternalActorRef = _remoteDaemon - def init(system: ActorSystemImpl) { + def init(system: ActorSystemImpl): Unit = { local.init(system) _remoteDaemon = new RemoteSystemDaemon(system, rootPath / "remote", rootGuardian, log) @@ -193,7 +185,7 @@ class RemoteActorRefProvider( /** * Using (checking out) actor on a specific node. 
*/ - def useActorOnNode(path: ActorPath, props: Props, deploy: Deploy, supervisor: ActorRef) { + def useActorOnNode(path: ActorPath, props: Props, deploy: Deploy, supervisor: ActorRef): Unit = { log.debug("[{}] Instantiating Remote Actor [{}]", rootPath, path) // we don’t wait for the ACK, because the remote end will process this command before any other message to the new actor @@ -211,7 +203,7 @@ class RemoteActorRefProvider( } } -trait RemoteRef extends ActorRefScope { +private[akka] trait RemoteRef extends ActorRefScope { final def isLocal = false } @@ -256,7 +248,7 @@ private[akka] class RemoteActorRef private[akka] ( private def writeReplace(): AnyRef = SerializedActorRef(path) } -class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { +private[akka] class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { override def subscribe(watcher: ActorRef, watched: ActorRef): Boolean = watched match { case r: RemoteRef ⇒ @@ -275,5 +267,4 @@ class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvid override def unsubscribe(watcher: ActorRef): Unit = local.unsubscribe(watcher) override def publish(event: Terminated): Unit = local.publish(event) - } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index e869c4ef4c..25df64795d 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -12,8 +12,7 @@ case class RemoteScope(node: Address) extends Scope { def withFallback(other: Scope): Scope = this } -class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { - +private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { override protected def parseConfig(path: String, config: 
Config): Option[Deploy] = { import scala.collection.JavaConverters._ @@ -30,5 +29,4 @@ class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extend case None ⇒ None } } - } \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 0b26311286..951c007fbc 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -6,17 +6,12 @@ package akka.remote import com.typesafe.config.Config import akka.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS -import java.net.InetAddress -import akka.ConfigurationException -import scala.collection.JavaConverters._ -import akka.actor.Address -import akka.actor.AddressFromURIString class RemoteSettings(val config: Config, val systemName: String) { import config._ - val RemoteTransport = getString("akka.remote.transport") - val LogReceive = getBoolean("akka.remote.log-received-messages") - val LogSend = getBoolean("akka.remote.log-sent-messages") - val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) - val UntrustedMode = getBoolean("akka.remote.untrusted-mode") + val RemoteTransport: String = getString("akka.remote.transport") + val LogReceive: Boolean = getBoolean("akka.remote.log-received-messages") + val LogSend: Boolean = getBoolean("akka.remote.log-sent-messages") + val RemoteSystemDaemonAckTimeout: Duration = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) + val UntrustedMode: Boolean = getBoolean("akka.remote.untrusted-mode") } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 3bade97460..d912d1d878 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ 
b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -10,7 +10,6 @@ import akka.event.{ LoggingAdapter, Logging } import akka.AkkaException import akka.serialization.Serialization import akka.remote.RemoteProtocol._ -import akka.dispatch.ChildTerminated import akka.actor._ /** @@ -27,54 +26,67 @@ trait RemoteClientLifeCycleEvent extends RemoteLifeCycleEvent { def remoteAddress: Address } +/** + * A RemoteClientError is a general error that is thrown within or from a RemoteClient + */ case class RemoteClientError( @BeanProperty cause: Throwable, @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.ErrorLevel - override def toString = - "RemoteClientError@" + remoteAddress + ": Error[" + cause + "]" + override def logLevel: Logging.LogLevel = Logging.ErrorLevel + override def toString: String = "RemoteClientError@" + remoteAddress + ": Error[" + cause + "]" } +/** + * RemoteClientDisconnected is published when a RemoteClient's connection is disconnected + */ case class RemoteClientDisconnected( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteClientDisconnected@" + remoteAddress + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = "RemoteClientDisconnected@" + remoteAddress } +/** + * RemoteClientConnected is published when a RemoteClient's connection is established + */ case class RemoteClientConnected( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteClientConnected@" + remoteAddress + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = 
"RemoteClientConnected@" + remoteAddress } +/** + * RemoteClientStarted is published when a RemoteClient has started up + */ case class RemoteClientStarted( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.InfoLevel - override def toString = - "RemoteClientStarted@" + remoteAddress + override def logLevel: Logging.LogLevel = Logging.InfoLevel + override def toString: String = "RemoteClientStarted@" + remoteAddress } +/** + * RemoteClientShutdown is published when a RemoteClient has shut down + */ case class RemoteClientShutdown( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.InfoLevel - override def toString = - "RemoteClientShutdown@" + remoteAddress + override def logLevel: Logging.LogLevel = Logging.InfoLevel + override def toString: String = "RemoteClientShutdown@" + remoteAddress } +/** + * RemoteClientWriteFailed is published when a remote send of a message detectably fails (throws an exception). 
+ */ case class RemoteClientWriteFailed( @BeanProperty request: AnyRef, @BeanProperty cause: Throwable, @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel = Logging.WarningLevel - override def toString = + override def logLevel: Logging.LogLevel = Logging.WarningLevel + override def toString: String = "RemoteClientWriteFailed@" + remoteAddress + ": MessageClass[" + (if (request ne null) request.getClass.getName else "no message") + "] Error[" + cause + "]" @@ -85,53 +97,65 @@ case class RemoteClientWriteFailed( */ trait RemoteServerLifeCycleEvent extends RemoteLifeCycleEvent +/** + * RemoteServerStarted is published when a local RemoteServer has started up + */ case class RemoteServerStarted( @transient @BeanProperty remote: RemoteTransport) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.InfoLevel - override def toString = - "RemoteServerStarted@" + remote + override def logLevel: Logging.LogLevel = Logging.InfoLevel + override def toString: String = "RemoteServerStarted@" + remote } +/** + * RemoteServerShutdown is published when a local RemoteServer has shut down + */ case class RemoteServerShutdown( @transient @BeanProperty remote: RemoteTransport) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.InfoLevel - override def toString = - "RemoteServerShutdown@" + remote + override def logLevel: Logging.LogLevel = Logging.InfoLevel + override def toString: String = "RemoteServerShutdown@" + remote } +/** + * A RemoteServerError is a general error that is thrown within or from a RemoteServer + */ case class RemoteServerError( @BeanProperty val cause: Throwable, @transient @BeanProperty remote: RemoteTransport) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.ErrorLevel - override def toString = - "RemoteServerError@" + remote + "] Error[" + cause + "]" + override def logLevel: Logging.LogLevel = 
Logging.ErrorLevel + override def toString: String = "RemoteServerError@" + remote + "] Error[" + cause + "]" } +/** + * RemoteServerClientConnected is published when an inbound connection has been established + */ case class RemoteServerClientConnected( @transient @BeanProperty remote: RemoteTransport, @BeanProperty val clientAddress: Option[Address]) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteServerClientConnected@" + remote + - ": Client[" + clientAddress.getOrElse("no address") + "]" + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = + "RemoteServerClientConnected@" + remote + ": Client[" + clientAddress.getOrElse("no address") + "]" } +/** + * RemoteServerClientConnected is published when an inbound connection has been disconnected + */ case class RemoteServerClientDisconnected( @transient @BeanProperty remote: RemoteTransport, @BeanProperty val clientAddress: Option[Address]) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteServerClientDisconnected@" + remote + - ": Client[" + clientAddress.getOrElse("no address") + "]" + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = + "RemoteServerClientDisconnected@" + remote + ": Client[" + clientAddress.getOrElse("no address") + "]" } +/** + * RemoteServerClientClosed is published when an inbound RemoteClient is closed + */ case class RemoteServerClientClosed( @transient @BeanProperty remote: RemoteTransport, @BeanProperty val clientAddress: Option[Address]) extends RemoteServerLifeCycleEvent { - override def logLevel = Logging.DebugLevel - override def toString = - "RemoteServerClientClosed@" + remote + - ": Client[" + clientAddress.getOrElse("no address") + "]" + override def logLevel: Logging.LogLevel = Logging.DebugLevel + override def toString: String = + "RemoteServerClientClosed@" + 
remote + ": Client[" + clientAddress.getOrElse("no address") + "]" } /** @@ -142,6 +166,10 @@ class RemoteClientException private[akka] ( @transient @BeanProperty val client: RemoteTransport, val remoteAddress: Address, cause: Throwable = null) extends AkkaException(message, cause) +/** + * RemoteTransportException represents a general failure within a RemoteTransport, + * such as inability to start, wrong configuration etc. + */ class RemoteTransportException(message: String, cause: Throwable) extends AkkaException(message, cause) /** @@ -178,71 +206,56 @@ abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: Re */ def restartClientConnection(address: Address): Boolean - /** Methods that needs to be implemented by a transport **/ - - def send(message: Any, - senderOption: Option[ActorRef], - recipient: RemoteActorRef): Unit + /** + * Sends the given message to the recipient supplying the sender if any + */ + def send(message: Any, senderOption: Option[ActorRef], recipient: RemoteActorRef): Unit + /** + * Default implementation both publishes the message to the eventStream as well as logs it using the system logger + */ def notifyListeners(message: RemoteLifeCycleEvent): Unit = { system.eventStream.publish(message) system.log.log(message.logLevel, "{}", message) } - override def toString = address.toString -} - -class RemoteMessage(input: RemoteMessageProtocol, system: ExtendedActorSystem) { - - def originalReceiver = input.getRecipient.getPath - - lazy val sender: ActorRef = - if (input.hasSender) system.provider.actorFor(system.provider.rootGuardian, input.getSender.getPath) - else system.deadLetters - - lazy val recipient: InternalActorRef = system.provider.actorFor(system.provider.rootGuardian, originalReceiver) - - lazy val payload: AnyRef = MessageSerializer.deserialize(system, input.getMessage) - - override def toString = "RemoteMessage: " + payload + " to " + recipient + "<+{" + originalReceiver + "} from " + sender -} - -trait 
RemoteMarshallingOps { + /** + * Returns this RemoteTransports Address' textual representation + */ + override def toString: String = address.toString + /** + * A Logger that can be used to log issues that may occur + */ def log: LoggingAdapter - def system: ExtendedActorSystem - - def provider: RemoteActorRefProvider - - def address: Address - + /** + * When this method returns true, some functionality will be turned off for security purposes. + */ protected def useUntrustedMode: Boolean - def createMessageSendEnvelope(rmp: RemoteMessageProtocol): AkkaRemoteProtocol = { - val arp = AkkaRemoteProtocol.newBuilder - arp.setMessage(rmp) - arp.build - } + /** + * Returns a newly created AkkaRemoteProtocol with the given message payload. + */ + def createMessageSendEnvelope(rmp: RemoteMessageProtocol): AkkaRemoteProtocol = + AkkaRemoteProtocol.newBuilder.setMessage(rmp).build - def createControlEnvelope(rcp: RemoteControlProtocol): AkkaRemoteProtocol = { - val arp = AkkaRemoteProtocol.newBuilder - arp.setInstruction(rcp) - arp.build - } + /** + * Returns a newly created AkkaRemoteProtocol with the given control payload. + */ + def createControlEnvelope(rcp: RemoteControlProtocol): AkkaRemoteProtocol = + AkkaRemoteProtocol.newBuilder.setInstruction(rcp).build /** * Serializes the ActorRef instance into a Protocol Buffers (protobuf) Message. */ - def toRemoteActorRefProtocol(actor: ActorRef): ActorRefProtocol = { + def toRemoteActorRefProtocol(actor: ActorRef): ActorRefProtocol = ActorRefProtocol.newBuilder.setPath(actor.path.toStringWithAddress(address)).build - } - - def createRemoteMessageProtocolBuilder( - recipient: ActorRef, - message: Any, - senderOption: Option[ActorRef]): RemoteMessageProtocol.Builder = { + /** + * Returns a new RemoteMessageProtocol containing the serialized representation of the given parameters. 
+ */ + def createRemoteMessageProtocolBuilder(recipient: ActorRef, message: Any, senderOption: Option[ActorRef]): RemoteMessageProtocol.Builder = { val messageBuilder = RemoteMessageProtocol.newBuilder.setRecipient(toRemoteActorRefProtocol(recipient)) if (senderOption.isDefined) messageBuilder.setSender(toRemoteActorRefProtocol(senderOption.get)) @@ -253,7 +266,12 @@ trait RemoteMarshallingOps { messageBuilder } - def receiveMessage(remoteMessage: RemoteMessage) { + /** + * Call this method with an inbound RemoteMessage and this will take care of security (see: "useUntrustedMode") + * as well as making sure that the message ends up at its destination (best effort). + * There is also a fair amount of logging produced by this method, which is good for debugging. + */ + def receiveMessage(remoteMessage: RemoteMessage): Unit = { val remoteDaemon = provider.remoteDaemon remoteMessage.recipient match { @@ -289,3 +307,43 @@ trait RemoteMarshallingOps { } } } + +/** + * RemoteMessage is a wrapper around a message that has come in over the wire, + * it allows to easily obtain references to the deserialized message, its intended recipient + * and the sender. + */ +class RemoteMessage(input: RemoteMessageProtocol, system: ExtendedActorSystem) { + /** + * Returns a String-representation of the ActorPath that this RemoteMessage is destined for + */ + def originalReceiver: String = input.getRecipient.getPath + + /** + * Returns an Option with the String representation of the ActorPath of the Actor who is the sender of this message + */ + def originalSender: Option[String] = if (input.hasSender) Some(input.getSender.getPath) else None + + /** + * Returns a reference to the Actor that sent this message, or DeadLetterActorRef if not present or found. 
+ */ + lazy val sender: ActorRef = + if (input.hasSender) system.provider.actorFor(system.provider.rootGuardian, input.getSender.getPath) + else system.deadLetters + + /** + * Returns a reference to the Actor that this message is destined for. + * In case this returns a DeadLetterActorRef, you have access to the path using the "originalReceiver" method. + */ + lazy val recipient: InternalActorRef = system.provider.actorFor(system.provider.rootGuardian, originalReceiver) + + /** + * Returns the message + */ + lazy val payload: AnyRef = MessageSerializer.deserialize(system, input.getMessage) + + /** + * Returns a String representation of this RemoteMessage, intended for debugging purposes. + */ + override def toString: String = "RemoteMessage: " + payload + " to " + recipient + "<+{" + originalReceiver + "} from " + sender +} diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 3c52179e4a..f4f200aef6 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -12,7 +12,6 @@ import org.jboss.netty.channel.{ ChannelFutureListener, ChannelHandler, StaticCh import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } - import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected, RemoteClientWriteFailed } import akka.actor.{ Address, ActorRef } @@ -20,18 +19,12 @@ import akka.AkkaException import akka.event.Logging import akka.util.Switch -class 
RemoteClientMessageBufferException(message: String, cause: Throwable) extends AkkaException(message, cause) { - def this(msg: String) = this(msg, null) -} - /** * This is the abstract baseclass for netty remote clients, currently there's only an * ActiveRemoteClient, but others could be feasible, like a PassiveRemoteClient that * reuses an already established connection. */ -abstract class RemoteClient private[akka] ( - val netty: NettyRemoteTransport, - val remoteAddress: Address) { +private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteTransport, val remoteAddress: Address) { val log = Logging(netty.system, "RemoteClient") @@ -92,7 +85,7 @@ abstract class RemoteClient private[akka] ( /** * RemoteClient represents a connection to an Akka node. Is used to send messages to remote actors on the node. */ -class ActiveRemoteClient private[akka] ( +private[akka] class ActiveRemoteClient private[akka] ( netty: NettyRemoteTransport, remoteAddress: Address, localAddress: Address) @@ -225,7 +218,7 @@ class ActiveRemoteClient private[akka] ( } @ChannelHandler.Sharable -class ActiveRemoteClientHandler( +private[akka] class ActiveRemoteClientHandler( val name: String, val bootstrap: ClientBootstrap, val remoteAddress: Address, @@ -314,7 +307,7 @@ class ActiveRemoteClientHandler( } } -class ActiveRemoteClientPipelineFactory( +private[akka] class ActiveRemoteClientPipelineFactory( name: String, bootstrap: ClientBootstrap, executionHandler: ExecutionHandler, @@ -339,9 +332,9 @@ class ActiveRemoteClientPipelineFactory( } } -class PassiveRemoteClient(val currentChannel: Channel, - netty: NettyRemoteTransport, - remoteAddress: Address) +private[akka] class PassiveRemoteClient(val currentChannel: Channel, + netty: NettyRemoteTransport, + remoteAddress: Address) extends RemoteClient(netty, remoteAddress) { def connect(reconnectIfAlreadyConnected: Boolean = false): Boolean = runSwitch switchOn { diff --git 
a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 4fd70b822f..d09c17f160 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -16,17 +16,16 @@ import org.jboss.netty.channel.{ ChannelHandlerContext, Channel } import org.jboss.netty.handler.codec.protobuf.{ ProtobufEncoder, ProtobufDecoder } import org.jboss.netty.handler.execution.OrderedMemoryAwareThreadPoolExecutor import org.jboss.netty.util.HashedWheelTimer -import akka.dispatch.MonitorableThreadFactory import akka.event.Logging import akka.remote.RemoteProtocol.AkkaRemoteProtocol -import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteSettings, RemoteMarshallingOps, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } +import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } import akka.util.NonFatal import akka.actor.{ ExtendedActorSystem, Address, ActorRef } /** * Provides the implementation of the Netty remote support */ -class NettyRemoteTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) extends RemoteTransport(_system, _provider) with RemoteMarshallingOps { +private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) extends RemoteTransport(_system, _provider) { import provider.remoteSettings @@ -192,7 +191,7 @@ class NettyRemoteTransport(_system: ExtendedActorSystem, _provider: RemoteActorR } -class RemoteMessageEncoder(remoteSupport: NettyRemoteTransport) extends ProtobufEncoder { +private[akka] class RemoteMessageEncoder(remoteSupport: NettyRemoteTransport) extends ProtobufEncoder { override def encode(ctx: ChannelHandlerContext, channel: Channel, msg: AnyRef): AnyRef = { msg match { case (message: Any, sender: Option[_], recipient: 
ActorRef) ⇒ @@ -207,9 +206,9 @@ class RemoteMessageEncoder(remoteSupport: NettyRemoteTransport) extends Protobuf } } -class RemoteMessageDecoder extends ProtobufDecoder(AkkaRemoteProtocol.getDefaultInstance) +private[akka] class RemoteMessageDecoder extends ProtobufDecoder(AkkaRemoteProtocol.getDefaultInstance) -class DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(name) { +private[akka] class DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(name) { protected val guard = new ReentrantReadWriteLock protected val open = new AtomicBoolean(true) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 674023dd52..5c18bc6933 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -20,7 +20,7 @@ import java.net.InetAddress import akka.actor.ActorSystemImpl import org.jboss.netty.channel._ -class NettyRemoteServer(val netty: NettyRemoteTransport) { +private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { import netty.settings @@ -82,7 +82,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { } } -class RemoteServerPipelineFactory( +private[akka] class RemoteServerPipelineFactory( val openChannels: ChannelGroup, val executionHandler: ExecutionHandler, val netty: NettyRemoteTransport) extends ChannelPipelineFactory { @@ -103,7 +103,7 @@ class RemoteServerPipelineFactory( } @ChannelHandler.Sharable -class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler { +private[akka] class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler { val authenticated = new AnyRef override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = secureCookie match { @@ -130,7 +130,7 @@ class RemoteServerAuthenticationHandler(secureCookie: 
Option[String]) extends Si } @ChannelHandler.Sharable -class RemoteServerHandler( +private[akka] class RemoteServerHandler( val openChannels: ChannelGroup, val netty: NettyRemoteTransport) extends SimpleChannelUpstreamHandler { diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index bb33cb9570..64bc184408 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -9,37 +9,37 @@ import java.util.concurrent.TimeUnit._ import java.net.InetAddress import akka.ConfigurationException -class NettySettings(config: Config, val systemName: String) { +private[akka] class NettySettings(config: Config, val systemName: String) { import config._ - val BackoffTimeout = Duration(getMilliseconds("backoff-timeout"), MILLISECONDS) + val BackoffTimeout: Duration = Duration(getMilliseconds("backoff-timeout"), MILLISECONDS) val SecureCookie: Option[String] = getString("secure-cookie") match { case "" ⇒ None case cookie ⇒ Some(cookie) } - val RequireCookie = { + val RequireCookie: Boolean = { val requireCookie = getBoolean("require-cookie") if (requireCookie && SecureCookie.isEmpty) throw new ConfigurationException( "Configuration option 'akka.remote.netty.require-cookie' is turned on but no secure cookie is defined in 'akka.remote.netty.secure-cookie'.") requireCookie } - val UsePassiveConnections = getBoolean("use-passive-connections") - val UseDispatcherForIO = getString("use-dispatcher-for-io") match { + val UsePassiveConnections: Boolean = getBoolean("use-passive-connections") + val UseDispatcherForIO: Option[String] = getString("use-dispatcher-for-io") match { case "" | null ⇒ None case dispatcher ⇒ Some(dispatcher) } - val ReconnectionTimeWindow = Duration(getMilliseconds("reconnection-time-window"), MILLISECONDS) - val ReadTimeout = Duration(getMilliseconds("read-timeout"), MILLISECONDS) - val WriteTimeout = 
Duration(getMilliseconds("write-timeout"), MILLISECONDS) - val AllTimeout = Duration(getMilliseconds("all-timeout"), MILLISECONDS) - val ReconnectDelay = Duration(getMilliseconds("reconnect-delay"), MILLISECONDS) - val MessageFrameSize = getBytes("message-frame-size").toInt + val ReconnectionTimeWindow: Duration = Duration(getMilliseconds("reconnection-time-window"), MILLISECONDS) + val ReadTimeout: Duration = Duration(getMilliseconds("read-timeout"), MILLISECONDS) + val WriteTimeout: Duration = Duration(getMilliseconds("write-timeout"), MILLISECONDS) + val AllTimeout: Duration = Duration(getMilliseconds("all-timeout"), MILLISECONDS) + val ReconnectDelay: Duration = Duration(getMilliseconds("reconnect-delay"), MILLISECONDS) + val MessageFrameSize: Int = getBytes("message-frame-size").toInt - val Hostname = getString("hostname") match { + val Hostname: String = getString("hostname") match { case "" ⇒ InetAddress.getLocalHost.getHostAddress case value ⇒ value } @@ -50,25 +50,25 @@ class NettySettings(config: Config, val systemName: String) { } @deprecated("WARNING: This should only be used by professionals.", "2.0") - val PortSelector = getInt("port") + val PortSelector: Int = getInt("port") - val ConnectionTimeout = Duration(getMilliseconds("connection-timeout"), MILLISECONDS) + val ConnectionTimeout: Duration = Duration(getMilliseconds("connection-timeout"), MILLISECONDS) - val Backlog = getInt("backlog") + val Backlog: Int = getInt("backlog") - val ExecutionPoolKeepalive = Duration(getMilliseconds("execution-pool-keepalive"), MILLISECONDS) + val ExecutionPoolKeepalive: Duration = Duration(getMilliseconds("execution-pool-keepalive"), MILLISECONDS) - val ExecutionPoolSize = getInt("execution-pool-size") match { + val ExecutionPoolSize: Int = getInt("execution-pool-size") match { case sz if sz < 1 ⇒ throw new IllegalArgumentException("akka.remote.netty.execution-pool-size is less than 1") case sz ⇒ sz } - val MaxChannelMemorySize = getBytes("max-channel-memory-size") 
match { + val MaxChannelMemorySize: Long = getBytes("max-channel-memory-size") match { case sz if sz < 0 ⇒ throw new IllegalArgumentException("akka.remote.netty.max-channel-memory-size is less than 0 bytes") case sz ⇒ sz } - val MaxTotalMemorySize = getBytes("max-total-memory-size") match { + val MaxTotalMemorySize: Long = getBytes("max-total-memory-size") match { case sz if sz < 0 ⇒ throw new IllegalArgumentException("akka.remote.netty.max-total-memory-size is less than 0 bytes") case sz ⇒ sz } From 55b185be02376b0dfd7d682186eeead6d3859514 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 11:58:57 +0200 Subject: [PATCH 121/538] Fix compilation error, getHostString, java7 --- .../src/main/scala/akka/remote/testconductor/Conductor.scala | 2 +- .../akka/remote/testconductor/NetworkFailureInjector.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 89fa807762..643bc68cdb 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -397,7 +397,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP override def receive = LoggingReceive { case CreateServerFSM(channel) ⇒ val (ip, port) = channel.getRemoteAddress match { - case s: InetSocketAddress ⇒ (s.getHostString, s.getPort) + case s: InetSocketAddress ⇒ (s.getHostName, s.getPort) } val name = ip + ":" + port + "-server" + generation.next sender ! 
context.actorOf(Props(new ServerFSM(self, channel)), name) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index ba8f8d1285..b425518044 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -135,7 +135,7 @@ private[akka] object NetworkFailureInjector { } /** - * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed + * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed * by the FailureInjector of the TestConductor extension. These can * pass through requests immediately, drop them or throttle to a desired rate. The FSMs are * registered in the TestConductorExt.failureInjector so that settings can be applied from From 33cec55dbe1a949c942d3ebb9072ff480a951d59 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 09:06:43 +0200 Subject: [PATCH 122/538] Moved NodeMembershipSpec to multi-jvm. 
See #2115 --- .../scala/akka/cluster/NodeMembershipSpec.scala | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) rename akka-cluster/src/{test => multi-jvm}/scala/akka/cluster/NodeMembershipSpec.scala (95%) diff --git a/akka-cluster/src/test/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala similarity index 95% rename from akka-cluster/src/test/scala/akka/cluster/NodeMembershipSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 923a3267ca..783560ec09 100644 --- a/akka-cluster/src/test/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -3,15 +3,12 @@ */ package akka.cluster -import java.net.InetSocketAddress - -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ -import akka.util.duration._ - -import com.typesafe.config._ +import akka.actor.ActorSystem +import akka.actor.ActorSystemImpl +import akka.remote.RemoteActorRefProvider +import akka.testkit.ImplicitSender +import akka.testkit.LongRunningTest +import com.typesafe.config.ConfigFactory class NodeMembershipSpec extends ClusterSpec with ImplicitSender { val portPrefix = 7 From dd5a78597aa9ae722d86ba9ec7c70d909eb2c8c3 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 11:43:55 +0200 Subject: [PATCH 123/538] Port NodeMembershipSpec to MultiNodeSpec. 
See #2115 --- .../akka/cluster/NodeMembershipSpec.scala | 233 +++++++++--------- .../scala/akka/cluster/NodeStartupSpec.scala | 50 ++-- 2 files changed, 144 insertions(+), 139 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 783560ec09..8e3fb04ec6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -3,131 +3,122 @@ */ package akka.cluster -import akka.actor.ActorSystem -import akka.actor.ActorSystemImpl -import akka.remote.RemoteActorRefProvider -import akka.testkit.ImplicitSender -import akka.testkit.LongRunningTest import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ -class NodeMembershipSpec extends ClusterSpec with ImplicitSender { - val portPrefix = 7 +object NodeMembershipMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") - var node0: Cluster = _ - var node1: Cluster = _ - var node2: Cluster = _ - - var system0: ActorSystemImpl = _ - var system1: ActorSystemImpl = _ - var system2: ActorSystemImpl = _ - - try { - "A set of connected cluster systems" must { - "(when two systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { - - // ======= NODE 0 ======== - system0 = ActorSystem("system0", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d550 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote0 = system0.provider.asInstanceOf[RemoteActorRefProvider] - node0 = Cluster(system0) - - // ======= NODE 1 ======== - system1 = 
ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d551 - cluster.node-to-join = "akka://system0@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider] - node1 = Cluster(system1) - - // check cluster convergence - awaitConvergence(node0 :: node1 :: Nil) - - val members0 = node0.latestGossip.members.toArray - members0.size must be(2) - members0(0).address.port.get must be(550.withPortPrefix) - members0(0).status must be(MemberStatus.Up) - members0(1).address.port.get must be(551.withPortPrefix) - members0(1).status must be(MemberStatus.Up) - - val members1 = node1.latestGossip.members.toArray - members1.size must be(2) - members1(0).address.port.get must be(550.withPortPrefix) - members1(0).status must be(MemberStatus.Up) - members1(1).address.port.get must be(551.withPortPrefix) - members1(1).status must be(MemberStatus.Up) - } - - "(when three systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest ignore { - - // ======= NODE 2 ======== - system2 = ActorSystem("system2", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d552 - cluster.node-to-join = "akka://system0@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider] - node2 = Cluster(system2) - - awaitConvergence(node0 :: node1 :: node2 :: Nil) - - val members0 = node0.latestGossip.members.toArray - val version = node0.latestGossip.version - members0.size must be(3) - members0(0).address.port.get must be(550.withPortPrefix) - members0(0).status must be(MemberStatus.Up) - members0(1).address.port.get must 
be(551.withPortPrefix) - members0(1).status must be(MemberStatus.Up) - members0(2).address.port.get must be(552.withPortPrefix) - members0(2).status must be(MemberStatus.Up) - - val members1 = node1.latestGossip.members.toArray - members1.size must be(3) - members1(0).address.port.get must be(550.withPortPrefix) - members1(0).status must be(MemberStatus.Up) - members1(1).address.port.get must be(551.withPortPrefix) - members1(1).status must be(MemberStatus.Up) - members1(2).address.port.get must be(552.withPortPrefix) - members1(2).status must be(MemberStatus.Up) - - val members2 = node2.latestGossip.members.toArray - members2.size must be(3) - members2(0).address.port.get must be(550.withPortPrefix) - members2(0).status must be(MemberStatus.Up) - members2(1).address.port.get must be(551.withPortPrefix) - members2(1).status must be(MemberStatus.Up) - members2(2).address.port.get must be(552.withPortPrefix) - members2(2).status must be(MemberStatus.Up) - } + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka.cluster { + gossip-frequency = 200 ms + leader-actions-frequency = 200 ms + periodic-tasks-initial-delay = 300 ms + # FIXME get rid of this hardcoded host:port + node-to-join = "akka://MultiNodeSpec@localhost:2602" } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) - } + """))) - override def atTermination() { - if (node0 ne null) node0.shutdown() - if (system0 ne null) system0.shutdown() + nodeConfig(first, ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded port + akka.remote.netty.port=2602 + """)) + +} + +class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec { + override var node: Cluster = _ +} +class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec { + override var node: Cluster = _ +} +class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec { + override var node: Cluster = _ +} + +abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) 
with ImplicitSender with BeforeAndAfter { + import NodeMembershipMultiJvmSpec._ + + override def initialParticipants = 3 + + var node: Cluster + + after { + testConductor.enter("after") + } + + "A set of connected cluster systems" must { + + val firstAddress = testConductor.getAddressFor(first).await + val secondAddress = testConductor.getAddressFor(second).await + val thirdAddress = testConductor.getAddressFor(third).await + + "(when two systems) start gossiping to each other so that both systems gets the same gossip info" in { + + def assertMembers: Unit = { + val members = node.latestGossip.members.toIndexedSeq + members.size must be(2) + members(0).address must be(firstAddress) + members(1).address must be(secondAddress) + awaitCond { + node.latestGossip.members.forall(_.status == MemberStatus.Up) + } + } + + runOn(first) { + node = Cluster(system) + awaitCond(node.latestGossip.members.size == 2) + assertMembers + node.convergence.isDefined + } + + runOn(second) { + node = Cluster(system) + awaitCond(node.latestGossip.members.size == 2) + assertMembers + node.convergence.isDefined + } + + } + + "(when three systems) start gossiping to each other so that both systems gets the same gossip info" in { + + def assertMembers: Unit = { + val members = node.latestGossip.members.toIndexedSeq + members.size must be(3) + members(0).address must be(firstAddress) + members(1).address must be(secondAddress) + members(2).address must be(thirdAddress) + awaitCond { + node.latestGossip.members.forall(_.status == MemberStatus.Up) + } + } + + runOn(third) { + node = Cluster(system) + awaitCond(node.latestGossip.members.size == 3) + awaitCond(node.convergence.isDefined) + assertMembers + } + + runOn(first) { + awaitCond(node.latestGossip.members.size == 3) + assertMembers + node.convergence.isDefined + } + + runOn(second) { + awaitCond(node.latestGossip.members.size == 3) + assertMembers + node.convergence.isDefined + } + + } + } - if (node1 ne null) node1.shutdown() - if (system1 
ne null) system1.shutdown() - - if (node2 ne null) node2.shutdown() - if (system2 ne null) system2.shutdown() - } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 694d4ac57d..3a5eeb846d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -13,7 +13,13 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false)) + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka.cluster { + gossip-frequency = 200 ms + leader-actions-frequency = 200 ms + periodic-tasks-initial-delay = 300 ms + } + """))) nodeConfig(first, ConfigFactory.parseString(""" # FIXME get rid of this hardcoded port @@ -27,29 +33,33 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { } -class NodeStartupMultiJvmNode1 extends NodeStartupSpec -class NodeStartupMultiJvmNode2 extends NodeStartupSpec +class NodeStartupMultiJvmNode1 extends NodeStartupSpec { + override var node: Cluster = _ +} +class NodeStartupMultiJvmNode2 extends NodeStartupSpec { + override var node: Cluster = _ +} -class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender with BeforeAndAfter { +abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender with BeforeAndAfter { import NodeStartupMultiJvmSpec._ override def initialParticipants = 2 - var firstNode: Cluster = _ + var node: Cluster after { testConductor.enter("after") } runOn(first) { - firstNode = Cluster(system) + node = Cluster(system) } "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { "be a singleton cluster when started up" in { runOn(first) { - awaitCond(firstNode.isSingletonCluster) + 
awaitCond(node.isSingletonCluster) // FIXME #2117 singletonCluster should reach convergence //awaitCond(firstNode.convergence.isDefined) } @@ -57,7 +67,7 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic "be in 'Joining' phase when started up" in { runOn(first) { - val members = firstNode.latestGossip.members + val members = node.latestGossip.members members.size must be(1) val firstAddress = testConductor.getAddressFor(first).await val joiningMember = members find (_.address == firstAddress) @@ -69,21 +79,25 @@ class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with Implic "A second cluster node with a 'node-to-join' config defined" must { "join the other node cluster when sending a Join command" in { + val secondAddress = testConductor.getAddressFor(second).await + + def awaitSecondUp = awaitCond { + node.latestGossip.members.exists { member ⇒ + member.address == secondAddress && member.status == MemberStatus.Up + } + } + runOn(second) { // start cluster on second node, and join - val secondNode = Cluster(system) - awaitCond(secondNode.convergence.isDefined) + node = Cluster(system) + awaitSecondUp + node.convergence.isDefined } runOn(first) { - val secondAddress = testConductor.getAddressFor(second).await - awaitCond { - firstNode.latestGossip.members.exists { member ⇒ - member.address == secondAddress && member.status == MemberStatus.Up - } - } - firstNode.latestGossip.members.size must be(2) - awaitCond(firstNode.convergence.isDefined) + awaitSecondUp + node.latestGossip.members.size must be(2) + node.convergence.isDefined } } } From cb7de1db751792d7f15472c87af0b19577f64bca Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 12:12:45 +0200 Subject: [PATCH 124/538] Binary compat work for routing --- .../src/main/scala/akka/routing/Routing.scala | 53 +++++++++++------- .../akka/routing/RemoteRouterConfig.scala | 2 +- file-based/mailbox_user__a | 0 file-based/mailbox_user__b | 0 
file-based/mailbox_user__c | Bin 14692 -> 0 bytes 5 files changed, 35 insertions(+), 20 deletions(-) delete mode 100644 file-based/mailbox_user__a delete mode 100644 file-based/mailbox_user__b delete mode 100644 file-based/mailbox_user__c diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 96da72eda2..94eed672f4 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -174,7 +174,7 @@ trait RouterConfig { def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route - def createRouteeProvider(context: ActorContext) = new RouteeProvider(context, resizer) + def createRouteeProvider(context: ActorContext): RouteeProvider = new RouteeProvider(context, resizer) def createActor(): Router = new Router { override def supervisorStrategy: SupervisorStrategy = RouterConfig.this.supervisorStrategy @@ -195,7 +195,8 @@ trait RouterConfig { */ def withFallback(other: RouterConfig): RouterConfig = this - protected def toAll(sender: ActorRef, routees: Iterable[ActorRef]): Iterable[Destination] = routees.map(Destination(sender, _)) + protected def toAll(sender: ActorRef, routees: Iterable[ActorRef]): Iterable[Destination] = + routees.map(Destination(sender, _)) /** * Routers with dynamically resizable number of routees return the [[akka.routing.Resizer]] @@ -218,9 +219,7 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { * Not thread safe, but intended to be called from protected points, such as * `RouterConfig.createRoute` and `Resizer.resize`. */ - def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = { - routedRef.addRoutees(routees) - } + def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = routedRef.addRoutees(routees) /** * Adds the routees to the router. 
@@ -240,9 +239,7 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { * Not thread safe, but intended to be called from protected points, such as * `Resizer.resize`. */ - def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = { - routedRef.removeRoutees(routees) - } + def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = routedRef.removeRoutees(routees) def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { @@ -253,11 +250,8 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { case (_, xs) ⇒ xs.map(context.actorFor(_))(scala.collection.breakOut) } - def createAndRegisterRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): Unit = { - if (resizer.isEmpty) { - registerRoutees(createRoutees(props, nrOfInstances, routees)) - } - } + def createAndRegisterRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): Unit = + if (resizer.isEmpty) registerRoutees(createRoutees(props, nrOfInstances, routees)) /** * All routees of the router @@ -265,7 +259,6 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { def routees: IndexedSeq[ActorRef] = routedRef.routees private def routedRef = context.self.asInstanceOf[RoutedActorRef] - } /** @@ -423,7 +416,11 @@ class FromConfig(val routerDispatcher: String = Dispatchers.DefaultDispatcherId) } object RoundRobinRouter { - def apply(routees: Iterable[ActorRef]) = new RoundRobinRouter(routees = routees map (_.path.toString)) + /** + * Creates a new RoundRobinRouter, routing to the specified routees + */ + def apply(routees: Iterable[ActorRef]): RoundRobinRouter = + new RoundRobinRouter(routees = routees map (_.path.toString)) /** * Java API to create router with the supplied 'routees' actors. 
@@ -539,7 +536,10 @@ trait RoundRobinLike { this: RouterConfig ⇒ } object RandomRouter { - def apply(routees: Iterable[ActorRef]) = new RandomRouter(routees = routees map (_.path.toString)) + /** + * Creates a new RandomRouter, routing to the specified routees + */ + def apply(routees: Iterable[ActorRef]): RandomRouter = new RandomRouter(routees = routees map (_.path.toString)) /** * Java API to create router with the supplied 'routees' actors. @@ -652,7 +652,11 @@ trait RandomLike { this: RouterConfig ⇒ } object SmallestMailboxRouter { - def apply(routees: Iterable[ActorRef]) = new SmallestMailboxRouter(routees = routees map (_.path.toString)) + /** + * Creates a new SmallestMailboxRouter, routing to the specified routees + */ + def apply(routees: Iterable[ActorRef]): SmallestMailboxRouter = + new SmallestMailboxRouter(routees = routees map (_.path.toString)) /** * Java API to create router with the supplied 'routees' actors. @@ -852,7 +856,10 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ } object BroadcastRouter { - def apply(routees: Iterable[ActorRef]) = new BroadcastRouter(routees = routees map (_.path.toString)) + /** + * Creates a new BroadcastRouter, routing to the specified routees + */ + def apply(routees: Iterable[ActorRef]): BroadcastRouter = new BroadcastRouter(routees = routees map (_.path.toString)) /** * Java API to create router with the supplied 'routees' actors. 
@@ -957,7 +964,11 @@ trait BroadcastLike { this: RouterConfig ⇒ } object ScatterGatherFirstCompletedRouter { - def apply(routees: Iterable[ActorRef], within: Duration) = new ScatterGatherFirstCompletedRouter(routees = routees map (_.path.toString), within = within) + /** + * Creates a new ScatterGatherFirstCompletedRouter, routing to the specified routees, timing out after the specified Duration + */ + def apply(routees: Iterable[ActorRef], within: Duration): ScatterGatherFirstCompletedRouter = + new ScatterGatherFirstCompletedRouter(routees = routees map (_.path.toString), within = within) /** * Java API to create router with the supplied 'routees' actors. @@ -1106,6 +1117,10 @@ trait Resizer { } case object DefaultResizer { + + /** + * Creates a new DefaultResizer from the given configuration + */ def apply(resizerConfig: Config): DefaultResizer = DefaultResizer( lowerBound = resizerConfig.getInt("lower-bound"), diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala index 714b854a69..9a71f309fc 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala @@ -59,7 +59,7 @@ class RemoteRouteeProvider(nodes: Iterable[Address], _context: ActorContext, _re extends RouteeProvider(_context, _resizer) { // need this iterator as instance variable since Resizer may call createRoutees several times - private val nodeAddressIter = Stream.continually(nodes).flatten.iterator + private val nodeAddressIter: Iterator[Address] = Stream.continually(nodes).flatten.iterator override def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees, nodes) match { diff --git a/file-based/mailbox_user__a b/file-based/mailbox_user__a deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/file-based/mailbox_user__b 
b/file-based/mailbox_user__b deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/file-based/mailbox_user__c b/file-based/mailbox_user__c deleted file mode 100644 index 4b6ae68e6d608e0534358c9f4e9b0364c7240672..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14692 zcmZQ-XJBAh8Rc1L&d2}R5D#=3pT-fq`K& zW%)>eiw9ETv4ToGgTdq&3d&z5s)r0EMK9GILrK|7b;nRr_)^_5l$5?8$84cyZla|4 zrMhD%DSxT%7)lCYsyl|#5}2B4#)#4qn5vGUv;^Kt#oS~>X$eeK$52`VQ`Iq)mcUeX z45cM7RUJcV2~1VTP+9_Sqh@ZRv;?NAV<;_wsp%MFN=sm>I)>5`n5vGUv;?NAV<;_w zw^K1U8BI)>5`cqcV;6Qw0ERUJcV2~1VTP+9_0(=n!$ zmcUeX45cM7RUJcV3A~Gnxyh8$5}2xvp|k|1s$(cEfvM^kN=sm>I)>5`n5vGUv;^Kw z&D=z32~17Lm{D2+Q`Iq)mcUeX45cM7RUJcV2~1VTP+9`-p<-?_qqGF3s$(cEfvM^k zN=sm>I)>5`n5vGUv;?N6W6UWnfvM^kN=x9qRLt7ul$O9$bqu8?FjXBxX$eeK$52`V zQ`Iq)mcUeX45cM7RUJcV3A~S*S(}0qm=V0_ikhympr8nbEWib=6``t&C@qJn>KF>j zVdw&7s)r8+r7^9XL}{74pNjd*f`T#`DSW8vBuYzWsyc>(l9^WFLuvU;)$pODd`1+B z)O3<11%))Y#yCL5{AEc&`HYl)sOlsNN@?nc4|U3Cs)i4xF#a<)w5d0|NtvP7>teh3xEL1MlphpqPdP( Date: Thu, 24 May 2012 12:18:53 +0200 Subject: [PATCH 125/538] Changed the stupid node var. 
See #2115 --- .../scala/akka/cluster/NodeMembershipSpec.scala | 14 ++++---------- .../scala/akka/cluster/NodeStartupSpec.scala | 10 +++------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 8e3fb04ec6..4b9436165d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -31,22 +31,16 @@ object NodeMembershipMultiJvmSpec extends MultiNodeConfig { } -class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec { - override var node: Cluster = _ -} -class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec { - override var node: Cluster = _ -} -class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec { - override var node: Cluster = _ -} +class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec +class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec +class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with ImplicitSender with BeforeAndAfter { import NodeMembershipMultiJvmSpec._ override def initialParticipants = 3 - var node: Cluster + var node: Cluster = _ after { testConductor.enter("after") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 3a5eeb846d..6d95b4d651 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -33,19 +33,15 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { } -class NodeStartupMultiJvmNode1 extends NodeStartupSpec { - override var node: Cluster = _ -} -class NodeStartupMultiJvmNode2 extends NodeStartupSpec { - override var node: Cluster = _ -} +class 
NodeStartupMultiJvmNode1 extends NodeStartupSpec +class NodeStartupMultiJvmNode2 extends NodeStartupSpec abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender with BeforeAndAfter { import NodeStartupMultiJvmSpec._ override def initialParticipants = 2 - var node: Cluster + var node: Cluster = _ after { testConductor.enter("after") From a9945f77f61f86833012cc0e18062514a6a33150 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 12:19:39 +0200 Subject: [PATCH 126/538] Binary compat for serialization --- .../akka/serialization/DaemonMsgCreateSerializer.scala | 8 ++++---- .../akka/serialization/DaemonMsgWatchSerializer.scala | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala index ce54ff5adb..2905c3ef3b 100644 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala @@ -30,8 +30,10 @@ import akka.actor.FromClassCreator * Serialization of contained RouterConfig, Config, and Scope * is done with configured serializer for those classes, by * default java.io.Serializable. 
+ * + * INTERNAL API */ -class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Serializer { +private[akka] class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Serializer { import ProtobufSerializer.serializeActorRef import ProtobufSerializer.deserializeActorRef @@ -81,7 +83,7 @@ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Seriali def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { val proto = DaemonMsgCreateProtocol.parseFrom(bytes) - def deploy(protoDeploy: DeployProtocol) = { + def deploy(protoDeploy: DeployProtocol): Deploy = { val config = if (protoDeploy.hasConfig) deserialize(protoDeploy.getConfig, classOf[Config]) else ConfigFactory.empty @@ -146,7 +148,5 @@ class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) extends Seriali case _ ⇒ throw e // the first exception } } - } - } \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala index 0ca5216da0..016d7f14cb 100644 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala @@ -12,8 +12,10 @@ import akka.actor.ExtendedActorSystem /** * Serializes akka's internal DaemonMsgWatch using protobuf. 
+ * + * INTERNAL API */ -class DaemonMsgWatchSerializer(val system: ExtendedActorSystem) extends Serializer { +private[akka] class DaemonMsgWatchSerializer(val system: ExtendedActorSystem) extends Serializer { import ProtobufSerializer.serializeActorRef import ProtobufSerializer.deserializeActorRef From c9ab35d5f0998001c71f2fec3ea43d602f86459e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 12:34:18 +0200 Subject: [PATCH 127/538] Binary compat work for the 0mq module --- .../akka/zeromq/ConcurrentSocketActor.scala | 9 ++------ .../main/scala/akka/zeromq/SocketOption.scala | 4 +++- .../akka/zeromq/ZMQMessageDeserializer.scala | 2 +- .../scala/akka/zeromq/ZeroMQExtension.scala | 21 +++++++------------ .../src/main/scala/akka/zeromq/package.scala | 4 ++-- 5 files changed, 16 insertions(+), 24 deletions(-) diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index c4e6d08f59..e848809644 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -205,11 +205,6 @@ private[zeromq] class ConcurrentSocketActor(params: Seq[SocketOption]) extends A } private val listenerOpt = params collectFirst { case Listener(l) ⇒ l } - private def watchListener() { - listenerOpt foreach context.watch - } - - private def notifyListener(message: Any) { - listenerOpt foreach { _ ! message } - } + private def watchListener(): Unit = listenerOpt foreach context.watch + private def notifyListener(message: Any): Unit = listenerOpt foreach { _ ! 
message } } diff --git a/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala b/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala index 1e4c83bcef..c5d5919fb7 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala @@ -255,7 +255,9 @@ case class Linger(value: Long) extends SocketOption /** * Gets the linger option @see [[akka.zeromq.Linger]] */ -object Linger extends SocketOptionQuery +object Linger extends SocketOptionQuery { + val no: Linger = Linger(0) +} /** * Sets the recovery interval for multicast transports using the specified socket. diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala b/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala index 1776f21211..2d41424e88 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala @@ -20,5 +20,5 @@ case class Frame(payload: Seq[Byte]) { * Deserializes ZeroMQ messages into an immutable sequence of frames */ class ZMQMessageDeserializer extends Deserializer { - def apply(frames: Seq[Frame]) = ZMQMessage(frames) + def apply(frames: Seq[Frame]): ZMQMessage = ZMQMessage(frames) } diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala b/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala index 1ddd213325..85a9ea6642 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala @@ -19,7 +19,7 @@ import org.zeromq.ZMQException * @param patch */ case class ZeroMQVersion(major: Int, minor: Int, patch: Int) { - override def toString = "%d.%d.%d".format(major, minor, patch) + override def toString: String = "%d.%d.%d".format(major, minor, patch) } /** @@ -27,17 +27,14 @@ case class ZeroMQVersion(major: Int, minor: Int, patch: Int) { */ object ZeroMQExtension extends 
ExtensionId[ZeroMQExtension] with ExtensionIdProvider { override def get(system: ActorSystem): ZeroMQExtension = super.get(system) - def lookup() = this - def createExtension(system: ExtendedActorSystem) = new ZeroMQExtension(system) + def lookup(): this.type = this + override def createExtension(system: ExtendedActorSystem): ZeroMQExtension = new ZeroMQExtension(system) private val minVersionString = "2.1.0" private val minVersion = JZMQ.makeVersion(2, 1, 0) - private[zeromq] def check[TOption <: SocketOption: Manifest](parameters: Seq[SocketOption]) = { - parameters exists { p ⇒ - ClassManifest.singleType(p) <:< manifest[TOption] - } - } + private[zeromq] def check[TOption <: SocketOption: Manifest](parameters: Seq[SocketOption]) = + parameters exists { p ⇒ ClassManifest.singleType(p) <:< manifest[TOption] } } /** @@ -47,16 +44,14 @@ object ZeroMQExtension extends ExtensionId[ZeroMQExtension] with ExtensionIdProv */ class ZeroMQExtension(system: ActorSystem) extends Extension { - val DefaultPollTimeout = Duration(system.settings.config.getMilliseconds("akka.zeromq.poll-timeout"), TimeUnit.MILLISECONDS) - val NewSocketTimeout = Timeout(Duration(system.settings.config.getMilliseconds("akka.zeromq.new-socket-timeout"), TimeUnit.MILLISECONDS)) + val DefaultPollTimeout: Duration = Duration(system.settings.config.getMilliseconds("akka.zeromq.poll-timeout"), TimeUnit.MILLISECONDS) + val NewSocketTimeout: Timeout = Timeout(Duration(system.settings.config.getMilliseconds("akka.zeromq.new-socket-timeout"), TimeUnit.MILLISECONDS)) /** * The version of the ZeroMQ library * @return a [[akka.zeromq.ZeroMQVersion]] */ - def version = { - ZeroMQVersion(JZMQ.getMajorVersion, JZMQ.getMinorVersion, JZMQ.getPatchVersion) - } + def version: ZeroMQVersion = ZeroMQVersion(JZMQ.getMajorVersion, JZMQ.getMinorVersion, JZMQ.getPatchVersion) /** * Factory method to create the [[akka.actor.Props]] to build the ZeroMQ socket actor. 
diff --git a/akka-zeromq/src/main/scala/akka/zeromq/package.scala b/akka-zeromq/src/main/scala/akka/zeromq/package.scala index 6eeba5b92a..1241700fcb 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/package.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/package.scala @@ -20,10 +20,10 @@ package object zeromq { /** * Convenience accessor to subscribe to all events */ - val SubscribeAll = Subscribe(Seq.empty) + val SubscribeAll: Subscribe = Subscribe.all /** * Set the linger to 0, doesn't block and discards messages that haven't been sent yet. */ - val NoLinger = Linger(0) + val NoLinger: Linger = Linger.no } \ No newline at end of file From 8cad9bb1b61fa104861b22944afe238e0a75317d Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 12:40:52 +0200 Subject: [PATCH 128/538] add deployOn/deployOnAll DSL to MultiNodeSpec, see #2118 --- .../src/main/scala/akka/actor/Deployer.scala | 2 +- .../NetworkFailureInjector.scala | 2 +- .../DirectRoutedRemoteActorMultiJvmSpec.scala | 38 +++++++++------ .../akka/remote/testkit/MultiNodeSpec.scala | 47 +++++++++++++++++-- .../scala/akka/remote/RemoteDeployer.scala | 4 +- 5 files changed, 70 insertions(+), 23 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 2fd9538d77..821691d853 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -128,7 +128,7 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce add(d.path.split("/").drop(1), d) } - protected def parseConfig(key: String, config: Config): Option[Deploy] = { + def parseConfig(key: String, config: Config): Option[Deploy] = { val deployment = config.withFallback(default) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index 
ba8f8d1285..b425518044 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -135,7 +135,7 @@ private[akka] object NetworkFailureInjector { } /** - * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed + * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed * by the FailureInjector of the TestConductor extension. These can * pass through requests immediately, drop them or throttle to a desired rate. The FSMs are * registered in the TestConductorExt.failureInjector so that settings can be applied from diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala index 3f23f60b37..294bc80884 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala @@ -27,20 +27,9 @@ object DirectRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { val master = role("master") val slave = role("slave") - nodeConfig(master, ConfigFactory.parseString(""" - akka.actor { - deployment { - /service-hello.remote = "akka://MultiNodeSpec@%s" - } - } - # FIXME When using NettyRemoteTransport instead of TestConductorTransport it works - # akka.remote.transport = "akka.remote.netty.NettyRemoteTransport" - """.format("localhost:2553"))) // FIXME is there a way to avoid hardcoding the host:port here? 
- - nodeConfig(slave, ConfigFactory.parseString(""" - akka.remote.netty.port = 2553 - """)) - + deployOn(master, """/service-hello.remote = "@slave@" """) + + deployOnAll("""/service-hello2.remote = "@slave@" """) } class DirectRoutedRemoteActorMultiJvmNode1 extends DirectRoutedRemoteActorSpec @@ -60,7 +49,26 @@ class DirectRoutedRemoteActorSpec extends MultiNodeSpec(DirectRoutedRemoteActorM actor.isInstanceOf[RemoteActorRef] must be(true) val slaveAddress = testConductor.getAddressFor(slave).await - (actor ? "identify").await.asInstanceOf[ActorRef].path.address must equal(slaveAddress) + actor ! "identify" + expectMsgType[ActorRef].path.address must equal(slaveAddress) + + // shut down the actor before we let the other node(s) shut down so we don't try to send + // "Terminate" to a shut down node + system.stop(actor) + } + + testConductor.enter("done") + } + + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef (with deployOnAll)" in { + + runOn(master) { + val actor = system.actorOf(Props[SomeActor], "service-hello2") + actor.isInstanceOf[RemoteActorRef] must be(true) + + val slaveAddress = testConductor.getAddressFor(slave).await + actor ! 
"identify" + expectMsgType[ActorRef].path.address must equal(slaveAddress) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 3822a1f529..e7bce0890c 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -4,7 +4,7 @@ package akka.remote.testkit import akka.testkit.AkkaSpec -import akka.actor.ActorSystem +import akka.actor.{ ActorSystem, ExtendedActorSystem } import akka.remote.testconductor.TestConductor import java.net.InetAddress import java.net.InetSocketAddress @@ -17,6 +17,8 @@ import akka.util.Duration import akka.actor.ActorPath import akka.actor.RootActorPath import akka.remote.testconductor.RoleName +import akka.actor.Deploy +import com.typesafe.config.ConfigObject /** * Configure the role names and participants of the test, including configuration settings. @@ -25,7 +27,9 @@ abstract class MultiNodeConfig { private var _commonConf: Option[Config] = None private var _nodeConf = Map[RoleName, Config]() - private var _roles = Seq[RoleName]() + private var _roles = Vector[RoleName]() + private var _deployments = Map[RoleName, Seq[String]]() + private var _allDeploy = Vector[String]() /** * Register a common base config for all test participants, if so desired. 
@@ -68,6 +72,11 @@ abstract class MultiNodeConfig { r } + def deployOn(role: RoleName, deployment: String): Unit = + _deployments += role -> ((_deployments get role getOrElse Vector()) :+ deployment) + + def deployOnAll(deployment: String): Unit = _allDeploy :+= deployment + private[testkit] lazy val mySelf: RoleName = { require(_roles.size > MultiNodeSpec.selfIndex, "not enough roles declared for this test") _roles(MultiNodeSpec.selfIndex) @@ -78,6 +87,10 @@ abstract class MultiNodeConfig { configs reduce (_ withFallback _) } + private[testkit] def deployments(node: RoleName): Seq[String] = (_deployments get node getOrElse Nil) ++ _allDeploy + + private[testkit] def roles: Seq[RoleName] = _roles + } object MultiNodeSpec { @@ -115,11 +128,13 @@ object MultiNodeSpec { } -abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem) extends AkkaSpec(_system) { +abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem, roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) + extends AkkaSpec(_system) { import MultiNodeSpec._ - def this(config: MultiNodeConfig) = this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config)) + def this(config: MultiNodeConfig) = + this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config), config.roles, config.deployments) /* * Test Class Interface @@ -188,4 +203,28 @@ abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem) extends testConductor.startClient(mySelf, controllerAddr).await } + // now add deployments, if so desired + + private case class Replacement(tag: String, role: RoleName) { + lazy val addr = node(role).address.toString + } + private val replacements = roles map (r ⇒ Replacement("@" + r.name + "@", r)) + private val deployer = system.asInstanceOf[ExtendedActorSystem].provider.deployer + deployments(mySelf) foreach { str ⇒ + val deployString = (str /: replacements) { + case (base, r @ Replacement(tag, _)) ⇒ + base.indexOf(tag) match { + case -1 ⇒ base 
+ case start ⇒ base.replace(tag, r.addr) + } + } + import scala.collection.JavaConverters._ + ConfigFactory.parseString(deployString).root.asScala foreach { + case (key, value: ConfigObject) ⇒ + deployer.parseConfig(key, value.toConfig) foreach deployer.deploy + case (key, x) ⇒ + throw new IllegalArgumentException("key " + key + " must map to deployment section, not simple value " + x) + } + } + } \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index 0858c66405..296a773625 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -12,9 +12,9 @@ case class RemoteScope(node: Address) extends Scope { def withFallback(other: Scope): Scope = this } -class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { +private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { - override protected def parseConfig(path: String, config: Config): Option[Deploy] = { + override def parseConfig(path: String, config: Config): Option[Deploy] = { import scala.collection.JavaConverters._ super.parseConfig(path, config) match { From ea090bdfd2686dad41550420b069f42c4c254a26 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 13:56:50 +0200 Subject: [PATCH 129/538] get better auto-generated actor system names in tests, see #2122 --- .../src/test/scala/akka/cluster/ClusterSpec.scala | 4 ++-- .../testconductor/NetworkFailureInjector.scala | 2 +- .../scala/akka/remote/testkit/MultiNodeSpec.scala | 2 +- .../src/test/scala/akka/testkit/AkkaSpec.scala | 12 ++++++++---- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 38017ad00c..854d9e5584 100644 
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -47,11 +47,11 @@ abstract class ClusterSpec(_system: ActorSystem) extends AkkaSpec(_system) { def portPrefix: Int - def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, config.withFallback(ClusterSpec.testConf))) + def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName(classOf[ClusterSpec]), config.withFallback(ClusterSpec.testConf))) def this(s: String) = this(ConfigFactory.parseString(s)) - def this() = this(ActorSystem(AkkaSpec.getCallerName, ClusterSpec.testConf)) + def this() = this(ActorSystem(AkkaSpec.getCallerName(classOf[ClusterSpec]), ClusterSpec.testConf)) def awaitConvergence(nodes: Iterable[Cluster], maxWaitTime: Duration = 60 seconds) { val deadline = maxWaitTime.fromNow diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index ba8f8d1285..b425518044 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -135,7 +135,7 @@ private[akka] object NetworkFailureInjector { } /** - * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed + * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed * by the FailureInjector of the TestConductor extension. These can * pass through requests immediately, drop them or throttle to a desired rate. 
The FSMs are * registered in the TestConductorExt.failureInjector so that settings can be applied from diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 3822a1f529..e6a1ca6dac 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -119,7 +119,7 @@ abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem) extends import MultiNodeSpec._ - def this(config: MultiNodeConfig) = this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config)) + def this(config: MultiNodeConfig) = this(config.mySelf, ActorSystem(AkkaSpec.getCallerName(classOf[MultiNodeSpec]), config.config)) /* * Test Class Interface diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index f24ea49b8c..c7000f2cf7 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -46,9 +46,13 @@ object AkkaSpec { ConfigFactory.parseMap(map.asJava) } - def getCallerName: String = { + def getCallerName(clazz: Class[_]): String = { val s = Thread.currentThread.getStackTrace map (_.getClassName) drop 1 dropWhile (_ matches ".*AkkaSpec.?$") - s.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") + val reduced = s.lastIndexWhere(_ == clazz.getName) match { + case -1 ⇒ s + case z ⇒ s drop (z + 1) + } + reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") } } @@ -56,13 +60,13 @@ object AkkaSpec { abstract class AkkaSpec(_system: ActorSystem) extends TestKit(_system) with WordSpec with MustMatchers with BeforeAndAfterAll { - def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, config.withFallback(AkkaSpec.testConf))) + def this(config: Config) = 
this(ActorSystem(AkkaSpec.getCallerName(getClass), config.withFallback(AkkaSpec.testConf))) def this(s: String) = this(ConfigFactory.parseString(s)) def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) - def this() = this(ActorSystem(AkkaSpec.getCallerName, AkkaSpec.testConf)) + def this() = this(ActorSystem(AkkaSpec.getCallerName(getClass), AkkaSpec.testConf)) val log: LoggingAdapter = Logging(system, this.getClass) From ad4725aa708a11171493a74f347b4032a220f2a4 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 13:57:04 +0200 Subject: [PATCH 130/538] Moved MembershipChangeListenerSpec to multi-jvm. See #2114 --- .../MembershipChangeListenerSpec.scala | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) rename akka-cluster/src/{test => multi-jvm}/scala/akka/cluster/MembershipChangeListenerSpec.scala (90%) diff --git a/akka-cluster/src/test/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala similarity index 90% rename from akka-cluster/src/test/scala/akka/cluster/MembershipChangeListenerSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 17a7c6ed7a..6ff97fe483 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -3,19 +3,19 @@ */ package akka.cluster -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ -import akka.util.duration._ - -import java.net.InetSocketAddress -import java.util.concurrent.{ CountDownLatch, TimeUnit } - +import akka.actor.ActorSystem +import akka.actor.ActorSystemImpl +import akka.remote.RemoteActorRefProvider +import akka.testkit.ImplicitSender +import akka.testkit.LongRunningTest +import akka.testkit.duration2TestDuration +import akka.util.duration.intToDurationInt +import 
com.typesafe.config.ConfigFactory +import java.util.concurrent.CountDownLatch +import java.util.concurrent.TimeUnit +import scala.annotation.tailrec import scala.collection.immutable.SortedSet -import com.typesafe.config._ - class MembershipChangeListenerSpec extends ClusterSpec with ImplicitSender { val portPrefix = 6 From 7322ff3ef0801eaa6cc95126f29d20cc19bd1ab4 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 14:46:35 +0200 Subject: [PATCH 131/538] Port MembershipChangeListenerSpec to MultiNodeSpec. See #2114 --- .../GossipMembershipMultiJvmSpec.scala | 134 ------------ .../MembershipChangeListenerSpec.scala | 204 ++++++++---------- 2 files changed, 88 insertions(+), 250 deletions(-) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala deleted file mode 100644 index c380d3e5eb..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala +++ /dev/null @@ -1,134 +0,0 @@ -// package akka.cluster - -// import akka.actor.Actor -// import akka.remote._ -// import akka.routing._ -// import akka.routing.Routing.Broadcast - -// object GossipMembershipMultiJvmSpec { -// val NrOfNodes = 4 -// class SomeActor extends Actor with Serializable { -// def receive = { -// case "hit" ⇒ sender ! 
system.nodename -// case "end" ⇒ self.stop() -// } -// } - -// import com.typesafe.config.ConfigFactory -// val commonConfig = ConfigFactory.parseString(""" -// akka { -// loglevel = "WARNING" -// cluster { -// seed-nodes = ["localhost:9991"] -// } -// remote.server.hostname = "localhost" -// }""") - -// val node1Config = ConfigFactory.parseString(""" -// akka { -// remote.server.port = "9991" -// cluster.nodename = "node1" -// }""") withFallback commonConfig - -// val node2Config = ConfigFactory.parseString(""" -// akka { -// remote.server.port = "9992" -// cluster.nodename = "node2" -// }""") withFallback commonConfig - -// val node3Config = ConfigFactory.parseString(""" -// akka { -// remote.server.port = "9993" -// cluster.nodename = "node3" -// }""") withFallback commonConfig - -// val node4Config = ConfigFactory.parseString(""" -// akka { -// remote.server.port = "9994" -// cluster.nodename = "node4" -// }""") withFallback commonConfig -// } - -// class GossipMembershipMultiJvmNode1 extends AkkaRemoteSpec(GossipMembershipMultiJvmSpec.node1Config) { -// import GossipMembershipMultiJvmSpec._ -// val nodes = NrOfNodes -// "A cluster" must { -// "allow new node to join and should reach convergence with new membership table" in { - -// barrier("setup") -// remote.start() - -// barrier("start") -// val actor = system.actorOf(Props[SomeActor]("service-hello") -// actor.isInstanceOf[RoutedActorRef] must be(true) - -// val connectionCount = NrOfNodes - 1 -// val iterationCount = 10 - -// var replies = Map( -// "node1" -> 0, -// "node2" -> 0, -// "node3" -> 0) - -// for (i ← 0 until iterationCount) { -// for (k ← 0 until connectionCount) { -// val nodeName = (actor ? "hit").as[String].getOrElse(fail("No id returned by actor")) -// replies = replies + (nodeName -> (replies(nodeName) + 1)) -// } -// } - -// barrier("broadcast-end") -// actor ! 
Broadcast("end") - -// barrier("end") -// replies.values foreach { _ must be > (0) } - -// barrier("done") -// } -// } -// } - -// class GossipMembershipMultiJvmNode2 extends AkkaRemoteSpec(GossipMembershipMultiJvmSpec.node2Config) { -// import GossipMembershipMultiJvmSpec._ -// val nodes = NrOfNodes -// "___" must { -// "___" in { -// barrier("setup") -// remote.start() -// barrier("start") -// barrier("broadcast-end") -// barrier("end") -// barrier("done") -// } -// } -// } - -// class GossipMembershipMultiJvmNode3 extends AkkaRemoteSpec(GossipMembershipMultiJvmSpec.node3Config) { -// import GossipMembershipMultiJvmSpec._ -// val nodes = NrOfNodes -// "___" must { -// "___" in { -// barrier("setup") -// remote.start() -// barrier("start") -// barrier("broadcast-end") -// barrier("end") -// barrier("done") -// } -// } -// } - -// class GossipMembershipMultiJvmNode4 extends AkkaRemoteSpec(GossipMembershipMultiJvmSpec.node4Config) { -// import GossipMembershipMultiJvmSpec._ -// val nodes = NrOfNodes -// "___" must { -// "___" in { -// barrier("setup") -// remote.start() -// barrier("start") -// barrier("broadcast-end") -// barrier("end") -// barrier("done") -// } -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 6ff97fe483..e81c52ad36 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -3,127 +3,99 @@ */ package akka.cluster -import akka.actor.ActorSystem -import akka.actor.ActorSystemImpl -import akka.remote.RemoteActorRefProvider -import akka.testkit.ImplicitSender -import akka.testkit.LongRunningTest -import akka.testkit.duration2TestDuration -import akka.util.duration.intToDurationInt -import com.typesafe.config.ConfigFactory -import java.util.concurrent.CountDownLatch -import 
java.util.concurrent.TimeUnit -import scala.annotation.tailrec import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit.ImplicitSender +import akka.testkit.TestLatch -class MembershipChangeListenerSpec extends ClusterSpec with ImplicitSender { - val portPrefix = 6 +object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") - var node0: Cluster = _ - var node1: Cluster = _ - var node2: Cluster = _ - - var system0: ActorSystemImpl = _ - var system1: ActorSystemImpl = _ - var system2: ActorSystemImpl = _ - - try { - "A set of connected cluster systems" must { - "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - system0 = ActorSystem("system0", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d550 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote0 = system0.provider.asInstanceOf[RemoteActorRefProvider] - node0 = Cluster(system0) - - system1 = ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d551 - cluster.node-to-join = "akka://system0@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider] - node1 = Cluster(system1) - - val latch = new CountDownLatch(2) - - node0.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - latch.countDown() - } - }) - node1.registerListener(new 
MembershipChangeListener { - def notify(members: SortedSet[Member]) { - latch.countDown() - } - }) - - latch.await(10.seconds.dilated.toMillis, TimeUnit.MILLISECONDS) - - Thread.sleep(10.seconds.dilated.toMillis) - - // check cluster convergence - node0.convergence must be('defined) - node1.convergence must be('defined) - } - - "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - - // ======= NODE 2 ======== - system2 = ActorSystem("system2", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d552 - cluster.node-to-join = "akka://system0@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider] - node2 = Cluster(system2) - - val latch = new CountDownLatch(3) - node0.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - latch.countDown() - } - }) - node1.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - latch.countDown() - } - }) - node2.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - latch.countDown() - } - }) - - latch.await(30.seconds.dilated.toMillis, TimeUnit.MILLISECONDS) - } + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka.cluster { + gossip-frequency = 200 ms + leader-actions-frequency = 200 ms + periodic-tasks-initial-delay = 300 ms } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) - } + """))) - override def atTermination() { - if (node0 ne null) node0.shutdown() - if (system0 ne null) system0.shutdown() + nodeConfig(first, ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded port + akka.remote.netty.port=2603 + """)) - if (node1 ne null) 
node1.shutdown() - if (system1 ne null) system1.shutdown() + nodeConfig(second, ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded host:port + akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2603" + """)) + + nodeConfig(third, ConfigFactory.parseString(""" + # FIXME get rid of this hardcoded host:port + akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2603" + """)) + +} + +class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec +class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec +class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec + +abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) with ImplicitSender with BeforeAndAfter { + import MembershipChangeListenerMultiJvmSpec._ + + override def initialParticipants = 3 + + var node: Cluster = _ + + after { + testConductor.enter("after") + } + + "A set of connected cluster systems" must { + + val firstAddress = testConductor.getAddressFor(first).await + val secondAddress = testConductor.getAddressFor(second).await + + "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { + + runOn(first, second) { + node = Cluster(system) + val latch = TestLatch() + node.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) + latch.countDown() + } + }) + latch.await + node.convergence.isDefined must be(true) + } + + } + + "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { + + runOn(third) { + node = Cluster(system) + } + + // runOn all + val latch = TestLatch() + node.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 3 && 
members.forall(_.status == MemberStatus.Up)) + latch.countDown() + } + }) + latch.await + node.convergence.isDefined must be(true) + + } + } - if (node2 ne null) node2.shutdown() - if (system2 ne null) system2.shutdown() - } } From d16590f82e5c1fcf07de0986157b5d956b4cc82a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 15:34:11 +0200 Subject: [PATCH 132/538] Remove node var. See #2115 --- .../akka/cluster/NodeMembershipSpec.scala | 62 +++++-------------- .../scala/akka/cluster/NodeStartupSpec.scala | 32 +++------- 2 files changed, 25 insertions(+), 69 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 4b9436165d..22c76cae67 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -40,7 +40,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp override def initialParticipants = 3 - var node: Cluster = _ + def node() = Cluster(system) after { testConductor.enter("after") @@ -54,63 +54,33 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp "(when two systems) start gossiping to each other so that both systems gets the same gossip info" in { - def assertMembers: Unit = { - val members = node.latestGossip.members.toIndexedSeq + runOn(first, second) { + awaitCond(node().latestGossip.members.size == 2) + val members = node().latestGossip.members.toIndexedSeq members.size must be(2) members(0).address must be(firstAddress) members(1).address must be(secondAddress) awaitCond { - node.latestGossip.members.forall(_.status == MemberStatus.Up) + node().latestGossip.members.forall(_.status == MemberStatus.Up) } - } - - runOn(first) { - node = Cluster(system) - awaitCond(node.latestGossip.members.size == 2) - assertMembers - node.convergence.isDefined - } - - 
runOn(second) { - node = Cluster(system) - awaitCond(node.latestGossip.members.size == 2) - assertMembers - node.convergence.isDefined + awaitCond(node().convergence.isDefined) } } "(when three systems) start gossiping to each other so that both systems gets the same gossip info" in { - def assertMembers: Unit = { - val members = node.latestGossip.members.toIndexedSeq - members.size must be(3) - members(0).address must be(firstAddress) - members(1).address must be(secondAddress) - members(2).address must be(thirdAddress) - awaitCond { - node.latestGossip.members.forall(_.status == MemberStatus.Up) - } - } - - runOn(third) { - node = Cluster(system) - awaitCond(node.latestGossip.members.size == 3) - awaitCond(node.convergence.isDefined) - assertMembers - } - - runOn(first) { - awaitCond(node.latestGossip.members.size == 3) - assertMembers - node.convergence.isDefined - } - - runOn(second) { - awaitCond(node.latestGossip.members.size == 3) - assertMembers - node.convergence.isDefined + // runOn all + awaitCond(node().latestGossip.members.size == 3) + val members = node().latestGossip.members.toIndexedSeq + members.size must be(3) + members(0).address must be(firstAddress) + members(1).address must be(secondAddress) + members(2).address must be(thirdAddress) + awaitCond { + node().latestGossip.members.forall(_.status == MemberStatus.Up) } + awaitCond(node().convergence.isDefined) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 6d95b4d651..5f79d85ecb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -41,29 +41,25 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi override def initialParticipants = 2 - var node: Cluster = _ + def node() = Cluster(system) after { testConductor.enter("after") } - runOn(first) { - node = 
Cluster(system) - } - "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { "be a singleton cluster when started up" in { runOn(first) { - awaitCond(node.isSingletonCluster) + awaitCond(node().isSingletonCluster) // FIXME #2117 singletonCluster should reach convergence - //awaitCond(firstNode.convergence.isDefined) + //awaitCond(node().convergence.isDefined) } } "be in 'Joining' phase when started up" in { runOn(first) { - val members = node.latestGossip.members + val members = node().latestGossip.members members.size must be(1) val firstAddress = testConductor.getAddressFor(first).await val joiningMember = members find (_.address == firstAddress) @@ -75,26 +71,16 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi "A second cluster node with a 'node-to-join' config defined" must { "join the other node cluster when sending a Join command" in { - val secondAddress = testConductor.getAddressFor(second).await - def awaitSecondUp = awaitCond { + // runOn all + val secondAddress = testConductor.getAddressFor(second).await + awaitCond { node.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Up } } - - runOn(second) { - // start cluster on second node, and join - node = Cluster(system) - awaitSecondUp - node.convergence.isDefined - } - - runOn(first) { - awaitSecondUp - node.latestGossip.members.size must be(2) - node.convergence.isDefined - } + node().latestGossip.members.size must be(2) + awaitCond(node().convergence.isDefined) } } From e63d5b26d1047c777645311118b46302538e23d9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 15:42:34 +0200 Subject: [PATCH 133/538] Remove node var. 
See #2114 --- .../cluster/MembershipChangeListenerSpec.scala | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index e81c52ad36..3411228d5a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -50,7 +50,7 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan override def initialParticipants = 3 - var node: Cluster = _ + def node(): Cluster = Cluster(system) after { testConductor.enter("after") @@ -64,36 +64,31 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { runOn(first, second) { - node = Cluster(system) val latch = TestLatch() - node.registerListener(new MembershipChangeListener { + node().registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) latch.countDown() } }) latch.await - node.convergence.isDefined must be(true) + node().convergence.isDefined must be(true) } } "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { - runOn(third) { - node = Cluster(system) - } - // runOn all val latch = TestLatch() - node.registerListener(new MembershipChangeListener { + node().registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) latch.countDown() } }) latch.await - node.convergence.isDefined must be(true) + node().convergence.isDefined must be(true) } } From 
60b85e7da6ad03adba4f36c65187af143ec1e285 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 16:17:53 +0200 Subject: [PATCH 134/538] Remove hardcode host/port for node-to-join. See #2114 --- .../MembershipChangeListenerSpec.scala | 20 +++++-------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 3411228d5a..c648cdf631 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -24,21 +24,6 @@ object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { } """))) - nodeConfig(first, ConfigFactory.parseString(""" - # FIXME get rid of this hardcoded port - akka.remote.netty.port=2603 - """)) - - nodeConfig(second, ConfigFactory.parseString(""" - # FIXME get rid of this hardcoded host:port - akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2603" - """)) - - nodeConfig(third, ConfigFactory.parseString(""" - # FIXME get rid of this hardcoded host:port - akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2603" - """)) - } class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec @@ -64,6 +49,7 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { runOn(first, second) { + node().join(firstAddress) val latch = TestLatch() node().registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { @@ -79,6 +65,10 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be 
triggered" in { + runOn(third) { + node().join(firstAddress) + } + // runOn all val latch = TestLatch() node().registerListener(new MembershipChangeListener { From ee73090c9873bf778ddd0ac704d551368304313d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 16:25:57 +0200 Subject: [PATCH 135/538] Remove hardcode host/port for node-to-join. See #2115 --- .../akka/cluster/NodeMembershipSpec.scala | 12 +++++------ .../scala/akka/cluster/NodeStartupSpec.scala | 20 ++++++++----------- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 22c76cae67..de734088f1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -19,16 +19,9 @@ object NodeMembershipMultiJvmSpec extends MultiNodeConfig { gossip-frequency = 200 ms leader-actions-frequency = 200 ms periodic-tasks-initial-delay = 300 ms - # FIXME get rid of this hardcoded host:port - node-to-join = "akka://MultiNodeSpec@localhost:2602" } """))) - nodeConfig(first, ConfigFactory.parseString(""" - # FIXME get rid of this hardcoded port - akka.remote.netty.port=2602 - """)) - } class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec @@ -55,6 +48,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp "(when two systems) start gossiping to each other so that both systems gets the same gossip info" in { runOn(first, second) { + node().join(firstAddress) awaitCond(node().latestGossip.members.size == 2) val members = node().latestGossip.members.toIndexedSeq members.size must be(2) @@ -70,6 +64,10 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp "(when three systems) start gossiping to each other so that both systems gets the same gossip info" in { + runOn(third) { + 
node().join(firstAddress) + } + // runOn all awaitCond(node().latestGossip.members.size == 3) val members = node().latestGossip.members.toIndexedSeq diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 5f79d85ecb..8bb76fb613 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -21,16 +21,6 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { } """))) - nodeConfig(first, ConfigFactory.parseString(""" - # FIXME get rid of this hardcoded port - akka.remote.netty.port=2601 - """)) - - nodeConfig(second, ConfigFactory.parseString(""" - # FIXME get rid of this hardcoded host:port - akka.cluster.node-to-join = "akka://MultiNodeSpec@localhost:2601" - """)) - } class NodeStartupMultiJvmNode1 extends NodeStartupSpec @@ -47,6 +37,8 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi testConductor.enter("after") } + val firstAddress = testConductor.getAddressFor(first).await + "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { "be a singleton cluster when started up" in { @@ -61,7 +53,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi runOn(first) { val members = node().latestGossip.members members.size must be(1) - val firstAddress = testConductor.getAddressFor(first).await + val joiningMember = members find (_.address == firstAddress) joiningMember must not be (None) joiningMember.get.status must be(MemberStatus.Joining) @@ -69,9 +61,13 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi } } - "A second cluster node with a 'node-to-join' config defined" must { + "A second cluster node" must { "join the other node cluster when sending a Join command" in { + runOn(second) { + node().join(firstAddress) + } + // runOn all 
val secondAddress = testConductor.getAddressFor(second).await awaitCond { From af4e0c2ce83375ee588339c10fb98ef748b4cb77 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 24 May 2012 16:49:24 +0200 Subject: [PATCH 136/538] Fixing some additional binary compat thingies + documented the heaviness of the ActorSystem --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 4 +--- akka-actor/src/main/scala/akka/actor/Address.scala | 4 +++- akka-actor/src/main/scala/akka/actor/FaultHandling.scala | 2 +- akka-actor/src/main/scala/akka/actor/IO.scala | 4 ++-- akka-actor/src/main/scala/akka/event/Logging.scala | 6 +++--- akka-docs/general/actor-systems.rst | 5 +++++ 6 files changed, 15 insertions(+), 10 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 0b8e68c56a..c5595212c2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -428,9 +428,7 @@ abstract class ExtendedActorSystem extends ActorSystem { def dynamicAccess: DynamicAccess } -//FIXME This should most probably not be protected[akka] right? 
- √ -//FIXME We also need to decide whether this should be supported API or not - √ -class ActorSystemImpl protected[akka] (val name: String, applicationConfig: Config, classLoader: ClassLoader) extends ExtendedActorSystem { +private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, classLoader: ClassLoader) extends ExtendedActorSystem { if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-]*$""")) throw new IllegalArgumentException( diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index 67f147b836..44c12eed35 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -114,7 +114,9 @@ object AddressFromURIString { def parse(addr: String): Address = apply(addr) } -//FIXME is this public API? - √ +/** + * Given an ActorPath it returns the Address and the path elements if the path is well-formed + */ object ActorPathExtractor extends PathUtils { def unapply(addr: String): Option[(Address, Iterable[String])] = try { diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 8641153476..27a9f346db 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -165,7 +165,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { def makeDecider(flat: Iterable[CauseDirective]): Decider = { val directives = sort(flat) - { case x ⇒ directives find (_._1 isInstance x) map (_._2) getOrElse Escalate } + { case x ⇒ directives collectFirst { case (c, d) if c isInstance x ⇒ d } getOrElse Escalate } } /** diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index 63eb2e4b3a..07af4213fc 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -320,7 +320,7 @@ 
object IO { } object Chunk { - val empty = new Chunk(ByteString.empty) + val empty: Chunk = new Chunk(ByteString.empty) } /** @@ -344,7 +344,7 @@ object IO { * Iteratee.recover() in order to handle it properly. */ case class EOF(cause: Option[Exception]) extends Input { - final override def ++(that: Input) = that + final override def ++(that: Input): Input = that } object Iteratee { diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 58aa6aadf4..6e6f92ad0d 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -276,9 +276,9 @@ object LogSource { // this one unfortunately does not work as implicit, because existential types have some weird behavior val fromClass: LogSource[Class[_]] = new LogSource[Class[_]] { - def genString(c: Class[_]) = Logging.simpleName(c) - override def genString(c: Class[_], system: ActorSystem) = genString(c) + "(" + system + ")" - override def getClazz(c: Class[_]) = c + def genString(c: Class[_]): String = Logging.simpleName(c) + override def genString(c: Class[_], system: ActorSystem): String = genString(c) + "(" + system + ")" + override def getClazz(c: Class[_]): Class[_] = c } implicit def fromAnyClass[T]: LogSource[Class[T]] = fromClass.asInstanceOf[LogSource[Class[T]]] diff --git a/akka-docs/general/actor-systems.rst b/akka-docs/general/actor-systems.rst index 2051f2d845..1b7d6a7759 100644 --- a/akka-docs/general/actor-systems.rst +++ b/akka-docs/general/actor-systems.rst @@ -14,6 +14,11 @@ which means that we need not concern ourselves with their emotional state or moral issues). The result can then serve as a mental scaffolding for building the software implementation. +.. note:: + + An ActorSystem is a heavyweight structure that will allocate 1…N Threads, + so create one per logical application. 
+ Hierarchical Structure ---------------------- From 8d114f5da5e938e0967a0aeee1201b9ff3d92f8d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 16:31:43 +0200 Subject: [PATCH 137/538] Move JoinTwoClustersSpec to multi-jvm. See #2111 --- .../scala/akka/cluster/JoinTwoClustersSpec.scala | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) rename akka-cluster/src/{test => multi-jvm}/scala/akka/cluster/JoinTwoClustersSpec.scala (95%) diff --git a/akka-cluster/src/test/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala similarity index 95% rename from akka-cluster/src/test/scala/akka/cluster/JoinTwoClustersSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index ff12cb6c60..1efd356698 100644 --- a/akka-cluster/src/test/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -4,15 +4,13 @@ package akka.cluster -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ -import akka.util.duration._ - -import com.typesafe.config._ - -import java.net.InetSocketAddress +import akka.actor.ActorSystem +import akka.actor.ActorSystemImpl +import akka.testkit.ImplicitSender +import akka.testkit.LongRunningTest +import akka.testkit.duration2TestDuration +import akka.util.duration.intToDurationInt +import com.typesafe.config.ConfigFactory class JoinTwoClustersSpec extends ClusterSpec("akka.cluster.failure-detector.threshold = 5") with ImplicitSender { val portPrefix = 3 From db16b6c4b38bb80c0b918b52ed082261d9812b3b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 17:08:28 +0200 Subject: [PATCH 138/538] Port JoinTwoClustersSpec to MultiNodeSpec. 
See #2111 --- .../akka/cluster/JoinTwoClustersSpec.scala | 235 +++++++----------- 1 file changed, 87 insertions(+), 148 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 1efd356698..9c59beb70e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -4,174 +4,113 @@ package akka.cluster -import akka.actor.ActorSystem -import akka.actor.ActorSystemImpl -import akka.testkit.ImplicitSender -import akka.testkit.LongRunningTest -import akka.testkit.duration2TestDuration -import akka.util.duration.intToDurationInt +import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit.ImplicitSender -class JoinTwoClustersSpec extends ClusterSpec("akka.cluster.failure-detector.threshold = 5") with ImplicitSender { - val portPrefix = 3 +object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { + val a1 = role("a1") + val a2 = role("a2") + val b1 = role("b1") + val b2 = role("b2") + val c1 = role("c1") + val c2 = role("c2") - var node1: Cluster = _ - var node2: Cluster = _ - var node3: Cluster = _ - var node4: Cluster = _ - var node5: Cluster = _ - var node6: Cluster = _ + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka.cluster { + gossip-frequency = 200 ms + leader-actions-frequency = 200 ms + periodic-tasks-initial-delay = 300 ms + } + """))) - var system1: ActorSystemImpl = _ - var system2: ActorSystemImpl = _ - var system3: ActorSystemImpl = _ - var system4: ActorSystemImpl = _ - var system5: ActorSystemImpl = _ - var system6: ActorSystemImpl = _ +} - try { - "Three different clusters (A, B and C)" must { +class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec +class 
JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec - // ======= NODE 1 ======== - system1 = ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d551 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node1 = Cluster(system1) +abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with ImplicitSender with BeforeAndAfter { + import JoinTwoClustersMultiJvmSpec._ - // ======= NODE 2 ======== - system2 = ActorSystem("system2", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d552 - cluster.node-to-join = "akka://system1@localhost:%d551" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node2 = Cluster(system2) + override def initialParticipants = 6 - // ======= NODE 3 ======== - system3 = ActorSystem("system3", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d553 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node3 = Cluster(system3) + def node(): Cluster = Cluster(system) - // ======= NODE 4 ======== - system4 = ActorSystem("system4", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d554 - cluster.node-to-join = "akka://system3@localhost:%d553" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node4 = Cluster(system4) + after { + 
testConductor.enter("after") + } - // ======= NODE 5 ======== - system5 = ActorSystem("system5", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d555 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node5 = Cluster(system5) + val a1Address = testConductor.getAddressFor(a1).await + val b1Address = testConductor.getAddressFor(b1).await + val c1Address = testConductor.getAddressFor(c1).await - // ======= NODE 6 ======== - system6 = ActorSystem("system6", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d556 - cluster.node-to-join = "akka://system5@localhost:%d555" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node6 = Cluster(system6) + def awaitUpConvergence(numberOfMembers: Int): Unit = { + awaitCond(node().latestGossip.members.size == numberOfMembers) + awaitCond(node().latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitCond(node().convergence.isDefined) + } - "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { + "Three different clusters (A, B and C)" must { - println("Give the system time to converge...") - awaitConvergence(node1 :: node2 :: node3 :: node4 :: node5 :: node6 :: Nil) + "be able to 'elect' a single leader after joining (A -> B)" in { - // check leader - node1.isLeader must be(true) - node2.isLeader must be(false) - node3.isLeader must be(true) - node4.isLeader must be(false) - node5.isLeader must be(true) - node6.isLeader must be(false) - - // join - node4.join(node1.remoteAddress) - //node1.scheduleNodeJoin(node4.remoteAddress) - - println("Give the system time to converge...") - Thread.sleep(10.seconds.dilated.toMillis) - awaitConvergence(node1 :: node2 :: node3 :: node4 :: node5 :: node6 :: Nil) - - // check leader - node1.isLeader 
must be(true) - node2.isLeader must be(false) - node3.isLeader must be(false) - node4.isLeader must be(false) - node5.isLeader must be(true) - node6.isLeader must be(false) + runOn(a1, a2) { + node().join(a1Address) + } + runOn(b1, b2) { + node().join(b1Address) + } + runOn(c1, c2) { + node().join(c1Address) } - "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { - // join - node4.join(node5.remoteAddress) - //node5.scheduleNodeJoin(node4.remoteAddress) + awaitUpConvergence(numberOfMembers = 2) - println("Give the system time to converge...") - Thread.sleep(10.seconds.dilated.toMillis) - awaitConvergence(node1 :: node2 :: node3 :: node4 :: node5 :: node6 :: Nil) + runOn(a1, b1, c1) { + node().isLeader must be(true) + } + runOn(a2, b2, c2) { + node().isLeader must be(false) + } - // check leader - node1.isLeader must be(true) - node2.isLeader must be(false) - node3.isLeader must be(false) - node4.isLeader must be(false) - node5.isLeader must be(false) - node6.isLeader must be(false) + runOn(b2) { + node().join(a1Address) + } + + runOn(a1, a2, b1, b2) { + awaitUpConvergence(numberOfMembers = 4) + } + + runOn(a1, c1) { + node().isLeader must be(true) + } + runOn(a2, b1, b2, c2) { + node().isLeader must be(false) + } + + } + + "be able to 'elect' a single leader after joining (C -> A + B)" in { + + runOn(b2) { + node().join(c1Address) + } + + awaitUpConvergence(numberOfMembers = 6) + + runOn(a1) { + node().isLeader must be(true) + } + runOn(a2, b1, b2, c1, c2) { + node().isLeader must be(false) } } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) } - override def atTermination() { - if (node1 ne null) node1.shutdown() - if (system1 ne null) system1.shutdown() - - if (node2 ne null) node2.shutdown() - if (system2 ne null) system2.shutdown() - - if (node3 ne null) node3.shutdown() - if (system3 ne null) system3.shutdown() - - if (node4 ne null) node4.shutdown() - if (system4 ne null) system4.shutdown() - - 
if (node5 ne null) node5.shutdown() - if (system5 ne null) system5.shutdown() - - if (node6 ne null) node6.shutdown() - if (system6 ne null) system6.shutdown() - } } From 0d51fb2ed5a76be97f679b8a5ee2fac7a0d8dc25 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 24 May 2012 17:36:00 +0200 Subject: [PATCH 139/538] Use ifNode for asserts. See #2111 --- .../akka/cluster/JoinTwoClustersSpec.scala | 21 +++---------------- 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 9c59beb70e..b5e764ea23 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -72,12 +72,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm awaitUpConvergence(numberOfMembers = 2) - runOn(a1, b1, c1) { - node().isLeader must be(true) - } - runOn(a2, b2, c2) { - node().isLeader must be(false) - } + node().isLeader must be(ifNode(a1, b1, c1)(true)(false)) runOn(b2) { node().join(a1Address) @@ -87,12 +82,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm awaitUpConvergence(numberOfMembers = 4) } - runOn(a1, c1) { - node().isLeader must be(true) - } - runOn(a2, b1, b2, c2) { - node().isLeader must be(false) - } + node().isLeader must be(ifNode(a1, c1)(true)(false)) } @@ -104,12 +94,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm awaitUpConvergence(numberOfMembers = 6) - runOn(a1) { - node().isLeader must be(true) - } - runOn(a2, b1, b2, c1, c2) { - node().isLeader must be(false) - } + node().isLeader must be(ifNode(a1)(true)(false)) } } From d8bb688ecebd5f7b5db60568f2845a475da6161c Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 17:59:36 +0200 Subject: [PATCH 140/538] add comments to clarify code in 
NetworkFailureInjector.scala --- .../NetworkFailureInjector.scala | 56 ++++++++++++++++++- 1 file changed, 53 insertions(+), 3 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index b425518044..bf5d7d6007 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -58,7 +58,9 @@ private[akka] class FailureInjector extends Actor with ActorLogging { } /** - * Retrieve target settings, also if they were sketchy before (i.e. no system name) + * Retrieve target settings, also if they were sketchy before (i.e. no system name). + * In the latter case, copy settings from the sketchy address and remove the old + * mapping. */ def retrieveTargetSettings(target: Address): Option[ChannelSettings] = { settings get target orElse { @@ -68,12 +70,16 @@ private[akka] class FailureInjector extends Actor with ActorLogging { case (Address("akka", "", `host`, `port`), s) ⇒ true case _ ⇒ false } map { - case (_, s) ⇒ settings += target -> s; s + case (a, s) ⇒ settings -= a; settings += target -> s; s } } } def receive = { + /* + * If a channel handler tells us that he’s been disconnected, stop the + * throttle actors and forget about them (but not possibly applied settings) + */ case RemoveContext(ctx) ⇒ channels get ctx foreach { inj ⇒ context stop inj.sender @@ -81,6 +87,12 @@ private[akka] class FailureInjector extends Actor with ActorLogging { } channels -= ctx settings ++= settings collect { case (addr, c @ ChannelSettings(Some(`ctx`), _, _)) ⇒ (addr, c.copy(ctx = None)) } + /* + * Throttle/Blackhole/Unblock connections, based on the sign of rateMBit; + * will inform throttle actors for that destination if currently connected + * and update the settings for the target address; reply is needed 
to + * confirm this operation and tell the master that he can proceed. + */ case ThrottleMsg(target, dir, rateMBit) ⇒ val setting = retrieveTargetSettings(target) settings += target -> ((setting getOrElse ChannelSettings() match { @@ -95,6 +107,10 @@ private[akka] class FailureInjector extends Actor with ActorLogging { case x ⇒ x }) sender ! "ok" + /* + * Disconnect the currently active connection to the given target; reply is + * needed to confirm this operation and tell the master the he can proceed. + */ case DisconnectMsg(target, abort) ⇒ retrieveTargetSettings(target) foreach { case ChannelSettings(Some(ctx), _, _) ⇒ @@ -107,22 +123,39 @@ private[akka] class FailureInjector extends Actor with ActorLogging { case _ ⇒ log.debug("no connection to {} to close or abort", target) } sender ! "ok" + /* + * All data transfers up or down the pipeline are redirected through this + * case statement, which dispatches to the throttle actors for the given + * channel handler context. If none exist yet, they will be created, and + * this is a bit complicated in the case where the first message has not + * yet been exchanged, i.e. the other side’s Address is not yet known + * (keep in mind that an actor system’s remote address is not necessarily + * connected in any way to the IP from which we receive the connection). + */ case s @ Send(ctx, direction, future, msg) ⇒ channels get ctx match { case Some(Injectors(snd, rcv)) ⇒ if (direction includes Direction.Send) snd ! s if (direction includes Direction.Receive) rcv ! 
s case None ⇒ + // don’t do reverse lookup at first val (ipaddr, ip, port) = ctx.getChannel.getRemoteAddress match { case s: InetSocketAddress ⇒ (s.getAddress, s.getAddress.getHostAddress, s.getPort) } val addr = ChannelAddress.get(ctx.getChannel) orElse { settings collect { case (a @ Address("akka", _, Some(`ip`), Some(`port`)), _) ⇒ a } headOption } orElse { + // only if raw IP failed, try with hostname val name = ipaddr.getHostName if (name == ip) None else settings collect { case (a @ Address("akka", _, Some(`name`), Some(`port`)), _) ⇒ a } headOption - } getOrElse Address("akka", "", ip, port) // this will not match later requests directly, but be picked up by retrieveTargetSettings + } getOrElse Address("akka", "", ip, port) + /* + * ^- the above last resort will not match later requests directly, but be + * picked up by retrieveTargetSettings, so that throttle ops are + * applied to the right throttle actors, assuming that there can + * be only one actor system per host:port. + */ val inj = ingestContextAddress(ctx, addr) if (direction includes Direction.Send) inj.sender ! s if (direction includes Direction.Receive) inj.receiver ! s @@ -276,6 +309,16 @@ private[akka] class ThrottleActor(channelContext: ChannelHandlerContext) case _ ⇒ } + /** + * Core of the throttling engine: delay Send operations until their bit count + * would actually have had time to travel down the line at the configured + * data rate, and split up send operations which are so big that gaps larger + * than packetSplitThreshold would be planned (they will happen nevertheless + * due to HashedWheelTimer’s semantics, but we compensate by sending more the + * next time, in proportion to how long the Tick was overdue). So, this should + * lead to the correct rate on average, with increased latency of the order of + * HWT granularity. 
+ */ private def schedule(d: Data): (Data, Seq[Send], Option[Duration]) = { val now = System.nanoTime @tailrec def rec(d: Data, toSend: Seq[Send]): (Data, Seq[Send], Option[Duration]) = { @@ -297,6 +340,13 @@ private[akka] class ThrottleActor(channelContext: ChannelHandlerContext) rec(d, Seq()) } + /** + * Split one Send operation in two, cutting off the given number of bytes at + * the front. If it was Direction.Send, i.e. a channel.write(), then also + * split the Future so that a failure in either part will complete the original + * with that failure. Data are not copied, as long as ChannelBuffer.slice does + * not copy them. + */ private def split(s: Send, bytes: Int): (Send, Send) = { s.msg match { case buf: ChannelBuffer ⇒ From 7d342e5c968e949492c97c988b163038c7287749 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 15 May 2012 20:57:39 +0200 Subject: [PATCH 141/538] add docs about how to serialize ActorRefs - scala & java samples of how to obtain the correct address to inject when calling ActorPath.toStringWithAddress --- .../SerializationDocTestBase.java | 137 ++++++++++++++---- akka-docs/java/serialization.rst | 41 ++++++ .../serialization/SerializationDocSpec.scala | 41 +++++- akka-docs/scala/serialization.rst | 41 ++++++ 4 files changed, 233 insertions(+), 27 deletions(-) diff --git a/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java b/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java index a62827fc98..5d27e4f37f 100644 --- a/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java +++ b/akka-docs/java/code/docs/serialization/SerializationDocTestBase.java @@ -7,6 +7,7 @@ import org.junit.Test; import static org.junit.Assert.*; //#imports import akka.actor.*; +import akka.remote.RemoteActorRefProvider; import akka.serialization.*; import com.typesafe.config.*; @@ -78,32 +79,118 @@ public class SerializationDocTestBase { //#actorref-serializer theActorSystem.shutdown(); } + + //#external-address + public 
static class ExternalAddressExt implements Extension { + private final ExtendedActorSystem system; + public ExternalAddressExt(ExtendedActorSystem system) { + this.system = system; + } - @Test public void demonstrateTheProgrammaticAPI() { - //#programmatic - ActorSystem system = ActorSystem.create("example"); - - // Get the Serialization Extension - Serialization serialization = SerializationExtension.get(system); - - // Have something to serialize - String original = "woohoo"; - - // Find the Serializer for it - Serializer serializer = serialization.findSerializerFor(original); - - // Turn it into bytes - byte[] bytes = serializer.toBinary(original); - - // Turn it back into an object, - // the nulls are for the class manifest and for the classloader - String back = (String)serializer.fromBinary(bytes); - - // Voilá! - assertEquals(original, back); - - //#programmatic - system.shutdown(); + public Address getAddressFor(Address remoteAddress) { + final scala.Option

optAddr = system.provider() + .getExternalAddressFor(remoteAddress); + if (optAddr.isDefined()) { + return optAddr.get(); + } else { + throw new UnsupportedOperationException( + "cannot send to remote address " + remoteAddress); + } + } } + + public static class ExternalAddress extends + AbstractExtensionId implements ExtensionIdProvider { + public static final ExternalAddress ID = new ExternalAddress(); + + public ExternalAddress lookup() { + return ID; + } + + public ExternalAddressExt createExtension(ExtendedActorSystem system) { + return new ExternalAddressExt(system); + } + } + + //#external-address + + public void demonstrateExternalAddress() { + // this is not meant to be run, only to be compiled + final ActorSystem system = ActorSystem.create(); + final Address remoteAddr = new Address("", ""); + // #external-address + final Address addr = ExternalAddress.ID.get(system).getAddressFor(remoteAddr); + // #external-address + } + + //#external-address-default + public static class DefaultAddressExt implements Extension { + private final ExtendedActorSystem system; + + public DefaultAddressExt(ExtendedActorSystem system) { + this.system = system; + } + + public Address getAddress() { + final ActorRefProvider provider = system.provider(); + if (provider instanceof RemoteActorRefProvider) { + return ((RemoteActorRefProvider) provider).transport().address(); + } else { + throw new UnsupportedOperationException("need RemoteActorRefProvider"); + } + } + } + + public static class DefaultAddress extends + AbstractExtensionId implements ExtensionIdProvider { + public static final DefaultAddress ID = new DefaultAddress(); + + public DefaultAddress lookup() { + return ID; + } + + public DefaultAddressExt createExtension(ExtendedActorSystem system) { + return new DefaultAddressExt(system); + } + } + + //#external-address-default + + public void demonstrateDefaultAddress() { + // this is not meant to be run, only to be compiled + final ActorSystem system = 
ActorSystem.create(); + final Address remoteAddr = new Address("", ""); + // #external-address-default + final Address addr = DefaultAddress.ID.get(system).getAddress(); + // #external-address-default + } + + @Test + public void demonstrateTheProgrammaticAPI() { + // #programmatic + ActorSystem system = ActorSystem.create("example"); + + // Get the Serialization Extension + Serialization serialization = SerializationExtension.get(system); + + // Have something to serialize + String original = "woohoo"; + + // Find the Serializer for it + Serializer serializer = serialization.findSerializerFor(original); + + // Turn it into bytes + byte[] bytes = serializer.toBinary(original); + + // Turn it back into an object, + // the nulls are for the class manifest and for the classloader + String back = (String) serializer.fromBinary(bytes); + + // Voilá! + assertEquals(original, back); + + // #programmatic + system.shutdown(); + } } diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index 2710dd1f4c..7618ffa4a8 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -109,6 +109,47 @@ you might want to know how to serialize and deserialize them properly, here's th .. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java :include: imports,actorref-serializer +.. note:: + + ``ActorPath.toStringWithAddress`` only differs from ``toString`` if the + address does not already have ``host`` and ``port`` components, i.e. it only + inserts address information for local addresses. + +This assumes that serialization happens in the context of sending a message +through the remote transport. There are other uses of serialization, though, +e.g. storing actor references outside of an actor application (database, +durable mailbox, etc.). In this case, it is important to keep in mind that the +address part of an actor’s path determines how that actor is communicated with. 
+Storing a local actor path might be the right choice if the retrieval happens +in the same logical context, but it is not enough when deserializing it on a +different network host: for that it would need to include the system’s remote +transport address. An actor system is not limited to having just one remote +transport per se, which makes this question a bit more interesting. + +In the general case, the local address to be used depends on the type of remote +address which shall be the recipient of the serialized information. Use +:meth:`ActorRefProvider.getExternalAddressFor(remoteAddr)` to query the system +for the appropriate address to use when sending to ``remoteAddr``: + +.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java + :include: external-address + +This requires that you know at least which type of address will be supported by +the system which will deserialize the resulting actor reference; if you have no +concrete address handy you can create a dummy one for the right protocol using +``new Address(protocol, "", "", 0)`` (assuming that the actual transport used is as +lenient as Akka’s RemoteActorRefProvider). + +There is a possible simplification available if you are just using the default +:class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is +enabled by the fact that this combination has just a single remote address: + +.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java + :include: external-address-default + +This solution has to be adapted once other providers are used (like the planned +extensions for clustering). 
+ Deep serialization of Actors ---------------------------- diff --git a/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala index 61086b78a6..b3eb4cfe13 100644 --- a/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala @@ -5,13 +5,17 @@ package docs.serialization import org.scalatest.matchers.MustMatchers import akka.testkit._ -import akka.actor.{ ActorRef, ActorSystem } - //#imports +import akka.actor.{ ActorRef, ActorSystem } import akka.serialization._ import com.typesafe.config.ConfigFactory //#imports +import akka.actor.ExtensionKey +import akka.actor.ExtendedActorSystem +import akka.actor.Extension +import akka.actor.Address +import akka.remote.RemoteActorRefProvider //#my-own-serializer class MyOwnSerializer extends Serializer { @@ -176,5 +180,38 @@ class SerializationDocSpec extends AkkaSpec { val deserializedActorRef = theActorSystem actorFor identifier // Then just use the ActorRef //#actorref-serializer + + //#external-address + object ExternalAddress extends ExtensionKey[ExternalAddressExt] + + class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { + def addressFor(remoteAddr: Address): Address = + system.provider.getExternalAddressFor(remoteAddr) getOrElse + (throw new UnsupportedOperationException("cannot send to " + remoteAddr)) + } + + def serializeTo(ref: ActorRef, remote: Address): String = + ref.path.toStringWithAddress(ExternalAddress(theActorSystem).addressFor(remote)) + //#external-address + } + + "demonstrate how to do default Akka serialization of ActorRef" in { + val theActorSystem: ActorSystem = system + + //#external-address-default + object ExternalAddress extends ExtensionKey[ExternalAddressExt] + + class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { + def addressForAkka: Address = system.provider match { + case r: 
RemoteActorRefProvider ⇒ r.transport.address + case _ ⇒ + throw new UnsupportedOperationException( + "this method requires the RemoteActorRefProvider to be configured") + } + } + + def serializeAkkaDefault(ref: ActorRef): String = + ref.path.toStringWithAddress(ExternalAddress(theActorSystem).addressForAkka) + //#external-address-default } } diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index fc97bbd0df..88fe74fd13 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -107,6 +107,47 @@ you might want to know how to serialize and deserialize them properly, here's th .. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala :include: imports,actorref-serializer +.. note:: + + ``ActorPath.toStringWithAddress`` only differs from ``toString`` if the + address does not already have ``host`` and ``port`` components, i.e. it only + inserts address information for local addresses. + +This assumes that serialization happens in the context of sending a message +through the remote transport. There are other uses of serialization, though, +e.g. storing actor references outside of an actor application (database, +durable mailbox, etc.). In this case, it is important to keep in mind that the +address part of an actor’s path determines how that actor is communicated with. +Storing a local actor path might be the right choice if the retrieval happens +in the same logical context, but it is not enough when deserializing it on a +different network host: for that it would need to include the system’s remote +transport address. An actor system is not limited to having just one remote +transport per se, which makes this question a bit more interesting. + +In the general case, the local address to be used depends on the type of remote +address which shall be the recipient of the serialized information. 
Use +:meth:`ActorRefProvider.getExternalAddressFor(remoteAddr)` to query the system +for the appropriate address to use when sending to ``remoteAddr``: + +.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala + :include: external-address + +This requires that you know at least which type of address will be supported by +the system which will deserialize the resulting actor reference; if you have no +concrete address handy you can create a dummy one for the right protocol using +``Address(protocol, "", "", 0)`` (assuming that the actual transport used is as +lenient as Akka’s RemoteActorRefProvider). + +There is a possible simplification available if you are just using the default +:class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is +enabled by the fact that this combination has just a single remote address: + +.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala + :include: external-address-default + +This solution has to be adapted once other providers are used (like the planned +extensions for clustering). 
+ Deep serialization of Actors ---------------------------- From dd30e81a1a0281ae98a0e72f4f5f680fabfe290a Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 22:23:36 +0200 Subject: [PATCH 142/538] document Specs2 + TestKit, see #2068, plus fix up broken includes - include move of doc samples out of akka package also in the includecode directives - fix broken serialization docs, which require one thing in the akka package --- akka-docs/general/configuration.rst | 4 +- akka-docs/java/agents.rst | 18 +- akka-docs/java/dispatchers.rst | 16 +- akka-docs/java/event-bus.rst | 6 +- akka-docs/java/extending-akka.rst | 14 +- akka-docs/java/fault-tolerance-sample.rst | 2 +- akka-docs/java/fault-tolerance.rst | 24 +- akka-docs/java/fsm.rst | 10 +- akka-docs/java/futures.rst | 44 +-- akka-docs/java/logging.rst | 6 +- akka-docs/java/remoting.rst | 10 +- akka-docs/java/routing.rst | 50 +-- akka-docs/java/scheduler.rst | 6 +- akka-docs/java/serialization.rst | 18 +- akka-docs/java/transactors.rst | 20 +- akka-docs/java/typed-actors.rst | 28 +- akka-docs/java/untyped-actors.rst | 38 +- akka-docs/java/zeromq.rst | 26 +- akka-docs/modules/durable-mailbox.rst | 10 +- akka-docs/scala/actors.rst | 42 +-- akka-docs/scala/agents.rst | 24 +- akka-docs/scala/camel.rst | 10 +- .../serialization/SerializationDocSpec.scala | 324 +++++++++--------- .../docs/testkit/Specs2DemoAcceptance.scala | 34 ++ .../code/docs/testkit/Specs2DemoSpec.scala | 34 ++ akka-docs/scala/dispatchers.rst | 18 +- akka-docs/scala/event-bus.rst | 2 +- akka-docs/scala/extending-akka.rst | 18 +- akka-docs/scala/fault-tolerance-sample.rst | 2 +- akka-docs/scala/fault-tolerance.rst | 24 +- akka-docs/scala/fsm.rst | 38 +- akka-docs/scala/futures.rst | 52 +-- akka-docs/scala/io.rst | 28 +- akka-docs/scala/logging.rst | 8 +- akka-docs/scala/remoting.rst | 6 +- akka-docs/scala/routing.rst | 36 +- akka-docs/scala/scheduler.rst | 6 +- akka-docs/scala/serialization.rst | 28 +- akka-docs/scala/testing.rst | 71 +++- 
akka-docs/scala/testkit-example.rst | 2 +- akka-docs/scala/transactors.rst | 24 +- akka-docs/scala/typed-actors.rst | 32 +- akka-docs/scala/zeromq.rst | 20 +- project/AkkaBuild.scala | 3 +- 44 files changed, 681 insertions(+), 555 deletions(-) create mode 100644 akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala create mode 100644 akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala diff --git a/akka-docs/general/configuration.rst b/akka-docs/general/configuration.rst index a0dc9dd49a..3be3704b22 100644 --- a/akka-docs/general/configuration.rst +++ b/akka-docs/general/configuration.rst @@ -256,7 +256,7 @@ result:: You may also specify and parse the configuration programmatically in other ways when instantiating the ``ActorSystem``. -.. includecode:: code/akka/docs/config/ConfigDocSpec.scala +.. includecode:: code/docs/config/ConfigDocSpec.scala :include: imports,custom-config Reading configuration from a custom location @@ -301,7 +301,7 @@ you could put a config string in code using You can also combine your custom config with the usual config, that might look like: -.. includecode:: code/akka/docs/config/ConfigDoc.java +.. includecode:: code/docs/config/ConfigDoc.java :include: java-custom-config When working with ``Config`` objects, keep in mind that there are diff --git a/akka-docs/java/agents.rst b/akka-docs/java/agents.rst index 3fcc886d17..0ba7dd90ce 100644 --- a/akka-docs/java/agents.rst +++ b/akka-docs/java/agents.rst @@ -40,17 +40,17 @@ application. An ``ActorSystem`` is required to create the underlying Actors. See Here is an example of creating an Agent: -.. includecode:: code/akka/docs/agent/AgentDocTest.java +.. includecode:: code/docs/agent/AgentDocTest.java :include: import-system,import-agent :language: java -.. includecode:: code/akka/docs/agent/AgentDocTest.java#create +.. includecode:: code/docs/agent/AgentDocTest.java#create :language: java An Agent will be running until you invoke ``close`` on it. 
Then it will be eligible for garbage collection (unless you hold on to it in some way). -.. includecode:: code/akka/docs/agent/AgentDocTest.java#close +.. includecode:: code/docs/agent/AgentDocTest.java#close :language: java @@ -65,10 +65,10 @@ the update will be applied but dispatches to an Agent from a single thread will occur in order. You apply a value or a function by invoking the ``send`` function. -.. includecode:: code/akka/docs/agent/AgentDocTest.java#import-function +.. includecode:: code/docs/agent/AgentDocTest.java#import-function :language: java -.. includecode:: code/akka/docs/agent/AgentDocTest.java#send +.. includecode:: code/docs/agent/AgentDocTest.java#send :language: java You can also dispatch a function to update the internal state but on its own @@ -77,7 +77,7 @@ long-running or blocking operations. You do this with the ``sendOff`` method. Dispatches using either ``sendOff`` or ``send`` will still be executed in order. -.. includecode:: code/akka/docs/agent/AgentDocTest.java#send-off +.. includecode:: code/docs/agent/AgentDocTest.java#send-off :language: java @@ -87,7 +87,7 @@ Reading an Agent's value Agents can be dereferenced (you can get an Agent's value) by calling the get method: -.. includecode:: code/akka/docs/agent/AgentDocTest.java#read-get +.. includecode:: code/docs/agent/AgentDocTest.java#read-get :language: java Reading an Agent's current value does not involve any message passing and @@ -101,8 +101,8 @@ Awaiting an Agent's value It is also possible to read the value after all currently queued sends have completed. You can do this with ``await``: -.. includecode:: code/akka/docs/agent/AgentDocTest.java#import-timeout +.. includecode:: code/docs/agent/AgentDocTest.java#import-timeout :language: java -.. includecode:: code/akka/docs/agent/AgentDocTest.java#read-await +.. 
includecode:: code/docs/agent/AgentDocTest.java#read-await :language: java diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index 90a0e9cb6a..f7e0db9c3c 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -18,7 +18,7 @@ Setting the dispatcher for an Actor So in case you want to give your ``Actor`` a different dispatcher than the default, you need to do two things, of which the first is: -.. includecode:: ../java/code/akka/docs/dispatcher/DispatcherDocTestBase.java#defining-dispatcher +.. includecode:: ../java/code/docs/dispatcher/DispatcherDocTestBase.java#defining-dispatcher .. note:: The "dispatcherId" you specify in withDispatcher is in fact a path into your configuration. @@ -27,11 +27,11 @@ So in case you want to give your ``Actor`` a different dispatcher than the defau And then you just need to configure that dispatcher in your configuration: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-dispatcher-config +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#my-dispatcher-config And here's another example that uses the "thread-pool-executor": -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-thread-pool-dispatcher-config +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#my-thread-pool-dispatcher-config For more options, see the default-dispatcher section of the :ref:`configuration`. @@ -106,11 +106,11 @@ More dispatcher configuration examples Configuring a ``PinnedDispatcher``: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-pinned-dispatcher-config +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#my-pinned-dispatcher-config And then using it: -.. includecode:: ../java/code/akka/docs/dispatcher/DispatcherDocTestBase.java#defining-pinned-dispatcher +.. 
includecode:: ../java/code/docs/dispatcher/DispatcherDocTestBase.java#defining-pinned-dispatcher Mailboxes --------- @@ -162,15 +162,15 @@ Mailbox configuration examples How to create a PriorityMailbox: -.. includecode:: ../java/code/akka/docs/dispatcher/DispatcherDocTestBase.java#prio-mailbox +.. includecode:: ../java/code/docs/dispatcher/DispatcherDocTestBase.java#prio-mailbox And then add it to the configuration: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#prio-dispatcher-config +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#prio-dispatcher-config And then an example on how you would use it: -.. includecode:: ../java/code/akka/docs/dispatcher/DispatcherDocTestBase.java#prio-dispatcher +.. includecode:: ../java/code/docs/dispatcher/DispatcherDocTestBase.java#prio-dispatcher .. note:: diff --git a/akka-docs/java/event-bus.rst b/akka-docs/java/event-bus.rst index 542b94e821..faecd1d209 100644 --- a/akka-docs/java/event-bus.rst +++ b/akka-docs/java/event-bus.rst @@ -158,12 +158,12 @@ Classification`_ which enables registering to related sets of channels (as is used for :class:`RemoteLifeCycleMessage`). The following example demonstrates how a simple subscription works. Given a simple actor: -.. includecode:: code/akka/docs/event/LoggingDocTestBase.java#imports-deadletter -.. includecode:: code/akka/docs/event/LoggingDocTestBase.java#deadletter-actor +.. includecode:: code/docs/event/LoggingDocTestBase.java#imports-deadletter +.. includecode:: code/docs/event/LoggingDocTestBase.java#deadletter-actor it can be subscribed like this: -.. includecode:: code/akka/docs/event/LoggingDocTestBase.java#deadletters +.. 
includecode:: code/docs/event/LoggingDocTestBase.java#deadletters Default Handlers ---------------- diff --git a/akka-docs/java/extending-akka.rst b/akka-docs/java/extending-akka.rst index 38fedf575a..c90fd40858 100644 --- a/akka-docs/java/extending-akka.rst +++ b/akka-docs/java/extending-akka.rst @@ -25,22 +25,22 @@ So let's create a sample extension that just lets us count the number of times s First, we define what our ``Extension`` should do: -.. includecode:: code/akka/docs/extension/ExtensionDocTestBase.java +.. includecode:: code/docs/extension/ExtensionDocTestBase.java :include: imports,extension Then we need to create an ``ExtensionId`` for our extension so we can grab ahold of it. -.. includecode:: code/akka/docs/extension/ExtensionDocTestBase.java +.. includecode:: code/docs/extension/ExtensionDocTestBase.java :include: imports,extensionid Wicked! Now all we need to do is to actually use it: -.. includecode:: code/akka/docs/extension/ExtensionDocTestBase.java +.. includecode:: code/docs/extension/ExtensionDocTestBase.java :include: extension-usage Or from inside of an Akka Actor: -.. includecode:: code/akka/docs/extension/ExtensionDocTestBase.java +.. includecode:: code/docs/extension/ExtensionDocTestBase.java :include: extension-usage-actor That's all there is to it! @@ -72,17 +72,17 @@ The :ref:`configuration` can be used for application specific settings. A good p Sample configuration: -.. includecode:: ../scala/code/akka/docs/extension/SettingsExtensionDocSpec.scala +.. includecode:: ../scala/code/docs/extension/SettingsExtensionDocSpec.scala :include: config The ``Extension``: -.. includecode:: code/akka/docs/extension/SettingsExtensionDocTestBase.java +.. includecode:: code/docs/extension/SettingsExtensionDocTestBase.java :include: imports,extension,extensionid Use it: -.. includecode:: code/akka/docs/extension/SettingsExtensionDocTestBase.java +.. 
includecode:: code/docs/extension/SettingsExtensionDocTestBase.java :include: extension-usage-actor diff --git a/akka-docs/java/fault-tolerance-sample.rst b/akka-docs/java/fault-tolerance-sample.rst index 8e379c5fcc..cb7e1e774d 100644 --- a/akka-docs/java/fault-tolerance-sample.rst +++ b/akka-docs/java/fault-tolerance-sample.rst @@ -49,5 +49,5 @@ Step Description Full Source Code of the Fault Tolerance Sample (Java) ------------------------------------------------------ -.. includecode:: code/akka/docs/actor/japi/FaultHandlingDocSample.java#all +.. includecode:: code/docs/actor/japi/FaultHandlingDocSample.java#all diff --git a/akka-docs/java/fault-tolerance.rst b/akka-docs/java/fault-tolerance.rst index fc172b8fa6..a444f79ec0 100644 --- a/akka-docs/java/fault-tolerance.rst +++ b/akka-docs/java/fault-tolerance.rst @@ -24,7 +24,7 @@ sample as it is easy to follow the log output to understand what is happening in fault-tolerance-sample -.. includecode:: code/akka/docs/actor/japi/FaultHandlingDocSample.java#all +.. includecode:: code/docs/actor/japi/FaultHandlingDocSample.java#all :exclude: imports,messages,dummydb Creating a Supervisor Strategy @@ -35,7 +35,7 @@ in more depth. For the sake of demonstration let us consider the following strategy: -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: strategy I have chosen a few well-known exception types in order to demonstrate the @@ -70,49 +70,49 @@ Test Application The following section shows the effects of the different directives in practice, wherefor a test setup is needed. First off, we need a suitable supervisor: -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: supervisor This supervisor will be used to create a child, with which we can experiment: -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. 
includecode:: code/docs/actor/FaultHandlingTestBase.java :include: child The test is easier by using the utilities described in :ref:`akka-testkit`, where ``TestProbe`` provides an actor ref useful for receiving and inspecting replies. -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: testkit Let us create actors: -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: create The first test shall demonstrate the ``Resume`` directive, so we try it out by setting some non-initial state in the actor and have it fail: -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: resume As you can see the value 42 survives the fault handling directive. Now, if we change the failure to a more serious ``NullPointerException``, that will no longer be the case: -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: restart And finally in case of the fatal ``IllegalArgumentException`` the child will be terminated by the supervisor: -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: stop Up to now the supervisor was completely unaffected by the child’s failure, because the directives set did handle it. In case of an ``Exception``, this is not true anymore and the supervisor escalates the failure. -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: escalate-kill The supervisor itself is supervised by the top-level actor provided by the @@ -125,12 +125,12 @@ child not to survive this failure. 
In case this is not desired (which depends on the use case), we need to use a different supervisor which overrides this behavior. -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: supervisor2 With this parent, the child survives the escalated restart, as demonstrated in the last test: -.. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java +.. includecode:: code/docs/actor/FaultHandlingTestBase.java :include: escalate-restart diff --git a/akka-docs/java/fsm.rst b/akka-docs/java/fsm.rst index bd8712d460..582b3528f2 100644 --- a/akka-docs/java/fsm.rst +++ b/akka-docs/java/fsm.rst @@ -35,9 +35,9 @@ using a small well-defined set of methods. One way to achieve this is to assemble all mutable state in a superclass which keeps it private and offers protected methods for mutating it. -.. includecode:: code/akka/docs/actor/FSMDocTestBase.java#imports-data +.. includecode:: code/docs/actor/FSMDocTestBase.java#imports-data -.. includecode:: code/akka/docs/actor/FSMDocTestBase.java#base +.. includecode:: code/docs/actor/FSMDocTestBase.java#base The benefit of this approach is that state changes can be acted upon in one central place, which makes it impossible to forget inserting code for reacting @@ -50,15 +50,15 @@ The base class shown above is designed to support a similar example as for the Scala FSM documentation: an actor which receives and queues messages, to be delivered in batches to a configurable target actor. The messages involved are: -.. includecode:: code/akka/docs/actor/FSMDocTestBase.java#data +.. includecode:: code/docs/actor/FSMDocTestBase.java#data This actor has only the two states ``IDLE`` and ``ACTIVE``, making their handling quite straight-forward in the concrete actor derived from the base class: -.. includecode:: code/akka/docs/actor/FSMDocTestBase.java#imports-actor +.. includecode:: code/docs/actor/FSMDocTestBase.java#imports-actor -.. 
includecode:: code/akka/docs/actor/FSMDocTestBase.java#actor +.. includecode:: code/docs/actor/FSMDocTestBase.java#actor The trick here is to factor out common functionality like :meth:`whenUnhandled` and :meth:`transition` in order to obtain a few well-defined points for diff --git a/akka-docs/java/futures.rst b/akka-docs/java/futures.rst index cc119c46e6..e0639420cb 100644 --- a/akka-docs/java/futures.rst +++ b/akka-docs/java/futures.rst @@ -18,7 +18,7 @@ which is very similar to a ``java.util.concurrent.Executor``. if you have an ``A it will use its default dispatcher as the ``ExecutionContext``, or you can use the factory methods provided by the ``ExecutionContexts`` class to wrap ``Executors`` and ``ExecutorServices``, or even create your own. -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: imports1,imports7,diy-execution-context Use with Actors @@ -30,7 +30,7 @@ which only works if the original sender was an ``UntypedActor``) and the second Using the ``ActorRef``\'s ``ask`` method to send a message will return a Future. To wait for and retrieve the actual result the simplest method is: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: imports1,ask-blocking This will cause the current thread to block and wait for the ``UntypedActor`` to 'complete' the ``Future`` with it's reply. @@ -47,7 +47,7 @@ A common use case within Akka is to have some computation performed concurrently the extra utility of an ``UntypedActor``. If you find yourself creating a pool of ``UntypedActor``\s for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. 
includecode:: code/docs/future/FutureDocTestBase.java :include: imports2,future-eval In the above code the block passed to ``future`` will be executed by the default ``Dispatcher``, @@ -57,12 +57,12 @@ and we also avoid the overhead of managing an ``UntypedActor``. You can also create already completed Futures using the ``Futures`` class, which can be either successes: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: successful Or failures: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: failed Functional Futures @@ -78,7 +78,7 @@ The first method for working with ``Future`` functionally is ``map``. This metho some operation on the result of the ``Future``, and returning a new result. The return value of the ``map`` method is another ``Future`` that will contain the new result: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: imports2,map In this example we are joining two strings together within a Future. Instead of waiting for f1 to complete, @@ -92,7 +92,7 @@ Something to note when using these methods: if the ``Future`` is still being pro it will be the completing thread that actually does the work. If the ``Future`` is already complete though, it will be run in our current thread. For example: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: map2 The original ``Future`` will take at least 0.1 second to execute now, which means it is still being processed at @@ -101,7 +101,7 @@ by the dispatcher when the result is ready. If we do the opposite: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. 
includecode:: code/docs/future/FutureDocTestBase.java :include: map3 Our little string has been processed long before our 0.1 second sleep has finished. Because of this, @@ -112,7 +112,7 @@ Normally this works quite well as it means there is very little overhead to runn If there is a possibility of the function taking a non-trivial amount of time to process it might be better to have this done concurrently, and for that we use ``flatMap``: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: flat-map Now our second Future is executed concurrently as well. This technique can also be used to combine the results @@ -120,7 +120,7 @@ of several Futures into a single calculation, which will be better explained in If you need to do conditional propagation, you can use ``filter``: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: filter Composing Futures @@ -129,7 +129,7 @@ Composing Futures It is very often desirable to be able to combine different Futures with each other, below are some examples on how that can be done in a non-blocking fashion. -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: imports3,sequence To better explain what happened in the example, ``Future.sequence`` is taking the ``Iterable>`` @@ -139,7 +139,7 @@ and we aggregate the sum of the ``Iterable``. The ``traverse`` method is similar to ``sequence``, but it takes a sequence of ``A``s and applies a function from ``A`` to ``Future`` and returns a ``Future>``, enabling parallel ``map`` over the sequence, if you use ``Futures.future`` to create the ``Future``. -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: imports4,traverse It's as simple as that! 
@@ -150,7 +150,7 @@ and the type of the futures and returns something with the same type as the star and then applies the function to all elements in the sequence of futures, non-blockingly, the execution will be started when the last of the Futures is completed. -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: imports5,fold That's all it takes! @@ -160,7 +160,7 @@ If the sequence passed to ``fold`` is empty, it will return the start-value, in In some cases you don't have a start-value and you're able to use the value of the first completing Future in the sequence as the start-value, you can use ``reduce``, it works like this: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: imports6,reduce Same as with ``fold``, the execution will be started when the last of the Futures is completed, you can also parallelize @@ -174,13 +174,13 @@ Callbacks Sometimes you just want to listen to a ``Future`` being completed, and react to that not by creating a new Future, but by side-effecting. For this Akka supports ``onComplete``, ``onSuccess`` and ``onFailure``, of which the latter two are specializations of the first. -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: onSuccess -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: onFailure -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: onComplete Ordering @@ -192,7 +192,7 @@ But there's a solution! And it's name is ``andThen``, and it creates a new Futur the specified callback, a Future that will have the same result as the Future it's called on, which allows for ordering like in the following sample: -.. 
includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: and-then Auxiliary methods @@ -201,13 +201,13 @@ Auxiliary methods ``Future`` ``fallbackTo`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future`` if the first ``Future`` fails. -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: fallback-to You can also combine two Futures into a new ``Future`` that will hold a tuple of the two Futures successful results, using the ``zip`` operation. -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: zip Exceptions @@ -221,7 +221,7 @@ calling ``Await.result`` will cause it to be thrown again so it can be handled p It is also possible to handle an ``Exception`` by returning a different result. This is done with the ``recover`` method. For example: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: recover In this example, if the actor replied with a ``akka.actor.Status.Failure`` containing the ``ArithmeticException``, @@ -232,6 +232,6 @@ it will behave as if we hadn't used the ``recover`` method. You can also use the ``recoverWith`` method, which has the same relationship to ``recover`` as ``flatMap`` has to ``map``, and is use like this: -.. includecode:: code/akka/docs/future/FutureDocTestBase.java +.. includecode:: code/docs/future/FutureDocTestBase.java :include: try-recover diff --git a/akka-docs/java/logging.rst b/akka-docs/java/logging.rst index 177cdb7295..0f6f4479e5 100644 --- a/akka-docs/java/logging.rst +++ b/akka-docs/java/logging.rst @@ -11,7 +11,7 @@ How to Log Create a ``LoggingAdapter`` and use the ``error``, ``warning``, ``info``, or ``debug`` methods, as illustrated in this example: -.. 
includecode:: code/akka/docs/event/LoggingDocTestBase.java +.. includecode:: code/docs/event/LoggingDocTestBase.java :include: imports,my-actor The first parameter to ``Logging.getLogger`` could also be any @@ -33,7 +33,7 @@ placeholders results in a warning being appended to the log statement (i.e. on the same line with the same severity). You may pass a Java array as the only substitution argument to have its elements be treated individually: -.. includecode:: code/akka/docs/event/LoggingDocTestBase.java#array +.. includecode:: code/docs/event/LoggingDocTestBase.java#array The Java :class:`Class` of the log source is also included in the generated :class:`LogEvent`. In case of a simple string this is replaced with a “marker” @@ -170,7 +170,7 @@ event handler available in the 'akka-slf4j' module. Example of creating a listener: -.. includecode:: code/akka/docs/event/LoggingDocTestBase.java +.. includecode:: code/docs/event/LoggingDocTestBase.java :include: imports,imports-listener,my-event-listener diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index c4c5edee5f..ae2ac9c246 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -82,11 +82,11 @@ The "app" in this case refers to the name of the ``ActorSystem`` (only showing d Logical path lookup is supported on the node you are on, i.e. to use the actor created above you would do the following: -.. includecode:: code/akka/docs/remoting/RemoteActorExample.java#localNodeActor +.. includecode:: code/docs/remoting/RemoteActorExample.java#localNodeActor This will obtain an ``ActorRef`` on a remote node: -.. includecode:: code/akka/docs/remoting/RemoteActorExample.java#remoteNodeActor +.. includecode:: code/docs/remoting/RemoteActorExample.java#remoteNodeActor As you can see from the example above the following pattern is used to find an ``ActorRef`` on a remote node:: @@ -103,15 +103,15 @@ precedence. With these imports: -.. 
includecode:: code/akka/docs/remoting/RemoteDeploymentDocTestBase.java#import +.. includecode:: code/docs/remoting/RemoteDeploymentDocTestBase.java#import and a remote address like this: -.. includecode:: code/akka/docs/remoting/RemoteDeploymentDocTestBase.java#make-address +.. includecode:: code/docs/remoting/RemoteDeploymentDocTestBase.java#make-address you can advise the system to create a child on that remote node like so: -.. includecode:: code/akka/docs/remoting/RemoteDeploymentDocTestBase.java#deploy +.. includecode:: code/docs/remoting/RemoteDeploymentDocTestBase.java#deploy Serialization ^^^^^^^^^^^^^ diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 7027925fbc..a55b41c43d 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -21,17 +21,17 @@ Routers In Action This is an example of how to create a router that is defined in configuration: -.. includecode:: ../scala/code/akka/docs/routing/RouterViaConfigExample.scala#config +.. includecode:: ../scala/code/docs/routing/RouterViaConfigExample.scala#config -.. includecode:: code/akka/docs/jrouting/RouterViaConfigExample.java#configurableRouting +.. includecode:: code/docs/jrouting/RouterViaConfigExample.java#configurableRouting This is an example of how to programmatically create a router and set the number of routees it should create: -.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingNrOfInstances +.. includecode:: code/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingNrOfInstances You can also give the router already created routees as in: -.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingRoutees +.. 
includecode:: code/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingRoutees It should be noted that no actor factory or class needs to be provided in this case, as the ``Router`` will not create any children on its own (which is not @@ -65,7 +65,7 @@ configuration in a :class:`RemoteRouterConfig`, attaching the remote addresses o the nodes to deploy to. Naturally, this requires your to include the ``akka-remote`` module on your classpath: -.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#remoteRoutees +.. includecode:: code/docs/jrouting/RouterViaProgramExample.java#remoteRoutees How Routing is Designed within Akka ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -95,9 +95,9 @@ deterministic fashion. Since each actor knows its own external representation as well as that of its parent, the routees decide where replies should be sent when reacting to a message: -.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#reply-with-parent +.. includecode:: code/docs/jrouting/RouterViaProgramExample.java#reply-with-parent -.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#reply-with-self +.. includecode:: code/docs/jrouting/RouterViaProgramExample.java#reply-with-self It is apparent now why routing needs to be enabled in code rather than being possible to “bolt on” later: whether or not an actor is routed means a change @@ -121,7 +121,7 @@ not have an effect on the number of actors in the pool. Setting the strategy is easily done: -.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java +.. includecode:: code/docs/jrouting/CustomRouterDocTestBase.java :include: supervision Another potentially useful approach is to give the router the same strategy as @@ -140,18 +140,18 @@ Router usage In this section we will describe how to use the different router types. First we need to create some actors that will be used in the examples: -.. includecode:: code/akka/docs/jrouting/PrintlnActor.java#printlnActor +.. 
includecode:: code/docs/jrouting/PrintlnActor.java#printlnActor and -.. includecode:: code/akka/docs/jrouting/FibonacciActor.java#fibonacciActor +.. includecode:: code/docs/jrouting/FibonacciActor.java#fibonacciActor RoundRobinRouter **************** Routes in a `round-robin `_ fashion to its routees. Code example: -.. includecode:: code/akka/docs/jrouting/ParentActor.java#roundRobinRouter +.. includecode:: code/docs/jrouting/ParentActor.java#roundRobinRouter When run you should see a similar output to this: @@ -180,7 +180,7 @@ the message it receives to this routee. This procedure will happen each time it receives a message. Code example: -.. includecode:: code/akka/docs/jrouting/ParentActor.java#randomRouter +.. includecode:: code/docs/jrouting/ParentActor.java#randomRouter When run you should see a similar output to this: @@ -213,14 +213,14 @@ The selection is done in this order: Code example: -.. includecode:: code/akka/docs/jrouting/ParentActor.java#smallestMailboxRouter +.. includecode:: code/docs/jrouting/ParentActor.java#smallestMailboxRouter BroadcastRouter *************** A broadcast router forwards the message it receives to *all* its routees. Code example: -.. includecode:: code/akka/docs/jrouting/ParentActor.java#broadcastRouter +.. includecode:: code/docs/jrouting/ParentActor.java#broadcastRouter When run you should see a similar output to this: @@ -240,7 +240,7 @@ The ScatterGatherFirstCompletedRouter will send the message on to all its routee It then waits for first result it gets back. This result will be sent back to original sender. Code example: -.. includecode:: code/akka/docs/jrouting/ParentActor.java#scatterGatherFirstCompletedRouter +.. includecode:: code/docs/jrouting/ParentActor.java#scatterGatherFirstCompletedRouter When run you should see this: @@ -272,16 +272,16 @@ of routees dynamically. This is an example of how to create a resizable router that is defined in configuration: -.. 
includecode:: ../scala/code/akka/docs/routing/RouterViaConfigExample.scala#config-resize +.. includecode:: ../scala/code/docs/routing/RouterViaConfigExample.scala#config-resize -.. includecode:: code/akka/docs/jrouting/RouterViaConfigExample.java#configurableRoutingWithResizer +.. includecode:: code/docs/jrouting/RouterViaConfigExample.java#configurableRoutingWithResizer Several more configuration options are available and described in ``akka.actor.deployment.default.resizer`` section of the reference :ref:`configuration`. This is an example of how to programmatically create a resizable router: -.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingWithResizer +.. includecode:: code/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingWithResizer *It is also worth pointing out that if you define the ``router`` in the configuration file then this value will be used instead of any programmatically sent parameters.* @@ -308,12 +308,12 @@ democrat related messages to the Democrat actor and all republican related messa We begin with defining the class: -.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRouter +.. includecode:: code/docs/jrouting/CustomRouterDocTestBase.java#crRouter :exclude: crRoute The next step is to implement the ``createCustomRoute`` method in the class just defined: -.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRoute +.. includecode:: code/docs/jrouting/CustomRouterDocTestBase.java#crRoute As you can see above we start off by creating the routees and put them in a collection. @@ -322,12 +322,12 @@ It registers the routees internally and failing to call this method will cause a ``ActorInitializationException`` to be thrown when the router is used. Therefore always make sure to do the following in your custom router: -.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRegisterRoutees +.. 
includecode:: code/docs/jrouting/CustomRouterDocTestBase.java#crRegisterRoutees The routing logic is where your magic sauce is applied. In our example it inspects the message types and forwards to the correct routee based on this: -.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRoutingLogic +.. includecode:: code/docs/jrouting/CustomRouterDocTestBase.java#crRoutingLogic As you can see above what's returned in the ``CustomRoute`` function, which defines the mapping from incoming sender/message to a ``List`` of ``Destination(sender, routee)``. @@ -338,11 +338,11 @@ For more information about how to alter the original sender we refer to the sour All in all the custom router looks like this: -.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#CustomRouter +.. includecode:: code/docs/jrouting/CustomRouterDocTestBase.java#CustomRouter If you are interested in how to use the VoteCountRouter it looks like this: -.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crTest +.. includecode:: code/docs/jrouting/CustomRouterDocTestBase.java#crTest .. caution:: @@ -391,5 +391,5 @@ the actor system’s default dispatcher. All standard routers allow setting this property in their constructor or factory method, custom routers have to implement the method in a suitable way. -.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#dispatchers +.. includecode:: code/docs/jrouting/CustomRouterDocTestBase.java#dispatchers diff --git a/akka-docs/java/scheduler.rst b/akka-docs/java/scheduler.rst index faff8d9fe0..28da7feeca 100644 --- a/akka-docs/java/scheduler.rst +++ b/akka-docs/java/scheduler.rst @@ -25,13 +25,13 @@ scheduled operation. Some examples ------------- -.. includecode:: code/akka/docs/actor/SchedulerDocTestBase.java +.. includecode:: code/docs/actor/SchedulerDocTestBase.java :include: imports1,schedule-one-off-message -.. includecode:: code/akka/docs/actor/SchedulerDocTestBase.java +.. 
includecode:: code/docs/actor/SchedulerDocTestBase.java :include: schedule-one-off-thunk -.. includecode:: code/akka/docs/actor/SchedulerDocTestBase.java +.. includecode:: code/docs/actor/SchedulerDocTestBase.java :include: imports1,imports2,schedule-recurring From ``akka.actor.ActorSystem`` diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index 7618ffa4a8..c352b6e1ae 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -21,12 +21,12 @@ For Akka to know which ``Serializer`` to use for what, you need edit your :ref:` in the "akka.actor.serializers"-section you bind names to implementations of the ``akka.serialization.Serializer`` you wish to use, like this: -.. includecode:: ../scala/code/akka/docs/serialization/SerializationDocSpec.scala#serialize-serializers-config +.. includecode:: ../scala/code/docs/serialization/SerializationDocSpec.scala#serialize-serializers-config After you've bound names to different implementations of ``Serializer`` you need to wire which classes should be serialized using which ``Serializer``, this is done in the "akka.actor.serialization-bindings"-section: -.. includecode:: ../scala/code/akka/docs/serialization/SerializationDocSpec.scala#serialization-bindings-config +.. includecode:: ../scala/code/docs/serialization/SerializationDocSpec.scala#serialization-bindings-config You only need to specify the name of an interface or abstract base class of the messages. In case of ambiguity, i.e. the message implements several of the @@ -53,7 +53,7 @@ Verification If you want to verify that your messages are serializable you can enable the following config option: -.. includecode:: ../scala/code/akka/docs/serialization/SerializationDocSpec.scala#serialize-messages-config +.. includecode:: ../scala/code/docs/serialization/SerializationDocSpec.scala#serialize-messages-config .. 
warning:: @@ -62,7 +62,7 @@ If you want to verify that your messages are serializable you can enable the fol If you want to verify that your ``Props`` are serializable you can enable the following config option: -.. includecode:: ../scala/code/akka/docs/serialization/SerializationDocSpec.scala#serialize-creators-config +.. includecode:: ../scala/code/docs/serialization/SerializationDocSpec.scala#serialize-creators-config .. warning:: @@ -75,7 +75,7 @@ Programmatic If you want to programmatically serialize/deserialize using Akka Serialization, here's some examples: -.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java +.. includecode:: code/docs/serialization/SerializationDocTestBase.java :include: imports,programmatic For more information, have a look at the ``ScalaDoc`` for ``akka.serialization._`` @@ -93,7 +93,7 @@ Creating new Serializers First you need to create a class definition of your ``Serializer``, which is done by extending ``akka.serialization.JSerializer``, like this: -.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java +.. includecode:: code/docs/serialization/SerializationDocTestBase.java :include: imports,my-own-serializer :exclude: ... @@ -106,7 +106,7 @@ Serializing ActorRefs All ActorRefs are serializable using JavaSerializer, but in case you are writing your own serializer, you might want to know how to serialize and deserialize them properly, here's the magic incantation: -.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java +.. includecode:: code/docs/serialization/SerializationDocTestBase.java :include: imports,actorref-serializer .. note:: @@ -131,7 +131,7 @@ address which shall be the recipient of the serialized information. Use :meth:`ActorRefProvider.getExternalAddressFor(remoteAddr)` to query the system for the appropriate address to use when sending to ``remoteAddr``: -.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java +.. 
includecode:: code/docs/serialization/SerializationDocTestBase.java :include: external-address This requires that you know at least which type of address will be supported by @@ -144,7 +144,7 @@ There is a possible simplification available if you are just using the default :class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is enabled by the fact that this combination has just a single remote address: -.. includecode:: code/akka/docs/serialization/SerializationDocTestBase.java +.. includecode:: code/docs/serialization/SerializationDocTestBase.java :include: external-address-default This solution has to be adapted once other providers are used (like the planned diff --git a/akka-docs/java/transactors.rst b/akka-docs/java/transactors.rst index c9ac0cd90f..7b7c080ffe 100644 --- a/akka-docs/java/transactors.rst +++ b/akka-docs/java/transactors.rst @@ -63,22 +63,22 @@ Here is an example of coordinating two simple counter UntypedActors so that they both increment together in coordinated transactions. If one of them was to fail to increment, the other would also fail. -.. includecode:: code/akka/docs/transactor/Increment.java#class +.. includecode:: code/docs/transactor/Increment.java#class :language: java -.. includecode:: code/akka/docs/transactor/CoordinatedCounter.java#class +.. includecode:: code/docs/transactor/CoordinatedCounter.java#class :language: java -.. includecode:: code/akka/docs/transactor/TransactorDocTest.java#imports +.. includecode:: code/docs/transactor/TransactorDocTest.java#imports :language: java -.. includecode:: code/akka/docs/transactor/TransactorDocTest.java#coordinated-example +.. includecode:: code/docs/transactor/TransactorDocTest.java#coordinated-example :language: java To start a new coordinated transaction that you will also participate in, create a ``Coordinated`` object, passing in a ``Timeout``: -.. includecode:: code/akka/docs/transactor/TransactorDocTest.java#create-coordinated +.. 
includecode:: code/docs/transactor/TransactorDocTest.java#create-coordinated :language: java To start a coordinated transaction that you won't participate in yourself you @@ -86,7 +86,7 @@ can create a ``Coordinated`` object with a message and send it directly to an actor. The recipient of the message will be the first member of the coordination set: -.. includecode:: code/akka/docs/transactor/TransactorDocTest.java#send-coordinated +.. includecode:: code/docs/transactor/TransactorDocTest.java#send-coordinated :language: java To include another actor in the same coordinated transaction that you've created @@ -94,13 +94,13 @@ or received, use the ``coordinate`` method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. -.. includecode:: code/akka/docs/transactor/TransactorDocTest.java#include-coordinated +.. includecode:: code/docs/transactor/TransactorDocTest.java#include-coordinated :language: java To enter the coordinated transaction use the atomic method of the coordinated object, passing in a ``java.lang.Runnable``. -.. includecode:: code/akka/docs/transactor/Coordinator.java#coordinated-atomic +.. includecode:: code/docs/transactor/Coordinator.java#coordinated-atomic :language: java The coordinated transaction will wait for the other transactions before @@ -124,7 +124,7 @@ coordinating transactions, using the explicit coordination described above. Here's an example of a simple untyped transactor that will join a coordinated transaction: -.. includecode:: code/akka/docs/transactor/Counter.java#class +.. includecode:: code/docs/transactor/Counter.java#class :language: java You could send this Counter transactor a ``Coordinated(Increment)`` message. If @@ -140,7 +140,7 @@ to easily coordinate with other transactors. Here's an example of coordinating an increment, using an untyped transactor, similar to the explicitly coordinated example above. -.. 
includecode:: code/akka/docs/transactor/FriendlyCounter.java#class +.. includecode:: code/docs/transactor/FriendlyCounter.java#class :language: java To execute directly before or after the coordinated transaction, override the diff --git a/akka-docs/java/typed-actors.rst b/akka-docs/java/typed-actors.rst index b2d7a9bfae..7ab2274425 100644 --- a/akka-docs/java/typed-actors.rst +++ b/akka-docs/java/typed-actors.rst @@ -38,7 +38,7 @@ The tools of the trade Before we create our first Typed Actor we should first go through the tools that we have at our disposal, it's located in ``akka.actor.TypedActor``. -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-extension-tools .. warning:: @@ -55,37 +55,37 @@ To create a Typed Actor you need to have one or more interfaces, and one impleme Our example interface: -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: imports,typed-actor-iface :exclude: typed-actor-iface-methods Our example implementation of that interface: -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: imports,typed-actor-impl :exclude: typed-actor-impl-methods The most trivial way of creating a Typed Actor instance of our ``Squarer``: -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-create1 First type is the type of the proxy, the second type is the type of the implementation. If you need to call a specific constructor you do it like this: -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. 
includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-create2 Since you supply a ``Props``, you can specify which dispatcher to use, what the default timeout should be used and more. Now, our ``Squarer`` doesn't have any methods, so we'd better add those. -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: imports,typed-actor-iface Alright, now we've got some methods we can call, but we need to implement those in ``SquarerImpl``. -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: imports,typed-actor-impl Excellent, now we have an interface and an implementation of that interface, @@ -113,7 +113,7 @@ we *strongly* recommend that parameters passed are immutable. One-way message send ^^^^^^^^^^^^^^^^^^^^ -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-call-oneway As simple as that! The method will be executed on another thread; asynchronously. @@ -121,13 +121,13 @@ As simple as that! The method will be executed on another thread; asynchronously Request-reply message send ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-call-option This will block for as long as the timeout that was set in the ``Props`` of the Typed Actor, if needed. It will return ``None`` if a timeout occurs. -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-call-strict This will block for as long as the timeout that was set in the ``Props` of the Typed Actor, @@ -136,7 +136,7 @@ if needed. 
It will throw a ``java.util.concurrent.TimeoutException`` if a timeou Request-reply-with-future message send ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-call-future This call is asynchronous, and the Future returned can be used for asynchronous composition. @@ -146,12 +146,12 @@ Stopping Typed Actors Since Akka's Typed Actors are backed by Akka Actors they must be stopped when they aren't needed anymore. -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-stop This asynchronously stops the Typed Actor associated with the specified proxy ASAP. -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-poisonpill This asynchronously stops the Typed Actor associated with the specified proxy @@ -204,4 +204,4 @@ Lookup & Remoting Since ``TypedActors`` are backed by ``Akka Actors``, you can use ``actorFor`` together with ``typedActorOf`` to proxy ``ActorRefs`` potentially residing on remote nodes. -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java#typed-actor-remote \ No newline at end of file +.. includecode:: code/docs/actor/TypedActorDocTestBase.java#typed-actor-remote \ No newline at end of file diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index 38d2d4c430..d7c99199ed 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -36,7 +36,7 @@ Actor in Java are implemented by extending the ``UntypedActor`` class and implem Here is an example: -.. includecode:: code/akka/docs/actor/MyUntypedActor.java#my-untyped-actor +.. 
includecode:: code/docs/actor/MyUntypedActor.java#my-untyped-actor Props ----- @@ -44,7 +44,7 @@ Props ``Props`` is a configuration class to specify options for the creation of actors. Here are some examples on how to create a ``Props`` instance. -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#creating-props-config +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java#creating-props-config Creating Actors with Props @@ -52,13 +52,13 @@ Creating Actors with Props Actors are created by passing in a ``Props`` instance into the ``actorOf`` factory method. -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#creating-props +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java#creating-props Creating Actors with default constructor ---------------------------------------- -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java :include: imports,system-actorOf The call to :meth:`actorOf` returns an instance of ``ActorRef``. This is a handle to @@ -75,7 +75,7 @@ how the supervisor hierarchy is arranged. When using the context the current act will be supervisor of the created child actor. When using the system it will be a top level actor, that is supervised by the system (internal guardian actor). -.. includecode:: code/akka/docs/actor/FirstUntypedActor.java#context-actorOf +.. includecode:: code/docs/actor/FirstUntypedActor.java#context-actorOf The name parameter is optional, but you should preferably name your actors, since that is used in log messages and for identifying actors. The name must not be empty @@ -110,7 +110,7 @@ in which you can create the Actor in any way you like. Here is an example: -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#creating-constructor +.. 
includecode:: code/docs/actor/UntypedActorDocTestBase.java#creating-constructor This way of creating the Actor is also great for integrating with Dependency Injection (DI) frameworks like Guice or Spring. @@ -144,7 +144,7 @@ In addition, it offers: The remaining visible methods are user-overridable life-cycle hooks which are described in the following: -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#lifecycle-callbacks +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java#lifecycle-callbacks The implementations shown above are the defaults provided by the :class:`UntypedActor` class. @@ -163,7 +163,7 @@ termination (see `Stopping Actors`_). This service is provided by the Registering a monitor is easy (see fourth line, the rest is for demonstrating the whole functionality): -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#watch +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java#watch It should be noted that the :class:`Terminated` message is generated independent of the order in which registration and termination occur. @@ -279,7 +279,7 @@ convention. Here is an example of an immutable message: -.. includecode:: code/akka/docs/actor/ImmutableMessage.java#immutable-message +.. includecode:: code/docs/actor/ImmutableMessage.java#immutable-message Send messages @@ -332,9 +332,9 @@ Ask: Send-And-Receive-Future The ``ask`` pattern involves actors as well as futures, hence it is offered as a use pattern rather than a method on :class:`ActorRef`: -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#import-askPipe +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java#import-askPipe -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#ask-pipe +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java#ask-pipe This example demonstrates ``ask`` together with the ``pipe`` pattern on futures, because this is likely to be a common combination. 
Please note that @@ -355,7 +355,7 @@ To complete the future with an exception you need send a Failure message to the This is *not done automatically* when an actor throws an exception while processing a message. -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#reply-exception +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java#reply-exception If the actor does not complete the future, it will expire after the timeout period, specified as parameter to the ``ask`` method; this will complete the @@ -399,7 +399,7 @@ an abstract method on the ``UntypedActor`` base class that needs to be defined. Here is an example: -.. includecode:: code/akka/docs/actor/MyUntypedActor.java#my-untyped-actor +.. includecode:: code/docs/actor/MyUntypedActor.java#my-untyped-actor An alternative to using if-instanceof checks is to use `Apache Commons MethodUtils `_ @@ -432,7 +432,7 @@ received within a certain time. To receive this timeout you have to set the ``receiveTimeout`` property and declare handing for the ReceiveTimeout message. -.. includecode:: code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java#receive-timeout +.. includecode:: code/docs/actor/MyReceivedTimeoutUntypedActor.java#receive-timeout .. _stopping-actors-java: @@ -494,7 +494,7 @@ in the mailbox. Use it like this: -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java :include: import-actors,poison-pill Graceful Stop @@ -503,7 +503,7 @@ Graceful Stop :meth:`gracefulStop` is useful if you need to wait for termination or compose ordered termination of several actors: -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java :include: import-gracefulStop,gracefulStop When ``gracefulStop()`` returns successfully, the actor’s ``postStop()`` hook @@ -537,7 +537,7 @@ The hotswapped code is kept in a Stack which can be pushed and popped. 
To hotswap the Actor using ``getContext().become``: -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java :include: import-procedure,hot-swap-actor The ``become`` method is useful for many different things, such as to implement @@ -545,7 +545,7 @@ a Finite State Machine (FSM). Here is another little cute example of ``become`` and ``unbecome`` in action: -.. includecode:: code/akka/docs/actor/UntypedActorSwapper.java#swapper +.. includecode:: code/docs/actor/UntypedActorSwapper.java#swapper Downgrade --------- @@ -567,7 +567,7 @@ through regular supervisor semantics. Use it like this: -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java +.. includecode:: code/docs/actor/UntypedActorDocTestBase.java :include: import-actors,kill Actors and exceptions diff --git a/akka-docs/java/zeromq.rst b/akka-docs/java/zeromq.rst index ca136a588e..08d1a9541f 100644 --- a/akka-docs/java/zeromq.rst +++ b/akka-docs/java/zeromq.rst @@ -19,15 +19,15 @@ Connection ZeroMQ supports multiple connectivity patterns, each aimed to meet a different set of requirements. Currently, this module supports publisher-subscriber connections and connections based on dealers and routers. For connecting or accepting connections, a socket must be created. Sockets are always created using the ``akka.zeromq.ZeroMQExtension``, for example: -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#pub-socket +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#pub-socket Above examples will create a ZeroMQ Publisher socket that is Bound to the port 1233 on localhost. Similarly you can create a subscription socket, with a listener, that subscribes to all messages from the publisher using: -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#sub-socket +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#sub-socket -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#listener-actor +.. 
includecode:: code/docs/zeromq/ZeromqDocTestBase.java#listener-actor The following sub-sections describe the supported connection patterns and how they can be used in an Akka environment. However, for a comprehensive discussion of connection patterns, please refer to `ZeroMQ -- The Guide `_. @@ -43,18 +43,18 @@ When you're using zeromq pub/sub you should be aware that it needs multicast - c An actor is subscribed to a topic as follows: -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#sub-topic-socket +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#sub-topic-socket It is a prefix match so it is subscribed to all topics starting with ``foo.bar``. Note that if the given string is empty or ``Subscribe.all()`` is used, the actor is subscribed to all topics. To unsubscribe from a topic you do the following: -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#unsub-topic-socket +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#unsub-topic-socket To publish messages to a topic you must use two Frames with the topic in the first frame. -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#pub-topic +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#pub-topic Pub-Sub in Action ^^^^^^^^^^^^^^^^^ @@ -64,22 +64,22 @@ The following example illustrates one publisher with two subscribers. The publisher monitors current heap usage and system load and periodically publishes ``Heap`` events on the ``"health.heap"`` topic and ``Load`` events on the ``"health.load"`` topic. -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#health +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#health -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#health2 +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#health2 Let's add one subscriber that logs the information. It subscribes to all topics starting with ``"health"``, i.e. both ``Heap`` and ``Load`` events. -.. 
includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#logger +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#logger -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#logger2 +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#logger2 Another subscriber keep track of used heap and warns if too much heap is used. It only subscribes to ``Heap`` events. -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#alerter +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#alerter -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#alerter2 +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#alerter2 Router-Dealer Connection ------------------------ @@ -90,7 +90,7 @@ With those socket types you can build your own reliable pub sub broker that uses To create a Router socket that has a high watermark configured, you would do: -.. includecode:: code/akka/docs/zeromq/ZeromqDocTestBase.java#high-watermark +.. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#high-watermark The akka-zeromq module accepts most if not all the available configuration options for a zeromq socket. diff --git a/akka-docs/modules/durable-mailbox.rst b/akka-docs/modules/durable-mailbox.rst index aca9d51eb5..2a9ca174cf 100644 --- a/akka-docs/modules/durable-mailbox.rst +++ b/akka-docs/modules/durable-mailbox.rst @@ -52,17 +52,17 @@ you need. In the configuration of the dispatcher you specify the fully qualified class name of the mailbox: -.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala +.. includecode:: code/docs/actor/mailbox/DurableMailboxDocSpec.scala :include: dispatcher-config Here is an example of how to create an actor with a durable dispatcher, in Scala: -.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala +.. includecode:: code/docs/actor/mailbox/DurableMailboxDocSpec.scala :include: imports,dispatcher-config-use Corresponding example in Java: -.. 
includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java +.. includecode:: code/docs/actor/mailbox/DurableMailboxDocTestBase.java :include: imports,dispatcher-config-use You can also configure and tune the file-based durable mailbox. This is done in @@ -82,14 +82,14 @@ envelope that needs to be stored. As a help utility you can mixin DurableMessage to serialize and deserialize the envelope using the ordinary :ref:`serialization-scala` mechanism. This optional and you may store the envelope data in any way you like. -.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala +.. includecode:: code/docs/actor/mailbox/DurableMailboxDocSpec.scala :include: custom-mailbox To facilitate testing of a durable mailbox you may use ``DurableMailboxSpec`` as base class. It implements a few basic tests and helps you setup the a fixture. More tests can be added in concrete subclass like this: -.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala +.. includecode:: code/docs/actor/mailbox/DurableMailboxDocSpec.scala :include: custom-mailbox-test You find DurableMailboxDocSpec in ``akka-mailboxes-common-test-2.1-SNAPSHOT.jar``. diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 9269c841f5..92c335120a 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -39,7 +39,7 @@ along with the implementation of how the messages should be processed. Here is an example: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala +.. includecode:: code/docs/actor/ActorDocSpec.scala :include: imports1,my-actor Please note that the Akka Actor ``receive`` message loop is exhaustive, which is @@ -52,7 +52,7 @@ published to the ``ActorSystem``'s ``EventStream``. Creating Actors with default constructor ---------------------------------------- -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala +.. 
includecode:: code/docs/actor/ActorDocSpec.scala :include: imports2,system-actorOf The call to :meth:`actorOf` returns an instance of ``ActorRef``. This is a handle to @@ -69,7 +69,7 @@ how the supervisor hierarchy is arranged. When using the context the current act will be supervisor of the created child actor. When using the system it will be a top level actor, that is supervised by the system (internal guardian actor). -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#context-actorOf +.. includecode:: code/docs/actor/ActorDocSpec.scala#context-actorOf The name parameter is optional, but you should preferably name your actors, since that is used in log messages and for identifying actors. The name must not be empty @@ -103,7 +103,7 @@ a call-by-name block in which you can create the Actor in any way you like. Here is an example: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#creating-constructor +.. includecode:: code/docs/actor/ActorDocSpec.scala#creating-constructor Props @@ -112,7 +112,7 @@ Props ``Props`` is a configuration class to specify options for the creation of actors. Here are some examples on how to create a ``Props`` instance. -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#creating-props-config +.. includecode:: code/docs/actor/ActorDocSpec.scala#creating-props-config Creating Actors with Props @@ -120,7 +120,7 @@ Creating Actors with Props Actors are created by passing in a ``Props`` instance into the ``actorOf`` factory method. -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#creating-props +.. includecode:: code/docs/actor/ActorDocSpec.scala#creating-props Creating Actors using anonymous classes @@ -128,7 +128,7 @@ Creating Actors using anonymous classes When spawning actors for specific sub-tasks from within an actor, it may be convenient to include the code to be executed directly in place, using an anonymous class. -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#anonymous-actor +.. 
includecode:: code/docs/actor/ActorDocSpec.scala#anonymous-actor .. warning:: @@ -170,7 +170,7 @@ In addition, it offers: You can import the members in the :obj:`context` to avoid prefixing access with ``context.`` -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#import-context +.. includecode:: code/docs/actor/ActorDocSpec.scala#import-context The remaining visible methods are user-overridable life-cycle hooks which are described in the following:: @@ -199,7 +199,7 @@ termination (see `Stopping Actors`_). This service is provided by the Registering a monitor is easy: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#watch +.. includecode:: code/docs/actor/ActorDocSpec.scala#watch It should be noted that the :class:`Terminated` message is generated independent of the order in which registration and termination occur. @@ -371,7 +371,7 @@ Ask: Send-And-Receive-Future The ``ask`` pattern involves actors as well as futures, hence it is offered as a use pattern rather than a method on :class:`ActorRef`: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#ask-pipeTo +.. includecode:: code/docs/actor/ActorDocSpec.scala#ask-pipeTo This example demonstrates ``ask`` together with the ``pipeTo`` pattern on futures, because this is likely to be a common combination. Please note that @@ -391,7 +391,7 @@ To complete the future with an exception you need send a Failure message to the This is *not done automatically* when an actor throws an exception while processing a message. -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#reply-exception +.. includecode:: code/docs/actor/ActorDocSpec.scala#reply-exception If the actor does not complete the future, it will expire after the timeout period, completing it with an :class:`AskTimeoutException`. The timeout is @@ -399,11 +399,11 @@ taken from one of the following locations in order of precedence: 1. explicitly given timeout as in: -.. 
includecode:: code/akka/docs/actor/ActorDocSpec.scala#using-explicit-timeout +.. includecode:: code/docs/actor/ActorDocSpec.scala#using-explicit-timeout 2. implicit argument of type :class:`akka.util.Timeout`, e.g. -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#using-implicit-timeout +.. includecode:: code/docs/actor/ActorDocSpec.scala#using-implicit-timeout See :ref:`futures-scala` for more information on how to await or query a future. @@ -453,7 +453,7 @@ This method should return a ``PartialFunction``, e.g. a ‘match/case’ clause which the message can be matched against the different case clauses using Scala pattern matching. Here is an example: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala +.. includecode:: code/docs/actor/ActorDocSpec.scala :include: imports1,my-actor @@ -483,7 +483,7 @@ received within a certain time. To receive this timeout you have to set the ``receiveTimeout`` property and declare a case handing the ReceiveTimeout object. -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#receive-timeout +.. includecode:: code/docs/actor/ActorDocSpec.scala#receive-timeout .. _stopping-actors-scala: @@ -548,7 +548,7 @@ Graceful Stop :meth:`gracefulStop` is useful if you need to wait for termination or compose ordered termination of several actors: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#gracefulStop +.. includecode:: code/docs/actor/ActorDocSpec.scala#gracefulStop When ``gracefulStop()`` returns successfully, the actor’s ``postStop()`` hook will have been executed: there exists a happens-before edge between the end of @@ -584,7 +584,7 @@ pushed and popped. To hotswap the Actor behavior using ``become``: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#hot-swap-actor +.. 
includecode:: code/docs/actor/ActorDocSpec.scala#hot-swap-actor The ``become`` method is useful for many different things, but a particular nice example of it is in example where it is used to implement a Finite State Machine @@ -594,12 +594,12 @@ example of it is in example where it is used to implement a Finite State Machine Here is another little cute example of ``become`` and ``unbecome`` in action: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#swapper +.. includecode:: code/docs/actor/ActorDocSpec.scala#swapper Encoding Scala Actors nested receives without accidentally leaking memory ------------------------------------------------------------------------- -See this `Unnested receive example `_. +See this `Unnested receive example `_. Downgrade @@ -675,8 +675,8 @@ A bit advanced but very useful way of defining a base message handler and then extend that, either through inheritance or delegation, is to use ``PartialFunction.orElse`` chaining. -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#receive-orElse +.. includecode:: code/docs/actor/ActorDocSpec.scala#receive-orElse Or: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#receive-orElse2 +.. includecode:: code/docs/actor/ActorDocSpec.scala#receive-orElse2 diff --git a/akka-docs/scala/agents.rst b/akka-docs/scala/agents.rst index aad2dc6bd2..5657986a88 100644 --- a/akka-docs/scala/agents.rst +++ b/akka-docs/scala/agents.rst @@ -36,22 +36,22 @@ Creating and stopping Agents Agents are created by invoking ``Agent(value)`` passing in the Agent's initial value: -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#create +.. includecode:: code/docs/agent/AgentDocSpec.scala#create Note that creating an Agent requires an implicit ``ActorSystem`` (for creating the underlying actors). See :ref:`actor-systems` for more information about actor systems. An ActorSystem can be in implicit scope when creating an Agent: -.. 
includecode:: code/akka/docs/agent/AgentDocSpec.scala#create-implicit-system +.. includecode:: code/docs/agent/AgentDocSpec.scala#create-implicit-system Or the ActorSystem can be passed explicitly when creating an Agent: -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#create-explicit-system +.. includecode:: code/docs/agent/AgentDocSpec.scala#create-explicit-system An Agent will be running until you invoke ``close`` on it. Then it will be eligible for garbage collection (unless you hold on to it in some way). -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#close +.. includecode:: code/docs/agent/AgentDocSpec.scala#close Updating Agents @@ -65,7 +65,7 @@ the update will be applied but dispatches to an Agent from a single thread will occur in order. You apply a value or a function by invoking the ``send`` function. -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#send +.. includecode:: code/docs/agent/AgentDocSpec.scala#send You can also dispatch a function to update the internal state but on its own thread. This does not use the reactive thread pool and can be used for @@ -73,7 +73,7 @@ long-running or blocking operations. You do this with the ``sendOff`` method. Dispatches using either ``sendOff`` or ``send`` will still be executed in order. -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#send-off +.. includecode:: code/docs/agent/AgentDocSpec.scala#send-off Reading an Agent's value @@ -82,11 +82,11 @@ Reading an Agent's value Agents can be dereferenced (you can get an Agent's value) by invoking the Agent with parentheses like this: -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#read-apply +.. includecode:: code/docs/agent/AgentDocSpec.scala#read-apply Or by using the get method: -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#read-get +.. includecode:: code/docs/agent/AgentDocSpec.scala#read-get Reading an Agent's current value does not involve any message passing and happens immediately. 
So while updates to an Agent are asynchronous, reading the @@ -99,12 +99,12 @@ Awaiting an Agent's value It is also possible to read the value after all currently queued sends have completed. You can do this with ``await``: -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#read-await +.. includecode:: code/docs/agent/AgentDocSpec.scala#read-await You can also get a ``Future`` to this value, that will be completed after the currently queued updates have completed: -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#read-future +.. includecode:: code/docs/agent/AgentDocSpec.scala#read-future Transactional Agents @@ -115,7 +115,7 @@ that transaction. If you send to an Agent within a transaction then the dispatch to the Agent will be held until that transaction commits, and discarded if the transaction is aborted. Here's an example: -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#transfer-example +.. includecode:: code/docs/agent/AgentDocSpec.scala#transfer-example Monadic usage @@ -128,4 +128,4 @@ as-is. They are so-called 'persistent'. Example of monadic usage: -.. includecode:: code/akka/docs/agent/AgentDocSpec.scala#monadic-example +.. includecode:: code/docs/agent/AgentDocSpec.scala#monadic-example diff --git a/akka-docs/scala/camel.rst b/akka-docs/scala/camel.rst index 9c24d123f0..886e00c320 100644 --- a/akka-docs/scala/camel.rst +++ b/akka-docs/scala/camel.rst @@ -52,7 +52,7 @@ Consumer Usage of Camel's integration components in Akka is essentially a one-liner. Here's an example. -.. includecode:: code/akka/docs/camel/Introduction.scala#Consumer-mina +.. includecode:: code/docs/camel/Introduction.scala#Consumer-mina The above example exposes an actor over a tcp endpoint on port 6200 via Apache Camel's `Mina component`_. The actor implements the endpointUri method to define @@ -64,14 +64,14 @@ component`_), only the actor's endpointUri method must be changed. .. _Mina component: http://camel.apache.org/mina.html .. 
_Jetty component: http://camel.apache.org/jetty.html -.. includecode:: code/akka/docs/camel/Introduction.scala#Consumer +.. includecode:: code/docs/camel/Introduction.scala#Consumer Producer -------- Actors can also trigger message exchanges with external systems i.e. produce to Camel endpoints. -.. includecode:: code/akka/docs/camel/Introduction.scala +.. includecode:: code/docs/camel/Introduction.scala :include: imports,Producer In the above example, any message sent to this actor will be sent to @@ -127,7 +127,7 @@ messages from the ``file:data/input/actor`` Camel endpoint. .. _Consumer: http://github.com/akka/akka/blob/master/akka-camel/src/main/scala/akka/camel/Consumer.scala -.. includecode:: code/akka/docs/camel/Consumers.scala#Consumer1 +.. includecode:: code/docs/camel/Consumers.scala#Consumer1 Whenever a file is put into the data/input/actor directory, its content is picked up by the Camel `file component`_ and sent as message to the @@ -146,7 +146,7 @@ from localhost on port 8877. .. _Jetty component: http://camel.apache.org/jetty.html .. _Jetty: http://www.eclipse.org/jetty/ -.. includecode:: code/akka/docs/camel/Consumers.scala#Consumer2 +.. includecode:: code/docs/camel/Consumers.scala#Consumer2 After starting the actor, clients can send messages to that actor by POSTing to ``http://localhost:8877/camel/default``. The actor sends a response by using the diff --git a/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala index b3eb4cfe13..5fba0c4f97 100644 --- a/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala @@ -1,91 +1,105 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package docs.serialization -import org.scalatest.matchers.MustMatchers -import akka.testkit._ -//#imports -import akka.actor.{ ActorRef, ActorSystem } -import akka.serialization._ -import com.typesafe.config.ConfigFactory - -//#imports -import akka.actor.ExtensionKey -import akka.actor.ExtendedActorSystem -import akka.actor.Extension -import akka.actor.Address -import akka.remote.RemoteActorRefProvider - -//#my-own-serializer -class MyOwnSerializer extends Serializer { - - // This is whether "fromBinary" requires a "clazz" or not - def includeManifest: Boolean = false - - // Pick a unique identifier for your Serializer, - // you've got a couple of billions to choose from, - // 0 - 16 is reserved by Akka itself - def identifier = 1234567 - - // "toBinary" serializes the given object to an Array of Bytes - def toBinary(obj: AnyRef): Array[Byte] = { - // Put the code that serializes the object here - //#... - Array[Byte]() - //#... - } - - // "fromBinary" deserializes the given array, - // using the type hint (if any, see "includeManifest" above) - // into the optionally provided classLoader. - def fromBinary(bytes: Array[Byte], - clazz: Option[Class[_]]): AnyRef = { - // Put your code that deserializes here - //#... - null - //#... - } +//#extract-transport +package object akka { + // needs to be inside the akka package because accessing unsupported API ! 
+ def transportOf(system: actor.ExtendedActorSystem): remote.RemoteTransport = + system.provider match { + case r: remote.RemoteActorRefProvider ⇒ r.transport + case _ ⇒ + throw new UnsupportedOperationException( + "this method requires the RemoteActorRefProvider to be configured") + } } -//#my-own-serializer +//#extract-transport -trait MyOwnSerializable -case class Customer(name: String) extends MyOwnSerializable +package docs.serialization { -class SerializationDocSpec extends AkkaSpec { - "demonstrate configuration of serialize messages" in { - //#serialize-messages-config - val config = ConfigFactory.parseString(""" + import org.scalatest.matchers.MustMatchers + import akka.testkit._ + //#imports + import akka.actor.{ ActorRef, ActorSystem } + import akka.serialization._ + import com.typesafe.config.ConfigFactory + + //#imports + import akka.actor.ExtensionKey + import akka.actor.ExtendedActorSystem + import akka.actor.Extension + import akka.actor.Address + import akka.remote.RemoteActorRefProvider + + //#my-own-serializer + class MyOwnSerializer extends Serializer { + + // This is whether "fromBinary" requires a "clazz" or not + def includeManifest: Boolean = false + + // Pick a unique identifier for your Serializer, + // you've got a couple of billions to choose from, + // 0 - 16 is reserved by Akka itself + def identifier = 1234567 + + // "toBinary" serializes the given object to an Array of Bytes + def toBinary(obj: AnyRef): Array[Byte] = { + // Put the code that serializes the object here + //#... + Array[Byte]() + //#... + } + + // "fromBinary" deserializes the given array, + // using the type hint (if any, see "includeManifest" above) + // into the optionally provided classLoader. + def fromBinary(bytes: Array[Byte], + clazz: Option[Class[_]]): AnyRef = { + // Put your code that deserializes here + //#... + null + //#... 
+ } + } + //#my-own-serializer + + trait MyOwnSerializable + case class Customer(name: String) extends MyOwnSerializable + + class SerializationDocSpec extends AkkaSpec { + "demonstrate configuration of serialize messages" in { + //#serialize-messages-config + val config = ConfigFactory.parseString(""" akka { actor { serialize-messages = on } } """) - //#serialize-messages-config - val a = ActorSystem("system", config) - a.settings.SerializeAllMessages must be(true) - a.shutdown() - } + //#serialize-messages-config + val a = ActorSystem("system", config) + a.settings.SerializeAllMessages must be(true) + a.shutdown() + } - "demonstrate configuration of serialize creators" in { - //#serialize-creators-config - val config = ConfigFactory.parseString(""" + "demonstrate configuration of serialize creators" in { + //#serialize-creators-config + val config = ConfigFactory.parseString(""" akka { actor { serialize-creators = on } } """) - //#serialize-creators-config - val a = ActorSystem("system", config) - a.settings.SerializeAllCreators must be(true) - a.shutdown() - } + //#serialize-creators-config + val a = ActorSystem("system", config) + a.settings.SerializeAllCreators must be(true) + a.shutdown() + } - "demonstrate configuration of serializers" in { - //#serialize-serializers-config - val config = ConfigFactory.parseString(""" + "demonstrate configuration of serializers" in { + //#serialize-serializers-config + val config = ConfigFactory.parseString(""" akka { actor { serializers { @@ -96,14 +110,14 @@ class SerializationDocSpec extends AkkaSpec { } } """) - //#serialize-serializers-config - val a = ActorSystem("system", config) - a.shutdown() - } + //#serialize-serializers-config + val a = ActorSystem("system", config) + a.shutdown() + } - "demonstrate configuration of serialization-bindings" in { - //#serialization-bindings-config - val config = ConfigFactory.parseString(""" + "demonstrate configuration of serialization-bindings" in { + 
//#serialization-bindings-config + val config = ConfigFactory.parseString(""" akka { actor { serializers { @@ -122,96 +136,92 @@ class SerializationDocSpec extends AkkaSpec { } } """) - //#serialization-bindings-config - val a = ActorSystem("system", config) - SerializationExtension(a).serializerFor(classOf[String]).getClass must equal(classOf[JavaSerializer]) - SerializationExtension(a).serializerFor(classOf[Customer]).getClass must equal(classOf[JavaSerializer]) - SerializationExtension(a).serializerFor(classOf[java.lang.Boolean]).getClass must equal(classOf[MyOwnSerializer]) - a.shutdown() - } - - "demonstrate the programmatic API" in { - //#programmatic - val system = ActorSystem("example") - - // Get the Serialization Extension - val serialization = SerializationExtension(system) - - // Have something to serialize - val original = "woohoo" - - // Find the Serializer for it - val serializer = serialization.findSerializerFor(original) - - // Turn it into bytes - val bytes = serializer.toBinary(original) - - // Turn it back into an object - val back = serializer.fromBinary(bytes, manifest = None) - - // Voilá! - back must equal(original) - - //#programmatic - system.shutdown() - } - - "demonstrate serialization of ActorRefs" in { - val theActorRef: ActorRef = system.deadLetters - val theActorSystem: ActorSystem = system - - //#actorref-serializer - // Serialize - // (beneath toBinary) - - // If there is no transportAddress, - // it means that either this Serializer isn't called - // within a piece of code that sets it, - // so either you need to supply your own, - // or simply use the local path. 
- val identifier: String = Serialization.currentTransportAddress.value match { - case null ⇒ theActorRef.path.toString - case address ⇒ theActorRef.path.toStringWithAddress(address) - } - // Then just serialize the identifier however you like - - // Deserialize - // (beneath fromBinary) - val deserializedActorRef = theActorSystem actorFor identifier - // Then just use the ActorRef - //#actorref-serializer - - //#external-address - object ExternalAddress extends ExtensionKey[ExternalAddressExt] - - class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { - def addressFor(remoteAddr: Address): Address = - system.provider.getExternalAddressFor(remoteAddr) getOrElse - (throw new UnsupportedOperationException("cannot send to " + remoteAddr)) + //#serialization-bindings-config + val a = ActorSystem("system", config) + SerializationExtension(a).serializerFor(classOf[String]).getClass must equal(classOf[JavaSerializer]) + SerializationExtension(a).serializerFor(classOf[Customer]).getClass must equal(classOf[JavaSerializer]) + SerializationExtension(a).serializerFor(classOf[java.lang.Boolean]).getClass must equal(classOf[MyOwnSerializer]) + a.shutdown() } - def serializeTo(ref: ActorRef, remote: Address): String = - ref.path.toStringWithAddress(ExternalAddress(theActorSystem).addressFor(remote)) - //#external-address - } + "demonstrate the programmatic API" in { + //#programmatic + val system = ActorSystem("example") - "demonstrate how to do default Akka serialization of ActorRef" in { - val theActorSystem: ActorSystem = system + // Get the Serialization Extension + val serialization = SerializationExtension(system) - //#external-address-default - object ExternalAddress extends ExtensionKey[ExternalAddressExt] + // Have something to serialize + val original = "woohoo" - class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { - def addressForAkka: Address = system.provider match { - case r: RemoteActorRefProvider ⇒ r.transport.address - 
case _ ⇒ - throw new UnsupportedOperationException( - "this method requires the RemoteActorRefProvider to be configured") + // Find the Serializer for it + val serializer = serialization.findSerializerFor(original) + + // Turn it into bytes + val bytes = serializer.toBinary(original) + + // Turn it back into an object + val back = serializer.fromBinary(bytes, manifest = None) + + // Voilá! + back must equal(original) + + //#programmatic + system.shutdown() + } + + "demonstrate serialization of ActorRefs" in { + val theActorRef: ActorRef = system.deadLetters + val theActorSystem: ActorSystem = system + + //#actorref-serializer + // Serialize + // (beneath toBinary) + + // If there is no transportAddress, + // it means that either this Serializer isn't called + // within a piece of code that sets it, + // so either you need to supply your own, + // or simply use the local path. + val identifier: String = Serialization.currentTransportAddress.value match { + case null ⇒ theActorRef.path.toString + case address ⇒ theActorRef.path.toStringWithAddress(address) } + // Then just serialize the identifier however you like + + // Deserialize + // (beneath fromBinary) + val deserializedActorRef = theActorSystem actorFor identifier + // Then just use the ActorRef + //#actorref-serializer + + //#external-address + object ExternalAddress extends ExtensionKey[ExternalAddressExt] + + class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { + def addressFor(remoteAddr: Address): Address = + system.provider.getExternalAddressFor(remoteAddr) getOrElse + (throw new UnsupportedOperationException("cannot send to " + remoteAddr)) + } + + def serializeTo(ref: ActorRef, remote: Address): String = + ref.path.toStringWithAddress(ExternalAddress(theActorSystem).addressFor(remote)) + //#external-address } - def serializeAkkaDefault(ref: ActorRef): String = - ref.path.toStringWithAddress(ExternalAddress(theActorSystem).addressForAkka) - //#external-address-default + "demonstrate 
how to do default Akka serialization of ActorRef" in { + val theActorSystem: ActorSystem = system + + //#external-address-default + object ExternalAddress extends ExtensionKey[ExternalAddressExt] + + class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { + def addressForAkka: Address = akka.transportOf(system).address + } + + def serializeAkkaDefault(ref: ActorRef): String = + ref.path.toStringWithAddress(ExternalAddress(theActorSystem).addressForAkka) + //#external-address-default + } } } diff --git a/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala b/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala new file mode 100644 index 0000000000..a3edb6a093 --- /dev/null +++ b/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala @@ -0,0 +1,34 @@ +package docs.testkit + +import org.specs2._ +import org.specs2.specification.Scope + +import akka.actor.{ Props, ActorSystem, Actor } +import akka.testkit.{ TestKit, ImplicitSender } + +class Specs2DemoAcceptance extends Specification { + def is = + + "This is a specification of basic TestKit interop" ^ + p ^ + "A TestKit should" ^ + "work properly with Specs2 acceptance tests" ! e1 ^ + "correctly convert durations" ! e2 + + val system = ActorSystem() + + implicit def d2d(d: org.specs2.time.Duration): akka.util.FiniteDuration = + akka.util.Duration(d.inMilliseconds, "millis") + + def e1 = new TestKit(system) with Scope with ImplicitSender { + within(1 second) { + system.actorOf(Props(new Actor { + def receive = { case x ⇒ sender ! x } + })) ! 
"hallo" + + expectMsgType[String] must be equalTo "hallo" + } + } + + def e2 = ((1 second): akka.util.Duration).toMillis must be equalTo 1000 +} diff --git a/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala b/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala new file mode 100644 index 0000000000..efe7b6088e --- /dev/null +++ b/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala @@ -0,0 +1,34 @@ +package docs.testkit + +import org.specs2.mutable.Specification +import org.specs2.specification.Scope + +import akka.actor.{ Props, ActorSystem, Actor } +import akka.testkit.{ TestKit, ImplicitSender } + +class Specs2DemoUnitSpec extends Specification { + + val system = ActorSystem() + + implicit def d2d(d: org.specs2.time.Duration): akka.util.FiniteDuration = + akka.util.Duration(d.inMilliseconds, "millis") + + /* + * this is needed if different test cases would clash when run concurrently, + * e.g. when creating specifically named top-level actors + */ + sequential + + "A TestKit" should { + "work properly with Specs2 unit tests" in + new TestKit(system) with Scope with ImplicitSender { + within(1 second) { + system.actorOf(Props(new Actor { + def receive = { case x ⇒ sender ! x } + })) ! "hallo" + + expectMsgType[String] must be equalTo "hallo" + } + } + } +} diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index a1cc431643..478136e428 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -18,7 +18,7 @@ Setting the dispatcher for an Actor So in case you want to give your ``Actor`` a different dispatcher than the default, you need to do two things, of which the first is: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#defining-dispatcher +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#defining-dispatcher .. note:: The "dispatcherId" you specify in withDispatcher is in fact a path into your configuration. 
@@ -27,11 +27,11 @@ So in case you want to give your ``Actor`` a different dispatcher than the defau And then you just need to configure that dispatcher in your configuration: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-dispatcher-config +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#my-dispatcher-config And here's another example that uses the "thread-pool-executor": -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-thread-pool-dispatcher-config +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#my-thread-pool-dispatcher-config For more options, see the default-dispatcher section of the :ref:`configuration`. @@ -108,11 +108,11 @@ More dispatcher configuration examples Configuring a ``PinnedDispatcher``: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-pinned-dispatcher-config +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#my-pinned-dispatcher-config And then using it: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#defining-pinned-dispatcher +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#defining-pinned-dispatcher Note that ``thread-pool-executor`` configuration as per the above ``my-thread-pool-dispatcher`` exmaple is NOT applicable. This is because every actor will have its own thread pool when using ``PinnedDispatcher``, @@ -168,22 +168,22 @@ Mailbox configuration examples How to create a PriorityMailbox: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#prio-mailbox +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#prio-mailbox And then add it to the configuration: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#prio-dispatcher-config +.. 
includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#prio-dispatcher-config And then an example on how you would use it: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#prio-dispatcher +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#prio-dispatcher Creating your own Mailbox type ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ An example is worth a thousand quacks: -.. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#mailbox-implementation-example +.. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#mailbox-implementation-example And then you just specify the FQCN of your MailboxType as the value of the "mailbox-type" in the dispatcher configuration. diff --git a/akka-docs/scala/event-bus.rst b/akka-docs/scala/event-bus.rst index aec59083c6..2487e806fe 100644 --- a/akka-docs/scala/event-bus.rst +++ b/akka-docs/scala/event-bus.rst @@ -158,7 +158,7 @@ Classification`_ which enables registering to related sets of channels (as is used for :class:`RemoteLifeCycleMessage`). The following example demonstrates how a simple subscription works: -.. includecode:: code/akka/docs/event/LoggingDocSpec.scala#deadletters +.. includecode:: code/docs/event/LoggingDocSpec.scala#deadletters Default Handlers ---------------- diff --git a/akka-docs/scala/extending-akka.rst b/akka-docs/scala/extending-akka.rst index 0f0934a799..9c890de252 100644 --- a/akka-docs/scala/extending-akka.rst +++ b/akka-docs/scala/extending-akka.rst @@ -24,27 +24,27 @@ So let's create a sample extension that just lets us count the number of times s First, we define what our ``Extension`` should do: -.. includecode:: code/akka/docs/extension/ExtensionDocSpec.scala +.. includecode:: code/docs/extension/ExtensionDocSpec.scala :include: extension Then we need to create an ``ExtensionId`` for our extension so we can grab ahold of it. -.. includecode:: code/akka/docs/extension/ExtensionDocSpec.scala +.. 
includecode:: code/docs/extension/ExtensionDocSpec.scala :include: extensionid Wicked! Now all we need to do is to actually use it: -.. includecode:: code/akka/docs/extension/ExtensionDocSpec.scala +.. includecode:: code/docs/extension/ExtensionDocSpec.scala :include: extension-usage Or from inside of an Akka Actor: -.. includecode:: code/akka/docs/extension/ExtensionDocSpec.scala +.. includecode:: code/docs/extension/ExtensionDocSpec.scala :include: extension-usage-actor You can also hide extension behind traits: -.. includecode:: code/akka/docs/extension/ExtensionDocSpec.scala +.. includecode:: code/docs/extension/ExtensionDocSpec.scala :include: extension-usage-actor-trait That's all there is to it! @@ -55,7 +55,7 @@ Loading from Configuration To be able to load extensions from your Akka configuration you must add FQCNs of implementations of either ``ExtensionId`` or ``ExtensionIdProvider`` in the ``akka.extensions`` section of the config you provide to your ``ActorSystem``. -.. includecode:: code/akka/docs/extension/ExtensionDocSpec.scala +.. includecode:: code/docs/extension/ExtensionDocSpec.scala :include: config Note that in this case ``CountExtension`` is an object and therefore the class name ends with ``$``. @@ -75,17 +75,17 @@ The :ref:`configuration` can be used for application specific settings. A good p Sample configuration: -.. includecode:: code/akka/docs/extension/SettingsExtensionDocSpec.scala +.. includecode:: code/docs/extension/SettingsExtensionDocSpec.scala :include: config The ``Extension``: -.. includecode:: code/akka/docs/extension/SettingsExtensionDocSpec.scala +.. includecode:: code/docs/extension/SettingsExtensionDocSpec.scala :include: imports,extension,extensionid Use it: -.. includecode:: code/akka/docs/extension/SettingsExtensionDocSpec.scala +.. 
includecode:: code/docs/extension/SettingsExtensionDocSpec.scala :include: extension-usage-actor diff --git a/akka-docs/scala/fault-tolerance-sample.rst b/akka-docs/scala/fault-tolerance-sample.rst index ccda303e45..56ac838b1f 100644 --- a/akka-docs/scala/fault-tolerance-sample.rst +++ b/akka-docs/scala/fault-tolerance-sample.rst @@ -51,5 +51,5 @@ Step Description Full Source Code of the Fault Tolerance Sample (Scala) ------------------------------------------------------ -.. includecode:: code/akka/docs/actor/FaultHandlingDocSample.scala#all +.. includecode:: code/docs/actor/FaultHandlingDocSample.scala#all diff --git a/akka-docs/scala/fault-tolerance.rst b/akka-docs/scala/fault-tolerance.rst index 8448bd2cce..c1d6158954 100644 --- a/akka-docs/scala/fault-tolerance.rst +++ b/akka-docs/scala/fault-tolerance.rst @@ -24,7 +24,7 @@ sample as it is easy to follow the log output to understand what is happening in fault-tolerance-sample -.. includecode:: code/akka/docs/actor/FaultHandlingDocSample.scala#all +.. includecode:: code/docs/actor/FaultHandlingDocSample.scala#all :exclude: imports,messages,dummydb Creating a Supervisor Strategy @@ -35,7 +35,7 @@ in more depth. For the sake of demonstration let us consider the following strategy: -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: strategy I have chosen a few well-known exception types in order to demonstrate the @@ -75,50 +75,50 @@ Test Application The following section shows the effects of the different directives in practice, wherefor a test setup is needed. First off, we need a suitable supervisor: -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: supervisor This supervisor will be used to create a child, with which we can experiment: -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. 
includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: child The test is easier by using the utilities described in :ref:`akka-testkit`, where ``AkkaSpec`` is a convenient mixture of ``TestKit with WordSpec with MustMatchers`` -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: testkit Let us create actors: -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: create The first test shall demonstrate the ``Resume`` directive, so we try it out by setting some non-initial state in the actor and have it fail: -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: resume As you can see the value 42 survives the fault handling directive. Now, if we change the failure to a more serious ``NullPointerException``, that will no longer be the case: -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: restart And finally in case of the fatal ``IllegalArgumentException`` the child will be terminated by the supervisor: -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: stop Up to now the supervisor was completely unaffected by the child’s failure, because the directives set did handle it. In case of an ``Exception``, this is not true anymore and the supervisor escalates the failure. -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: escalate-kill The supervisor itself is supervised by the top-level actor provided by the @@ -131,12 +131,12 @@ child not to survive this failure. 
In case this is not desired (which depends on the use case), we need to use a different supervisor which overrides this behavior. -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: supervisor2 With this parent, the child survives the escalated restart, as demonstrated in the last test: -.. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala +.. includecode:: code/docs/actor/FaultHandlingDocSpec.scala :include: escalate-restart diff --git a/akka-docs/scala/fsm.rst b/akka-docs/scala/fsm.rst index 807cd7567c..e47fdaa055 100644 --- a/akka-docs/scala/fsm.rst +++ b/akka-docs/scala/fsm.rst @@ -30,17 +30,17 @@ send them on after the burst ended or a flush request is received. First, consider all of the below to use these import statements: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala#simple-imports +.. includecode:: code/docs/actor/FSMDocSpec.scala#simple-imports The contract of our “Buncher” actor is that is accepts or produces the following messages: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala#simple-events +.. includecode:: code/docs/actor/FSMDocSpec.scala#simple-events ``SetTarget`` is needed for starting it up, setting the destination for the ``Batches`` to be passed on; ``Queue`` will add to the internal queue while ``Flush`` will mark the end of a burst. -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala#simple-state +.. includecode:: code/docs/actor/FSMDocSpec.scala#simple-state The actor can be in two states: no message queued (aka ``Idle``) or some message queued (aka ``Active``). It will stay in the active state as long as @@ -50,7 +50,7 @@ the actual queue of messages. Now let’s take a look at the skeleton for our FSM actor: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. 
includecode:: code/docs/actor/FSMDocSpec.scala :include: simple-fsm :exclude: transition-elided,unhandled-elided @@ -79,7 +79,7 @@ shall work identically in both states, we make use of the fact that any event which is not handled by the ``when()`` block is passed to the ``whenUnhandled()`` block: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala#unhandled-elided +.. includecode:: code/docs/actor/FSMDocSpec.scala#unhandled-elided The first case handled here is adding ``Queue()`` requests to the internal queue and going to the ``Active`` state (this does the obvious thing of staying @@ -93,7 +93,7 @@ target, for which we use the ``onTransition`` mechanism: you can declare multiple such blocks and all of them will be tried for matching behavior in case a state transition occurs (i.e. only when the state actually changes). -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala#transition-elided +.. includecode:: code/docs/actor/FSMDocSpec.scala#transition-elided The transition callback is a partial function which takes as input a pair of states—the current and the next state. The FSM trait includes a convenience @@ -106,7 +106,7 @@ To verify that this buncher actually works, it is quite easy to write a test using the :ref:`akka-testkit`, which is conveniently bundled with ScalaTest traits into ``AkkaSpec``: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: test-code :exclude: fsm-code-elided @@ -120,7 +120,7 @@ The :class:`FSM` trait may only be mixed into an :class:`Actor`. Instead of extending :class:`Actor`, the self type approach was chosen in order to make it obvious that an actor is actually created: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. 
includecode:: code/docs/actor/FSMDocSpec.scala :include: simple-fsm :exclude: fsm-body @@ -165,7 +165,7 @@ The :meth:`stateFunction` argument is a :class:`PartialFunction[Event, State]`, which is conveniently given using the partial function literal syntax as demonstrated below: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: when-syntax The :class:`Event(msg: Any, data: D)` case class is parameterized with the data @@ -189,7 +189,7 @@ If a state doesn't handle a received event a warning is logged. If you want to do something else in this case you can specify that with :func:`whenUnhandled(stateFunction)`: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: unhandled-syntax **IMPORTANT**: This handler is not stacked, meaning that each invocation of @@ -230,7 +230,7 @@ of the modifiers described in the following: All modifier can be chained to achieve a nice and concise description: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: modifier-syntax The parentheses are not actually needed in all cases, but they visually @@ -267,7 +267,7 @@ The handler is a partial function which takes a pair of states as input; no resulting state is needed as it is not possible to modify the transition in progress. -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: transition-syntax The convenience extractor :obj:`->` enables decomposition of the pair of states @@ -280,7 +280,7 @@ It is also possible to pass a function object accepting two states to :func:`onTransition`, in case your transition handling logic is implemented as a method: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. 
includecode:: code/docs/actor/FSMDocSpec.scala :include: alt-transition-syntax The handlers registered with this method are stacked, so you can intersperse @@ -319,14 +319,14 @@ transformed using Scala’s full supplement of functional programming tools. In order to retain type inference, there is a helper function which may be used in case some common handling logic shall be applied to different clauses: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: transform-syntax It goes without saying that the arguments to this method may also be stored, to be used several times, e.g. when applying the same transformation to several ``when()`` blocks: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: alt-transform-syntax Timers @@ -371,14 +371,14 @@ state data which is available during termination handling. the same way as a state transition (but note that the ``return`` statement may not be used within a :meth:`when` block). -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: stop-syntax You can use :func:`onTermination(handler)` to specify custom code that is executed when the FSM is stopped. The handler is a partial function which takes a :class:`StopEvent(reason, stateName, stateData)` as argument: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: termination-syntax As for the :func:`whenUnhandled` case, this handler is not stacked, so each @@ -412,7 +412,7 @@ Event Tracing The setting ``akka.actor.debug.fsm`` in :ref:`configuration` enables logging of an event trace by :class:`LoggingFSM` instances: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. 
includecode:: code/docs/actor/FSMDocSpec.scala :include: logging-fsm :exclude: body-elided @@ -433,7 +433,7 @@ The :class:`LoggingFSM` trait adds one more feature to the FSM: a rolling event log which may be used during debugging (for tracing how the FSM entered a certain failure state) or for other creative uses: -.. includecode:: code/akka/docs/actor/FSMDocSpec.scala +.. includecode:: code/docs/actor/FSMDocSpec.scala :include: logging-fsm The :meth:`logDepth` defaults to zero, which turns off the event log. diff --git a/akka-docs/scala/futures.rst b/akka-docs/scala/futures.rst index d84b742c6f..26936b0493 100644 --- a/akka-docs/scala/futures.rst +++ b/akka-docs/scala/futures.rst @@ -19,7 +19,7 @@ which is very similar to a ``java.util.concurrent.Executor``. if you have an ``A it will use its default dispatcher as the ``ExecutionContext``, or you can use the factory methods provided by the ``ExecutionContext`` companion object to wrap ``Executors`` and ``ExecutorServices``, or even create your own. -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: diy-execution-context Use With Actors @@ -30,7 +30,7 @@ which only works if the original sender was an ``Actor``) and the second is thro Using an ``Actor``\'s ``?`` method to send a message will return a Future. To wait for and retrieve the actual result the simplest method is: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: ask-blocking This will cause the current thread to block and wait for the ``Actor`` to 'complete' the ``Future`` with it's reply. @@ -40,7 +40,7 @@ Alternatives to blocking are discussed further within this documentation. Also n an ``Actor`` is a ``Future[Any]`` since an ``Actor`` is dynamic. That is why the ``asInstanceOf`` is used in the above sample. 
When using non-blocking it is better to use the ``mapTo`` method to safely try to cast a ``Future`` to an expected type: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: map-to The ``mapTo`` method will return a new ``Future`` that contains the result if the cast was successful, @@ -53,7 +53,7 @@ A common use case within Akka is to have some computation performed concurrently If you find yourself creating a pool of ``Actor``\s for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: future-eval In the above code the block passed to ``Future`` will be executed by the default ``Dispatcher``, @@ -63,12 +63,12 @@ and we also avoid the overhead of managing an ``Actor``. You can also create already completed Futures using the ``Promise`` companion, which can be either successes: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: successful Or failures: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: failed Functional Futures @@ -84,7 +84,7 @@ The first method for working with ``Future`` functionally is ``map``. This metho which performs some operation on the result of the ``Future``, and returning a new result. The return value of the ``map`` method is another ``Future`` that will contain the new result: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: map In this example we are joining two strings together within a ``Future``. Instead of waiting for this to complete, @@ -97,12 +97,12 @@ string "HelloWorld" and is unaffected by the ``map``. 
The ``map`` method is fine if we are modifying a single ``Future``, but if 2 or more ``Future``\s are involved ``map`` will not allow you to combine them together: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: wrong-nested-map ``f3`` is a ``Future[Future[Int]]`` instead of the desired ``Future[Int]``. Instead, the ``flatMap`` method should be used: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: flat-map Composing futures using nested combinators it can sometimes become quite complicated and hard read, in these cases using Scala's @@ -110,7 +110,7 @@ Composing futures using nested combinators it can sometimes become quite complic If you need to do conditional propagation, you can use ``filter``: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: filter For Comprehensions @@ -118,7 +118,7 @@ For Comprehensions Since ``Future`` has a ``map``, ``filter`` and ``flatMap`` method it can be easily used in a 'for comprehension': -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: for-comprehension Something to keep in mind when doing this is even though it looks like parts of the above example can run in parallel, @@ -134,14 +134,14 @@ A common use case for this is combining the replies of several ``Actor``\s into without resorting to calling ``Await.result`` or ``Await.ready`` to block for each result. First an example of using ``Await.result``: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: composing-wrong Here we wait for the results from the first 2 ``Actor``\s before sending that result to the third ``Actor``. 
We called ``Await.result`` 3 times, which caused our little program to block 3 times before getting our final result. Now compare that to this example: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: composing Here we have 2 actors processing a single message each. Once the 2 results are available @@ -153,7 +153,7 @@ The ``sequence`` and ``traverse`` helper methods can make it easier to handle mo Both of these methods are ways of turning, for a subclass ``T`` of ``Traversable``, ``T[Future[A]]`` into a ``Future[T[A]]``. For example: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: sequence-ask To better explain what happened in the example, ``Future.sequence`` is taking the ``List[Future[Int]]`` @@ -163,12 +163,12 @@ and we find the sum of the ``List``. The ``traverse`` method is similar to ``sequence``, but it takes a ``T[A]`` and a function ``A => Future[B]`` to return a ``Future[T[B]]``, where ``T`` is again a subclass of Traversable. For example, to use ``traverse`` to sum the first 100 odd numbers: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: traverse This is the same result as this example: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: sequence But it may be faster to use ``traverse`` as it doesn't have to create an intermediate ``List[Future[Int]]``. @@ -178,7 +178,7 @@ from the type of the start-value and the type of the futures and returns somethi and then applies the function to all elements in the sequence of futures, asynchronously, the execution will start when the last of the Futures is completed. -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: fold That's all it takes! 
@@ -188,7 +188,7 @@ If the sequence passed to ``fold`` is empty, it will return the start-value, in In some cases you don't have a start-value and you're able to use the value of the first completing Future in the sequence as the start-value, you can use ``reduce``, it works like this: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: reduce Same as with ``fold``, the execution will be done asynchronously when the last of the Future is completed, @@ -200,13 +200,13 @@ Callbacks Sometimes you just want to listen to a ``Future`` being completed, and react to that not by creating a new Future, but by side-effecting. For this Akka supports ``onComplete``, ``onSuccess`` and ``onFailure``, of which the latter two are specializations of the first. -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: onSuccess -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: onFailure -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: onComplete Define Ordering @@ -218,7 +218,7 @@ But there's a solution and it's name is ``andThen``. It creates a new ``Future`` the specified callback, a ``Future`` that will have the same result as the ``Future`` it's called on, which allows for ordering like in the following sample: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: and-then Auxiliary Methods @@ -227,13 +227,13 @@ Auxiliary Methods ``Future`` ``fallbackTo`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future`` if the first ``Future`` fails. -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. 
includecode:: code/docs/future/FutureDocSpec.scala :include: fallback-to You can also combine two Futures into a new ``Future`` that will hold a tuple of the two Futures successful results, using the ``zip`` operation. -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: zip Exceptions @@ -247,7 +247,7 @@ If a ``Future`` does contain an ``Exception``, calling ``Await.result`` will cau It is also possible to handle an ``Exception`` by returning a different result. This is done with the ``recover`` method. For example: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: recover In this example, if the actor replied with a ``akka.actor.Status.Failure`` containing the ``ArithmeticException``, @@ -258,6 +258,6 @@ it will behave as if we hadn't used the ``recover`` method. You can also use the ``recoverWith`` method, which has the same relationship to ``recover`` as ``flatMap`` has to ``map``, and is use like this: -.. includecode:: code/akka/docs/future/FutureDocSpec.scala +.. includecode:: code/docs/future/FutureDocSpec.scala :include: try-recover diff --git a/akka-docs/scala/io.rst b/akka-docs/scala/io.rst index 2916dcd03d..2c4e1608f3 100644 --- a/akka-docs/scala/io.rst +++ b/akka-docs/scala/io.rst @@ -103,29 +103,29 @@ Http Server This example will create a simple high performance HTTP server. We begin with our imports: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: imports Some commonly used constants: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: constants And case classes to hold the resulting request: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: request-class Now for our first ``Iteratee``. 
There are 3 main sections of a HTTP request: the request line, the headers, and an optional body. The main request ``Iteratee`` handles each section separately: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: read-request In the above code ``readRequest`` takes the results of 3 different ``Iteratees`` (``readRequestLine``, ``readHeaders``, ``readBody``) and combines them into a single ``Request`` object. ``readRequestLine`` actually returns a tuple, so we extract its individual components. ``readBody`` depends on values contained within the header section, so we must pass those to the method. The request line has 3 parts to it: the HTTP method, the requested URI, and the HTTP version. The parts are separated by a single space, and the entire request line ends with a ``CRLF``. -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: read-request-line Reading the request method is simple as it is a single string ending in a space. The simple ``Iteratee`` that performs this is ``IO.takeUntil(delimiter: ByteString): Iteratee[ByteString]``. It keeps consuming input until the specified delimiter is found. Reading the HTTP version is also a simple string that ends with a ``CRLF``. @@ -134,14 +134,14 @@ The ``ascii`` method is a helper that takes a ``ByteString`` and parses it as a Reading the request URI is a bit more complicated because we want to parse the individual components of the URI instead of just returning a simple string: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: read-request-uri For this example we are only interested in handling absolute paths. To detect if the URI is an absolute path we use ``IO.peek(length: Int): Iteratee[ByteString]``, which returns a ``ByteString`` of the request length but doesn't actually consume the input. 
We peek at the next bit of input and see if it matches our ``PATH`` constant (defined above as ``ByteString("/")``). If it doesn't match we throw an error, but for a more robust solution we would want to handle other valid URIs. Next we handle the path itself: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: read-path The ``step`` method is a recursive method that takes a ``List`` of the accumulated path segments. It first checks if the remaining input starts with the ``PATH`` constant, and if it does, it drops that input, and returns the ``readUriPart`` ``Iteratee`` which has its result added to the path segment accumulator and the ``step`` method is run again. @@ -150,39 +150,39 @@ If after reading in a path segment the next input does not start with a path, we Following the path we read in the query (if it exists): -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: read-query It is much simpler than reading the path since we aren't doing any parsing of the query since there is no standard format of the query string. Both the path and query used the ``readUriPart`` ``Iteratee``, which is next: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: read-uri-part Here we have several ``Set``\s that contain valid characters pulled from the URI spec. The ``readUriPart`` method takes a ``Set`` of valid characters (already mapped to ``Byte``\s) and will continue to match characters until it reaches one that is not part of the ``Set``. If it is a percent encoded character then that is handled as a valid character and processing continues, or else we are done collecting this part of the URI. Headers are next: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: read-headers And if applicable, we read in the message body: -.. 
includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: read-body Finally we get to the actual ``Actor``: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: actor And it's companion object: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: actor-companion A ``main`` method to start everything up: -.. includecode:: code/akka/docs/io/HTTPServer.scala +.. includecode:: code/docs/io/HTTPServer.scala :include: main diff --git a/akka-docs/scala/logging.rst b/akka-docs/scala/logging.rst index 9b1fe42d3e..66cc6ae398 100644 --- a/akka-docs/scala/logging.rst +++ b/akka-docs/scala/logging.rst @@ -11,7 +11,7 @@ How to Log Create a ``LoggingAdapter`` and use the ``error``, ``warning``, ``info``, or ``debug`` methods, as illustrated in this example: -.. includecode:: code/akka/docs/event/LoggingDocSpec.scala +.. includecode:: code/docs/event/LoggingDocSpec.scala :include: my-actor For convenience you can mixin the ``log`` member into actors, instead of defining it as above. @@ -37,7 +37,7 @@ placeholders results in a warning being appended to the log statement (i.e. on the same line with the same severity). You may pass a Java array as the only substitution argument to have its elements be treated individually: -.. includecode:: code/akka/docs/event/LoggingDocSpec.scala#array +.. includecode:: code/docs/event/LoggingDocSpec.scala#array The Java :class:`Class` of the log source is also included in the generated :class:`LogEvent`. In case of a simple string this is replaced with a “marker” @@ -176,7 +176,7 @@ using implicit parameters and thus fully customizable: simply create your own instance of :class:`LogSource[T]` and have it in scope when creating the logger. -.. includecode:: code/akka/docs/event/LoggingDocSpec.scala#my-source +.. 
includecode:: code/docs/event/LoggingDocSpec.scala#my-source This example creates a log source which mimics traditional usage of Java loggers, which are based upon the originating object’s class name as log @@ -217,7 +217,7 @@ event handler available in the 'akka-slf4j' module. Example of creating a listener: -.. includecode:: code/akka/docs/event/LoggingDocSpec.scala +.. includecode:: code/docs/event/LoggingDocSpec.scala :include: my-event-listener .. _slf4j-scala: diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 5b36226b24..0f55ccdff4 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -116,15 +116,15 @@ precedence. With these imports: -.. includecode:: code/akka/docs/remoting/RemoteDeploymentDocSpec.scala#import +.. includecode:: code/docs/remoting/RemoteDeploymentDocSpec.scala#import and a remote address like this: -.. includecode:: code/akka/docs/remoting/RemoteDeploymentDocSpec.scala#make-address +.. includecode:: code/docs/remoting/RemoteDeploymentDocSpec.scala#make-address you can advise the system to create a child on that remote node like so: -.. includecode:: code/akka/docs/remoting/RemoteDeploymentDocSpec.scala#deploy +.. includecode:: code/docs/remoting/RemoteDeploymentDocSpec.scala#deploy Serialization ^^^^^^^^^^^^^ diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 2a5ac138c9..4d434b2cab 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -21,17 +21,17 @@ Routers In Action This is an example of how to create a router that is defined in configuration: -.. includecode:: code/akka/docs/routing/RouterViaConfigExample.scala#config +.. includecode:: code/docs/routing/RouterViaConfigExample.scala#config -.. includecode:: code/akka/docs/routing/RouterViaConfigExample.scala#configurableRouting +.. 
includecode:: code/docs/routing/RouterViaConfigExample.scala#configurableRouting This is an example of how to programmatically create a router and set the number of routees it should create: -.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingNrOfInstances +.. includecode:: code/docs/routing/RouterViaProgramExample.scala#programmaticRoutingNrOfInstances You can also give the router already created routees as in: -.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingRoutees +.. includecode:: code/docs/routing/RouterViaProgramExample.scala#programmaticRoutingRoutees It should be noted that no actor factory or class needs to be provided in this case, as the ``Router`` will not create any children on its own (which is not @@ -65,7 +65,7 @@ configuration in a :class:`RemoteRouterConfig`, attaching the remote addresses o the nodes to deploy to. Naturally, this requires your to include the ``akka-remote`` module on your classpath: -.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#remoteRoutees +.. includecode:: code/docs/routing/RouterViaProgramExample.scala#remoteRoutees How Routing is Designed within Akka ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -95,9 +95,9 @@ deterministic fashion. Since each actor knows its own external representation as well as that of its parent, the routees decide where replies should be sent when reacting to a message: -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#reply-with-sender +.. includecode:: code/docs/actor/ActorDocSpec.scala#reply-with-sender -.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#reply-without-sender +.. 
includecode:: code/docs/actor/ActorDocSpec.scala#reply-without-sender It is apparent now why routing needs to be enabled in code rather than being possible to “bolt on” later: whether or not an actor is routed means a change @@ -141,11 +141,11 @@ Router usage In this section we will describe how to use the different router types. First we need to create some actors that will be used in the examples: -.. includecode:: code/akka/docs/routing/RouterTypeExample.scala#printlnActor +.. includecode:: code/docs/routing/RouterTypeExample.scala#printlnActor and -.. includecode:: code/akka/docs/routing/RouterTypeExample.scala#fibonacciActor +.. includecode:: code/docs/routing/RouterTypeExample.scala#fibonacciActor RoundRobinRouter @@ -153,7 +153,7 @@ RoundRobinRouter Routes in a `round-robin `_ fashion to its routees. Code example: -.. includecode:: code/akka/docs/routing/RouterTypeExample.scala#roundRobinRouter +.. includecode:: code/docs/routing/RouterTypeExample.scala#roundRobinRouter When run you should see a similar output to this: @@ -182,7 +182,7 @@ the message it receives to this routee. This procedure will happen each time it receives a message. Code example: -.. includecode:: code/akka/docs/routing/RouterTypeExample.scala#randomRouter +.. includecode:: code/docs/routing/RouterTypeExample.scala#randomRouter When run you should see a similar output to this: @@ -215,14 +215,14 @@ The selection is done in this order: Code example: -.. includecode:: code/akka/docs/routing/RouterTypeExample.scala#smallestMailboxRouter +.. includecode:: code/docs/routing/RouterTypeExample.scala#smallestMailboxRouter BroadcastRouter *************** A broadcast router forwards the message it receives to *all* its routees. Code example: -.. includecode:: code/akka/docs/routing/RouterTypeExample.scala#broadcastRouter +.. 
includecode:: code/docs/routing/RouterTypeExample.scala#broadcastRouter When run you should see a similar output to this: @@ -242,7 +242,7 @@ The ScatterGatherFirstCompletedRouter will send the message on to all its routee It then waits for first result it gets back. This result will be sent back to original sender. Code example: -.. includecode:: code/akka/docs/routing/RouterTypeExample.scala#scatterGatherFirstCompletedRouter +.. includecode:: code/docs/routing/RouterTypeExample.scala#scatterGatherFirstCompletedRouter When run you should see this: @@ -274,16 +274,16 @@ of routees dynamically. This is an example of how to create a resizable router that is defined in configuration: -.. includecode:: code/akka/docs/routing/RouterViaConfigExample.scala#config-resize +.. includecode:: code/docs/routing/RouterViaConfigExample.scala#config-resize -.. includecode:: code/akka/docs/routing/RouterViaConfigExample.scala#configurableRoutingWithResizer +.. includecode:: code/docs/routing/RouterViaConfigExample.scala#configurableRoutingWithResizer Several more configuration options are available and described in ``akka.actor.deployment.default.resizer`` section of the reference :ref:`configuration`. This is an example of how to programmatically create a resizable router: -.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingWithResizer +.. includecode:: code/docs/routing/RouterViaProgramExample.scala#programmaticRoutingWithResizer *It is also worth pointing out that if you define the ``router`` in the configuration file then this value will be used instead of any programmatically sent parameters.* @@ -398,5 +398,5 @@ the actor system’s default dispatcher. All standard routers allow setting this property in their constructor or factory method, custom routers have to implement the method in a suitable way. -.. includecode:: code/akka/docs/routing/RouterDocSpec.scala#dispatchers +.. 
includecode:: code/docs/routing/RouterDocSpec.scala#dispatchers diff --git a/akka-docs/scala/scheduler.rst b/akka-docs/scala/scheduler.rst index a98f0f563c..0a5b8aed51 100644 --- a/akka-docs/scala/scheduler.rst +++ b/akka-docs/scala/scheduler.rst @@ -25,13 +25,13 @@ scheduled operation. Some examples ------------- -.. includecode:: code/akka/docs/actor/SchedulerDocSpec.scala +.. includecode:: code/docs/actor/SchedulerDocSpec.scala :include: imports1,schedule-one-off-message -.. includecode:: code/akka/docs/actor/SchedulerDocSpec.scala +.. includecode:: code/docs/actor/SchedulerDocSpec.scala :include: schedule-one-off-thunk -.. includecode:: code/akka/docs/actor/SchedulerDocSpec.scala +.. includecode:: code/docs/actor/SchedulerDocSpec.scala :include: schedule-recurring From ``akka.actor.ActorSystem`` diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index 88fe74fd13..c1c2c16a8b 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -21,12 +21,12 @@ For Akka to know which ``Serializer`` to use for what, you need edit your :ref:` in the "akka.actor.serializers"-section you bind names to implementations of the ``akka.serialization.Serializer`` you wish to use, like this: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala#serialize-serializers-config +.. includecode:: code/docs/serialization/SerializationDocSpec.scala#serialize-serializers-config After you've bound names to different implementations of ``Serializer`` you need to wire which classes should be serialized using which ``Serializer``, this is done in the "akka.actor.serialization-bindings"-section: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala#serialization-bindings-config +.. includecode:: code/docs/serialization/SerializationDocSpec.scala#serialization-bindings-config You only need to specify the name of an interface or abstract base class of the messages. In case of ambiguity, i.e. 
the message implements several of the @@ -53,7 +53,7 @@ Verification If you want to verify that your messages are serializable you can enable the following config option: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala#serialize-messages-config +.. includecode:: code/docs/serialization/SerializationDocSpec.scala#serialize-messages-config .. warning:: @@ -62,7 +62,7 @@ If you want to verify that your messages are serializable you can enable the fol If you want to verify that your ``Props`` are serializable you can enable the following config option: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala#serialize-creators-config +.. includecode:: code/docs/serialization/SerializationDocSpec.scala#serialize-creators-config .. warning:: @@ -75,7 +75,7 @@ Programmatic If you want to programmatically serialize/deserialize using Akka Serialization, here's some examples: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala +.. includecode:: code/docs/serialization/SerializationDocSpec.scala :include: imports,programmatic For more information, have a look at the ``ScalaDoc`` for ``akka.serialization._`` @@ -91,7 +91,7 @@ Creating new Serializers First you need to create a class definition of your ``Serializer`` like so: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala +.. includecode:: code/docs/serialization/SerializationDocSpec.scala :include: imports,my-own-serializer :exclude: ... @@ -104,7 +104,7 @@ Serializing ActorRefs All ActorRefs are serializable using JavaSerializer, but in case you are writing your own serializer, you might want to know how to serialize and deserialize them properly, here's the magic incantation: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala +.. includecode:: code/docs/serialization/SerializationDocSpec.scala :include: imports,actorref-serializer .. 
note:: @@ -129,7 +129,7 @@ address which shall be the recipient of the serialized information. Use :meth:`ActorRefProvider.getExternalAddressFor(remoteAddr)` to query the system for the appropriate address to use when sending to ``remoteAddr``: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala +.. includecode:: code/docs/serialization/SerializationDocSpec.scala :include: external-address This requires that you know at least which type of address will be supported by @@ -140,9 +140,17 @@ lenient as Akka’s RemoteActorRefProvider). There is a possible simplification available if you are just using the default :class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is -enabled by the fact that this combination has just a single remote address: +enabled by the fact that this combination has just a single remote address. +This approach relies on internal API, which means that it is not guaranteed to +be supported in future versions. To make this caveat more obvious, some bridge +code in the ``akka`` package is required to make it work: -.. includecode:: code/akka/docs/serialization/SerializationDocSpec.scala +.. includecode:: code/docs/serialization/SerializationDocSpec.scala + :include: extract-transport + +And with this, the address extraction goes like this: + +.. includecode:: code/docs/serialization/SerializationDocSpec.scala :include: external-address-default This solution has to be adapted once other providers are used (like the planned diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index abb9e0d115..ac27655342 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -61,7 +61,7 @@ Having access to the actual :class:`Actor` object allows application of all traditional unit testing techniques on the contained methods. Obtaining a reference is done like this: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#test-actor-ref +.. 
includecode:: code/docs/testkit/TestkitDocSpec.scala#test-actor-ref Since :class:`TestActorRef` is generic in the actor type it returns the underlying actor with its proper static type. From this point on you may bring @@ -74,7 +74,7 @@ Testing that an expected exception is thrown while processing a message sent to the actor under test can be done by using a :class:`TestActorRef` :meth:`receive` based invocation: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#test-expecting-exceptions +.. includecode:: code/docs/testkit/TestkitDocSpec.scala#test-expecting-exceptions .. _TestFSMRef: @@ -85,7 +85,7 @@ If your actor under test is a :class:`FSM`, you may use the special :class:`TestFSMRef` which offers all features of a normal :class:`TestActorRef` and in addition allows access to the internal state: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#test-fsm-ref +.. includecode:: code/docs/testkit/TestkitDocSpec.scala#test-fsm-ref Due to a limitation in Scala’s type inference, there is only the factory method shown above, so you will probably write code like ``TestFSMRef(new MyFSM)`` @@ -114,7 +114,7 @@ usual. This trick is made possible by the :class:`CallingThreadDispatcher` described below; this dispatcher is set implicitly for any actor instantiated into a :class:`TestActorRef`. -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#test-behavior +.. includecode:: code/docs/testkit/TestkitDocSpec.scala#test-behavior As the :class:`TestActorRef` is a subclass of :class:`LocalActorRef` with a few special extras, also aspects like supervision and restarting work properly, but @@ -143,7 +143,7 @@ any thrown exceptions, then there is another mode available for you: just use the :meth:`receive` method :class:`TestActorRef`, which will be forwarded to the underlying actor: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#test-unhandled +.. 
includecode:: code/docs/testkit/TestkitDocSpec.scala#test-unhandled The above sample assumes the default behavior for unhandled messages, i.e. that the actor doesn't swallow all messages and doesn't override :meth:`unhandled`. @@ -182,7 +182,7 @@ single procedure drives the test. The :class:`TestKit` class contains a collection of tools which makes this common task easy. -.. includecode:: code/akka/docs/testkit/PlainWordSpec.scala#plain-spec +.. includecode:: code/docs/testkit/PlainWordSpec.scala#plain-spec The :class:`TestKit` contains an actor named :obj:`testActor` which is the entry point for messages to be examined with the various ``expectMsg...`` @@ -339,7 +339,7 @@ handler with the :class:`TestEventListener` and using an :class:`EventFilter` allows assertions on log messages, including those which are generated by exceptions: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#event-filter +.. includecode:: code/docs/testkit/TestkitDocSpec.scala#event-filter .. _TestKit.within: @@ -372,7 +372,7 @@ It should be noted that if the last message-receiving assertion of the block is latencies. This means that while individual contained assertions still use the maximum time bound, the overall block may take arbitrarily longer in this case. -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#test-within +.. includecode:: code/docs/testkit/TestkitDocSpec.scala#test-within .. note:: @@ -395,7 +395,7 @@ internally scaled by a factor taken from the :ref:`configuration`, You can scale other durations with the same factor by using the implicit conversion in ``akka.testkit`` package object to add dilated function to :class:`Duration`. -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#duration-dilation +.. 
includecode:: code/docs/testkit/TestkitDocSpec.scala#duration-dilation Resolving Conflicts with Implicit ActorRef ------------------------------------------ @@ -403,7 +403,7 @@ If you want the sender of messages inside your TestKit-based tests to be the ``testActor`` simply mix in ``ImplicitSender`` into your test. -.. includecode:: code/akka/docs/testkit/PlainWordSpec.scala#implicit-sender +.. includecode:: code/docs/testkit/PlainWordSpec.scala#implicit-sender Using Multiple Probe Actors --------------------------- @@ -416,7 +416,7 @@ message flows. To make this more powerful and convenient, there is a concrete implementation called :class:`TestProbe`. The functionality is best explained using a small example: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala +.. includecode:: code/docs/testkit/TestkitDocSpec.scala :include: imports-test-probe,my-double-echo,test-probe Here the system under test is simulated by :class:`MyDoubleEcho`, which is @@ -430,7 +430,7 @@ the test setup. Probes may also be equipped with custom assertions to make your test code even more concise and clear: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala +.. includecode:: code/docs/testkit/TestkitDocSpec.scala :include: test-special-probe You have complete flexibility here in mixing and matching the :class:`TestKit` @@ -444,7 +444,7 @@ Replying to Messages Received by Probes The probes keep track of the communications channel for replies, if possible, so they can also reply: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#test-probe-reply +.. includecode:: code/docs/testkit/TestkitDocSpec.scala#test-probe-reply Forwarding Messages Received by Probes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -455,7 +455,7 @@ sent to a :class:`TestProbe` ``probe`` instead, you can make assertions concerning volume and timing of the message flow while still keeping the network functioning: -.. 
includecode:: code/akka/docs/testkit/TestkitDocSpec.scala +.. includecode:: code/docs/testkit/TestkitDocSpec.scala :include: test-probe-forward-actors,test-probe-forward The ``dest`` actor will receive the same message invocation as if no test probe @@ -516,7 +516,7 @@ How to use it Just set the dispatcher as you normally would: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#calling-thread-dispatcher +.. includecode:: code/docs/testkit/TestkitDocSpec.scala#calling-thread-dispatcher How it works ------------ @@ -620,7 +620,7 @@ options: ``akka.actor.debug.receive`` — which enables the :meth:`loggable` statement to be applied to an actor’s :meth:`receive` function: -.. includecode:: code/akka/docs/testkit/TestkitDocSpec.scala#logging-receive +.. includecode:: code/docs/testkit/TestkitDocSpec.scala#logging-receive . If the abovementioned setting is not given in the :ref:`configuration`, this method will @@ -659,3 +659,42 @@ full logging of actor activities using this configuration fragment:: } } +Different Testing Frameworks +============================ + +Akka’s own test suite is written using `ScalaTest `_, +which also shines through in documentation examples. However, the TestKit and +its facilities do not depend on that framework, you can essentially use +whichever suits your development style best. + +This section contains a collection of known gotchas with some other frameworks, +which is by no means exhaustive and does not imply endorsement or special +support. + +Specs2 +------ + +Some `Specs2 `_ users have contributed examples of how to work around some clashes which may arise: + +* Mixing TestKit into :class:`org.specs2.mutable.Specification` results in a + name clash involving the ``end`` method (which is a private variable in + TestKit and an abstract method in Specification); if mixing in TestKit first, + the code may compile but might then fail at runtime. 
The work-around—which is + actually beneficial also for the third point—is to apply the TestKit together + with :class:`org.specs2.specification.Scope`. +* The Specification traits provide a :class:`Duration` DSL which uses partly + the same method names as :class:`akka.util.Duration`, resulting in ambiguous + implicits if ``akka.util.duration._`` is imported. The work-around is to use + the Specification variants and supply an implicit conversion to the Akka + Duration. This conversion is not supplied with the Akka distribution because + that would mean that our JAR files would depend on Specs2, which is not + justified by this little feature. +* Specifications are by default executed concurrently, which requires some care + when writing the tests or alternatively the ``sequential`` keyword. + +You can use the following two examples as guidelines: + +.. includecode:: code/docs/testkit/Specs2DemoSpec.scala + +.. includecode:: code/docs/testkit/Specs2DemoAcceptance.scala + diff --git a/akka-docs/scala/testkit-example.rst b/akka-docs/scala/testkit-example.rst index 7208de5828..dd7aba0812 100644 --- a/akka-docs/scala/testkit-example.rst +++ b/akka-docs/scala/testkit-example.rst @@ -6,5 +6,5 @@ TestKit Example (Scala) Ray Roestenburg's example code from `his blog `_ adapted to work with Akka 2.x. -.. includecode:: code/akka/docs/testkit/TestkitUsageSpec.scala#testkit-usage +.. includecode:: code/docs/testkit/TestkitUsageSpec.scala#testkit-usage diff --git a/akka-docs/scala/transactors.rst b/akka-docs/scala/transactors.rst index 1dc1d76c28..d915b15aa4 100644 --- a/akka-docs/scala/transactors.rst +++ b/akka-docs/scala/transactors.rst @@ -63,9 +63,9 @@ Here is an example of coordinating two simple counter Actors so that they both increment together in coordinated transactions. If one of them was to fail to increment, the other would also fail. -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#coordinated-example +.. 
includecode:: code/docs/transactor/TransactorDocSpec.scala#coordinated-example -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#run-coordinated-example +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#run-coordinated-example Note that creating a ``Coordinated`` object requires a ``Timeout`` to be specified for the coordinated transaction. This can be done implicitly, by @@ -73,36 +73,36 @@ having an implicit ``Timeout`` in scope, or explicitly, by passing the timeout when creating a a ``Coordinated`` object. Here's an example of specifying an implicit timeout: -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#implicit-timeout +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#implicit-timeout To start a new coordinated transaction that you will also participate in, just create a ``Coordinated`` object (this assumes an implicit timeout): -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#create-coordinated +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#create-coordinated To start a coordinated transaction that you won't participate in yourself you can create a ``Coordinated`` object with a message and send it directly to an actor. The recipient of the message will be the first member of the coordination set: -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#send-coordinated +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#send-coordinated To receive a coordinated message in an actor simply match it in a case statement: -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#receive-coordinated +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#receive-coordinated :exclude: coordinated-atomic To include another actor in the same coordinated transaction that you've created or received, use the apply method on that object. 
This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#include-coordinated +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#include-coordinated To enter the coordinated transaction use the atomic method of the coordinated object: -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#coordinated-atomic +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#coordinated-atomic The coordinated transaction will wait for the other transactions before committing. If any of the coordinated transactions fail then they all fail. @@ -125,7 +125,7 @@ transactions, using the explicit coordination described above. Here's an example of a simple transactor that will join a coordinated transaction: -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#counter-example +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#counter-example You could send this Counter transactor a ``Coordinated(Increment)`` message. If you were to send it just an ``Increment`` message it will create its own @@ -141,16 +141,16 @@ allows you to specify both the actor to send to, and the message to send. Example of coordinating an increment: -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#friendly-counter-example +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#friendly-counter-example Using ``include`` to include more than one transactor: -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#coordinate-include +.. includecode:: code/docs/transactor/TransactorDocSpec.scala#coordinate-include Using ``sendTo`` to coordinate transactions but pass-on a different message than the one that was received: -.. includecode:: code/akka/docs/transactor/TransactorDocSpec.scala#coordinate-sendto +.. 
includecode:: code/docs/transactor/TransactorDocSpec.scala#coordinate-sendto To execute directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. These methods also expect partial functions diff --git a/akka-docs/scala/typed-actors.rst b/akka-docs/scala/typed-actors.rst index fc570e60a7..694078a58d 100644 --- a/akka-docs/scala/typed-actors.rst +++ b/akka-docs/scala/typed-actors.rst @@ -38,7 +38,7 @@ The tools of the trade Before we create our first Typed Actor we should first go through the tools that we have at our disposal, it's located in ``akka.actor.TypedActor``. -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-extension-tools .. warning:: @@ -55,37 +55,37 @@ To create a Typed Actor you need to have one or more interfaces, and one impleme Our example interface: -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: imports,typed-actor-iface :exclude: typed-actor-iface-methods Our example implementation of that interface: -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: imports,typed-actor-impl :exclude: typed-actor-impl-methods The most trivial way of creating a Typed Actor instance of our Squarer: -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-create1 First type is the type of the proxy, the second type is the type of the implementation. If you need to call a specific constructor you do it like this: -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-create2 Since you supply a Props, you can specify which dispatcher to use, what the default timeout should be used and more. 
Now, our Squarer doesn't have any methods, so we'd better add those. -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: imports,typed-actor-iface Alright, now we've got some methods we can call, but we need to implement those in SquarerImpl. -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: imports,typed-actor-impl Excellent, now we have an interface and an implementation of that interface, @@ -113,7 +113,7 @@ we *strongly* recommend that parameters passed are immutable. One-way message send ^^^^^^^^^^^^^^^^^^^^ -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-call-oneway As simple as that! The method will be executed on another thread; asynchronously. @@ -121,13 +121,13 @@ As simple as that! The method will be executed on another thread; asynchronously Request-reply message send ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-call-option This will block for as long as the timeout that was set in the Props of the Typed Actor, if needed. It will return ``None`` if a timeout occurs. -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-call-strict This will block for as long as the timeout that was set in the Props of the Typed Actor, @@ -136,7 +136,7 @@ if needed. It will throw a ``java.util.concurrent.TimeoutException`` if a timeou Request-reply-with-future message send ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. 
includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-call-future This call is asynchronous, and the Future returned can be used for asynchronous composition. @@ -146,12 +146,12 @@ Stopping Typed Actors Since Akkas Typed Actors are backed by Akka Actors they must be stopped when they aren't needed anymore. -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-stop This asynchronously stops the Typed Actor associated with the specified proxy ASAP. -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-poisonpill This asynchronously stops the Typed Actor associated with the specified proxy @@ -208,13 +208,13 @@ Lookup & Remoting Since ``TypedActors`` are backed by ``Akka Actors``, you can use ``actorFor`` together with ``typedActorOf`` to proxy ``ActorRefs`` potentially residing on remote nodes. -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala#typed-actor-remote +.. includecode:: code/docs/actor/TypedActorDocSpec.scala#typed-actor-remote Supercharging ------------- Here's an example on how you can use traits to mix in behavior in your Typed Actors. -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala#typed-actor-supercharge +.. includecode:: code/docs/actor/TypedActorDocSpec.scala#typed-actor-supercharge -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala#typed-actor-supercharge-usage +.. includecode:: code/docs/actor/TypedActorDocSpec.scala#typed-actor-supercharge-usage diff --git a/akka-docs/scala/zeromq.rst b/akka-docs/scala/zeromq.rst index 5c06aeda23..d94ee81270 100644 --- a/akka-docs/scala/zeromq.rst +++ b/akka-docs/scala/zeromq.rst @@ -19,18 +19,18 @@ Connection ZeroMQ supports multiple connectivity patterns, each aimed to meet a different set of requirements. 
Currently, this module supports publisher-subscriber connections and connections based on dealers and routers. For connecting or accepting connections, a socket must be created. Sockets are always created using the ``akka.zeromq.ZeroMQExtension``, for example: -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#pub-socket +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#pub-socket or by importing the ``akka.zeromq._`` package to make newSocket method available on system, via an implicit conversion. -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#pub-socket2 +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#pub-socket2 Above examples will create a ZeroMQ Publisher socket that is Bound to the port 1234 on localhost. Similarly you can create a subscription socket, with a listener, that subscribes to all messages from the publisher using: -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#sub-socket +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#sub-socket The following sub-sections describe the supported connection patterns and how they can be used in an Akka environment. However, for a comprehensive discussion of connection patterns, please refer to `ZeroMQ -- The Guide `_. @@ -46,18 +46,18 @@ When you're using zeromq pub/sub you should be aware that it needs multicast - c An actor is subscribed to a topic as follows: -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#sub-topic-socket +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#sub-topic-socket It is a prefix match so it is subscribed to all topics starting with ``foo.bar``. Note that if the given string is empty or ``SubscribeAll`` is used, the actor is subscribed to all topics. To unsubscribe from a topic you do the following: -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#unsub-topic-socket +.. 
includecode:: code/docs/zeromq/ZeromqDocSpec.scala#unsub-topic-socket To publish messages to a topic you must use two Frames with the topic in the first frame. -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#pub-topic +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#pub-topic Pub-Sub in Action ^^^^^^^^^^^^^^^^^ @@ -67,16 +67,16 @@ The following example illustrates one publisher with two subscribers. The publisher monitors current heap usage and system load and periodically publishes ``Heap`` events on the ``"health.heap"`` topic and ``Load`` events on the ``"health.load"`` topic. -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#health +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#health Let's add one subscriber that logs the information. It subscribes to all topics starting with ``"health"``, i.e. both ``Heap`` and ``Load`` events. -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#logger +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#logger Another subscriber keep track of used heap and warns if too much heap is used. It only subscribes to ``Heap`` events. -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#alerter +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#alerter Router-Dealer Connection ------------------------ @@ -87,7 +87,7 @@ With those socket types you can build your own reliable pub sub broker that uses To create a Router socket that has a high watermark configured, you would do: -.. includecode:: code/akka/docs/zeromq/ZeromqDocSpec.scala#high-watermark +.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#high-watermark The akka-zeromq module accepts most if not all the available configuration options for a zeromq socket. 
diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index fd18e931c7..13c90ed61e 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -408,7 +408,7 @@ object Dependencies { val tutorials = Seq(Test.scalatest, Test.junit) - val docs = Seq(Test.scalatest, Test.junit) + val docs = Seq(Test.scalatest, Test.junit, Test.specs2) val zeroMQ = Seq(protobuf, Dependency.zeroMQ, Test.scalatest, Test.junit) } @@ -452,6 +452,7 @@ object Dependency { val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" // MIT val scalatest = "org.scalatest" % "scalatest_2.9.1" % V.Scalatest % "test" // ApacheV2 val scalacheck = "org.scala-tools.testing" % "scalacheck_2.9.1" % "1.9" % "test" // New BSD + val specs2 = "org.specs2" % "specs2_2.9.1" % "1.9" % "test" // Modified BSD / ApacheV2 val zookeeper = "org.apache.hadoop.zookeeper" % "zookeeper" % "3.4.0" % "test" // ApacheV2 val log4j = "log4j" % "log4j" % "1.2.14" % "test" // ApacheV2 } From 2e4f01b6123c2acc7d3e704f4555069bf4ba5065 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Thu, 24 May 2012 23:39:13 +0200 Subject: [PATCH 143/538] Inadventently deleted akka-intellij-code-style.jar --- .../_sphinx/static/akka-intellij-code-style.jar | Bin 0 -> 8587 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 akka-docs/_sphinx/static/akka-intellij-code-style.jar diff --git a/akka-docs/_sphinx/static/akka-intellij-code-style.jar b/akka-docs/_sphinx/static/akka-intellij-code-style.jar new file mode 100644 index 0000000000000000000000000000000000000000..55866c22c522de2f21a4fb2a6c26ec30ad3c5486 GIT binary patch literal 8587 zcmWIWW@h1H00GJ33DzJQhPfG77?Sf-Qj1F}b5e`-|D9rB04Ww>;9%fjKvyisz<{hc zF*`dk9;7)QtXr=lH|OmQ$NVM(3AP8;`xm{ObXR8An*^gPA#as9n;MI5wwkQ7N@rE~ zI<`MP@QUkE5#jAKzqmc9{=KyJl8`}EyfL59whr~c8x33AwODjy-0~+E+-b0@TzMsN z^-9S{eB6&7TF*8VxFnhFU^VxWSJ`eM{or#6!ORCbo_QPk#XYRH_z)n^zU=N%vyTcM z8u6X%r;m0yE|6E3xpX)$PR@ck$!gnnxd@SUn`gg193l7NO!%JHA7_|btb1o+W|%9V 
zx|n_WBLCLFrdykq-h8t8lOO-i70w%WpYGA5Ic*P1k>{f-#}7OoQunA9p1vH%mHnbs?o-sqn83SiSJ{16Yze*SrM3G0#;&PX z)_RjwUxU}qQ=?b2| z$LCL3wyc&pz?+@pgM)Hg3nK$VgCKHznxaL%AOizKenClQeqOPDeo01Z5hx0u#{4gS zv`g^+-@l@IZ%f|Y-7`aAl6R5st->CTea2>6eT&R^u3Ko+i$=8wuCU!ng{`DX*|4Fj; zp&LDOCCnu>1(+kZ2*iDOwC&qV1(Ek2-i) zyk|Kl;@%o?Roc~W%LbVjfyc_lS6jCyG5<2}^JIBtP$+YlTUTPzgkID9Y_Hgq@(Eo! zDfZtcOxvH&abL#1@x6MVe_^x#I-i&e*AE?k_Ji|$!B&UL3<;0TIl=E1+HEpjxnJnn zo;?=o2JJrG2ik&j*!w4GYOE=nGfnYiS6P0!=vAk-Ol99)!GYR%cJ=sNW?TA2)OV9e?BnDm(a#o}Y!E%WZ}OA# z-`ni2?AVf77sj^jXXxgt6HSTVCSJX4t!BP-P4==>o>XshpLKo9LcX6!+I-G`!nvkZ z-gAvYeeVkxPYS6nbDtRU_ujUBd>5u2I~K5+`>$v03yGdp_5oL>Doc1?P&TYBb?V%g z^KjDT8|554?4mrCR=+hOi_V)(Ff}o;-kN83*2CGm{af21pL1J&S1qra%U~73l`Nwx ze%#cIxzqR9sWS)WyUt>H^jr0(^xJDz*_(B8^iQ5YXa2lg$ojwRlSvs`lWu+~pYX@p zH(#jYqv4ZxT@D&u9x4(RDuFsDRAy9MR!RD}%Sfa)h==pg$-SFiy%cNrj^;~Ht-8|N zrz;)Leob%Ec@Ew;rn{P*Dy;WsO16iL05dQS-ab`pX8Id ze_)ErMTzGcJ~LbE472j3CUERl54`8%q4a;-M3;l^=Z~H{Zu#}`c5{>)@5QLlcRUe?ET{jO!>7cf(4s$W?dgX5 z6V#Odb>u1(uZT)((<|k@=^VW4zLdo^!HJ7X73N8K#2r^vsVY_4-sfRHWs{?ylF{bl zo-Upt?y0AKC0yOS(%-Ew{(79;tZDc3Ppn$JB}M7B%dC|Vru6>I&`|~Rl6BA z3|a1M6PC#Em;|WK^nclJD;vOCY$m+S`|l}NmV_nzE9&^U913~*S99syoV1HQaoLRK zs>}ty7ivkG{W^3dxykoq<-x^CUvr8prq5io@0o4sv-y_icZE9k{p7j1>_Teiv#`fa z8^5Z4{(P$8ft!Ec^2n1n z$~DdgPJGW2e6c6;-wlC?-FtqoJ$?FgkVEa^Wk-)cJHKB|MQ%^Yv}VCqM`|AkZx@=&l&dt0#n=a1eRqqJ+Y&egB=F4#5gZS_I!GF7wm zEz=a2PrkY}FN&+(N5% z*@u#y8=vfAc%FUtZO>(CC7EB`-A`l+ZT9!W0m|M4^9b?M2M^7R6CfkGd6W?c3!Vn}#k*p?Ax zuR3Slrab3tnfzHk(%v(!>pa{uJB^J$dqdJZ#y`s+E}CdHbEaI*75&3=6xYfn7gl9S zCArS`$=-eB<)XqTELS|w_w)S8e&qV~wea$MuD9_l5smeNGY;?f*8FnPewyIlc>C+? 
z&diZ}T`jDmf6PuSk9G2y57t{NKIAnlo_YA2`YYDLUxjIGQiU65tmA!eBJ?2Y*c`jF zI;W+R17F^lF0k&%o!!~br(WLO&meeo#vR>P2etJr&P4t#5{Y^7Sn|x8tsJj+AA9M0 z&2Ynf5gi^IOSV_DFRrvWYIs?!cDm}smxTr2w#i*6_|>~pd*8<-2b1rweirOv%y=(x zK>xd3t18#mk3HhQrdp)6e>mRIq+6KA3k$*}fao7kr6j=9pe zwJ!BBa%xKMf3QR7d%e8M8;xUe1t0$GzL-$#!FbwyzPrHR#cI9Be67lA)PGkqIh^7S zn)JS{^ES&-{~t%XW$za^O-?t;+Lgj))Zd`7oijyL_vQS_C6`;5#T(8%`hEAdBn$Bt ztDo*BGoKzanSV5w3R(JJZ9h_R)Ap$Ge~!<8<7$>Y6nyq}uf_WJ{LgRk z{%u^(Kl5cQ`{eI0HD`#dIoEjEV4m@*b0;#R<}9<~o^<52aCgq5B>wQa=$z~X@rdAk z#+Tw`+O7Q;x2x$Km}Z(V)i?e}U+bl5W&Y;n^W4*QudmvDuQHzxO!;?d zRnLUiF1GK_|4V=Mx`llz#~#hucUDBNiDBo9WL~qLd2h_&!iM|H-50hoEZ!F!STAkt zcRzf~yKDancKuzPk|9vOz3^d4Rf@!}K&=SPKegA-)E+uBlX=FTq;(l#l@sdu3rhbO zHMmaS;e9%zFzV=maxvxfKS3g?W-L~Hdko%HTwEpcSXWSc)`_?2XAiS(>zdFJDVh3Z zb<=&X=hNj>Tz$Efd_M$6U%Q%E|J3ESP3T1*@1W2tH@m&2EqH6kGIj3egMTM%Tw;5B zdAIZy-E)0;>deWXYnFXq8oJJKmM+`rn-LS^W9#?W{5=u(ApWG3`HZ_2jI2+dCYSR+ z6Q3@xe@k5cyuMe>^&fAH+_#xaANe`+cUtP#S${OLUmo!<*%~!f`9;@*0+Z7xAL?d3 zUVOV{`_bPkg&&8fD^?|Lk&oW?Cq-iF8NQbSwV#}-I2OM5eH3ov74JVUIsIqaf5V>I zPjg%>jHi4p|7{o?^G0Oa6_dFKWG7aB+;wne&)!!FraGTio;2m#wDRt;^X2RZ9M*8% zkGtBTySg&sZg^PQvDTPhznGaa8s$X3%$Qmkdj0yFcNWa;7Z*Lc$I2|l)jdh6+vswK zN@H|r_hxUFmdh*!%Uv`*ikfx=8TttcbHpF{uA8(>wLT^*Vq)pR{49?0vLZ%-%4HL8 z%zjcHu}VGQvhLE^&S!$s*aB-;XP#Ez3W?1A$G=IF$GLp(pGV7frtJ2YN_-gk!upLy z#$AECZ{&J^vS|O8_{2PGKO@)6zF=RmLeJDPAuB`eS0Z~1n|XGwXqynAqi&*RJoCqd z$@xjMstYx=ug z(XWN`?GFE5HTUi7zvoxg7D4e)6>bab9-WlSw)%zRs(I%EHp>YESLd;=6h9baQ)rcJltDJnkD` zj~-R!x-GMUYj@?rGSM6NPDI7NU3p_)#&-KTvGa<*{TA&16dXHi=7UZDonEd^d}92- zq{CnhU&HK-e|r=+92fg(^!fdU!;_AcdQ82)Eawzo%FZn-Sq#6`zMniPYa8c;%?VfL zoc|;ExS&XF+s(H@Vb_#?%au&|c_jC-X5Zodv&;M$X4xK`+<*Q0^>AKCBm3La@}6@_ zKZ(7Ru;R(BGo_rh3%oD4^5?9r-j(~uQ>sB}vqs+c5`%?(Nq<((ysNR`?4<{cv3;}7 zFPL`pB>P6yZJ)k-zWI5R`Qy#bBLWPQEdH(y%(=haZ~eK`e#IM}l%6$t{-)VBea)&T z^X6{%ukX0==<)h9d8<^mM@@e#uQ>DW{|}Ll-d8iC*S_vv%%OW;-`Gq3xVqm9M|r(% z{+~jBKRR4^B){k9Ot%HTyH)4iy3nb0QT*-a)$%6i6OBIYEqq$BUjO{Q`o37BWxna} zE>5dGXQ#5pM#*(jsFD2^y>sXK?T;`0KJ{2l-OaS+OYBe7YqQ_lXSd&`??m|Vugfho 
z;NL6WTrG6?h-qT0B>+S!QLq|IYU-Kb+TEH@C1{_o$Isv+3`cp40EXt$7@(7}s+1 z<{5Q{KdO`O^?CpP*_(5qe%Yr6i}l8guln}{3O$xOvd1)VmB;V<&)iP89NxQQ&#Hx$ zvvuZP+j4J`-rKnbAD8(2e$MeA-f-UWOZ8qm9~%d}|M|bOsQd1gb>bIi95;7wxc4;4 z?a3cDb@s^fv+aI<(D%#!s*}EoT_^qVf+;cPd;gx{yW>=I;@%_i;H`dVR8p2pCOtiN z<@QteP!kWa%S_#;OtYu^d{gF%n|8*gdxCmb&f+aMN)|6-&p(p)GVtivG;!u5N7l)T zhg_8wzR7>2r1TmH_`SNrblc`LnHtxU1*=9U|a6VxyBG79Ajot;y0cxDzy?;&EtM<|Nda&1?e|y_1q4J9k%uGIiY)C z=go&E=Dgf2PQSV%zHmN%o3%4@-Ol@Q*|y%+24ejGes8F{I_bF)`9i>vr9uK_nho&%U0f(x$dvhUH|`VpkDk>Jy)}Keg=jUevn=~ zWC-VL>{Tn+@C}IOV_;xN%gjj)NzEs_4s z)8&zaO4ia-?ccCv^VL$gHMHi7B)C zmIOpC5)V3gTG`^jIjckU*WSMW@Oyqa$h$GMvf(8R3=9((kiDB1egnk|pg<_W;#G8Y z!VC-y2zABDi8+a&;f=S`;{6X9h#bBDQ^b6#K<~wavl-bU6aH}DE||?S-$#~pddb^2 zR!8#R-(ZsxF{&||xc%POJH`34K6_uwSSQL*=vT!*R^uZz|@JV7<;A)l)yII?q7cFYt@3!HPiQl;{ro9i(TzDOD?QGu8 zqQKQ=0%ts*aN19KdEtwkjT}phC(ESUDFZd9Tim`=i}p|d70*)J+S)(=W!ZycHVaE-xrAJI zKA)pg;PcevHZ$Y0{2#VOD*UE9zw@N$oxAggGcWA^j+J|k#h4jYy_yhOzz6^-8t_J7qi*TcV@eTkJRTkIvsG?ER+6y_ItybA9vpl zn!uY8tRZ>f_-}SE2fZtsCcJLg_oR8Z`mMJXc21F;eFqp$My=k(m67XkxzG8;%gG*b zP7d0!9!pXVyVkC0*;6>#pzlufBR9|cOH(-07It%e-=y0n6e781ik>A~$Vo#B2vm*{m`xIaOpw&BY{=RFS6VSlRBe#(J%98cH`$}S+5;G9^G$SxXLN# zWXJVOx>HzK|Kkj+|zNi_Ueet)r{PDm^@s|&M%9+EXYc2Kfrpc+} znMpSdPCZ_!`sJ#WWukHP{+?LV?l`k~H~sQwEPeUstXNL&%&&(sD-Ol!oXO9a)xYQs zf7E^J+j{q{<$Q}{?%gacm3ht%ihvXCJ9j!VGB8YGMvj1_fL@e=Vo8(;C{Il)PAw|S zOinEZXR7Gv;@jJV>dvn}uwoXU?PKPhFTL}!ju*aBoB69q$MWTwnZ|M*ExKAF2bdP9 znojoh*5iQoMf84LF;^ zes_XKBfF~Di_iV`f3|$z^m}3-TiPAY3pby=zoXvOt@p|;a&^DN+=XdH`OlYjYi`)M zD$~r;`_A5FuV$<=n= zH*NKcGNzoE5U6?4WbQmeuVcLBo3Gz|vb$Y9f7i6r;w;xb9>3ReXM!bL`2nSG8@E1t zcxQQGmXOtb$N3NS`&>_4aS;4}IY3~3lj6ncQvwgNb!T>*e>r9KHXbXD1((DVy>7k# z<0dJ3YwEY>8$=#to;asEcl~_5Aa>0S-2~mVf6|P>tE}?G2eV2GtuMH z75jtE`5hc5HBQ-VIk{DV=h;Sv$xDA_Jk{Xl2xb@5VV>HqGxJvI!$(q+XZ%R+;>_s& ztFgoARn+{#n{Ly>kL%WUrtt2xJQbq)Tjs+=rZ&IMfVi`B65pt; z-q@v9UY?L}DX%gtWp{dl)5bim(2k<$b>}j!Zp?l2ym;A*n3?Jo&)63CXubb#|0Z5; z&OS?>rRy&}5dCy8()!Tlhqn(Go4;Hg=ean1ug+Af&sm%1gbQXJQwZ^Fw_0s>S$yZl 
zws#FKfmZw1@vmK%_jTfRhdsd=4YiJmM!U98NdEC+iMa}s(}M}DhV42Vn6v2UEuRzlg;D_y~ma>=KOl~|G$2_<(%KSe@zQ|XT+R)Gg;_mix;!O$J&&4Eh}Ap zn%7S|_+PW-y7yb5RljGeOuN5cQSW=v^fMcSK6c0XFaMNrW@**OocA~5^x2=Ql}7Q! zMqd{Gc~(1$Ws`Skxzo*M^C~sJPWcv-F0j#=qvS^U(FBZb&&M3!a``u8o=A%=RX8LOzQ8R&tKxZu(c~QxH`lGXbzt?@gK{!F-mCZA zvpcZFZiA2X<|#{Z!Y0jMdt+Iy<2v^|h0QA$<$jrDG%aQ7@m04?qh9}RTAK3i&q~gP z&0APnFX{Z9|8UaFudlLBEm@&fJm-}5PCXq(iRi4jZ3niB#>ckqc&)=aUF%@b#;udH z{bgCFOJ3!vjos_ zYZ@F0e$pV|I$_z987?>LE{DZDtBosOx_4f;sFm4cWvzz%hNjd$?_bkTEO*@;KX1$3 zGvZTPA0&DFir)5M17|q*`Z$lQ^`{qK{`~mw!w(PktMt#JXwM z+B)CK7ri$3^?0EY7Z^SIB+?-89-f1GQEd)^#(ewHuq{Z4=I)BdrqE^$87 zo?~B^OP;YWYdF96b6H2c;$HCy`afe5%JO@qd!t_IUb&xq)?VInVV$U@55Jw#zDG%c zjdvd%IW{TTGq0<7A&>W~S4XlAD4!SoP=5P}|KjTbA5N{0x{$kib?%9+tJj(F?v%L3 z(sUwV@zju5@wnYbJ5)}UE&DE3EZKZ1(p2Z6N4s>}j@|D$)}`bus68QZPwhgkiM3wp zq*?P%c^|KQnd)+7>dXC8vuC`#eX4HZLk|8(v$yLM))f~oe0lHSWp<0dyD!VGUj90M z)2jX9S8LewTz#wWgeN_>;k)B==bKpgZHGC3r28)2pZxRq!EYM1)l>G_di{?}@VpZz zf9HO=w06$*zeSIyT}pn-ukh~uY1W2wpEoz>|GCZjob^hyLh0|dhrUI#N_=HVefVj` z@vfbl--{K!@7P$S^l`?l+w-EQ++FnMvD3{xS$XkQdD|b)PW;evrd7J-pihLF&R&&O z_Rbgl99ZkP&pO_6S>}G9ed`ioEtS{=q4o%EhXXx_Y|VbJy(4qtc0ci&j z)i*D_x+3MT=;Sxs|JPaTobHEOvl2 zIY4cC1_t!$Nm&L42G6{b)SR45F9lB*S4Rc+ocyH39EIT2l9J54^x^<-MkX!>h-UP; zYS6?ssCR_GpqXm$M0GKAnwybHgaOjp1ZhT|G6hX^BQ)v~oc0De1ZEb<7eDn79XHS< zE5fY5j98{zLE2&dz}CA%Xg@5-fYH}OcmveiL+(T=A~Z5EFucdmi>2>}>=g7iI3%E9 zW^NKf?q7lQ!9oC}54lwj>WRU0F)-Xh(FgCBp&NkQDg`zE5eBRfLADsy6+qSxYP%vQ zF;LqTp Date: Fri, 25 May 2012 00:49:45 +0200 Subject: [PATCH 144/538] Adding docs about creating TypedActor children --- .../src/main/scala/akka/camel/CamelMessage.scala | 2 +- .../code/docs/actor/TypedActorDocTestBase.java | 16 ++++++++++++++++ akka-docs/java/typed-actors.rst | 8 ++++++-- .../code/docs/actor/TypedActorDocSpec.scala | 12 ++++++++++++ .../serialization/SerializationDocSpec.scala | 2 +- akka-docs/scala/typed-actors.rst | 8 ++++++-- 6 files changed, 42 insertions(+), 6 deletions(-) diff --git 
a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala index 4f617c83a4..cb4121189d 100644 --- a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala @@ -200,7 +200,7 @@ object CamelMessage { * so that it can be correlated with an asynchronous response. Messages send to Consumer * actors have this header already set. */ - val MessageExchangeId = "MessageExchangeId".intern //Deliberately without type ascription to make it a constant + val MessageExchangeId = "MessageExchangeId" //Deliberately without type ascription to make it a constant /** * Creates a canonical form of the given message msg. If msg of type diff --git a/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java b/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java index 99dda513ab..fdd677c78b 100644 --- a/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java +++ b/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java @@ -5,6 +5,7 @@ package docs.actor; //#imports +import akka.actor.TypedActor; import akka.dispatch.*; import akka.actor.*; import akka.japi.*; @@ -151,6 +152,21 @@ public class TypedActorDocTestBase { } } + @Test public void createHierarchies() { + try { + //#typed-actor-hierarchy + Squarer childSquarer = + TypedActor.get(TypedActor.context()). 
+ typedActorOf( + new TypedProps(Squarer.class, SquarerImpl.class) + ); + //Use "childSquarer" as a Squarer + //#typed-actor-hierarchy + } catch (Exception e) { + //dun care + } + } + @Test public void proxyAnyActorRef() { try { //#typed-actor-remote diff --git a/akka-docs/java/typed-actors.rst b/akka-docs/java/typed-actors.rst index 7ab2274425..7712622dfe 100644 --- a/akka-docs/java/typed-actors.rst +++ b/akka-docs/java/typed-actors.rst @@ -163,7 +163,11 @@ Typed Actor Hierarchies Since you can obtain a contextual Typed Actor Extension by passing in an ``ActorContext`` you can create child Typed Actors by invoking ``typedActorOf(..)`` on that. -This also works for creating child Typed Actors in regular Akka Actors. +.. includecode:: code/docs/actor/TypedActorDocTestBase.java + :include: typed-actor-hierarchy + +You can also create a child Typed Actor in regular Akka Actors by giving the ``UntypedActorContext`` +as an input parameter to TypedActor.get(…). Supervisor Strategy ------------------- @@ -204,4 +208,4 @@ Lookup & Remoting Since ``TypedActors`` are backed by ``Akka Actors``, you can use ``actorFor`` together with ``typedActorOf`` to proxy ``ActorRefs`` potentially residing on remote nodes. -.. includecode:: code/docs/actor/TypedActorDocTestBase.java#typed-actor-remote \ No newline at end of file +..
includecode:: code/docs/actor/TypedActorDocTestBase.java#typed-actor-remote diff --git a/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala b/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala index e2c8db16a4..0c2f3bd5b8 100644 --- a/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala @@ -151,6 +151,18 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { //#typed-actor-remote } + "create hierarchies" in { + try { + //#typed-actor-hierarchy + //Inside your Typed Actor + val childSquarer: Squarer = TypedActor(TypedActor.context).typedActorOf(TypedProps[SquarerImpl]()) + //Use "childSquarer" as a Squarer + //#typed-actor-hierarchy + } catch { + case e: Exception ⇒ //ignore + } + } + "supercharge" in { //#typed-actor-supercharge-usage val awesomeFooBar: Foo with Bar = TypedActor(system).typedActorOf(TypedProps[FooBar]()) diff --git a/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala index 5fba0c4f97..9b222436da 100644 --- a/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/scala/code/docs/serialization/SerializationDocSpec.scala @@ -54,7 +54,7 @@ package docs.serialization { // using the type hint (if any, see "includeManifest" above) // into the optionally provided classLoader. def fromBinary(bytes: Array[Byte], - clazz: Option[Class[_]]): AnyRef = { + clazz: Option[Class[_]]): AnyRef = { // Put your code that deserializes here //#... 
null diff --git a/akka-docs/scala/typed-actors.rst b/akka-docs/scala/typed-actors.rst index 694078a58d..7c039a1db6 100644 --- a/akka-docs/scala/typed-actors.rst +++ b/akka-docs/scala/typed-actors.rst @@ -161,9 +161,13 @@ Typed Actor Hierarchies ----------------------- Since you can obtain a contextual Typed Actor Extension by passing in an ``ActorContext`` -you can create child Typed Actors by invoking ``typedActorOf(..)`` on that. +you can create child Typed Actors by invoking ``typedActorOf(..)`` on that: -This also works for creating child Typed Actors in regular Akka Actors. +.. includecode:: code/docs/actor/TypedActorDocSpec.scala + :include: typed-actor-hierarchy + +You can also create a child Typed Actor in regular Akka Actors by giving the ``ActorContext`` +as an input parameter to TypedActor.get(…). Supervisor Strategy ------------------- From f25e962f7eb2d547773786be5c8d64631a92019f Mon Sep 17 00:00:00 2001 From: Peter Badenhorst Date: Fri, 25 May 2012 00:59:17 +0200 Subject: [PATCH 145/538] Added changes to Netty pipelines to support SSL/TLS.
Fixes #1978 1) Netty server and client pipelines updated to conditionally load keystore/truststore if SSL is enabled in the config 2) Supports any available encryption protocol via 'ssl-protocol' 3) Supported encryption algorithms are specified via 'ssl-encryption-protocol' config key --- akka-remote/src/main/resources/reference.conf | 27 +++++++ .../main/scala/akka/remote/netty/Client.scala | 60 +++++++++++++++- .../main/scala/akka/remote/netty/Server.scala | 70 +++++++++++++++++-- .../scala/akka/remote/netty/Settings.scala | 41 +++++++++++ .../akka/remote/Ticket1978ConfigSpec.scala | 46 ++++++++++++ 5 files changed, 239 insertions(+), 5 deletions(-) create mode 100644 akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 4512ea3a98..c96ec951d7 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -151,6 +151,33 @@ akka { # (O) Maximum time window that a client should try to reconnect for reconnection-time-window = 600s + + # (I&O) Enable SSL/TLS encryption. + # This must be enabled on both the client and server to work. 
+ enable-ssl = off + + # (I) This is the Java Key Store used by the server connection + ssl-key-store = "keystore" + + # This password is used for decrypting the key store + ssl-key-store-password = "changeme" + + # (O) This is the Java Key Store used by the client connection + ssl-trust-store = "truststore" + + # This password is used for decrypting the trust store + ssl-trust-store-password = "changeme" + + # (I&O) Protocol to use for SSL encryption, choose from: + # Java 6 & 7: + # SSLv3, TLSv1, + # Java 7: + # TLSv1.1, TLSv1.2 + ssl-protocol = "TLSv1" + + # You need to install the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256 + # More info here: http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider + ssl-supported-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 7baf3011ee..36df8b4d1f 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -20,10 +20,15 @@ import akka.actor.ActorRef import org.jboss.netty.channel.ChannelFutureListener import akka.remote.RemoteClientWriteFailed import java.net.InetAddress +import java.security.{ SecureRandom, KeyStore, GeneralSecurityException } import org.jboss.netty.util.TimerTask import org.jboss.netty.util.Timeout import java.util.concurrent.TimeUnit import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } +import java.security.cert.X509Certificate +import javax.net.ssl.{ SSLContext, X509TrustManager, TrustManagerFactory, TrustManager } +import org.jboss.netty.handler.ssl.SslHandler +import java.io.FileInputStream class RemoteClientMessageBufferException(message: String, cause: Throwable) extends AkkaException(message, cause) { def this(msg: String) = 
this(msg, null) @@ -329,7 +334,53 @@ class ActiveRemoteClientPipelineFactory( import client.netty.settings + def initTLS(trustStorePath: String, trustStorePassword: String): Option[SSLContext] = { + if (trustStorePath != null && trustStorePassword != null) + try { + val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) + val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) + val stream = new FileInputStream(trustStorePath) + trustStore.load(stream, trustStorePassword.toCharArray) + trustManagerFactory.init(trustStore); + val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers + + val sslContext = SSLContext.getInstance("TLS") + sslContext.init(null, trustManagers, new SecureRandom()) + Some(sslContext) + } catch { + case e: GeneralSecurityException ⇒ { + client.log.error(e, "TLS connection could not be established. TLS is not used!"); + None + } + } + else { + client.log.error("TLS connection could not be established because trust store details are missing") + None + } + } + + def getSSLHandler_? : Option[SslHandler] = { + val sslContext: Option[SSLContext] = { + if (settings.EnableSSL) { + client.log.debug("Client SSL is enabled, initialising ...") + initTLS(settings.SSLTrustStore.get, settings.SSLTrustStorePassword.get) + } else { + None + } + } + if (sslContext.isDefined) { + client.log.debug("Client Using SSL context to create SSLEngine ...") + val sslEngine = sslContext.get.createSSLEngine + sslEngine.setUseClientMode(true) + sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) + Some(new SslHandler(sslEngine)) + } else { + None + } + } + def getPipeline: ChannelPipeline = { + val sslHandler = getSSLHandler_? 
val timeout = new IdleStateHandler(client.netty.timer, settings.ReadTimeout.toSeconds.toInt, settings.WriteTimeout.toSeconds.toInt, @@ -340,7 +391,14 @@ class ActiveRemoteClientPipelineFactory( val messageEnc = new RemoteMessageEncoder(client.netty) val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, localAddress, client.netty.timer, client) - new StaticChannelPipeline(timeout, lenDec, messageDec, lenPrep, messageEnc, executionHandler, remoteClient) + val stages: List[ChannelHandler] = timeout :: lenDec :: messageDec :: lenPrep :: messageEnc :: executionHandler :: remoteClient :: Nil + if (sslHandler.isDefined) { + client.log.debug("Client creating pipeline with SSL handler...") + new StaticChannelPipeline(sslHandler.get :: stages: _*) + } else { + client.log.debug("Client creating pipeline without SSL handler...") + new StaticChannelPipeline(stages: _*) + } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 7e4d1eaaa9..2f572ba1d7 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -5,6 +5,7 @@ package akka.remote.netty import java.net.InetSocketAddress import java.util.concurrent.Executors +import java.io.FileNotFoundException import scala.Option.option2Iterable import org.jboss.netty.bootstrap.ServerBootstrap import org.jboss.netty.channel.ChannelHandler.Sharable @@ -12,13 +13,17 @@ import org.jboss.netty.channel.group.ChannelGroup import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler -import akka.event.Logging import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } import akka.remote.{ RemoteServerShutdown, RemoteServerError, 
RemoteServerClientDisconnected, RemoteServerClientConnected, RemoteServerClientClosed, RemoteProtocol, RemoteMessage } import akka.actor.Address import java.net.InetAddress import akka.actor.ActorSystemImpl import org.jboss.netty.channel._ +import org.jboss.netty.handler.ssl.SslHandler +import java.security.{ SecureRandom, KeyStore, GeneralSecurityException } +import javax.net.ssl.{ KeyManagerFactory, SSLContext } +import java.io.FileInputStream +import akka.event.{ LoggingAdapter, Logging } class NettyRemoteServer(val netty: NettyRemoteTransport) { @@ -26,6 +31,8 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { val ip = InetAddress.getByName(settings.Hostname) + lazy val log = Logging(netty.system, "NettyRemoteServer(" + ip + ")") + private val factory = settings.UseDispatcherForIO match { case Some(id) ⇒ @@ -42,7 +49,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { private val bootstrap = { val b = new ServerBootstrap(factory) - b.setPipelineFactory(new RemoteServerPipelineFactory(openChannels, executionHandler, netty)) + b.setPipelineFactory(new RemoteServerPipelineFactory(openChannels, executionHandler, netty, log)) b.setOption("backlog", settings.Backlog) b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) @@ -85,11 +92,60 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { class RemoteServerPipelineFactory( val openChannels: ChannelGroup, val executionHandler: ExecutionHandler, - val netty: NettyRemoteTransport) extends ChannelPipelineFactory { + val netty: NettyRemoteTransport, + val log: LoggingAdapter) extends ChannelPipelineFactory { import netty.settings + def initTLS(keyStorePath: String, keyStorePassword: String): Option[SSLContext] = { + if (keyStorePath != null && keyStorePassword != null) { + try { + val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) + val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) + val stream = new FileInputStream(keyStorePath) + 
keyStore.load(stream, keyStorePassword.toCharArray) + factory.init(keyStore, keyStorePassword.toCharArray) + val sslContext = SSLContext.getInstance(settings.SSLProtocol.get) + sslContext.init(factory.getKeyManagers, null, new SecureRandom()) + Some(sslContext) + } catch { + case e: FileNotFoundException ⇒ { + log.error(e, "TLS connection could not be established because keystore could not be loaded") + None + } + case e: GeneralSecurityException ⇒ { + log.error(e, "TLS connection could not be established") + None + } + } + } else { + log.error("TLS connection could not be established because key store details are missing") + None + } + } + + def getSSLHandler_? : Option[SslHandler] = { + val sslContext: Option[SSLContext] = { + if (settings.EnableSSL) { + log.debug("SSL is enabled, initialising...") + initTLS(settings.SSLKeyStore.get, settings.SSLKeyStorePassword.get) + } else { + None + } + } + if (sslContext.isDefined) { + log.debug("Using SSL context to create SSLEngine...") + val sslEngine = sslContext.get.createSSLEngine + sslEngine.setUseClientMode(false) + sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) + Some(new SslHandler(sslEngine)) + } else { + None + } + } + def getPipeline: ChannelPipeline = { + val sslHandler = getSSLHandler_? 
val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) val lenPrep = new LengthFieldPrepender(4) val messageDec = new RemoteMessageDecoder @@ -98,7 +154,13 @@ class RemoteServerPipelineFactory( val authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil val remoteServer = new RemoteServerHandler(openChannels, netty) val stages: List[ChannelHandler] = lenDec :: messageDec :: lenPrep :: messageEnc :: executionHandler :: authenticator ::: remoteServer :: Nil - new StaticChannelPipeline(stages: _*) + if (sslHandler.isDefined) { + log.debug("Creating pipeline with SSL handler...") + new StaticChannelPipeline(sslHandler.get :: stages: _*) + } else { + log.debug("Creating pipeline without SSL handler...") + new StaticChannelPipeline(stages: _*) + } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index e2f69d77b5..2105620c18 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -73,4 +73,45 @@ class NettySettings(config: Config, val systemName: String) { case sz ⇒ sz } + val SSLKeyStore = getString("ssl-key-store") match { + case "" ⇒ None + case keyStore ⇒ Some(keyStore) + } + + val SSLTrustStore = getString("ssl-trust-store") match { + case "" ⇒ None + case trustStore ⇒ Some(trustStore) + } + + val SSLKeyStorePassword = getString("ssl-key-store-password") match { + case "" ⇒ None + case password ⇒ Some(password) + } + + val SSLTrustStorePassword = getString("ssl-trust-store-password") match { + case "" ⇒ None + case password ⇒ Some(password) + } + + val SSLSupportedAlgorithms = getStringList("ssl-supported-algorithms") + + val SSLProtocol = getString("ssl-protocol") match { + case "" ⇒ None + case protocol ⇒ Some(protocol) + } + + val EnableSSL = { + val enableSSL = getBoolean("enable-ssl") + if 
(enableSSL) { + if (SSLProtocol.isEmpty) throw new ConfigurationException( + "Configuration option 'akka.remote.netty.enable-ssl is turned on but no protocol is defined in 'akka.remote.netty.ssl-protocol'.") + if (SSLKeyStore.isEmpty && SSLTrustStore.isEmpty) throw new ConfigurationException( + "Configuration option 'akka.remote.netty.enable-ssl is turned on but no key/trust store is defined in 'akka.remote.netty.ssl-key-store' / 'akka.remote.netty.ssl-trust-store'.") + if (SSLKeyStore.isDefined && SSLKeyStorePassword.isEmpty) throw new ConfigurationException( + "Configuration option 'akka.remote.netty.ssl-key-store' is defined but no key-store password is defined in 'akka.remote.netty.ssl-key-store-password'.") + if (SSLTrustStore.isDefined && SSLTrustStorePassword.isEmpty) throw new ConfigurationException( + "Configuration option 'akka.remote.netty.ssl-trust-store' is defined but no trust-store password is defined in 'akka.remote.netty.ssl-trust-store-password'.") + } + enableSSL + } } \ No newline at end of file diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala new file mode 100644 index 0000000000..0d429043c2 --- /dev/null +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -0,0 +1,46 @@ +package akka.remote + +import akka.testkit._ +import akka.actor._ +import com.typesafe.config._ +import akka.actor.ExtendedActorSystem +import akka.util.duration._ +import akka.util.Duration +import akka.remote.netty.NettyRemoteTransport +import java.util.ArrayList + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978ConfigSpec extends AkkaSpec(""" +akka { + actor.provider = "akka.remote.RemoteActorRefProvider" + remote.netty { + hostname = localhost + port = 12345 + } + actor.deployment { + /blub.remote = "akka://remote-sys@localhost:12346" + /looker/child.remote = "akka://remote-sys@localhost:12346" + 
/looker/child/grandchild.remote = "akka://RemoteCommunicationSpec@localhost:12345" + } +} +""") with ImplicitSender with DefaultTimeout { + + "SSL Remoting" must { + "be able to parse these extra Netty config elements" in { + val settings = + system.asInstanceOf[ExtendedActorSystem] + .provider.asInstanceOf[RemoteActorRefProvider] + .transport.asInstanceOf[NettyRemoteTransport] + .settings + import settings._ + + EnableSSL must be(false) + SSLKeyStore must be(Some("keystore")) + SSLKeyStorePassword must be(Some("changeme")) + SSLTrustStore must be(Some("truststore")) + SSLTrustStorePassword must be(Some("changeme")) + SSLProtocol must be(Some("TLSv1")) + SSLSupportedAlgorithms must be(java.util.Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) + } + } +} From 0ac7f967dd6d6e70ee713127abbc349e4f66586a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 08:32:42 +0200 Subject: [PATCH 146/538] Change node naming. See 2111 --- .../akka/cluster/JoinTwoClustersSpec.scala | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index b5e764ea23..9ed003944f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -40,20 +40,20 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm override def initialParticipants = 6 - def node(): Cluster = Cluster(system) + def cluster: Cluster = Cluster(system) after { testConductor.enter("after") } - val a1Address = testConductor.getAddressFor(a1).await - val b1Address = testConductor.getAddressFor(b1).await - val c1Address = testConductor.getAddressFor(c1).await + val a1Address = node(a1).address + val b1Address = node(b1).address + val c1Address = node(c1).address def 
awaitUpConvergence(numberOfMembers: Int): Unit = { - awaitCond(node().latestGossip.members.size == numberOfMembers) - awaitCond(node().latestGossip.members.forall(_.status == MemberStatus.Up)) - awaitCond(node().convergence.isDefined) + awaitCond(cluster.latestGossip.members.size == numberOfMembers) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitCond(cluster.convergence.isDefined) } "Three different clusters (A, B and C)" must { @@ -61,40 +61,40 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm "be able to 'elect' a single leader after joining (A -> B)" in { runOn(a1, a2) { - node().join(a1Address) + cluster.join(a1Address) } runOn(b1, b2) { - node().join(b1Address) + cluster.join(b1Address) } runOn(c1, c2) { - node().join(c1Address) + cluster.join(c1Address) } awaitUpConvergence(numberOfMembers = 2) - node().isLeader must be(ifNode(a1, b1, c1)(true)(false)) + cluster.isLeader must be(ifNode(a1, b1, c1)(true)(false)) runOn(b2) { - node().join(a1Address) + cluster.join(a1Address) } runOn(a1, a2, b1, b2) { awaitUpConvergence(numberOfMembers = 4) } - node().isLeader must be(ifNode(a1, c1)(true)(false)) + cluster.isLeader must be(ifNode(a1, c1)(true)(false)) } "be able to 'elect' a single leader after joining (C -> A + B)" in { runOn(b2) { - node().join(c1Address) + cluster.join(c1Address) } awaitUpConvergence(numberOfMembers = 6) - node().isLeader must be(ifNode(a1)(true)(false)) + cluster.isLeader must be(ifNode(a1)(true)(false)) } } From ed31c1738fd2912e0c9a7784fc08baf94e26fb65 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 08:43:11 +0200 Subject: [PATCH 147/538] Change node naming. 
See 2115 --- .../akka/cluster/NodeMembershipSpec.scala | 30 +++++++++---------- .../scala/akka/cluster/NodeStartupSpec.scala | 21 +++++++------ 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index de734088f1..bb537091c3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -33,31 +33,31 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp override def initialParticipants = 3 - def node() = Cluster(system) + def cluster: Cluster = Cluster(system) after { testConductor.enter("after") } - "A set of connected cluster systems" must { + val firstAddress = node(first).address + val secondAddress = node(second).address + val thirdAddress = node(third).address - val firstAddress = testConductor.getAddressFor(first).await - val secondAddress = testConductor.getAddressFor(second).await - val thirdAddress = testConductor.getAddressFor(third).await + "A set of connected cluster systems" must { "(when two systems) start gossiping to each other so that both systems gets the same gossip info" in { runOn(first, second) { - node().join(firstAddress) - awaitCond(node().latestGossip.members.size == 2) - val members = node().latestGossip.members.toIndexedSeq + cluster.join(firstAddress) + awaitCond(cluster.latestGossip.members.size == 2) + val members = cluster.latestGossip.members.toIndexedSeq members.size must be(2) members(0).address must be(firstAddress) members(1).address must be(secondAddress) awaitCond { - node().latestGossip.members.forall(_.status == MemberStatus.Up) + cluster.latestGossip.members.forall(_.status == MemberStatus.Up) } - awaitCond(node().convergence.isDefined) + awaitCond(cluster.convergence.isDefined) } } @@ -65,20 +65,20 @@ abstract class NodeMembershipSpec 
extends MultiNodeSpec(NodeMembershipMultiJvmSp "(when three systems) start gossiping to each other so that both systems gets the same gossip info" in { runOn(third) { - node().join(firstAddress) + cluster.join(firstAddress) } // runOn all - awaitCond(node().latestGossip.members.size == 3) - val members = node().latestGossip.members.toIndexedSeq + awaitCond(cluster.latestGossip.members.size == 3) + val members = cluster.latestGossip.members.toIndexedSeq members.size must be(3) members(0).address must be(firstAddress) members(1).address must be(secondAddress) members(2).address must be(thirdAddress) awaitCond { - node().latestGossip.members.forall(_.status == MemberStatus.Up) + cluster.latestGossip.members.forall(_.status == MemberStatus.Up) } - awaitCond(node().convergence.isDefined) + awaitCond(cluster.convergence.isDefined) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 8bb76fb613..f2206f8d89 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -31,27 +31,28 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi override def initialParticipants = 2 - def node() = Cluster(system) + def cluster: Cluster = Cluster(system) after { testConductor.enter("after") } - val firstAddress = testConductor.getAddressFor(first).await + val firstAddress = node(first).address + val secondAddress = node(second).address "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { "be a singleton cluster when started up" in { runOn(first) { - awaitCond(node().isSingletonCluster) + awaitCond(cluster.isSingletonCluster) // FIXME #2117 singletonCluster should reach convergence - //awaitCond(node().convergence.isDefined) + //awaitCond(cluster.convergence.isDefined) } } "be in 'Joining' phase when started up" 
in { runOn(first) { - val members = node().latestGossip.members + val members = cluster.latestGossip.members members.size must be(1) val joiningMember = members find (_.address == firstAddress) @@ -65,18 +66,16 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi "join the other node cluster when sending a Join command" in { runOn(second) { - node().join(firstAddress) + cluster.join(firstAddress) } - // runOn all - val secondAddress = testConductor.getAddressFor(second).await awaitCond { - node.latestGossip.members.exists { member ⇒ + cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Up } } - node().latestGossip.members.size must be(2) - awaitCond(node().convergence.isDefined) + cluster.latestGossip.members.size must be(2) + awaitCond(cluster.convergence.isDefined) } } From 8d1dea1750073ff68555158971fe67e2a37a1c50 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 08:59:13 +0200 Subject: [PATCH 148/538] Change node naming. 
See 2114 --- .../MembershipChangeListenerSpec.scala | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index c648cdf631..c5ae2f11e7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -35,7 +35,7 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan override def initialParticipants = 3 - def node(): Cluster = Cluster(system) + def cluster: Cluster = Cluster(system) after { testConductor.enter("after") @@ -43,22 +43,22 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "A set of connected cluster systems" must { - val firstAddress = testConductor.getAddressFor(first).await - val secondAddress = testConductor.getAddressFor(second).await + val firstAddress = node(first).address + val secondAddress = node(second).address "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { runOn(first, second) { - node().join(firstAddress) + cluster.join(firstAddress) val latch = TestLatch() - node().registerListener(new MembershipChangeListener { + cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) latch.countDown() } }) latch.await - node().convergence.isDefined must be(true) + cluster.convergence.isDefined must be(true) } } @@ -66,19 +66,18 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { runOn(third) { - node().join(firstAddress) 
+ cluster.join(firstAddress) } - // runOn all val latch = TestLatch() - node().registerListener(new MembershipChangeListener { + cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) latch.countDown() } }) latch.await - node().convergence.isDefined must be(true) + cluster.convergence.isDefined must be(true) } } From 3aba8dc424e643f7085821729b19f93926bcae8d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 10:00:30 +0200 Subject: [PATCH 149/538] Assert with sorted member addresses, since ports are random --- .../scala/akka/cluster/NodeMembershipSpec.scala | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index bb537091c3..a8af644fe0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -52,8 +52,9 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp awaitCond(cluster.latestGossip.members.size == 2) val members = cluster.latestGossip.members.toIndexedSeq members.size must be(2) - members(0).address must be(firstAddress) - members(1).address must be(secondAddress) + val sortedAddresses = IndexedSeq(firstAddress, secondAddress).sortBy(_.toString) + members(0).address must be(sortedAddresses(0)) + members(1).address must be(sortedAddresses(1)) awaitCond { cluster.latestGossip.members.forall(_.status == MemberStatus.Up) } @@ -72,9 +73,10 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp awaitCond(cluster.latestGossip.members.size == 3) val members = cluster.latestGossip.members.toIndexedSeq members.size must be(3) - members(0).address must be(firstAddress) - members(1).address must be(secondAddress) 
- members(2).address must be(thirdAddress) + val sortedAddresses = IndexedSeq(firstAddress, secondAddress, thirdAddress).sortBy(_.toString) + members(0).address must be(sortedAddresses(0)) + members(1).address must be(sortedAddresses(1)) + members(2).address must be(sortedAddresses(2)) awaitCond { cluster.latestGossip.members.forall(_.status == MemberStatus.Up) } From 0bc0c237e6e915ce310c659e844a3500d6cc8f64 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 25 May 2012 11:27:27 +0200 Subject: [PATCH 150/538] try to fix the race between sending a message and the client shutting down --- .../NetworkFailureInjector.scala | 41 ++++++++++--------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index bf5d7d6007..d30872cd6e 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -139,26 +139,29 @@ private[akka] class FailureInjector extends Actor with ActorLogging { if (direction includes Direction.Receive) rcv ! 
s case None ⇒ // don’t do reverse lookup at first - val (ipaddr, ip, port) = ctx.getChannel.getRemoteAddress match { - case s: InetSocketAddress ⇒ (s.getAddress, s.getAddress.getHostAddress, s.getPort) + ctx.getChannel.getRemoteAddress match { + case sockAddr: InetSocketAddress ⇒ + val (ipaddr, ip, port) = (sockAddr.getAddress, sockAddr.getAddress.getHostAddress, sockAddr.getPort) + val addr = ChannelAddress.get(ctx.getChannel) orElse { + settings collect { case (a @ Address("akka", _, Some(`ip`), Some(`port`)), _) ⇒ a } headOption + } orElse { + // only if raw IP failed, try with hostname + val name = ipaddr.getHostName + if (name == ip) None + else settings collect { case (a @ Address("akka", _, Some(`name`), Some(`port`)), _) ⇒ a } headOption + } getOrElse Address("akka", "", ip, port) + /* + * ^- the above last resort will not match later requests directly, but be + * picked up by retrieveTargetSettings, so that throttle ops are + * applied to the right throttle actors, assuming that there can + * be only one actor system per host:port. + */ + val inj = ingestContextAddress(ctx, addr) + if (direction includes Direction.Send) inj.sender ! s + if (direction includes Direction.Receive) inj.receiver ! 
s + case null ⇒ + log.debug("sending {} in direction {} when socket {} already closed, dropping", msg, direction, ctx.getChannel) } - val addr = ChannelAddress.get(ctx.getChannel) orElse { - settings collect { case (a @ Address("akka", _, Some(`ip`), Some(`port`)), _) ⇒ a } headOption - } orElse { - // only if raw IP failed, try with hostname - val name = ipaddr.getHostName - if (name == ip) None - else settings collect { case (a @ Address("akka", _, Some(`name`), Some(`port`)), _) ⇒ a } headOption - } getOrElse Address("akka", "", ip, port) - /* - * ^- the above last resort will not match later requests directly, but be - * picked up by retrieveTargetSettings, so that throttle ops are - * applied to the right throttle actors, assuming that there can - * be only one actor system per host:port. - */ - val inj = ingestContextAddress(ctx, addr) - if (direction includes Direction.Send) inj.sender ! s - if (direction includes Direction.Receive) inj.receiver ! s } } } From b569869b61aa64cd4d0027131697d1a336c03a34 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 12:10:17 +0200 Subject: [PATCH 151/538] Use better sort order of members. 
See #2133 --- .../src/main/scala/akka/cluster/Cluster.scala | 12 +++++++- .../test/scala/akka/cluster/MemberSpec.scala | 30 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 76e3356143..55f9967596 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -100,7 +100,17 @@ class Member(val address: Address, val status: MemberStatus) extends ClusterMess object Member { import MemberStatus._ - implicit val ordering = Ordering.fromLessThan[Member](_.address.toString < _.address.toString) + implicit val addressOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) ⇒ + if (a.protocol < b.protocol) true + else if (a.system < b.system) true + else if (a.host.getOrElse("") < b.host.getOrElse("")) true + else if (a.port.getOrElse(0) < b.port.getOrElse(0)) true + else false + } + + implicit val ordering: Ordering[Member] = new Ordering[Member] { + def compare(x: Member, y: Member) = addressOrdering.compare(x.address, y.address) + } def apply(address: Address, status: MemberStatus): Member = new Member(address, status) diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala new file mode 100644 index 0000000000..a75ead0149 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala @@ -0,0 +1,30 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import akka.actor.Address +import scala.util.Random + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class MemberSpec extends WordSpec with MustMatchers { + + "Member" must { + + "be sorted by address correctly" in { + import Member.ordering + val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) + val m2 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) + val m3 = Member(Address("cluster", "sys1", "host1", 10000), MemberStatus.Up) + val m4 = Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) + val m5 = Member(Address("cluster", "sys2", "host2", 10000), MemberStatus.Up) + + val expected = IndexedSeq(m1, m2, m3, m4, m5) + val shuffled = Random.shuffle(expected) + shuffled.sorted must be(expected) + } + } +} From 829783f359ffb6b1a093d3b4dcb4a5bc07b9ea91 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 12:10:37 +0200 Subject: [PATCH 152/538] Remove port awareness in asserts of leader and members. 
See #2133 * Extracted common parts to MultiNodeClusterSpec --- .../akka/cluster/JoinTwoClustersSpec.scala | 27 +++----- .../MembershipChangeListenerSpec.scala | 13 +--- .../akka/cluster/MultiNodeClusterSpec.scala | 63 +++++++++++++++++++ .../akka/cluster/NodeMembershipSpec.scala | 26 ++------ .../scala/akka/cluster/NodeStartupSpec.scala | 12 +--- 5 files changed, 80 insertions(+), 61 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 9ed003944f..c15af5e651 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -18,13 +18,7 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { val c1 = role("c1") val c2 = role("c2") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster { - gossip-frequency = 200 ms - leader-actions-frequency = 200 ms - periodic-tasks-initial-delay = 300 ms - } - """))) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -35,13 +29,11 @@ class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec -abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with ImplicitSender with BeforeAndAfter { +abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { import JoinTwoClustersMultiJvmSpec._ override def initialParticipants = 6 - def cluster: Cluster = Cluster(system) - after { testConductor.enter("after") } @@ -50,12 +42,6 @@ abstract class JoinTwoClustersSpec extends 
MultiNodeSpec(JoinTwoClustersMultiJvm val b1Address = node(b1).address val c1Address = node(c1).address - def awaitUpConvergence(numberOfMembers: Int): Unit = { - awaitCond(cluster.latestGossip.members.size == numberOfMembers) - awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) - awaitCond(cluster.convergence.isDefined) - } - "Three different clusters (A, B and C)" must { "be able to 'elect' a single leader after joining (A -> B)" in { @@ -72,7 +58,9 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm awaitUpConvergence(numberOfMembers = 2) - cluster.isLeader must be(ifNode(a1, b1, c1)(true)(false)) + assertLeader(a1, a2) + assertLeader(b1, b2) + assertLeader(c1, c2) runOn(b2) { cluster.join(a1Address) @@ -82,7 +70,8 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm awaitUpConvergence(numberOfMembers = 4) } - cluster.isLeader must be(ifNode(a1, c1)(true)(false)) + assertLeader(a1, a2, b1, b2) + assertLeader(c1, c2) } @@ -94,7 +83,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm awaitUpConvergence(numberOfMembers = 6) - cluster.isLeader must be(ifNode(a1)(true)(false)) + assertLeader(a1, a2, b1, b2, c1, c2) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index c5ae2f11e7..844d2803a1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -16,13 +16,7 @@ object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster { - gossip-frequency = 200 ms - leader-actions-frequency = 200 ms - periodic-tasks-initial-delay = 300 ms - } - 
"""))) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -30,13 +24,12 @@ class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec -abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) with ImplicitSender with BeforeAndAfter { +abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { import MembershipChangeListenerMultiJvmSpec._ override def initialParticipants = 3 - def cluster: Cluster = Cluster(system) - after { testConductor.enter("after") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala new file mode 100644 index 0000000000..873d819dbb --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -0,0 +1,63 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.Config +import com.typesafe.config.ConfigFactory +import akka.actor.Address +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeSpec +import akka.util.duration._ + +object MultiNodeClusterSpec { + def clusterConfig: Config = ConfigFactory.parseString(""" + akka.cluster { + gossip-frequency = 200 ms + leader-actions-frequency = 200 ms + periodic-tasks-initial-delay = 300 ms + } + akka.test { + single-expect-default = 5 s + } + """) +} + +trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ + + def cluster: Cluster = Cluster(system) + + /** + * Assert that the member addresses match the expected addresses in the + * sort order used by the cluster. 
+ */ + def assertMembers(gotMembers: Iterable[Member], expectedAddresses: Address*): Unit = { + import Member.addressOrdering + val members = gotMembers.toIndexedSeq + members.size must be(expectedAddresses.length) + expectedAddresses.sorted.zipWithIndex.foreach { case (a, i) ⇒ members(i).address must be(a) } + } + + /** + * Assert that the cluster has elected the correct leader + * out of all nodes in the cluster. First + * member in the cluster ring is expected leader. + */ + def assertLeader(nodesInCluster: RoleName*): Unit = if (nodesInCluster.contains(mySelf)) { + nodesInCluster.length must not be (0) + import Member.addressOrdering + val expectedLeader = nodesInCluster.map(role ⇒ (role, node(role).address)).sortBy(_._2).head._1 + cluster.isLeader must be(ifNode(expectedLeader)(true)(false)) + } + + /** + * Wait until the expected number of members has status Up + * and convergence has been reached. + */ + def awaitUpConvergence(numberOfMembers: Int): Unit = { + awaitCond(cluster.latestGossip.members.size == numberOfMembers) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitCond(cluster.convergence.isDefined, 10 seconds) + } + +} \ No newline at end of file diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index a8af644fe0..b5dc5d4d42 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -14,13 +14,7 @@ object NodeMembershipMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster { - gossip-frequency = 200 ms - leader-actions-frequency = 200 ms - periodic-tasks-initial-delay = 300 ms - } - """))) + commonConfig(debugConfig(on = 
false).withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -28,13 +22,11 @@ class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec -abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with ImplicitSender with BeforeAndAfter { +abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { import NodeMembershipMultiJvmSpec._ override def initialParticipants = 3 - def cluster: Cluster = Cluster(system) - after { testConductor.enter("after") } @@ -50,11 +42,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp runOn(first, second) { cluster.join(firstAddress) awaitCond(cluster.latestGossip.members.size == 2) - val members = cluster.latestGossip.members.toIndexedSeq - members.size must be(2) - val sortedAddresses = IndexedSeq(firstAddress, secondAddress).sortBy(_.toString) - members(0).address must be(sortedAddresses(0)) - members(1).address must be(sortedAddresses(1)) + assertMembers(cluster.latestGossip.members, firstAddress, secondAddress) awaitCond { cluster.latestGossip.members.forall(_.status == MemberStatus.Up) } @@ -69,14 +57,8 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp cluster.join(firstAddress) } - // runOn all awaitCond(cluster.latestGossip.members.size == 3) - val members = cluster.latestGossip.members.toIndexedSeq - members.size must be(3) - val sortedAddresses = IndexedSeq(firstAddress, secondAddress, thirdAddress).sortBy(_.toString) - members(0).address must be(sortedAddresses(0)) - members(1).address must be(sortedAddresses(1)) - members(2).address must be(sortedAddresses(2)) + assertMembers(cluster.latestGossip.members, firstAddress, secondAddress, thirdAddress) awaitCond { cluster.latestGossip.members.forall(_.status == 
MemberStatus.Up) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index f2206f8d89..55a0b15f63 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -13,26 +13,18 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster { - gossip-frequency = 200 ms - leader-actions-frequency = 200 ms - periodic-tasks-initial-delay = 300 ms - } - """))) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } class NodeStartupMultiJvmNode1 extends NodeStartupSpec class NodeStartupMultiJvmNode2 extends NodeStartupSpec -abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with ImplicitSender with BeforeAndAfter { +abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { import NodeStartupMultiJvmSpec._ override def initialParticipants = 2 - def cluster: Cluster = Cluster(system) - after { testConductor.enter("after") } From 34915063cda510ba06baba744af1c3ee2bfcaf22 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 12:51:37 +0200 Subject: [PATCH 153/538] Correct sort. 
See #2133 --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 8 ++++---- akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 55f9967596..259f487358 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -101,10 +101,10 @@ object Member { import MemberStatus._ implicit val addressOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) ⇒ - if (a.protocol < b.protocol) true - else if (a.system < b.system) true - else if (a.host.getOrElse("") < b.host.getOrElse("")) true - else if (a.port.getOrElse(0) < b.port.getOrElse(0)) true + if (a.protocol != b.protocol) a.protocol.compareTo(b.protocol) < 0 + else if (a.system != b.system) a.system.compareTo(b.system) < 0 + else if (a.host.getOrElse("") != b.host.getOrElse("")) a.host.getOrElse("").compareTo(b.host.getOrElse("")) < 0 + else if (a.port.getOrElse(0) != b.port.getOrElse(0)) a.port.getOrElse(0) < b.port.getOrElse(0) else false } diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala index a75ead0149..ba1037b8bc 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala @@ -21,8 +21,9 @@ class MemberSpec extends WordSpec with MustMatchers { val m3 = Member(Address("cluster", "sys1", "host1", 10000), MemberStatus.Up) val m4 = Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) val m5 = Member(Address("cluster", "sys2", "host2", 10000), MemberStatus.Up) + val m6 = Member(Address("cluster", "sys2", "host3", 8000), MemberStatus.Up) - val expected = IndexedSeq(m1, m2, m3, m4, m5) + val expected = IndexedSeq(m1, m2, m3, m4, m5, m6) val shuffled = Random.shuffle(expected) 
shuffled.sorted must be(expected) } From dc17bba62fd08a4b3e100735f0b23e6a51be358f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 25 May 2012 14:33:28 +0200 Subject: [PATCH 154/538] Removing warning from serialization.rst by adding whitespace, thanks reST. --- akka-docs/java/serialization.rst | 4 ++-- akka-docs/scala/serialization.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index c352b6e1ae..d9aff609d8 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -181,7 +181,7 @@ which might contain actor references. External Akka Serializers ========================= -`Akka-protostuff by Roman Levenstein`_ +`Akka-protostuff by Roman Levenstein `_ -`Akka-quickser by Roman Levenstein`_ +`Akka-quickser by Roman Levenstein `_ diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index c1c2c16a8b..404847affc 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -188,7 +188,7 @@ which might contain actor references. 
External Akka Serializers ========================= -`Akka-protostuff by Roman Levenstein`_ +`Akka-protostuff by Roman Levenstein `_ -`Akka-quickser by Roman Levenstein`_ +`Akka-quickser by Roman Levenstein `_ From 24f6406634b39e350df4e2ee3efb080b9c91d7c3 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 25 May 2012 14:42:22 +0200 Subject: [PATCH 155/538] fix specs2 samples so they system.shutdown --- akka-docs/java/typed-actors.rst | 2 +- .../code/docs/testkit/Specs2DemoAcceptance.scala | 8 +++++--- .../scala/code/docs/testkit/Specs2DemoSpec.scala | 11 ++++++----- akka-docs/scala/testing.rst | 14 +++++++++----- akka-docs/scala/typed-actors.rst | 2 +- 5 files changed, 22 insertions(+), 15 deletions(-) diff --git a/akka-docs/java/typed-actors.rst b/akka-docs/java/typed-actors.rst index 7712622dfe..90bdc5616c 100644 --- a/akka-docs/java/typed-actors.rst +++ b/akka-docs/java/typed-actors.rst @@ -163,7 +163,7 @@ Typed Actor Hierarchies Since you can obtain a contextual Typed Actor Extension by passing in an ``ActorContext`` you can create child Typed Actors by invoking ``typedActorOf(..)`` on that. -.. includecode:: code/akka/docs/actor/TypedActorDocTestBase.java +.. 
includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-hierarchy You can also create a child Typed Actor in regular Akka Actors by giving the ``UntypedActorContext`` diff --git a/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala b/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala index a3edb6a093..ab8bac9bf3 100644 --- a/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala +++ b/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala @@ -1,7 +1,7 @@ package docs.testkit -import org.specs2._ -import org.specs2.specification.Scope +import org.specs2.Specification +import org.specs2.specification.{ Step, Scope } import akka.actor.{ Props, ActorSystem, Actor } import akka.testkit.{ TestKit, ImplicitSender } @@ -13,10 +13,12 @@ class Specs2DemoAcceptance extends Specification { p ^ "A TestKit should" ^ "work properly with Specs2 acceptance tests" ! e1 ^ - "correctly convert durations" ! e2 + "correctly convert durations" ! e2 ^ + Step(system.shutdown()) ^ end // do not forget to shutdown! 
val system = ActorSystem() + // an alternative to mixing in NoTimeConversions implicit def d2d(d: org.specs2.time.Duration): akka.util.FiniteDuration = akka.util.Duration(d.inMilliseconds, "millis") diff --git a/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala b/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala index efe7b6088e..a620c5139b 100644 --- a/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala +++ b/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala @@ -2,20 +2,19 @@ package docs.testkit import org.specs2.mutable.Specification import org.specs2.specification.Scope +import org.specs2.time.NoTimeConversions import akka.actor.{ Props, ActorSystem, Actor } import akka.testkit.{ TestKit, ImplicitSender } +import akka.util.duration._ -class Specs2DemoUnitSpec extends Specification { +class Specs2DemoUnitSpec extends Specification with NoTimeConversions { val system = ActorSystem() - implicit def d2d(d: org.specs2.time.Duration): akka.util.FiniteDuration = - akka.util.Duration(d.inMilliseconds, "millis") - /* * this is needed if different test cases would clash when run concurrently, - * e.g. when creating specifically named top-level actors + * e.g. when creating specifically named top-level actors; leave out otherwise */ sequential @@ -31,4 +30,6 @@ class Specs2DemoUnitSpec extends Specification { } } } + + step(system.shutdown) // do not forget to shutdown! } diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index ac27655342..a98ee14917 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -684,11 +684,15 @@ Some `Specs2 `_ users have contributed examples of how to wor with :class:`org.specs2.specification.Scope`. * The Specification traits provide a :class:`Duration` DSL which uses partly the same method names as :class:`akka.util.Duration`, resulting in ambiguous - implicits if ``akka.util.duration._`` is imported. 
The work-around is to use - the Specification variants and supply an implicit conversion to the Akka - Duration. This conversion is not supplied with the Akka distribution because - that would mean that our JAR files would dependon Specs2, which is not - justified by this little feature. + implicits if ``akka.util.duration._`` is imported. There are two work-arounds: + + * either use the Specification variant of Duration and supply an implicit + conversion to the Akka Duration. This conversion is not supplied with the + Akka distribution because that would mean that our JAR files would dependon + Specs2, which is not justified by this little feature. + + * or mix :class:`org.specs2.time.NoTimeConversions` into the Specification. + * Specifications are by default executed concurrently, which requires some care when writing the tests or alternatively the ``sequential`` keyword. diff --git a/akka-docs/scala/typed-actors.rst b/akka-docs/scala/typed-actors.rst index 7c039a1db6..349b574888 100644 --- a/akka-docs/scala/typed-actors.rst +++ b/akka-docs/scala/typed-actors.rst @@ -163,7 +163,7 @@ Typed Actor Hierarchies Since you can obtain a contextual Typed Actor Extension by passing in an ``ActorContext`` you can create child Typed Actors by invoking ``typedActorOf(..)`` on that: -.. includecode:: code/akka/docs/actor/TypedActorDocSpec.scala +.. includecode:: code/docs/actor/TypedActorDocSpec.scala :include: typed-actor-hierarchy You can also create a child Typed Actor in regular Akka Actors by giving the ``ActorContext`` From a2cd84e0470cd6e1076d652238cd5a7bcc80a6e4 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 12:59:14 +0200 Subject: [PATCH 156/538] Sort on host and port only. 
See #2133 --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 9 +++++---- .../src/test/scala/akka/cluster/MemberSpec.scala | 10 +++++----- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 259f487358..d55882205c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -100,11 +100,12 @@ class Member(val address: Address, val status: MemberStatus) extends ClusterMess object Member { import MemberStatus._ + /** + * Sort Address by host and port + */ implicit val addressOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) ⇒ - if (a.protocol != b.protocol) a.protocol.compareTo(b.protocol) < 0 - else if (a.system != b.system) a.system.compareTo(b.system) < 0 - else if (a.host.getOrElse("") != b.host.getOrElse("")) a.host.getOrElse("").compareTo(b.host.getOrElse("")) < 0 - else if (a.port.getOrElse(0) != b.port.getOrElse(0)) a.port.getOrElse(0) < b.port.getOrElse(0) + if (a.host != b.host) a.host.getOrElse("").compareTo(b.host.getOrElse("")) < 0 + else if (a.port != b.port) a.port.getOrElse(0) < b.port.getOrElse(0) else false } diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala index ba1037b8bc..050407577e 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala @@ -16,14 +16,14 @@ class MemberSpec extends WordSpec with MustMatchers { "be sorted by address correctly" in { import Member.ordering + // sorting should be done on host and port, only val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) val m2 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) - val m3 = Member(Address("cluster", "sys1", "host1", 10000), MemberStatus.Up) - val m4 = 
Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) - val m5 = Member(Address("cluster", "sys2", "host2", 10000), MemberStatus.Up) - val m6 = Member(Address("cluster", "sys2", "host3", 8000), MemberStatus.Up) + val m3 = Member(Address("cluster", "sys2", "host2", 8000), MemberStatus.Up) + val m4 = Member(Address("cluster", "sys2", "host2", 9000), MemberStatus.Up) + val m5 = Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) - val expected = IndexedSeq(m1, m2, m3, m4, m5, m6) + val expected = IndexedSeq(m1, m2, m3, m4, m5) val shuffled = Random.shuffle(expected) shuffled.sorted must be(expected) } From bd6b46d665979af48822534e68c0525893decb17 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 25 May 2012 15:11:00 +0200 Subject: [PATCH 157/538] improve logging of NoStackTrace exceptions --- .../src/main/scala/akka/event/Logging.scala | 6 ++++-- .../akka/remote/testconductor/Conductor.scala | 17 +++++++++++------ .../akka/remote/testconductor/BarrierSpec.scala | 6 +++--- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 6e6f92ad0d..b91509ac9f 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -648,7 +648,7 @@ object Logging { import java.util.Date private val dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss.SSS") - private val errorFormat = "[ERROR] [%s] [%s] [%s] %s\n%s".intern + private val errorFormat = "[ERROR] [%s] [%s] [%s] %s%s".intern private val errorFormatWithoutCause = "[ERROR] [%s] [%s] [%s] %s".intern private val warningFormat = "[WARN] [%s] [%s] [%s] %s".intern private val infoFormat = "[INFO] [%s] [%s] [%s] %s".intern @@ -728,10 +728,12 @@ object Logging { * Returns the StackTrace for the given Throwable as a String */ def stackTraceFor(e: Throwable): String = e match { - case null | Error.NoCause | _: NoStackTrace ⇒ "" + 
case null | Error.NoCause ⇒ "" + case _: NoStackTrace ⇒ " (" + e.getClass.getName + ")" case other ⇒ val sw = new java.io.StringWriter val pw = new java.io.PrintWriter(sw) + pw.append('\n') other.printStackTrace(pw) sw.toString } diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index d46f682d58..8fa8eeff21 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -468,11 +468,16 @@ private[akka] object BarrierCoordinator { override def toString = productPrefix + productIterator.mkString("(", ", ", ")") } - case class BarrierTimeout(data: Data) extends RuntimeException(data.barrier) with NoStackTrace with Printer - case class DuplicateNode(data: Data, node: Controller.NodeInfo) extends RuntimeException with NoStackTrace with Printer - case class WrongBarrier(barrier: String, client: ActorRef, data: Data) extends RuntimeException(barrier) with NoStackTrace with Printer + case class BarrierTimeout(data: Data) + extends RuntimeException("timeout while waiting for barrier '" + data.barrier + "'") with NoStackTrace with Printer + case class DuplicateNode(data: Data, node: Controller.NodeInfo) + extends RuntimeException(node.toString) with NoStackTrace with Printer + case class WrongBarrier(barrier: String, client: ActorRef, data: Data) + extends RuntimeException(data.clients.find(_.fsm == client).map(_.name.toString).getOrElse(client.toString) + + " tried to enter '" + barrier + "' while we were waiting for '" + data.barrier + "'") with NoStackTrace with Printer case class BarrierEmpty(data: Data, msg: String) extends RuntimeException(msg) with NoStackTrace with Printer - case class ClientLost(data: Data, client: RoleName) extends RuntimeException with NoStackTrace with Printer + case class ClientLost(data: Data, client: RoleName) + extends 
RuntimeException("unannounced disconnect of " + client) with NoStackTrace with Printer } /** @@ -506,7 +511,7 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor if (clients.find(_.name == n.name).isDefined) throw new DuplicateNode(d, n) stay using d.copy(clients = clients + n) case Event(ClientDisconnected(name), d @ Data(clients, _, arrived)) ⇒ - if (clients.isEmpty) throw BarrierEmpty(d, "no client to disconnect") + if (clients.isEmpty) throw BarrierEmpty(d, "cannot disconnect " + name + ": no client to disconnect") (clients find (_.name == name)) match { case None ⇒ stay case Some(c) ⇒ throw ClientLost(d.copy(clients = clients - c, arrived = arrived filterNot (_ == c.fsm)), name) @@ -524,7 +529,7 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor else goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil) case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒ - if (clients.isEmpty) throw BarrierEmpty(d, "no client to remove") + if (clients.isEmpty) throw BarrierEmpty(d, "cannot remove " + name + ": no client to remove") stay using d.copy(clients = clients filterNot (_.name == name)) } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index e0fd5dfb97..b8bce31708 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -54,7 +54,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[BarrierEmpty](occurrences = 1) intercept { b ! 
RemoveClient(A) } - expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to remove"))) + expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "cannot remove RoleName(a): no client to remove"))) } "register clients and disconnect them" in { @@ -68,7 +68,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[BarrierEmpty](occurrences = 1) intercept { b ! ClientDisconnected(A) } - expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "no client to disconnect"))) + expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "cannot disconnect RoleName(a): no client to disconnect"))) } "fail entering barrier when nobody registered" in { @@ -187,7 +187,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[BarrierEmpty](occurrences = 1) intercept { barrier ! RemoveClient(A) } - expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "no client to remove"))) + expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "cannot remove RoleName(a): no client to remove"))) barrier ! 
NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("right")) a.expectMsg(ToClient(BarrierResult("right", false))) From 17d54f1ae8c984066c06edcc733b940be0248d56 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 15:45:13 +0200 Subject: [PATCH 158/538] Tag as LongRunningTest --- .../multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala | 6 +++--- .../scala/akka/cluster/MembershipChangeListenerSpec.scala | 7 +++---- .../multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala | 4 ++-- .../src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala | 6 +++--- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index c15af5e651..4bbe703405 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -8,7 +8,7 @@ import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec -import akka.testkit.ImplicitSender +import akka.testkit._ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { val a1 = role("a1") @@ -44,7 +44,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm "Three different clusters (A, B and C)" must { - "be able to 'elect' a single leader after joining (A -> B)" in { + "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { runOn(a1, a2) { cluster.join(a1Address) @@ -75,7 +75,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm } - "be able to 'elect' a single leader after joining (C -> A + B)" in { + "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { runOn(b2) { cluster.join(c1Address) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 844d2803a1..64019c102c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -8,8 +8,7 @@ import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec -import akka.testkit.ImplicitSender -import akka.testkit.TestLatch +import akka.testkit._ object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { val first = role("first") @@ -39,7 +38,7 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan val firstAddress = node(first).address val secondAddress = node(second).address - "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { + "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { runOn(first, second) { cluster.join(firstAddress) @@ -56,7 +55,7 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan } - "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" in { + "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { runOn(third) { cluster.join(firstAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index b5dc5d4d42..f96265ac5a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -37,7 +37,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp "A set of connected cluster systems" must { - "(when two systems) start gossiping to each other so that both systems gets the same gossip info" in { + "(when two systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { runOn(first, second) { cluster.join(firstAddress) @@ -51,7 +51,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp } - "(when three systems) start gossiping to each other so that both systems gets the same gossip info" in { + "(when three systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { runOn(third) { cluster.join(firstAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 55a0b15f63..65cd7891a9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -34,7 +34,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - "be a singleton cluster when started up" in { + "be a singleton cluster when started up" taggedAs LongRunningTest in { runOn(first) { awaitCond(cluster.isSingletonCluster) // FIXME #2117 singletonCluster should reach convergence @@ -42,7 +42,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi } } - "be in 'Joining' phase when started up" in { + "be in 'Joining' phase when started up" taggedAs LongRunningTest in { runOn(first) { val members = cluster.latestGossip.members members.size must be(1) @@ -55,7 +55,7 @@ abstract class NodeStartupSpec 
extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi } "A second cluster node" must { - "join the other node cluster when sending a Join command" in { + "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { runOn(second) { cluster.join(firstAddress) From 2abe5308dabf452885eaad2c1b63c3ce34774dfe Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 17:04:21 +0200 Subject: [PATCH 159/538] Commented out the cluster tests because it's not possible to exclude them right now --- .../akka/cluster/JoinTwoClustersSpec.scala | 180 +++++++++--------- .../MembershipChangeListenerSpec.scala | 154 +++++++-------- .../akka/cluster/NodeMembershipSpec.scala | 140 +++++++------- .../scala/akka/cluster/NodeStartupSpec.scala | 148 +++++++------- 4 files changed, 311 insertions(+), 311 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 4bbe703405..87129a7a7c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -1,90 +1,90 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster - -import org.scalatest.BeforeAndAfter -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { - val a1 = role("a1") - val a2 = role("a2") - val b1 = role("b1") - val b2 = role("b2") - val c1 = role("c1") - val c2 = role("c2") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) - -} - -class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec - -abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { - import JoinTwoClustersMultiJvmSpec._ - - override def initialParticipants = 6 - - after { - testConductor.enter("after") - } - - val a1Address = node(a1).address - val b1Address = node(b1).address - val c1Address = node(c1).address - - "Three different clusters (A, B and C)" must { - - "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { - - runOn(a1, a2) { - cluster.join(a1Address) - } - runOn(b1, b2) { - cluster.join(b1Address) - } - runOn(c1, c2) { - cluster.join(c1Address) - } - - awaitUpConvergence(numberOfMembers = 2) - - assertLeader(a1, a2) - assertLeader(b1, b2) - assertLeader(c1, c2) - - runOn(b2) { - cluster.join(a1Address) - } - - runOn(a1, a2, b1, b2) { - awaitUpConvergence(numberOfMembers = 4) - } - - assertLeader(a1, a2, b1, b2) - assertLeader(c1, c2) - - } - - "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { - - runOn(b2) { - 
cluster.join(c1Address) - } - - awaitUpConvergence(numberOfMembers = 6) - - assertLeader(a1, a2, b1, b2, c1, c2) - } - } - -} +///** +// * Copyright (C) 2009-2012 Typesafe Inc. +// */ +// +//package akka.cluster +// +//import org.scalatest.BeforeAndAfter +//import com.typesafe.config.ConfigFactory +//import akka.remote.testkit.MultiNodeConfig +//import akka.remote.testkit.MultiNodeSpec +//import akka.testkit._ +// +//object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { +// val a1 = role("a1") +// val a2 = role("a2") +// val b1 = role("b1") +// val b2 = role("b2") +// val c1 = role("c1") +// val c2 = role("c2") +// +// commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +// +//} +// +//class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec +//class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec +//class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec +//class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec +//class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec +//class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec +// +//abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +// import JoinTwoClustersMultiJvmSpec._ +// +// override def initialParticipants = 6 +// +// after { +// testConductor.enter("after") +// } +// +// val a1Address = node(a1).address +// val b1Address = node(b1).address +// val c1Address = node(c1).address +// +// "Three different clusters (A, B and C)" must { +// +// "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { +// +// runOn(a1, a2) { +// cluster.join(a1Address) +// } +// runOn(b1, b2) { +// cluster.join(b1Address) +// } +// runOn(c1, c2) { +// cluster.join(c1Address) +// } +// +// awaitUpConvergence(numberOfMembers = 2) +// +// assertLeader(a1, a2) +// assertLeader(b1, b2) +// assertLeader(c1, c2) +// 
+// runOn(b2) { +// cluster.join(a1Address) +// } +// +// runOn(a1, a2, b1, b2) { +// awaitUpConvergence(numberOfMembers = 4) +// } +// +// assertLeader(a1, a2, b1, b2) +// assertLeader(c1, c2) +// +// } +// +// "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { +// +// runOn(b2) { +// cluster.join(c1Address) +// } +// +// awaitUpConvergence(numberOfMembers = 6) +// +// assertLeader(a1, a2, b1, b2, c1, c2) +// } +// } +// +//} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 64019c102c..6bb0f556d5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -1,77 +1,77 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - val third = role("third") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) - -} - -class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec -class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec -class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec - -abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { - import MembershipChangeListenerMultiJvmSpec._ - - override def initialParticipants = 3 - - after { - testConductor.enter("after") 
- } - - "A set of connected cluster systems" must { - - val firstAddress = node(first).address - val secondAddress = node(second).address - - "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - - runOn(first, second) { - cluster.join(firstAddress) - val latch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) - latch.countDown() - } - }) - latch.await - cluster.convergence.isDefined must be(true) - } - - } - - "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - - runOn(third) { - cluster.join(firstAddress) - } - - val latch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) - latch.countDown() - } - }) - latch.await - cluster.convergence.isDefined must be(true) - - } - } - -} +///** +// * Copyright (C) 2009-2012 Typesafe Inc. 
+// */ +//package akka.cluster +// +//import scala.collection.immutable.SortedSet +//import org.scalatest.BeforeAndAfter +//import com.typesafe.config.ConfigFactory +//import akka.remote.testkit.MultiNodeConfig +//import akka.remote.testkit.MultiNodeSpec +//import akka.testkit._ +// +//object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { +// val first = role("first") +// val second = role("second") +// val third = role("third") +// +// commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +// +//} +// +//class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec +//class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec +//class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec +// +//abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) +// with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +// import MembershipChangeListenerMultiJvmSpec._ +// +// override def initialParticipants = 3 +// +// after { +// testConductor.enter("after") +// } +// +// "A set of connected cluster systems" must { +// +// val firstAddress = node(first).address +// val secondAddress = node(second).address +// +// "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { +// +// runOn(first, second) { +// cluster.join(firstAddress) +// val latch = TestLatch() +// cluster.registerListener(new MembershipChangeListener { +// def notify(members: SortedSet[Member]) { +// if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) +// latch.countDown() +// } +// }) +// latch.await +// cluster.convergence.isDefined must be(true) +// } +// +// } +// +// "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs 
LongRunningTest in { +// +// runOn(third) { +// cluster.join(firstAddress) +// } +// +// val latch = TestLatch() +// cluster.registerListener(new MembershipChangeListener { +// def notify(members: SortedSet[Member]) { +// if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) +// latch.countDown() +// } +// }) +// latch.await +// cluster.convergence.isDefined must be(true) +// +// } +// } +// +//} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index f96265ac5a..21defd1d97 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -1,70 +1,70 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object NodeMembershipMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - val third = role("third") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) - -} - -class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec -class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec -class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec - -abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { - import NodeMembershipMultiJvmSpec._ - - override def initialParticipants = 3 - - after { - testConductor.enter("after") - } - - val firstAddress = node(first).address - val secondAddress = node(second).address - val thirdAddress = node(third).address - - "A set of connected cluster systems" must { - - "(when two systems) start gossiping to each other so that 
both systems gets the same gossip info" taggedAs LongRunningTest in { - - runOn(first, second) { - cluster.join(firstAddress) - awaitCond(cluster.latestGossip.members.size == 2) - assertMembers(cluster.latestGossip.members, firstAddress, secondAddress) - awaitCond { - cluster.latestGossip.members.forall(_.status == MemberStatus.Up) - } - awaitCond(cluster.convergence.isDefined) - } - - } - - "(when three systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { - - runOn(third) { - cluster.join(firstAddress) - } - - awaitCond(cluster.latestGossip.members.size == 3) - assertMembers(cluster.latestGossip.members, firstAddress, secondAddress, thirdAddress) - awaitCond { - cluster.latestGossip.members.forall(_.status == MemberStatus.Up) - } - awaitCond(cluster.convergence.isDefined) - - } - } - -} +///** +// * Copyright (C) 2009-2012 Typesafe Inc. +// */ +//package akka.cluster +// +//import com.typesafe.config.ConfigFactory +//import org.scalatest.BeforeAndAfter +//import akka.remote.testkit.MultiNodeConfig +//import akka.remote.testkit.MultiNodeSpec +//import akka.testkit._ +// +//object NodeMembershipMultiJvmSpec extends MultiNodeConfig { +// val first = role("first") +// val second = role("second") +// val third = role("third") +// +// commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +// +//} +// +//class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec +//class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec +//class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec +// +//abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +// import NodeMembershipMultiJvmSpec._ +// +// override def initialParticipants = 3 +// +// after { +// testConductor.enter("after") +// } +// +// val firstAddress = node(first).address +// val secondAddress = node(second).address 
+// val thirdAddress = node(third).address +// +// "A set of connected cluster systems" must { +// +// "(when two systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { +// +// runOn(first, second) { +// cluster.join(firstAddress) +// awaitCond(cluster.latestGossip.members.size == 2) +// assertMembers(cluster.latestGossip.members, firstAddress, secondAddress) +// awaitCond { +// cluster.latestGossip.members.forall(_.status == MemberStatus.Up) +// } +// awaitCond(cluster.convergence.isDefined) +// } +// +// } +// +// "(when three systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { +// +// runOn(third) { +// cluster.join(firstAddress) +// } +// +// awaitCond(cluster.latestGossip.members.size == 3) +// assertMembers(cluster.latestGossip.members, firstAddress, secondAddress, thirdAddress) +// awaitCond { +// cluster.latestGossip.members.forall(_.status == MemberStatus.Up) +// } +// awaitCond(cluster.convergence.isDefined) +// +// } +// } +// +//} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 65cd7891a9..ff4c06215d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -1,74 +1,74 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.cluster - -import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object NodeStartupMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) - -} - -class NodeStartupMultiJvmNode1 extends NodeStartupSpec -class NodeStartupMultiJvmNode2 extends NodeStartupSpec - -abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { - import NodeStartupMultiJvmSpec._ - - override def initialParticipants = 2 - - after { - testConductor.enter("after") - } - - val firstAddress = node(first).address - val secondAddress = node(second).address - - "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - - "be a singleton cluster when started up" taggedAs LongRunningTest in { - runOn(first) { - awaitCond(cluster.isSingletonCluster) - // FIXME #2117 singletonCluster should reach convergence - //awaitCond(cluster.convergence.isDefined) - } - } - - "be in 'Joining' phase when started up" taggedAs LongRunningTest in { - runOn(first) { - val members = cluster.latestGossip.members - members.size must be(1) - - val joiningMember = members find (_.address == firstAddress) - joiningMember must not be (None) - joiningMember.get.status must be(MemberStatus.Joining) - } - } - } - - "A second cluster node" must { - "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { - - runOn(second) { - cluster.join(firstAddress) - } - - awaitCond { - cluster.latestGossip.members.exists { member ⇒ - member.address == secondAddress && member.status == MemberStatus.Up - } - } - cluster.latestGossip.members.size must be(2) - awaitCond(cluster.convergence.isDefined) - } - } 
- -} +///** +// * Copyright (C) 2009-2012 Typesafe Inc. +// */ +//package akka.cluster +// +//import com.typesafe.config.ConfigFactory +//import org.scalatest.BeforeAndAfter +//import akka.remote.testkit.MultiNodeConfig +//import akka.remote.testkit.MultiNodeSpec +//import akka.testkit._ +// +//object NodeStartupMultiJvmSpec extends MultiNodeConfig { +// val first = role("first") +// val second = role("second") +// +// commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +// +//} +// +//class NodeStartupMultiJvmNode1 extends NodeStartupSpec +//class NodeStartupMultiJvmNode2 extends NodeStartupSpec +// +//abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +// import NodeStartupMultiJvmSpec._ +// +// override def initialParticipants = 2 +// +// after { +// testConductor.enter("after") +// } +// +// val firstAddress = node(first).address +// val secondAddress = node(second).address +// +// "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { +// +// "be a singleton cluster when started up" taggedAs LongRunningTest in { +// runOn(first) { +// awaitCond(cluster.isSingletonCluster) +// // FIXME #2117 singletonCluster should reach convergence +// //awaitCond(cluster.convergence.isDefined) +// } +// } +// +// "be in 'Joining' phase when started up" taggedAs LongRunningTest in { +// runOn(first) { +// val members = cluster.latestGossip.members +// members.size must be(1) +// +// val joiningMember = members find (_.address == firstAddress) +// joiningMember must not be (None) +// joiningMember.get.status must be(MemberStatus.Joining) +// } +// } +// } +// +// "A second cluster node" must { +// "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { +// +// runOn(second) { +// cluster.join(firstAddress) +// } +// +// awaitCond { +// cluster.latestGossip.members.exists { 
member ⇒ +// member.address == secondAddress && member.status == MemberStatus.Up +// } +// } +// cluster.latestGossip.members.size must be(2) +// awaitCond(cluster.convergence.isDefined) +// } +// } +// +//} From 7cd1d38eb1d8acb07ea9a1dc2312925d57f82023 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 25 May 2012 17:15:35 +0200 Subject: [PATCH 160/538] #2130 - Fixing unforunate naming in DiningHakkers --- .../src/main/scala/DiningHakkersOnBecome.scala | 8 +++----- .../src/main/scala/DiningHakkersOnFsm.scala | 8 +++----- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala index b82699ebe4..65d7d7c23c 100644 --- a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala @@ -131,17 +131,15 @@ class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor { object DiningHakkers { val system = ActorSystem() - def main(args: Array[String]): Unit = { - run - } + def main(args: Array[String]): Unit = run def run { //Create 5 chopsticks - val chopsticks = for (i ← 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick " + i) + val chopsticks = for (i ← 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick" + i) //Create 5 awesome hakkers and assign them their left and right chopstick val hakkers = for { - (name, i) ← List("Ghosh", "Bonér", "Klang", "Krasser", "Manie").zipWithIndex + (name, i) ← List("Ghosh", "Boner", "Klang", "Krasser", "Manie").zipWithIndex } yield system.actorOf(Props(new Hakker(name, chopsticks(i), chopsticks((i + 1) % 5)))) //Signal all hakkers that they should start thinking, and watch the show diff --git a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala index 52ed49797a..7928a85334 100644 --- 
a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala @@ -169,16 +169,14 @@ object DiningHakkersOnFsm { val system = ActorSystem() - def main(args: Array[String]): Unit = { - run - } + def main(args: Array[String]): Unit = run def run = { // Create 5 chopsticks - val chopsticks = for (i ← 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick " + i) + val chopsticks = for (i ← 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick" + i) // Create 5 awesome fsm hakkers and assign them their left and right chopstick val hakkers = for { - (name, i) ← List("Ghosh", "Bonér", "Klang", "Krasser", "Manie").zipWithIndex + (name, i) ← List("Ghosh", "Boner", "Klang", "Krasser", "Manie").zipWithIndex } yield system.actorOf(Props(new FSMHakker(name, chopsticks(i), chopsticks((i + 1) % 5)))) hakkers.foreach(_ ! Think) From 52875d3586cedeb8a6816781ce705dedbed45701 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 25 May 2012 17:26:04 +0200 Subject: [PATCH 161/538] Revert "Removed ResizerSpec test violating routing rules" This reverts commit 162d59db35b680594bcbdae56934807697371845. 
--- .../test/scala/akka/routing/ResizerSpec.scala | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index ede4a69d7c..111460e3ac 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -128,6 +128,36 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with current.routees.size must be(2) } + // FIXME this test violates the rule that you can not use a BalancingDispatcher with any kind of Router - now throws a ConfigurationException in verification process + "resize when busy" ignore { + + val busy = new TestLatch(1) + + val resizer = DefaultResizer( + lowerBound = 1, + upperBound = 3, + pressureThreshold = 0, + messagesPerResize = 1) + + val router = system.actorOf(Props[BusyActor].withRouter(RoundRobinRouter(resizer = Some(resizer))).withDispatcher("bal-disp")) + + val latch1 = new TestLatch(1) + router ! (latch1, busy) + Await.ready(latch1, 2 seconds) + + val latch2 = new TestLatch(1) + router ! (latch2, busy) + Await.ready(latch2, 2 seconds) + + val latch3 = new TestLatch(1) + router ! (latch3, busy) + Await.ready(latch3, 2 seconds) + + Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3) + + busy.countDown() + } + "grow as needed under pressure" in { // make sure the pool starts at the expected lower limit and grows to the upper as needed // as influenced by the backlog of blocking pooled actors From c16bd740731c19950e3d8588321dc58f2f169b0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 25 May 2012 17:27:24 +0200 Subject: [PATCH 162/538] Added more logging to Cluster's who to gossip selection process. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 76e3356143..e7d672d051 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -795,6 +795,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * @return 'true' if it gossiped to a "deputy" member. */ private def gossipToRandomNodeOf(addresses: Iterable[Address]): Boolean = { + log.debug("Cluster Node [{}] - Selecting random node to gossip to [{}]", remoteAddress, addresses.mkString(", ")) if (addresses.isEmpty) false else { val peers = addresses filter (_ != remoteAddress) // filter out myself From d99b1cd7f00d0d091b91195226369fb0e798c1bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 25 May 2012 17:29:29 +0200 Subject: [PATCH 163/538] Rewritten old in-memory ClientDowningSpec into multi-node specs. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split it up into two different specs: - ClientDowningNodeThatIsUnreachableMultiJvmSpec - ClientDowningNodeThatIsUpMultiJvmSpec Signed-off-by: Jonas Bonér --- ...ientDowningNodeThatIsUnreachableSpec.scala | 111 ++++++++++++++ .../ClientDowningNodeThatIsUpSpec.scala | 108 +++++++++++++ .../akka/cluster/ClientDowningSpec.scala | 145 ------------------ 3 files changed, 219 insertions(+), 145 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/ClientDowningSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala new file mode 100644 index 0000000000..8e02420050 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -0,0 +1,111 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import akka.actor.Address + +object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + val waitForConvergence = 20 seconds + + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka { + #loglevel = "DEBUG" + #stdout-loglevel = "DEBUG" + cluster { + gossip-frequency = 100 ms + leader-actions-frequency = 100 ms + periodic-tasks-initial-delay = 300 ms + auto-down = off + } + } + """))) +} + +class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec +class ClientDowningNodeThatIsUnreachableMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec +class ClientDowningNodeThatIsUnreachableMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec +class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec + +class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) with ImplicitSender with BeforeAndAfter { + import ClientDowningNodeThatIsUnreachableMultiJvmSpec._ + + override def initialParticipants = 4 + + def node = Cluster(system) + + def assertMemberRing(nrOfMembers: Int, canNotBePartOfRing: Seq[Address] = Seq.empty[Address]): Unit = { + awaitCond(node.latestGossip.members.size == nrOfMembers, waitForConvergence) + awaitCond(node.latestGossip.members.forall(_.status == MemberStatus.Up), waitForConvergence) + awaitCond(canNotBePartOfRing forall (address => !(node.latestGossip.members exists (_.address == address))), waitForConvergence) + } + + "Client of a 4 node cluster" must { + + "be able to DOWN a node that 
is UNREACHABLE (killed)" taggedAs LongRunningTest in { + runOn(first) { + node.self + assertMemberRing(nrOfMembers = 4) + testConductor.enter("all-up") + + val thirdAddress = node(third).address + + // kill 'third' node + testConductor.shutdown(third, 0) + testConductor.removeNode(third) + + // mark 'third' node as DOWN + node.down(thirdAddress) + testConductor.enter("down-third-node") + + assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + node.latestGossip.members.exists(_.address == thirdAddress) must be(false) + testConductor.enter("await-completion") + } + + runOn(second) { + node.join(node(first).address) + + assertMemberRing(nrOfMembers = 4) + testConductor.enter("all-up") + + val thirdAddress = node(third).address + testConductor.enter("down-third-node") + + assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + testConductor.enter("await-completion") + } + + runOn(third) { + node.join(node(first).address) + + assertMemberRing(nrOfMembers = 4) + testConductor.enter("all-up") + } + + runOn(fourth) { + node.join(node(first).address) + + assertMemberRing(nrOfMembers = 4) + testConductor.enter("all-up") + + val thirdAddress = node(third).address + testConductor.enter("down-third-node") + + assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + testConductor.enter("await-completion") + } + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala new file mode 100644 index 0000000000..52d37a4ed3 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -0,0 +1,108 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import akka.actor.Address + +object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + val waitForConvergence = 20 seconds + + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka { + #loglevel = "DEBUG" + #stdout-loglevel = "DEBUG" + cluster { + gossip-frequency = 100 ms + leader-actions-frequency = 100 ms + periodic-tasks-initial-delay = 300 ms + auto-down = off + } + } + """))) +} + +class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec +class ClientDowningNodeThatIsUpMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec +class ClientDowningNodeThatIsUpMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec +class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec + +class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) with ImplicitSender with BeforeAndAfter { + import ClientDowningNodeThatIsUpMultiJvmSpec._ + + override def initialParticipants = 4 + + def node = Cluster(system) + + def assertMemberRing(nrOfMembers: Int, canNotBePartOfRing: Seq[Address] = Seq.empty[Address]): Unit = { + awaitCond(node.latestGossip.members.size == nrOfMembers, waitForConvergence) + awaitCond(node.latestGossip.members.forall(_.status == MemberStatus.Up), waitForConvergence) + awaitCond(canNotBePartOfRing forall (address => !(node.latestGossip.members exists (_.address.port == address.port))), waitForConvergence) + } + + "Client of a 4 node cluster" must { + + "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { + runOn(first) { + node.self + 
assertMemberRing(nrOfMembers = 4) + testConductor.enter("all-up") + + val thirdAddress = node(third).address + + // mark 'third' node as DOWN + testConductor.removeNode(third) + node.down(thirdAddress) + testConductor.enter("down-third-node") + + assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + node.latestGossip.members.exists(_.address == thirdAddress) must be(false) + testConductor.enter("await-completion") + } + + runOn(second) { + node.join(node(first).address) + + assertMemberRing(nrOfMembers = 4) + testConductor.enter("all-up") + + val thirdAddress = node(third).address + testConductor.enter("down-third-node") + + assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + testConductor.enter("await-completion") + } + + runOn(third) { + node.join(node(first).address) + + assertMemberRing(nrOfMembers = 4) + testConductor.enter("all-up") + } + + runOn(fourth) { + node.join(node(first).address) + + assertMemberRing(nrOfMembers = 4) + testConductor.enter("all-up") + + val thirdAddress = node(third).address + testConductor.enter("down-third-node") + + assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + testConductor.enter("await-completion") + } + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClientDowningSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClientDowningSpec.scala deleted file mode 100644 index 0e7b0ed330..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/ClientDowningSpec.scala +++ /dev/null @@ -1,145 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster - -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ -import akka.util.duration._ - -import com.typesafe.config._ - -import java.net.InetSocketAddress - -class ClientDowningSpec extends ClusterSpec("akka.cluster.auto-down = off") with ImplicitSender { - val portPrefix = 1 - - var node1: Cluster = _ - var node2: Cluster = _ - var node3: Cluster = _ - var node4: Cluster = _ - - var system1: ActorSystemImpl = _ - var system2: ActorSystemImpl = _ - var system3: ActorSystemImpl = _ - var system4: ActorSystemImpl = _ - - try { - "Client of a 4 node cluster" must { - - // ======= NODE 1 ======== - system1 = ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d550 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider] - node1 = Cluster(system1) - val fd1 = node1.failureDetector - val address1 = node1.remoteAddress - - // ======= NODE 2 ======== - system2 = ActorSystem("system2", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d551 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider] - node2 = Cluster(system2) - val fd2 = node2.failureDetector - val address2 = node2.remoteAddress - - // ======= NODE 3 ======== - system3 = ActorSystem("system3", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d552 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val 
remote3 = system3.provider.asInstanceOf[RemoteActorRefProvider] - node3 = Cluster(system3) - val fd3 = node3.failureDetector - val address3 = node3.remoteAddress - - // ======= NODE 4 ======== - system4 = ActorSystem("system4", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d553 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote4 = system4.provider.asInstanceOf[RemoteActorRefProvider] - node4 = Cluster(system4) - val fd4 = node4.failureDetector - val address4 = node4.remoteAddress - - "be able to DOWN a node that is UP" taggedAs LongRunningTest in { - println("Give the system time to converge...") - awaitConvergence(node1 :: node2 :: node3 :: node4 :: Nil) - - node3.shutdown() - system3.shutdown() - - // client marks node3 as DOWN - node1.down(address3) - - println("Give the system time to converge...") - Thread.sleep(10.seconds.dilated.toMillis) - awaitConvergence(node1 :: node2 :: node4 :: Nil) - - node1.latestGossip.members.size must be(3) - node1.latestGossip.members.exists(_.address == address3) must be(false) - } - - "be able to DOWN a node that is UNREACHABLE" taggedAs LongRunningTest in { - node4.shutdown() - system4.shutdown() - - // clien marks node4 as DOWN - node2.down(address4) - - println("Give the system time to converge...") - Thread.sleep(10.seconds.dilated.toMillis) - awaitConvergence(node1 :: node2 :: Nil) - - node1.latestGossip.members.size must be(2) - node1.latestGossip.members.exists(_.address == address4) must be(false) - node1.latestGossip.members.exists(_.address == address3) must be(false) - } - } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) - } - - override def atTermination() { - if (node1 ne null) node1.shutdown() - if (system1 ne null) system1.shutdown() - - if (node2 ne null) node2.shutdown() - if 
(system2 ne null) system2.shutdown() - - if (node3 ne null) node3.shutdown() - if (system3 ne null) system3.shutdown() - - if (node4 ne null) node4.shutdown() - if (system4 ne null) system4.shutdown() - } -} From 021d7fcfeb9901e48c29f200a06d6923f3ac8f2d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 25 May 2012 17:42:12 +0200 Subject: [PATCH 164/538] #2136 - Making it possible to use balancing dispatcher for routees but not routers, changing the docs and updating the tests --- .../actor/ActorConfigurationVerificationSpec.scala | 10 +++++----- akka-actor/src/main/scala/akka/routing/Routing.scala | 2 +- akka-docs/java/dispatchers.rst | 2 +- akka-docs/java/routing.rst | 4 ++-- akka-docs/scala/dispatchers.rst | 4 ++-- akka-docs/scala/routing.rst | 4 ++-- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala index bedf51f083..5752bd7806 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala @@ -42,27 +42,27 @@ class ActorConfigurationVerificationSpec extends AkkaSpec(ActorConfigurationVeri "An Actor configured with a BalancingDispatcher" must { "fail verification with a ConfigurationException if also configured with a RoundRobinRouter" in { intercept[ConfigurationException] { - system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(RoundRobinRouter(2))) + system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(2).withDispatcher("balancing-dispatcher"))) } } "fail verification with a ConfigurationException if also configured with a BroadcastRouter" in { intercept[ConfigurationException] { - system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(BroadcastRouter(2))) + 
system.actorOf(Props[TestActor].withRouter(BroadcastRouter(2).withDispatcher("balancing-dispatcher"))) } } "fail verification with a ConfigurationException if also configured with a RandomRouter" in { intercept[ConfigurationException] { - system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(RandomRouter(2))) + system.actorOf(Props[TestActor].withRouter(RandomRouter(2).withDispatcher("balancing-dispatcher"))) } } "fail verification with a ConfigurationException if also configured with a SmallestMailboxRouter" in { intercept[ConfigurationException] { - system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(SmallestMailboxRouter(2))) + system.actorOf(Props[TestActor].withRouter(SmallestMailboxRouter(2).withDispatcher("balancing-dispatcher"))) } } "fail verification with a ConfigurationException if also configured with a ScatterGatherFirstCompletedRouter" in { intercept[ConfigurationException] { - system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher").withRouter(ScatterGatherFirstCompletedRouter(nrOfInstances = 2, within = 2 seconds))) + system.actorOf(Props[TestActor].withRouter(ScatterGatherFirstCompletedRouter(nrOfInstances = 2, within = 2 seconds).withDispatcher("balancing-dispatcher"))) } } "not fail verification with a ConfigurationException also not configured with a Router" in { diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 94eed672f4..e60e46c247 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -30,7 +30,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _path) { // verify that a BalancingDispatcher is not used with a Router - if (_system.dispatchers.isBalancingDispatcher(_props.dispatcher) && _props.routerConfig != NoRouter) + if (_props.routerConfig != NoRouter && 
_system.dispatchers.isBalancingDispatcher(_props.routerConfig.routerDispatcher)) throw new ConfigurationException( "Configuration for actor [" + _path.toString + "] is invalid - you can not use a 'BalancingDispatcher' together with any type of 'Router'") diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index f7e0db9c3c..9260fc11e5 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -85,7 +85,7 @@ There are 4 different types of message dispatchers: "thread-pool-executor" or the FQCN of an ``akka.dispatcher.ExecutorServiceConfigurator`` - - Note that you can **not** use a ``BalancingDispatcher`` together with any kind of ``Router``, trying to do so will make your actor fail verification. + - Note that you can **not** use a ``BalancingDispatcher`` as a **Router Dispatcher**. (You can however use it for the **Routees**) * CallingThreadDispatcher diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index a55b41c43d..38cf3a1a80 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -380,8 +380,8 @@ The dispatcher for created children of the router will be taken from makes sense to configure the :class:`BalancingDispatcher` if the precise routing is not so important (i.e. no consistent hashing or round-robin is required); this enables newly created routees to pick up work immediately by -stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` -together with any kind of ``Router``, trying to do so will make your actor fail verification. +stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` as a **Router Dispatcher**. 
+(You can however use it for the **Routees**) The “head” router, of course, cannot run on the same balancing dispatcher, because it does not process the same messages, hence this special actor does diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index 478136e428..100b882b5b 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -86,7 +86,7 @@ There are 4 different types of message dispatchers: "thread-pool-executor" or the FQCN of an ``akka.dispatcher.ExecutorServiceConfigurator`` - - Note that you can **not** use a ``BalancingDispatcher`` together with any kind of ``Router``, trying to do so will make your actor fail verification. + - Note that you can **not** use a ``BalancingDispatcher`` as a **Router Dispatcher**. (You can however use it for the **Routees**) * CallingThreadDispatcher @@ -114,7 +114,7 @@ And then using it: .. includecode:: ../scala/code/docs/dispatcher/DispatcherDocSpec.scala#defining-pinned-dispatcher -Note that ``thread-pool-executor`` configuration as per the above ``my-thread-pool-dispatcher`` exmaple is +Note that ``thread-pool-executor`` configuration as per the above ``my-thread-pool-dispatcher`` example is NOT applicable. This is because every actor will have its own thread pool when using ``PinnedDispatcher``, and that pool will have only one thread. diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 4d434b2cab..25f582e085 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -380,8 +380,8 @@ The dispatcher for created children of the router will be taken from makes sense to configure the :class:`BalancingDispatcher` if the precise routing is not so important (i.e. no consistent hashing or round-robin is required); this enables newly created routees to pick up work immediately by -stealing it from their siblings. 
Note that you can **not** use a ``BalancingDispatcher`` -together with any kind of ``Router``, trying to do so will make your actor fail verification. +stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` as a **Router Dispatcher**. +(You can however use it for the **Routees**) .. note:: From ea99a1f315e716da9782ec86b10c96800fc75196 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 25 May 2012 17:43:03 +0200 Subject: [PATCH 165/538] Simplified config and removed old too-long timeout. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...ientDowningNodeThatIsUnreachableSpec.scala | 22 +++++++------------ .../ClientDowningNodeThatIsUpSpec.scala | 22 +++++++------------ 2 files changed, 16 insertions(+), 28 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 8e02420050..95510a701d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -17,18 +17,12 @@ object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - val waitForConvergence = 20 seconds - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka { - #loglevel = "DEBUG" - #stdout-loglevel = "DEBUG" - cluster { - gossip-frequency = 100 ms - leader-actions-frequency = 100 ms - periodic-tasks-initial-delay = 300 ms - auto-down = off - } + akka.cluster { + gossip-frequency = 100 ms + leader-actions-frequency = 100 ms + periodic-tasks-initial-delay = 300 ms + auto-down = off } """))) } @@ -46,9 +40,9 @@ class ClientDowningNodeThatIsUnreachableSpec extends 
MultiNodeSpec(ClientDowning def node = Cluster(system) def assertMemberRing(nrOfMembers: Int, canNotBePartOfRing: Seq[Address] = Seq.empty[Address]): Unit = { - awaitCond(node.latestGossip.members.size == nrOfMembers, waitForConvergence) - awaitCond(node.latestGossip.members.forall(_.status == MemberStatus.Up), waitForConvergence) - awaitCond(canNotBePartOfRing forall (address => !(node.latestGossip.members exists (_.address == address))), waitForConvergence) + awaitCond(node.latestGossip.members.size == nrOfMembers) + awaitCond(node.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitCond(canNotBePartOfRing forall (address => !(node.latestGossip.members exists (_.address == address)))) } "Client of a 4 node cluster" must { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 52d37a4ed3..b92a45f2e4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -17,18 +17,12 @@ object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - val waitForConvergence = 20 seconds - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka { - #loglevel = "DEBUG" - #stdout-loglevel = "DEBUG" - cluster { - gossip-frequency = 100 ms - leader-actions-frequency = 100 ms - periodic-tasks-initial-delay = 300 ms - auto-down = off - } + akka.cluster { + gossip-frequency = 100 ms + leader-actions-frequency = 100 ms + periodic-tasks-initial-delay = 300 ms + auto-down = off } """))) } @@ -46,9 +40,9 @@ class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatI def node = Cluster(system) def assertMemberRing(nrOfMembers: Int, canNotBePartOfRing: Seq[Address] = Seq.empty[Address]): Unit = { - 
awaitCond(node.latestGossip.members.size == nrOfMembers, waitForConvergence) - awaitCond(node.latestGossip.members.forall(_.status == MemberStatus.Up), waitForConvergence) - awaitCond(canNotBePartOfRing forall (address => !(node.latestGossip.members exists (_.address.port == address.port))), waitForConvergence) + awaitCond(node.latestGossip.members.size == nrOfMembers) + awaitCond(node.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitCond(canNotBePartOfRing forall (address => !(node.latestGossip.members exists (_.address.port == address.port)))) } "Client of a 4 node cluster" must { From 8b9a6d85e7144716884e37d5c8f98927f6e95061 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 25 May 2012 17:51:36 +0200 Subject: [PATCH 166/538] Removing dead FIXME --- akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 111460e3ac..0a87273d61 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -128,7 +128,6 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with current.routees.size must be(2) } - // FIXME this test violates the rule that you can not use a BalancingDispatcher with any kind of Router - now throws a ConfigurationException in verification process "resize when busy" ignore { val busy = new TestLatch(1) From 178c9145feeca4b282358749443309023f74656f Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 25 May 2012 17:55:25 +0200 Subject: [PATCH 167/538] document creator requirement, see #2131 --- akka-actor/src/main/scala/akka/actor/Props.scala | 6 ++++++ akka-actor/src/main/scala/akka/japi/JavaAPI.scala | 3 +++ akka-docs/general/supervision.rst | 2 ++ akka-docs/java/untyped-actors.rst | 6 ++++++ akka-docs/scala/actors.rst | 7 +++++++ 5 files 
changed, 24 insertions(+) diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 292a437dab..f6552179c3 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -87,6 +87,8 @@ object Props { * Props is a ActorRef configuration object, that is thread safe and fully sharable. * Used when creating new actors through; ActorSystem.actorOf and ActorContext.actorOf. * + * In case of providing code which creates the actual Actor instance, that must not return the same instance multiple times. + * * Examples on Scala API: * {{{ * val props = Props[MyActor] @@ -144,6 +146,8 @@ case class Props( /** * Returns a new Props with the specified creator set. + * + * The creator must not return the same instance multiple times. * * Scala API. */ @@ -152,6 +156,8 @@ case class Props( /** * Returns a new Props with the specified creator set. * + * The creator must not return the same instance multiple times. + * * Java API. */ def withCreator(c: Creator[Actor]): Props = copy(creator = () ⇒ c.create) diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index 5bd38ad52a..b0db141aee 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -38,6 +38,9 @@ trait Effect { * A constructor/factory, takes no parameters but creates a new value of type T every call. */ trait Creator[T] { + /** + * This method must return a different instance upon every call. + */ def create(): T } diff --git a/akka-docs/general/supervision.rst b/akka-docs/general/supervision.rst index fef3a585dd..c1bc684ce4 100644 --- a/akka-docs/general/supervision.rst +++ b/akka-docs/general/supervision.rst @@ -55,6 +55,8 @@ actors cannot be orphaned or attached to supervisors from the outside, which might otherwise catch them unawares. 
In addition, this yields a natural and clean shutdown procedure for (sub-trees of) actor applications. +.. _supervision-restart: + What Restarting Means --------------------- diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index d7c99199ed..9ee8d5ba47 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -115,6 +115,12 @@ Here is an example: This way of creating the Actor is also great for integrating with Dependency Injection (DI) frameworks like Guice or Spring. +.. warning:: + + You might be tempted at times to offer an ``UntypedActor`` factory which + always returns the same instance, e.g. by using a static field. This is not + supported, as it works against the meaning of an actor restart, which is + described here: :ref:`supervision-restart`. UntypedActor API ================ diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 92c335120a..66b775aa0f 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -105,6 +105,13 @@ Here is an example: .. includecode:: code/docs/actor/ActorDocSpec.scala#creating-constructor +.. warning:: + + You might be tempted at times to offer an ``Actor`` factory which always + returns the same instance, e.g. by using a ``lazy val`` or an + ``object ... extends Actor``. This is not supported, as it works against the + meaning of an actor restart, which is described here: + :ref:`supervision-restart`. 
Props ----- From e748da6f492dcc750c51a752220d388fee5f1e36 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 25 May 2012 18:11:47 +0200 Subject: [PATCH 168/538] improve one word, see #2131 --- akka-docs/java/untyped-actors.rst | 2 +- akka-docs/scala/actors.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index 9ee8d5ba47..7df286d7f7 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -119,7 +119,7 @@ This way of creating the Actor is also great for integrating with Dependency Inj You might be tempted at times to offer an ``UntypedActor`` factory which always returns the same instance, e.g. by using a static field. This is not - supported, as it works against the meaning of an actor restart, which is + supported, as it goes against the meaning of an actor restart, which is described here: :ref:`supervision-restart`. UntypedActor API diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 66b775aa0f..291d06e567 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -109,7 +109,7 @@ Here is an example: You might be tempted at times to offer an ``Actor`` factory which always returns the same instance, e.g. by using a ``lazy val`` or an - ``object ... extends Actor``. This is not supported, as it works against the + ``object ... extends Actor``. This is not supported, as it goes against the meaning of an actor restart, which is described here: :ref:`supervision-restart`. From 57313cc9e0cbbc0bde9a2c7f33bc3c21b46a8eb7 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 13:03:57 +0200 Subject: [PATCH 169/538] Move LeaderElectionSpec to multi-jvm. 
See #2113 --- .../scala/akka/cluster/LeaderElectionSpec.scala | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename akka-cluster/src/{test => multi-jvm}/scala/akka/cluster/LeaderElectionSpec.scala (100%) diff --git a/akka-cluster/src/test/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala similarity index 100% rename from akka-cluster/src/test/scala/akka/cluster/LeaderElectionSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala From 597271052f9c650ec8c7df5cc8318ccfd0be4018 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 25 May 2012 14:48:00 +0200 Subject: [PATCH 170/538] Port LeaderElectionSpec to MultiNodeSpec. See #2113 --- .../akka/cluster/LeaderElectionSpec.scala | 212 ++++++++---------- .../akka/cluster/MultiNodeClusterSpec.scala | 20 +- 2 files changed, 110 insertions(+), 122 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index c262fad8c3..56cfbee75d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -4,128 +4,100 @@ package akka.cluster +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ -import akka.util.duration._ -import com.typesafe.config._ +object LeaderElectionMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val forth = role("forth") -import java.net.InetSocketAddress + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster.auto-down = off + """)). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class LeaderElectionMultiJvmNode1 extends LeaderElectionSpec +class LeaderElectionMultiJvmNode2 extends LeaderElectionSpec +class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec +class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec + +abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSpec) with MultiNodeClusterSpec { + import LeaderElectionMultiJvmSpec._ + + override def initialParticipants = 4 + + val firstAddress = node(first).address + val myAddress = node(mySelf).address + + // sorted in the order used by the cluster + val roles = Seq(first, second, third, forth).sorted + + "A cluster of three nodes" must { + + "be able to 'elect' a single leader" in { + // make sure that the first cluster is started before other join + runOn(first) { + cluster + } + testConductor.enter("first-started") + + cluster.join(firstAddress) + awaitUpConvergence(numberOfMembers = 4) + cluster.isLeader must be(mySelf == roles.head) + testConductor.enter("after") + } + + def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { + val currentRoles = roles.drop(alreadyShutdown) + currentRoles.size must be >= (2) + + runOn(currentRoles.head) { + cluster.shutdown() + testConductor.enter("after-shutdown") + testConductor.enter("after-down") + } + + // runOn previously shutdown cluster nodes + if ((roles diff currentRoles).contains(mySelf)) { + testConductor.enter("after-shutdown") + testConductor.enter("after-down") + } + + // runOn remaining cluster nodes + if (currentRoles.tail.contains(mySelf)) { + + testConductor.enter("after-shutdown") + + runOn(currentRoles.last) { + // user marks the shutdown leader as DOWN + val leaderAddress = node(currentRoles.head).address + cluster.down(leaderAddress) + } + + testConductor.enter("after-down") + + awaitUpConvergence(currentRoles.size - 1) + val nextExpectedLeader = currentRoles.tail.head + cluster.isLeader must be(mySelf == 
nextExpectedLeader) + } + + testConductor.enter("after") + } + + "be able to 're-elect' a single leader after leader has left" in { + shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 0) + } + + "be able to 're-elect' a single leader after leader has left (again)" in { + shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 1) + } + } -class LeaderElectionSpec extends ClusterSpec with ImplicitSender { - val portPrefix = 5 - - var node1: Cluster = _ - var node2: Cluster = _ - var node3: Cluster = _ - - var system1: ActorSystemImpl = _ - var system2: ActorSystemImpl = _ - var system3: ActorSystemImpl = _ - - try { - "A cluster of three nodes" must { - - // ======= NODE 1 ======== - system1 = ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d550 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node1 = Cluster(system1) - val address1 = node1.remoteAddress - - // ======= NODE 2 ======== - system2 = ActorSystem("system2", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d551 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node2 = Cluster(system2) - val address2 = node2.remoteAddress - - // ======= NODE 3 ======== - system3 = ActorSystem("system3", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d552 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - node3 = Cluster(system3) - val address3 = node3.remoteAddress - - "be able to 'elect' a single leader" taggedAs LongRunningTest in { - - println("Give the system time to converge...") - 
awaitConvergence(node1 :: node2 :: node3 :: Nil) - - // check leader - node1.isLeader must be(true) - node2.isLeader must be(false) - node3.isLeader must be(false) - } - - "be able to 're-elect' a single leader after leader has left" taggedAs LongRunningTest in { - - // shut down system1 - the leader - node1.shutdown() - system1.shutdown() - - // user marks node1 as DOWN - node2.down(address1) - - println("Give the system time to converge...") - Thread.sleep(10.seconds.dilated.toMillis) - awaitConvergence(node2 :: node3 :: Nil) - - // check leader - node2.isLeader must be(true) - node3.isLeader must be(false) - } - - "be able to 're-elect' a single leader after leader has left (again, leaving a single node)" taggedAs LongRunningTest in { - - // shut down system1 - the leader - node2.shutdown() - system2.shutdown() - - // user marks node2 as DOWN - node3.down(address2) - - println("Give the system time to converge...") - Thread.sleep(10.seconds.dilated.toMillis) - awaitConvergence(node3 :: Nil) - - // check leader - node3.isLeader must be(true) - } - } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) - } - - override def atTermination() { - if (node1 ne null) node1.shutdown() - if (system1 ne null) system1.shutdown() - - if (node2 ne null) node2.shutdown() - if (system2 ne null) system2.shutdown() - - if (node3 ne null) node3.shutdown() - if (system3 ne null) system3.shutdown() - } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 873d819dbb..48f1d0b520 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -45,8 +45,7 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ */ def assertLeader(nodesInCluster: RoleName*): Unit = if (nodesInCluster.contains(mySelf)) { nodesInCluster.length must not be (0) - import 
Member.addressOrdering - val expectedLeader = nodesInCluster.map(role ⇒ (role, node(role).address)).sortBy(_._2).head._1 + val expectedLeader = roleOfLeader(nodesInCluster) cluster.isLeader must be(ifNode(expectedLeader)(true)(false)) } @@ -60,4 +59,21 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ awaitCond(cluster.convergence.isDefined, 10 seconds) } + def roleOfLeader(nodesInCluster: Seq[RoleName]): RoleName = { + nodesInCluster.length must not be (0) + nodesInCluster.sorted.head + } + + /** + * Sort the roles in the order used by the cluster. + */ + implicit val clusterOrdering: Ordering[RoleName] = new Ordering[RoleName] { + import Member.addressOrdering + def compare(x: RoleName, y: RoleName) = addressOrdering.compare(node(x).address, node(y).address) + } + + def roleName(address: Address): Option[RoleName] = { + testConductor.getNodes.await.find(node(_).address == address) + } + } \ No newline at end of file From 0a09cfc893603d903269818c7f45c69a2e18a0f3 Mon Sep 17 00:00:00 2001 From: Roland Date: Sun, 27 May 2012 14:18:35 +0200 Subject: [PATCH 171/538] kick off pattern collection --- .../src/main/scala/akka/actor/Props.scala | 2 +- .../java/code/docs/pattern/JavaTemplate.java | 18 ++++++++++ akka-docs/java/howto.rst | 33 +++++++++++++++++++ akka-docs/java/index.rst | 1 + .../code/docs/pattern/ScalaTemplate.scala | 16 +++++++++ akka-docs/scala/howto.rst | 33 +++++++++++++++++++ akka-docs/scala/index.rst | 1 + 7 files changed, 103 insertions(+), 1 deletion(-) create mode 100644 akka-docs/java/code/docs/pattern/JavaTemplate.java create mode 100644 akka-docs/java/howto.rst create mode 100644 akka-docs/scala/code/docs/pattern/ScalaTemplate.scala create mode 100644 akka-docs/scala/howto.rst diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index f6552179c3..dfd6200fd3 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -146,7 
+146,7 @@ case class Props( /** * Returns a new Props with the specified creator set. - * + * * The creator must not return the same instance multiple times. * * Scala API. diff --git a/akka-docs/java/code/docs/pattern/JavaTemplate.java b/akka-docs/java/code/docs/pattern/JavaTemplate.java new file mode 100644 index 0000000000..7e6fd175fb --- /dev/null +++ b/akka-docs/java/code/docs/pattern/JavaTemplate.java @@ -0,0 +1,18 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package docs.pattern; + +// this part will not appear in the docs + +//#all-of-it +class JavaTemplate { + public JavaTemplate() { + System.out.println("Hello, Template!"); + } + //#uninteresting-stuff + // don’t show this plumbimg + //#uninteresting-stuff +} +//#all-of-it diff --git a/akka-docs/java/howto.rst b/akka-docs/java/howto.rst new file mode 100644 index 0000000000..333fb9e498 --- /dev/null +++ b/akka-docs/java/howto.rst @@ -0,0 +1,33 @@ + +.. _howto-java: + +###################### +HowTo: Common Patterns +###################### + +This section lists common actor patterns which have been found to be useful, +elegant or instructive. Anything is welcome, example topics being message +routing strategies, supervision patterns, restart handling, etc. As a special +bonus, additions to this section are marked with the contributor’s name, and it +would be nice if every Akka user who finds a recurring pattern in his or her +code could share it for the profit of all. Where applicable it might also make +sense to add to the ``akka.pattern`` package for creating an `OTP-like library +`_. + +Template Pattern +================ + +*Contributed by: N. N.* + +This is an especially nice pattern, since it does even come with some empty example code: + +.. includecode:: code/docs/pattern/JavaTemplate.java + :include: all-of-it + :exclude: uninteresting-stuff + +.. note:: + + Spread the word: this is the easiest way to get famous! + +Please keep this pattern at the end of this file. 
+ diff --git a/akka-docs/java/index.rst b/akka-docs/java/index.rst index 981e07f869..4022092dba 100644 --- a/akka-docs/java/index.rst +++ b/akka-docs/java/index.rst @@ -24,3 +24,4 @@ Java API extending-akka zeromq microkernel + howto diff --git a/akka-docs/scala/code/docs/pattern/ScalaTemplate.scala b/akka-docs/scala/code/docs/pattern/ScalaTemplate.scala new file mode 100644 index 0000000000..beceae17b7 --- /dev/null +++ b/akka-docs/scala/code/docs/pattern/ScalaTemplate.scala @@ -0,0 +1,16 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package docs.pattern + +// this part will not appear in the docs + +//#all-of-it +class ScalaTemplate { + println("Hello, Template!") + //#uninteresting-stuff + // don’t show this plumbimg + //#uninteresting-stuff +} +//#all-of-it diff --git a/akka-docs/scala/howto.rst b/akka-docs/scala/howto.rst new file mode 100644 index 0000000000..9436480327 --- /dev/null +++ b/akka-docs/scala/howto.rst @@ -0,0 +1,33 @@ + +.. _howto-scala: + +###################### +HowTo: Common Patterns +###################### + +This section lists common actor patterns which have been found to be useful, +elegant or instructive. Anything is welcome, example topics being message +routing strategies, supervision patterns, restart handling, etc. As a special +bonus, additions to this section are marked with the contributor’s name, and it +would be nice if every Akka user who finds a recurring pattern in his or her +code could share it for the profit of all. Where applicable it might also make +sense to add to the ``akka.pattern`` package for creating an `OTP-like library +`_. + +Template Pattern +================ + +*Contributed by: N. N.* + +This is an especially nice pattern, since it does even come with some empty example code: + +.. includecode:: code/docs/pattern/ScalaTemplate.scala + :include: all-of-it + :exclude: uninteresting-stuff + +.. note:: + + Spread the word: this is the easiest way to get famous! 
+ +Please keep this pattern at the end of this file. + diff --git a/akka-docs/scala/index.rst b/akka-docs/scala/index.rst index fc1b619e26..ddceb9fcf8 100644 --- a/akka-docs/scala/index.rst +++ b/akka-docs/scala/index.rst @@ -28,3 +28,4 @@ Scala API zeromq microkernel camel + howto From a44bd10fc33e1ce4284c9d0cec79a7131466d71c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 27 May 2012 19:15:31 +0200 Subject: [PATCH 172/538] Tag as LongRunningTest. See 2113 --- .../scala/akka/cluster/LeaderElectionSpec.scala | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 56cfbee75d..007ab941dc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -33,15 +33,14 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp override def initialParticipants = 4 - val firstAddress = node(first).address - val myAddress = node(mySelf).address + lazy val firstAddress = node(first).address // sorted in the order used by the cluster - val roles = Seq(first, second, third, forth).sorted + lazy val roles = Seq(first, second, third, forth).sorted - "A cluster of three nodes" must { + "A cluster of four nodes" must { - "be able to 'elect' a single leader" in { + "be able to 'elect' a single leader" taggedAs LongRunningTest in { // make sure that the first cluster is started before other join runOn(first) { cluster @@ -91,11 +90,11 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp testConductor.enter("after") } - "be able to 're-elect' a single leader after leader has left" in { + "be able to 're-elect' a single leader after leader has left" taggedAs LongRunningTest in { shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 0) } - "be able to 
're-elect' a single leader after leader has left (again)" in { + "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in { shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 1) } } From fcaa4751b39bf3d8bfb0d3c4caa7ca81b653e188 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 27 May 2012 19:20:30 +0200 Subject: [PATCH 173/538] Pass include/exclude tags arguments to multi-jvm tests, see #2139 --- project/AkkaBuild.scala | 53 +++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 13c90ed61e..2ffe034c3c 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -85,7 +85,7 @@ object AkkaBuild extends Build { extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, - scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), + scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) @@ -101,7 +101,7 @@ object AkkaBuild extends Build { extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, - scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), + scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) @@ -118,7 +118,7 @@ object AkkaBuild extends Build { extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, - scalatestOptions in MultiJvm := 
Seq("-r", "org.scalatest.akka.QuietReporter"), + scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, jvmOptions in MultiJvm := defaultMultiJvmOptions, test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) @@ -298,7 +298,7 @@ object AkkaBuild extends Build { val defaultExcludedTags = Seq("timing", "long-running") - val defaultMultiJvmOptions: Seq[String] = { + lazy val defaultMultiJvmOptions: Seq[String] = { (System.getProperty("akka.test.timefactor") match { case null => Nil case x => List("-Dakka.test.timefactor=" + x) @@ -306,6 +306,31 @@ object AkkaBuild extends Build { (if (getBoolean("sbt.log.noformat")) List("-Dakka.test.nocolor=true") else Nil) } + // for excluding tests by name (or use system property: -Dakka.test.names.exclude=TimingSpec) + lazy val defaultExcludeTestNames: Seq[String] = { + val exclude = System.getProperty("akka.test.names.exclude", "") + if (exclude.isEmpty) Seq.empty else exclude.split(",").toSeq + } + + // for excluding tests by tag (or use system property: -Dakka.test.tags.exclude=timing) + lazy val defaultExcludeTestTags: Seq[String] = { + val exclude = System.getProperty("akka.test.tags.exclude", "") + if (exclude.isEmpty) defaultExcludedTags else exclude.split(",").toSeq + } + + // for including tests by tag (or use system property: -Dakka.test.tags.include=timing) + lazy val defaultIncludeTestTags: Seq[String] = { + val include = System.getProperty("akka.test.tags.include", "") + if (include.isEmpty) Seq.empty else include.split(",").toSeq + } + + lazy val defaultMultiJvmScalatestOptions: Seq[String] = { + val excludeTags = (defaultExcludeTestTags.toSet -- defaultIncludeTestTags.toSet).toSeq + Seq("-r", "org.scalatest.akka.QuietReporter") ++ + (if (excludeTags.isEmpty) Seq.empty else Seq("-l", excludeTags.mkString(" "))) ++ + (if (defaultIncludeTestTags.isEmpty) Seq.empty else Seq("-n", defaultIncludeTestTags.mkString(" "))) + } + lazy val defaultSettings = baseSettings ++ formatSettings ++ Seq( 
resolvers += "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/", @@ -318,23 +343,9 @@ object AkkaBuild extends Build { parallelExecution in Test := System.getProperty("akka.parallelExecution", "false").toBoolean, - // for excluding tests by name (or use system property: -Dakka.test.names.exclude=TimingSpec) - excludeTestNames := { - val exclude = System.getProperty("akka.test.names.exclude", "") - if (exclude.isEmpty) Seq.empty else exclude.split(",").toSeq - }, - - // for excluding tests by tag (or use system property: -Dakka.test.tags.exclude=timing) - excludeTestTags := { - val exclude = System.getProperty("akka.test.tags.exclude", "") - if (exclude.isEmpty) defaultExcludedTags else exclude.split(",").toSeq - }, - - // for including tests by tag (or use system property: -Dakka.test.tags.include=timing) - includeTestTags := { - val include = System.getProperty("akka.test.tags.include", "") - if (include.isEmpty) Seq.empty else include.split(",").toSeq - }, + excludeTestNames := defaultExcludeTestNames, + excludeTestTags := defaultExcludeTestTags, + includeTestTags := defaultIncludeTestTags, // add filters for tests excluded by name testOptions in Test <++= excludeTestNames map { _.map(exclude => Tests.Filter(test => !test.contains(exclude))) }, From 6993064cdeb3c2e8863e0e07a6e876a7bab5f701 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 27 May 2012 19:21:38 +0200 Subject: [PATCH 174/538] Revert "Commented out the cluster tests because it's not possible to exclude them right now" This reverts commit 2abe5308dabf452885eaad2c1b63c3ce34774dfe. 
--- .../akka/cluster/JoinTwoClustersSpec.scala | 180 +++++++++--------- .../MembershipChangeListenerSpec.scala | 154 +++++++-------- .../akka/cluster/NodeMembershipSpec.scala | 140 +++++++------- .../scala/akka/cluster/NodeStartupSpec.scala | 148 +++++++------- 4 files changed, 311 insertions(+), 311 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 87129a7a7c..4bbe703405 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -1,90 +1,90 @@ -///** -// * Copyright (C) 2009-2012 Typesafe Inc. -// */ -// -//package akka.cluster -// -//import org.scalatest.BeforeAndAfter -//import com.typesafe.config.ConfigFactory -//import akka.remote.testkit.MultiNodeConfig -//import akka.remote.testkit.MultiNodeSpec -//import akka.testkit._ -// -//object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { -// val a1 = role("a1") -// val a2 = role("a2") -// val b1 = role("b1") -// val b2 = role("b2") -// val c1 = role("c1") -// val c2 = role("c2") -// -// commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) -// -//} -// -//class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec -//class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec -//class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec -//class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec -//class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec -//class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec -// -//abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { -// import JoinTwoClustersMultiJvmSpec._ -// -// override def initialParticipants = 6 -// -// after { -// testConductor.enter("after") -// } -// -// 
val a1Address = node(a1).address -// val b1Address = node(b1).address -// val c1Address = node(c1).address -// -// "Three different clusters (A, B and C)" must { -// -// "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { -// -// runOn(a1, a2) { -// cluster.join(a1Address) -// } -// runOn(b1, b2) { -// cluster.join(b1Address) -// } -// runOn(c1, c2) { -// cluster.join(c1Address) -// } -// -// awaitUpConvergence(numberOfMembers = 2) -// -// assertLeader(a1, a2) -// assertLeader(b1, b2) -// assertLeader(c1, c2) -// -// runOn(b2) { -// cluster.join(a1Address) -// } -// -// runOn(a1, a2, b1, b2) { -// awaitUpConvergence(numberOfMembers = 4) -// } -// -// assertLeader(a1, a2, b1, b2) -// assertLeader(c1, c2) -// -// } -// -// "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { -// -// runOn(b2) { -// cluster.join(c1Address) -// } -// -// awaitUpConvergence(numberOfMembers = 6) -// -// assertLeader(a1, a2, b1, b2, c1, c2) -// } -// } -// -//} +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster + +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { + val a1 = role("a1") + val a2 = role("a2") + val b1 = role("b1") + val b2 = role("b2") + val c1 = role("c1") + val c2 = role("c2") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec + +abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import JoinTwoClustersMultiJvmSpec._ + + override def initialParticipants = 6 + + after { + testConductor.enter("after") + } + + val a1Address = node(a1).address + val b1Address = node(b1).address + val c1Address = node(c1).address + + "Three different clusters (A, B and C)" must { + + "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { + + runOn(a1, a2) { + cluster.join(a1Address) + } + runOn(b1, b2) { + cluster.join(b1Address) + } + runOn(c1, c2) { + cluster.join(c1Address) + } + + awaitUpConvergence(numberOfMembers = 2) + + assertLeader(a1, a2) + assertLeader(b1, b2) + assertLeader(c1, c2) + + runOn(b2) { + cluster.join(a1Address) + } + + runOn(a1, a2, b1, b2) { + awaitUpConvergence(numberOfMembers = 4) + } + + assertLeader(a1, a2, b1, b2) + assertLeader(c1, c2) + + } + + "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { + + runOn(b2) { + 
cluster.join(c1Address) + } + + awaitUpConvergence(numberOfMembers = 6) + + assertLeader(a1, a2, b1, b2, c1, c2) + } + } + +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 6bb0f556d5..64019c102c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -1,77 +1,77 @@ -///** -// * Copyright (C) 2009-2012 Typesafe Inc. -// */ -//package akka.cluster -// -//import scala.collection.immutable.SortedSet -//import org.scalatest.BeforeAndAfter -//import com.typesafe.config.ConfigFactory -//import akka.remote.testkit.MultiNodeConfig -//import akka.remote.testkit.MultiNodeSpec -//import akka.testkit._ -// -//object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { -// val first = role("first") -// val second = role("second") -// val third = role("third") -// -// commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) -// -//} -// -//class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec -//class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec -//class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec -// -//abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) -// with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { -// import MembershipChangeListenerMultiJvmSpec._ -// -// override def initialParticipants = 3 -// -// after { -// testConductor.enter("after") -// } -// -// "A set of connected cluster systems" must { -// -// val firstAddress = node(first).address -// val secondAddress = node(second).address -// -// "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be 
triggered" taggedAs LongRunningTest in { -// -// runOn(first, second) { -// cluster.join(firstAddress) -// val latch = TestLatch() -// cluster.registerListener(new MembershipChangeListener { -// def notify(members: SortedSet[Member]) { -// if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) -// latch.countDown() -// } -// }) -// latch.await -// cluster.convergence.isDefined must be(true) -// } -// -// } -// -// "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { -// -// runOn(third) { -// cluster.join(firstAddress) -// } -// -// val latch = TestLatch() -// cluster.registerListener(new MembershipChangeListener { -// def notify(members: SortedSet[Member]) { -// if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) -// latch.countDown() -// } -// }) -// latch.await -// cluster.convergence.isDefined must be(true) -// -// } -// } -// -//} +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec +class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec +class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec + +abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import MembershipChangeListenerMultiJvmSpec._ + + override def initialParticipants = 3 + + after { + testConductor.enter("after") + } + + "A set of connected cluster systems" must { + + val firstAddress = node(first).address + val secondAddress = node(second).address + + "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + + runOn(first, second) { + cluster.join(firstAddress) + val latch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) + latch.countDown() + } + }) + latch.await + cluster.convergence.isDefined must be(true) + } + + } + + "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + + runOn(third) { + cluster.join(firstAddress) + } + + val latch = TestLatch() + 
cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) + latch.countDown() + } + }) + latch.await + cluster.convergence.isDefined must be(true) + + } + } + +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 21defd1d97..f96265ac5a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -1,70 +1,70 @@ -///** -// * Copyright (C) 2009-2012 Typesafe Inc. -// */ -//package akka.cluster -// -//import com.typesafe.config.ConfigFactory -//import org.scalatest.BeforeAndAfter -//import akka.remote.testkit.MultiNodeConfig -//import akka.remote.testkit.MultiNodeSpec -//import akka.testkit._ -// -//object NodeMembershipMultiJvmSpec extends MultiNodeConfig { -// val first = role("first") -// val second = role("second") -// val third = role("third") -// -// commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) -// -//} -// -//class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec -//class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec -//class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec -// -//abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { -// import NodeMembershipMultiJvmSpec._ -// -// override def initialParticipants = 3 -// -// after { -// testConductor.enter("after") -// } -// -// val firstAddress = node(first).address -// val secondAddress = node(second).address -// val thirdAddress = node(third).address -// -// "A set of connected cluster systems" must { -// -// "(when two systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest 
in { -// -// runOn(first, second) { -// cluster.join(firstAddress) -// awaitCond(cluster.latestGossip.members.size == 2) -// assertMembers(cluster.latestGossip.members, firstAddress, secondAddress) -// awaitCond { -// cluster.latestGossip.members.forall(_.status == MemberStatus.Up) -// } -// awaitCond(cluster.convergence.isDefined) -// } -// -// } -// -// "(when three systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { -// -// runOn(third) { -// cluster.join(firstAddress) -// } -// -// awaitCond(cluster.latestGossip.members.size == 3) -// assertMembers(cluster.latestGossip.members, firstAddress, secondAddress, thirdAddress) -// awaitCond { -// cluster.latestGossip.members.forall(_.status == MemberStatus.Up) -// } -// awaitCond(cluster.convergence.isDefined) -// -// } -// } -// -//} +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object NodeMembershipMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec +class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec +class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec + +abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import NodeMembershipMultiJvmSpec._ + + override def initialParticipants = 3 + + after { + testConductor.enter("after") + } + + val firstAddress = node(first).address + val secondAddress = node(second).address + val thirdAddress = node(third).address + + "A set of connected cluster 
systems" must { + + "(when two systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { + + runOn(first, second) { + cluster.join(firstAddress) + awaitCond(cluster.latestGossip.members.size == 2) + assertMembers(cluster.latestGossip.members, firstAddress, secondAddress) + awaitCond { + cluster.latestGossip.members.forall(_.status == MemberStatus.Up) + } + awaitCond(cluster.convergence.isDefined) + } + + } + + "(when three systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { + + runOn(third) { + cluster.join(firstAddress) + } + + awaitCond(cluster.latestGossip.members.size == 3) + assertMembers(cluster.latestGossip.members, firstAddress, secondAddress, thirdAddress) + awaitCond { + cluster.latestGossip.members.forall(_.status == MemberStatus.Up) + } + awaitCond(cluster.convergence.isDefined) + + } + } + +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index ff4c06215d..65cd7891a9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -1,74 +1,74 @@ -///** -// * Copyright (C) 2009-2012 Typesafe Inc. 
-// */ -//package akka.cluster -// -//import com.typesafe.config.ConfigFactory -//import org.scalatest.BeforeAndAfter -//import akka.remote.testkit.MultiNodeConfig -//import akka.remote.testkit.MultiNodeSpec -//import akka.testkit._ -// -//object NodeStartupMultiJvmSpec extends MultiNodeConfig { -// val first = role("first") -// val second = role("second") -// -// commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) -// -//} -// -//class NodeStartupMultiJvmNode1 extends NodeStartupSpec -//class NodeStartupMultiJvmNode2 extends NodeStartupSpec -// -//abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { -// import NodeStartupMultiJvmSpec._ -// -// override def initialParticipants = 2 -// -// after { -// testConductor.enter("after") -// } -// -// val firstAddress = node(first).address -// val secondAddress = node(second).address -// -// "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { -// -// "be a singleton cluster when started up" taggedAs LongRunningTest in { -// runOn(first) { -// awaitCond(cluster.isSingletonCluster) -// // FIXME #2117 singletonCluster should reach convergence -// //awaitCond(cluster.convergence.isDefined) -// } -// } -// -// "be in 'Joining' phase when started up" taggedAs LongRunningTest in { -// runOn(first) { -// val members = cluster.latestGossip.members -// members.size must be(1) -// -// val joiningMember = members find (_.address == firstAddress) -// joiningMember must not be (None) -// joiningMember.get.status must be(MemberStatus.Joining) -// } -// } -// } -// -// "A second cluster node" must { -// "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { -// -// runOn(second) { -// cluster.join(firstAddress) -// } -// -// awaitCond { -// cluster.latestGossip.members.exists { member ⇒ -// member.address == secondAddress && 
member.status == MemberStatus.Up -// } -// } -// cluster.latestGossip.members.size must be(2) -// awaitCond(cluster.convergence.isDefined) -// } -// } -// -//} +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object NodeStartupMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class NodeStartupMultiJvmNode1 extends NodeStartupSpec +class NodeStartupMultiJvmNode2 extends NodeStartupSpec + +abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import NodeStartupMultiJvmSpec._ + + override def initialParticipants = 2 + + after { + testConductor.enter("after") + } + + val firstAddress = node(first).address + val secondAddress = node(second).address + + "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { + + "be a singleton cluster when started up" taggedAs LongRunningTest in { + runOn(first) { + awaitCond(cluster.isSingletonCluster) + // FIXME #2117 singletonCluster should reach convergence + //awaitCond(cluster.convergence.isDefined) + } + } + + "be in 'Joining' phase when started up" taggedAs LongRunningTest in { + runOn(first) { + val members = cluster.latestGossip.members + members.size must be(1) + + val joiningMember = members find (_.address == firstAddress) + joiningMember must not be (None) + joiningMember.get.status must be(MemberStatus.Joining) + } + } + } + + "A second cluster node" must { + "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { + + runOn(second) { + cluster.join(firstAddress) + } + + awaitCond { + 
cluster.latestGossip.members.exists { member ⇒ + member.address == secondAddress && member.status == MemberStatus.Up + } + } + cluster.latestGossip.members.size must be(2) + awaitCond(cluster.convergence.isDefined) + } + } + +} From 4786078565f7599f087d4b3605fd86e5cb0c79c9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 27 May 2012 19:46:42 +0200 Subject: [PATCH 175/538] Adjust cluster tests to not fail when excluded, see 2139 --- .../scala/akka/cluster/JoinTwoClustersSpec.scala | 11 ++++++++--- .../akka/cluster/MembershipChangeListenerSpec.scala | 12 +++++++++--- .../scala/akka/cluster/NodeMembershipSpec.scala | 12 +++++++++--- .../scala/akka/cluster/NodeStartupSpec.scala | 4 ++-- 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 4bbe703405..6a7ebcee86 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -38,13 +38,18 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm testConductor.enter("after") } - val a1Address = node(a1).address - val b1Address = node(b1).address - val c1Address = node(c1).address + lazy val a1Address = node(a1).address + lazy val b1Address = node(b1).address + lazy val c1Address = node(c1).address "Three different clusters (A, B and C)" must { "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { + // make sure that the node-to-join is started before other join + runOn(a1, b1, c1) { + cluster + } + testConductor.enter("first-started") runOn(a1, a2) { cluster.join(a1Address) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 64019c102c..dc915912ee 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -33,13 +33,19 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan testConductor.enter("after") } + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + "A set of connected cluster systems" must { - val firstAddress = node(first).address - val secondAddress = node(second).address - "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + // make sure that the node-to-join is started before other join + runOn(first) { + cluster + } + testConductor.enter("first-started") + runOn(first, second) { cluster.join(firstAddress) val latch = TestLatch() diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index f96265ac5a..232d6ca0e7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -31,14 +31,20 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp testConductor.enter("after") } - val firstAddress = node(first).address - val secondAddress = node(second).address - val thirdAddress = node(third).address + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address "A set of connected cluster systems" must { "(when two systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { + // make sure that the node-to-join is started before other join + runOn(first) { + cluster + } + testConductor.enter("first-started") + runOn(first, second) { 
cluster.join(firstAddress) awaitCond(cluster.latestGossip.members.size == 2) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 65cd7891a9..fcbcce746f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -29,8 +29,8 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi testConductor.enter("after") } - val firstAddress = node(first).address - val secondAddress = node(second).address + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { From fc56d40d9e9fa9dd2346c92d0004b4ad5fe9cdf8 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 27 May 2012 20:44:34 +0200 Subject: [PATCH 176/538] Increased some timeouts of BarrierSpec --- .../akka/remote/testconductor/BarrierSpec.scala | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index b8bce31708..f66e120195 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -84,7 +84,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! 
NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar")) noMsg(a, b) - within(1 second) { + within(2 second) { b.send(barrier, EnterBarrier("bar")) a.expectMsg(ToClient(BarrierResult("bar", true))) b.expectMsg(ToClient(BarrierResult("bar", true))) @@ -100,7 +100,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) b.send(barrier, EnterBarrier("bar")) noMsg(a, b, c) - within(1 second) { + within(2 second) { c.send(barrier, EnterBarrier("bar")) a.expectMsg(ToClient(BarrierResult("bar", true))) b.expectMsg(ToClient(BarrierResult("bar", true))) @@ -119,7 +119,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! RemoveClient(A) barrier ! ClientDisconnected(A) noMsg(a, b, c) - b.within(1 second) { + b.within(2 second) { barrier ! RemoveClient(C) b.expectMsg(ToClient(BarrierResult("bar", true))) } @@ -265,7 +265,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar")) noMsg(a, b) - within(1 second) { + within(2 second) { b.send(barrier, EnterBarrier("bar")) a.expectMsg(ToClient(BarrierResult("bar", true))) b.expectMsg(ToClient(BarrierResult("bar", true))) @@ -284,7 +284,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with c.expectMsg(ToClient(Done)) b.send(barrier, EnterBarrier("bar")) noMsg(a, b, c) - within(1 second) { + within(2 second) { c.send(barrier, EnterBarrier("bar")) a.expectMsg(ToClient(BarrierResult("bar", true))) b.expectMsg(ToClient(BarrierResult("bar", true))) @@ -306,7 +306,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! Remove(A) barrier ! ClientDisconnected(A) noMsg(a, b, c) - b.within(1 second) { + b.within(2 second) { barrier ! 
Remove(C) b.expectMsg(ToClient(BarrierResult("bar", true))) } From 50806243903f6242878a6d7206a92d92016068f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 27 May 2012 21:22:30 +0200 Subject: [PATCH 177/538] Incorporated feedback - switched to MultiNodeClusterSpec etc. --- ...ientDowningNodeThatIsUnreachableSpec.scala | 60 ++++++------------- .../ClientDowningNodeThatIsUpSpec.scala | 60 ++++++------------- .../akka/cluster/MultiNodeClusterSpec.scala | 14 ++++- 3 files changed, 47 insertions(+), 87 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 95510a701d..a80c0a3caa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -8,7 +8,6 @@ import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ import akka.actor.Address object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { @@ -17,14 +16,9 @@ object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster { - gossip-frequency = 100 ms - leader-actions-frequency = 100 ms - periodic-tasks-initial-delay = 300 ms - auto-down = off - } - """))) + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString("akka.cluster.auto-down = off")). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) } class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec @@ -32,25 +26,20 @@ class ClientDowningNodeThatIsUnreachableMultiJvmNode2 extends ClientDowningNodeT class ClientDowningNodeThatIsUnreachableMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec -class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) with ImplicitSender with BeforeAndAfter { +class ClientDowningNodeThatIsUnreachableSpec + extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender with BeforeAndAfter { import ClientDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 - def node = Cluster(system) - - def assertMemberRing(nrOfMembers: Int, canNotBePartOfRing: Seq[Address] = Seq.empty[Address]): Unit = { - awaitCond(node.latestGossip.members.size == nrOfMembers) - awaitCond(node.latestGossip.members.forall(_.status == MemberStatus.Up)) - awaitCond(canNotBePartOfRing forall (address => !(node.latestGossip.members exists (_.address == address)))) - } - "Client of a 4 node cluster" must { "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in { runOn(first) { - node.self - assertMemberRing(nrOfMembers = 4) + cluster.self + awaitUpConvergence(nrOfMembers = 4) testConductor.enter("all-up") val thirdAddress = node(third).address @@ -60,44 +49,31 @@ class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowning testConductor.removeNode(third) // mark 'third' node as DOWN - node.down(thirdAddress) + cluster.down(thirdAddress) testConductor.enter("down-third-node") - assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) - node.latestGossip.members.exists(_.address == thirdAddress) must 
be(false) - testConductor.enter("await-completion") - } - - runOn(second) { - node.join(node(first).address) - - assertMemberRing(nrOfMembers = 4) - testConductor.enter("all-up") - - val thirdAddress = node(third).address - testConductor.enter("down-third-node") - - assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) testConductor.enter("await-completion") } runOn(third) { - node.join(node(first).address) + cluster.join(node(first).address) - assertMemberRing(nrOfMembers = 4) + awaitUpConvergence(nrOfMembers = 4) testConductor.enter("all-up") } - runOn(fourth) { - node.join(node(first).address) + runOn(second, fourth) { + cluster.join(node(first).address) - assertMemberRing(nrOfMembers = 4) + awaitUpConvergence(nrOfMembers = 4) testConductor.enter("all-up") val thirdAddress = node(third).address testConductor.enter("down-third-node") - assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) testConductor.enter("await-completion") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index b92a45f2e4..adfc7aa514 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -8,7 +8,6 @@ import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ import akka.actor.Address object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { @@ -17,14 +16,9 @@ object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { val third 
= role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster { - gossip-frequency = 100 ms - leader-actions-frequency = 100 ms - periodic-tasks-initial-delay = 300 ms - auto-down = off - } - """))) + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString("akka.cluster.auto-down = off")). + withFallback(MultiNodeClusterSpec.clusterConfig)) } class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec @@ -32,69 +26,51 @@ class ClientDowningNodeThatIsUpMultiJvmNode2 extends ClientDowningNodeThatIsUpSp class ClientDowningNodeThatIsUpMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec -class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) with ImplicitSender with BeforeAndAfter { +class ClientDowningNodeThatIsUpSpec + extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender with BeforeAndAfter { import ClientDowningNodeThatIsUpMultiJvmSpec._ override def initialParticipants = 4 - def node = Cluster(system) - - def assertMemberRing(nrOfMembers: Int, canNotBePartOfRing: Seq[Address] = Seq.empty[Address]): Unit = { - awaitCond(node.latestGossip.members.size == nrOfMembers) - awaitCond(node.latestGossip.members.forall(_.status == MemberStatus.Up)) - awaitCond(canNotBePartOfRing forall (address => !(node.latestGossip.members exists (_.address.port == address.port)))) - } - "Client of a 4 node cluster" must { "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { runOn(first) { - node.self - assertMemberRing(nrOfMembers = 4) + cluster.self + awaitUpConvergence(nrOfMembers = 4) testConductor.enter("all-up") val thirdAddress = node(third).address // mark 'third' node as DOWN testConductor.removeNode(third) - node.down(thirdAddress) + 
cluster.down(thirdAddress) testConductor.enter("down-third-node") - assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) - node.latestGossip.members.exists(_.address == thirdAddress) must be(false) - testConductor.enter("await-completion") - } - - runOn(second) { - node.join(node(first).address) - - assertMemberRing(nrOfMembers = 4) - testConductor.enter("all-up") - - val thirdAddress = node(third).address - testConductor.enter("down-third-node") - - assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) testConductor.enter("await-completion") } runOn(third) { - node.join(node(first).address) + cluster.join(node(first).address) - assertMemberRing(nrOfMembers = 4) + awaitUpConvergence(nrOfMembers = 4) testConductor.enter("all-up") } - runOn(fourth) { - node.join(node(first).address) + runOn(second, fourth) { + cluster.join(node(first).address) - assertMemberRing(nrOfMembers = 4) + awaitUpConvergence(nrOfMembers = 4) testConductor.enter("all-up") val thirdAddress = node(third).address testConductor.enter("down-third-node") - assertMemberRing(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) + awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) testConductor.enter("await-completion") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 873d819dbb..cadbb7b298 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -51,8 +51,7 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ } /** - * Wait until the expected number of members has status Up - * and convergence has been reached. 
+ * Wait until the expected number of members has status Up and convergence has been reached. */ def awaitUpConvergence(numberOfMembers: Int): Unit = { awaitCond(cluster.latestGossip.members.size == numberOfMembers) @@ -60,4 +59,13 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ awaitCond(cluster.convergence.isDefined, 10 seconds) } -} \ No newline at end of file + /** + * Wait until the expected number of members has status Up and convergence has been reached. + * Also asserts that nodes in the 'canNotBePartOfRing' are *not* part of the cluster ring. + */ + def awaitUpConvergence(nrOfMembers: Int, canNotBePartOfRing: Seq[Address] = Seq.empty[Address]): Unit = { + awaitCond(cluster.latestGossip.members.size == nrOfMembers) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitCond(canNotBePartOfRing forall (address => !(cluster.latestGossip.members exists (_.address.port == address.port)))) + } +} From 4ec49f6ac1d8a63bae380262d4bc9e175073da9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 27 May 2012 21:55:33 +0200 Subject: [PATCH 178/538] Fixed indeterministic ordering bug in test --- .../cluster/ClientDowningNodeThatIsUnreachableSpec.scala | 6 +++--- .../scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala | 7 +++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index a80c0a3caa..3a4148e3f0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -40,9 +40,9 @@ class ClientDowningNodeThatIsUnreachableSpec runOn(first) { cluster.self awaitUpConvergence(nrOfMembers = 4) - testConductor.enter("all-up") val thirdAddress = node(third).address + 
testConductor.enter("all-up") // kill 'third' node testConductor.shutdown(third, 0) @@ -66,11 +66,11 @@ class ClientDowningNodeThatIsUnreachableSpec runOn(second, fourth) { cluster.join(node(first).address) - awaitUpConvergence(nrOfMembers = 4) - testConductor.enter("all-up") val thirdAddress = node(third).address + testConductor.enter("all-up") + testConductor.enter("down-third-node") awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index adfc7aa514..0f48951305 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -40,9 +40,9 @@ class ClientDowningNodeThatIsUpSpec runOn(first) { cluster.self awaitUpConvergence(nrOfMembers = 4) - testConductor.enter("all-up") val thirdAddress = node(third).address + testConductor.enter("all-up") // mark 'third' node as DOWN testConductor.removeNode(third) @@ -56,18 +56,17 @@ class ClientDowningNodeThatIsUpSpec runOn(third) { cluster.join(node(first).address) - awaitUpConvergence(nrOfMembers = 4) testConductor.enter("all-up") } runOn(second, fourth) { cluster.join(node(first).address) - awaitUpConvergence(nrOfMembers = 4) - testConductor.enter("all-up") val thirdAddress = node(third).address + testConductor.enter("all-up") + testConductor.enter("down-third-node") awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress)) From 04ba2cf4908649fbaef0c9e99b71192937f66cd8 Mon Sep 17 00:00:00 2001 From: viktorklang Date: Sun, 27 May 2012 23:18:32 +0300 Subject: [PATCH 179/538] Clarified Awaitable doc. 
--- akka-actor/src/main/scala/akka/dispatch/Future.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 54ec2d08b4..e3c7f8348c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -68,6 +68,7 @@ object Await { * WARNING: Blocking operation, use with caution. * * @throws [[java.util.concurrent.TimeoutException]] if times out + * @throws [[java.lang.Throwable]] (throws clause is Exception due to Java) if there was a problem * @return The returned value as returned by Awaitable.result */ @throws(classOf[Exception]) From c7a4aa5163028a26d25aea79b19d46426c79e443 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 27 May 2012 23:43:05 +0200 Subject: [PATCH 180/538] Fixing wording in use of BalancingDispatcher as routerDispatcher --- akka-actor/src/main/scala/akka/routing/Routing.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index e60e46c247..2f585a1790 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -33,7 +33,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup if (_props.routerConfig != NoRouter && _system.dispatchers.isBalancingDispatcher(_props.routerConfig.routerDispatcher)) throw new ConfigurationException( "Configuration for actor [" + _path.toString + - "] is invalid - you can not use a 'BalancingDispatcher' together with any type of 'Router'") + "] is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") /* * CAUTION: RoutedActorRef is PROBLEMATIC From 5fd1aad0d07863bc8812e73f301641bd027de286 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: 
Mon, 28 May 2012 10:33:59 +0200 Subject: [PATCH 181/538] added docs about BalancingDispatcher and routers, see #2080 --- akka-docs/java/dispatchers.rst | 2 ++ akka-docs/java/routing.rst | 41 +++++++++++++++++++++++++++++---- akka-docs/scala/dispatchers.rst | 2 ++ akka-docs/scala/routing.rst | 36 +++++++++++++++++++++++++---- 4 files changed, 72 insertions(+), 9 deletions(-) diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index 9260fc11e5..2723883e9c 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -72,6 +72,8 @@ There are 4 different types of message dispatchers: - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. + - All the actors share a single Mailbox that they get their messages from. + - It is assumed that all actors using the same instance of this dispatcher can process all messages that have been sent to one of the actors; i.e. the actors belong to a pool of actors, and to the client there is no guarantee about which actor instance actually processes a given message. - Sharability: Actors of the same type only diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 38cf3a1a80..16aa4cee6f 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -380,11 +380,16 @@ The dispatcher for created children of the router will be taken from makes sense to configure the :class:`BalancingDispatcher` if the precise routing is not so important (i.e. no consistent hashing or round-robin is required); this enables newly created routees to pick up work immediately by -stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` as a **Router Dispatcher**. -(You can however use it for the **Routees**) +stealing it from their siblings. 
-The “head” router, of course, cannot run on the same balancing dispatcher, -because it does not process the same messages, hence this special actor does +.. note:: + + If you provide a collection of actors to route to, then they will still use the same dispatcher + that was configured for them in their ``Props``, it is not possible to change an actors dispatcher + after it has been created. + +The “head” router cannot always run on the same dispatcher, because it +does not process the same type of messages, hence this special actor does not use the dispatcher configured in :class:`Props`, but takes the ``routerDispatcher`` from the :class:`RouterConfig` instead, which defaults to the actor system’s default dispatcher. All standard routers allow setting this @@ -393,3 +398,31 @@ implement the method in a suitable way. .. includecode:: code/docs/jrouting/CustomRouterDocTestBase.java#dispatchers +.. note:: + + It is not allowed to configure the ``routerDispatcher`` to be a + :class:`BalancingDispatcher` since the messages meant for the special + router actor cannot be processed by any other actor. + +At first glance there seems to be an overlap between the +:class:`BalancingDispatcher` and Routers, but they complement each other. +The balancing dispatcher is in charge of running the actors while the routers +are in charge of deciding which message goes where. A router can also have +children that span multiple actor systems, even remote ones, but a dispatcher +lives inside a single actor system. + +When using a :class:`RoundRobinRouter` with a :class:`BalancingDispatcher` +there are some configuration settings to take into account. + +- There can only be ``nr-of-instances`` messages being processed at the same + time no matter how many threads are configured for the + :class:`BalancingDispatcher`. 
+ +- Having ``throughput`` set to a low number makes no sense since you will only + be handing off to another actor that processes the same :class:`MailBox` + as yourself, which can be costly. Either the message just got into the + mailbox and you can receive it as well as anybody else, or everybody else + is busy and you are the only one available to receive the message. + +- Resizing the number of routees only introduce inertia, since resizing + is performed at specified intervals, but work stealing is instantaneous. diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index 100b882b5b..cea9ee6e0a 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -73,6 +73,8 @@ There are 4 different types of message dispatchers: - This is an executor based event driven dispatcher that will try to redistribute work from busy actors to idle actors. + - All the actors share a single Mailbox that they get their messages from. + - It is assumed that all actors using the same instance of this dispatcher can process all messages that have been sent to one of the actors; i.e. the actors belong to a pool of actors, and to the client there is no guarantee about which actor instance actually processes a given message. - Sharability: Actors of the same type only diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 25f582e085..5a37b3471a 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -380,9 +380,7 @@ The dispatcher for created children of the router will be taken from makes sense to configure the :class:`BalancingDispatcher` if the precise routing is not so important (i.e. no consistent hashing or round-robin is required); this enables newly created routees to pick up work immediately by -stealing it from their siblings. Note that you can **not** use a ``BalancingDispatcher`` as a **Router Dispatcher**. 
-(You can however use it for the **Routees**) - +stealing it from their siblings. .. note:: @@ -390,8 +388,8 @@ stealing it from their siblings. Note that you can **not** use a ``BalancingDisp that was configured for them in their ``Props``, it is not possible to change an actors dispatcher after it has been created. -The “head” router, of course, cannot run on the same balancing dispatcher, -because it does not process the same messages, hence this special actor does +The “head” router cannot always run on the same dispatcher, because it +does not process the same type of messages, hence this special actor does not use the dispatcher configured in :class:`Props`, but takes the ``routerDispatcher`` from the :class:`RouterConfig` instead, which defaults to the actor system’s default dispatcher. All standard routers allow setting this @@ -400,3 +398,31 @@ implement the method in a suitable way. .. includecode:: code/docs/routing/RouterDocSpec.scala#dispatchers +.. note:: + + It is not allowed to configure the ``routerDispatcher`` to be a + :class:`BalancingDispatcher` since the messages meant for the special + router actor cannot be processed by any other actor. + +At first glance there seems to be an overlap between the +:class:`BalancingDispatcher` and Routers, but they complement each other. +The balancing dispatcher is in charge of running the actors while the routers +are in charge of deciding which message goes where. A router can also have +children that span multiple actor systems, even remote ones, but a dispatcher +lives inside a single actor system. + +When using a :class:`RoundRobinRouter` with a :class:`BalancingDispatcher` +there are some configuration settings to take into account. + +- There can only be ``nr-of-instances`` messages being processed at the same + time no matter how many threads are configured for the + :class:`BalancingDispatcher`. 
+ +- Having ``throughput`` set to a low number makes no sense since you will only + be handing off to another actor that processes the same :class:`MailBox` + as yourself, which can be costly. Either the message just got into the + mailbox and you can receive it as well as anybody else, or everybody else + is busy and you are the only one available to receive the message. + +- Resizing the number of routees only introduce inertia, since resizing + is performed at specified intervals, but work stealing is instantaneous. From e3eec7e344c26cc912add339e611f5a8786029e9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 May 2012 11:05:02 +0200 Subject: [PATCH 182/538] LeaderElectionSpec with hard exits. See #2113 and #2138 --- .../src/main/scala/akka/actor/Props.scala | 2 +- .../cluster/HardExitLeaderElectionSpec.scala | 108 ++++++++++++++++++ .../akka/cluster/LeaderElectionSpec.scala | 8 +- 3 files changed, 113 insertions(+), 5 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/HardExitLeaderElectionSpec.scala diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index f6552179c3..dfd6200fd3 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -146,7 +146,7 @@ case class Props( /** * Returns a new Props with the specified creator set. - * + * * The creator must not return the same instance multiple times. * * Scala API. diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/HardExitLeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/HardExitLeaderElectionSpec.scala new file mode 100644 index 0000000000..0360e4f1b8 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/HardExitLeaderElectionSpec.scala @@ -0,0 +1,108 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster + +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object HardExitLeaderElectionMultiJvmSpec extends MultiNodeConfig { + val controller = role("controller") + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster.auto-down = off + """)). + withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class HardExitLeaderElectionMultiJvmNode1 extends HardExitLeaderElectionSpec +class HardExitLeaderElectionMultiJvmNode2 extends HardExitLeaderElectionSpec +class HardExitLeaderElectionMultiJvmNode3 extends HardExitLeaderElectionSpec +class HardExitLeaderElectionMultiJvmNode4 extends HardExitLeaderElectionSpec +class HardExitLeaderElectionMultiJvmNode5 extends HardExitLeaderElectionSpec + +abstract class HardExitLeaderElectionSpec extends MultiNodeSpec(HardExitLeaderElectionMultiJvmSpec) with MultiNodeClusterSpec { + import HardExitLeaderElectionMultiJvmSpec._ + + override def initialParticipants = 5 + + lazy val firstAddress = node(first).address + + // sorted in the order used by the cluster + lazy val roles = Seq(first, second, third, fourth).sorted + + "A cluster of four nodes" must { + + "be able to 'elect' a single leader" taggedAs LongRunningTest in { + // make sure that the node-to-join is started before other join + runOn(first) { + cluster + } + testConductor.enter("first-started") + + if (mySelf != controller) { + cluster.join(firstAddress) + awaitUpConvergence(numberOfMembers = roles.size) + cluster.isLeader must be(mySelf == roles.head) + } + testConductor.enter("after") + } + + def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { + val currentRoles = roles.drop(alreadyShutdown) + currentRoles.size must be >= (2) + val leader = currentRoles.head + val 
aUser = currentRoles.last + + mySelf match { + + case `controller` ⇒ + testConductor.enter("before-shutdown") + testConductor.shutdown(leader, 0) + testConductor.removeNode(leader) + testConductor.enter("after-shutdown", "after-down", "completed") + + case `leader` ⇒ + testConductor.enter("before-shutdown") + // this node will be shutdown by the controller and doesn't participate in more barriers + + case `aUser` ⇒ + val leaderAddress = node(leader).address + testConductor.enter("before-shutdown", "after-shutdown") + // user marks the shutdown leader as DOWN + cluster.down(leaderAddress) + testConductor.enter("after-down", "completed") + + case _ if currentRoles.tail.contains(mySelf) ⇒ + // remaining cluster nodes, not shutdown + testConductor.enter("before-shutdown", "after-shutdown", "after-down") + + awaitUpConvergence(currentRoles.size - 1) + val nextExpectedLeader = currentRoles.tail.head + cluster.isLeader must be(mySelf == nextExpectedLeader) + + testConductor.enter("completed") + + } + + } + + "be able to 're-elect' a single leader after leader has left" taggedAs LongRunningTest in { + shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 0) + } + + "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in { + shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 1) + } + } + +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 007ab941dc..886556de54 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -13,7 +13,7 @@ object LeaderElectionMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") - val forth = role("forth") + val fourth = role("fourth") commonConfig(debugConfig(on = false). 
withFallback(ConfigFactory.parseString(""" @@ -36,19 +36,19 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp lazy val firstAddress = node(first).address // sorted in the order used by the cluster - lazy val roles = Seq(first, second, third, forth).sorted + lazy val roles = Seq(first, second, third, fourth).sorted "A cluster of four nodes" must { "be able to 'elect' a single leader" taggedAs LongRunningTest in { - // make sure that the first cluster is started before other join + // make sure that the node-to-join is started before other join runOn(first) { cluster } testConductor.enter("first-started") cluster.join(firstAddress) - awaitUpConvergence(numberOfMembers = 4) + awaitUpConvergence(numberOfMembers = roles.size) cluster.isLeader must be(mySelf == roles.head) testConductor.enter("after") } From 203e2cb13c90c855e52866993ede2ca4dc99ce33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 28 May 2012 11:06:02 +0200 Subject: [PATCH 183/538] Added LeaderDowning spec for testing that the leader automatically is downing an unreachable node. Fixes 2112. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - tests downing a node at the end of the node ring - tests downing a node in the middle of the node ring - added some more utility stuff to the MultiNodeClusterSpec Signed-off-by: Jonas Bonér --- .../akka/cluster/LeaderDowningSpec.scala | 124 ++++++++++++++++++ .../akka/cluster/MultiNodeClusterSpec.scala | 32 +++-- 2 files changed, 145 insertions(+), 11 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala new file mode 100644 index 0000000000..357f360853 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala @@ -0,0 +1,124 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.actor.Address +import akka.util.duration._ + +object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString("akka.cluster.auto-down = on")). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class LeaderDowningNodeThatIsUnreachableMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec +class LeaderDowningNodeThatIsUnreachableMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec +class LeaderDowningNodeThatIsUnreachableMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec +class LeaderDowningNodeThatIsUnreachableMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec + +class LeaderDowningNodeThatIsUnreachableSpec + extends MultiNodeSpec(LeaderDowningNodeThatIsUnreachableMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender with BeforeAndAfter { + import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ + + override def initialParticipants = 4 + + "The Leader in a 4 node cluster" must { + + "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { + runOn(first) { + cluster.self + awaitUpConvergence(numberOfMembers = 4) + + val fourthAddress = node(fourth).address + testConductor.enter("all-up") + + // kill 'fourth' node + testConductor.shutdown(fourth, 0) + testConductor.removeNode(fourth) + testConductor.enter("down-fourth-node") + + // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- + + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds.dilated) + testConductor.enter("await-completion") + } + + runOn(fourth) { + cluster.join(node(first).address) + + awaitUpConvergence(numberOfMembers = 4) + + cluster.isLeader must be(false) + testConductor.enter("all-up") + } + + runOn(second, third) { + cluster.join(node(first).address) + awaitUpConvergence(numberOfMembers = 4) + + val fourthAddress = node(fourth).address + testConductor.enter("all-up") + + testConductor.enter("down-fourth-node") + + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds.dilated) + testConductor.enter("await-completion") + } + } + + "be able to 
DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { + runOn(first) { + cluster.self + awaitUpConvergence(numberOfMembers = 3) + + val secondAddress = node(second).address + testConductor.enter("all-up") + + // kill 'second' node + testConductor.shutdown(second, 0) + testConductor.removeNode(second) + testConductor.enter("down-second-node") + + // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- + + awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds.dilated) + testConductor.enter("await-completion") + } + + runOn(second) { + cluster.join(node(first).address) + + awaitUpConvergence(numberOfMembers = 3) + + cluster.isLeader must be(false) + testConductor.enter("all-up") + } + + runOn(second, third) { + cluster.join(node(first).address) + awaitUpConvergence(numberOfMembers = 3) + + val secondAddress = node(second).address + testConductor.enter("all-up") + + testConductor.enter("down-second-node") + + awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30 seconds) + testConductor.enter("await-completion") + } + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 873d819dbb..6f371ee1bc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -8,15 +8,19 @@ import com.typesafe.config.ConfigFactory import akka.actor.Address import akka.remote.testconductor.RoleName import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ import akka.util.duration._ +import akka.util.Duration object MultiNodeClusterSpec { def clusterConfig: Config = ConfigFactory.parseString(""" akka.cluster { - gossip-frequency = 200 ms - leader-actions-frequency = 200 ms - periodic-tasks-initial-delay = 300 ms + gossip-frequency = 200 
ms + leader-actions-frequency = 200 ms + unreachable-nodes-reaper-frequency = 200 ms + periodic-tasks-initial-delay = 300 ms } + akka.test { single-expect-default = 5 s } @@ -51,13 +55,19 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ } /** - * Wait until the expected number of members has status Up - * and convergence has been reached. + * Wait until the expected number of members has status Up and convergence has been reached. + * Also asserts that nodes in the 'canNotBePartOfMemberRing' are *not* part of the cluster ring. */ - def awaitUpConvergence(numberOfMembers: Int): Unit = { - awaitCond(cluster.latestGossip.members.size == numberOfMembers) - awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) - awaitCond(cluster.convergence.isDefined, 10 seconds) + def awaitUpConvergence( + numberOfMembers: Int, + canNotBePartOfMemberRing: Seq[Address] = Seq.empty[Address], + timeout: Duration = 10.seconds.dilated): Unit = { + awaitCond(cluster.latestGossip.members.size == numberOfMembers, timeout) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up), timeout) + awaitCond(cluster.convergence.isDefined, timeout) + if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set + awaitCond( + canNotBePartOfMemberRing forall (address => !(cluster.latestGossip.members exists (_.address.port == address.port))), + timeout) } - -} \ No newline at end of file +} From 6b2e42d954a7a46bc9f228e754093358a3dd114f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 28 May 2012 11:27:52 +0200 Subject: [PATCH 184/538] Removed old LeaderDowningSpec --- .../akka/cluster/LeaderDowningSpec.scala | 144 ------------------ 1 file changed, 144 deletions(-) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/LeaderDowningSpec.scala diff --git a/akka-cluster/src/test/scala/akka/cluster/LeaderDowningSpec.scala b/akka-cluster/src/test/scala/akka/cluster/LeaderDowningSpec.scala deleted file mode 100644 index 
15e6cec838..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/LeaderDowningSpec.scala +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ -import akka.util.duration._ - -import com.typesafe.config._ - -import java.net.InetSocketAddress - -class LeaderDowningSpec extends ClusterSpec with ImplicitSender { - val portPrefix = 4 - - var node1: Cluster = _ - var node2: Cluster = _ - var node3: Cluster = _ - var node4: Cluster = _ - - var system1: ActorSystemImpl = _ - var system2: ActorSystemImpl = _ - var system3: ActorSystemImpl = _ - var system4: ActorSystemImpl = _ - - try { - "The Leader in a 4 node cluster" must { - - // ======= NODE 1 ======== - system1 = ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d550 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider] - node1 = Cluster(system1) - val fd1 = node1.failureDetector - val address1 = node1.remoteAddress - - // ======= NODE 2 ======== - system2 = ActorSystem("system2", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d551 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider] - node2 = Cluster(system2) - val fd2 = node2.failureDetector - val address2 = node2.remoteAddress - - // ======= NODE 3 ======== - system3 = ActorSystem("system3", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d552 - cluster.node-to-join = 
"akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote3 = system3.provider.asInstanceOf[RemoteActorRefProvider] - node3 = Cluster(system3) - val fd3 = node3.failureDetector - val address3 = node3.remoteAddress - - // ======= NODE 4 ======== - system4 = ActorSystem("system4", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port = %d553 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote4 = system4.provider.asInstanceOf[RemoteActorRefProvider] - node4 = Cluster(system4) - val fd4 = node4.failureDetector - val address4 = node4.remoteAddress - - "be able to DOWN a (last) node that is UNREACHABLE" taggedAs LongRunningTest in { - - println("Give the system time to converge...") - awaitConvergence(node1 :: node2 :: node3 :: node4 :: Nil) - - // shut down system4 - node4.shutdown() - system4.shutdown() - - // wait for convergence - e.g. the leader to auto-down the failed node - println("Give the system time to converge...") - Thread.sleep(30.seconds.dilated.toMillis) - awaitConvergence(node1 :: node2 :: node3 :: Nil) - - node1.latestGossip.members.size must be(3) - node1.latestGossip.members.exists(_.address == address4) must be(false) - } - - "be able to DOWN a (middle) node that is UNREACHABLE" taggedAs LongRunningTest in { - // shut down system4 - node2.shutdown() - system2.shutdown() - - // wait for convergence - e.g. 
the leader to auto-down the failed node - println("Give the system time to converge...") - Thread.sleep(30.seconds.dilated.toMillis) - awaitConvergence(node1 :: node3 :: Nil) - - node1.latestGossip.members.size must be(2) - node1.latestGossip.members.exists(_.address == address4) must be(false) - node1.latestGossip.members.exists(_.address == address2) must be(false) - } - } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) - } - - override def atTermination() { - if (node1 ne null) node1.shutdown() - if (system1 ne null) system1.shutdown() - - if (node2 ne null) node2.shutdown() - if (system2 ne null) system2.shutdown() - - if (node3 ne null) node3.shutdown() - if (system3 ne null) system3.shutdown() - - if (node4 ne null) node4.shutdown() - if (system4 ne null) system4.shutdown() - } -} From a66ababd27f78002540f4329f770543713cc7aa6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 28 May 2012 11:33:14 +0200 Subject: [PATCH 185/538] Decreased failure detector threshold to shorten failure detection time. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../scala/akka/cluster/LeaderDowningSpec.scala | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala index 357f360853..a8191057e7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala @@ -18,7 +18,12 @@ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString("akka.cluster.auto-down = on")). 
+ withFallback(ConfigFactory.parseString(""" + akka.cluster { + auto-down = on + failure-detector.threshold = 4 + } + """)). withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -60,8 +65,6 @@ class LeaderDowningNodeThatIsUnreachableSpec cluster.join(node(first).address) awaitUpConvergence(numberOfMembers = 4) - - cluster.isLeader must be(false) testConductor.enter("all-up") } @@ -102,8 +105,6 @@ class LeaderDowningNodeThatIsUnreachableSpec cluster.join(node(first).address) awaitUpConvergence(numberOfMembers = 3) - - cluster.isLeader must be(false) testConductor.enter("all-up") } From faed944590ab875d757e96afe5ea411ab591a601 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 28 May 2012 11:54:10 +0200 Subject: [PATCH 186/538] Fixed minor check: checking on 'address' equality instead of 'port' --- .../src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 6f371ee1bc..1726c0a577 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -67,7 +67,7 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ awaitCond(cluster.convergence.isDefined, timeout) if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set awaitCond( - canNotBePartOfMemberRing forall (address => !(cluster.latestGossip.members exists (_.address.port == address.port))), + canNotBePartOfMemberRing forall (address => !(cluster.latestGossip.members exists (_.address == address))), timeout) } } From e533845a013d35220cf3b982354029eb5f128b64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 28 May 2012 12:46:39 +0200 Subject: [PATCH 187/538] Renamed the LeaderDowningSpec to LeaderDowningNodeThatIsUnreachableSpec 
(same as the class) --- ...ingSpec.scala => LeaderDowningNodeThatIsUnreachableSpec.scala} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{LeaderDowningSpec.scala => LeaderDowningNodeThatIsUnreachableSpec.scala} (100%) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala similarity index 100% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala From 59dd754819764327f4cb1e5c4aad7d04cc3425f1 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 May 2012 13:55:22 +0200 Subject: [PATCH 188/538] Use only the hard exit LeaderElectionSpec, see #2113 --- .../cluster/HardExitLeaderElectionSpec.scala | 108 ------------------ .../akka/cluster/LeaderElectionSpec.scala | 58 +++++----- 2 files changed, 32 insertions(+), 134 deletions(-) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/HardExitLeaderElectionSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/HardExitLeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/HardExitLeaderElectionSpec.scala deleted file mode 100644 index 0360e4f1b8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/HardExitLeaderElectionSpec.scala +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object HardExitLeaderElectionMultiJvmSpec extends MultiNodeConfig { - val controller = role("controller") - val first = role("first") - val second = role("second") - val third = role("third") - val fourth = role("fourth") - - commonConfig(debugConfig(on = false). 
- withFallback(ConfigFactory.parseString(""" - akka.cluster.auto-down = off - """)). - withFallback(MultiNodeClusterSpec.clusterConfig)) - -} - -class HardExitLeaderElectionMultiJvmNode1 extends HardExitLeaderElectionSpec -class HardExitLeaderElectionMultiJvmNode2 extends HardExitLeaderElectionSpec -class HardExitLeaderElectionMultiJvmNode3 extends HardExitLeaderElectionSpec -class HardExitLeaderElectionMultiJvmNode4 extends HardExitLeaderElectionSpec -class HardExitLeaderElectionMultiJvmNode5 extends HardExitLeaderElectionSpec - -abstract class HardExitLeaderElectionSpec extends MultiNodeSpec(HardExitLeaderElectionMultiJvmSpec) with MultiNodeClusterSpec { - import HardExitLeaderElectionMultiJvmSpec._ - - override def initialParticipants = 5 - - lazy val firstAddress = node(first).address - - // sorted in the order used by the cluster - lazy val roles = Seq(first, second, third, fourth).sorted - - "A cluster of four nodes" must { - - "be able to 'elect' a single leader" taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - cluster - } - testConductor.enter("first-started") - - if (mySelf != controller) { - cluster.join(firstAddress) - awaitUpConvergence(numberOfMembers = roles.size) - cluster.isLeader must be(mySelf == roles.head) - } - testConductor.enter("after") - } - - def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { - val currentRoles = roles.drop(alreadyShutdown) - currentRoles.size must be >= (2) - val leader = currentRoles.head - val aUser = currentRoles.last - - mySelf match { - - case `controller` ⇒ - testConductor.enter("before-shutdown") - testConductor.shutdown(leader, 0) - testConductor.removeNode(leader) - testConductor.enter("after-shutdown", "after-down", "completed") - - case `leader` ⇒ - testConductor.enter("before-shutdown") - // this node will be shutdown by the controller and doesn't participate in more barriers - - case `aUser` ⇒ - val leaderAddress = 
node(leader).address - testConductor.enter("before-shutdown", "after-shutdown") - // user marks the shutdown leader as DOWN - cluster.down(leaderAddress) - testConductor.enter("after-down", "completed") - - case _ if currentRoles.tail.contains(mySelf) ⇒ - // remaining cluster nodes, not shutdown - testConductor.enter("before-shutdown", "after-shutdown", "after-down") - - awaitUpConvergence(currentRoles.size - 1) - val nextExpectedLeader = currentRoles.tail.head - cluster.isLeader must be(mySelf == nextExpectedLeader) - - testConductor.enter("completed") - - } - - } - - "be able to 're-elect' a single leader after leader has left" taggedAs LongRunningTest in { - shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 0) - } - - "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in { - shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 1) - } - } - -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 886556de54..54f744a6c8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -10,6 +10,7 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit._ object LeaderElectionMultiJvmSpec extends MultiNodeConfig { + val controller = role("controller") val first = role("first") val second = role("second") val third = role("third") @@ -27,11 +28,12 @@ class LeaderElectionMultiJvmNode1 extends LeaderElectionSpec class LeaderElectionMultiJvmNode2 extends LeaderElectionSpec class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec +class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSpec) with MultiNodeClusterSpec { import LeaderElectionMultiJvmSpec._ - override def 
initialParticipants = 4 + override def initialParticipants = 5 lazy val firstAddress = node(first).address @@ -47,47 +49,51 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp } testConductor.enter("first-started") - cluster.join(firstAddress) - awaitUpConvergence(numberOfMembers = roles.size) - cluster.isLeader must be(mySelf == roles.head) + if (mySelf != controller) { + cluster.join(firstAddress) + awaitUpConvergence(numberOfMembers = roles.size) + cluster.isLeader must be(mySelf == roles.head) + } testConductor.enter("after") } def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { val currentRoles = roles.drop(alreadyShutdown) currentRoles.size must be >= (2) + val leader = currentRoles.head + val aUser = currentRoles.last - runOn(currentRoles.head) { - cluster.shutdown() - testConductor.enter("after-shutdown") - testConductor.enter("after-down") - } + mySelf match { - // runOn previously shutdown cluster nodes - if ((roles diff currentRoles).contains(mySelf)) { - testConductor.enter("after-shutdown") - testConductor.enter("after-down") - } + case `controller` ⇒ + testConductor.enter("before-shutdown") + testConductor.shutdown(leader, 0) + testConductor.removeNode(leader) + testConductor.enter("after-shutdown", "after-down", "completed") - // runOn remaining cluster nodes - if (currentRoles.tail.contains(mySelf)) { + case `leader` ⇒ + testConductor.enter("before-shutdown") + // this node will be shutdown by the controller and doesn't participate in more barriers - testConductor.enter("after-shutdown") - - runOn(currentRoles.last) { + case `aUser` ⇒ + val leaderAddress = node(leader).address + testConductor.enter("before-shutdown", "after-shutdown") // user marks the shutdown leader as DOWN - val leaderAddress = node(currentRoles.head).address cluster.down(leaderAddress) - } + testConductor.enter("after-down", "completed") - testConductor.enter("after-down") + case _ if currentRoles.tail.contains(mySelf) ⇒ + // 
remaining cluster nodes, not shutdown + testConductor.enter("before-shutdown", "after-shutdown", "after-down") + + awaitUpConvergence(currentRoles.size - 1) + val nextExpectedLeader = currentRoles.tail.head + cluster.isLeader must be(mySelf == nextExpectedLeader) + + testConductor.enter("completed") - awaitUpConvergence(currentRoles.size - 1) - val nextExpectedLeader = currentRoles.tail.head - cluster.isLeader must be(mySelf == nextExpectedLeader) } - testConductor.enter("after") } "be able to 're-elect' a single leader after leader has left" taggedAs LongRunningTest in { From 3393f3be974100379e543f251c1f1f7eb9d1f3ac Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 May 2012 14:50:19 +0200 Subject: [PATCH 189/538] Tag BarrierSpec as timing, due to jenkins failures --- .../remote/testconductor/BarrierSpec.scala | 57 ++++++++++--------- 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index f66e120195..37ebd0a193 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -18,6 +18,7 @@ import akka.event.Logging import org.scalatest.BeforeAndAfterEach import java.net.InetSocketAddress import java.net.InetAddress +import akka.testkit.TimingTest object BarrierSpec { case class Failed(ref: ActorRef, thr: Throwable) @@ -46,7 +47,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "A BarrierCoordinator" must { - "register clients and remove them" in { + "register clients and remove them" taggedAs TimingTest in { val b = getBarrier() b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters) b ! 
RemoveClient(B) @@ -57,7 +58,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "cannot remove RoleName(a): no client to remove"))) } - "register clients and disconnect them" in { + "register clients and disconnect them" taggedAs TimingTest in { val b = getBarrier() b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters) b ! ClientDisconnected(B) @@ -71,13 +72,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "cannot disconnect RoleName(a): no client to disconnect"))) } - "fail entering barrier when nobody registered" in { + "fail entering barrier when nobody registered" taggedAs TimingTest in { val b = getBarrier() b ! EnterBarrier("b") expectMsg(ToClient(BarrierResult("b", false))) } - "enter barrier" in { + "enter barrier" taggedAs TimingTest in { val barrier = getBarrier() val a, b = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -91,7 +92,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with } } - "enter barrier with joining node" in { + "enter barrier with joining node" taggedAs TimingTest in { val barrier = getBarrier() val a, b, c = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -108,7 +109,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with } } - "enter barrier with leaving node" in { + "enter barrier with leaving node" taggedAs TimingTest in { val barrier = getBarrier() val a, b, c = TestProbe() barrier ! 
NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -127,7 +128,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectNoMsg(1 second) } - "leave barrier when last “arrived” is removed" in { + "leave barrier when last “arrived” is removed" taggedAs TimingTest in { val barrier = getBarrier() val a, b = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -138,7 +139,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with b.expectMsg(ToClient(BarrierResult("foo", true))) } - "fail barrier with disconnecing node" in { + "fail barrier with disconnecing node" taggedAs TimingTest in { val barrier = getBarrier() val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -151,7 +152,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), B))) } - "fail barrier with disconnecing node who already arrived" in { + "fail barrier with disconnecing node who already arrived" taggedAs TimingTest in { val barrier = getBarrier() val a, b, c = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -167,7 +168,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), B))) } - "fail when entering wrong barrier" in { + "fail when entering wrong barrier" taggedAs TimingTest in { val barrier = getBarrier() val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -181,7 +182,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectMsg(Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar", a.ref :: Nil)))) } - "fail barrier after first failure" in { + "fail barrier after first failure" taggedAs TimingTest in { val barrier = getBarrier() 
val a = TestProbe() EventFilter[BarrierEmpty](occurrences = 1) intercept { @@ -193,7 +194,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.expectMsg(ToClient(BarrierResult("right", false))) } - "fail after barrier timeout" in { + "fail after barrier timeout" taggedAs TimingTest in { val barrier = getBarrier() val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -206,7 +207,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with } } - "fail if a node registers twice" in { + "fail if a node registers twice" taggedAs TimingTest in { val barrier = getBarrier() val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -218,7 +219,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectMsg(Failed(barrier, DuplicateNode(Data(Set(nodeA), "", Nil), nodeB))) } - "finally have no failure messages left" in { + "finally have no failure messages left" taggedAs TimingTest in { expectNoMsg(1 second) } @@ -226,7 +227,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "A Controller with BarrierCoordinator" must { - "register clients and remove them" in { + "register clients and remove them" taggedAs TimingTest in { val b = getController(1) b ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) @@ -237,7 +238,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with } } - "register clients and disconnect them" in { + "register clients and disconnect them" taggedAs TimingTest in { val b = getController(1) b ! 
NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) @@ -250,13 +251,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with } } - "fail entering barrier when nobody registered" in { + "fail entering barrier when nobody registered" taggedAs TimingTest in { val b = getController(0) b ! EnterBarrier("b") expectMsg(ToClient(BarrierResult("b", false))) } - "enter barrier" in { + "enter barrier" taggedAs TimingTest in { val barrier = getController(2) val a, b = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -272,7 +273,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with } } - "enter barrier with joining node" in { + "enter barrier with joining node" taggedAs TimingTest in { val barrier = getController(2) val a, b, c = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -292,7 +293,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with } } - "enter barrier with leaving node" in { + "enter barrier with leaving node" taggedAs TimingTest in { val barrier = getController(3) val a, b, c = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -314,7 +315,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with expectNoMsg(1 second) } - "leave barrier when last “arrived” is removed" in { + "leave barrier when last “arrived” is removed" taggedAs TimingTest in { val barrier = getController(2) val a, b = TestProbe() barrier ! 
NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -327,7 +328,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with b.expectMsg(ToClient(BarrierResult("foo", true))) } - "fail barrier with disconnecing node" in { + "fail barrier with disconnecing node" taggedAs TimingTest in { val barrier = getController(2) val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -344,7 +345,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.expectMsg(ToClient(BarrierResult("bar", false))) } - "fail barrier with disconnecing node who already arrived" in { + "fail barrier with disconnecing node who already arrived" taggedAs TimingTest in { val barrier = getController(3) val a, b, c = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -363,7 +364,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.expectMsg(ToClient(BarrierResult("bar", false))) } - "fail when entering wrong barrier" in { + "fail when entering wrong barrier" taggedAs TimingTest in { val barrier = getController(2) val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -380,7 +381,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with b.expectMsg(ToClient(BarrierResult("foo", false))) } - "not really fail after barrier timeout" in { + "not really fail after barrier timeout" taggedAs TimingTest in { val barrier = getController(2) val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -398,7 +399,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with b.expectMsg(ToClient(BarrierResult("right", true))) } - "fail if a node registers twice" in { + "fail if a node registers twice" taggedAs TimingTest in { val controller = getController(2) val a, b = TestProbe() val nodeA = NodeInfo(A, 
AddressFromURIString("akka://sys"), a.ref) @@ -411,7 +412,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with b.expectMsg(ToClient(BarrierResult("initial startup", false))) } - "fail subsequent barriers if a node registers twice" in { + "fail subsequent barriers if a node registers twice" taggedAs TimingTest in { val controller = getController(1) val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -426,7 +427,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.expectMsg(ToClient(BarrierResult("x", false))) } - "finally have no failure messages left" in { + "finally have no failure messages left" taggedAs TimingTest in { expectNoMsg(1 second) } From 478cd96639220ec24d1c565ed992ee47a5c2a113 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 May 2012 11:53:57 +0200 Subject: [PATCH 190/538] Move GossipingAccrualFailureDetectorSpec to multi-jvm, see #2110 --- .../scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename akka-cluster/src/{test => multi-jvm}/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala (100%) diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala similarity index 100% rename from akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala From bffb14b022dcd93e5be46ae1deadc7bcd9e36a32 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 May 2012 13:48:58 +0200 Subject: [PATCH 191/538] Port GossipingAccrualFailureDetectorSpec to MultiNodeSpec, see #2110 --- .../GossipingAccrualFailureDetectorSpec.scala | 142 +++++++----------- 1 file changed, 52 insertions(+), 90 deletions(-) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 029b0b221d..fa70b9a134 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -3,111 +3,73 @@ */ package akka.cluster -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import akka.remote._ +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec import akka.util.duration._ +import akka.testkit._ -import com.typesafe.config._ +object GossipingAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") -import java.net.InetSocketAddress + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString("akka.cluster.failure-detector.threshold=5")). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) +} -class GossipingAccrualFailureDetectorSpec extends ClusterSpec with ImplicitSender { - val portPrefix = 2 +class GossipingAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec +class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec +class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec - var node1: Cluster = _ - var node2: Cluster = _ - var node3: Cluster = _ +abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import GossipingAccrualFailureDetectorMultiJvmSpec._ - var system1: ActorSystemImpl = _ - var system2: ActorSystemImpl = _ - var system3: ActorSystemImpl = _ + override def initialParticipants = 3 - try { - "A Gossip-driven Failure Detector" must { + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address - // ======= NODE 1 ======== - system1 = ActorSystem("system1", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d550 - }""".format(portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider] - node1 = Cluster(system1) - val fd1 = node1.failureDetector - val address1 = node1.remoteAddress + after { + testConductor.enter("after") + } - // ======= NODE 2 ======== - system2 = ActorSystem("system2", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d551 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote2 = 
system2.provider.asInstanceOf[RemoteActorRefProvider] - node2 = Cluster(system2) - val fd2 = node2.failureDetector - val address2 = node2.remoteAddress + "A Gossip-driven Failure Detector" must { - // ======= NODE 3 ======== - system3 = ActorSystem("system3", ConfigFactory - .parseString(""" - akka { - actor.provider = "akka.remote.RemoteActorRefProvider" - remote.netty.port=%d552 - cluster.node-to-join = "akka://system1@localhost:%d550" - }""".format(portPrefix, portPrefix)) - .withFallback(system.settings.config)) - .asInstanceOf[ActorSystemImpl] - val remote3 = system3.provider.asInstanceOf[RemoteActorRefProvider] - node3 = Cluster(system3) - val fd3 = node3.failureDetector - val address3 = node3.remoteAddress + "receive gossip heartbeats so that all healthy systems in the cluster are marked 'available'" taggedAs LongRunningTest in { + // make sure that the node-to-join is started before other join + runOn(first) { + cluster.self + } + testConductor.enter("first-started") - "receive gossip heartbeats so that all healthy systems in the cluster are marked 'available'" taggedAs LongRunningTest in { - println("Let the systems gossip for a while...") - Thread.sleep(30.seconds.dilated.toMillis) // let them gossip for 30 seconds - fd1.isAvailable(address2) must be(true) - fd1.isAvailable(address3) must be(true) - fd2.isAvailable(address1) must be(true) - fd2.isAvailable(address3) must be(true) - fd3.isAvailable(address1) must be(true) - fd3.isAvailable(address2) must be(true) + cluster.join(firstAddress) + + log.info("Let the systems gossip for a while...") + 10.seconds.dilated.sleep // let them gossip + cluster.failureDetector.isAvailable(firstAddress) must be(true) + cluster.failureDetector.isAvailable(secondAddress) must be(true) + cluster.failureDetector.isAvailable(thirdAddress) must be(true) + } + + "mark system as 'unavailable' if a system in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { + runOn(first) { + 
testConductor.shutdown(third, 0) + testConductor.removeNode(third) } - "mark system as 'unavailable' if a system in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { - // shut down system3 - node3.shutdown() - system3.shutdown() - println("Give the remaning systems time to detect failure...") - Thread.sleep(30.seconds.dilated.toMillis) // give them 30 seconds to detect failure of system3 - fd1.isAvailable(address2) must be(true) - fd1.isAvailable(address3) must be(false) - fd2.isAvailable(address1) must be(true) - fd2.isAvailable(address3) must be(false) + runOn(first, second) { + log.info("Give the remaning systems time to detect failure...") + 15.seconds.dilated.sleep // give them time to detect failure + cluster.failureDetector.isAvailable(firstAddress) must be(true) + cluster.failureDetector.isAvailable(secondAddress) must be(true) + cluster.failureDetector.isAvailable(thirdAddress) must be(false) } } - } catch { - case e: Exception ⇒ - e.printStackTrace - fail(e.toString) } - override def atTermination() { - if (node1 ne null) node1.shutdown() - if (system1 ne null) system1.shutdown() - - if (node2 ne null) node2.shutdown() - if (system2 ne null) system2.shutdown() - - if (node3 ne null) node3.shutdown() - if (system3 ne null) system3.shutdown() - } } From 24212f14bce11d5a04411b75801180ec321d2f10 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 May 2012 15:29:00 +0200 Subject: [PATCH 192/538] Rename mySelf in MultiNodeSpec to myself --- .../akka/cluster/LeaderElectionSpec.scala | 10 +-- .../akka/cluster/MultiNodeClusterSpec.scala | 2 +- .../test/scala/akka/cluster/ClusterSpec.scala | 69 ------------------- .../akka/remote/testkit/MultiNodeSpec.scala | 18 ++--- 4 files changed, 15 insertions(+), 84 deletions(-) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 54f744a6c8..2ebc14fbcb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -49,10 +49,10 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp } testConductor.enter("first-started") - if (mySelf != controller) { + if (myself != controller) { cluster.join(firstAddress) awaitUpConvergence(numberOfMembers = roles.size) - cluster.isLeader must be(mySelf == roles.head) + cluster.isLeader must be(myself == roles.head) } testConductor.enter("after") } @@ -63,7 +63,7 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp val leader = currentRoles.head val aUser = currentRoles.last - mySelf match { + myself match { case `controller` ⇒ testConductor.enter("before-shutdown") @@ -82,13 +82,13 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp cluster.down(leaderAddress) testConductor.enter("after-down", "completed") - case _ if currentRoles.tail.contains(mySelf) ⇒ + case _ if currentRoles.tail.contains(myself) ⇒ // remaining cluster nodes, not shutdown testConductor.enter("before-shutdown", "after-shutdown", "after-down") awaitUpConvergence(currentRoles.size - 1) val nextExpectedLeader = currentRoles.tail.head - cluster.isLeader must be(mySelf == nextExpectedLeader) + cluster.isLeader must be(myself == nextExpectedLeader) testConductor.enter("completed") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 82adf065c5..c85f15834d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -46,7 +46,7 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ * out of all nodes in the 
cluster. First * member in the cluster ring is expected leader. */ - def assertLeader(nodesInCluster: RoleName*): Unit = if (nodesInCluster.contains(mySelf)) { + def assertLeader(nodesInCluster: RoleName*): Unit = if (nodesInCluster.contains(myself)) { nodesInCluster.length must not be (0) val expectedLeader = roleOfLeader(nodesInCluster) cluster.isLeader must be(ifNode(expectedLeader)(true)(false)) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala deleted file mode 100644 index 38017ad00c..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import akka.actor.ActorSystem -import akka.util._ -import akka.util.duration._ - -import akka.testkit.AkkaSpec -import akka.testkit.TestEvent._ -import akka.testkit.EventFilter - -import com.typesafe.config.{ Config, ConfigFactory } - -object ClusterSpec { - val testConf: Config = ConfigFactory.parseString(""" - akka { - event-handlers = ["akka.testkit.TestEventListener"] - loglevel = "WARNING" - stdout-loglevel = "WARNING" - actor { - default-dispatcher { - executor = "fork-join-executor" - fork-join-executor { - parallelism-min = 8 - parallelism-factor = 2.0 - parallelism-max = 8 - } - } - } - remote.netty.hostname = localhost - cluster { - failure-detector.threshold = 3 - auto-down = on - } - } - """) -} - -abstract class ClusterSpec(_system: ActorSystem) extends AkkaSpec(_system) { - case class PortPrefix(port: Int) { - def withPortPrefix: Int = (portPrefix.toString + port.toString).toInt - } - - implicit def intToPortPrefix(port: Int) = PortPrefix(port) - - def portPrefix: Int - - def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, config.withFallback(ClusterSpec.testConf))) - - def this(s: String) = this(ConfigFactory.parseString(s)) - - def this() = this(ActorSystem(AkkaSpec.getCallerName, 
ClusterSpec.testConf)) - - def awaitConvergence(nodes: Iterable[Cluster], maxWaitTime: Duration = 60 seconds) { - val deadline = maxWaitTime.fromNow - while (nodes map (_.convergence.isDefined) exists (_ == false)) { - if (deadline.isOverdue) throw new IllegalStateException("Convergence could no be reached within " + maxWaitTime) - Thread.sleep(1000) - } - nodes foreach { n ⇒ println("Converged: " + n.self + " == " + n.convergence.isDefined) } - } - - override def atStartup { - system.eventStream.publish(Mute(EventFilter[java.net.ConnectException]())) - system.eventStream.publish(Mute(EventFilter[java.nio.channels.ClosedChannelException]())) - } -} diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index e7bce0890c..8217120fd8 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -77,13 +77,13 @@ abstract class MultiNodeConfig { def deployOnAll(deployment: String): Unit = _allDeploy :+= deployment - private[testkit] lazy val mySelf: RoleName = { + private[testkit] lazy val myself: RoleName = { require(_roles.size > MultiNodeSpec.selfIndex, "not enough roles declared for this test") _roles(MultiNodeSpec.selfIndex) } private[testkit] def config: Config = { - val configs = (_nodeConf get mySelf).toList ::: _commonConf.toList ::: MultiNodeSpec.nodeConfig :: AkkaSpec.testConf :: Nil + val configs = (_nodeConf get myself).toList ::: _commonConf.toList ::: MultiNodeSpec.nodeConfig :: AkkaSpec.testConf :: Nil configs reduce (_ withFallback _) } @@ -128,13 +128,13 @@ object MultiNodeSpec { } -abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem, roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) +abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) 
extends AkkaSpec(_system) { import MultiNodeSpec._ def this(config: MultiNodeConfig) = - this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config), config.roles, config.deployments) + this(config.myself, ActorSystem(AkkaSpec.getCallerName, config.config), config.roles, config.deployments) /* * Test Class Interface @@ -165,13 +165,13 @@ abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem, roles: * to the `roleMap`). */ def runOn(nodes: RoleName*)(thunk: ⇒ Unit): Unit = { - if (nodes exists (_ == mySelf)) { + if (nodes exists (_ == myself)) { thunk } } def ifNode[T](nodes: RoleName*)(yes: ⇒ T)(no: ⇒ T): T = { - if (nodes exists (_ == mySelf)) yes else no + if (nodes exists (_ == myself)) yes else no } /** @@ -198,9 +198,9 @@ abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem, roles: private val controllerAddr = new InetSocketAddress(nodeNames(0), 4711) if (selfIndex == 0) { - testConductor.startController(initialParticipants, mySelf, controllerAddr).await + testConductor.startController(initialParticipants, myself, controllerAddr).await } else { - testConductor.startClient(mySelf, controllerAddr).await + testConductor.startClient(myself, controllerAddr).await } // now add deployments, if so desired @@ -210,7 +210,7 @@ abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem, roles: } private val replacements = roles map (r ⇒ Replacement("@" + r.name + "@", r)) private val deployer = system.asInstanceOf[ExtendedActorSystem].provider.deployer - deployments(mySelf) foreach { str ⇒ + deployments(myself) foreach { str ⇒ val deployString = (str /: replacements) { case (base, r @ Replacement(tag, _)) ⇒ base.indexOf(tag) match { From d8530811c92433795d96324e52d6bedf60b6b56d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 May 2012 15:38:39 +0200 Subject: [PATCH 193/538] Minor cleanup --- .../cluster/ClientDowningNodeThatIsUnreachableSpec.scala | 4 +--- 
.../akka/cluster/ClientDowningNodeThatIsUpSpec.scala | 4 +--- .../scala/akka/cluster/JoinTwoClustersSpec.scala | 2 +- .../cluster/LeaderDowningNodeThatIsUnreachableSpec.scala | 4 ++-- .../multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala | 8 ++------ .../scala/akka/cluster/MembershipChangeListenerSpec.scala | 2 +- .../scala/akka/cluster/MultiNodeClusterSpec.scala | 1 + .../multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala | 2 +- 8 files changed, 10 insertions(+), 17 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 22f9ada0c8..6ab4d1a39e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -16,9 +16,7 @@ object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString("akka.cluster.auto-down = off")). 
- withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index d855522b36..ee798d5a8a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -16,9 +16,7 @@ object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString("akka.cluster.auto-down = off")). - withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 6a7ebcee86..1017c8a33a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -47,7 +47,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(a1, b1, c1) { - cluster + cluster.self } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index a8191057e7..cda794fe21 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -12,9 +12,9 @@ import akka.actor.Address import akka.util.duration._ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { - val first = role("first") + val first = role("first") val second = role("second") - val third = role("third") + val third = role("third") val fourth = role("fourth") commonConfig(debugConfig(on = false). diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 2ebc14fbcb..7053ba5b50 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -16,11 +16,7 @@ object LeaderElectionMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" - akka.cluster.auto-down = off - """)). 
- withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -45,7 +41,7 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp "be able to 'elect' a single leader" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { - cluster + cluster.self } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index dc915912ee..f818c97744 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -42,7 +42,7 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan // make sure that the node-to-join is started before other join runOn(first) { - cluster + cluster.self } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index c85f15834d..ae9d3e9fb7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -15,6 +15,7 @@ import akka.util.Duration object MultiNodeClusterSpec { def clusterConfig: Config = ConfigFactory.parseString(""" akka.cluster { + auto-down = off gossip-frequency = 200 ms leader-actions-frequency = 200 ms unreachable-nodes-reaper-frequency = 200 ms diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 232d6ca0e7..312ac6dbe8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -41,7 +41,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp // make sure that the node-to-join is started before other join runOn(first) { - cluster + cluster.self } testConductor.enter("first-started") From 96f264e842a3f9f51a2530f325c71cee35e8b876 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 28 May 2012 16:49:49 +0200 Subject: [PATCH 194/538] Initial stab at DeathWatch 2.0, tests don't pass just yet --- .../test/scala/akka/routing/RoutingSpec.scala | 4 +- .../src/main/scala/akka/actor/Actor.scala | 2 +- .../src/main/scala/akka/actor/ActorCell.scala | 80 +++++++++++++------ .../scala/akka/actor/ActorRefProvider.scala | 32 ++------ .../main/scala/akka/actor/ActorSystem.scala | 6 -- .../akka/dispatch/AbstractDispatcher.scala | 4 +- .../main/scala/akka/event/DeathWatch.scala | 19 ----- .../main/scala/akka/pattern/AskSupport.scala | 31 +++---- .../akka/pattern/GracefulStopSupport.scala | 8 +- .../docs/actor/FaultHandlingTestBase.java | 4 +- .../docs/actor/FaultHandlingDocSpec.scala | 4 +- .../akka/remote/RemoteActorRefProvider.scala | 27 +------ .../main/scala/akka/remote/RemoteDaemon.scala | 10 ++- 13 files changed, 99 insertions(+), 132 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/event/DeathWatch.scala diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 5ad6da271f..98d3e71384 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -73,7 +73,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with watch(router) watch(c2) system.stop(c2) - expectMsg(Terminated(c2)) + expectMsg(Terminated(c2)(stopped = true)) // it might take a while until the Router has actually processed the Terminated message awaitCond { router ! 
"" @@ -84,7 +84,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with res == Seq(c1, c1) } system.stop(c1) - expectMsg(Terminated(router)) + expectMsg(Terminated(router)(stopped = true)) } "be able to send their routees" in { diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 3d93e52a54..8906dcd60e 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -59,7 +59,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. */ -case class Terminated(@BeanProperty actor: ActorRef) extends PossiblyHarmful +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty stopped: Boolean) extends PossiblyHarmful abstract class ReceiveTimeout extends PossiblyHarmful diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 0955595640..6d49045099 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -6,7 +6,6 @@ package akka.actor import akka.dispatch._ import scala.annotation.tailrec -import scala.collection.immutable.{ Stack, TreeMap } import java.util.concurrent.TimeUnit import java.util.concurrent.TimeUnit.MILLISECONDS import akka.event.Logging.{ Debug, Warning, Error } @@ -16,6 +15,7 @@ import java.io.{ NotSerializableException, ObjectOutputStream } import akka.serialization.SerializationExtension import akka.util.NonFatal import akka.event.Logging.LogEventException +import collection.immutable.{ TreeSet, Stack, TreeMap } //TODO: everything here for current compatibility - could be limited more @@ -187,6 +187,8 @@ private[akka] object ActorCell { final val behaviorStackPlaceHolder: Stack[Actor.Receive] = Stack.empty.push(Actor.emptyBehavior) + final val emptyActorRefSet: 
Set[ActorRef] = TreeSet.empty + sealed trait SuspendReason case object UserRequest extends SuspendReason case class Recreation(cause: Throwable) extends SuspendReason @@ -407,16 +409,14 @@ private[akka] class ActorCell( actor.asInstanceOf[InternalActorRef].stop() } - var currentMessage: Envelope = null - + var currentMessage: Envelope = _ var actor: Actor = _ - private var behaviorStack: Stack[Actor.Receive] = Stack.empty - @volatile //This must be volatile since it isn't protected by the mailbox status var mailbox: Mailbox = _ - var nextNameSequence: Long = 0 + var watching: Set[ActorRef] = emptyActorRefSet + var watchedBy: Set[ActorRef] = emptyActorRefSet //Not thread safe, so should only be used inside the actor that inhabits this ActorCell final protected def randomName(): String = { @@ -462,13 +462,25 @@ private[akka] class ActorCell( override final def watch(subject: ActorRef): ActorRef = { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - dispatcher.systemDispatch(this, Link(subject)) + subject match { + case a: InternalActorRef ⇒ + if (!watching.contains(a)) { + watching += a + a.sendSystemMessage(Watch(a, self)) + } + } subject } override final def unwatch(subject: ActorRef): ActorRef = { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - dispatcher.systemDispatch(this, Unlink(subject)) + subject match { + case a: InternalActorRef ⇒ + if (watching.contains(a)) { + watching -= a + a.sendSystemMessage(Unwatch(a, self)) + } + } subject } @@ -567,15 +579,17 @@ private[akka] class ActorCell( def resume(): Unit = if (isNormal) dispatcher resume this - def link(subject: ActorRef): Unit = if (!isTerminating) { - if (system.deathWatch.subscribe(self, subject)) { - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + subject)) + def addWatcher(watcher: ActorRef): Unit = if (!isTerminating) { + if (!watchedBy.contains(watcher)) { + watchedBy += watcher + if 
(system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " watched by " + watcher)) } } - def unlink(subject: ActorRef): Unit = if (!isTerminating) { - if (system.deathWatch.unsubscribe(self, subject)) { - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + subject)) + def remWatcher(watcher: ActorRef): Unit = if (!isTerminating) { + if (watchedBy.contains(watcher)) { + watchedBy -= watcher + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " unwatched by " + watcher)) } } @@ -603,15 +617,17 @@ private[akka] class ActorCell( try { message match { - case Create() ⇒ create() - case Recreate(cause) ⇒ recreate(cause) - case Link(subject) ⇒ link(subject) - case Unlink(subject) ⇒ unlink(subject) - case Suspend() ⇒ suspend() - case Resume() ⇒ resume() - case Terminate() ⇒ terminate() - case Supervise(child) ⇒ supervise(child) - case ChildTerminated(child) ⇒ handleChildTerminated(child) + case Create() ⇒ create() + case Recreate(cause) ⇒ recreate(cause) + case Watch(`self`, watcher) ⇒ addWatcher(watcher) + case Watch(watchee, `self`) ⇒ watch(watchee) + case Unwatch(`self`, watcher) ⇒ remWatcher(watcher) + case Unwatch(watchee, `self`) ⇒ unwatch(watchee) + case Suspend() ⇒ suspend() + case Resume() ⇒ resume() + case Terminate() ⇒ terminate() + case Supervise(child) ⇒ supervise(child) + case ChildTerminated(child) ⇒ handleChildTerminated(child) } } catch { case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, "error while processing " + message) @@ -698,7 +714,23 @@ private[akka] class ActorCell( } finally { try { parent.sendSystemMessage(ChildTerminated(self)) - system.deathWatch.publish(Terminated(self)) + if (!watchedBy.isEmpty) { + val terminated = Terminated(self)(stopped = true) + watchedBy foreach { + watcher ⇒ + try watcher.tell(terminated) catch { + case 
NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } + } + } + if (!watching.isEmpty) { + watching foreach { + watchee ⇒ + try watchee.tell(Unwatch(watchee, self)) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } + } + } if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) } finally { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 41473e7f7c..ca971de40e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -38,11 +38,6 @@ trait ActorRefProvider { */ def deadLetters: ActorRef - /** - * Reference to the death watch service. - */ - def deathWatch: DeathWatch - /** * The root path for all actors within this actor system, including remote * address if enabled. @@ -162,10 +157,11 @@ trait ActorRefFactory { * INTERNAL USE ONLY */ protected def provider: ActorRefProvider + /** - * INTERNAL USE ONLY + * Returns the default MessageDispatcher associated with this ActorRefFactory */ - protected def dispatcher: MessageDispatcher + implicit def dispatcher: MessageDispatcher /** * Father of all children created by this interface. 
@@ -339,8 +335,6 @@ class LocalActorRefProvider( override val deadLetters: InternalActorRef = new DeadLetterActorRef(this, rootPath / "deadLetters", eventStream) - override val deathWatch: DeathWatch = new LocalDeathWatch(1024) //TODO make configrable - /* * generate name for temporary actor refs */ @@ -516,8 +510,8 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system // chain death watchers so that killing guardian stops the application - deathWatch.subscribe(systemGuardian, guardian) - deathWatch.subscribe(rootGuardian, systemGuardian) + guardian.sendSystemMessage(Watch(systemGuardian, guardian)) + rootGuardian.sendSystemMessage(Watch(rootGuardian, systemGuardian)) eventStream.startDefaultLoggers(_system) } @@ -566,19 +560,3 @@ class LocalActorRefProvider( def getExternalAddressFor(addr: Address): Option[Address] = if (addr == rootPath.address) Some(addr) else None } - -class LocalDeathWatch(val mapSize: Int) extends DeathWatch with ActorClassification { - - override def publish(event: Event): Unit = { - val monitors = dissociate(classify(event)) - if (monitors.nonEmpty) monitors.foreach(_ ! event) - } - - override def subscribe(subscriber: Subscriber, to: Classifier): Boolean = { - if (!super.subscribe(subscriber, to)) { - subscriber ! Terminated(to) - false - } else true - } -} - diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index c5595212c2..94ee24336a 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -408,11 +408,6 @@ abstract class ExtendedActorSystem extends ActorSystem { */ def systemGuardian: InternalActorRef - /** - * Implementation of the mechanism which is used for watch()/unwatch(). 
- */ - def deathWatch: DeathWatch - /** * A ThreadFactory that can be used if the transport needs to create any Threads */ @@ -570,7 +565,6 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, def lookupRoot: InternalActorRef = provider.rootGuardian def guardian: InternalActorRef = provider.guardian def systemGuardian: InternalActorRef = provider.systemGuardian - def deathWatch: DeathWatch = provider.deathWatch def /(actorName: String): ActorPath = guardian.path / actorName def /(path: Iterable[String]): ActorPath = guardian.path / path diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 682e6ba4bf..8e160276e8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -102,11 +102,11 @@ private[akka] case class ChildTerminated(child: ActorRef) extends SystemMessage /** * INTERNAL API */ -private[akka] case class Link(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.watch +private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to self from ActorCell.watch /** * INTERNAL API */ -private[akka] case class Unlink(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch +private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Runnable { def run(): Unit = diff --git a/akka-actor/src/main/scala/akka/event/DeathWatch.scala b/akka-actor/src/main/scala/akka/event/DeathWatch.scala deleted file mode 100644 index 8bf6935619..0000000000 --- a/akka-actor/src/main/scala/akka/event/DeathWatch.scala +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.event - -import akka.actor._ - -/** - * The contract of DeathWatch is not properly expressed using the type system - * Whenever there is a publish, all listeners to the Terminated Actor should be atomically removed - * A failed subscribe should also only mean that the Classifier (ActorRef) that is listened to is already shut down - * See LocalDeathWatch for semantics - */ -abstract class DeathWatch extends ActorEventBus with ActorClassifier { - type Event = Terminated - - protected final def classify(event: Event): Classifier = event.actor -} diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index a20baaf533..634299248d 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -4,12 +4,11 @@ package akka.pattern import java.util.concurrent.TimeoutException -import akka.dispatch.{ Promise, Terminate, SystemMessage, Future } -import akka.event.DeathWatch import akka.util.Timeout import annotation.tailrec import akka.util.Unsafe import akka.actor._ +import akka.dispatch._ /** * This is what is used to complete a Future that is returned from an ask/? call, @@ -229,9 +228,14 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide if (!completedJustNow) provider.deadLetters ! 
message } - override def sendSystemMessage(message: SystemMessage): Unit = message match { - case _: Terminate ⇒ stop() - case _ ⇒ + override def sendSystemMessage(message: SystemMessage): Unit = { + val self = this + message match { + case _: Terminate ⇒ stop() + case Watch(`self`, watcher) ⇒ //FIXME IMPLEMENT + case Unwatch(`self`, watcher) ⇒ //FIXME IMPLEMENT + case _ ⇒ + } } override def isTerminated: Boolean = state match { @@ -241,23 +245,22 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @tailrec override def stop(): Unit = { - def ensurePromiseCompleted(): Unit = - if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) + def ensureCompleted(): Unit = if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) state match { - case null ⇒ - // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either - if (updateState(null, Stopped)) ensurePromiseCompleted() - else stop() + case null ⇒ // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either + if (updateState(null, Stopped)) ensureCompleted() else stop() case p: ActorPath ⇒ if (updateState(p, StoppedWithPath(p))) { try { - ensurePromiseCompleted() - provider.deathWatch.publish(Terminated(this)) + ensureCompleted() + val termination = Terminated(this)(stopped = true) + // watchedBy foreach { w => w.tell(termination) } + // watching foreach { w.sendSystemMessage(Unwatch(w, self)) } } finally { provider.unregisterTempActor(p) } } else stop() - case Stopped | _: StoppedWithPath ⇒ + case Stopped | _: StoppedWithPath ⇒ // already stopped case Registering ⇒ stop() // spin until registration is completed before stopping } } diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index adcbe53f0b..d1e7fab327 100644 --- 
a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -4,9 +4,9 @@ package akka.pattern -import akka.dispatch.{ Promise, Future } import akka.actor._ import akka.util.{ Timeout, Duration } +import akka.dispatch.{ Unwatch, Watch, Promise, Future } trait GracefulStopSupport { /** @@ -39,11 +39,11 @@ trait GracefulStopSupport { } else system match { case e: ExtendedActorSystem ⇒ val ref = PromiseActorRef(e.provider, Timeout(timeout)) - e.deathWatch.subscribe(ref, target) + ref.sendSystemMessage(Watch(target, ref)) ref.result onComplete { case Right(Terminated(`target`)) ⇒ () // Ignore - case _ ⇒ e.deathWatch.unsubscribe(ref, target) - } // Just making sure we're not leaking here + case _ ⇒ ref.sendSystemMessage(Unwatch(target, ref)) // Just making sure we're not leaking here + } target ! PoisonPill ref.result map { case Terminated(`target`) ⇒ true } case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") diff --git a/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java index bcc4705948..2d40071fe8 100644 --- a/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java +++ b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java @@ -182,7 +182,7 @@ public class FaultHandlingTestBase { final TestProbe probe = new TestProbe(system); probe.watch(child); child.tell(new IllegalArgumentException()); - probe.expectMsg(new Terminated(child)); + probe.expectMsg(new Terminated(child, true)); //#stop //#escalate-kill @@ -190,7 +190,7 @@ public class FaultHandlingTestBase { probe.watch(child); assert Await.result(ask(child, "get", 5000), timeout).equals(0); child.tell(new Exception()); - probe.expectMsg(new Terminated(child)); + probe.expectMsg(new Terminated(child, true)); //#escalate-kill //#escalate-restart diff --git a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala 
b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala index 8ce16f1021..4e0fdc5ee5 100644 --- a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala @@ -111,7 +111,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { //#stop watch(child) // have testActor watch “child” child ! new IllegalArgumentException // break it - expectMsg(Terminated(child)) + expectMsg(Terminated(child)(stopped = true)) child.isTerminated must be(true) //#stop } @@ -125,7 +125,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { expectMsg(0) child2 ! new Exception("CRASH") // escalate failure - expectMsg(Terminated(child2)) + expectMsg(Terminated(child2)(stopped = true)) //#escalate-kill //#escalate-restart val supervisor2 = system.actorOf(Props[Supervisor2], "supervisor2") diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index a12c5f5578..eaecf67792 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -6,7 +6,7 @@ package akka.remote import akka.actor._ import akka.dispatch._ -import akka.event.{ DeathWatch, Logging, LoggingAdapter } +import akka.event.{ Logging, LoggingAdapter } import akka.event.EventStream import akka.serialization.Serialization import akka.serialization.SerializationExtension @@ -34,8 +34,6 @@ private[akka] class RemoteActorRefProvider( override def rootPath: ActorPath = local.rootPath override def deadLetters: InternalActorRef = local.deadLetters - override val deathWatch: DeathWatch = new RemoteDeathWatch(local.deathWatch, this) - // these are only available after init() override def rootGuardian: InternalActorRef = local.rootGuardian override def guardian: InternalActorRef = local.guardian @@ -246,25 +244,4 @@ private[akka] class RemoteActorRef 
private[akka] ( @throws(classOf[java.io.ObjectStreamException]) private def writeReplace(): AnyRef = SerializedActorRef(path) -} - -private[akka] class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { - - override def subscribe(watcher: ActorRef, watched: ActorRef): Boolean = watched match { - case r: RemoteRef ⇒ - val ret = local.subscribe(watcher, watched) - provider.actorFor(r.path.root / "remote") ! DaemonMsgWatch(watcher, watched) - ret - case l: LocalRef ⇒ - local.subscribe(watcher, watched) - case _ ⇒ - provider.log.error("unknown ActorRef type {} as DeathWatch target", watched.getClass) - false - } - - override def unsubscribe(watcher: ActorRef, watched: ActorRef): Boolean = local.unsubscribe(watcher, watched) - - override def unsubscribe(watcher: ActorRef): Unit = local.unsubscribe(watcher) - - override def publish(event: Terminated): Unit = local.publish(event) -} +} \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index 7e4beecc7d..1e81cfaac6 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -8,6 +8,7 @@ import scala.annotation.tailrec import akka.actor.{ VirtualPathContainer, Terminated, Deploy, Props, Nobody, LocalActorRef, InternalActorRef, Address, ActorSystemImpl, ActorRef, ActorPathExtractor, ActorPath, Actor } import akka.event.LoggingAdapter +import akka.dispatch.Watch private[akka] sealed trait DaemonMsg private[akka] case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) extends DaemonMsg @@ -62,18 +63,19 @@ private[akka] class RemoteSystemDaemon(system: ActorSystemImpl, _path: ActorPath val actor = system.provider.actorOf(system, props, supervisor.asInstanceOf[InternalActorRef], path, false, Some(deploy), true) addChild(subpath.mkString("/"), actor) - 
system.deathWatch.subscribe(this, actor) + this.sendSystemMessage(Watch(actor, this)) case _ ⇒ log.error("remote path does not match path from message [{}]", message) } case DaemonMsgWatch(watcher, watched) ⇒ - val other = system.actorFor(watcher.path.root / "remote") - system.deathWatch.subscribe(other, watched) + system.actorFor(watcher.path.root / "remote") match { + case a: InternalActorRef ⇒ a.sendSystemMessage(Watch(watched, a)) + } } case Terminated(child: LocalActorRef) ⇒ removeChild(child.path.elements.drop(1).mkString("/")) - case t: Terminated ⇒ system.deathWatch.publish(t) + case t: Terminated ⇒ //FIXME system.deathWatch.publish(t) case unknown ⇒ log.warning("Unknown message {} received by {}", unknown, this) } From 41cd0fcf45ef07de141612f05126d8a0a0847351 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 28 May 2012 17:13:57 +0200 Subject: [PATCH 195/538] Making dispatcher in ActorRefFactory public and implicit, by request from Matthias Doenitz (spray.cc) --- akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 41473e7f7c..bbb71bc474 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -163,9 +163,9 @@ trait ActorRefFactory { */ protected def provider: ActorRefProvider /** - * INTERNAL USE ONLY + * Returns the default MessageDispatcher used by this ActorRefFactory */ - protected def dispatcher: MessageDispatcher + implicit def dispatcher: MessageDispatcher /** * Father of all children created by this interface. 
From 4ec5cd329b88699f34f91b38ce0669369b38a5e6 Mon Sep 17 00:00:00 2001 From: Heiko Seeberger Date: Mon, 28 May 2012 17:55:59 +0200 Subject: [PATCH 196/538] closes #2146: Publish OSGi bundle for akka-actor --- project/AkkaBuild.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index dbe9fbae9e..85bba37987 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -10,8 +10,7 @@ import com.typesafe.sbtmultijvm.MultiJvmPlugin import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions } import com.typesafe.sbtscalariform.ScalariformPlugin import com.typesafe.sbtscalariform.ScalariformPlugin.ScalariformKeys -import com.typesafe.sbtosgi.OsgiPlugin.osgiSettings -import com.typesafe.sbtosgi.OsgiKeys +import com.typesafe.sbtosgi.OsgiPlugin.{ OsgiKeys, osgiSettings } import java.lang.Boolean.getBoolean import Sphinx.{ sphinxDocs, sphinxHtml, sphinxLatex, sphinxPdf, sphinxPygments, sphinxTags } @@ -50,6 +49,8 @@ object AkkaBuild extends Build { autoCompilerPlugins := true, libraryDependencies <+= scalaVersion { v => compilerPlugin("org.scala-lang.plugins" % "continuations" % v) }, scalacOptions += "-P:continuations:enable", + packagedArtifact in (Compile, packageBin) <<= (artifact in (Compile, packageBin), OsgiKeys.bundle).identityMap, + artifact in (Compile, packageBin) ~= (_.copy(`type` = "bundle")), // to fix scaladoc generation fullClasspath in doc in Compile <<= fullClasspath in Compile ) From c7d7bbb93efb222ca116fe474cb682dd63e4498a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 May 2012 18:37:41 +0200 Subject: [PATCH 197/538] Test tags not working as expected, see #2145 * Tests tagged with timing or long-running are excluded by default, as before * Use -Dakka.test.tags.include=long-running,timing to run all untagged and also tests tagged with long-running or timing * Use -Dakka.test.tags.only=timing,long-running to only run 
tests tagged with long-running or timing, i.e. untagged tests, or tests tagged with something else are not run * I think the above two parameters covers our needs, but there is also -Dakka.test.tags.exclude=some-other to be able to exclude tests that are included by default --- .../test/scala/akka/actor/SchedulerSpec.scala | 2 +- project/AkkaBuild.scala | 48 +++++++++++-------- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index beeb2a4c3b..c67bcb44af 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -48,7 +48,7 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout assert(countDownLatch2.await(2, TimeUnit.SECONDS)) } - "should stop continuous scheduling if the receiving actor has been terminated" taggedAs TimingTest in { + "stop continuous scheduling if the receiving actor has been terminated" taggedAs TimingTest in { val actor = system.actorOf(Props(new Actor { def receive = { case x ⇒ testActor ! 
x diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index dbe9fbae9e..4f322fdc2f 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -297,6 +297,7 @@ object AkkaBuild extends Build { val excludeTestNames = SettingKey[Seq[String]]("exclude-test-names") val excludeTestTags = SettingKey[Seq[String]]("exclude-test-tags") val includeTestTags = SettingKey[Seq[String]]("include-test-tags") + val onlyTestTags = SettingKey[Seq[String]]("only-test-tags") val defaultExcludedTags = Seq("timing", "long-running") @@ -308,29 +309,37 @@ object AkkaBuild extends Build { (if (getBoolean("sbt.log.noformat")) List("-Dakka.test.nocolor=true") else Nil) } - // for excluding tests by name (or use system property: -Dakka.test.names.exclude=TimingSpec) - lazy val defaultExcludeTestNames: Seq[String] = { - val exclude = System.getProperty("akka.test.names.exclude", "") - if (exclude.isEmpty) Seq.empty else exclude.split(",").toSeq + // for excluding tests by name use system property: -Dakka.test.names.exclude=TimingSpec + // not supported by multi-jvm tests + lazy val useExcludeTestNames: Seq[String] = systemPropertyAsSeq("akka.test.names.exclude") + + // for excluding tests by tag use system property: -Dakka.test.tags.exclude= + // note that it will not be used if you specify -Dakka.test.tags.only + lazy val useExcludeTestTags: Seq[String] = { + if (useOnlyTestTags.isEmpty) systemPropertyAsSeq("akka.test.tags.exclude", defaultExcludedTags) + else Seq.empty } - // for excluding tests by tag (or use system property: -Dakka.test.tags.exclude=timing) - lazy val defaultExcludeTestTags: Seq[String] = { - val exclude = System.getProperty("akka.test.tags.exclude", "") - if (exclude.isEmpty) defaultExcludedTags else exclude.split(",").toSeq + // for including tests by tag use system property: -Dakka.test.tags.include= + // note that it will not be used if you specify -Dakka.test.tags.only + lazy val useIncludeTestTags: Seq[String] = { + if (useOnlyTestTags.isEmpty) 
systemPropertyAsSeq("akka.test.tags.include") + else Seq.empty } - // for including tests by tag (or use system property: -Dakka.test.tags.include=timing) - lazy val defaultIncludeTestTags: Seq[String] = { - val include = System.getProperty("akka.test.tags.include", "") - if (include.isEmpty) Seq.empty else include.split(",").toSeq + // for running only tests by tag use system property: -Dakka.test.tags.only= + lazy val useOnlyTestTags: Seq[String] = systemPropertyAsSeq("akka.test.tags.only") + + def systemPropertyAsSeq(name: String, default: Seq[String] = Seq.empty): Seq[String] = { + val prop = System.getProperty(name, "") + if (prop.isEmpty) default else prop.split(",").toSeq } lazy val defaultMultiJvmScalatestOptions: Seq[String] = { - val excludeTags = (defaultExcludeTestTags.toSet -- defaultIncludeTestTags.toSet).toSeq + val excludeTags = (useExcludeTestTags.toSet -- useIncludeTestTags.toSet).toSeq Seq("-r", "org.scalatest.akka.QuietReporter") ++ (if (excludeTags.isEmpty) Seq.empty else Seq("-l", excludeTags.mkString(" "))) ++ - (if (defaultIncludeTestTags.isEmpty) Seq.empty else Seq("-n", defaultIncludeTestTags.mkString(" "))) + (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", useOnlyTestTags.mkString(" "))) } lazy val defaultSettings = baseSettings ++ formatSettings ++ Seq( @@ -345,9 +354,10 @@ object AkkaBuild extends Build { parallelExecution in Test := System.getProperty("akka.parallelExecution", "false").toBoolean, - excludeTestNames := defaultExcludeTestNames, - excludeTestTags := defaultExcludeTestTags, - includeTestTags := defaultIncludeTestTags, + excludeTestNames := useExcludeTestNames, + excludeTestTags := useExcludeTestTags, + includeTestTags := useIncludeTestTags, + onlyTestTags := useOnlyTestTags, // add filters for tests excluded by name testOptions in Test <++= excludeTestNames map { _.map(exclude => Tests.Filter(test => !test.contains(exclude))) }, @@ -358,8 +368,8 @@ object AkkaBuild extends Build { if (tags.isEmpty) Seq.empty else 
Seq(Tests.Argument("-l", tags.mkString(" "))) }, - // add arguments for tests included by tag - testOptions in Test <++= includeTestTags map { tags => + // add arguments for running only tests by tag + testOptions in Test <++= onlyTestTags map { tags => if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-n", tags.mkString(" "))) }, From 260e2814495fb893fab3f089748c443c3f03fb23 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 01:43:39 +0200 Subject: [PATCH 198/538] #2102 - porting over the fix for Netty/356 --- .../java/akka/util/internal/HashedWheelTimer.java | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java index 25841861c5..7a497b8442 100644 --- a/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java +++ b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java @@ -89,6 +89,7 @@ public class HashedWheelTimer implements Timer { final ReusableIterator[] iterators; final int mask; final ReadWriteLock lock = new ReentrantReadWriteLock(); + final boolean isWindows = System.getProperty("os.name", "").toLowerCase().indexOf("win") >= 0; volatile int wheelCursor; private LoggingAdapter logger; @@ -389,7 +390,17 @@ public class HashedWheelTimer implements Timer { for (;;) { final long currentTime = System.nanoTime(); - final long sleepTime = (tickDuration * tick - (currentTime - startTime)); + + long sleepTime = tickDuration * tick - (currentTime - startTime); + + // Check if we run on windows, as if thats the case we will need + // to round the sleepTime as workaround for a bug that only affect + // the JVM if it runs on windows. 
+ // + // See https://github.com/netty/netty/issues/356 + if (isWindows) { + sleepTime = (sleepTime / 10) * 10; + } if (sleepTime <= 0) { break; From 5c863ad409d1654c54a442b7a5d0e2723cc0b955 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 02:13:25 +0200 Subject: [PATCH 199/538] #2051 - Removing the embedded config library and switch to using the published OSGi-enabled 0.4.1 version --- .../main/java/com/typesafe/config/Config.java | 509 --------- .../com/typesafe/config/ConfigException.java | 366 ------- .../com/typesafe/config/ConfigFactory.java | 706 ------------- .../typesafe/config/ConfigIncludeContext.java | 30 - .../com/typesafe/config/ConfigIncluder.java | 41 - .../java/com/typesafe/config/ConfigList.java | 44 - .../com/typesafe/config/ConfigMergeable.java | 47 - .../com/typesafe/config/ConfigObject.java | 113 -- .../com/typesafe/config/ConfigOrigin.java | 82 -- .../typesafe/config/ConfigParseOptions.java | 150 --- .../com/typesafe/config/ConfigParseable.java | 42 - .../typesafe/config/ConfigResolveOptions.java | 75 -- .../com/typesafe/config/ConfigSyntax.java | 32 - .../java/com/typesafe/config/ConfigUtil.java | 74 -- .../java/com/typesafe/config/ConfigValue.java | 60 -- .../typesafe/config/ConfigValueFactory.java | 141 --- .../com/typesafe/config/ConfigValueType.java | 12 - .../config/impl/AbstractConfigObject.java | 427 -------- .../config/impl/AbstractConfigValue.java | 221 ---- .../typesafe/config/impl/ConfigBoolean.java | 39 - .../config/impl/ConfigDelayedMerge.java | 244 ----- .../config/impl/ConfigDelayedMergeObject.java | 233 ----- .../typesafe/config/impl/ConfigDouble.java | 53 - .../com/typesafe/config/impl/ConfigImpl.java | 419 -------- .../typesafe/config/impl/ConfigImplUtil.java | 185 ---- .../com/typesafe/config/impl/ConfigInt.java | 53 - .../com/typesafe/config/impl/ConfigLong.java | 53 - .../com/typesafe/config/impl/ConfigNull.java | 49 - .../typesafe/config/impl/ConfigNumber.java | 102 -- 
.../typesafe/config/impl/ConfigString.java | 44 - .../config/impl/ConfigSubstitution.java | 300 ------ .../config/impl/DefaultTransformer.java | 81 -- .../com/typesafe/config/impl/FromMapMode.java | 8 - .../typesafe/config/impl/MergeableValue.java | 9 - .../com/typesafe/config/impl/OriginType.java | 8 - .../com/typesafe/config/impl/Parseable.java | 637 ------------ .../java/com/typesafe/config/impl/Parser.java | 965 ------------------ .../java/com/typesafe/config/impl/Path.java | 208 ---- .../com/typesafe/config/impl/PathBuilder.java | 60 -- .../config/impl/PropertiesParser.java | 191 ---- .../typesafe/config/impl/ResolveStatus.java | 26 - .../typesafe/config/impl/SimpleConfig.java | 841 --------------- .../config/impl/SimpleConfigList.java | 383 ------- .../config/impl/SimpleConfigObject.java | 215 ---- .../config/impl/SimpleConfigOrigin.java | 341 ------- .../config/impl/SubstitutionExpression.java | 50 - .../config/impl/SubstitutionResolver.java | 55 - .../java/com/typesafe/config/impl/Token.java | 79 -- .../com/typesafe/config/impl/TokenType.java | 22 - .../com/typesafe/config/impl/Tokenizer.java | 596 ----------- .../java/com/typesafe/config/impl/Tokens.java | 413 -------- .../com/typesafe/config/impl/Unmergeable.java | 16 - .../java/com/typesafe/config/package.html | 42 - project/AkkaBuild.scala | 11 +- 54 files changed, 8 insertions(+), 10195 deletions(-) delete mode 100755 akka-actor/src/main/java/com/typesafe/config/Config.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigException.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigFactory.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigIncludeContext.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigIncluder.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigList.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigMergeable.java delete mode 100755 
akka-actor/src/main/java/com/typesafe/config/ConfigObject.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigOrigin.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigParseOptions.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigParseable.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigResolveOptions.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigSyntax.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigUtil.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigValue.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigValueFactory.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/ConfigValueType.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/AbstractConfigObject.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/AbstractConfigValue.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigBoolean.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigDelayedMerge.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigDelayedMergeObject.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigDouble.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigImpl.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigImplUtil.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigInt.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigLong.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigNull.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigNumber.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigString.java delete mode 
100755 akka-actor/src/main/java/com/typesafe/config/impl/ConfigSubstitution.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/DefaultTransformer.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/FromMapMode.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/MergeableValue.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/OriginType.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/Parseable.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/Parser.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/Path.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/PathBuilder.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/PropertiesParser.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/ResolveStatus.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfig.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigList.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigObject.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigOrigin.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/SubstitutionExpression.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/SubstitutionResolver.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/Token.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/TokenType.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/Tokenizer.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/Tokens.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/impl/Unmergeable.java delete mode 100755 akka-actor/src/main/java/com/typesafe/config/package.html diff 
--git a/akka-actor/src/main/java/com/typesafe/config/Config.java b/akka-actor/src/main/java/com/typesafe/config/Config.java deleted file mode 100755 index 629b107be9..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/Config.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * An immutable map from config paths to config values. - * - *

- * Contrast with {@link ConfigObject} which is a map from config keys, - * rather than paths, to config values. A {@code Config} contains a tree of - * {@code ConfigObject}, and {@link Config#root()} returns the tree's root - * object. - * - *

- * Throughout the API, there is a distinction between "keys" and "paths". A key - * is a key in a JSON object; it's just a string that's the key in a map. A - * "path" is a parseable expression with a syntax and it refers to a series of - * keys. Path expressions are described in the spec for - * Human-Optimized Config Object Notation. In brief, a path is - * period-separated so "a.b.c" looks for key c in object b in object a in the - * root object. Sometimes double quotes are needed around special characters in - * path expressions. - * - *

- * The API for a {@code Config} is in terms of path expressions, while the API - * for a {@code ConfigObject} is in terms of keys. Conceptually, {@code Config} - * is a one-level map from paths to values, while a - * {@code ConfigObject} is a tree of nested maps from keys to values. - * - *

- * Use {@link ConfigUtil#joinPath} and {@link ConfigUtil#splitPath} to convert - * between path expressions and individual path elements (keys). - * - *

- * Another difference between {@code Config} and {@code ConfigObject} is that - * conceptually, {@code ConfigValue}s with a {@link ConfigValue#valueType() - * valueType()} of {@link ConfigValueType#NULL NULL} exist in a - * {@code ConfigObject}, while a {@code Config} treats null values as if they - * were missing. - * - *

- * {@code Config} is an immutable object and thus safe to use from multiple - * threads. There's never a need for "defensive copies." - * - *

- * The "getters" on a {@code Config} all work in the same way. They never return - * null, nor do they return a {@code ConfigValue} with - * {@link ConfigValue#valueType() valueType()} of {@link ConfigValueType#NULL - * NULL}. Instead, they throw {@link ConfigException.Missing} if the value is - * completely absent or set to null. If the value is set to null, a subtype of - * {@code ConfigException.Missing} called {@link ConfigException.Null} will be - * thrown. {@link ConfigException.WrongType} will be thrown anytime you ask for - * a type and the value has an incompatible type. Reasonable type conversions - * are performed for you though. - * - *

- * If you want to iterate over the contents of a {@code Config}, you can get its - * {@code ConfigObject} with {@link #root()}, and then iterate over the - * {@code ConfigObject} (which implements java.util.Map). Or, you - * can use {@link #entrySet()} which recurses the object tree for you and builds - * up a Set of all path-value pairs where the value is not null. - * - *

- * Do not implement {@code Config}; it should only be implemented by - * the config library. Arbitrary implementations will not work because the - * library internals assume a specific concrete implementation. Also, this - * interface is likely to grow new methods over time, so third-party - * implementations will break. - */ -public interface Config extends ConfigMergeable { - /** - * Gets the {@code Config} as a tree of {@link ConfigObject}. This is a - * constant-time operation (it is not proportional to the number of values - * in the {@code Config}). - * - * @return the root object in the configuration - */ - ConfigObject root(); - - /** - * Gets the origin of the {@code Config}, which may be a file, or a file - * with a line number, or just a descriptive phrase. - * - * @return the origin of the {@code Config} for use in error messages - */ - ConfigOrigin origin(); - - @Override - Config withFallback(ConfigMergeable other); - - /** - * Returns a replacement config with all substitutions (the - * ${foo.bar} syntax, see the - * spec) resolved. Substitutions are looked up using this - * Config as the root object, that is, a substitution - * ${foo.bar} will be replaced with the result of - * getValue("foo.bar"). - * - *

- * This method uses {@link ConfigResolveOptions#defaults()}, there is - * another variant {@link Config#resolve(ConfigResolveOptions)} which lets - * you specify non-default options. - * - *

- * A given {@link Config} must be resolved before using it to retrieve - * config values, but ideally should be resolved one time for your entire - * stack of fallbacks (see {@link Config#withFallback}). Otherwise, some - * substitutions that could have resolved with all fallbacks available may - * not resolve, which will be a user-visible oddity. - * - *

- * resolve() should be invoked on root config objects, rather - * than on a subtree (a subtree is the result of something like - * config.getConfig("foo")). The problem with - * resolve() on a subtree is that substitutions are relative to - * the root of the config and the subtree will have no way to get values - * from the root. For example, if you did - * config.getConfig("foo").resolve() on the below config file, - * it would not work: - * - *

-     *   common-value = 10
-     *   foo {
-     *      whatever = ${common-value}
-     *   }
-     * 
- * - * @return an immutable object with substitutions resolved - * @throws ConfigException.UnresolvedSubstitution - * if any substitutions refer to nonexistent paths - * @throws ConfigException - * some other config exception if there are other problems - */ - Config resolve(); - - /** - * Like {@link Config#resolve()} but allows you to specify non-default - * options. - * - * @param options - * resolve options - * @return the resolved Config - */ - Config resolve(ConfigResolveOptions options); - - /** - * Validates this config against a reference config, throwing an exception - * if it is invalid. The purpose of this method is to "fail early" with a - * comprehensive list of problems; in general, anything this method can find - * would be detected later when trying to use the config, but it's often - * more user-friendly to fail right away when loading the config. - * - *

- * Using this method is always optional, since you can "fail late" instead. - * - *

- * You must restrict validation to paths you "own" (those whose meaning are - * defined by your code module). If you validate globally, you may trigger - * errors about paths that happen to be in the config but have nothing to do - * with your module. It's best to allow the modules owning those paths to - * validate them. Also, if every module validates only its own stuff, there - * isn't as much redundant work being done. - * - *

- * If no paths are specified in checkValid()'s parameter list, - * validation is for the entire config. - * - *

- * If you specify paths that are not in the reference config, those paths - * are ignored. (There's nothing to validate.) - * - *

- * Here's what validation involves: - * - *

    - *
  • All paths found in the reference config must be present in this - * config or an exception will be thrown. - *
  • - * Some changes in type from the reference config to this config will cause - * an exception to be thrown. Not all potential type problems are detected, - * in particular it's assumed that strings are compatible with everything - * except objects and lists. This is because string types are often "really" - * some other type (system properties always start out as strings, or a - * string like "5ms" could be used with {@link #getMilliseconds}). Also, - * it's allowed to set any type to null or override null with any type. - *
  • - * Any unresolved substitutions in this config will cause a validation - * failure; both the reference config and this config should be resolved - * before validation. If the reference config is unresolved, it's a bug in - * the caller of this method. - *
- * - *

- * If you want to allow a certain setting to have a flexible type (or - * otherwise want validation to be looser for some settings), you could - * either remove the problematic setting from the reference config provided - * to this method, or you could intercept the validation exception and - * screen out certain problems. Of course, this will only work if all other - * callers of this method are careful to restrict validation to their own - * paths, as they should be. - * - *

- * If validation fails, the thrown exception contains a list of all problems - * found. See {@link ConfigException.ValidationFailed#problems}. The - * exception's getMessage() will have all the problems - * concatenated into one huge string, as well. - * - *

- * Again, checkValid() can't guess every domain-specific way a - * setting can be invalid, so some problems may arise later when attempting - * to use the config. checkValid() is limited to reporting - * generic, but common, problems such as missing settings and blatant type - * incompatibilities. - * - * @param reference - * a reference configuration - * @param restrictToPaths - * only validate values underneath these paths that your code - * module owns and understands - * @throws ConfigException.ValidationFailed - * if there are any validation issues - * @throws ConfigException.NotResolved - * if this config is not resolved - * @throws ConfigException.BugOrBroken - * if the reference config is unresolved or caller otherwise - * misuses the API - */ - void checkValid(Config reference, String... restrictToPaths); - - /** - * Checks whether a value is present and non-null at the given path. This - * differs in two ways from {@code Map.containsKey()} as implemented by - * {@link ConfigObject}: it looks for a path expression, not a key; and it - * returns false for null values, while {@code containsKey()} returns true - * indicating that the object contains a null value for the key. - * - *

- * If a path exists according to {@link #hasPath(String)}, then - * {@link #getValue(String)} will never throw an exception. However, the - * typed getters, such as {@link #getInt(String)}, will still throw if the - * value is not convertible to the requested type. - * - * @param path - * the path expression - * @return true if a non-null value is present at the path - * @throws ConfigException.BadPath - * if the path expression is invalid - */ - boolean hasPath(String path); - - /** - * Returns true if the {@code Config}'s root object contains no key-value - * pairs. - * - * @return true if the configuration is empty - */ - boolean isEmpty(); - - /** - * Returns the set of path-value pairs, excluding any null values, found by - * recursing {@link #root() the root object}. Note that this is very - * different from root().entrySet() which returns the set of - * immediate-child keys in the root object and includes null values. - * - * @return set of paths with non-null values, built up by recursing the - * entire tree of {@link ConfigObject} - */ - Set> entrySet(); - - /** - * - * @param path - * path expression - * @return the boolean value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to boolean - */ - boolean getBoolean(String path); - - /** - * @param path - * path expression - * @return the numeric value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to a number - */ - Number getNumber(String path); - - /** - * @param path - * path expression - * @return the 32-bit integer value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to an int (for example it is out - * of range, or it's a boolean value) - */ - int getInt(String path); - - 
/** - * @param path - * path expression - * @return the 64-bit long value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to a long - */ - long getLong(String path); - - /** - * @param path - * path expression - * @return the floating-point value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to a double - */ - double getDouble(String path); - - /** - * @param path - * path expression - * @return the string value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to a string - */ - String getString(String path); - - /** - * @param path - * path expression - * @return the {@link ConfigObject} value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to an object - */ - ConfigObject getObject(String path); - - /** - * @param path - * path expression - * @return the nested {@code Config} value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to a Config - */ - Config getConfig(String path); - - /** - * Gets the value at the path as an unwrapped Java boxed value ( - * {@link java.lang.Boolean Boolean}, {@link java.lang.Integer Integer}, and - * so on - see {@link ConfigValue#unwrapped()}). 
- * - * @param path - * path expression - * @return the unwrapped value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - */ - Object getAnyRef(String path); - - /** - * Gets the value at the given path, unless the value is a - * null value or missing, in which case it throws just like - * the other getters. Use {@code get()} on the {@link - * Config#root()} object (or other object in the tree) if you - * want an unprocessed value. - * - * @param path - * path expression - * @return the value at the requested path - * @throws ConfigException.Missing - * if value is absent or null - */ - ConfigValue getValue(String path); - - /** - * Gets a value as a size in bytes (parses special strings like "128M"). If - * the value is already a number, then it's left alone; if it's a string, - * it's parsed understanding unit suffixes such as "128K", as documented in - * the the - * spec. - * - * @param path - * path expression - * @return the value at the requested path, in bytes - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to Long or String - * @throws ConfigException.BadValue - * if value cannot be parsed as a size in bytes - */ - Long getBytes(String path); - - /** - * Get value as a duration in milliseconds. If the value is already a - * number, then it's left alone; if it's a string, it's parsed understanding - * units suffixes like "10m" or "5ns" as documented in the the - * spec. - * - * @param path - * path expression - * @return the duration value at the requested path, in milliseconds - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to Long or String - * @throws ConfigException.BadValue - * if value cannot be parsed as a number of milliseconds - */ - Long getMilliseconds(String path); - - /** - * Get value as a duration in nanoseconds. 
If the value is already a number - * it's taken as milliseconds and converted to nanoseconds. If it's a - * string, it's parsed understanding unit suffixes, as for - * {@link #getMilliseconds(String)}. - * - * @param path - * path expression - * @return the duration value at the requested path, in nanoseconds - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to Long or String - * @throws ConfigException.BadValue - * if value cannot be parsed as a number of nanoseconds - */ - Long getNanoseconds(String path); - - /** - * Gets a list value (with any element type) as a {@link ConfigList}, which - * implements {@code java.util.List}. Throws if the path is - * unset or null. - * - * @param path - * the path to the list value. - * @return the {@link ConfigList} at the path - * @throws ConfigException.Missing - * if value is absent or null - * @throws ConfigException.WrongType - * if value is not convertible to a ConfigList - */ - ConfigList getList(String path); - - List getBooleanList(String path); - - List getNumberList(String path); - - List getIntList(String path); - - List getLongList(String path); - - List getDoubleList(String path); - - List getStringList(String path); - - List getObjectList(String path); - - List getConfigList(String path); - - List getAnyRefList(String path); - - List getBytesList(String path); - - List getMillisecondsList(String path); - - List getNanosecondsList(String path); - - /** - * Clone the config with only the given path (and its children) retained; - * all sibling paths are removed. - * - * @param path - * path to keep - * @return a copy of the config minus all paths except the one specified - */ - Config withOnlyPath(String path); - - /** - * Clone the config with the given path removed. 
- * - * @param path - * path to remove - * @return a copy of the config minus the specified path - */ - Config withoutPath(String path); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigException.java b/akka-actor/src/main/java/com/typesafe/config/ConfigException.java deleted file mode 100755 index 3c31d811c3..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigException.java +++ /dev/null @@ -1,366 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - - -/** - * All exceptions thrown by the library are subclasses of - * ConfigException. - */ -public abstract class ConfigException extends RuntimeException { - private static final long serialVersionUID = 1L; - - final private ConfigOrigin origin; - - protected ConfigException(ConfigOrigin origin, String message, - Throwable cause) { - super(origin.description() + ": " + message, cause); - this.origin = origin; - } - - protected ConfigException(ConfigOrigin origin, String message) { - this(origin.description() + ": " + message, null); - } - - protected ConfigException(String message, Throwable cause) { - super(message, cause); - this.origin = null; - } - - protected ConfigException(String message) { - this(message, null); - } - - /** - * Returns an "origin" (such as a filename and line number) for the - * exception, or null if none is available. If there's no sensible origin - * for a given exception, or the kind of exception doesn't meaningfully - * relate to a particular origin file, this returns null. Never assume this - * will return non-null, it can always return null. - * - * @return origin of the problem, or null if unknown/inapplicable - */ - public ConfigOrigin origin() { - return origin; - } - - /** - * Exception indicating that the type of a value does not match the type you - * requested. 
- * - */ - public static class WrongType extends ConfigException { - private static final long serialVersionUID = 1L; - - public WrongType(ConfigOrigin origin, String path, String expected, - String actual, - Throwable cause) { - super(origin, path + " has type " + actual + " rather than " - + expected, - cause); - } - - public WrongType(ConfigOrigin origin, String path, String expected, - String actual) { - this(origin, path, expected, actual, null); - } - - WrongType(ConfigOrigin origin, String message, Throwable cause) { - super(origin, message, cause); - } - - WrongType(ConfigOrigin origin, String message) { - this(origin, message, null); - } - } - - /** - * Exception indicates that the setting was never set to anything, not even - * null. - */ - public static class Missing extends ConfigException { - private static final long serialVersionUID = 1L; - - public Missing(String path, Throwable cause) { - super("No configuration setting found for key '" + path + "'", - cause); - } - - public Missing(String path) { - this(path, null); - } - - protected Missing(ConfigOrigin origin, String message, Throwable cause) { - super(origin, message, cause); - } - - protected Missing(ConfigOrigin origin, String message) { - this(origin, message, null); - } - } - - /** - * Exception indicates that the setting was treated as missing because it - * was set to null. 
- */ - public static class Null extends Missing { - private static final long serialVersionUID = 1L; - - private static String makeMessage(String path, String expected) { - if (expected != null) { - return "Configuration key '" + path - + "' is set to null but expected " + expected; - } else { - return "Configuration key '" + path + "' is null"; - } - } - - public Null(ConfigOrigin origin, String path, String expected, - Throwable cause) { - super(origin, makeMessage(path, expected), cause); - } - - public Null(ConfigOrigin origin, String path, String expected) { - this(origin, path, expected, null); - } - } - - /** - * Exception indicating that a value was messed up, for example you may have - * asked for a duration and the value can't be sensibly parsed as a - * duration. - * - */ - public static class BadValue extends ConfigException { - private static final long serialVersionUID = 1L; - - public BadValue(ConfigOrigin origin, String path, String message, - Throwable cause) { - super(origin, "Invalid value at '" + path + "': " + message, cause); - } - - public BadValue(ConfigOrigin origin, String path, String message) { - this(origin, path, message, null); - } - - public BadValue(String path, String message, Throwable cause) { - super("Invalid value at '" + path + "': " + message, cause); - } - - public BadValue(String path, String message) { - this(path, message, null); - } - } - - /** - * Exception indicating that a path expression was invalid. Try putting - * double quotes around path elements that contain "special" characters. - * - */ - public static class BadPath extends ConfigException { - private static final long serialVersionUID = 1L; - - public BadPath(ConfigOrigin origin, String path, String message, - Throwable cause) { - super(origin, - path != null ? 
("Invalid path '" + path + "': " + message) - : message, cause); - } - - public BadPath(ConfigOrigin origin, String path, String message) { - this(origin, path, message, null); - } - - public BadPath(String path, String message, Throwable cause) { - super(path != null ? ("Invalid path '" + path + "': " + message) - : message, cause); - } - - public BadPath(String path, String message) { - this(path, message, null); - } - - public BadPath(ConfigOrigin origin, String message) { - this(origin, null, message); - } - } - - /** - * Exception indicating that there's a bug in something (possibly the - * library itself) or the runtime environment is broken. This exception - * should never be handled; instead, something should be fixed to keep the - * exception from occurring. This exception can be thrown by any method in - * the library. - */ - public static class BugOrBroken extends ConfigException { - private static final long serialVersionUID = 1L; - - public BugOrBroken(String message, Throwable cause) { - super(message, cause); - } - - public BugOrBroken(String message) { - this(message, null); - } - } - - /** - * Exception indicating that there was an IO error. - * - */ - public static class IO extends ConfigException { - private static final long serialVersionUID = 1L; - - public IO(ConfigOrigin origin, String message, Throwable cause) { - super(origin, message, cause); - } - - public IO(ConfigOrigin origin, String message) { - this(origin, message, null); - } - } - - /** - * Exception indicating that there was a parse error. - * - */ - public static class Parse extends ConfigException { - private static final long serialVersionUID = 1L; - - public Parse(ConfigOrigin origin, String message, Throwable cause) { - super(origin, message, cause); - } - - public Parse(ConfigOrigin origin, String message) { - this(origin, message, null); - } - } - - /** - * Exception indicating that a substitution did not resolve to anything. - * Thrown by {@link Config#resolve}. 
- */ - public static class UnresolvedSubstitution extends Parse { - private static final long serialVersionUID = 1L; - - public UnresolvedSubstitution(ConfigOrigin origin, String expression, Throwable cause) { - super(origin, "Could not resolve substitution to a value: " + expression, cause); - } - - public UnresolvedSubstitution(ConfigOrigin origin, String expression) { - this(origin, expression, null); - } - } - - /** - * Exception indicating that you tried to use a function that requires - * substitutions to be resolved, but substitutions have not been resolved - * (that is, {@link Config#resolve} was not called). This is always a bug in - * either application code or the library; it's wrong to write a handler for - * this exception because you should be able to fix the code to avoid it by - * adding calls to {@link Config#resolve}. - */ - public static class NotResolved extends BugOrBroken { - private static final long serialVersionUID = 1L; - - public NotResolved(String message, Throwable cause) { - super(message, cause); - } - - public NotResolved(String message) { - this(message, null); - } - } - - /** - * Information about a problem that occurred in {@link Config#checkValid}. A - * {@link ConfigException.ValidationFailed} exception thrown from - * checkValid() includes a list of problems encountered. - */ - public static class ValidationProblem { - - final private String path; - final private ConfigOrigin origin; - final private String problem; - - public ValidationProblem(String path, ConfigOrigin origin, String problem) { - this.path = path; - this.origin = origin; - this.problem = problem; - } - - /** Returns the config setting causing the problem. */ - public String path() { - return path; - } - - /** - * Returns where the problem occurred (origin may include info on the - * file, line number, etc.). - */ - public ConfigOrigin origin() { - return origin; - } - - /** Returns a description of the problem. 
*/ - public String problem() { - return problem; - } - } - - /** - * Exception indicating that {@link Config#checkValid} found validity - * problems. The problems are available via the {@link #problems()} method. - * The getMessage() of this exception is a potentially very - * long string listing all the problems found. - */ - public static class ValidationFailed extends ConfigException { - private static final long serialVersionUID = 1L; - - final private Iterable problems; - - public ValidationFailed(Iterable problems) { - super(makeMessage(problems), null); - this.problems = problems; - } - - public Iterable problems() { - return problems; - } - - private static String makeMessage(Iterable problems) { - StringBuilder sb = new StringBuilder(); - for (ValidationProblem p : problems) { - sb.append(p.origin().description()); - sb.append(": "); - sb.append(p.path()); - sb.append(": "); - sb.append(p.problem()); - sb.append(", "); - } - if (sb.length() == 0) - throw new ConfigException.BugOrBroken( - "ValidationFailed must have a non-empty list of problems"); - sb.setLength(sb.length() - 2); // chop comma and space - - return sb.toString(); - } - } - - /** - * Exception that doesn't fall into any other category. - */ - public static class Generic extends ConfigException { - private static final long serialVersionUID = 1L; - - public Generic(String message, Throwable cause) { - super(message, cause); - } - - public Generic(String message) { - this(message, null); - } - } - -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigFactory.java b/akka-actor/src/main/java/com/typesafe/config/ConfigFactory.java deleted file mode 100755 index 981708ae55..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigFactory.java +++ /dev/null @@ -1,706 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config; - -import java.io.File; -import java.io.Reader; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.Map; -import java.util.Properties; - -import com.typesafe.config.impl.ConfigImpl; -import com.typesafe.config.impl.Parseable; - -/** - * Contains static methods for creating {@link Config} instances. - * - *

- * See also {@link ConfigValueFactory} which contains static methods for - * converting Java values into a {@link ConfigObject}. You can then convert a - * {@code ConfigObject} into a {@code Config} with {@link ConfigObject#toConfig}. - * - *

- * The static methods with "load" in the name do some sort of higher-level - * operation potentially parsing multiple resources and resolving substitutions, - * while the ones with "parse" in the name just create a {@link ConfigValue} - * from a resource and nothing else. - */ -public final class ConfigFactory { - private ConfigFactory() { - } - - /** - * Loads an application's configuration from the given classpath resource or - * classpath resource basename, sandwiches it between default reference - * config and default overrides, and then resolves it. The classpath - * resource is "raw" (it should have no "/" prefix, and is not made relative - * to any package, so it's like {@link ClassLoader#getResource} not - * {@link Class#getResource}). - * - *

- * Resources are loaded from the current thread's - * {@link Thread#getContextClassLoader()}. In general, a library needs its - * configuration to come from the class loader used to load that library, so - * the proper "reference.conf" are present. - * - *

- * The loaded object will already be resolved (substitutions have already - * been processed). As a result, if you add more fallbacks then they won't - * be seen by substitutions. Substitutions are the "${foo.bar}" syntax. If - * you want to parse additional files or something then you need to use - * {@link #load(Config)}. - * - * @param resourceBasename - * name (optionally without extension) of a resource on classpath - * @return configuration for an application relative to context class loader - */ - public static Config load(String resourceBasename) { - return load(Thread.currentThread().getContextClassLoader(), resourceBasename); - } - - /** - * Like {@link #load(String)} but uses the supplied class loader instead of - * the current thread's context class loader. - * - * @param loader - * @param resourceBasename - * @return configuration for an application relative to given class loader - */ - public static Config load(ClassLoader loader, String resourceBasename) { - return load(loader, resourceBasename, ConfigParseOptions.defaults(), - ConfigResolveOptions.defaults()); - } - - /** - * Like {@link #load(String)} but allows you to specify parse and resolve - * options. 
- * - * @param resourceBasename - * the classpath resource name with optional extension - * @param parseOptions - * options to use when parsing the resource - * @param resolveOptions - * options to use when resolving the stack - * @return configuration for an application - */ - public static Config load(String resourceBasename, ConfigParseOptions parseOptions, - ConfigResolveOptions resolveOptions) { - return load(Thread.currentThread().getContextClassLoader(), resourceBasename, parseOptions, - resolveOptions); - } - - /** - * Like {@link #load(String,ConfigParseOptions,ConfigResolveOptions)} but - * allows you to specify a class loader - * - * @param loader - * class loader in which to find resources - * @param resourceBasename - * the classpath resource name with optional extension - * @param parseOptions - * options to use when parsing the resource - * @param resolveOptions - * options to use when resolving the stack - * @return configuration for an application - */ - public static Config load(ClassLoader loader, String resourceBasename, - ConfigParseOptions parseOptions, ConfigResolveOptions resolveOptions) { - Config appConfig = ConfigFactory.parseResourcesAnySyntax(loader, resourceBasename, - parseOptions); - return load(loader, appConfig, resolveOptions); - } - - /** - * Assembles a standard configuration using a custom Config - * object rather than loading "application.conf". The Config - * object will be sandwiched between the default reference config and - * default overrides and then resolved. 
- * - * @param config - * the application's portion of the configuration - * @return resolved configuration with overrides and fallbacks added - */ - public static Config load(Config config) { - return load(Thread.currentThread().getContextClassLoader(), config); - } - - public static Config load(ClassLoader loader, Config config) { - return load(loader, config, ConfigResolveOptions.defaults()); - } - - /** - * Like {@link #load(Config)} but allows you to specify - * {@link ConfigResolveOptions}. - * - * @param config - * the application's portion of the configuration - * @param resolveOptions - * options for resolving the assembled config stack - * @return resolved configuration with overrides and fallbacks added - */ - public static Config load(Config config, ConfigResolveOptions resolveOptions) { - return load(Thread.currentThread().getContextClassLoader(), config, resolveOptions); - } - - /** - * Like {@link #load(Config,ConfigResolveOptions)} but allows you to specify - * a class loader other than the context class loader. - * - * @param loader - * class loader to use when looking up override and reference - * configs - * @param config - * the application's portion of the configuration - * @param resolveOptions - * options for resolving the assembled config stack - * @return resolved configuration with overrides and fallbacks added - */ - public static Config load(ClassLoader loader, Config config, ConfigResolveOptions resolveOptions) { - return defaultOverrides(loader).withFallback(config).withFallback(defaultReference(loader)) - .resolve(resolveOptions); - } - - private static Config loadDefaultConfig(ClassLoader loader) { - int specified = 0; - - // override application.conf with config.file, config.resource, - // config.url if requested. 
- String resource = System.getProperty("config.resource"); - if (resource != null) - specified += 1; - String file = System.getProperty("config.file"); - if (file != null) - specified += 1; - String url = System.getProperty("config.url"); - if (url != null) - specified += 1; - - if (specified == 0) { - return load(loader, "application"); - } else if (specified > 1) { - throw new ConfigException.Generic("You set more than one of config.file='" + file - + "', config.url='" + url + "', config.resource='" + resource - + "'; don't know which one to use!"); - } else { - if (resource != null) { - if (resource.startsWith("/")) - resource = resource.substring(1); - // this deliberately does not parseResourcesAnySyntax; if - // people want that they can use an include statement. - return load(loader, parseResources(loader, resource)); - } else if (file != null) { - return load(loader, parseFile(new File(file))); - } else { - try { - return load(loader, parseURL(new URL(url))); - } catch (MalformedURLException e) { - throw new ConfigException.Generic("Bad URL in config.url system property: '" - + url + "': " + e.getMessage(), e); - } - } - } - } - - /** - * Loads a default configuration, equivalent to {@link #load(String) - * load("application")} in most cases. This configuration should be used by - * libraries and frameworks unless an application provides a different one. - *

- * This method may return a cached singleton. - *

- * If the system properties config.resource, - * config.file, or config.url are set, then the - * classpath resource, file, or URL specified in those properties will be - * used rather than the default - * application.{conf,json,properties} classpath resources. - * These system properties should not be set in code (after all, you can - * just parse whatever you want manually and then use {@link #load(Config)} - * if you don't want to use application.conf). The properties - * are intended for use by the person or script launching the application. - * For example someone might have a production.conf that - * include application.conf but then change a couple of values. - * When launching the app they could specify - * -Dconfig.resource=production.conf to get production mode. - *

- * If no system properties are set to change the location of the default - * configuration, ConfigFactory.load() is equivalent to - * ConfigFactory.load("application"). - * - * @return configuration for an application - */ - public static Config load() { - return load(Thread.currentThread().getContextClassLoader()); - } - - /** - * Like {@link #load()} but allows specifying a class loader other than the - * thread's current context class loader. - * - * @param loader - * class loader for finding resources - * @return configuration for an application - */ - public static Config load(ClassLoader loader) { - return loadDefaultConfig(loader); - } - - /** - * Obtains the default reference configuration, which is currently created - * by merging all resources "reference.conf" found on the classpath and - * overriding the result with system properties. The returned reference - * configuration will already have substitutions resolved. - * - *

- * Libraries and frameworks should ship with a "reference.conf" in their - * jar. - * - *

- * The reference config must be looked up in the class loader that contains - * the libraries that you want to use with this config, so the - * "reference.conf" for each library can be found. Use - * {@link #defaultReference(ClassLoader)} if the context class loader is not - * suitable. - * - *

- * The {@link #load()} methods merge this configuration for you - * automatically. - * - *

- * Future versions may look for reference configuration in more places. It - * is not guaranteed that this method only looks at - * "reference.conf". - * - * @return the default reference config for context class loader - */ - public static Config defaultReference() { - return defaultReference(Thread.currentThread().getContextClassLoader()); - } - - /** - * Like {@link #defaultReference()} but allows you to specify a class loader - * to use rather than the current context class loader. - * - * @param loader - * @return the default reference config for this class loader - */ - public static Config defaultReference(ClassLoader loader) { - return ConfigImpl.defaultReference(loader); - } - - /** - * Obtains the default override configuration, which currently consists of - * system properties. The returned override configuration will already have - * substitutions resolved. - * - *

- * The {@link #load()} methods merge this configuration for you - * automatically. - * - *

- * Future versions may get overrides in more places. It is not guaranteed - * that this method only uses system properties. - * - * @return the default override configuration - */ - public static Config defaultOverrides() { - return systemProperties(); - } - - /** - * Like {@link #defaultOverrides()} but allows you to specify a class loader - * to use rather than the current context class loader. - * - * @param loader - * @return the default override configuration - */ - public static Config defaultOverrides(ClassLoader loader) { - return systemProperties(); - } - - /** - * Gets an empty configuration. See also {@link #empty(String)} to create an - * empty configuration with a description, which may improve user-visible - * error messages. - * - * @return an empty configuration - */ - public static Config empty() { - return empty(null); - } - - /** - * Gets an empty configuration with a description to be used to create a - * {@link ConfigOrigin} for this Config. The description should - * be very short and say what the configuration is, like "default settings" - * or "foo settings" or something. (Presumably you will merge some actual - * settings into this empty config using {@link Config#withFallback}, making - * the description more useful.) - * - * @param originDescription - * description of the config - * @return an empty configuration - */ - public static Config empty(String originDescription) { - return ConfigImpl.emptyConfig(originDescription); - } - - /** - * Gets a Config containing the system properties from - * {@link java.lang.System#getProperties()}, parsed and converted as with - * {@link #parseProperties}. This method can return a global immutable - * singleton, so it's preferred over parsing system properties yourself. - * - *

- * {@link #load} will include the system properties as overrides already, as - * will {@link #defaultReference} and {@link #defaultOverrides}. - * - *

- * Because this returns a singleton, it will not notice changes to system - * properties made after the first time this method is called. - * - * @return system properties parsed into a Config - */ - public static Config systemProperties() { - return ConfigImpl.systemPropertiesAsConfig(); - } - - /** - * Gets a Config containing the system's environment variables. - * This method can return a global immutable singleton. - * - *

- * Environment variables are used as fallbacks when resolving substitutions - * whether or not this object is included in the config being resolved, so - * you probably don't need to use this method for most purposes. It can be a - * nicer API for accessing environment variables than raw - * {@link java.lang.System#getenv(String)} though, since you can use methods - * such as {@link Config#getInt}. - * - * @return system environment variables parsed into a Config - */ - public static Config systemEnvironment() { - return ConfigImpl.envVariablesAsConfig(); - } - - /** - * Converts a Java {@link java.util.Properties} object to a - * {@link ConfigObject} using the rules documented in the HOCON - * spec. The keys in the Properties object are split on the - * period character '.' and treated as paths. The values will all end up as - * string values. If you have both "a=foo" and "a.b=bar" in your properties - * file, so "a" is both the object containing "b" and the string "foo", then - * the string value is dropped. - * - *

- * If you want to have System.getProperties() as a - * ConfigObject, it's better to use the {@link #systemProperties()} method - * which returns a cached global singleton. - * - * @param properties - * a Java Properties object - * @param options - * @return the parsed configuration - */ - public static Config parseProperties(Properties properties, - ConfigParseOptions options) { - return Parseable.newProperties(properties, options).parse().toConfig(); - } - - public static Config parseProperties(Properties properties) { - return parseProperties(properties, ConfigParseOptions.defaults()); - } - - public static Config parseReader(Reader reader, ConfigParseOptions options) { - return Parseable.newReader(reader, options).parse().toConfig(); - } - - public static Config parseReader(Reader reader) { - return parseReader(reader, ConfigParseOptions.defaults()); - } - - public static Config parseURL(URL url, ConfigParseOptions options) { - return Parseable.newURL(url, options).parse().toConfig(); - } - - public static Config parseURL(URL url) { - return parseURL(url, ConfigParseOptions.defaults()); - } - - public static Config parseFile(File file, ConfigParseOptions options) { - return Parseable.newFile(file, options).parse().toConfig(); - } - - public static Config parseFile(File file) { - return parseFile(file, ConfigParseOptions.defaults()); - } - - /** - * Parses a file with a flexible extension. If the fileBasename - * already ends in a known extension, this method parses it according to - * that extension (the file's syntax must match its extension). If the - * fileBasename does not end in an extension, it parses files - * with all known extensions and merges whatever is found. - * - *

- * In the current implementation, the extension ".conf" forces - * {@link ConfigSyntax#CONF}, ".json" forces {@link ConfigSyntax#JSON}, and - * ".properties" forces {@link ConfigSyntax#PROPERTIES}. When merging files, - * ".conf" falls back to ".json" falls back to ".properties". - * - *

- * Future versions of the implementation may add additional syntaxes or - * additional extensions. However, the ordering (fallback priority) of the - * three current extensions will remain the same. - * - *

- * If options forces a specific syntax, this method only parses - * files with an extension matching that syntax. - * - *

- * If {@link ConfigParseOptions#getAllowMissing options.getAllowMissing()} - * is true, then no files have to exist; if false, then at least one file - * has to exist. - * - * @param fileBasename - * a filename with or without extension - * @param options - * parse options - * @return the parsed configuration - */ - public static Config parseFileAnySyntax(File fileBasename, - ConfigParseOptions options) { - return ConfigImpl.parseFileAnySyntax(fileBasename, options).toConfig(); - } - - public static Config parseFileAnySyntax(File fileBasename) { - return parseFileAnySyntax(fileBasename, ConfigParseOptions.defaults()); - } - - /** - * Parses all resources on the classpath with the given name and merges them - * into a single Config. - * - *

- * If the resource name does not begin with a "/", it will have the supplied - * class's package added to it, in the same way as - * {@link java.lang.Class#getResource}. - * - *

- * Duplicate resources with the same name are merged such that ones returned - * earlier from {@link ClassLoader#getResources} fall back to (have higher - * priority than) the ones returned later. This implies that resources - * earlier in the classpath override those later in the classpath when they - * configure the same setting. However, in practice real applications may - * not be consistent about classpath ordering, so be careful. It may be best - * to avoid assuming too much. - * - * @param klass - * klass.getClassLoader() will be used to load - * resources, and non-absolute resource names will have this - * class's package added - * @param resource - * resource to look up, relative to klass's package - * or absolute starting with a "/" - * @param options - * parse options - * @return the parsed configuration - */ - public static Config parseResources(Class klass, String resource, - ConfigParseOptions options) { - return Parseable.newResources(klass, resource, options).parse() - .toConfig(); - } - - public static Config parseResources(Class klass, String resource) { - return parseResources(klass, resource, ConfigParseOptions.defaults()); - } - - /** - * Parses classpath resources with a flexible extension. In general, this - * method has the same behavior as - * {@link #parseFileAnySyntax(File,ConfigParseOptions)} but for classpath - * resources instead, as in {@link #parseResources}. - * - *

- * There is a thorny problem with this method, which is that - * {@link java.lang.ClassLoader#getResources} must be called separately for - * each possible extension. The implementation ends up with separate lists - * of resources called "basename.conf" and "basename.json" for example. As a - * result, the ideal ordering between two files with different extensions is - * unknown; there is no way to figure out how to merge the two lists in - * classpath order. To keep it simple, the lists are simply concatenated, - * with the same syntax priorities as - * {@link #parseFileAnySyntax(File,ConfigParseOptions) parseFileAnySyntax()} - * - all ".conf" resources are ahead of all ".json" resources which are - * ahead of all ".properties" resources. - * - * @param klass - * class which determines the ClassLoader and the - * package for relative resource names - * @param resourceBasename - * a resource name as in {@link java.lang.Class#getResource}, - * with or without extension - * @param options - * parse options - * @return the parsed configuration - */ - public static Config parseResourcesAnySyntax(Class klass, String resourceBasename, - ConfigParseOptions options) { - return ConfigImpl.parseResourcesAnySyntax(klass, resourceBasename, - options).toConfig(); - } - - public static Config parseResourcesAnySyntax(Class klass, String resourceBasename) { - return parseResourcesAnySyntax(klass, resourceBasename, ConfigParseOptions.defaults()); - } - - /** - * Parses all resources on the classpath with the given name and merges them - * into a single Config. - * - *

- * This works like {@link java.lang.ClassLoader#getResource}, not like - * {@link java.lang.Class#getResource}, so the name never begins with a - * slash. - * - *

- * See {@link #parseResources(Class,String,ConfigParseOptions)} for full - * details. - * - * @param loader - * will be used to load resources - * @param resource - * resource to look up - * @param options - * parse options - * @return the parsed configuration - */ - public static Config parseResources(ClassLoader loader, String resource, - ConfigParseOptions options) { - return Parseable.newResources(loader, resource, options).parse().toConfig(); - } - - public static Config parseResources(ClassLoader loader, String resource) { - return parseResources(loader, resource, ConfigParseOptions.defaults()); - } - - /** - * Parses classpath resources with a flexible extension. In general, this - * method has the same behavior as - * {@link #parseFileAnySyntax(File,ConfigParseOptions)} but for classpath - * resources instead, as in - * {@link #parseResources(ClassLoader,String,ConfigParseOptions)}. - * - *

- * {@link #parseResourcesAnySyntax(Class,String,ConfigParseOptions)} differs - * in the syntax for the resource name, but otherwise see - * {@link #parseResourcesAnySyntax(Class,String,ConfigParseOptions)} for - * some details and caveats on this method. - * - * @param loader - * class loader to look up resources in - * @param resourceBasename - * a resource name as in - * {@link java.lang.ClassLoader#getResource}, with or without - * extension - * @param options - * parse options - * @return the parsed configuration - */ - public static Config parseResourcesAnySyntax(ClassLoader loader, String resourceBasename, - ConfigParseOptions options) { - return ConfigImpl.parseResourcesAnySyntax(loader, resourceBasename, options).toConfig(); - } - - public static Config parseResourcesAnySyntax(ClassLoader loader, String resourceBasename) { - return parseResourcesAnySyntax(loader, resourceBasename, ConfigParseOptions.defaults()); - } - - /** - * Like {@link #parseResources(ClassLoader,String,ConfigParseOptions)} but - * uses thread's current context class loader. - */ - public static Config parseResources(String resource, ConfigParseOptions options) { - return Parseable - .newResources(Thread.currentThread().getContextClassLoader(), resource, options) - .parse().toConfig(); - } - - /** - * Like {@link #parseResources(ClassLoader,String)} but uses thread's - * current context class loader. - */ - public static Config parseResources(String resource) { - return parseResources(Thread.currentThread().getContextClassLoader(), resource, - ConfigParseOptions.defaults()); - } - - /** - * Like - * {@link #parseResourcesAnySyntax(ClassLoader,String,ConfigParseOptions)} - * but uses thread's current context class loader. 
- */ - public static Config parseResourcesAnySyntax(String resourceBasename, ConfigParseOptions options) { - return ConfigImpl.parseResourcesAnySyntax(Thread.currentThread().getContextClassLoader(), - resourceBasename, options).toConfig(); - } - - /** - * Like {@link #parseResourcesAnySyntax(ClassLoader,String)} but uses - * thread's current context class loader. - */ - public static Config parseResourcesAnySyntax(String resourceBasename) { - return parseResourcesAnySyntax(Thread.currentThread().getContextClassLoader(), - resourceBasename, ConfigParseOptions.defaults()); - } - - public static Config parseString(String s, ConfigParseOptions options) { - return Parseable.newString(s, options).parse().toConfig(); - } - - public static Config parseString(String s) { - return parseString(s, ConfigParseOptions.defaults()); - } - - /** - * Creates a {@code Config} based on a {@link java.util.Map} from paths to - * plain Java values. Similar to - * {@link ConfigValueFactory#fromMap(Map,String)}, except the keys in the - * map are path expressions, rather than keys; and correspondingly it - * returns a {@code Config} instead of a {@code ConfigObject}. This is more - * convenient if you are writing literal maps in code, and less convenient - * if you are getting your maps from some data source such as a parser. - * - *

- * An exception will be thrown (and it is a bug in the caller of the method) - * if a path is both an object and a value, for example if you had both - * "a=foo" and "a.b=bar", then "a" is both the string "foo" and the parent - * object of "b". The caller of this method should ensure that doesn't - * happen. - * - * @param values - * @param originDescription - * description of what this map represents, like a filename, or - * "default settings" (origin description is used in error - * messages) - * @return the map converted to a {@code Config} - */ - public static Config parseMap(Map values, - String originDescription) { - return ConfigImpl.fromPathMap(values, originDescription).toConfig(); - } - - /** - * See the other overload of {@link #parseMap(Map, String)} for details, - * this one just uses a default origin description. - * - * @param values - * @return the map converted to a {@code Config} - */ - public static Config parseMap(Map values) { - return parseMap(values, null); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigIncludeContext.java b/akka-actor/src/main/java/com/typesafe/config/ConfigIncludeContext.java deleted file mode 100755 index ac3644a5af..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigIncludeContext.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - - -/** - * Context provided to a {@link ConfigIncluder}; this interface is only useful - * inside a {@code ConfigIncluder} implementation, and is not intended for apps - * to implement. - */ -public interface ConfigIncludeContext { - /** - * Tries to find a name relative to whatever is doing the including, for - * example in the same directory as the file doing the including. Returns - * null if it can't meaningfully create a relative name. The returned - * parseable may not exist; this function is not required to do any IO, just - * compute what the name would be. 
- * - * The passed-in filename has to be a complete name (with extension), not - * just a basename. (Include statements in config files are allowed to give - * just a basename.) - * - * @param filename - * the name to make relative to the resource doing the including - * @return parseable item relative to the resource doing the including, or - * null - */ - ConfigParseable relativeTo(String filename); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigIncluder.java b/akka-actor/src/main/java/com/typesafe/config/ConfigIncluder.java deleted file mode 100755 index 38e8e35a91..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigIncluder.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -/** - * Implement this interface and provide an instance to - * {@link ConfigParseOptions#setIncluder ConfigParseOptions.setIncluder()} to - * customize handling of {@code include} statements in config files. - */ -public interface ConfigIncluder { - /** - * Returns a new includer that falls back to the given includer. This is how - * you can obtain the default includer; it will be provided as a fallback. - * It's up to your includer to chain to it if you want to. You might want to - * merge any files found by the fallback includer with any objects you load - * yourself. - * - * It's important to handle the case where you already have the fallback - * with a "return this", i.e. this method should not create a new object if - * the fallback is the same one you already have. The same fallback may be - * added repeatedly. - * - * @param fallback - * @return a new includer - */ - ConfigIncluder withFallback(ConfigIncluder fallback); - - /** - * Parses another item to be included. The returned object typically would - * not have substitutions resolved. You can throw a ConfigException here to - * abort parsing, or return an empty object, but may not return null. 
- * - * @param context - * some info about the include context - * @param what - * the include statement's argument - * @return a non-null ConfigObject - */ - ConfigObject include(ConfigIncludeContext context, String what); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigList.java b/akka-actor/src/main/java/com/typesafe/config/ConfigList.java deleted file mode 100755 index 5c694a508e..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigList.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -import java.util.List; - -/** - * Subtype of {@link ConfigValue} representing a list value, as in JSON's - * {@code [1,2,3]} syntax. - * - *

- * {@code ConfigList} implements {@code java.util.List} so you can - * use it like a regular Java list. Or call {@link #unwrapped()} to unwrap the - * list elements into plain Java values. - * - *

- * Like all {@link ConfigValue} subtypes, {@code ConfigList} is immutable. This - * makes it threadsafe and you never have to create "defensive copies." The - * mutator methods from {@link java.util.List} all throw - * {@link java.lang.UnsupportedOperationException}. - * - *

- * The {@link ConfigValue#valueType} method on a list returns - * {@link ConfigValueType#LIST}. - * - *

- * Do not implement {@code ConfigList}; it should only be implemented - * by the config library. Arbitrary implementations will not work because the - * library internals assume a specific concrete implementation. Also, this - * interface is likely to grow new methods over time, so third-party - * implementations will break. - * - */ -public interface ConfigList extends List, ConfigValue { - - /** - * Recursively unwraps the list, returning a list of plain Java values such - * as Integer or String or whatever is in the list. - */ - @Override - List unwrapped(); - -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigMergeable.java b/akka-actor/src/main/java/com/typesafe/config/ConfigMergeable.java deleted file mode 100755 index c760bf9d15..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigMergeable.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -/** - * Marker for types whose instances can be merged, that is {@link Config} and - * {@link ConfigValue}. Instances of {@code Config} and {@code ConfigValue} can - * be combined into a single new instance using the - * {@link ConfigMergeable#withFallback withFallback()} method. - * - *

- * Do not implement this interface; it should only be implemented by - * the config library. Arbitrary implementations will not work because the - * library internals assume a specific concrete implementation. Also, this - * interface is likely to grow new methods over time, so third-party - * implementations will break. - */ -public interface ConfigMergeable { - /** - * Returns a new value computed by merging this value with another, with - * keys in this value "winning" over the other one. Only - * {@link ConfigObject} and {@link Config} instances do anything in this - * method (they need to merge the fallback keys into themselves). All other - * values just return the original value, since they automatically override - * any fallback. - * - *

- * The semantics of merging are described in the spec - * for HOCON. - * - *

- * Note that objects do not merge "across" non-objects; if you write - * object.withFallback(nonObject).withFallback(otherObject), - * then otherObject will simply be ignored. This is an - * intentional part of how merging works. Both non-objects, and any object - * which has fallen back to a non-object, block subsequent fallbacks. - * - * @param other - * an object whose keys should be used if the keys are not - * present in this one - * @return a new object (or the original one, if the fallback doesn't get - * used) - */ - ConfigMergeable withFallback(ConfigMergeable other); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigObject.java b/akka-actor/src/main/java/com/typesafe/config/ConfigObject.java deleted file mode 100755 index 285bf04e7f..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigObject.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -import java.util.Map; - -/** - * Subtype of {@link ConfigValue} representing an object (dictionary, map) - * value, as in JSON's { "a" : 42 } syntax. - * - *

- * {@code ConfigObject} implements {@code java.util.Map} so - * you can use it like a regular Java map. Or call {@link #unwrapped()} to - * unwrap the map to a map with plain Java values rather than - * {@code ConfigValue}. - * - *

- * Like all {@link ConfigValue} subtypes, {@code ConfigObject} is immutable. - * This makes it threadsafe and you never have to create "defensive copies." The - * mutator methods from {@link java.util.Map} all throw - * {@link java.lang.UnsupportedOperationException}. - * - *

- * The {@link ConfigValue#valueType} method on an object returns - * {@link ConfigValueType#OBJECT}. - * - *

- * In most cases you want to use the {@link Config} interface rather than this - * one. Call {@link #toConfig()} to convert a {@code ConfigObject} to a - * {@code Config}. - * - *

- * The API for a {@code ConfigObject} is in terms of keys, while the API for a - * {@link Config} is in terms of path expressions. Conceptually, - * {@code ConfigObject} is a tree of maps from keys to values, while a - * {@code Config} is a one-level map from paths to values. - * - *

- * Use {@link ConfigUtil#joinPath} and {@link ConfigUtil#splitPath} to convert - * between path expressions and individual path elements (keys). - * - *

- * A {@code ConfigObject} may contain null values, which will have - * {@link ConfigValue#valueType()} equal to {@link ConfigValueType#NULL}. If - * {@code get()} returns Java's null then the key was not present in the parsed - * file (or wherever this value tree came from). If {@code get()} returns a - * {@link ConfigValue} with type {@code ConfigValueType#NULL} then the key was - * set to null explicitly in the config file. - * - *

- * Do not implement {@code ConfigObject}; it should only be implemented - * by the config library. Arbitrary implementations will not work because the - * library internals assume a specific concrete implementation. Also, this - * interface is likely to grow new methods over time, so third-party - * implementations will break. - */ -public interface ConfigObject extends ConfigValue, Map { - - /** - * Converts this object to a {@link Config} instance, enabling you to use - * path expressions to find values in the object. This is a constant-time - * operation (it is not proportional to the size of the object). - * - * @return a {@link Config} with this object as its root - */ - Config toConfig(); - - /** - * Recursively unwraps the object, returning a map from String to whatever - * plain Java values are unwrapped from the object's values. - * - * @return a {@link java.util.Map} containing plain Java objects - */ - @Override - Map unwrapped(); - - @Override - ConfigObject withFallback(ConfigMergeable other); - - /** - * Gets a {@link ConfigValue} at the given key, or returns null if there is - * no value. The returned {@link ConfigValue} may have - * {@link ConfigValueType#NULL} or any other type, and the passed-in key - * must be a key in this object, rather than a path expression. - * - * @param key - * key to look up - * - * @return the value at the key or null if none - */ - @Override - ConfigValue get(Object key); - - /** - * Clone the object with only the given key (and its children) retained; all - * sibling keys are removed. - * - * @param key - * key to keep - * @return a copy of the object minus all keys except the one specified - */ - ConfigObject withOnlyKey(String key); - - /** - * Clone the object with the given key removed. 
- * - * @param key - * key to remove - * @return a copy of the object minus the specified key - */ - ConfigObject withoutKey(String key); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigOrigin.java b/akka-actor/src/main/java/com/typesafe/config/ConfigOrigin.java deleted file mode 100755 index c34767fb2e..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigOrigin.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -import java.net.URL; -import java.util.List; - - -/** - * Represents the origin (such as filename and line number) of a - * {@link ConfigValue} for use in error messages. Obtain the origin of a value - * with {@link ConfigValue#origin}. Exceptions may have an origin, see - * {@link ConfigException#origin}, but be careful because - * ConfigException.origin() may return null. - * - *

- * It's best to use this interface only for debugging; its accuracy is - * "best effort" rather than guaranteed, and a potentially-noticeable amount of - * memory could probably be saved if origins were not kept around, so in the - * future there might be some option to discard origins. - * - *

- * Do not implement this interface; it should only be implemented by - * the config library. Arbitrary implementations will not work because the - * library internals assume a specific concrete implementation. Also, this - * interface is likely to grow new methods over time, so third-party - * implementations will break. - */ -public interface ConfigOrigin { - /** - * Returns a string describing the origin of a value or exception. This will - * never return null. - * - * @return string describing the origin - */ - public String description(); - - /** - * Returns a filename describing the origin. This will return null if the - * origin was not a file. - * - * @return filename of the origin or null - */ - public String filename(); - - /** - * Returns a URL describing the origin. This will return null if the origin - * has no meaningful URL. - * - * @return url of the origin or null - */ - public URL url(); - - /** - * Returns a classpath resource name describing the origin. This will return - * null if the origin was not a classpath resource. - * - * @return resource name of the origin or null - */ - public String resource(); - - /** - * Returns a line number where the value or exception originated. This will - * return -1 if there's no meaningful line number. - * - * @return line number or -1 if none is available - */ - public int lineNumber(); - - /** - * Returns any comments that appeared to "go with" this place in the file. - * Often an empty list, but never null. The details of this are subject to - * change, but at the moment comments that are immediately before an array - * element or object field, with no blank line after the comment, "go with" - * that element or field. 
- * - * @return any comments that seemed to "go with" this origin, empty list if - * none - */ - public List comments(); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigParseOptions.java b/akka-actor/src/main/java/com/typesafe/config/ConfigParseOptions.java deleted file mode 100755 index 2d057e812f..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigParseOptions.java +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - - -/** - * A set of options related to parsing. - * - *

- * This object is immutable, so the "setters" return a new object. - * - *

- * Here is an example of creating a custom {@code ConfigParseOptions}: - * - *

- *     ConfigParseOptions options = ConfigParseOptions.defaults()
- *         .setSyntax(ConfigSyntax.JSON)
- *         .setAllowMissing(false)
- * 
- * - */ -public final class ConfigParseOptions { - final ConfigSyntax syntax; - final String originDescription; - final boolean allowMissing; - final ConfigIncluder includer; - - protected ConfigParseOptions(ConfigSyntax syntax, String originDescription, - boolean allowMissing, ConfigIncluder includer) { - this.syntax = syntax; - this.originDescription = originDescription; - this.allowMissing = allowMissing; - this.includer = includer; - } - - public static ConfigParseOptions defaults() { - return new ConfigParseOptions(null, null, true, null); - } - - /** - * Set the file format. If set to null, try to guess from any available - * filename extension; if guessing fails, assume {@link ConfigSyntax#CONF}. - * - * @param syntax - * a syntax or {@code null} for best guess - * @return options with the syntax set - */ - public ConfigParseOptions setSyntax(ConfigSyntax syntax) { - if (this.syntax == syntax) - return this; - else - return new ConfigParseOptions(syntax, this.originDescription, - this.allowMissing, this.includer); - } - - public ConfigSyntax getSyntax() { - return syntax; - } - - /** - * Set a description for the thing being parsed. In most cases this will be - * set up for you to something like the filename, but if you provide just an - * input stream you might want to improve on it. Set to null to allow the - * library to come up with something automatically. This description is the - * basis for the {@link ConfigOrigin} of the parsed values. 
- * - * @param originDescription - * @return options with the origin description set - */ - public ConfigParseOptions setOriginDescription(String originDescription) { - if (this.originDescription == originDescription) - return this; - else if (this.originDescription != null && originDescription != null - && this.originDescription.equals(originDescription)) - return this; - else - return new ConfigParseOptions(this.syntax, originDescription, - this.allowMissing, this.includer); - } - - public String getOriginDescription() { - return originDescription; - } - - /** this is package-private, not public API */ - ConfigParseOptions withFallbackOriginDescription(String originDescription) { - if (this.originDescription == null) - return setOriginDescription(originDescription); - else - return this; - } - - /** - * Set to false to throw an exception if the item being parsed (for example - * a file) is missing. Set to true to just return an empty document in that - * case. - * - * @param allowMissing - * @return options with the "allow missing" flag set - */ - public ConfigParseOptions setAllowMissing(boolean allowMissing) { - if (this.allowMissing == allowMissing) - return this; - else - return new ConfigParseOptions(this.syntax, this.originDescription, - allowMissing, this.includer); - } - - public boolean getAllowMissing() { - return allowMissing; - } - - /** - * Set a ConfigIncluder which customizes how includes are handled. 
- * - * @param includer - * @return new version of the parse options with different includer - */ - public ConfigParseOptions setIncluder(ConfigIncluder includer) { - if (this.includer == includer) - return this; - else - return new ConfigParseOptions(this.syntax, this.originDescription, - this.allowMissing, includer); - } - - public ConfigParseOptions prependIncluder(ConfigIncluder includer) { - if (this.includer == includer) - return this; - else if (this.includer != null) - return setIncluder(includer.withFallback(this.includer)); - else - return setIncluder(includer); - } - - public ConfigParseOptions appendIncluder(ConfigIncluder includer) { - if (this.includer == includer) - return this; - else if (this.includer != null) - return setIncluder(this.includer.withFallback(includer)); - else - return setIncluder(includer); - } - - public ConfigIncluder getIncluder() { - return includer; - } - -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigParseable.java b/akka-actor/src/main/java/com/typesafe/config/ConfigParseable.java deleted file mode 100755 index 8c19085513..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigParseable.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - - -/** - * An opaque handle to something that can be parsed, obtained from - * {@link ConfigIncludeContext}. - * - *

- * Do not implement this interface; it should only be implemented by - * the config library. Arbitrary implementations will not work because the - * library internals assume a specific concrete implementation. Also, this - * interface is likely to grow new methods over time, so third-party - * implementations will break. - */ -public interface ConfigParseable { - /** - * Parse whatever it is. The options should come from - * {@link ConfigParseable#options options()} but you could tweak them if you - * like. - * - * @param options - * parse options, should be based on the ones from - * {@link ConfigParseable#options options()} - */ - ConfigObject parse(ConfigParseOptions options); - - /** - * Returns a {@link ConfigOrigin} describing the origin of the parseable - * item. - */ - ConfigOrigin origin(); - - /** - * Get the initial options, which can be modified then passed to parse(). - * These options will have the right description, includer, and other - * parameters already set up. - */ - ConfigParseOptions options(); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigResolveOptions.java b/akka-actor/src/main/java/com/typesafe/config/ConfigResolveOptions.java deleted file mode 100755 index d82a6be71a..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigResolveOptions.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -/** - * A set of options related to resolving substitutions. Substitutions use the - * ${foo.bar} syntax and are documented in the HOCON - * spec. - *

- * This object is immutable, so the "setters" return a new object. - *

- * Here is an example of creating a custom {@code ConfigResolveOptions}: - * - *

- *     ConfigResolveOptions options = ConfigResolveOptions.defaults()
- *         .setUseSystemEnvironment(false)
- * 
- *

- * In addition to {@link ConfigResolveOptions#defaults}, there's a prebuilt - * {@link ConfigResolveOptions#noSystem} which avoids looking at any system - * environment variables or other external system information. (Right now, - * environment variables are the only example.) - */ -public final class ConfigResolveOptions { - private final boolean useSystemEnvironment; - - private ConfigResolveOptions(boolean useSystemEnvironment) { - this.useSystemEnvironment = useSystemEnvironment; - } - - /** - * Returns the default resolve options. - * - * @return the default resolve options - */ - public static ConfigResolveOptions defaults() { - return new ConfigResolveOptions(true); - } - - /** - * Returns resolve options that disable any reference to "system" data - * (currently, this means environment variables). - * - * @return the resolve options with env variables disabled - */ - public static ConfigResolveOptions noSystem() { - return defaults().setUseSystemEnvironment(false); - } - - /** - * Returns options with use of environment variables set to the given value. - * - * @param value - * true to resolve substitutions falling back to environment - * variables. - * @return options with requested setting for use of environment variables - */ - @SuppressWarnings("static-method") - public ConfigResolveOptions setUseSystemEnvironment(boolean value) { - return new ConfigResolveOptions(value); - } - - /** - * Returns whether the options enable use of system environment variables. - * This method is mostly used by the config lib internally, not by - * applications. 
- * - * @return true if environment variables should be used - */ - public boolean getUseSystemEnvironment() { - return useSystemEnvironment; - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigSyntax.java b/akka-actor/src/main/java/com/typesafe/config/ConfigSyntax.java deleted file mode 100755 index 54529fad0b..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigSyntax.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -/** - * The syntax of a character stream, JSON, HOCON - * aka ".conf", or Java properties. - * - */ -public enum ConfigSyntax { - /** - * Pedantically strict JSON format; no - * comments, no unexpected commas, no duplicate keys in the same object. - */ - JSON, - /** - * The JSON-superset HOCON format. - */ - CONF, - /** - * Standard Java properties format. - */ - PROPERTIES; -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigUtil.java b/akka-actor/src/main/java/com/typesafe/config/ConfigUtil.java deleted file mode 100755 index cc936923fe..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigUtil.java +++ /dev/null @@ -1,74 +0,0 @@ -package com.typesafe.config; - -import java.util.List; - -import com.typesafe.config.impl.ConfigImplUtil; - -/** - * Contains static utility methods. - * - */ -public final class ConfigUtil { - private ConfigUtil() { - - } - - /** - * Quotes and escapes a string, as in the JSON specification. - * - * @param s - * a string - * @return the string quoted and escaped - */ - public static String quoteString(String s) { - return ConfigImplUtil.renderJsonString(s); - } - - /** - * Converts a list of keys to a path expression, by quoting the path - * elements as needed and then joining them separated by a period. A path - * expression is usable with a {@link Config}, while individual path - * elements are usable with a {@link ConfigObject}. 
- * - * @param elements - * the keys in the path - * @return a path expression - * @throws ConfigException - * if there are no elements - */ - public static String joinPath(String... elements) { - return ConfigImplUtil.joinPath(elements); - } - - /** - * Converts a list of strings to a path expression, by quoting the path - * elements as needed and then joining them separated by a period. A path - * expression is usable with a {@link Config}, while individual path - * elements are usable with a {@link ConfigObject}. - * - * @param elements - * the keys in the path - * @return a path expression - * @throws ConfigException - * if the list is empty - */ - public static String joinPath(List elements) { - return ConfigImplUtil.joinPath(elements); - } - - /** - * Converts a path expression into a list of keys, by splitting on period - * and unquoting the individual path elements. A path expression is usable - * with a {@link Config}, while individual path elements are usable with a - * {@link ConfigObject}. - * - * @param path - * a path expression - * @return the individual keys in the path - * @throws ConfigException - * if the path expression is invalid - */ - public static List splitPath(String path) { - return ConfigImplUtil.splitPath(path); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigValue.java b/akka-actor/src/main/java/com/typesafe/config/ConfigValue.java deleted file mode 100755 index 1f389be08f..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigValue.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -/** - * An immutable value, following the JSON type - * schema. - * - *

- * Because this object is immutable, it is safe to use from multiple threads and - * there's no need for "defensive copies." - * - *

- * Do not implement {@code ConfigValue}; it should only be implemented - * by the config library. Arbitrary implementations will not work because the - * library internals assume a specific concrete implementation. Also, this - * interface is likely to grow new methods over time, so third-party - * implementations will break. - */ -public interface ConfigValue extends ConfigMergeable { - /** - * The origin of the value (file, line number, etc.), for debugging and - * error messages. - * - * @return where the value came from - */ - ConfigOrigin origin(); - - /** - * The {@link ConfigValueType} of the value; matches the JSON type schema. - * - * @return value's type - */ - ConfigValueType valueType(); - - /** - * Returns the value as a plain Java boxed value, that is, a {@code String}, - * {@code Number}, {@code Boolean}, {@code Map}, - * {@code List}, or {@code null}, matching the {@link #valueType()} - * of this {@code ConfigValue}. If the value is a {@link ConfigObject} or - * {@link ConfigList}, it is recursively unwrapped. - */ - Object unwrapped(); - - /** - * Renders the config value as a HOCON string. This method is primarily - * intended for debugging, so it tries to add helpful comments and - * whitespace. If the config value has not been resolved (see - * {@link Config#resolve}), it's possible that it can't be rendered as valid - * HOCON. In that case the rendering should still be useful for debugging - * but you might not be able to parse it. - * - * @return the rendered value - */ - String render(); - - @Override - ConfigValue withFallback(ConfigMergeable other); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigValueFactory.java b/akka-actor/src/main/java/com/typesafe/config/ConfigValueFactory.java deleted file mode 100755 index babace186e..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigValueFactory.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config; - -import java.util.Map; - -import com.typesafe.config.impl.ConfigImpl; - -/** - * This class holds some static factory methods for building {@link ConfigValue} - * instances. See also {@link ConfigFactory} which has methods for parsing files - * and certain in-memory data structures. - */ -public final class ConfigValueFactory { - private ConfigValueFactory() { - } - - /** - * Creates a ConfigValue from a plain Java boxed value, which may be a - * Boolean, Number, String, Map, Iterable, or null. A Map must be a Map from - * String to more values that can be supplied to fromAnyRef(). An Iterable - * must iterate over more values that can be supplied to fromAnyRef(). A Map - * will become a ConfigObject and an Iterable will become a ConfigList. If - * the Iterable is not an ordered collection, results could be strange, - * since ConfigList is ordered. - * - *

- * In a Map passed to fromAnyRef(), the map's keys are plain keys, not path - * expressions. So if your Map has a key "foo.bar" then you will get one - * object with a key called "foo.bar", rather than an object with a key - * "foo" containing another object with a key "bar". - * - *

- * The originDescription will be used to set the origin() field on the - * ConfigValue. It should normally be the name of the file the values came - * from, or something short describing the value such as "default settings". - * The originDescription is prefixed to error messages so users can tell - * where problematic values are coming from. - * - *

- * Supplying the result of ConfigValue.unwrapped() to this function is - * guaranteed to work and should give you back a ConfigValue that matches - * the one you unwrapped. The re-wrapped ConfigValue will lose some - * information that was present in the original such as its origin, but it - * will have matching values. - * - *

- * This function throws if you supply a value that cannot be converted to a - * ConfigValue, but supplying such a value is a bug in your program, so you - * should never handle the exception. Just fix your program (or report a bug - * against this library). - * - * @param object - * object to convert to ConfigValue - * @param originDescription - * name of origin file or brief description of what the value is - * @return a new value - */ - public static ConfigValue fromAnyRef(Object object, String originDescription) { - return ConfigImpl.fromAnyRef(object, originDescription); - } - - /** - * See the fromAnyRef() documentation for details. This is a typesafe - * wrapper that only works on {@link java.util.Map} and returns - * {@link ConfigObject} rather than {@link ConfigValue}. - * - *

- * If your Map has a key "foo.bar" then you will get one object with a key - * called "foo.bar", rather than an object with a key "foo" containing - * another object with a key "bar". The keys in the map are keys; not path - * expressions. That is, the Map corresponds exactly to a single - * {@code ConfigObject}. The keys will not be parsed or modified, and the - * values are wrapped in ConfigValue. To get nested {@code ConfigObject}, - * some of the values in the map would have to be more maps. - * - *

- * See also {@link ConfigFactory#parseMap(Map,String)} which interprets the - * keys in the map as path expressions. - * - * @param values - * @param originDescription - * @return a new {@link ConfigObject} value - */ - public static ConfigObject fromMap(Map values, - String originDescription) { - return (ConfigObject) fromAnyRef(values, originDescription); - } - - /** - * See the fromAnyRef() documentation for details. This is a typesafe - * wrapper that only works on {@link java.util.Iterable} and returns - * {@link ConfigList} rather than {@link ConfigValue}. - * - * @param values - * @param originDescription - * @return a new {@link ConfigList} value - */ - public static ConfigList fromIterable(Iterable values, - String originDescription) { - return (ConfigList) fromAnyRef(values, originDescription); - } - - /** - * See the other overload {@link #fromAnyRef(Object,String)} for details, - * this one just uses a default origin description. - * - * @param object - * @return a new {@link ConfigValue} - */ - public static ConfigValue fromAnyRef(Object object) { - return fromAnyRef(object, null); - } - - /** - * See the other overload {@link #fromMap(Map,String)} for details, this one - * just uses a default origin description. - * - *

- * See also {@link ConfigFactory#parseMap(Map)} which interprets the keys in - * the map as path expressions. - * - * @param values - * @return a new {@link ConfigObject} - */ - public static ConfigObject fromMap(Map values) { - return fromMap(values, null); - } - - /** - * See the other overload of {@link #fromIterable(Iterable, String)} for - * details, this one just uses a default origin description. - * - * @param values - * @return a new {@link ConfigList} - */ - public static ConfigList fromIterable(Iterable values) { - return fromIterable(values, null); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/ConfigValueType.java b/akka-actor/src/main/java/com/typesafe/config/ConfigValueType.java deleted file mode 100755 index a15774d3ce..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/ConfigValueType.java +++ /dev/null @@ -1,12 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config; - -/** - * The type of a configuration value (following the JSON type schema). - */ -public enum ConfigValueType { - OBJECT, LIST, NUMBER, BOOLEAN, NULL, STRING -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/AbstractConfigObject.java b/akka-actor/src/main/java/com/typesafe/config/impl/AbstractConfigObject.java deleted file mode 100755 index c5031fe568..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/AbstractConfigObject.java +++ /dev/null @@ -1,427 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigMergeable; -import com.typesafe.config.ConfigObject; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigResolveOptions; -import com.typesafe.config.ConfigValue; -import com.typesafe.config.ConfigValueType; - -abstract class AbstractConfigObject extends AbstractConfigValue implements - ConfigObject { - - private static final long serialVersionUID = 1L; - - final private SimpleConfig config; - - protected AbstractConfigObject(ConfigOrigin origin) { - super(origin); - this.config = new SimpleConfig(this); - } - - @Override - public SimpleConfig toConfig() { - return config; - } - - @Override - public AbstractConfigObject toFallbackValue() { - return this; - } - - @Override - abstract public AbstractConfigObject withOnlyKey(String key); - - @Override - abstract public AbstractConfigObject withoutKey(String key); - - abstract protected AbstractConfigObject withOnlyPathOrNull(Path path); - - abstract AbstractConfigObject withOnlyPath(Path path); - - abstract AbstractConfigObject withoutPath(Path path); - - /** - * This looks up the key with no transformation or type conversion of any - * kind, and returns null if the key is not present. 
- * - * @param key - * @return the unmodified raw value or null - */ - protected abstract AbstractConfigValue peek(String key); - - protected AbstractConfigValue peek(String key, - SubstitutionResolver resolver, int depth, - ConfigResolveOptions options) { - AbstractConfigValue v = peek(key); - - if (v != null && resolver != null) { - v = resolver.resolve(v, depth, options); - } - - return v; - } - - /** - * Looks up the path with no transformation, type conversion, or exceptions - * (just returns null if path not found). Does however resolve the path, if - * resolver != null. - */ - protected AbstractConfigValue peekPath(Path path, SubstitutionResolver resolver, - int depth, ConfigResolveOptions options) { - return peekPath(this, path, resolver, depth, options); - } - - AbstractConfigValue peekPath(Path path) { - return peekPath(this, path, null, 0, null); - } - - private static AbstractConfigValue peekPath(AbstractConfigObject self, Path path, - SubstitutionResolver resolver, int depth, - ConfigResolveOptions options) { - String key = path.first(); - Path next = path.remainder(); - - if (next == null) { - AbstractConfigValue v = self.peek(key, resolver, depth, options); - return v; - } else { - // it's important to ONLY resolve substitutions here, not - // all values, because if you resolve arrays or objects - // it creates unnecessary cycles as a side effect (any sibling - // of the object we want to follow could cause a cycle, not just - // the object we want to follow). 
- - ConfigValue v = self.peek(key); - - if (v instanceof ConfigSubstitution && resolver != null) { - v = resolver.resolve((AbstractConfigValue) v, depth, options); - } - - if (v instanceof AbstractConfigObject) { - return peekPath((AbstractConfigObject) v, next, resolver, - depth, options); - } else { - return null; - } - } - } - - @Override - public ConfigValueType valueType() { - return ConfigValueType.OBJECT; - } - - protected abstract AbstractConfigObject newCopy(ResolveStatus status, boolean ignoresFallbacks, - ConfigOrigin origin); - - @Override - protected AbstractConfigObject newCopy(boolean ignoresFallbacks, ConfigOrigin origin) { - return newCopy(resolveStatus(), ignoresFallbacks, origin); - } - - @Override - protected final AbstractConfigObject mergedWithTheUnmergeable(Unmergeable fallback) { - if (ignoresFallbacks()) - throw new ConfigException.BugOrBroken("should not be reached"); - - List stack = new ArrayList(); - if (this instanceof Unmergeable) { - stack.addAll(((Unmergeable) this).unmergedValues()); - } else { - stack.add(this); - } - stack.addAll(fallback.unmergedValues()); - return new ConfigDelayedMergeObject(mergeOrigins(stack), stack, - ((AbstractConfigValue) fallback).ignoresFallbacks()); - } - - @Override - protected AbstractConfigObject mergedWithObject(AbstractConfigObject fallback) { - if (ignoresFallbacks()) - throw new ConfigException.BugOrBroken("should not be reached"); - - boolean changed = false; - boolean allResolved = true; - Map merged = new HashMap(); - Set allKeys = new HashSet(); - allKeys.addAll(this.keySet()); - allKeys.addAll(fallback.keySet()); - for (String key : allKeys) { - AbstractConfigValue first = this.peek(key); - AbstractConfigValue second = fallback.peek(key); - AbstractConfigValue kept; - if (first == null) - kept = second; - else if (second == null) - kept = first; - else - kept = first.withFallback(second); - - merged.put(key, kept); - - if (first != kept) - changed = true; - - if (kept.resolveStatus() == 
ResolveStatus.UNRESOLVED) - allResolved = false; - } - - ResolveStatus newResolveStatus = ResolveStatus.fromBoolean(allResolved); - boolean newIgnoresFallbacks = fallback.ignoresFallbacks(); - - if (changed) - return new SimpleConfigObject(mergeOrigins(this, fallback), merged, newResolveStatus, - newIgnoresFallbacks); - else if (newResolveStatus != resolveStatus() || newIgnoresFallbacks != ignoresFallbacks()) - return newCopy(newResolveStatus, newIgnoresFallbacks, origin()); - else - return this; - } - - @Override - public AbstractConfigObject withFallback(ConfigMergeable mergeable) { - return (AbstractConfigObject) super.withFallback(mergeable); - } - - static ConfigOrigin mergeOrigins( - Collection stack) { - if (stack.isEmpty()) - throw new ConfigException.BugOrBroken( - "can't merge origins on empty list"); - List origins = new ArrayList(); - ConfigOrigin firstOrigin = null; - int numMerged = 0; - for (AbstractConfigValue v : stack) { - if (firstOrigin == null) - firstOrigin = v.origin(); - - if (v instanceof AbstractConfigObject - && ((AbstractConfigObject) v).resolveStatus() == ResolveStatus.RESOLVED - && ((ConfigObject) v).isEmpty()) { - // don't include empty files or the .empty() - // config in the description, since they are - // likely to be "implementation details" - } else { - origins.add(v.origin()); - numMerged += 1; - } - } - - if (numMerged == 0) { - // the configs were all empty, so just use the first one - origins.add(firstOrigin); - } - - return SimpleConfigOrigin.mergeOrigins(origins); - } - - static ConfigOrigin mergeOrigins(AbstractConfigObject... stack) { - return mergeOrigins(Arrays.asList(stack)); - } - - private AbstractConfigObject modify(Modifier modifier, - ResolveStatus newResolveStatus) { - Map changes = null; - for (String k : keySet()) { - AbstractConfigValue v = peek(k); - // "modified" may be null, which means remove the child; - // to do that we put null in the "changes" map. 
- AbstractConfigValue modified = modifier.modifyChild(v); - if (modified != v) { - if (changes == null) - changes = new HashMap(); - changes.put(k, modified); - } - } - if (changes == null) { - return newCopy(newResolveStatus, ignoresFallbacks(), origin()); - } else { - Map modified = new HashMap(); - for (String k : keySet()) { - if (changes.containsKey(k)) { - AbstractConfigValue newValue = changes.get(k); - if (newValue != null) { - modified.put(k, newValue); - } else { - // remove this child; don't put it in the new map. - } - } else { - modified.put(k, peek(k)); - } - } - return new SimpleConfigObject(origin(), modified, newResolveStatus, - ignoresFallbacks()); - } - } - - @Override - AbstractConfigObject resolveSubstitutions(final SubstitutionResolver resolver, - final int depth, - final ConfigResolveOptions options) { - if (resolveStatus() == ResolveStatus.RESOLVED) - return this; - - return modify(new Modifier() { - - @Override - public AbstractConfigValue modifyChild(AbstractConfigValue v) { - return resolver.resolve(v, depth, options); - } - - }, ResolveStatus.RESOLVED); - } - - @Override - AbstractConfigObject relativized(final Path prefix) { - return modify(new Modifier() { - - @Override - public AbstractConfigValue modifyChild(AbstractConfigValue v) { - return v.relativized(prefix); - } - - }, resolveStatus()); - } - - @Override - public AbstractConfigValue get(Object key) { - if (key instanceof String) - return peek((String) key); - else - return null; - } - - @Override - protected void render(StringBuilder sb, int indent, boolean formatted) { - if (isEmpty()) { - sb.append("{}"); - } else { - sb.append("{"); - if (formatted) - sb.append('\n'); - for (String k : keySet()) { - AbstractConfigValue v = peek(k); - if (formatted) { - indent(sb, indent + 1); - sb.append("# "); - sb.append(v.origin().description()); - sb.append("\n"); - for (String comment : v.origin().comments()) { - indent(sb, indent + 1); - sb.append("# "); - sb.append(comment); - 
sb.append("\n"); - } - indent(sb, indent + 1); - } - v.render(sb, indent + 1, k, formatted); - sb.append(","); - if (formatted) - sb.append('\n'); - } - // chop comma or newline - sb.setLength(sb.length() - 1); - if (formatted) { - sb.setLength(sb.length() - 1); // also chop comma - sb.append("\n"); // put a newline back - indent(sb, indent); - } - sb.append("}"); - } - } - - private static boolean mapEquals(Map a, - Map b) { - Set aKeys = a.keySet(); - Set bKeys = b.keySet(); - - if (!aKeys.equals(bKeys)) - return false; - - for (String key : aKeys) { - if (!a.get(key).equals(b.get(key))) - return false; - } - return true; - } - - private static int mapHash(Map m) { - // the keys have to be sorted, otherwise we could be equal - // to another map but have a different hashcode. - List keys = new ArrayList(); - keys.addAll(m.keySet()); - Collections.sort(keys); - - int valuesHash = 0; - for (String k : keys) { - valuesHash += m.get(k).hashCode(); - } - return 41 * (41 + keys.hashCode()) + valuesHash; - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof ConfigObject; - } - - @Override - public boolean equals(Object other) { - // note that "origin" is deliberately NOT part of equality. - // neither are other "extras" like ignoresFallbacks or resolve status. - if (other instanceof ConfigObject) { - // optimization to avoid unwrapped() for two ConfigObject, - // which is what AbstractConfigValue does. - return canEqual(other) && mapEquals(this, ((ConfigObject) other)); - } else { - return false; - } - } - - @Override - public int hashCode() { - // note that "origin" is deliberately NOT part of equality - // neither are other "extras" like ignoresFallbacks or resolve status. 
- return mapHash(this); - } - - private static UnsupportedOperationException weAreImmutable(String method) { - return new UnsupportedOperationException( - "ConfigObject is immutable, you can't call Map.'" + method - + "'"); - } - - @Override - public void clear() { - throw weAreImmutable("clear"); - } - - @Override - public ConfigValue put(String arg0, ConfigValue arg1) { - throw weAreImmutable("put"); - } - - @Override - public void putAll(Map arg0) { - throw weAreImmutable("putAll"); - } - - @Override - public ConfigValue remove(Object arg0) { - throw weAreImmutable("remove"); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/AbstractConfigValue.java b/akka-actor/src/main/java/com/typesafe/config/impl/AbstractConfigValue.java deleted file mode 100755 index 7eea5f0a8a..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/AbstractConfigValue.java +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.Serializable; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigMergeable; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigResolveOptions; -import com.typesafe.config.ConfigValue; - -/** - * - * Trying very hard to avoid a parent reference in config values; when you have - * a tree like this, the availability of parent() tends to result in a lot of - * improperly-factored and non-modular code. Please don't add parent(). - * - */ -abstract class AbstractConfigValue implements ConfigValue, MergeableValue, Serializable { - - private static final long serialVersionUID = 1L; - - final private SimpleConfigOrigin origin; - - AbstractConfigValue(ConfigOrigin origin) { - this.origin = (SimpleConfigOrigin) origin; - } - - @Override - public SimpleConfigOrigin origin() { - return this.origin; - } - - /** - * Called only by SubstitutionResolver object. 
- * - * @param resolver - * the resolver doing the resolving - * @param depth - * the number of substitutions followed in resolving the current - * one - * @param options - * whether to look at system props and env vars - * @return a new value if there were changes, or this if no changes - */ - AbstractConfigValue resolveSubstitutions(SubstitutionResolver resolver, - int depth, - ConfigResolveOptions options) { - return this; - } - - ResolveStatus resolveStatus() { - return ResolveStatus.RESOLVED; - } - - /** - * This is used when including one file in another; the included file is - * relativized to the path it's included into in the parent file. The point - * is that if you include a file at foo.bar in the parent, and the included - * file as a substitution ${a.b.c}, the included substitution now needs to - * be ${foo.bar.a.b.c} because we resolve substitutions globally only after - * parsing everything. - * - * @param prefix - * @return value relativized to the given path or the same value if nothing - * to do - */ - AbstractConfigValue relativized(Path prefix) { - return this; - } - - protected interface Modifier { - AbstractConfigValue modifyChild(AbstractConfigValue v); - } - - @Override - public AbstractConfigValue toFallbackValue() { - return this; - } - - protected abstract AbstractConfigValue newCopy(boolean ignoresFallbacks, ConfigOrigin origin); - - // this is virtualized rather than a field because only some subclasses - // really need to store the boolean, and they may be able to pack it - // with another boolean to save space. 
- protected boolean ignoresFallbacks() { - return true; - } - - private ConfigException badMergeException() { - if (ignoresFallbacks()) - throw new ConfigException.BugOrBroken( - "method should not have been called with ignoresFallbacks=true" - + getClass().getSimpleName()); - else - throw new ConfigException.BugOrBroken("should override this in " - + getClass().getSimpleName()); - } - - protected AbstractConfigValue mergedWithTheUnmergeable(Unmergeable fallback) { - throw badMergeException(); - } - - protected AbstractConfigValue mergedWithObject(AbstractConfigObject fallback) { - throw badMergeException(); - } - - protected AbstractConfigValue mergedWithNonObject(AbstractConfigValue fallback) { - // falling back to a non-object doesn't merge anything, and also - // prohibits merging any objects that we fall back to later. - // so we have to switch to ignoresFallbacks mode. - return newCopy(true /* ignoresFallbacks */, origin); - } - - public AbstractConfigValue withOrigin(ConfigOrigin origin) { - if (this.origin == origin) - return this; - else - return newCopy(ignoresFallbacks(), origin); - } - - @Override - public AbstractConfigValue withFallback(ConfigMergeable mergeable) { - if (ignoresFallbacks()) { - return this; - } else { - ConfigValue other = ((MergeableValue) mergeable).toFallbackValue(); - - if (other instanceof Unmergeable) { - return mergedWithTheUnmergeable((Unmergeable) other); - } else if (other instanceof AbstractConfigObject) { - AbstractConfigObject fallback = (AbstractConfigObject) other; - if (fallback.resolveStatus() == ResolveStatus.RESOLVED && fallback.isEmpty()) { - if (fallback.ignoresFallbacks()) - return newCopy(true /* ignoresFallbacks */, origin); - else - return this; - } else { - return mergedWithObject((AbstractConfigObject) other); - } - } else { - return mergedWithNonObject((AbstractConfigValue) other); - } - } - } - - protected boolean canEqual(Object other) { - return other instanceof ConfigValue; - } - - @Override - public 
boolean equals(Object other) { - // note that "origin" is deliberately NOT part of equality - if (other instanceof ConfigValue) { - return canEqual(other) - && (this.valueType() == - ((ConfigValue) other).valueType()) - && ConfigImplUtil.equalsHandlingNull(this.unwrapped(), - ((ConfigValue) other).unwrapped()); - } else { - return false; - } - } - - @Override - public int hashCode() { - // note that "origin" is deliberately NOT part of equality - Object o = this.unwrapped(); - if (o == null) - return 0; - else - return o.hashCode(); - } - - @Override - public final String toString() { - StringBuilder sb = new StringBuilder(); - render(sb, 0, null /* atKey */, false /* formatted */); - return getClass().getSimpleName() + "(" + sb.toString() + ")"; - } - - protected static void indent(StringBuilder sb, int indent) { - int remaining = indent; - while (remaining > 0) { - sb.append(" "); - --remaining; - } - } - - protected void render(StringBuilder sb, int indent, String atKey, boolean formatted) { - if (atKey != null) { - sb.append(ConfigImplUtil.renderJsonString(atKey)); - sb.append(" : "); - } - render(sb, indent, formatted); - } - - protected void render(StringBuilder sb, int indent, boolean formatted) { - Object u = unwrapped(); - sb.append(u.toString()); - } - - - @Override - public final String render() { - StringBuilder sb = new StringBuilder(); - render(sb, 0, null, true /* formatted */); - return sb.toString(); - } - - // toString() is a debugging-oriented string but this is defined - // to create a string that would parse back to the value in JSON. - // It only works for primitive values (that would be a single token) - // which are auto-converted to strings when concatenating with - // other strings or by the DefaultTransformer. 
- String transformToString() { - return null; - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigBoolean.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigBoolean.java deleted file mode 100755 index 5a5c0f8ffd..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigBoolean.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigValueType; - -final class ConfigBoolean extends AbstractConfigValue { - - private static final long serialVersionUID = 1L; - - final private boolean value; - - ConfigBoolean(ConfigOrigin origin, boolean value) { - super(origin); - this.value = value; - } - - @Override - public ConfigValueType valueType() { - return ConfigValueType.BOOLEAN; - } - - @Override - public Boolean unwrapped() { - return value; - } - - @Override - String transformToString() { - return value ? "true" : "false"; - } - - @Override - protected ConfigBoolean newCopy(boolean ignoresFallbacks, ConfigOrigin origin) { - return new ConfigBoolean(origin, value); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDelayedMerge.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDelayedMerge.java deleted file mode 100755 index 49eae37c97..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDelayedMerge.java +++ /dev/null @@ -1,244 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import java.io.ObjectStreamException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigResolveOptions; -import com.typesafe.config.ConfigValueType; - -/** - * The issue here is that we want to first merge our stack of config files, and - * then we want to evaluate substitutions. But if two substitutions both expand - * to an object, we might need to merge those two objects. Thus, we can't ever - * "override" a substitution when we do a merge; instead we have to save the - * stack of values that should be merged, and resolve the merge when we evaluate - * substitutions. - */ -final class ConfigDelayedMerge extends AbstractConfigValue implements - Unmergeable { - - private static final long serialVersionUID = 1L; - - // earlier items in the stack win - final private List stack; - final private boolean ignoresFallbacks; - - ConfigDelayedMerge(ConfigOrigin origin, List stack, - boolean ignoresFallbacks) { - super(origin); - this.stack = stack; - this.ignoresFallbacks = ignoresFallbacks; - if (stack.isEmpty()) - throw new ConfigException.BugOrBroken( - "creating empty delayed merge value"); - - for (AbstractConfigValue v : stack) { - if (v instanceof ConfigDelayedMerge || v instanceof ConfigDelayedMergeObject) - throw new ConfigException.BugOrBroken( - "placed nested DelayedMerge in a ConfigDelayedMerge, should have consolidated stack"); - } - } - - ConfigDelayedMerge(ConfigOrigin origin, List stack) { - this(origin, stack, false /* ignoresFallbacks */); - } - - @Override - public ConfigValueType valueType() { - throw new ConfigException.NotResolved( - "called valueType() on value with unresolved substitutions, need to resolve first"); - } - - @Override - public Object unwrapped() { - throw new ConfigException.NotResolved( - "called 
unwrapped() on value with unresolved substitutions, need to resolve first"); - } - - @Override - AbstractConfigValue resolveSubstitutions(SubstitutionResolver resolver, - int depth, ConfigResolveOptions options) { - return resolveSubstitutions(stack, resolver, depth, options); - } - - // static method also used by ConfigDelayedMergeObject - static AbstractConfigValue resolveSubstitutions( - List stack, SubstitutionResolver resolver, - int depth, ConfigResolveOptions options) { - // to resolve substitutions, we need to recursively resolve - // the stack of stuff to merge, and merge the stack so - // we won't be a delayed merge anymore. - - AbstractConfigValue merged = null; - for (AbstractConfigValue v : stack) { - AbstractConfigValue resolved = resolver.resolve(v, depth, options); - if (resolved != null) { - if (merged == null) - merged = resolved; - else - merged = merged.withFallback(resolved); - } - } - - return merged; - } - - @Override - ResolveStatus resolveStatus() { - return ResolveStatus.UNRESOLVED; - } - - @Override - ConfigDelayedMerge relativized(Path prefix) { - List newStack = new ArrayList(); - for (AbstractConfigValue o : stack) { - newStack.add(o.relativized(prefix)); - } - return new ConfigDelayedMerge(origin(), newStack, ignoresFallbacks); - } - - @Override - protected boolean ignoresFallbacks() { - return ignoresFallbacks; - } - - @Override - protected AbstractConfigValue newCopy(boolean newIgnoresFallbacks, ConfigOrigin newOrigin) { - return new ConfigDelayedMerge(newOrigin, stack, newIgnoresFallbacks); - } - - @Override - protected final ConfigDelayedMerge mergedWithTheUnmergeable(Unmergeable fallback) { - if (ignoresFallbacks) - throw new ConfigException.BugOrBroken("should not be reached"); - - // if we turn out to be an object, and the fallback also does, - // then a merge may be required; delay until we resolve. 
- List newStack = new ArrayList(); - newStack.addAll(stack); - newStack.addAll(fallback.unmergedValues()); - return new ConfigDelayedMerge(AbstractConfigObject.mergeOrigins(newStack), newStack, - ((AbstractConfigValue) fallback).ignoresFallbacks()); - } - - @Override - protected final ConfigDelayedMerge mergedWithObject(AbstractConfigObject fallback) { - if (ignoresFallbacks) - throw new ConfigException.BugOrBroken("should not be reached"); - - // if we turn out to be an object, and the fallback also does, - // then a merge may be required; delay until we resolve. - List newStack = new ArrayList(); - newStack.addAll(stack); - newStack.add(fallback); - return new ConfigDelayedMerge(AbstractConfigObject.mergeOrigins(newStack), newStack, - fallback.ignoresFallbacks()); - } - - @Override - public Collection unmergedValues() { - return stack; - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof ConfigDelayedMerge; - } - - @Override - public boolean equals(Object other) { - // note that "origin" is deliberately NOT part of equality - if (other instanceof ConfigDelayedMerge) { - return canEqual(other) - && this.stack.equals(((ConfigDelayedMerge) other).stack); - } else { - return false; - } - } - - @Override - public int hashCode() { - // note that "origin" is deliberately NOT part of equality - return stack.hashCode(); - } - - @Override - protected void render(StringBuilder sb, int indent, String atKey, boolean formatted) { - render(stack, sb, indent, atKey, formatted); - } - - // static method also used by ConfigDelayedMergeObject. 
- static void render(List stack, StringBuilder sb, int indent, String atKey, - boolean formatted) { - if (formatted) { - sb.append("# unresolved merge of " + stack.size() + " values follows (\n"); - if (atKey == null) { - indent(sb, indent); - sb.append("# this unresolved merge will not be parseable because it's at the root of the object\n"); - sb.append("# the HOCON format has no way to list multiple root objects in a single file\n"); - } - } - - List reversed = new ArrayList(); - reversed.addAll(stack); - Collections.reverse(reversed); - - int i = 0; - for (AbstractConfigValue v : reversed) { - if (formatted) { - indent(sb, indent); - if (atKey != null) { - sb.append("# unmerged value " + i + " for key " - + ConfigImplUtil.renderJsonString(atKey) + " from "); - } else { - sb.append("# unmerged value " + i + " from "); - } - i += 1; - sb.append(v.origin().description()); - sb.append("\n"); - for (String comment : v.origin().comments()) { - indent(sb, indent); - sb.append("# "); - sb.append(comment); - sb.append("\n"); - } - indent(sb, indent); - } - - if (atKey != null) { - sb.append(ConfigImplUtil.renderJsonString(atKey)); - sb.append(" : "); - } - v.render(sb, indent, formatted); - sb.append(","); - if (formatted) - sb.append('\n'); - } - // chop comma or newline - sb.setLength(sb.length() - 1); - if (formatted) { - sb.setLength(sb.length() - 1); // also chop comma - sb.append("\n"); // put a newline back - indent(sb, indent); - sb.append("# ) end of unresolved merge\n"); - } - } - - // This ridiculous hack is because some JDK versions apparently can't - // serialize an array, which is used to implement ArrayList and EmptyList. 
- // maybe - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6446627 - private Object writeReplace() throws ObjectStreamException { - // switch to LinkedList - return new ConfigDelayedMerge(origin(), - new java.util.LinkedList(stack), ignoresFallbacks); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDelayedMergeObject.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDelayedMergeObject.java deleted file mode 100755 index fa873d9df9..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDelayedMergeObject.java +++ /dev/null @@ -1,233 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.ObjectStreamException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigMergeable; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigResolveOptions; -import com.typesafe.config.ConfigValue; - -// This is just like ConfigDelayedMerge except we know statically -// that it will turn out to be an object. 
-final class ConfigDelayedMergeObject extends AbstractConfigObject implements - Unmergeable { - - private static final long serialVersionUID = 1L; - - final private List stack; - final private boolean ignoresFallbacks; - - ConfigDelayedMergeObject(ConfigOrigin origin, - List stack) { - this(origin, stack, false /* ignoresFallbacks */); - } - - ConfigDelayedMergeObject(ConfigOrigin origin, List stack, - boolean ignoresFallbacks) { - super(origin); - this.stack = stack; - this.ignoresFallbacks = ignoresFallbacks; - - if (stack.isEmpty()) - throw new ConfigException.BugOrBroken( - "creating empty delayed merge object"); - if (!(stack.get(0) instanceof AbstractConfigObject)) - throw new ConfigException.BugOrBroken( - "created a delayed merge object not guaranteed to be an object"); - - for (AbstractConfigValue v : stack) { - if (v instanceof ConfigDelayedMerge || v instanceof ConfigDelayedMergeObject) - throw new ConfigException.BugOrBroken( - "placed nested DelayedMerge in a ConfigDelayedMergeObject, should have consolidated stack"); - } - } - - @Override - protected ConfigDelayedMergeObject newCopy(ResolveStatus status, boolean ignoresFallbacks, - ConfigOrigin origin) { - if (status != resolveStatus()) - throw new ConfigException.BugOrBroken( - "attempt to create resolved ConfigDelayedMergeObject"); - return new ConfigDelayedMergeObject(origin, stack, ignoresFallbacks); - } - - @Override - AbstractConfigObject resolveSubstitutions(SubstitutionResolver resolver, - int depth, ConfigResolveOptions options) { - AbstractConfigValue merged = ConfigDelayedMerge.resolveSubstitutions( - stack, resolver, depth, - options); - if (merged instanceof AbstractConfigObject) { - return (AbstractConfigObject) merged; - } else { - throw new ConfigException.BugOrBroken( - "somehow brokenly merged an object and didn't get an object"); - } - } - - @Override - ResolveStatus resolveStatus() { - return ResolveStatus.UNRESOLVED; - } - - @Override - ConfigDelayedMergeObject relativized(Path 
prefix) { - List newStack = new ArrayList(); - for (AbstractConfigValue o : stack) { - newStack.add(o.relativized(prefix)); - } - return new ConfigDelayedMergeObject(origin(), newStack, - ignoresFallbacks); - } - - @Override - protected boolean ignoresFallbacks() { - return ignoresFallbacks; - } - - @Override - protected ConfigDelayedMergeObject mergedWithObject(AbstractConfigObject fallback) { - if (ignoresFallbacks) - throw new ConfigException.BugOrBroken("should not be reached"); - - // since we are an object, and the fallback is, we'll need to - // merge the fallback once we resolve. - List newStack = new ArrayList(); - newStack.addAll(stack); - newStack.add(fallback); - return new ConfigDelayedMergeObject(AbstractConfigObject.mergeOrigins(newStack), newStack, - fallback.ignoresFallbacks()); - } - - @Override - public ConfigDelayedMergeObject withFallback(ConfigMergeable mergeable) { - return (ConfigDelayedMergeObject) super.withFallback(mergeable); - } - - @Override - public ConfigDelayedMergeObject withOnlyKey(String key) { - throw notResolved(); - } - - @Override - public ConfigDelayedMergeObject withoutKey(String key) { - throw notResolved(); - } - - @Override - protected AbstractConfigObject withOnlyPathOrNull(Path path) { - throw notResolved(); - } - - @Override - AbstractConfigObject withOnlyPath(Path path) { - throw notResolved(); - } - - @Override - AbstractConfigObject withoutPath(Path path) { - throw notResolved(); - } - - @Override - public Collection unmergedValues() { - return stack; - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof ConfigDelayedMergeObject; - } - - @Override - public boolean equals(Object other) { - // note that "origin" is deliberately NOT part of equality - if (other instanceof ConfigDelayedMergeObject) { - return canEqual(other) - && this.stack - .equals(((ConfigDelayedMergeObject) other).stack); - } else { - return false; - } - } - - @Override - public int hashCode() { - // note that 
"origin" is deliberately NOT part of equality - return stack.hashCode(); - } - - @Override - protected void render(StringBuilder sb, int indent, String atKey, boolean formatted) { - ConfigDelayedMerge.render(stack, sb, indent, atKey, formatted); - } - - private static ConfigException notResolved() { - return new ConfigException.NotResolved( - "bug: this object has not had substitutions resolved, so can't be used"); - } - - @Override - public Map unwrapped() { - throw notResolved(); - } - - @Override - public boolean containsKey(Object key) { - throw notResolved(); - } - - @Override - public boolean containsValue(Object value) { - throw notResolved(); - } - - @Override - public Set> entrySet() { - throw notResolved(); - } - - @Override - public boolean isEmpty() { - throw notResolved(); - } - - @Override - public Set keySet() { - throw notResolved(); - } - - @Override - public int size() { - throw notResolved(); - } - - @Override - public Collection values() { - throw notResolved(); - } - - @Override - protected AbstractConfigValue peek(String key) { - throw notResolved(); - } - - // This ridiculous hack is because some JDK versions apparently can't - // serialize an array, which is used to implement ArrayList and EmptyList. - // maybe - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6446627 - private Object writeReplace() throws ObjectStreamException { - // switch to LinkedList - return new ConfigDelayedMergeObject(origin(), - new java.util.LinkedList(stack), ignoresFallbacks); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDouble.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDouble.java deleted file mode 100755 index 8b99db9640..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigDouble.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigValueType; - -final class ConfigDouble extends ConfigNumber { - - private static final long serialVersionUID = 1L; - - final private double value; - - ConfigDouble(ConfigOrigin origin, double value, String originalText) { - super(origin, originalText); - this.value = value; - } - - @Override - public ConfigValueType valueType() { - return ConfigValueType.NUMBER; - } - - @Override - public Double unwrapped() { - return value; - } - - @Override - String transformToString() { - String s = super.transformToString(); - if (s == null) - return Double.toString(value); - else - return s; - } - - @Override - protected long longValue() { - return (long) value; - } - - @Override - protected double doubleValue() { - return value; - } - - @Override - protected ConfigDouble newCopy(boolean ignoresFallbacks, ConfigOrigin origin) { - return new ConfigDouble(origin, value, originalText); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigImpl.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigImpl.java deleted file mode 100755 index fd41dda6a4..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigImpl.java +++ /dev/null @@ -1,419 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import java.io.File; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigIncludeContext; -import com.typesafe.config.ConfigIncluder; -import com.typesafe.config.ConfigObject; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigParseOptions; -import com.typesafe.config.ConfigParseable; -import com.typesafe.config.ConfigSyntax; -import com.typesafe.config.ConfigValue; - -/** This is public but is only supposed to be used by the "config" package */ -public class ConfigImpl { - - private interface NameSource { - ConfigParseable nameToParseable(String name); - } - - // this function is a little tricky because there are three places we're - // trying to use it; for 'include "basename"' in a .conf file, for - // loading app.{conf,json,properties} from classpath, and for - // loading app.{conf,json,properties} from the filesystem. 
- private static ConfigObject fromBasename(NameSource source, String name, - ConfigParseOptions options) { - ConfigObject obj; - if (name.endsWith(".conf") || name.endsWith(".json") - || name.endsWith(".properties")) { - ConfigParseable p = source.nameToParseable(name); - - obj = p.parse(p.options().setAllowMissing(options.getAllowMissing())); - } else { - ConfigParseable confHandle = source.nameToParseable(name + ".conf"); - ConfigParseable jsonHandle = source.nameToParseable(name + ".json"); - ConfigParseable propsHandle = source.nameToParseable(name - + ".properties"); - boolean gotSomething = false; - List failMessages = new ArrayList(); - - ConfigSyntax syntax = options.getSyntax(); - - obj = SimpleConfigObject.empty(SimpleConfigOrigin.newSimple(name)); - if (syntax == null || syntax == ConfigSyntax.CONF) { - try { - obj = confHandle.parse(confHandle.options().setAllowMissing(false) - .setSyntax(ConfigSyntax.CONF)); - gotSomething = true; - } catch (ConfigException.IO e) { - failMessages.add(e.getMessage()); - } - } - - if (syntax == null || syntax == ConfigSyntax.JSON) { - try { - ConfigObject parsed = jsonHandle.parse(jsonHandle.options() - .setAllowMissing(false).setSyntax(ConfigSyntax.JSON)); - obj = obj.withFallback(parsed); - gotSomething = true; - } catch (ConfigException.IO e) { - failMessages.add(e.getMessage()); - } - } - - if (syntax == null || syntax == ConfigSyntax.PROPERTIES) { - try { - ConfigObject parsed = propsHandle.parse(propsHandle.options() - .setAllowMissing(false).setSyntax(ConfigSyntax.PROPERTIES)); - obj = obj.withFallback(parsed); - gotSomething = true; - } catch (ConfigException.IO e) { - failMessages.add(e.getMessage()); - } - } - - if (!options.getAllowMissing() && !gotSomething) { - String failMessage; - if (failMessages.isEmpty()) { - // this should not happen - throw new ConfigException.BugOrBroken( - "should not be reached: nothing found but no exceptions thrown"); - } else { - StringBuilder sb = new StringBuilder(); - for 
(String msg : failMessages) { - sb.append(msg); - sb.append(", "); - } - sb.setLength(sb.length() - 2); - failMessage = sb.toString(); - } - throw new ConfigException.IO(SimpleConfigOrigin.newSimple(name), failMessage); - } - } - - return obj; - } - - /** For use ONLY by library internals, DO NOT TOUCH not guaranteed ABI */ - public static ConfigObject parseResourcesAnySyntax(final Class klass, - String resourceBasename, final ConfigParseOptions baseOptions) { - NameSource source = new NameSource() { - @Override - public ConfigParseable nameToParseable(String name) { - return Parseable.newResources(klass, name, baseOptions); - } - }; - return fromBasename(source, resourceBasename, baseOptions); - } - - /** For use ONLY by library internals, DO NOT TOUCH not guaranteed ABI */ - public static ConfigObject parseResourcesAnySyntax(final ClassLoader loader, - String resourceBasename, final ConfigParseOptions baseOptions) { - NameSource source = new NameSource() { - @Override - public ConfigParseable nameToParseable(String name) { - return Parseable.newResources(loader, name, baseOptions); - } - }; - return fromBasename(source, resourceBasename, baseOptions); - } - - /** For use ONLY by library internals, DO NOT TOUCH not guaranteed ABI */ - public static ConfigObject parseFileAnySyntax(final File basename, - final ConfigParseOptions baseOptions) { - NameSource source = new NameSource() { - @Override - public ConfigParseable nameToParseable(String name) { - return Parseable.newFile(new File(name), baseOptions); - } - }; - return fromBasename(source, basename.getPath(), baseOptions); - } - - static AbstractConfigObject emptyObject(String originDescription) { - ConfigOrigin origin = originDescription != null ? 
SimpleConfigOrigin - .newSimple(originDescription) : null; - return emptyObject(origin); - } - - /** For use ONLY by library internals, DO NOT TOUCH not guaranteed ABI */ - public static Config emptyConfig(String originDescription) { - return emptyObject(originDescription).toConfig(); - } - - static AbstractConfigObject empty(ConfigOrigin origin) { - return emptyObject(origin); - } - - // default origin for values created with fromAnyRef and no origin specified - final private static ConfigOrigin defaultValueOrigin = SimpleConfigOrigin - .newSimple("hardcoded value"); - final private static ConfigBoolean defaultTrueValue = new ConfigBoolean( - defaultValueOrigin, true); - final private static ConfigBoolean defaultFalseValue = new ConfigBoolean( - defaultValueOrigin, false); - final private static ConfigNull defaultNullValue = new ConfigNull( - defaultValueOrigin); - final private static SimpleConfigList defaultEmptyList = new SimpleConfigList( - defaultValueOrigin, Collections. emptyList()); - final private static SimpleConfigObject defaultEmptyObject = SimpleConfigObject - .empty(defaultValueOrigin); - - private static SimpleConfigList emptyList(ConfigOrigin origin) { - if (origin == null || origin == defaultValueOrigin) - return defaultEmptyList; - else - return new SimpleConfigList(origin, - Collections. 
emptyList()); - } - - private static AbstractConfigObject emptyObject(ConfigOrigin origin) { - // we want null origin to go to SimpleConfigObject.empty() to get the - // origin "empty config" rather than "hardcoded value" - if (origin == defaultValueOrigin) - return defaultEmptyObject; - else - return SimpleConfigObject.empty(origin); - } - - private static ConfigOrigin valueOrigin(String originDescription) { - if (originDescription == null) - return defaultValueOrigin; - else - return SimpleConfigOrigin.newSimple(originDescription); - } - - /** For use ONLY by library internals, DO NOT TOUCH not guaranteed ABI */ - public static ConfigValue fromAnyRef(Object object, String originDescription) { - ConfigOrigin origin = valueOrigin(originDescription); - return fromAnyRef(object, origin, FromMapMode.KEYS_ARE_KEYS); - } - - /** For use ONLY by library internals, DO NOT TOUCH not guaranteed ABI */ - public static ConfigObject fromPathMap( - Map pathMap, String originDescription) { - ConfigOrigin origin = valueOrigin(originDescription); - return (ConfigObject) fromAnyRef(pathMap, origin, - FromMapMode.KEYS_ARE_PATHS); - } - - static AbstractConfigValue fromAnyRef(Object object, ConfigOrigin origin, - FromMapMode mapMode) { - if (origin == null) - throw new ConfigException.BugOrBroken( - "origin not supposed to be null"); - - if (object == null) { - if (origin != defaultValueOrigin) - return new ConfigNull(origin); - else - return defaultNullValue; - } else if (object instanceof Boolean) { - if (origin != defaultValueOrigin) { - return new ConfigBoolean(origin, (Boolean) object); - } else if ((Boolean) object) { - return defaultTrueValue; - } else { - return defaultFalseValue; - } - } else if (object instanceof String) { - return new ConfigString(origin, (String) object); - } else if (object instanceof Number) { - // here we always keep the same type that was passed to us, - // rather than figuring out if a Long would fit in an Int - // or a Double has no fractional part. 
i.e. deliberately - // not using ConfigNumber.newNumber() when we have a - // Double, Integer, or Long. - if (object instanceof Double) { - return new ConfigDouble(origin, (Double) object, null); - } else if (object instanceof Integer) { - return new ConfigInt(origin, (Integer) object, null); - } else if (object instanceof Long) { - return new ConfigLong(origin, (Long) object, null); - } else { - return ConfigNumber.newNumber(origin, - ((Number) object).doubleValue(), null); - } - } else if (object instanceof Map) { - if (((Map) object).isEmpty()) - return emptyObject(origin); - - if (mapMode == FromMapMode.KEYS_ARE_KEYS) { - Map values = new HashMap(); - for (Map.Entry entry : ((Map) object).entrySet()) { - Object key = entry.getKey(); - if (!(key instanceof String)) - throw new ConfigException.BugOrBroken( - "bug in method caller: not valid to create ConfigObject from map with non-String key: " - + key); - AbstractConfigValue value = fromAnyRef(entry.getValue(), - origin, mapMode); - values.put((String) key, value); - } - - return new SimpleConfigObject(origin, values); - } else { - return PropertiesParser.fromPathMap(origin, (Map) object); - } - } else if (object instanceof Iterable) { - Iterator i = ((Iterable) object).iterator(); - if (!i.hasNext()) - return emptyList(origin); - - List values = new ArrayList(); - while (i.hasNext()) { - AbstractConfigValue v = fromAnyRef(i.next(), origin, mapMode); - values.add(v); - } - - return new SimpleConfigList(origin, values); - } else { - throw new ConfigException.BugOrBroken( - "bug in method caller: not valid to create ConfigValue from: " - + object); - } - } - - private static class SimpleIncluder implements ConfigIncluder { - - private ConfigIncluder fallback; - - SimpleIncluder(ConfigIncluder fallback) { - this.fallback = fallback; - } - - @Override - public ConfigObject include(final ConfigIncludeContext context, - String name) { - NameSource source = new NameSource() { - @Override - public ConfigParseable 
nameToParseable(String name) { - ConfigParseable p = context.relativeTo(name); - if (p == null) { - // avoid returning null - return Parseable.newNotFound(name, "include was not found: '" + name + "'", - ConfigParseOptions.defaults()); - } else { - return p; - } - } - }; - - ConfigObject obj = fromBasename(source, name, ConfigParseOptions - .defaults().setAllowMissing(true)); - - // now use the fallback includer if any and merge - // its result. - if (fallback != null) { - return obj.withFallback(fallback.include(context, name)); - } else { - return obj; - } - } - - @Override - public ConfigIncluder withFallback(ConfigIncluder fallback) { - if (this == fallback) { - throw new ConfigException.BugOrBroken( - "trying to create includer cycle"); - } else if (this.fallback == fallback) { - return this; - } else if (this.fallback != null) { - return new SimpleIncluder(this.fallback.withFallback(fallback)); - } else { - return new SimpleIncluder(fallback); - } - } - } - - private static class DefaultIncluderHolder { - static final ConfigIncluder defaultIncluder = new SimpleIncluder(null); - } - - static ConfigIncluder defaultIncluder() { - try { - return DefaultIncluderHolder.defaultIncluder; - } catch (ExceptionInInitializerError e) { - throw ConfigImplUtil.extractInitializerError(e); - } - } - - private static AbstractConfigObject loadSystemProperties() { - return (AbstractConfigObject) Parseable.newProperties(System.getProperties(), - ConfigParseOptions.defaults().setOriginDescription("system properties")).parse(); - } - - private static class SystemPropertiesHolder { - // this isn't final due to the reloadSystemPropertiesConfig() hack below - static AbstractConfigObject systemProperties = loadSystemProperties(); - } - - static AbstractConfigObject systemPropertiesAsConfigObject() { - try { - return SystemPropertiesHolder.systemProperties; - } catch (ExceptionInInitializerError e) { - throw ConfigImplUtil.extractInitializerError(e); - } - } - - /** For use ONLY by 
library internals, DO NOT TOUCH not guaranteed ABI */ - public static Config systemPropertiesAsConfig() { - return systemPropertiesAsConfigObject().toConfig(); - } - - // this is a hack to let us set system props in the test suite. - // obviously not thread-safe. - static void reloadSystemPropertiesConfig() { - SystemPropertiesHolder.systemProperties = loadSystemProperties(); - } - - private static AbstractConfigObject loadEnvVariables() { - Map env = System.getenv(); - Map m = new HashMap(); - for (Map.Entry entry : env.entrySet()) { - String key = entry.getKey(); - m.put(key, - new ConfigString(SimpleConfigOrigin.newSimple("env var " + key), entry - .getValue())); - } - return new SimpleConfigObject(SimpleConfigOrigin.newSimple("env variables"), - m, ResolveStatus.RESOLVED, false /* ignoresFallbacks */); - } - - private static class EnvVariablesHolder { - static final AbstractConfigObject envVariables = loadEnvVariables(); - } - - static AbstractConfigObject envVariablesAsConfigObject() { - try { - return EnvVariablesHolder.envVariables; - } catch (ExceptionInInitializerError e) { - throw ConfigImplUtil.extractInitializerError(e); - } - } - - /** For use ONLY by library internals, DO NOT TOUCH not guaranteed ABI */ - public static Config envVariablesAsConfig() { - return envVariablesAsConfigObject().toConfig(); - } - - /** For use ONLY by library internals, DO NOT TOUCH not guaranteed ABI */ - public static Config defaultReference(ClassLoader loader) { - Config unresolvedResources = Parseable - .newResources(loader, "reference.conf", ConfigParseOptions.defaults()).parse() - .toConfig(); - return systemPropertiesAsConfig().withFallback(unresolvedResources).resolve(); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigImplUtil.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigImplUtil.java deleted file mode 100755 index 4cbcff7aa3..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigImplUtil.java +++ 
/dev/null @@ -1,185 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.File; -import java.net.URISyntaxException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; - -import com.typesafe.config.ConfigException; - - -/** This is public just for the "config" package to use, don't touch it */ -final public class ConfigImplUtil { - static boolean equalsHandlingNull(Object a, Object b) { - if (a == null && b != null) - return false; - else if (a != null && b == null) - return false; - else if (a == b) // catches null == null plus optimizes identity case - return true; - else - return a.equals(b); - } - - /** - * This is public ONLY for use by the "config" package, DO NOT USE this ABI - * may change. - */ - public static String renderJsonString(String s) { - StringBuilder sb = new StringBuilder(); - sb.append('"'); - for (int i = 0; i < s.length(); ++i) { - char c = s.charAt(i); - switch (c) { - case '"': - sb.append("\\\""); - break; - case '\\': - sb.append("\\\\"); - break; - case '\n': - sb.append("\\n"); - break; - case '\b': - sb.append("\\b"); - break; - case '\f': - sb.append("\\f"); - break; - case '\r': - sb.append("\\r"); - break; - case '\t': - sb.append("\\t"); - break; - default: - if (Character.isISOControl(c)) - sb.append(String.format("\\u%04x", (int) c)); - else - sb.append(c); - } - } - sb.append('"'); - return sb.toString(); - } - - static boolean isWhitespace(int codepoint) { - switch (codepoint) { - // try to hit the most common ASCII ones first, then the nonbreaking - // spaces that Java brokenly leaves out of isWhitespace. - case ' ': - case '\n': - case '\u00A0': - case '\u2007': - case '\u202F': - return true; - default: - return Character.isWhitespace(codepoint); - } - } - - /** This is public just for the "config" package to use, don't touch it! 
*/ - public static String unicodeTrim(String s) { - // this is dumb because it looks like there aren't any whitespace - // characters that need surrogate encoding. But, points for - // pedantic correctness! It's future-proof or something. - // String.trim() actually is broken, since there are plenty of - // non-ASCII whitespace characters. - final int length = s.length(); - if (length == 0) - return s; - - int start = 0; - while (start < length) { - char c = s.charAt(start); - if (c == ' ' || c == '\n') { - start += 1; - } else { - int cp = s.codePointAt(start); - if (isWhitespace(cp)) - start += Character.charCount(cp); - else - break; - } - } - - int end = length; - while (end > start) { - char c = s.charAt(end - 1); - if (c == ' ' || c == '\n') { - --end; - } else { - int cp; - int delta; - if (Character.isLowSurrogate(c)) { - cp = s.codePointAt(end - 2); - delta = 2; - } else { - cp = s.codePointAt(end - 1); - delta = 1; - } - if (isWhitespace(cp)) - end -= delta; - else - break; - } - } - return s.substring(start, end); - } - - /** This is public just for the "config" package to use, don't touch it! */ - public static ConfigException extractInitializerError(ExceptionInInitializerError e) { - Throwable cause = e.getCause(); - if (cause != null && cause instanceof ConfigException) { - return (ConfigException) cause; - } else { - throw e; - } - } - - static File urlToFile(URL url) { - // this isn't really right, clearly, but not sure what to do. - try { - // this will properly handle hex escapes, etc. - return new File(url.toURI()); - } catch (URISyntaxException e) { - // this handles some stuff like file:///c:/Whatever/ - // apparently but mangles handling of hex escapes - return new File(url.getPath()); - } - } - - /** - * This is public ONLY for use by the "config" package, DO NOT USE this ABI - * may change. You can use the version in ConfigUtil instead. - */ - public static String joinPath(String... 
elements) { - return (new Path(elements)).render(); - } - - /** - * This is public ONLY for use by the "config" package, DO NOT USE this ABI - * may change. You can use the version in ConfigUtil instead. - */ - public static String joinPath(List elements) { - return joinPath(elements.toArray(new String[0])); - } - - /** - * This is public ONLY for use by the "config" package, DO NOT USE this ABI - * may change. You can use the version in ConfigUtil instead. - */ - public static List splitPath(String path) { - Path p = Path.newPath(path); - List elements = new ArrayList(); - while (p != null) { - elements.add(p.first()); - p = p.remainder(); - } - return elements; - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigInt.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigInt.java deleted file mode 100755 index 8ccb3f872a..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigInt.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigValueType; - -final class ConfigInt extends ConfigNumber { - - private static final long serialVersionUID = 1L; - - final private int value; - - ConfigInt(ConfigOrigin origin, int value, String originalText) { - super(origin, originalText); - this.value = value; - } - - @Override - public ConfigValueType valueType() { - return ConfigValueType.NUMBER; - } - - @Override - public Integer unwrapped() { - return value; - } - - @Override - String transformToString() { - String s = super.transformToString(); - if (s == null) - return Integer.toString(value); - else - return s; - } - - @Override - protected long longValue() { - return value; - } - - @Override - protected double doubleValue() { - return value; - } - - @Override - protected ConfigInt newCopy(boolean ignoresFallbacks, ConfigOrigin origin) { - return new ConfigInt(origin, value, originalText); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigLong.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigLong.java deleted file mode 100755 index 6e1a5073fb..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigLong.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigValueType; - -final class ConfigLong extends ConfigNumber { - - private static final long serialVersionUID = 1L; - - final private long value; - - ConfigLong(ConfigOrigin origin, long value, String originalText) { - super(origin, originalText); - this.value = value; - } - - @Override - public ConfigValueType valueType() { - return ConfigValueType.NUMBER; - } - - @Override - public Long unwrapped() { - return value; - } - - @Override - String transformToString() { - String s = super.transformToString(); - if (s == null) - return Long.toString(value); - else - return s; - } - - @Override - protected long longValue() { - return value; - } - - @Override - protected double doubleValue() { - return value; - } - - @Override - protected ConfigLong newCopy(boolean ignoresFallbacks, ConfigOrigin origin) { - return new ConfigLong(origin, value, originalText); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigNull.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigNull.java deleted file mode 100755 index 1dcd32b5b0..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigNull.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigValueType; - -/** - * This exists because sometimes null is not the same as missing. Specifically, - * if a value is set to null we can give a better error message (indicating - * where it was set to null) in case someone asks for the value. Also, null - * overrides values set "earlier" in the search path, while missing values do - * not. 
- * - */ -final class ConfigNull extends AbstractConfigValue { - - private static final long serialVersionUID = 1L; - - ConfigNull(ConfigOrigin origin) { - super(origin); - } - - @Override - public ConfigValueType valueType() { - return ConfigValueType.NULL; - } - - @Override - public Object unwrapped() { - return null; - } - - @Override - String transformToString() { - return "null"; - } - - @Override - protected void render(StringBuilder sb, int indent, boolean formatted) { - sb.append("null"); - } - - @Override - protected ConfigNull newCopy(boolean ignoresFallbacks, ConfigOrigin origin) { - return new ConfigNull(origin); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigNumber.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigNumber.java deleted file mode 100755 index 8f6996d77b..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigNumber.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; - -abstract class ConfigNumber extends AbstractConfigValue { - - private static final long serialVersionUID = 1L; - - // This is so when we concatenate a number into a string (say it appears in - // a sentence) we always have it exactly as the person typed it into the - // config file. It's purely cosmetic; equals/hashCode don't consider this - // for example. 
- final protected String originalText; - - protected ConfigNumber(ConfigOrigin origin, String originalText) { - super(origin); - this.originalText = originalText; - } - - @Override - public abstract Number unwrapped(); - - @Override - String transformToString() { - return originalText; - } - - int intValueRangeChecked(String path) { - long l = longValue(); - if (l < Integer.MIN_VALUE || l > Integer.MAX_VALUE) { - throw new ConfigException.WrongType(origin(), path, "32-bit integer", - "out-of-range value " + l); - } - return (int) l; - } - - protected abstract long longValue(); - - protected abstract double doubleValue(); - - private boolean isWhole() { - long asLong = longValue(); - return asLong == doubleValue(); - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof ConfigNumber; - } - - @Override - public boolean equals(Object other) { - // note that "origin" is deliberately NOT part of equality - if (canEqual(other)) { - ConfigNumber n = (ConfigNumber) other; - if (isWhole()) { - return n.isWhole() && this.longValue() == n.longValue(); - } else { - return (!n.isWhole()) && this.doubleValue() == n.doubleValue(); - } - } else { - return false; - } - } - - @Override - public int hashCode() { - // note that "origin" is deliberately NOT part of equality - - // this matches what standard Long.hashCode and Double.hashCode - // do, though I don't think it really matters. 
- long asLong; - if (isWhole()) { - asLong = longValue(); - } else { - asLong = Double.doubleToLongBits(doubleValue()); - } - return (int) (asLong ^ (asLong >>> 32)); - } - - static ConfigNumber newNumber(ConfigOrigin origin, long number, - String originalText) { - if (number <= Integer.MAX_VALUE && number >= Integer.MIN_VALUE) - return new ConfigInt(origin, (int) number, originalText); - else - return new ConfigLong(origin, number, originalText); - } - - static ConfigNumber newNumber(ConfigOrigin origin, double number, - String originalText) { - long asLong = (long) number; - if (asLong == number) { - return newNumber(origin, asLong, originalText); - } else { - return new ConfigDouble(origin, number, originalText); - } - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigString.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigString.java deleted file mode 100755 index 4bdce3b129..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigString.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigValueType; - -final class ConfigString extends AbstractConfigValue { - - private static final long serialVersionUID = 1L; - - final private String value; - - ConfigString(ConfigOrigin origin, String value) { - super(origin); - this.value = value; - } - - @Override - public ConfigValueType valueType() { - return ConfigValueType.STRING; - } - - @Override - public String unwrapped() { - return value; - } - - @Override - String transformToString() { - return value; - } - - @Override - protected void render(StringBuilder sb, int indent, boolean formatted) { - sb.append(ConfigImplUtil.renderJsonString(value)); - } - - @Override - protected ConfigString newCopy(boolean ignoresFallbacks, ConfigOrigin origin) { - return new ConfigString(origin, value); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigSubstitution.java b/akka-actor/src/main/java/com/typesafe/config/impl/ConfigSubstitution.java deleted file mode 100755 index cca0ebb577..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ConfigSubstitution.java +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.ObjectStreamException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigResolveOptions; -import com.typesafe.config.ConfigValue; -import com.typesafe.config.ConfigValueType; - -/** - * A ConfigSubstitution represents a value with one or more substitutions in it; - * it can resolve to a value of any type, though if the substitution has more - * than one piece it always resolves to a string via value concatenation. 
- */ -final class ConfigSubstitution extends AbstractConfigValue implements - Unmergeable { - - private static final long serialVersionUID = 1L; - - // this is a list of String and SubstitutionExpression where the - // SubstitutionExpression has to be resolved to values, then if there's more - // than one piece everything is stringified and concatenated - final private List pieces; - // the length of any prefixes added with relativized() - final private int prefixLength; - final private boolean ignoresFallbacks; - - ConfigSubstitution(ConfigOrigin origin, List pieces) { - this(origin, pieces, 0, false); - } - - private ConfigSubstitution(ConfigOrigin origin, List pieces, - int prefixLength, boolean ignoresFallbacks) { - super(origin); - this.pieces = pieces; - this.prefixLength = prefixLength; - this.ignoresFallbacks = ignoresFallbacks; - for (Object p : pieces) { - if (p instanceof Path) - throw new RuntimeException("broken here"); - } - } - - @Override - public ConfigValueType valueType() { - throw new ConfigException.NotResolved( - "need to call resolve() on root config; tried to get value type on an unresolved substitution: " - + this); - } - - @Override - public Object unwrapped() { - throw new ConfigException.NotResolved( - "need to call resolve() on root config; tried to unwrap an unresolved substitution: " - + this); - } - - @Override - protected ConfigSubstitution newCopy(boolean ignoresFallbacks, ConfigOrigin newOrigin) { - return new ConfigSubstitution(newOrigin, pieces, prefixLength, ignoresFallbacks); - } - - @Override - protected boolean ignoresFallbacks() { - return ignoresFallbacks; - } - - @Override - protected AbstractConfigValue mergedWithTheUnmergeable(Unmergeable fallback) { - if (ignoresFallbacks) - throw new ConfigException.BugOrBroken("should not be reached"); - - // if we turn out to be an object, and the fallback also does, - // then a merge may be required; delay until we resolve. 
- List newStack = new ArrayList(); - newStack.add(this); - newStack.addAll(fallback.unmergedValues()); - return new ConfigDelayedMerge(AbstractConfigObject.mergeOrigins(newStack), newStack, - ((AbstractConfigValue) fallback).ignoresFallbacks()); - } - - protected AbstractConfigValue mergedLater(AbstractConfigValue fallback) { - if (ignoresFallbacks) - throw new ConfigException.BugOrBroken("should not be reached"); - - List newStack = new ArrayList(); - newStack.add(this); - newStack.add(fallback); - return new ConfigDelayedMerge(AbstractConfigObject.mergeOrigins(newStack), newStack, - fallback.ignoresFallbacks()); - } - - @Override - protected AbstractConfigValue mergedWithObject(AbstractConfigObject fallback) { - // if we turn out to be an object, and the fallback also does, - // then a merge may be required; delay until we resolve. - return mergedLater(fallback); - } - - @Override - protected AbstractConfigValue mergedWithNonObject(AbstractConfigValue fallback) { - // if the optional substitution ends up getting deleted (because it is - // not present), we'll have to use the fallback. So delay the merge. 
- if (pieces.size() == 1 && ((SubstitutionExpression) pieces.get(0)).optional()) - return mergedLater(fallback); - else - return super.mergedWithNonObject(fallback); - } - - @Override - public Collection unmergedValues() { - return Collections.singleton(this); - } - - List pieces() { - return pieces; - } - - // larger than anyone would ever want - private static final int MAX_DEPTH = 100; - - private ConfigValue findInObject(AbstractConfigObject root, - SubstitutionResolver resolver, /* null if we should not have refs */ - Path subst, int depth, ConfigResolveOptions options) { - if (depth > MAX_DEPTH) { - throw new ConfigException.BadValue(origin(), subst.render(), - "Substitution ${" + subst.render() - + "} is part of a cycle of substitutions"); - } - - ConfigValue result = root.peekPath(subst, resolver, depth, options); - - if (result instanceof ConfigSubstitution) { - throw new ConfigException.BugOrBroken( - "peek or peekPath returned an unresolved substitution"); - } - - return result; - } - - private ConfigValue resolve(SubstitutionResolver resolver, SubstitutionExpression subst, - int depth, ConfigResolveOptions options) { - // First we look up the full path, which means relative to the - // included file if we were not a root file - ConfigValue result = findInObject(resolver.root(), resolver, subst.path(), - depth, options); - - if (result == null) { - // Then we want to check relative to the root file. We don't - // want the prefix we were included at to be used when looking up - // env variables either. 
- Path unprefixed = subst.path().subPath(prefixLength); - - if (result == null && prefixLength > 0) { - result = findInObject(resolver.root(), resolver, unprefixed, depth, options); - } - - if (result == null && options.getUseSystemEnvironment()) { - result = findInObject(ConfigImpl.envVariablesAsConfigObject(), null, unprefixed, - depth, options); - } - } - - return result; - } - - private ConfigValue resolve(SubstitutionResolver resolver, int depth, - ConfigResolveOptions options) { - if (pieces.size() > 1) { - // need to concat everything into a string - StringBuilder sb = new StringBuilder(); - for (Object p : pieces) { - if (p instanceof String) { - sb.append((String) p); - } else { - SubstitutionExpression exp = (SubstitutionExpression) p; - ConfigValue v = resolve(resolver, exp, depth, options); - - if (v == null) { - if (exp.optional()) { - // append nothing to StringBuilder - } else { - throw new ConfigException.UnresolvedSubstitution(origin(), - exp.toString()); - } - } else { - switch (v.valueType()) { - case LIST: - case OBJECT: - // cannot substitute lists and objects into strings - throw new ConfigException.WrongType(v.origin(), exp.path().render(), - "not a list or object", v.valueType().name()); - default: - sb.append(((AbstractConfigValue) v).transformToString()); - } - } - } - } - return new ConfigString(origin(), sb.toString()); - } else { - if (!(pieces.get(0) instanceof SubstitutionExpression)) - throw new ConfigException.BugOrBroken( - "ConfigSubstitution should never contain a single String piece"); - SubstitutionExpression exp = (SubstitutionExpression) pieces.get(0); - ConfigValue v = resolve(resolver, exp, depth, options); - if (v == null && !exp.optional()) { - throw new ConfigException.UnresolvedSubstitution(origin(), exp.toString()); - } - return v; - } - } - - @Override - AbstractConfigValue resolveSubstitutions(SubstitutionResolver resolver, - int depth, - ConfigResolveOptions options) { - // only ConfigSubstitution adds to depth 
here, because the depth - // is the substitution depth not the recursion depth - AbstractConfigValue resolved = (AbstractConfigValue) resolve(resolver, - depth + 1, options); - return resolved; - } - - @Override - ResolveStatus resolveStatus() { - return ResolveStatus.UNRESOLVED; - } - - // when you graft a substitution into another object, - // you have to prefix it with the location in that object - // where you grafted it; but save prefixLength so - // system property and env variable lookups don't get - // broken. - @Override - ConfigSubstitution relativized(Path prefix) { - List newPieces = new ArrayList(); - for (Object p : pieces) { - if (p instanceof SubstitutionExpression) { - SubstitutionExpression exp = (SubstitutionExpression) p; - - newPieces.add(exp.changePath(exp.path().prepend(prefix))); - } else { - newPieces.add(p); - } - } - return new ConfigSubstitution(origin(), newPieces, prefixLength - + prefix.length(), ignoresFallbacks); - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof ConfigSubstitution; - } - - @Override - public boolean equals(Object other) { - // note that "origin" is deliberately NOT part of equality - if (other instanceof ConfigSubstitution) { - return canEqual(other) - && this.pieces.equals(((ConfigSubstitution) other).pieces); - } else { - return false; - } - } - - @Override - public int hashCode() { - // note that "origin" is deliberately NOT part of equality - return pieces.hashCode(); - } - - @Override - protected void render(StringBuilder sb, int indent, boolean formatted) { - for (Object p : pieces) { - if (p instanceof SubstitutionExpression) { - sb.append(p.toString()); - } else { - sb.append(ConfigImplUtil.renderJsonString((String) p)); - } - } - } - - // This ridiculous hack is because some JDK versions apparently can't - // serialize an array, which is used to implement ArrayList and EmptyList. 
- // maybe - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6446627 - private Object writeReplace() throws ObjectStreamException { - // switch to LinkedList - return new ConfigSubstitution(origin(), new java.util.LinkedList(pieces), - prefixLength, ignoresFallbacks); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/DefaultTransformer.java b/akka-actor/src/main/java/com/typesafe/config/impl/DefaultTransformer.java deleted file mode 100755 index 9a9bf5c6a9..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/DefaultTransformer.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigValueType; - -/** - * Default automatic type transformations. - */ -final class DefaultTransformer { - - static AbstractConfigValue transform(AbstractConfigValue value, - ConfigValueType requested) { - if (value.valueType() == ConfigValueType.STRING) { - String s = (String) value.unwrapped(); - switch (requested) { - case NUMBER: - try { - Long v = Long.parseLong(s); - return new ConfigLong(value.origin(), v, s); - } catch (NumberFormatException e) { - // try Double - } - try { - Double v = Double.parseDouble(s); - return new ConfigDouble(value.origin(), v, s); - } catch (NumberFormatException e) { - // oh well. 
- } - break; - case NULL: - if (s.equals("null")) - return new ConfigNull(value.origin()); - break; - case BOOLEAN: - if (s.equals("true") || s.equals("yes") || s.equals("on")) { - return new ConfigBoolean(value.origin(), true); - } else if (s.equals("false") || s.equals("no") - || s.equals("off")) { - return new ConfigBoolean(value.origin(), false); - } - break; - case LIST: - // can't go STRING to LIST automatically - break; - case OBJECT: - // can't go STRING to OBJECT automatically - break; - case STRING: - // no-op STRING to STRING - break; - } - } else if (requested == ConfigValueType.STRING) { - // if we converted null to string here, then you wouldn't properly - // get a missing-value error if you tried to get a null value - // as a string. - switch (value.valueType()) { - case NUMBER: // FALL THROUGH - case BOOLEAN: - return new ConfigString(value.origin(), - value.transformToString()); - case NULL: - // want to be sure this throws instead of returning "null" as a - // string - break; - case OBJECT: - // no OBJECT to STRING automatically - break; - case LIST: - // no LIST to STRING automatically - break; - case STRING: - // no-op STRING to STRING - break; - } - } - - return value; - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/FromMapMode.java b/akka-actor/src/main/java/com/typesafe/config/impl/FromMapMode.java deleted file mode 100755 index 764a45664c..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/FromMapMode.java +++ /dev/null @@ -1,8 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -enum FromMapMode { - KEYS_ARE_PATHS, KEYS_ARE_KEYS -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/MergeableValue.java b/akka-actor/src/main/java/com/typesafe/config/impl/MergeableValue.java deleted file mode 100755 index 38dba70cc2..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/MergeableValue.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigMergeable; -import com.typesafe.config.ConfigValue; - -interface MergeableValue extends ConfigMergeable { - // converts a Config to its root object and a ConfigValue to itself - ConfigValue toFallbackValue(); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/OriginType.java b/akka-actor/src/main/java/com/typesafe/config/impl/OriginType.java deleted file mode 100755 index b8e990c091..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/OriginType.java +++ /dev/null @@ -1,8 +0,0 @@ -package com.typesafe.config.impl; - -enum OriginType { - GENERIC, - FILE, - URL, - RESOURCE -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/Parseable.java b/akka-actor/src/main/java/com/typesafe/config/impl/Parseable.java deleted file mode 100755 index 4938603199..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/Parseable.java +++ /dev/null @@ -1,637 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FilterReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.Reader; -import java.io.StringReader; -import java.io.UnsupportedEncodingException; -import java.net.MalformedURLException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; -import java.util.Enumeration; -import java.util.Iterator; -import java.util.Properties; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigIncludeContext; -import com.typesafe.config.ConfigObject; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigParseOptions; -import com.typesafe.config.ConfigParseable; -import com.typesafe.config.ConfigSyntax; -import com.typesafe.config.ConfigValue; - -/** - * This is public but it's only for use by the config package; DO NOT TOUCH. The - * point of this class is to avoid "propagating" each overload on - * "thing which can be parsed" through multiple interfaces. Most interfaces can - * have just one overload that takes a Parseable. Also it's used as an abstract - * "resource handle" in the ConfigIncluder interface. 
- */ -public abstract class Parseable implements ConfigParseable { - private ConfigIncludeContext includeContext; - private ConfigParseOptions initialOptions; - private ConfigOrigin initialOrigin; - - protected Parseable() { - - } - - private ConfigParseOptions fixupOptions(ConfigParseOptions baseOptions) { - ConfigSyntax syntax = baseOptions.getSyntax(); - if (syntax == null) { - syntax = guessSyntax(); - } - if (syntax == null) { - syntax = ConfigSyntax.CONF; - } - ConfigParseOptions modified = baseOptions.setSyntax(syntax); - - modified = modified.appendIncluder(ConfigImpl.defaultIncluder()); - - return modified; - } - - protected void postConstruct(ConfigParseOptions baseOptions) { - this.initialOptions = fixupOptions(baseOptions); - - this.includeContext = new ConfigIncludeContext() { - @Override - public ConfigParseable relativeTo(String filename) { - return Parseable.this.relativeTo(filename); - } - }; - - if (initialOptions.getOriginDescription() != null) - initialOrigin = SimpleConfigOrigin.newSimple(initialOptions.getOriginDescription()); - else - initialOrigin = createOrigin(); - } - - // the general idea is that any work should be in here, not in the - // constructor, - // so that exceptions are thrown from the public parse() function and not - // from the creation of the Parseable. Essentially this is a lazy field. - // The parser should close the reader when it's done with it. - // ALSO, IMPORTANT: if the file or URL is not found, this must throw. - // to support the "allow missing" feature. 
- protected abstract Reader reader() throws IOException; - - ConfigSyntax guessSyntax() { - return null; - } - - ConfigParseable relativeTo(String filename) { - return null; - } - - ConfigIncludeContext includeContext() { - return includeContext; - } - - static AbstractConfigObject forceParsedToObject(ConfigValue value) { - if (value instanceof AbstractConfigObject) { - return (AbstractConfigObject) value; - } else { - throw new ConfigException.WrongType(value.origin(), "", - "object at file root", value.valueType().name()); - } - } - - @Override - public ConfigObject parse(ConfigParseOptions baseOptions) { - return forceParsedToObject(parseValue(baseOptions)); - } - - final AbstractConfigValue parseValue(ConfigParseOptions baseOptions) { - // note that we are NOT using our "initialOptions", - // but using the ones from the passed-in options. The idea is that - // callers can get our original options and then parse with different - // ones if they want. - ConfigParseOptions options = fixupOptions(baseOptions); - - // passed-in options can override origin - ConfigOrigin origin; - if (options.getOriginDescription() != null) - origin = SimpleConfigOrigin.newSimple(options.getOriginDescription()); - else - origin = initialOrigin; - return parseValue(origin, options); - } - - final private AbstractConfigValue parseValue(ConfigOrigin origin, - ConfigParseOptions finalOptions) { - try { - return rawParseValue(origin, finalOptions); - } catch (IOException e) { - if (finalOptions.getAllowMissing()) { - return SimpleConfigObject.emptyMissing(origin); - } else { - throw new ConfigException.IO(origin, e.getMessage(), e); - } - } - } - - // this is parseValue without post-processing the IOException or handling - // options.getAllowMissing() - protected AbstractConfigValue rawParseValue(ConfigOrigin origin, ConfigParseOptions finalOptions) - throws IOException { - Reader reader = reader(); - try { - return rawParseValue(reader, origin, finalOptions); - } finally { - 
reader.close(); - } - } - - protected AbstractConfigValue rawParseValue(Reader reader, ConfigOrigin origin, - ConfigParseOptions finalOptions) throws IOException { - if (finalOptions.getSyntax() == ConfigSyntax.PROPERTIES) { - return PropertiesParser.parse(reader, origin); - } else { - Iterator tokens = Tokenizer.tokenize(origin, reader, finalOptions.getSyntax()); - return Parser.parse(tokens, origin, finalOptions, includeContext()); - } - } - - public ConfigObject parse() { - return forceParsedToObject(parseValue(options())); - } - - AbstractConfigValue parseValue() { - return parseValue(options()); - } - - @Override - public final ConfigOrigin origin() { - return initialOrigin; - } - - protected abstract ConfigOrigin createOrigin(); - - @Override - public ConfigParseOptions options() { - return initialOptions; - } - - @Override - public String toString() { - return getClass().getSimpleName(); - } - - private static ConfigSyntax syntaxFromExtension(String name) { - if (name.endsWith(".json")) - return ConfigSyntax.JSON; - else if (name.endsWith(".conf")) - return ConfigSyntax.CONF; - else if (name.endsWith(".properties")) - return ConfigSyntax.PROPERTIES; - else - return null; - } - - private static Reader readerFromStream(InputStream input) { - try { - // well, this is messed up. If we aren't going to close - // the passed-in InputStream then we have no way to - // close these readers. So maybe we should not have an - // InputStream version, only a Reader version. - Reader reader = new InputStreamReader(input, "UTF-8"); - return new BufferedReader(reader); - } catch (UnsupportedEncodingException e) { - throw new ConfigException.BugOrBroken( - "Java runtime does not support UTF-8", e); - } - } - - private static Reader doNotClose(Reader input) { - return new FilterReader(input) { - @Override - public void close() { - // NOTHING. 
- } - }; - } - - static URL relativeTo(URL url, String filename) { - // I'm guessing this completely fails on Windows, help wanted - if (new File(filename).isAbsolute()) - return null; - - try { - URI siblingURI = url.toURI(); - URI relative = new URI(filename); - - // this seems wrong, but it's documented that the last - // element of the path in siblingURI gets stripped out, - // so to get something in the same directory as - // siblingURI we just call resolve(). - URL resolved = siblingURI.resolve(relative).toURL(); - - return resolved; - } catch (MalformedURLException e) { - return null; - } catch (URISyntaxException e) { - return null; - } catch (IllegalArgumentException e) { - return null; - } - } - - static File relativeTo(File file, String filename) { - File child = new File(filename); - - if (child.isAbsolute()) - return null; - - File parent = file.getParentFile(); - - if (parent == null) - return null; - else - return new File(parent, filename); - } - - // this is a parseable that doesn't exist and just throws when you try to - // parse it - private final static class ParseableNotFound extends Parseable { - final private String what; - final private String message; - - ParseableNotFound(String what, String message, ConfigParseOptions options) { - this.what = what; - this.message = message; - postConstruct(options); - } - - @Override - protected Reader reader() throws IOException { - throw new FileNotFoundException(message); - } - - @Override - protected ConfigOrigin createOrigin() { - return SimpleConfigOrigin.newSimple(what); - } - } - - public static Parseable newNotFound(String whatNotFound, String message, - ConfigParseOptions options) { - return new ParseableNotFound(whatNotFound, message, options); - } - - private final static class ParseableReader extends Parseable { - final private Reader reader; - - ParseableReader(Reader reader, ConfigParseOptions options) { - this.reader = reader; - postConstruct(options); - } - - @Override - protected Reader 
reader() { - return reader; - } - - @Override - protected ConfigOrigin createOrigin() { - return SimpleConfigOrigin.newSimple("Reader"); - } - } - - /** - * note that we will never close this reader; you have to do it when parsing - * is complete. - */ - public static Parseable newReader(Reader reader, ConfigParseOptions options) { - return new ParseableReader(doNotClose(reader), options); - } - - private final static class ParseableString extends Parseable { - final private String input; - - ParseableString(String input, ConfigParseOptions options) { - this.input = input; - postConstruct(options); - } - - @Override - protected Reader reader() { - return new StringReader(input); - } - - @Override - protected ConfigOrigin createOrigin() { - return SimpleConfigOrigin.newSimple("String"); - } - } - - public static Parseable newString(String input, ConfigParseOptions options) { - return new ParseableString(input, options); - } - - private final static class ParseableURL extends Parseable { - final private URL input; - - ParseableURL(URL input, ConfigParseOptions options) { - this.input = input; - postConstruct(options); - } - - @Override - protected Reader reader() throws IOException { - InputStream stream = input.openStream(); - return readerFromStream(stream); - } - - @Override - ConfigSyntax guessSyntax() { - return syntaxFromExtension(input.getPath()); - } - - @Override - ConfigParseable relativeTo(String filename) { - URL url = relativeTo(input, filename); - if (url == null) - return null; - return newURL(url, options() - .setOriginDescription(null)); - } - - @Override - protected ConfigOrigin createOrigin() { - return SimpleConfigOrigin.newURL(input); - } - - @Override - public String toString() { - return getClass().getSimpleName() + "(" + input.toExternalForm() - + ")"; - } - } - - public static Parseable newURL(URL input, ConfigParseOptions options) { - // we want file: URLs and files to always behave the same, so switch - // to a file if it's a file: URL - if 
(input.getProtocol().equals("file")) { - return newFile(ConfigImplUtil.urlToFile(input), options); - } else { - return new ParseableURL(input, options); - } - } - - private final static class ParseableFile extends Parseable { - final private File input; - - ParseableFile(File input, ConfigParseOptions options) { - this.input = input; - postConstruct(options); - } - - @Override - protected Reader reader() throws IOException { - InputStream stream = new FileInputStream(input); - return readerFromStream(stream); - } - - @Override - ConfigSyntax guessSyntax() { - return syntaxFromExtension(input.getName()); - } - - @Override - ConfigParseable relativeTo(String filename) { - File sibling; - if ((new File(filename)).isAbsolute()) { - sibling = new File(filename); - } else { - // this may return null - sibling = relativeTo(input, filename); - } - if (sibling == null) - return null; - if (sibling.exists()) { - return newFile(sibling, options().setOriginDescription(null)); - } else { - // fall back to classpath; we treat the "filename" as absolute - // (don't add a package name in front), - // if it starts with "/" then remove the "/", for consistency - // with ParseableResources.relativeTo - String resource = filename; - if (filename.startsWith("/")) - resource = filename.substring(1); - return newResources(this.getClass().getClassLoader(), resource, options() - .setOriginDescription(null)); - } - } - - @Override - protected ConfigOrigin createOrigin() { - return SimpleConfigOrigin.newFile(input.getPath()); - } - - @Override - public String toString() { - return getClass().getSimpleName() + "(" + input.getPath() + ")"; - } - } - - public static Parseable newFile(File input, ConfigParseOptions options) { - return new ParseableFile(input, options); - } - - private final static class ParseableResources extends Parseable { - final private ClassLoader loader; - final private String resource; - - ParseableResources(ClassLoader loader, String resource, - ConfigParseOptions 
options) { - this.loader = loader; - this.resource = resource; - postConstruct(options); - } - - @Override - protected Reader reader() throws IOException { - throw new ConfigException.BugOrBroken( - "reader() should not be called on resources"); - } - - @Override - protected AbstractConfigObject rawParseValue(ConfigOrigin origin, - ConfigParseOptions finalOptions) throws IOException { - Enumeration e = loader.getResources(resource); - if (!e.hasMoreElements()) { - throw new IOException("resource not found on classpath: " + resource); - } - AbstractConfigObject merged = SimpleConfigObject.empty(origin); - while (e.hasMoreElements()) { - URL url = e.nextElement(); - - ConfigOrigin elementOrigin = ((SimpleConfigOrigin) origin).addURL(url); - - AbstractConfigValue v; - - // it's tempting to use ParseableURL here but it would be wrong - // because the wrong relativeTo() would be used for includes. - InputStream stream = url.openStream(); - try { - Reader reader = readerFromStream(stream); - stream = null; // reader now owns it - try { - // parse in "raw" mode which will throw any IOException - // from here. 
- v = rawParseValue(reader, elementOrigin, finalOptions); - } finally { - reader.close(); - } - } finally { - // stream is null if the reader owns it - if (stream != null) - stream.close(); - } - - merged = merged.withFallback(v); - } - - return merged; - } - - @Override - ConfigSyntax guessSyntax() { - return syntaxFromExtension(resource); - } - - static String parent(String resource) { - // the "resource" is not supposed to begin with a "/" - // because it's supposed to be the raw resource - // (ClassLoader#getResource), not the - // resource "syntax" (Class#getResource) - int i = resource.lastIndexOf('/'); - if (i < 0) { - return null; - } else { - return resource.substring(0, i); - } - } - - @Override - ConfigParseable relativeTo(String sibling) { - if (sibling.startsWith("/")) { - // if it starts with "/" then don't make it relative to - // the including resource - return newResources(loader, sibling.substring(1), - options().setOriginDescription(null)); - } else { - // here we want to build a new resource name and let - // the class loader have it, rather than getting the - // url with getResource() and relativizing to that url. - // This is needed in case the class loader is going to - // search a classpath. 
- String parent = parent(resource); - if (parent == null) - return newResources(loader, sibling, options().setOriginDescription(null)); - else - return newResources(loader, parent + "/" + sibling, options() - .setOriginDescription(null)); - } - } - - @Override - protected ConfigOrigin createOrigin() { - return SimpleConfigOrigin.newResource(resource); - } - - @Override - public String toString() { - return getClass().getSimpleName() + "(" + resource + "," - + loader.getClass().getSimpleName() + ")"; - } - } - - public static Parseable newResources(Class klass, String resource, - ConfigParseOptions options) { - return newResources(klass.getClassLoader(), convertResourceName(klass, resource), options); - } - - // this function is supposed to emulate the difference - // between Class.getResource and ClassLoader.getResource - // (unfortunately there doesn't seem to be public API for it). - // We're using it because the Class API is more limited, - // for example it lacks getResources(). So we want to be able to - // use ClassLoader directly. 
- private static String convertResourceName(Class klass, String resource) { - if (resource.startsWith("/")) { - // "absolute" resource, chop the slash - return resource.substring(1); - } else { - String className = klass.getName(); - int i = className.lastIndexOf('.'); - if (i < 0) { - // no package - return resource; - } else { - // need to be relative to the package - String packageName = className.substring(0, i); - String packagePath = packageName.replace('.', '/'); - return packagePath + "/" + resource; - } - } - } - - public static Parseable newResources(ClassLoader loader, String resource, - ConfigParseOptions options) { - return new ParseableResources(loader, resource, options); - } - - private final static class ParseableProperties extends Parseable { - final private Properties props; - - ParseableProperties(Properties props, ConfigParseOptions options) { - this.props = props; - postConstruct(options); - } - - @Override - protected Reader reader() throws IOException { - throw new ConfigException.BugOrBroken( - "reader() should not be called on props"); - } - - @Override - protected AbstractConfigObject rawParseValue(ConfigOrigin origin, - ConfigParseOptions finalOptions) { - return PropertiesParser.fromProperties(origin, props); - } - - @Override - ConfigSyntax guessSyntax() { - return ConfigSyntax.PROPERTIES; - } - - @Override - protected ConfigOrigin createOrigin() { - return SimpleConfigOrigin.newSimple("properties"); - } - - @Override - public String toString() { - return getClass().getSimpleName() + "(" + props.size() + " props)"; - } - } - - public static Parseable newProperties(Properties properties, - ConfigParseOptions options) { - return new ParseableProperties(properties, options); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/Parser.java b/akka-actor/src/main/java/com/typesafe/config/impl/Parser.java deleted file mode 100755 index 1ba8535207..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/Parser.java 
+++ /dev/null @@ -1,965 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.StringReader; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import java.util.Stack; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigIncludeContext; -import com.typesafe.config.ConfigIncluder; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigParseOptions; -import com.typesafe.config.ConfigSyntax; -import com.typesafe.config.ConfigValueType; - -final class Parser { - - static AbstractConfigValue parse(Iterator tokens, - ConfigOrigin origin, ConfigParseOptions options, - ConfigIncludeContext includeContext) { - ParseContext context = new ParseContext(options.getSyntax(), origin, - tokens, options.getIncluder(), includeContext); - return context.parse(); - } - - static private final class TokenWithComments { - final Token token; - final List comments; - - TokenWithComments(Token token, List comments) { - this.token = token; - this.comments = comments; - } - - TokenWithComments(Token token) { - this(token, Collections. 
emptyList()); - } - - TokenWithComments prepend(List earlier) { - if (this.comments.isEmpty()) { - return new TokenWithComments(token, earlier); - } else { - List merged = new ArrayList(); - merged.addAll(earlier); - merged.addAll(comments); - return new TokenWithComments(token, merged); - } - } - - SimpleConfigOrigin setComments(SimpleConfigOrigin origin) { - if (comments.isEmpty()) { - return origin; - } else { - List newComments = new ArrayList(); - for (Token c : comments) { - newComments.add(Tokens.getCommentText(c)); - } - return origin.setComments(newComments); - } - } - - @Override - public String toString() { - // this ends up in user-visible error messages, so we don't want the - // comments - return token.toString(); - } - } - - static private final class ParseContext { - private int lineNumber; - final private Stack buffer; - final private Iterator tokens; - final private ConfigIncluder includer; - final private ConfigIncludeContext includeContext; - final private ConfigSyntax flavor; - final private ConfigOrigin baseOrigin; - final private LinkedList pathStack; - // this is the number of "equals" we are inside, - // used to modify the error message to reflect that - // someone may think this is .properties format. - int equalsCount; - - ParseContext(ConfigSyntax flavor, ConfigOrigin origin, - Iterator tokens, ConfigIncluder includer, - ConfigIncludeContext includeContext) { - lineNumber = 1; - buffer = new Stack(); - this.tokens = tokens; - this.flavor = flavor; - this.baseOrigin = origin; - this.includer = includer; - this.includeContext = includeContext; - this.pathStack = new LinkedList(); - this.equalsCount = 0; - } - - private void consolidateCommentBlock(Token commentToken) { - // a comment block "goes with" the following token - // unless it's separated from it by a blank line. 
- // we want to build a list of newline tokens followed - // by a non-newline non-comment token; with all comments - // associated with that final non-newline non-comment token. - List newlines = new ArrayList(); - List comments = new ArrayList(); - - Token previous = null; - Token next = commentToken; - while (true) { - if (Tokens.isNewline(next)) { - if (previous != null && Tokens.isNewline(previous)) { - // blank line; drop all comments to this point and - // start a new comment block - comments.clear(); - } - newlines.add(next); - } else if (Tokens.isComment(next)) { - comments.add(next); - } else { - // a non-newline non-comment token - break; - } - - previous = next; - next = tokens.next(); - } - - // put our concluding token in the queue with all the comments - // attached - buffer.push(new TokenWithComments(next, comments)); - - // now put all the newlines back in front of it - ListIterator li = newlines.listIterator(newlines.size()); - while (li.hasPrevious()) { - buffer.push(new TokenWithComments(li.previous())); - } - } - - private TokenWithComments popToken() { - if (buffer.isEmpty()) { - Token t = tokens.next(); - if (Tokens.isComment(t)) { - consolidateCommentBlock(t); - return buffer.pop(); - } else { - return new TokenWithComments(t); - } - } else { - return buffer.pop(); - } - } - - private TokenWithComments nextToken() { - TokenWithComments withComments = null; - - withComments = popToken(); - Token t = withComments.token; - - if (Tokens.isProblem(t)) { - ConfigOrigin origin = t.origin(); - String message = Tokens.getProblemMessage(t); - Throwable cause = Tokens.getProblemCause(t); - boolean suggestQuotes = Tokens.getProblemSuggestQuotes(t); - if (suggestQuotes) { - message = addQuoteSuggestion(t.toString(), message); - } else { - message = addKeyName(message); - } - throw new ConfigException.Parse(origin, message, cause); - } else { - if (flavor == ConfigSyntax.JSON) { - if (Tokens.isUnquotedText(t)) { - throw parseError(addKeyName("Token not 
allowed in valid JSON: '" - + Tokens.getUnquotedText(t) + "'")); - } else if (Tokens.isSubstitution(t)) { - throw parseError(addKeyName("Substitutions (${} syntax) not allowed in JSON")); - } - } - - return withComments; - } - } - - private void putBack(TokenWithComments token) { - buffer.push(token); - } - - private TokenWithComments nextTokenIgnoringNewline() { - TokenWithComments t = nextToken(); - - while (Tokens.isNewline(t.token)) { - // line number tokens have the line that was _ended_ by the - // newline, so we have to add one. - lineNumber = t.token.lineNumber() + 1; - - t = nextToken(); - } - - return t; - } - - // In arrays and objects, comma can be omitted - // as long as there's at least one newline instead. - // this skips any newlines in front of a comma, - // skips the comma, and returns true if it found - // either a newline or a comma. The iterator - // is left just after the comma or the newline. - private boolean checkElementSeparator() { - if (flavor == ConfigSyntax.JSON) { - TokenWithComments t = nextTokenIgnoringNewline(); - if (t.token == Tokens.COMMA) { - return true; - } else { - putBack(t); - return false; - } - } else { - boolean sawSeparatorOrNewline = false; - TokenWithComments t = nextToken(); - while (true) { - if (Tokens.isNewline(t.token)) { - // newline number is the line just ended, so add one - lineNumber = t.token.lineNumber() + 1; - sawSeparatorOrNewline = true; - - // we want to continue to also eat - // a comma if there is one. - } else if (t.token == Tokens.COMMA) { - return true; - } else { - // non-newline-or-comma - putBack(t); - return sawSeparatorOrNewline; - } - t = nextToken(); - } - } - } - - // merge a bunch of adjacent values into one - // value; change unquoted text into a string - // value. 
- private void consolidateValueTokens() { - // this trick is not done in JSON - if (flavor == ConfigSyntax.JSON) - return; - - List values = null; // create only if we have value tokens - TokenWithComments firstValueWithComments = null; - TokenWithComments t = nextTokenIgnoringNewline(); // ignore a - // newline up - // front - while (Tokens.isValue(t.token) || Tokens.isUnquotedText(t.token) - || Tokens.isSubstitution(t.token)) { - if (values == null) { - values = new ArrayList(); - firstValueWithComments = t; - } - values.add(t.token); - t = nextToken(); // but don't consolidate across a newline - } - // the last one wasn't a value token - putBack(t); - - if (values == null) - return; - - if (values.size() == 1 && Tokens.isValue(firstValueWithComments.token)) { - // a single value token requires no consolidation - putBack(firstValueWithComments); - return; - } - - // this will be a list of String and SubstitutionExpression - List minimized = new ArrayList(); - - // we have multiple value tokens or one unquoted text token; - // collapse into a string token. 
- StringBuilder sb = new StringBuilder(); - ConfigOrigin firstOrigin = null; - for (Token valueToken : values) { - if (Tokens.isValue(valueToken)) { - AbstractConfigValue v = Tokens.getValue(valueToken); - sb.append(v.transformToString()); - if (firstOrigin == null) - firstOrigin = v.origin(); - } else if (Tokens.isUnquotedText(valueToken)) { - String text = Tokens.getUnquotedText(valueToken); - if (firstOrigin == null) - firstOrigin = valueToken.origin(); - sb.append(text); - } else if (Tokens.isSubstitution(valueToken)) { - if (firstOrigin == null) - firstOrigin = valueToken.origin(); - - if (sb.length() > 0) { - // save string so far - minimized.add(sb.toString()); - sb.setLength(0); - } - // now save substitution - List expression = Tokens - .getSubstitutionPathExpression(valueToken); - Path path = parsePathExpression(expression.iterator(), valueToken.origin()); - boolean optional = Tokens.getSubstitutionOptional(valueToken); - - minimized.add(new SubstitutionExpression(path, optional)); - } else { - throw new ConfigException.BugOrBroken( - "should not be trying to consolidate token: " - + valueToken); - } - } - - if (sb.length() > 0) { - // save string so far - minimized.add(sb.toString()); - } - - if (minimized.isEmpty()) - throw new ConfigException.BugOrBroken( - "trying to consolidate values to nothing"); - - Token consolidated = null; - - if (minimized.size() == 1 && minimized.get(0) instanceof String) { - consolidated = Tokens.newString(firstOrigin, - (String) minimized.get(0)); - } else { - // there's some substitution to do later (post-parse step) - consolidated = Tokens.newValue(new ConfigSubstitution( - firstOrigin, minimized)); - } - - putBack(new TokenWithComments(consolidated, firstValueWithComments.comments)); - } - - private ConfigOrigin lineOrigin() { - return ((SimpleConfigOrigin) baseOrigin).setLineNumber(lineNumber); - } - - private ConfigException parseError(String message) { - return parseError(message, null); - } - - private 
ConfigException parseError(String message, Throwable cause) { - return new ConfigException.Parse(lineOrigin(), message, cause); - } - - - private String previousFieldName(Path lastPath) { - if (lastPath != null) { - return lastPath.render(); - } else if (pathStack.isEmpty()) - return null; - else - return pathStack.peek().render(); - } - - private String previousFieldName() { - return previousFieldName(null); - } - - private String addKeyName(String message) { - String previousFieldName = previousFieldName(); - if (previousFieldName != null) { - return "in value for key '" + previousFieldName + "': " + message; - } else { - return message; - } - } - - private String addQuoteSuggestion(String badToken, String message) { - return addQuoteSuggestion(null, equalsCount > 0, badToken, message); - } - - private String addQuoteSuggestion(Path lastPath, boolean insideEquals, String badToken, - String message) { - String previousFieldName = previousFieldName(lastPath); - - String part; - if (badToken.equals(Tokens.END.toString())) { - // EOF requires special handling for the error to make sense. 
- if (previousFieldName != null) - part = message + " (if you intended '" + previousFieldName - + "' to be part of a value, instead of a key, " - + "try adding double quotes around the whole value"; - else - return message; - } else { - if (previousFieldName != null) { - part = message + " (if you intended " + badToken - + " to be part of the value for '" + previousFieldName + "', " - + "try enclosing the value in double quotes"; - } else { - part = message + " (if you intended " + badToken - + " to be part of a key or string value, " - + "try enclosing the key or value in double quotes"; - } - } - - if (insideEquals) - return part - + ", or you may be able to rename the file .properties rather than .conf)"; - else - return part + ")"; - } - - private AbstractConfigValue parseValue(TokenWithComments t) { - AbstractConfigValue v; - - if (Tokens.isValue(t.token)) { - v = Tokens.getValue(t.token); - } else if (t.token == Tokens.OPEN_CURLY) { - v = parseObject(true); - } else if (t.token == Tokens.OPEN_SQUARE) { - v = parseArray(); - } else { - throw parseError(addQuoteSuggestion(t.token.toString(), - "Expecting a value but got wrong token: " + t.token)); - } - - v = v.withOrigin(t.setComments(v.origin())); - - return v; - } - - private static AbstractConfigObject createValueUnderPath(Path path, - AbstractConfigValue value) { - // for path foo.bar, we are creating - // { "foo" : { "bar" : value } } - List keys = new ArrayList(); - - String key = path.first(); - Path remaining = path.remainder(); - while (key != null) { - keys.add(key); - if (remaining == null) { - break; - } else { - key = remaining.first(); - remaining = remaining.remainder(); - } - } - - // the setComments(null) is to ensure comments are only - // on the exact leaf node they apply to. 
- // a comment before "foo.bar" applies to the full setting - // "foo.bar" not also to "foo" - ListIterator i = keys.listIterator(keys.size()); - String deepest = i.previous(); - AbstractConfigObject o = new SimpleConfigObject(value.origin().setComments(null), - Collections. singletonMap( - deepest, value)); - while (i.hasPrevious()) { - Map m = Collections. singletonMap( - i.previous(), o); - o = new SimpleConfigObject(value.origin().setComments(null), m); - } - - return o; - } - - private Path parseKey(TokenWithComments token) { - if (flavor == ConfigSyntax.JSON) { - if (Tokens.isValueWithType(token.token, ConfigValueType.STRING)) { - String key = (String) Tokens.getValue(token.token).unwrapped(); - return Path.newKey(key); - } else { - throw parseError(addKeyName("Expecting close brace } or a field name here, got " - + token)); - } - } else { - List expression = new ArrayList(); - TokenWithComments t = token; - while (Tokens.isValue(t.token) || Tokens.isUnquotedText(t.token)) { - expression.add(t.token); - t = nextToken(); // note: don't cross a newline - } - - if (expression.isEmpty()) { - throw parseError(addKeyName("expecting a close brace or a field name here, got " - + t)); - } - - putBack(t); // put back the token we ended with - return parsePathExpression(expression.iterator(), lineOrigin()); - } - } - - private static boolean isIncludeKeyword(Token t) { - return Tokens.isUnquotedText(t) - && Tokens.getUnquotedText(t).equals("include"); - } - - private static boolean isUnquotedWhitespace(Token t) { - if (!Tokens.isUnquotedText(t)) - return false; - - String s = Tokens.getUnquotedText(t); - - for (int i = 0; i < s.length(); ++i) { - char c = s.charAt(i); - if (!ConfigImplUtil.isWhitespace(c)) - return false; - } - return true; - } - - private void parseInclude(Map values) { - TokenWithComments t = nextTokenIgnoringNewline(); - while (isUnquotedWhitespace(t.token)) { - t = nextTokenIgnoringNewline(); - } - - if (Tokens.isValueWithType(t.token, 
ConfigValueType.STRING)) { - String name = (String) Tokens.getValue(t.token).unwrapped(); - AbstractConfigObject obj = (AbstractConfigObject) includer - .include(includeContext, name); - - if (!pathStack.isEmpty()) { - Path prefix = new Path(pathStack); - obj = obj.relativized(prefix); - } - - for (String key : obj.keySet()) { - AbstractConfigValue v = obj.get(key); - AbstractConfigValue existing = values.get(key); - if (existing != null) { - values.put(key, v.withFallback(existing)); - } else { - values.put(key, v); - } - } - - } else { - throw parseError("include keyword is not followed by a quoted string, but by: " - + t); - } - } - - private boolean isKeyValueSeparatorToken(Token t) { - if (flavor == ConfigSyntax.JSON) { - return t == Tokens.COLON; - } else { - return t == Tokens.COLON || t == Tokens.EQUALS; - } - } - - private AbstractConfigObject parseObject(boolean hadOpenCurly) { - // invoked just after the OPEN_CURLY (or START, if !hadOpenCurly) - Map values = new HashMap(); - ConfigOrigin objectOrigin = lineOrigin(); - boolean afterComma = false; - Path lastPath = null; - boolean lastInsideEquals = false; - - while (true) { - TokenWithComments t = nextTokenIgnoringNewline(); - if (t.token == Tokens.CLOSE_CURLY) { - if (flavor == ConfigSyntax.JSON && afterComma) { - throw parseError(addQuoteSuggestion(t.toString(), - "expecting a field name after a comma, got a close brace } instead")); - } else if (!hadOpenCurly) { - throw parseError(addQuoteSuggestion(t.toString(), - "unbalanced close brace '}' with no open brace")); - } - break; - } else if (t.token == Tokens.END && !hadOpenCurly) { - putBack(t); - break; - } else if (flavor != ConfigSyntax.JSON && isIncludeKeyword(t.token)) { - parseInclude(values); - - afterComma = false; - } else { - TokenWithComments keyToken = t; - Path path = parseKey(keyToken); - TokenWithComments afterKey = nextTokenIgnoringNewline(); - boolean insideEquals = false; - - // path must be on-stack while we parse the value - 
pathStack.push(path); - - TokenWithComments valueToken; - AbstractConfigValue newValue; - if (flavor == ConfigSyntax.CONF && afterKey.token == Tokens.OPEN_CURLY) { - // can omit the ':' or '=' before an object value - valueToken = afterKey; - } else { - if (!isKeyValueSeparatorToken(afterKey.token)) { - throw parseError(addQuoteSuggestion(afterKey.toString(), - "Key '" + path.render() + "' may not be followed by token: " - + afterKey)); - } - - if (afterKey.token == Tokens.EQUALS) { - insideEquals = true; - equalsCount += 1; - } - - consolidateValueTokens(); - valueToken = nextTokenIgnoringNewline(); - } - - newValue = parseValue(valueToken.prepend(keyToken.comments)); - - lastPath = pathStack.pop(); - if (insideEquals) { - equalsCount -= 1; - } - lastInsideEquals = insideEquals; - - String key = path.first(); - Path remaining = path.remainder(); - - if (remaining == null) { - AbstractConfigValue existing = values.get(key); - if (existing != null) { - // In strict JSON, dups should be an error; while in - // our custom config language, they should be merged - // if the value is an object (or substitution that - // could become an object). 
- - if (flavor == ConfigSyntax.JSON) { - throw parseError("JSON does not allow duplicate fields: '" - + key - + "' was already seen at " - + existing.origin().description()); - } else { - newValue = newValue.withFallback(existing); - } - } - values.put(key, newValue); - } else { - if (flavor == ConfigSyntax.JSON) { - throw new ConfigException.BugOrBroken( - "somehow got multi-element path in JSON mode"); - } - - AbstractConfigObject obj = createValueUnderPath( - remaining, newValue); - AbstractConfigValue existing = values.get(key); - if (existing != null) { - obj = obj.withFallback(existing); - } - values.put(key, obj); - } - - afterComma = false; - } - - if (checkElementSeparator()) { - // continue looping - afterComma = true; - } else { - t = nextTokenIgnoringNewline(); - if (t.token == Tokens.CLOSE_CURLY) { - if (!hadOpenCurly) { - throw parseError(addQuoteSuggestion(lastPath, lastInsideEquals, - t.toString(), "unbalanced close brace '}' with no open brace")); - } - break; - } else if (hadOpenCurly) { - throw parseError(addQuoteSuggestion(lastPath, lastInsideEquals, - t.toString(), "Expecting close brace } or a comma, got " + t)); - } else { - if (t.token == Tokens.END) { - putBack(t); - break; - } else { - throw parseError(addQuoteSuggestion(lastPath, lastInsideEquals, - t.toString(), "Expecting end of input or a comma, got " + t)); - } - } - } - } - - return new SimpleConfigObject(objectOrigin, values); - } - - private SimpleConfigList parseArray() { - // invoked just after the OPEN_SQUARE - ConfigOrigin arrayOrigin = lineOrigin(); - List values = new ArrayList(); - - consolidateValueTokens(); - - TokenWithComments t = nextTokenIgnoringNewline(); - - // special-case the first element - if (t.token == Tokens.CLOSE_SQUARE) { - return new SimpleConfigList(arrayOrigin, - Collections. 
emptyList()); - } else if (Tokens.isValue(t.token) || t.token == Tokens.OPEN_CURLY - || t.token == Tokens.OPEN_SQUARE) { - values.add(parseValue(t)); - } else { - throw parseError(addKeyName("List should have ] or a first element after the open [, instead had token: " - + t - + " (if you want " - + t - + " to be part of a string value, then double-quote it)")); - } - - // now remaining elements - while (true) { - // just after a value - if (checkElementSeparator()) { - // comma (or newline equivalent) consumed - } else { - t = nextTokenIgnoringNewline(); - if (t.token == Tokens.CLOSE_SQUARE) { - return new SimpleConfigList(arrayOrigin, values); - } else { - throw parseError(addKeyName("List should have ended with ] or had a comma, instead had token: " - + t - + " (if you want " - + t - + " to be part of a string value, then double-quote it)")); - } - } - - // now just after a comma - consolidateValueTokens(); - - t = nextTokenIgnoringNewline(); - if (Tokens.isValue(t.token) || t.token == Tokens.OPEN_CURLY - || t.token == Tokens.OPEN_SQUARE) { - values.add(parseValue(t)); - } else if (flavor != ConfigSyntax.JSON && t.token == Tokens.CLOSE_SQUARE) { - // we allow one trailing comma - putBack(t); - } else { - throw parseError(addKeyName("List should have had new element after a comma, instead had token: " - + t - + " (if you want the comma or " - + t - + " to be part of a string value, then double-quote it)")); - } - } - } - - AbstractConfigValue parse() { - TokenWithComments t = nextTokenIgnoringNewline(); - if (t.token == Tokens.START) { - // OK - } else { - throw new ConfigException.BugOrBroken( - "token stream did not begin with START, had " + t); - } - - t = nextTokenIgnoringNewline(); - AbstractConfigValue result = null; - if (t.token == Tokens.OPEN_CURLY || t.token == Tokens.OPEN_SQUARE) { - result = parseValue(t); - } else { - if (flavor == ConfigSyntax.JSON) { - if (t.token == Tokens.END) { - throw parseError("Empty document"); - } else { - throw 
parseError("Document must have an object or array at root, unexpected token: " - + t); - } - } else { - // the root object can omit the surrounding braces. - // this token should be the first field's key, or part - // of it, so put it back. - putBack(t); - result = parseObject(false); - // in this case we don't try to use commentsStack comments - // since they would all presumably apply to fields not the - // root object - } - } - - t = nextTokenIgnoringNewline(); - if (t.token == Tokens.END) { - return result; - } else { - throw parseError("Document has trailing tokens after first object or array: " - + t); - } - } - } - - static class Element { - StringBuilder sb; - // an element can be empty if it has a quoted empty string "" in it - boolean canBeEmpty; - - Element(String initial, boolean canBeEmpty) { - this.canBeEmpty = canBeEmpty; - this.sb = new StringBuilder(initial); - } - - @Override - public String toString() { - return "Element(" + sb.toString() + "," + canBeEmpty + ")"; - } - } - - private static void addPathText(List buf, boolean wasQuoted, - String newText) { - int i = wasQuoted ? -1 : newText.indexOf('.'); - Element current = buf.get(buf.size() - 1); - if (i < 0) { - // add to current path element - current.sb.append(newText); - // any empty quoted string means this element can - // now be empty. 
- if (wasQuoted && current.sb.length() == 0) - current.canBeEmpty = true; - } else { - // "buf" plus up to the period is an element - current.sb.append(newText.substring(0, i)); - // then start a new element - buf.add(new Element("", false)); - // recurse to consume remainder of newText - addPathText(buf, false, newText.substring(i + 1)); - } - } - - private static Path parsePathExpression(Iterator expression, - ConfigOrigin origin) { - return parsePathExpression(expression, origin, null); - } - - // originalText may be null if not available - private static Path parsePathExpression(Iterator expression, - ConfigOrigin origin, String originalText) { - // each builder in "buf" is an element in the path. - List buf = new ArrayList(); - buf.add(new Element("", false)); - - if (!expression.hasNext()) { - throw new ConfigException.BadPath(origin, originalText, - "Expecting a field name or path here, but got nothing"); - } - - while (expression.hasNext()) { - Token t = expression.next(); - if (Tokens.isValueWithType(t, ConfigValueType.STRING)) { - AbstractConfigValue v = Tokens.getValue(t); - // this is a quoted string; so any periods - // in here don't count as path separators - String s = v.transformToString(); - - addPathText(buf, true, s); - } else if (t == Tokens.END) { - // ignore this; when parsing a file, it should not happen - // since we're parsing a token list rather than the main - // token iterator, and when parsing a path expression from the - // API, it's expected to have an END. - } else { - // any periods outside of a quoted string count as - // separators - String text; - if (Tokens.isValue(t)) { - // appending a number here may add - // a period, but we _do_ count those as path - // separators, because we basically want - // "foo 3.0bar" to parse as a string even - // though there's a number in it. The fact that - // we tokenize non-string values is largely an - // implementation detail. 
- AbstractConfigValue v = Tokens.getValue(t); - text = v.transformToString(); - } else if (Tokens.isUnquotedText(t)) { - text = Tokens.getUnquotedText(t); - } else { - throw new ConfigException.BadPath( - origin, - originalText, - "Token not allowed in path expression: " - + t - + " (you can double-quote this token if you really want it here)"); - } - - addPathText(buf, false, text); - } - } - - PathBuilder pb = new PathBuilder(); - for (Element e : buf) { - if (e.sb.length() == 0 && !e.canBeEmpty) { - throw new ConfigException.BadPath( - origin, - originalText, - "path has a leading, trailing, or two adjacent period '.' (use quoted \"\" empty string if you want an empty element)"); - } else { - pb.appendKey(e.sb.toString()); - } - } - - return pb.result(); - } - - static ConfigOrigin apiOrigin = SimpleConfigOrigin.newSimple("path parameter"); - - static Path parsePath(String path) { - Path speculated = speculativeFastParsePath(path); - if (speculated != null) - return speculated; - - StringReader reader = new StringReader(path); - - try { - Iterator tokens = Tokenizer.tokenize(apiOrigin, reader, - ConfigSyntax.CONF); - tokens.next(); // drop START - return parsePathExpression(tokens, apiOrigin, path); - } finally { - reader.close(); - } - } - - // the idea is to see if the string has any chars that might require the - // full parser to deal with. 
- private static boolean hasUnsafeChars(String s) { - for (int i = 0; i < s.length(); ++i) { - char c = s.charAt(i); - if (Character.isLetter(c) || c == '.') - continue; - else - return true; - } - return false; - } - - private static void appendPathString(PathBuilder pb, String s) { - int splitAt = s.indexOf('.'); - if (splitAt < 0) { - pb.appendKey(s); - } else { - pb.appendKey(s.substring(0, splitAt)); - appendPathString(pb, s.substring(splitAt + 1)); - } - } - - // do something much faster than the full parser if - // we just have something like "foo" or "foo.bar" - private static Path speculativeFastParsePath(String path) { - String s = ConfigImplUtil.unicodeTrim(path); - if (s.isEmpty()) - return null; - if (hasUnsafeChars(s)) - return null; - if (s.startsWith(".") || s.endsWith(".") || s.contains("..")) - return null; // let the full parser throw the error - - PathBuilder pb = new PathBuilder(); - appendPathString(pb, s); - return pb.result(); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/Path.java b/akka-actor/src/main/java/com/typesafe/config/impl/Path.java deleted file mode 100755 index fbbe1e0874..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/Path.java +++ /dev/null @@ -1,208 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.Serializable; -import java.util.Iterator; -import java.util.List; - -import com.typesafe.config.ConfigException; - -final class Path implements Serializable { - - private static final long serialVersionUID = 1L; - - final private String first; - final private Path remainder; - - Path(String first, Path remainder) { - this.first = first; - this.remainder = remainder; - } - - Path(String... 
elements) { - if (elements.length == 0) - throw new ConfigException.BugOrBroken("empty path"); - this.first = elements[0]; - if (elements.length > 1) { - PathBuilder pb = new PathBuilder(); - for (int i = 1; i < elements.length; ++i) { - pb.appendKey(elements[i]); - } - this.remainder = pb.result(); - } else { - this.remainder = null; - } - } - - // append all the paths in the list together into one path - Path(List pathsToConcat) { - if (pathsToConcat.isEmpty()) - throw new ConfigException.BugOrBroken("empty path"); - - Iterator i = pathsToConcat.iterator(); - Path firstPath = i.next(); - this.first = firstPath.first; - - PathBuilder pb = new PathBuilder(); - if (firstPath.remainder != null) { - pb.appendPath(firstPath.remainder); - } - while (i.hasNext()) { - pb.appendPath(i.next()); - } - this.remainder = pb.result(); - } - - String first() { - return first; - } - - /** - * - * @return path minus the first element or null if no more elements - */ - Path remainder() { - return remainder; - } - - /** - * - * @return path minus the last element or null if we have just one element - */ - Path parent() { - if (remainder == null) - return null; - - PathBuilder pb = new PathBuilder(); - Path p = this; - while (p.remainder != null) { - pb.appendKey(p.first); - p = p.remainder; - } - return pb.result(); - } - - /** - * - * @return last element in the path - */ - String last() { - Path p = this; - while (p.remainder != null) { - p = p.remainder; - } - return p.first; - } - - Path prepend(Path toPrepend) { - PathBuilder pb = new PathBuilder(); - pb.appendPath(toPrepend); - pb.appendPath(this); - return pb.result(); - } - - int length() { - int count = 1; - Path p = remainder; - while (p != null) { - count += 1; - p = p.remainder; - } - return count; - } - - Path subPath(int removeFromFront) { - int count = removeFromFront; - Path p = this; - while (p != null && count > 0) { - count -= 1; - p = p.remainder; - } - return p; - } - - @Override - public boolean equals(Object 
other) { - if (other instanceof Path) { - Path that = (Path) other; - return this.first.equals(that.first) - && ConfigImplUtil.equalsHandlingNull(this.remainder, - that.remainder); - } else { - return false; - } - } - - @Override - public int hashCode() { - return 41 * (41 + first.hashCode()) - + (remainder == null ? 0 : remainder.hashCode()); - } - - // this doesn't have a very precise meaning, just to reduce - // noise from quotes in the rendered path for average cases - static boolean hasFunkyChars(String s) { - int length = s.length(); - - if (length == 0) - return false; - - // if the path starts with something that could be a number, - // we need to quote it because the number could be invalid, - // for example it could be a hyphen with no digit afterward - // or the exponent "e" notation could be mangled. - char first = s.charAt(0); - if (!(Character.isLetter(first))) - return true; - - for (int i = 1; i < length; ++i) { - char c = s.charAt(i); - - if (Character.isLetterOrDigit(c) || c == '-' || c == '_') - continue; - else - return true; - } - return false; - } - - private void appendToStringBuilder(StringBuilder sb) { - if (hasFunkyChars(first) || first.isEmpty()) - sb.append(ConfigImplUtil.renderJsonString(first)); - else - sb.append(first); - if (remainder != null) { - sb.append("."); - remainder.appendToStringBuilder(sb); - } - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("Path("); - appendToStringBuilder(sb); - sb.append(")"); - return sb.toString(); - } - - /** - * toString() is a debugging-oriented version while this is an - * error-message-oriented human-readable one. 
- */ - String render() { - StringBuilder sb = new StringBuilder(); - appendToStringBuilder(sb); - return sb.toString(); - } - - static Path newKey(String key) { - return new Path(key, null); - } - - static Path newPath(String path) { - return Parser.parsePath(path); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/PathBuilder.java b/akka-actor/src/main/java/com/typesafe/config/impl/PathBuilder.java deleted file mode 100755 index ede6c66387..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/PathBuilder.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.util.Stack; - -import com.typesafe.config.ConfigException; - -final class PathBuilder { - // the keys are kept "backward" (top of stack is end of path) - final private Stack keys; - private Path result; - - PathBuilder() { - keys = new Stack(); - } - - private void checkCanAppend() { - if (result != null) - throw new ConfigException.BugOrBroken( - "Adding to PathBuilder after getting result"); - } - - void appendKey(String key) { - checkCanAppend(); - - keys.push(key); - } - - void appendPath(Path path) { - checkCanAppend(); - - String first = path.first(); - Path remainder = path.remainder(); - while (true) { - keys.push(first); - if (remainder != null) { - first = remainder.first(); - remainder = remainder.remainder(); - } else { - break; - } - } - } - - Path result() { - // note: if keys is empty, we want to return null, which is a valid - // empty path - if (result == null) { - Path remainder = null; - while (!keys.isEmpty()) { - String key = keys.pop(); - remainder = new Path(key, remainder); - } - result = remainder; - } - return result; - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/PropertiesParser.java b/akka-actor/src/main/java/com/typesafe/config/impl/PropertiesParser.java deleted file mode 100755 index 7c8c81fb07..0000000000 --- 
a/akka-actor/src/main/java/com/typesafe/config/impl/PropertiesParser.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.IOException; -import java.io.Reader; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; - -final class PropertiesParser { - static AbstractConfigObject parse(Reader reader, - ConfigOrigin origin) throws IOException { - Properties props = new Properties(); - props.load(reader); - return fromProperties(origin, props); - } - - static String lastElement(String path) { - int i = path.lastIndexOf('.'); - if (i < 0) - return path; - else - return path.substring(i + 1); - } - - static String exceptLastElement(String path) { - int i = path.lastIndexOf('.'); - if (i < 0) - return null; - else - return path.substring(0, i); - } - - static Path pathFromPropertyKey(String key) { - String last = lastElement(key); - String exceptLast = exceptLastElement(key); - Path path = new Path(last, null); - while (exceptLast != null) { - last = lastElement(exceptLast); - exceptLast = exceptLastElement(exceptLast); - path = new Path(last, path); - } - return path; - } - - static AbstractConfigObject fromProperties(ConfigOrigin origin, - Properties props) { - Map pathMap = new HashMap(); - for (Map.Entry entry : props.entrySet()) { - Object key = entry.getKey(); - if (key instanceof String) { - Path path = pathFromPropertyKey((String) key); - pathMap.put(path, entry.getValue()); - } - } - return fromPathMap(origin, pathMap, true /* from properties */); - } - - static AbstractConfigObject fromPathMap(ConfigOrigin origin, - Map pathExpressionMap) { - Map pathMap = new HashMap(); - for (Map.Entry entry : 
pathExpressionMap.entrySet()) { - Object keyObj = entry.getKey(); - if (!(keyObj instanceof String)) { - throw new ConfigException.BugOrBroken( - "Map has a non-string as a key, expecting a path expression as a String"); - } - Path path = Path.newPath((String) keyObj); - pathMap.put(path, entry.getValue()); - } - return fromPathMap(origin, pathMap, false /* from properties */); - } - - private static AbstractConfigObject fromPathMap(ConfigOrigin origin, - Map pathMap, boolean convertedFromProperties) { - /* - * First, build a list of paths that will have values, either string or - * object values. - */ - Set scopePaths = new HashSet(); - Set valuePaths = new HashSet(); - for (Path path : pathMap.keySet()) { - // add value's path - valuePaths.add(path); - - // all parent paths are objects - Path next = path.parent(); - while (next != null) { - scopePaths.add(next); - next = next.parent(); - } - } - - if (convertedFromProperties) { - /* - * If any string values are also objects containing other values, - * drop those string values - objects "win". - */ - valuePaths.removeAll(scopePaths); - } else { - /* If we didn't start out as properties, then this is an error. */ - for (Path path : valuePaths) { - if (scopePaths.contains(path)) { - throw new ConfigException.BugOrBroken( - "In the map, path '" - + path.render() - + "' occurs as both the parent object of a value and as a value. " - + "Because Map has no defined ordering, this is a broken situation."); - } - } - } - - /* - * Create maps for the object-valued values. - */ - Map root = new HashMap(); - Map> scopes = new HashMap>(); - - for (Path path : scopePaths) { - Map scope = new HashMap(); - scopes.put(path, scope); - } - - /* Store string values in the associated scope maps */ - for (Path path : valuePaths) { - Path parentPath = path.parent(); - Map parent = parentPath != null ? 
scopes - .get(parentPath) : root; - - String last = path.last(); - Object rawValue = pathMap.get(path); - AbstractConfigValue value; - if (convertedFromProperties) { - value = new ConfigString(origin, (String) rawValue); - } else { - value = ConfigImpl.fromAnyRef(pathMap.get(path), origin, - FromMapMode.KEYS_ARE_PATHS); - } - parent.put(last, value); - } - - /* - * Make a list of scope paths from longest to shortest, so children go - * before parents. - */ - List sortedScopePaths = new ArrayList(); - sortedScopePaths.addAll(scopePaths); - // sort descending by length - Collections.sort(sortedScopePaths, new Comparator() { - @Override - public int compare(Path a, Path b) { - // Path.length() is O(n) so in theory this sucks - // but in practice we can make Path precompute length - // if it ever matters. - return b.length() - a.length(); - } - }); - - /* - * Create ConfigObject for each scope map, working from children to - * parents to avoid modifying any already-created ConfigObject. This is - * where we need the sorted list. - */ - for (Path scopePath : sortedScopePaths) { - Map scope = scopes.get(scopePath); - - Path parentPath = scopePath.parent(); - Map parent = parentPath != null ? scopes - .get(parentPath) : root; - - AbstractConfigObject o = new SimpleConfigObject(origin, scope, - ResolveStatus.RESOLVED, false /* ignoresFallbacks */); - parent.put(scopePath.last(), o); - } - - // return root config object - return new SimpleConfigObject(origin, root, ResolveStatus.RESOLVED, - false /* ignoresFallbacks */); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/ResolveStatus.java b/akka-actor/src/main/java/com/typesafe/config/impl/ResolveStatus.java deleted file mode 100755 index 8deeaf520f..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/ResolveStatus.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import java.util.Collection; - -/** - * Status of substitution resolution. - */ -enum ResolveStatus { - UNRESOLVED, RESOLVED; - - final static ResolveStatus fromValues( - Collection values) { - for (AbstractConfigValue v : values) { - if (v.resolveStatus() == ResolveStatus.UNRESOLVED) - return ResolveStatus.UNRESOLVED; - } - return ResolveStatus.RESOLVED; - } - - final static ResolveStatus fromBoolean(boolean resolved) { - return resolved ? ResolveStatus.RESOLVED : ResolveStatus.UNRESOLVED; - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfig.java b/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfig.java deleted file mode 100755 index 0ab776b9af..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfig.java +++ /dev/null @@ -1,841 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.Serializable; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigList; -import com.typesafe.config.ConfigMergeable; -import com.typesafe.config.ConfigObject; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigResolveOptions; -import com.typesafe.config.ConfigValue; -import com.typesafe.config.ConfigValueType; - -/** - * One thing to keep in mind in the future: as Collection-like APIs are added - * here, including iterators or size() or anything, they should be consistent - * with a one-level java.util.Map from paths to non-null values. Null values are - * not "in" the map. 
- */ -final class SimpleConfig implements Config, MergeableValue, Serializable { - - private static final long serialVersionUID = 1L; - - final private AbstractConfigObject object; - - SimpleConfig(AbstractConfigObject object) { - this.object = object; - } - - @Override - public AbstractConfigObject root() { - return object; - } - - @Override - public ConfigOrigin origin() { - return object.origin(); - } - - @Override - public SimpleConfig resolve() { - return resolve(ConfigResolveOptions.defaults()); - } - - @Override - public SimpleConfig resolve(ConfigResolveOptions options) { - AbstractConfigValue resolved = SubstitutionResolver.resolve(object, - object, options); - if (resolved == object) - return this; - else - return new SimpleConfig((AbstractConfigObject) resolved); - } - - - @Override - public boolean hasPath(String pathExpression) { - Path path = Path.newPath(pathExpression); - ConfigValue peeked = object.peekPath(path, null, 0, null); - return peeked != null && peeked.valueType() != ConfigValueType.NULL; - } - - @Override - public boolean isEmpty() { - return object.isEmpty(); - } - - private static void findPaths(Set> entries, Path parent, - AbstractConfigObject obj) { - for (Map.Entry entry : obj.entrySet()) { - String elem = entry.getKey(); - ConfigValue v = entry.getValue(); - Path path = Path.newKey(elem); - if (parent != null) - path = path.prepend(parent); - if (v instanceof AbstractConfigObject) { - findPaths(entries, path, (AbstractConfigObject) v); - } else if (v instanceof ConfigNull) { - // nothing; nulls are conceptually not in a Config - } else { - entries.add(new AbstractMap.SimpleImmutableEntry(path.render(), v)); - } - } - } - - @Override - public Set> entrySet() { - Set> entries = new HashSet>(); - findPaths(entries, null, object); - return entries; - } - - static private AbstractConfigValue find(AbstractConfigObject self, - String pathExpression, ConfigValueType expected, String originalPath) { - Path path = 
Path.newPath(pathExpression); - return find(self, path, expected, originalPath); - } - - static private AbstractConfigValue findKey(AbstractConfigObject self, - String key, ConfigValueType expected, String originalPath) { - AbstractConfigValue v = self.peek(key); - if (v == null) - throw new ConfigException.Missing(originalPath); - - if (expected != null) - v = DefaultTransformer.transform(v, expected); - - if (v.valueType() == ConfigValueType.NULL) - throw new ConfigException.Null(v.origin(), originalPath, - expected != null ? expected.name() : null); - else if (expected != null && v.valueType() != expected) - throw new ConfigException.WrongType(v.origin(), originalPath, - expected.name(), v.valueType().name()); - else - return v; - } - - static private AbstractConfigValue find(AbstractConfigObject self, - Path path, ConfigValueType expected, String originalPath) { - String key = path.first(); - Path next = path.remainder(); - if (next == null) { - return findKey(self, key, expected, originalPath); - } else { - AbstractConfigObject o = (AbstractConfigObject) findKey(self, key, - ConfigValueType.OBJECT, originalPath); - assert (o != null); // missing was supposed to throw - return find(o, next, expected, originalPath); - } - } - - AbstractConfigValue find(String pathExpression, ConfigValueType expected, - String originalPath) { - return find(object, pathExpression, expected, originalPath); - } - - @Override - public AbstractConfigValue getValue(String path) { - return find(path, null, path); - } - - @Override - public boolean getBoolean(String path) { - ConfigValue v = find(path, ConfigValueType.BOOLEAN, path); - return (Boolean) v.unwrapped(); - } - - private ConfigNumber getConfigNumber(String path) { - ConfigValue v = find(path, ConfigValueType.NUMBER, path); - return (ConfigNumber) v; - } - - @Override - public Number getNumber(String path) { - return getConfigNumber(path).unwrapped(); - } - - @Override - public int getInt(String path) { - ConfigNumber n = 
getConfigNumber(path); - return n.intValueRangeChecked(path); - } - - @Override - public long getLong(String path) { - return getNumber(path).longValue(); - } - - @Override - public double getDouble(String path) { - return getNumber(path).doubleValue(); - } - - @Override - public String getString(String path) { - ConfigValue v = find(path, ConfigValueType.STRING, path); - return (String) v.unwrapped(); - } - - @Override - public ConfigList getList(String path) { - AbstractConfigValue v = find(path, ConfigValueType.LIST, path); - return (ConfigList) v; - } - - @Override - public AbstractConfigObject getObject(String path) { - AbstractConfigObject obj = (AbstractConfigObject) find(path, - ConfigValueType.OBJECT, path); - return obj; - } - - @Override - public SimpleConfig getConfig(String path) { - return getObject(path).toConfig(); - } - - @Override - public Object getAnyRef(String path) { - ConfigValue v = find(path, null, path); - return v.unwrapped(); - } - - @Override - public Long getBytes(String path) { - Long size = null; - try { - size = getLong(path); - } catch (ConfigException.WrongType e) { - ConfigValue v = find(path, ConfigValueType.STRING, path); - size = parseBytes((String) v.unwrapped(), - v.origin(), path); - } - return size; - } - - @Override - public Long getMilliseconds(String path) { - long ns = getNanoseconds(path); - long ms = TimeUnit.NANOSECONDS.toMillis(ns); - return ms; - } - - @Override - public Long getNanoseconds(String path) { - Long ns = null; - try { - ns = TimeUnit.MILLISECONDS.toNanos(getLong(path)); - } catch (ConfigException.WrongType e) { - ConfigValue v = find(path, ConfigValueType.STRING, path); - ns = parseDuration((String) v.unwrapped(), v.origin(), path); - } - return ns; - } - - @SuppressWarnings("unchecked") - private List getHomogeneousUnwrappedList(String path, - ConfigValueType expected) { - List l = new ArrayList(); - List list = getList(path); - for (ConfigValue cv : list) { - // variance would be nice, but stupid 
cast will do - AbstractConfigValue v = (AbstractConfigValue) cv; - if (expected != null) { - v = DefaultTransformer.transform(v, expected); - } - if (v.valueType() != expected) - throw new ConfigException.WrongType(v.origin(), path, - "list of " + expected.name(), "list of " - + v.valueType().name()); - l.add((T) v.unwrapped()); - } - return l; - } - - @Override - public List getBooleanList(String path) { - return getHomogeneousUnwrappedList(path, ConfigValueType.BOOLEAN); - } - - @Override - public List getNumberList(String path) { - return getHomogeneousUnwrappedList(path, ConfigValueType.NUMBER); - } - - @Override - public List getIntList(String path) { - List l = new ArrayList(); - List numbers = getHomogeneousWrappedList(path, ConfigValueType.NUMBER); - for (AbstractConfigValue v : numbers) { - l.add(((ConfigNumber) v).intValueRangeChecked(path)); - } - return l; - } - - @Override - public List getLongList(String path) { - List l = new ArrayList(); - List numbers = getNumberList(path); - for (Number n : numbers) { - l.add(n.longValue()); - } - return l; - } - - @Override - public List getDoubleList(String path) { - List l = new ArrayList(); - List numbers = getNumberList(path); - for (Number n : numbers) { - l.add(n.doubleValue()); - } - return l; - } - - @Override - public List getStringList(String path) { - return getHomogeneousUnwrappedList(path, ConfigValueType.STRING); - } - - @SuppressWarnings("unchecked") - private List getHomogeneousWrappedList( - String path, ConfigValueType expected) { - List l = new ArrayList(); - List list = getList(path); - for (ConfigValue cv : list) { - // variance would be nice, but stupid cast will do - AbstractConfigValue v = (AbstractConfigValue) cv; - if (expected != null) { - v = DefaultTransformer.transform(v, expected); - } - if (v.valueType() != expected) - throw new ConfigException.WrongType(v.origin(), path, - "list of " + expected.name(), "list of " - + v.valueType().name()); - l.add((T) v); - } - return l; - } - - 
@Override - public List getObjectList(String path) { - return getHomogeneousWrappedList(path, ConfigValueType.OBJECT); - } - - @Override - public List getConfigList(String path) { - List objects = getObjectList(path); - List l = new ArrayList(); - for (ConfigObject o : objects) { - l.add(o.toConfig()); - } - return l; - } - - @Override - public List getAnyRefList(String path) { - List l = new ArrayList(); - List list = getList(path); - for (ConfigValue v : list) { - l.add(v.unwrapped()); - } - return l; - } - - @Override - public List getBytesList(String path) { - List l = new ArrayList(); - List list = getList(path); - for (ConfigValue v : list) { - if (v.valueType() == ConfigValueType.NUMBER) { - l.add(((Number) v.unwrapped()).longValue()); - } else if (v.valueType() == ConfigValueType.STRING) { - String s = (String) v.unwrapped(); - Long n = parseBytes(s, v.origin(), path); - l.add(n); - } else { - throw new ConfigException.WrongType(v.origin(), path, - "memory size string or number of bytes", v.valueType() - .name()); - } - } - return l; - } - - @Override - public List getMillisecondsList(String path) { - List nanos = getNanosecondsList(path); - List l = new ArrayList(); - for (Long n : nanos) { - l.add(TimeUnit.NANOSECONDS.toMillis(n)); - } - return l; - } - - @Override - public List getNanosecondsList(String path) { - List l = new ArrayList(); - List list = getList(path); - for (ConfigValue v : list) { - if (v.valueType() == ConfigValueType.NUMBER) { - l.add(TimeUnit.MILLISECONDS.toNanos(((Number) v.unwrapped()) - .longValue())); - } else if (v.valueType() == ConfigValueType.STRING) { - String s = (String) v.unwrapped(); - Long n = parseDuration(s, v.origin(), path); - l.add(n); - } else { - throw new ConfigException.WrongType(v.origin(), path, - "duration string or number of nanoseconds", v - .valueType().name()); - } - } - return l; - } - - @Override - public AbstractConfigObject toFallbackValue() { - return object; - } - - @Override - public SimpleConfig 
withFallback(ConfigMergeable other) { - // this can return "this" if the withFallback doesn't need a new - // ConfigObject - return object.withFallback(other).toConfig(); - } - - @Override - public final boolean equals(Object other) { - if (other instanceof SimpleConfig) { - return object.equals(((SimpleConfig) other).object); - } else { - return false; - } - } - - @Override - public final int hashCode() { - // we do the "41*" just so our hash code won't match that of the - // underlying object. there's no real reason it can't match, but - // making it not match might catch some kinds of bug. - return 41 * object.hashCode(); - } - - @Override - public String toString() { - return "Config(" + object.toString() + ")"; - } - - private static String getUnits(String s) { - int i = s.length() - 1; - while (i >= 0) { - char c = s.charAt(i); - if (!Character.isLetter(c)) - break; - i -= 1; - } - return s.substring(i + 1); - } - - /** - * Parses a duration string. If no units are specified in the string, it is - * assumed to be in milliseconds. The returned duration is in nanoseconds. - * The purpose of this function is to implement the duration-related methods - * in the ConfigObject interface. - * - * @param input - * the string to parse - * @param originForException - * origin of the value being parsed - * @param pathForException - * path to include in exceptions - * @return duration in nanoseconds - * @throws ConfigException - * if string is invalid - */ - public static long parseDuration(String input, - ConfigOrigin originForException, String pathForException) { - String s = ConfigImplUtil.unicodeTrim(input); - String originalUnitString = getUnits(s); - String unitString = originalUnitString; - String numberString = ConfigImplUtil.unicodeTrim(s.substring(0, s.length() - - unitString.length())); - TimeUnit units = null; - - // this would be caught later anyway, but the error message - // is more helpful if we check it here. 
- if (numberString.length() == 0) - throw new ConfigException.BadValue(originForException, - pathForException, "No number in duration value '" + input - + "'"); - - if (unitString.length() > 2 && !unitString.endsWith("s")) - unitString = unitString + "s"; - - // note that this is deliberately case-sensitive - if (unitString.equals("") || unitString.equals("ms") - || unitString.equals("milliseconds")) { - units = TimeUnit.MILLISECONDS; - } else if (unitString.equals("us") || unitString.equals("microseconds")) { - units = TimeUnit.MICROSECONDS; - } else if (unitString.equals("ns") || unitString.equals("nanoseconds")) { - units = TimeUnit.NANOSECONDS; - } else if (unitString.equals("d") || unitString.equals("days")) { - units = TimeUnit.DAYS; - } else if (unitString.equals("h") || unitString.equals("hours")) { - units = TimeUnit.HOURS; - } else if (unitString.equals("s") || unitString.equals("seconds")) { - units = TimeUnit.SECONDS; - } else if (unitString.equals("m") || unitString.equals("minutes")) { - units = TimeUnit.MINUTES; - } else { - throw new ConfigException.BadValue(originForException, - pathForException, "Could not parse time unit '" - + originalUnitString - + "' (try ns, us, ms, s, m, d)"); - } - - try { - // if the string is purely digits, parse as an integer to avoid - // possible precision loss; - // otherwise as a double. 
- if (numberString.matches("[0-9]+")) { - return units.toNanos(Long.parseLong(numberString)); - } else { - long nanosInUnit = units.toNanos(1); - return (long) (Double.parseDouble(numberString) * nanosInUnit); - } - } catch (NumberFormatException e) { - throw new ConfigException.BadValue(originForException, - pathForException, "Could not parse duration number '" - + numberString + "'"); - } - } - - private static enum MemoryUnit { - BYTES("", 1024, 0), - - KILOBYTES("kilo", 1000, 1), - MEGABYTES("mega", 1000, 2), - GIGABYTES("giga", 1000, 3), - TERABYTES("tera", 1000, 4), - PETABYTES("peta", 1000, 5), - EXABYTES("exa", 1000, 6), - ZETTABYTES("zetta", 1000, 7), - YOTTABYTES("yotta", 1000, 8), - - KIBIBYTES("kibi", 1024, 1), - MEBIBYTES("mebi", 1024, 2), - GIBIBYTES("gibi", 1024, 3), - TEBIBYTES("tebi", 1024, 4), - PEBIBYTES("pebi", 1024, 5), - EXBIBYTES("exbi", 1024, 6), - ZEBIBYTES("zebi", 1024, 7), - YOBIBYTES("yobi", 1024, 8); - - final String prefix; - final int powerOf; - final int power; - final long bytes; - - MemoryUnit(String prefix, int powerOf, int power) { - this.prefix = prefix; - this.powerOf = powerOf; - this.power = power; - int i = power; - long bytes = 1; - while (i > 0) { - bytes *= powerOf; - --i; - } - this.bytes = bytes; - } - - private static Map makeUnitsMap() { - Map map = new HashMap(); - for (MemoryUnit unit : MemoryUnit.values()) { - map.put(unit.prefix + "byte", unit); - map.put(unit.prefix + "bytes", unit); - if (unit.prefix.length() == 0) { - map.put("b", unit); - map.put("B", unit); - map.put("", unit); // no unit specified means bytes - } else { - String first = unit.prefix.substring(0, 1); - String firstUpper = first.toUpperCase(); - if (unit.powerOf == 1024) { - map.put(first, unit); // 512m - map.put(firstUpper, unit); // 512M - map.put(firstUpper + "i", unit); // 512Mi - map.put(firstUpper + "iB", unit); // 512MiB - } else if (unit.powerOf == 1000) { - if (unit.power == 1) { - map.put(first + "B", unit); // 512kB - } else { - 
map.put(firstUpper + "B", unit); // 512MB - } - } else { - throw new RuntimeException("broken MemoryUnit enum"); - } - } - } - return map; - } - - private static Map unitsMap = makeUnitsMap(); - - static MemoryUnit parseUnit(String unit) { - return unitsMap.get(unit); - } - } - - /** - * Parses a size-in-bytes string. If no units are specified in the string, - * it is assumed to be in bytes. The returned value is in bytes. The purpose - * of this function is to implement the size-in-bytes-related methods in the - * Config interface. - * - * @param input - * the string to parse - * @param originForException - * origin of the value being parsed - * @param pathForException - * path to include in exceptions - * @return size in bytes - * @throws ConfigException - * if string is invalid - */ - public static long parseBytes(String input, ConfigOrigin originForException, - String pathForException) { - String s = ConfigImplUtil.unicodeTrim(input); - String unitString = getUnits(s); - String numberString = ConfigImplUtil.unicodeTrim(s.substring(0, - s.length() - unitString.length())); - - // this would be caught later anyway, but the error message - // is more helpful if we check it here. - if (numberString.length() == 0) - throw new ConfigException.BadValue(originForException, - pathForException, "No number in size-in-bytes value '" - + input + "'"); - - MemoryUnit units = MemoryUnit.parseUnit(unitString); - - if (units == null) { - throw new ConfigException.BadValue(originForException, pathForException, - "Could not parse size-in-bytes unit '" + unitString - + "' (try k, K, kB, KiB, kilobytes, kibibytes)"); - } - - try { - // if the string is purely digits, parse as an integer to avoid - // possible precision loss; otherwise as a double. 
- if (numberString.matches("[0-9]+")) { - return Long.parseLong(numberString) * units.bytes; - } else { - return (long) (Double.parseDouble(numberString) * units.bytes); - } - } catch (NumberFormatException e) { - throw new ConfigException.BadValue(originForException, pathForException, - "Could not parse size-in-bytes number '" + numberString + "'"); - } - } - - private AbstractConfigValue peekPath(Path path) { - return root().peekPath(path); - } - - private static void addProblem(List accumulator, Path path, - ConfigOrigin origin, String problem) { - accumulator.add(new ConfigException.ValidationProblem(path.render(), origin, problem)); - } - - private static String getDesc(ConfigValue refValue) { - if (refValue instanceof AbstractConfigObject) { - AbstractConfigObject obj = (AbstractConfigObject) refValue; - if (obj.isEmpty()) - return "object"; - else - return "object with keys " + obj.keySet(); - } else if (refValue instanceof SimpleConfigList) { - return "list"; - } else { - return refValue.valueType().name().toLowerCase(); - } - } - - private static void addMissing(List accumulator, - ConfigValue refValue, Path path, ConfigOrigin origin) { - addProblem(accumulator, path, origin, "No setting at '" + path.render() + "', expecting: " - + getDesc(refValue)); - } - - private static void addWrongType(List accumulator, - ConfigValue refValue, AbstractConfigValue actual, Path path) { - addProblem(accumulator, path, actual.origin(), "Wrong value type at '" + path.render() - + "', expecting: " + getDesc(refValue) + " but got: " - + getDesc(actual)); - } - - private static boolean couldBeNull(AbstractConfigValue v) { - return DefaultTransformer.transform(v, ConfigValueType.NULL) - .valueType() == ConfigValueType.NULL; - } - - private static boolean haveCompatibleTypes(ConfigValue reference, AbstractConfigValue value) { - if (couldBeNull((AbstractConfigValue) reference) || couldBeNull(value)) { - // we allow any setting to be null - return true; - } else if (reference 
instanceof AbstractConfigObject) { - if (value instanceof AbstractConfigObject) { - return true; - } else { - return false; - } - } else if (reference instanceof SimpleConfigList) { - if (value instanceof SimpleConfigList) { - return true; - } else { - return false; - } - } else if (reference instanceof ConfigString) { - // assume a string could be gotten as any non-collection type; - // allows things like getMilliseconds including domain-specific - // interpretations of strings - return true; - } else if (value instanceof ConfigString) { - // assume a string could be gotten as any non-collection type - return true; - } else { - if (reference.valueType() == value.valueType()) { - return true; - } else { - return false; - } - } - } - - // path is null if we're at the root - private static void checkValidObject(Path path, AbstractConfigObject reference, - AbstractConfigObject value, - List accumulator) { - for (Map.Entry entry : reference.entrySet()) { - String key = entry.getKey(); - - Path childPath; - if (path != null) - childPath = Path.newKey(key).prepend(path); - else - childPath = Path.newKey(key); - - AbstractConfigValue v = value.get(key); - if (v == null) { - addMissing(accumulator, entry.getValue(), childPath, value.origin()); - } else { - checkValid(childPath, entry.getValue(), v, accumulator); - } - } - } - - private static void checkValid(Path path, ConfigValue reference, AbstractConfigValue value, - List accumulator) { - // Unmergeable is supposed to be impossible to encounter in here - // because we check for resolve status up front. 
- - if (haveCompatibleTypes(reference, value)) { - if (reference instanceof AbstractConfigObject && value instanceof AbstractConfigObject) { - checkValidObject(path, (AbstractConfigObject) reference, - (AbstractConfigObject) value, accumulator); - } else if (reference instanceof SimpleConfigList && value instanceof SimpleConfigList) { - SimpleConfigList listRef = (SimpleConfigList) reference; - SimpleConfigList listValue = (SimpleConfigList) value; - if (listRef.isEmpty() || listValue.isEmpty()) { - // can't verify type, leave alone - } else { - AbstractConfigValue refElement = listRef.get(0); - for (ConfigValue elem : listValue) { - AbstractConfigValue e = (AbstractConfigValue) elem; - if (!haveCompatibleTypes(refElement, e)) { - addProblem(accumulator, path, e.origin(), "List at '" + path.render() - + "' contains wrong value type, expecting list of " - + getDesc(refElement) + " but got element of type " - + getDesc(e)); - // don't add a problem for every last array element - break; - } - } - } - } - } else { - addWrongType(accumulator, reference, value, path); - } - } - - @Override - public void checkValid(Config reference, String... restrictToPaths) { - SimpleConfig ref = (SimpleConfig) reference; - - // unresolved reference config is a bug in the caller of checkValid - if (ref.root().resolveStatus() != ResolveStatus.RESOLVED) - throw new ConfigException.BugOrBroken( - "do not call checkValid() with an unresolved reference config, call Config.resolve()"); - - // unresolved config under validation is probably a bug in something, - // but our whole goal here is to check for bugs in this config, so - // BugOrBroken is not the appropriate exception. 
- if (root().resolveStatus() != ResolveStatus.RESOLVED) - throw new ConfigException.NotResolved( - "config has unresolved substitutions; must call Config.resolve()"); - - // Now we know that both reference and this config are resolved - - List problems = new ArrayList(); - - if (restrictToPaths.length == 0) { - checkValidObject(null, ref.root(), root(), problems); - } else { - for (String p : restrictToPaths) { - Path path = Path.newPath(p); - AbstractConfigValue refValue = ref.peekPath(path); - if (refValue != null) { - AbstractConfigValue child = peekPath(path); - if (child != null) { - checkValid(path, refValue, child, problems); - } else { - addMissing(problems, refValue, path, origin()); - } - } - } - } - - if (!problems.isEmpty()) { - throw new ConfigException.ValidationFailed(problems); - } - } - - @Override - public SimpleConfig withOnlyPath(String pathExpression) { - Path path = Path.newPath(pathExpression); - return new SimpleConfig(root().withOnlyPath(path)); - } - - @Override - public SimpleConfig withoutPath(String pathExpression) { - Path path = Path.newPath(pathExpression); - return new SimpleConfig(root().withoutPath(path)); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigList.java b/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigList.java deleted file mode 100755 index 50d361f35e..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigList.java +++ /dev/null @@ -1,383 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import java.io.ObjectStreamException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.ListIterator; - -import com.typesafe.config.ConfigList; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigResolveOptions; -import com.typesafe.config.ConfigValue; -import com.typesafe.config.ConfigValueType; - -final class SimpleConfigList extends AbstractConfigValue implements ConfigList { - - private static final long serialVersionUID = 1L; - - final private List value; - final private boolean resolved; - - SimpleConfigList(ConfigOrigin origin, List value) { - this(origin, value, ResolveStatus - .fromValues(value)); - } - - SimpleConfigList(ConfigOrigin origin, List value, - ResolveStatus status) { - super(origin); - this.value = value; - this.resolved = status == ResolveStatus.RESOLVED; - } - - @Override - public ConfigValueType valueType() { - return ConfigValueType.LIST; - } - - @Override - public List unwrapped() { - List list = new ArrayList(); - for (AbstractConfigValue v : value) { - list.add(v.unwrapped()); - } - return list; - } - - @Override - ResolveStatus resolveStatus() { - return ResolveStatus.fromBoolean(resolved); - } - - private SimpleConfigList modify(Modifier modifier, - ResolveStatus newResolveStatus) { - // lazy-create for optimization - List changed = null; - int i = 0; - for (AbstractConfigValue v : value) { - AbstractConfigValue modified = modifier.modifyChild(v); - - // lazy-create the new list if required - if (changed == null && modified != v) { - changed = new ArrayList(); - for (int j = 0; j < i; ++j) { - changed.add(value.get(j)); - } - } - - // once the new list is created, all elements - // have to go in it. if modifyChild returned - // null, we drop that element. 
- if (changed != null && modified != null) { - changed.add(modified); - } - - i += 1; - } - - if (changed != null) { - return new SimpleConfigList(origin(), changed, newResolveStatus); - } else { - return this; - } - } - - @Override - SimpleConfigList resolveSubstitutions(final SubstitutionResolver resolver, - final int depth, final ConfigResolveOptions options) { - if (resolved) - return this; - - return modify(new Modifier() { - @Override - public AbstractConfigValue modifyChild(AbstractConfigValue v) { - return resolver.resolve(v, depth, options); - } - - }, ResolveStatus.RESOLVED); - } - - @Override - SimpleConfigList relativized(final Path prefix) { - return modify(new Modifier() { - @Override - public AbstractConfigValue modifyChild(AbstractConfigValue v) { - return v.relativized(prefix); - } - - }, resolveStatus()); - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof SimpleConfigList; - } - - @Override - public boolean equals(Object other) { - // note that "origin" is deliberately NOT part of equality - if (other instanceof SimpleConfigList) { - // optimization to avoid unwrapped() for two ConfigList - return canEqual(other) && value.equals(((SimpleConfigList) other).value); - } else { - return false; - } - } - - @Override - public int hashCode() { - // note that "origin" is deliberately NOT part of equality - return value.hashCode(); - } - - @Override - protected void render(StringBuilder sb, int indent, boolean formatted) { - if (value.isEmpty()) { - sb.append("[]"); - } else { - sb.append("["); - if (formatted) - sb.append('\n'); - for (AbstractConfigValue v : value) { - if (formatted) { - indent(sb, indent + 1); - sb.append("# "); - sb.append(v.origin().description()); - sb.append("\n"); - - for (String comment : v.origin().comments()) { - indent(sb, indent + 1); - sb.append("# "); - sb.append(comment); - sb.append("\n"); - } - - indent(sb, indent + 1); - } - v.render(sb, indent + 1, formatted); - sb.append(","); - if 
(formatted) - sb.append('\n'); - } - sb.setLength(sb.length() - 1); // chop or newline - if (formatted) { - sb.setLength(sb.length() - 1); // also chop comma - sb.append('\n'); - indent(sb, indent); - } - sb.append("]"); - } - } - - @Override - public boolean contains(Object o) { - return value.contains(o); - } - - @Override - public boolean containsAll(Collection c) { - return value.containsAll(c); - } - - @Override - public AbstractConfigValue get(int index) { - return value.get(index); - } - - @Override - public int indexOf(Object o) { - return value.indexOf(o); - } - - @Override - public boolean isEmpty() { - return value.isEmpty(); - } - - @Override - public Iterator iterator() { - final Iterator i = value.iterator(); - - return new Iterator() { - @Override - public boolean hasNext() { - return i.hasNext(); - } - - @Override - public ConfigValue next() { - return i.next(); - } - - @Override - public void remove() { - throw weAreImmutable("iterator().remove"); - } - }; - } - - @Override - public int lastIndexOf(Object o) { - return value.lastIndexOf(o); - } - - private static ListIterator wrapListIterator( - final ListIterator i) { - return new ListIterator() { - @Override - public boolean hasNext() { - return i.hasNext(); - } - - @Override - public ConfigValue next() { - return i.next(); - } - - @Override - public void remove() { - throw weAreImmutable("listIterator().remove"); - } - - @Override - public void add(ConfigValue arg0) { - throw weAreImmutable("listIterator().add"); - } - - @Override - public boolean hasPrevious() { - return i.hasPrevious(); - } - - @Override - public int nextIndex() { - return i.nextIndex(); - } - - @Override - public ConfigValue previous() { - return i.previous(); - } - - @Override - public int previousIndex() { - return i.previousIndex(); - } - - @Override - public void set(ConfigValue arg0) { - throw weAreImmutable("listIterator().set"); - } - }; - } - - @Override - public ListIterator listIterator() { - return 
wrapListIterator(value.listIterator()); - } - - @Override - public ListIterator listIterator(int index) { - return wrapListIterator(value.listIterator(index)); - } - - @Override - public int size() { - return value.size(); - } - - @Override - public List subList(int fromIndex, int toIndex) { - List list = new ArrayList(); - // yay bloat caused by lack of type variance - for (AbstractConfigValue v : value.subList(fromIndex, toIndex)) { - list.add(v); - } - return list; - } - - @Override - public Object[] toArray() { - return value.toArray(); - } - - @Override - public T[] toArray(T[] a) { - return value.toArray(a); - } - - private static UnsupportedOperationException weAreImmutable(String method) { - return new UnsupportedOperationException( - "ConfigList is immutable, you can't call List.'" + method + "'"); - } - - @Override - public boolean add(ConfigValue e) { - throw weAreImmutable("add"); - } - - @Override - public void add(int index, ConfigValue element) { - throw weAreImmutable("add"); - } - - @Override - public boolean addAll(Collection c) { - throw weAreImmutable("addAll"); - } - - @Override - public boolean addAll(int index, Collection c) { - throw weAreImmutable("addAll"); - } - - @Override - public void clear() { - throw weAreImmutable("clear"); - } - - @Override - public boolean remove(Object o) { - throw weAreImmutable("remove"); - } - - @Override - public ConfigValue remove(int index) { - throw weAreImmutable("remove"); - } - - @Override - public boolean removeAll(Collection c) { - throw weAreImmutable("removeAll"); - } - - @Override - public boolean retainAll(Collection c) { - throw weAreImmutable("retainAll"); - } - - @Override - public ConfigValue set(int index, ConfigValue element) { - throw weAreImmutable("set"); - } - - @Override - protected SimpleConfigList newCopy(boolean ignoresFallbacks, ConfigOrigin newOrigin) { - return new SimpleConfigList(newOrigin, value); - } - - // This ridiculous hack is because some JDK versions apparently can't - 
// serialize an array, which is used to implement ArrayList and EmptyList. - // maybe - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6446627 - private Object writeReplace() throws ObjectStreamException { - // switch to LinkedList - return new SimpleConfigList(origin(), new java.util.LinkedList(value), - resolveStatus()); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigObject.java b/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigObject.java deleted file mode 100755 index a138ec611e..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigObject.java +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.util.AbstractMap; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigValue; - -final class SimpleConfigObject extends AbstractConfigObject { - - private static final long serialVersionUID = 1L; - - // this map should never be modified - assume immutable - final private Map value; - final private boolean resolved; - final private boolean ignoresFallbacks; - - SimpleConfigObject(ConfigOrigin origin, - Map value, ResolveStatus status, - boolean ignoresFallbacks) { - super(origin); - if (value == null) - throw new ConfigException.BugOrBroken( - "creating config object with null map"); - this.value = value; - this.resolved = status == ResolveStatus.RESOLVED; - this.ignoresFallbacks = ignoresFallbacks; - } - - SimpleConfigObject(ConfigOrigin origin, - Map value) { - this(origin, value, ResolveStatus.fromValues(value.values()), false /* ignoresFallbacks */); - } - - @Override - public SimpleConfigObject withOnlyKey(String key) { - return withOnlyPath(Path.newKey(key)); - } - - @Override - 
public SimpleConfigObject withoutKey(String key) { - return withoutPath(Path.newKey(key)); - } - - // gets the object with only the path if the path - // exists, otherwise null if it doesn't. this ensures - // that if we have { a : { b : 42 } } and do - // withOnlyPath("a.b.c") that we don't keep an empty - // "a" object. - @Override - protected SimpleConfigObject withOnlyPathOrNull(Path path) { - String key = path.first(); - Path next = path.remainder(); - AbstractConfigValue v = value.get(key); - - if (next != null) { - if (v != null && (v instanceof AbstractConfigObject)) { - v = ((AbstractConfigObject) v).withOnlyPathOrNull(next); - } else { - // if the path has more elements but we don't have an object, - // then the rest of the path does not exist. - v = null; - } - } - - if (v == null) { - return null; - } else { - return new SimpleConfigObject(origin(), Collections.singletonMap(key, v), - resolveStatus(), ignoresFallbacks); - } - } - - @Override - SimpleConfigObject withOnlyPath(Path path) { - SimpleConfigObject o = withOnlyPathOrNull(path); - if (o == null) { - return new SimpleConfigObject(origin(), - Collections. 
emptyMap(), resolveStatus(), - ignoresFallbacks); - } else { - return o; - } - } - - @Override - SimpleConfigObject withoutPath(Path path) { - String key = path.first(); - Path next = path.remainder(); - AbstractConfigValue v = value.get(key); - - if (v != null && next != null && v instanceof AbstractConfigObject) { - v = ((AbstractConfigObject) v).withoutPath(next); - Map updated = new HashMap( - value); - updated.put(key, v); - return new SimpleConfigObject(origin(), updated, resolveStatus(), ignoresFallbacks); - } else if (next != null || v == null) { - // can't descend, nothing to remove - return this; - } else { - Map smaller = new HashMap( - value.size() - 1); - for (Map.Entry old : value.entrySet()) { - if (!old.getKey().equals(key)) - smaller.put(old.getKey(), old.getValue()); - } - return new SimpleConfigObject(origin(), smaller, resolveStatus(), ignoresFallbacks); - } - } - - @Override - protected AbstractConfigValue peek(String key) { - return value.get(key); - } - - @Override - protected SimpleConfigObject newCopy(ResolveStatus newStatus, boolean newIgnoresFallbacks, - ConfigOrigin newOrigin) { - return new SimpleConfigObject(newOrigin, value, newStatus, newIgnoresFallbacks); - } - - @Override - ResolveStatus resolveStatus() { - return ResolveStatus.fromBoolean(resolved); - } - - @Override - protected boolean ignoresFallbacks() { - return ignoresFallbacks; - } - - @Override - public Map unwrapped() { - Map m = new HashMap(); - for (Map.Entry e : value.entrySet()) { - m.put(e.getKey(), e.getValue().unwrapped()); - } - return m; - } - - @Override - public boolean containsKey(Object key) { - return value.containsKey(key); - } - - @Override - public Set keySet() { - return value.keySet(); - } - - @Override - public boolean containsValue(Object v) { - return value.containsValue(v); - } - - @Override - public Set> entrySet() { - // total bloat just to work around lack of type variance - - HashSet> entries = new HashSet>(); - for (Map.Entry e : 
value.entrySet()) { - entries.add(new AbstractMap.SimpleImmutableEntry( - e.getKey(), e - .getValue())); - } - return entries; - } - - @Override - public boolean isEmpty() { - return value.isEmpty(); - } - - @Override - public int size() { - return value.size(); - } - - @Override - public Collection values() { - return new HashSet(value.values()); - } - - final private static String EMPTY_NAME = "empty config"; - final private static SimpleConfigObject emptyInstance = empty(SimpleConfigOrigin - .newSimple(EMPTY_NAME)); - - final static SimpleConfigObject empty() { - return emptyInstance; - } - - final static SimpleConfigObject empty(ConfigOrigin origin) { - if (origin == null) - return empty(); - else - return new SimpleConfigObject(origin, - Collections. emptyMap()); - } - - final static SimpleConfigObject emptyMissing(ConfigOrigin baseOrigin) { - return new SimpleConfigObject(SimpleConfigOrigin.newSimple( - baseOrigin.description() + " (not found)"), - Collections. emptyMap()); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigOrigin.java b/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigOrigin.java deleted file mode 100755 index 23351c1e8e..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/SimpleConfigOrigin.java +++ /dev/null @@ -1,341 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.io.File; -import java.io.Serializable; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; - -// it would be cleaner to have a class hierarchy for various origin types, -// but was hoping this would be enough simpler to be a little messy. eh. 
-final class SimpleConfigOrigin implements ConfigOrigin, Serializable { - - private static final long serialVersionUID = 1L; - - final private String description; - final private int lineNumber; - final private int endLineNumber; - final private OriginType originType; - final private String urlOrNull; - final private List commentsOrNull; - - protected SimpleConfigOrigin(String description, int lineNumber, int endLineNumber, - OriginType originType, - String urlOrNull, List commentsOrNull) { - this.description = description; - this.lineNumber = lineNumber; - this.endLineNumber = endLineNumber; - this.originType = originType; - this.urlOrNull = urlOrNull; - this.commentsOrNull = commentsOrNull; - } - - static SimpleConfigOrigin newSimple(String description) { - return new SimpleConfigOrigin(description, -1, -1, OriginType.GENERIC, null, null); - } - - static SimpleConfigOrigin newFile(String filename) { - String url; - try { - url = (new File(filename)).toURI().toURL().toExternalForm(); - } catch (MalformedURLException e) { - url = null; - } - return new SimpleConfigOrigin(filename, -1, -1, OriginType.FILE, url, null); - } - - static SimpleConfigOrigin newURL(URL url) { - String u = url.toExternalForm(); - return new SimpleConfigOrigin(u, -1, -1, OriginType.URL, u, null); - } - - static SimpleConfigOrigin newResource(String resource, URL url) { - return new SimpleConfigOrigin(resource, -1, -1, OriginType.RESOURCE, - url != null ? 
url.toExternalForm() : null, null); - } - - static SimpleConfigOrigin newResource(String resource) { - return newResource(resource, null); - } - - SimpleConfigOrigin setLineNumber(int lineNumber) { - if (lineNumber == this.lineNumber && lineNumber == this.endLineNumber) { - return this; - } else { - return new SimpleConfigOrigin(this.description, lineNumber, lineNumber, - this.originType, this.urlOrNull, this.commentsOrNull); - } - } - - SimpleConfigOrigin addURL(URL url) { - return new SimpleConfigOrigin(this.description, this.lineNumber, this.endLineNumber, - this.originType, url != null ? url.toExternalForm() : null, this.commentsOrNull); - } - - SimpleConfigOrigin setComments(List comments) { - if (ConfigImplUtil.equalsHandlingNull(comments, this.commentsOrNull)) { - return this; - } else { - return new SimpleConfigOrigin(this.description, this.lineNumber, this.endLineNumber, - this.originType, this.urlOrNull, comments); - } - } - - @Override - public String description() { - // not putting the URL in here for files and resources, because people - // parsing "file: line" syntax would hit the ":" in the URL. 
- if (lineNumber < 0) { - return description; - } else if (endLineNumber == lineNumber) { - return description + ": " + lineNumber; - } else { - return description + ": " + lineNumber + "-" + endLineNumber; - } - } - - @Override - public boolean equals(Object other) { - if (other instanceof SimpleConfigOrigin) { - SimpleConfigOrigin otherOrigin = (SimpleConfigOrigin) other; - - return this.description.equals(otherOrigin.description) - && this.lineNumber == otherOrigin.lineNumber - && this.endLineNumber == otherOrigin.endLineNumber - && this.originType == otherOrigin.originType - && ConfigImplUtil.equalsHandlingNull(this.urlOrNull, otherOrigin.urlOrNull); - } else { - return false; - } - } - - @Override - public int hashCode() { - int h = 41 * (41 + description.hashCode()); - h = 41 * (h + lineNumber); - h = 41 * (h + endLineNumber); - h = 41 * (h + originType.hashCode()); - if (urlOrNull != null) - h = 41 * (h + urlOrNull.hashCode()); - return h; - } - - @Override - public String toString() { - // the url is only really useful on top of description for resources - if (originType == OriginType.RESOURCE && urlOrNull != null) { - return "ConfigOrigin(" + description + "," + urlOrNull + ")"; - } else { - return "ConfigOrigin(" + description + ")"; - } - } - - @Override - public String filename() { - if (originType == OriginType.FILE) { - return description; - } else if (urlOrNull != null) { - URL url; - try { - url = new URL(urlOrNull); - } catch (MalformedURLException e) { - return null; - } - if (url.getProtocol().equals("file")) { - return url.getFile(); - } else { - return null; - } - } else { - return null; - } - } - - @Override - public URL url() { - if (urlOrNull == null) { - return null; - } else { - try { - return new URL(urlOrNull); - } catch (MalformedURLException e) { - return null; - } - } - } - - @Override - public String resource() { - if (originType == OriginType.RESOURCE) { - return description; - } else { - return null; - } - } - - @Override - public 
int lineNumber() { - return lineNumber; - } - - @Override - public List comments() { - if (commentsOrNull != null) { - return commentsOrNull; - } else { - return Collections.emptyList(); - } - } - - static final String MERGE_OF_PREFIX = "merge of "; - - private static SimpleConfigOrigin mergeTwo(SimpleConfigOrigin a, SimpleConfigOrigin b) { - String mergedDesc; - int mergedStartLine; - int mergedEndLine; - List mergedComments; - - OriginType mergedType; - if (a.originType == b.originType) { - mergedType = a.originType; - } else { - mergedType = OriginType.GENERIC; - } - - // first use the "description" field which has no line numbers - // cluttering it. - String aDesc = a.description; - String bDesc = b.description; - if (aDesc.startsWith(MERGE_OF_PREFIX)) - aDesc = aDesc.substring(MERGE_OF_PREFIX.length()); - if (bDesc.startsWith(MERGE_OF_PREFIX)) - bDesc = bDesc.substring(MERGE_OF_PREFIX.length()); - - if (aDesc.equals(bDesc)) { - mergedDesc = aDesc; - - if (a.lineNumber < 0) - mergedStartLine = b.lineNumber; - else if (b.lineNumber < 0) - mergedStartLine = a.lineNumber; - else - mergedStartLine = Math.min(a.lineNumber, b.lineNumber); - - mergedEndLine = Math.max(a.endLineNumber, b.endLineNumber); - } else { - // this whole merge song-and-dance was intended to avoid this case - // whenever possible, but we've lost. Now we have to lose some - // structured information and cram into a string. - - // description() method includes line numbers, so use it instead - // of description field. 
- String aFull = a.description(); - String bFull = b.description(); - if (aFull.startsWith(MERGE_OF_PREFIX)) - aFull = aFull.substring(MERGE_OF_PREFIX.length()); - if (bFull.startsWith(MERGE_OF_PREFIX)) - bFull = bFull.substring(MERGE_OF_PREFIX.length()); - - mergedDesc = MERGE_OF_PREFIX + aFull + "," + bFull; - - mergedStartLine = -1; - mergedEndLine = -1; - } - - String mergedURL; - if (ConfigImplUtil.equalsHandlingNull(a.urlOrNull, b.urlOrNull)) { - mergedURL = a.urlOrNull; - } else { - mergedURL = null; - } - - if (ConfigImplUtil.equalsHandlingNull(a.commentsOrNull, b.commentsOrNull)) { - mergedComments = a.commentsOrNull; - } else { - mergedComments = new ArrayList(); - if (a.commentsOrNull != null) - mergedComments.addAll(a.commentsOrNull); - if (b.commentsOrNull != null) - mergedComments.addAll(b.commentsOrNull); - } - - return new SimpleConfigOrigin(mergedDesc, mergedStartLine, mergedEndLine, mergedType, - mergedURL, mergedComments); - } - - private static int similarity(SimpleConfigOrigin a, SimpleConfigOrigin b) { - int count = 0; - - if (a.originType == b.originType) - count += 1; - - if (a.description.equals(b.description)) { - count += 1; - - // only count these if the description field (which is the file - // or resource name) also matches. - if (a.lineNumber == b.lineNumber) - count += 1; - if (a.endLineNumber == b.endLineNumber) - count += 1; - if (ConfigImplUtil.equalsHandlingNull(a.urlOrNull, b.urlOrNull)) - count += 1; - } - - return count; - } - - // this picks the best pair to merge, because the pair has the most in - // common. we want to merge two lines in the same file rather than something - // else with one of the lines; because two lines in the same file can be - // better consolidated. 
- private static SimpleConfigOrigin mergeThree(SimpleConfigOrigin a, SimpleConfigOrigin b, - SimpleConfigOrigin c) { - if (similarity(a, b) >= similarity(b, c)) { - return mergeTwo(mergeTwo(a, b), c); - } else { - return mergeTwo(a, mergeTwo(b, c)); - } - } - - static ConfigOrigin mergeOrigins(Collection stack) { - if (stack.isEmpty()) { - throw new ConfigException.BugOrBroken("can't merge empty list of origins"); - } else if (stack.size() == 1) { - return stack.iterator().next(); - } else if (stack.size() == 2) { - Iterator i = stack.iterator(); - return mergeTwo((SimpleConfigOrigin) i.next(), (SimpleConfigOrigin) i.next()); - } else { - List remaining = new ArrayList(); - for (ConfigOrigin o : stack) { - remaining.add((SimpleConfigOrigin) o); - } - while (remaining.size() > 2) { - SimpleConfigOrigin c = remaining.get(remaining.size() - 1); - remaining.remove(remaining.size() - 1); - SimpleConfigOrigin b = remaining.get(remaining.size() - 1); - remaining.remove(remaining.size() - 1); - SimpleConfigOrigin a = remaining.get(remaining.size() - 1); - remaining.remove(remaining.size() - 1); - - SimpleConfigOrigin merged = mergeThree(a, b, c); - - remaining.add(merged); - } - - // should be down to either 1 or 2 - return mergeOrigins(remaining); - } - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/SubstitutionExpression.java b/akka-actor/src/main/java/com/typesafe/config/impl/SubstitutionExpression.java deleted file mode 100755 index be67073a1d..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/SubstitutionExpression.java +++ /dev/null @@ -1,50 +0,0 @@ -package com.typesafe.config.impl; - -import java.io.Serializable; - -final class SubstitutionExpression implements Serializable { - - private static final long serialVersionUID = 1L; - - final private Path path; - final private boolean optional; - - SubstitutionExpression(Path path, boolean optional) { - this.path = path; - this.optional = optional; - } - - Path path() { - return 
path; - } - - boolean optional() { - return optional; - } - - SubstitutionExpression changePath(Path newPath) { - return new SubstitutionExpression(newPath, optional); - } - - @Override - public String toString() { - return "${" + (optional ? "?" : "") + path.render() + "}"; - } - - @Override - public boolean equals(Object other) { - if (other instanceof SubstitutionExpression) { - SubstitutionExpression otherExp = (SubstitutionExpression) other; - return otherExp.path.equals(this.path) && otherExp.optional == this.optional; - } else { - return false; - } - } - - @Override - public int hashCode() { - int h = 41 * (41 + path.hashCode()); - h = 41 * (h + (optional ? 1 : 0)); - return h; - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/SubstitutionResolver.java b/akka-actor/src/main/java/com/typesafe/config/impl/SubstitutionResolver.java deleted file mode 100755 index 7bb3bf3a61..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/SubstitutionResolver.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.util.IdentityHashMap; -import java.util.Map; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigResolveOptions; - -/** - * This exists because we have to memoize resolved substitutions as we go - * through the config tree; otherwise we could end up creating multiple copies - * of values or whole trees of values as we follow chains of substitutions. - */ -final class SubstitutionResolver { - final private AbstractConfigObject root; - // note that we can resolve things to undefined (represented as Java null, - // rather than ConfigNull) so this map can have null values. 
- final private Map memos; - - SubstitutionResolver(AbstractConfigObject root) { - this.root = root; - // note: the memoization is by object identity, not object value - this.memos = new IdentityHashMap(); - } - - AbstractConfigValue resolve(AbstractConfigValue original, int depth, - ConfigResolveOptions options) { - if (memos.containsKey(original)) { - return memos.get(original); - } else { - AbstractConfigValue resolved = original.resolveSubstitutions(this, - depth, options); - if (resolved != null) { - if (resolved.resolveStatus() != ResolveStatus.RESOLVED) - throw new ConfigException.BugOrBroken( - "resolveSubstitutions() did not give us a resolved object"); - } - memos.put(original, resolved); - return resolved; - } - } - - AbstractConfigObject root() { - return this.root; - } - - static AbstractConfigValue resolve(AbstractConfigValue value, - AbstractConfigObject root, ConfigResolveOptions options) { - SubstitutionResolver resolver = new SubstitutionResolver(root); - return resolver.resolve(value, 0, options); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/Token.java b/akka-actor/src/main/java/com/typesafe/config/impl/Token.java deleted file mode 100755 index 5f16d26e1d..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/Token.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; - -class Token { - final private TokenType tokenType; - final private String debugString; - final private ConfigOrigin origin; - - Token(TokenType tokenType, ConfigOrigin origin) { - this(tokenType, origin, null); - } - - Token(TokenType tokenType, ConfigOrigin origin, String debugString) { - this.tokenType = tokenType; - this.origin = origin; - this.debugString = debugString; - } - - // this is used for singleton tokens like COMMA or OPEN_CURLY - static Token newWithoutOrigin(TokenType tokenType, String debugString) { - return new Token(tokenType, null, debugString); - } - - final TokenType tokenType() { - return tokenType; - } - - // this is final because we don't always use the origin() accessor, - // and we don't because it throws if origin is null - final ConfigOrigin origin() { - // code is only supposed to call origin() on token types that are - // expected to have an origin. 
- if (origin == null) - throw new ConfigException.BugOrBroken( - "tried to get origin from token that doesn't have one: " + this); - return origin; - } - - final int lineNumber() { - if (origin != null) - return origin.lineNumber(); - else - return -1; - } - - @Override - public String toString() { - if (debugString != null) - return debugString; - else - return tokenType.name(); - } - - protected boolean canEqual(Object other) { - return other instanceof Token; - } - - @Override - public boolean equals(Object other) { - if (other instanceof Token) { - // origin is deliberately left out - return canEqual(other) - && this.tokenType == ((Token) other).tokenType; - } else { - return false; - } - } - - @Override - public int hashCode() { - // origin is deliberately left out - return tokenType.hashCode(); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/TokenType.java b/akka-actor/src/main/java/com/typesafe/config/impl/TokenType.java deleted file mode 100755 index 7853c09445..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/TokenType.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -enum TokenType { - START, - END, - COMMA, - EQUALS, - COLON, - OPEN_CURLY, - CLOSE_CURLY, - OPEN_SQUARE, - CLOSE_SQUARE, - VALUE, - NEWLINE, - UNQUOTED_TEXT, - SUBSTITUTION, - PROBLEM, - COMMENT; -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/Tokenizer.java b/akka-actor/src/main/java/com/typesafe/config/impl/Tokenizer.java deleted file mode 100755 index 2fcee8e61a..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/Tokenizer.java +++ /dev/null @@ -1,596 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. 
- */ -package com.typesafe.config.impl; - -import java.io.IOException; -import java.io.Reader; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Queue; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigSyntax; - -final class Tokenizer { - // this exception should not leave this file - private static class ProblemException extends Exception { - private static final long serialVersionUID = 1L; - - final private Token problem; - - ProblemException(Token problem) { - this.problem = problem; - } - - Token problem() { - return problem; - } - } - - private static String asString(int codepoint) { - if (codepoint == '\n') - return "newline"; - else if (codepoint == '\t') - return "tab"; - else if (codepoint == -1) - return "end of file"; - else if (Character.isISOControl(codepoint)) - return String.format("control character 0x%x", codepoint); - else - return String.format("%c", codepoint); - } - - /** - * Tokenizes a Reader. Does not close the reader; you have to arrange to do - * that after you're done with the returned iterator. 
- */ - static Iterator tokenize(ConfigOrigin origin, Reader input, ConfigSyntax flavor) { - return new TokenIterator(origin, input, flavor != ConfigSyntax.JSON); - } - - private static class TokenIterator implements Iterator { - - private static class WhitespaceSaver { - // has to be saved inside value concatenations - private StringBuilder whitespace; - // may need to value-concat with next value - private boolean lastTokenWasSimpleValue; - - WhitespaceSaver() { - whitespace = new StringBuilder(); - lastTokenWasSimpleValue = false; - } - - void add(int c) { - if (lastTokenWasSimpleValue) - whitespace.appendCodePoint(c); - } - - Token check(Token t, ConfigOrigin baseOrigin, int lineNumber) { - if (isSimpleValue(t)) { - return nextIsASimpleValue(baseOrigin, lineNumber); - } else { - nextIsNotASimpleValue(); - return null; - } - } - - // called if the next token is not a simple value; - // discards any whitespace we were saving between - // simple values. - private void nextIsNotASimpleValue() { - lastTokenWasSimpleValue = false; - whitespace.setLength(0); - } - - // called if the next token IS a simple value, - // so creates a whitespace token if the previous - // token also was. - private Token nextIsASimpleValue(ConfigOrigin baseOrigin, - int lineNumber) { - if (lastTokenWasSimpleValue) { - // need to save whitespace between the two so - // the parser has the option to concatenate it. 
- if (whitespace.length() > 0) { - Token t = Tokens.newUnquotedText( - lineOrigin(baseOrigin, lineNumber), - whitespace.toString()); - whitespace.setLength(0); // reset - return t; - } else { - // lastTokenWasSimpleValue = true still - return null; - } - } else { - lastTokenWasSimpleValue = true; - whitespace.setLength(0); - return null; - } - } - } - - final private SimpleConfigOrigin origin; - final private Reader input; - final private LinkedList buffer; - private int lineNumber; - private ConfigOrigin lineOrigin; - final private Queue tokens; - final private WhitespaceSaver whitespaceSaver; - final private boolean allowComments; - - TokenIterator(ConfigOrigin origin, Reader input, boolean allowComments) { - this.origin = (SimpleConfigOrigin) origin; - this.input = input; - this.allowComments = allowComments; - this.buffer = new LinkedList(); - lineNumber = 1; - lineOrigin = this.origin.setLineNumber(lineNumber); - tokens = new LinkedList(); - tokens.add(Tokens.START); - whitespaceSaver = new WhitespaceSaver(); - } - - - // this should ONLY be called from nextCharSkippingComments - // or when inside a quoted string, everything else should - // use nextCharSkippingComments(). 
- private int nextCharRaw() { - if (buffer.isEmpty()) { - try { - return input.read(); - } catch (IOException e) { - throw new ConfigException.IO(origin, "read error: " - + e.getMessage(), e); - } - } else { - int c = buffer.pop(); - return c; - } - } - - private void putBack(int c) { - if (buffer.size() > 2) { - throw new ConfigException.BugOrBroken( - "bug: putBack() three times, undesirable look-ahead"); - } - buffer.push(c); - } - - static boolean isWhitespace(int c) { - return ConfigImplUtil.isWhitespace(c); - } - - static boolean isWhitespaceNotNewline(int c) { - return c != '\n' && ConfigImplUtil.isWhitespace(c); - } - - private boolean startOfComment(int c) { - if (c == -1) { - return false; - } else { - if (allowComments) { - if (c == '#') { - return true; - } else if (c == '/') { - int maybeSecondSlash = nextCharRaw(); - // we want to predictably NOT consume any chars - putBack(maybeSecondSlash); - if (maybeSecondSlash == '/') { - return true; - } else { - return false; - } - } else { - return false; - } - } else { - return false; - } - } - } - - // get next char, skipping non-newline whitespace - private int nextCharAfterWhitespace(WhitespaceSaver saver) { - for (;;) { - int c = nextCharRaw(); - - if (c == -1) { - return -1; - } else { - if (isWhitespaceNotNewline(c)) { - saver.add(c); - continue; - } else { - return c; - } - } - } - } - - private ProblemException problem(String message) { - return problem("", message, null); - } - - private ProblemException problem(String what, String message) { - return problem(what, message, null); - } - - private ProblemException problem(String what, String message, boolean suggestQuotes) { - return problem(what, message, suggestQuotes, null); - } - - private ProblemException problem(String what, String message, Throwable cause) { - return problem(lineOrigin, what, message, cause); - } - - private ProblemException problem(String what, String message, boolean suggestQuotes, - Throwable cause) { - return 
problem(lineOrigin, what, message, suggestQuotes, cause); - } - - private static ProblemException problem(ConfigOrigin origin, String what, - String message, - Throwable cause) { - return problem(origin, what, message, false, cause); - } - - private static ProblemException problem(ConfigOrigin origin, String what, String message, - boolean suggestQuotes, Throwable cause) { - if (what == null || message == null) - throw new ConfigException.BugOrBroken( - "internal error, creating bad ProblemException"); - return new ProblemException(Tokens.newProblem(origin, what, message, suggestQuotes, - cause)); - } - - private static ProblemException problem(ConfigOrigin origin, String message) { - return problem(origin, "", message, null); - } - - private static ConfigOrigin lineOrigin(ConfigOrigin baseOrigin, - int lineNumber) { - return ((SimpleConfigOrigin) baseOrigin).setLineNumber(lineNumber); - } - - // ONE char has always been consumed, either the # or the first /, but - // not both slashes - private Token pullComment(int firstChar) { - if (firstChar == '/') { - int discard = nextCharRaw(); - if (discard != '/') - throw new ConfigException.BugOrBroken("called pullComment but // not seen"); - } - - StringBuilder sb = new StringBuilder(); - for (;;) { - int c = nextCharRaw(); - if (c == -1 || c == '\n') { - putBack(c); - return Tokens.newComment(lineOrigin, sb.toString()); - } else { - sb.appendCodePoint(c); - } - } - } - - // chars JSON allows a number to start with - static final String firstNumberChars = "0123456789-"; - // chars JSON allows to be part of a number - static final String numberChars = "0123456789eE+-."; - // chars that stop an unquoted string - static final String notInUnquotedText = "$\"{}[]:=,+#`^?!@*&\\"; - - // The rules here are intended to maximize convenience while - // avoiding confusion with real valid JSON. 
Basically anything - // that parses as JSON is treated the JSON way and otherwise - // we assume it's a string and let the parser sort it out. - private Token pullUnquotedText() { - ConfigOrigin origin = lineOrigin; - StringBuilder sb = new StringBuilder(); - int c = nextCharRaw(); - while (true) { - if (c == -1) { - break; - } else if (notInUnquotedText.indexOf(c) >= 0) { - break; - } else if (isWhitespace(c)) { - break; - } else if (startOfComment(c)) { - break; - } else { - sb.appendCodePoint(c); - } - - // we parse true/false/null tokens as such no matter - // what is after them, as long as they are at the - // start of the unquoted token. - if (sb.length() == 4) { - String s = sb.toString(); - if (s.equals("true")) - return Tokens.newBoolean(origin, true); - else if (s.equals("null")) - return Tokens.newNull(origin); - } else if (sb.length() == 5) { - String s = sb.toString(); - if (s.equals("false")) - return Tokens.newBoolean(origin, false); - } - - c = nextCharRaw(); - } - - // put back the char that ended the unquoted text - putBack(c); - - String s = sb.toString(); - return Tokens.newUnquotedText(origin, s); - } - - private Token pullNumber(int firstChar) throws ProblemException { - StringBuilder sb = new StringBuilder(); - sb.appendCodePoint(firstChar); - boolean containedDecimalOrE = false; - int c = nextCharRaw(); - while (c != -1 && numberChars.indexOf(c) >= 0) { - if (c == '.' 
|| c == 'e' || c == 'E') - containedDecimalOrE = true; - sb.appendCodePoint(c); - c = nextCharRaw(); - } - // the last character we looked at wasn't part of the number, put it - // back - putBack(c); - String s = sb.toString(); - try { - if (containedDecimalOrE) { - // force floating point representation - return Tokens.newDouble(lineOrigin, Double.parseDouble(s), s); - } else { - // this should throw if the integer is too large for Long - return Tokens.newLong(lineOrigin, Long.parseLong(s), s); - } - } catch (NumberFormatException e) { - throw problem(s, "Invalid number: '" + s + "'", true /* suggestQuotes */, e); - } - } - - private void pullEscapeSequence(StringBuilder sb) throws ProblemException { - int escaped = nextCharRaw(); - if (escaped == -1) - throw problem("End of input but backslash in string had nothing after it"); - - switch (escaped) { - case '"': - sb.append('"'); - break; - case '\\': - sb.append('\\'); - break; - case '/': - sb.append('/'); - break; - case 'b': - sb.append('\b'); - break; - case 'f': - sb.append('\f'); - break; - case 'n': - sb.append('\n'); - break; - case 'r': - sb.append('\r'); - break; - case 't': - sb.append('\t'); - break; - case 'u': { - // kind of absurdly slow, but screw it for now - char[] a = new char[4]; - for (int i = 0; i < 4; ++i) { - int c = nextCharRaw(); - if (c == -1) - throw problem("End of input but expecting 4 hex digits for \\uXXXX escape"); - a[i] = (char) c; - } - String digits = new String(a); - try { - sb.appendCodePoint(Integer.parseInt(digits, 16)); - } catch (NumberFormatException e) { - throw problem(digits, String.format( - "Malformed hex digits after \\u escape in string: '%s'", digits), e); - } - } - break; - default: - throw problem( - asString(escaped), - String.format( - "backslash followed by '%s', this is not a valid escape sequence (quoted strings use JSON escaping, so use double-backslash \\\\ for literal backslash)", - asString(escaped))); - } - } - - private Token pullQuotedString() 
throws ProblemException { - // the open quote has already been consumed - StringBuilder sb = new StringBuilder(); - int c = '\0'; // value doesn't get used - do { - c = nextCharRaw(); - if (c == -1) - throw problem("End of input but string quote was still open"); - - if (c == '\\') { - pullEscapeSequence(sb); - } else if (c == '"') { - // end the loop, done! - } else if (Character.isISOControl(c)) { - throw problem(asString(c), "JSON does not allow unescaped " + asString(c) - + " in quoted strings, use a backslash escape"); - } else { - sb.appendCodePoint(c); - } - } while (c != '"'); - return Tokens.newString(lineOrigin, sb.toString()); - } - - private Token pullSubstitution() throws ProblemException { - // the initial '$' has already been consumed - ConfigOrigin origin = lineOrigin; - int c = nextCharRaw(); - if (c != '{') { - throw problem(asString(c), "'$' not followed by {, '" + asString(c) - + "' not allowed after '$'", true /* suggestQuotes */); - } - - boolean optional = false; - c = nextCharRaw(); - if (c == '?') { - optional = true; - } else { - putBack(c); - } - - WhitespaceSaver saver = new WhitespaceSaver(); - List expression = new ArrayList(); - - Token t; - do { - t = pullNextToken(saver); - - // note that we avoid validating the allowed tokens inside - // the substitution here; we even allow nested substitutions - // in the tokenizer. The parser sorts it out. - if (t == Tokens.CLOSE_CURLY) { - // end the loop, done! 
- break; - } else if (t == Tokens.END) { - throw problem(origin, - "Substitution ${ was not closed with a }"); - } else { - Token whitespace = saver.check(t, origin, lineNumber); - if (whitespace != null) - expression.add(whitespace); - expression.add(t); - } - } while (true); - - return Tokens.newSubstitution(origin, optional, expression); - } - - private Token pullNextToken(WhitespaceSaver saver) throws ProblemException { - int c = nextCharAfterWhitespace(saver); - if (c == -1) { - return Tokens.END; - } else if (c == '\n') { - // newline tokens have the just-ended line number - Token line = Tokens.newLine(lineOrigin); - lineNumber += 1; - lineOrigin = origin.setLineNumber(lineNumber); - return line; - } else { - Token t = null; - if (startOfComment(c)) { - t = pullComment(c); - } else { - switch (c) { - case '"': - t = pullQuotedString(); - break; - case '$': - t = pullSubstitution(); - break; - case ':': - t = Tokens.COLON; - break; - case ',': - t = Tokens.COMMA; - break; - case '=': - t = Tokens.EQUALS; - break; - case '{': - t = Tokens.OPEN_CURLY; - break; - case '}': - t = Tokens.CLOSE_CURLY; - break; - case '[': - t = Tokens.OPEN_SQUARE; - break; - case ']': - t = Tokens.CLOSE_SQUARE; - break; - } - - if (t == null) { - if (firstNumberChars.indexOf(c) >= 0) { - t = pullNumber(c); - } else if (notInUnquotedText.indexOf(c) >= 0) { - throw problem(asString(c), "Reserved character '" + asString(c) - + "' is not allowed outside quotes", true /* suggestQuotes */); - } else { - putBack(c); - t = pullUnquotedText(); - } - } - } - - if (t == null) - throw new ConfigException.BugOrBroken( - "bug: failed to generate next token"); - - return t; - } - } - - private static boolean isSimpleValue(Token t) { - if (Tokens.isSubstitution(t) || Tokens.isUnquotedText(t) - || Tokens.isValue(t)) { - return true; - } else { - return false; - } - } - - private void queueNextToken() throws ProblemException { - Token t = pullNextToken(whitespaceSaver); - Token whitespace = 
whitespaceSaver.check(t, origin, lineNumber); - if (whitespace != null) - tokens.add(whitespace); - - tokens.add(t); - } - - @Override - public boolean hasNext() { - return !tokens.isEmpty(); - } - - @Override - public Token next() { - Token t = tokens.remove(); - if (tokens.isEmpty() && t != Tokens.END) { - try { - queueNextToken(); - } catch (ProblemException e) { - tokens.add(e.problem()); - } - if (tokens.isEmpty()) - throw new ConfigException.BugOrBroken( - "bug: tokens queue should not be empty here"); - } - return t; - } - - @Override - public void remove() { - throw new UnsupportedOperationException( - "Does not make sense to remove items from token stream"); - } - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/Tokens.java b/akka-actor/src/main/java/com/typesafe/config/impl/Tokens.java deleted file mode 100755 index 83bec62af3..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/Tokens.java +++ /dev/null @@ -1,413 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.util.List; - -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigValueType; - -/* FIXME the way the subclasses of Token are private with static isFoo and accessors is kind of ridiculous. 
*/ -final class Tokens { - static private class Value extends Token { - - final private AbstractConfigValue value; - - Value(AbstractConfigValue value) { - super(TokenType.VALUE, value.origin()); - this.value = value; - } - - AbstractConfigValue value() { - return value; - } - - @Override - public String toString() { - return "'" + value().unwrapped() + "' (" + value.valueType().name() + ")"; - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof Value; - } - - @Override - public boolean equals(Object other) { - return super.equals(other) && ((Value) other).value.equals(value); - } - - @Override - public int hashCode() { - return 41 * (41 + super.hashCode()) + value.hashCode(); - } - } - - static private class Line extends Token { - Line(ConfigOrigin origin) { - super(TokenType.NEWLINE, origin); - } - - @Override - public String toString() { - return "'\\n'@" + lineNumber(); - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof Line; - } - - @Override - public boolean equals(Object other) { - return super.equals(other) && ((Line) other).lineNumber() == lineNumber(); - } - - @Override - public int hashCode() { - return 41 * (41 + super.hashCode()) + lineNumber(); - } - } - - // This is not a Value, because it requires special processing - static private class UnquotedText extends Token { - final private String value; - - UnquotedText(ConfigOrigin origin, String s) { - super(TokenType.UNQUOTED_TEXT, origin); - this.value = s; - } - - String value() { - return value; - } - - @Override - public String toString() { - return "'" + value + "'"; - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof UnquotedText; - } - - @Override - public boolean equals(Object other) { - return super.equals(other) - && ((UnquotedText) other).value.equals(value); - } - - @Override - public int hashCode() { - return 41 * (41 + super.hashCode()) + value.hashCode(); - } - } - - static 
private class Problem extends Token { - final private String what; - final private String message; - final private boolean suggestQuotes; - final private Throwable cause; - - Problem(ConfigOrigin origin, String what, String message, boolean suggestQuotes, - Throwable cause) { - super(TokenType.PROBLEM, origin); - this.what = what; - this.message = message; - this.suggestQuotes = suggestQuotes; - this.cause = cause; - } - - String message() { - return message; - } - - boolean suggestQuotes() { - return suggestQuotes; - } - - Throwable cause() { - return cause; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append('\''); - sb.append(what); - sb.append('\''); - return sb.toString(); - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof Problem; - } - - @Override - public boolean equals(Object other) { - return super.equals(other) && ((Problem) other).what.equals(what) - && ((Problem) other).message.equals(message) - && ((Problem) other).suggestQuotes == suggestQuotes - && ConfigImplUtil.equalsHandlingNull(((Problem) other).cause, cause); - } - - @Override - public int hashCode() { - int h = 41 * (41 + super.hashCode()); - h = 41 * (h + what.hashCode()); - h = 41 * (h + message.hashCode()); - h = 41 * (h + Boolean.valueOf(suggestQuotes).hashCode()); - if (cause != null) - h = 41 * (h + cause.hashCode()); - return h; - } - } - - static private class Comment extends Token { - final private String text; - - Comment(ConfigOrigin origin, String text) { - super(TokenType.COMMENT, origin); - this.text = text; - } - - String text() { - return text; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("'#"); - sb.append(text); - sb.append("' (COMMENT)"); - return sb.toString(); - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof Comment; - } - - @Override - public boolean equals(Object other) { - return 
super.equals(other) && ((Comment) other).text.equals(text); - } - - @Override - public int hashCode() { - int h = 41 * (41 + super.hashCode()); - h = 41 * (h + text.hashCode()); - return h; - } - } - - // This is not a Value, because it requires special processing - static private class Substitution extends Token { - final private boolean optional; - final private List value; - - Substitution(ConfigOrigin origin, boolean optional, List expression) { - super(TokenType.SUBSTITUTION, origin); - this.optional = optional; - this.value = expression; - } - - boolean optional() { - return optional; - } - - List value() { - return value; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - for (Token t : value) { - sb.append(t.toString()); - } - return "'${" + sb.toString() + "}'"; - } - - @Override - protected boolean canEqual(Object other) { - return other instanceof Substitution; - } - - @Override - public boolean equals(Object other) { - return super.equals(other) - && ((Substitution) other).value.equals(value); - } - - @Override - public int hashCode() { - return 41 * (41 + super.hashCode()) + value.hashCode(); - } - } - - static boolean isValue(Token token) { - return token instanceof Value; - } - - static AbstractConfigValue getValue(Token token) { - if (token instanceof Value) { - return ((Value) token).value(); - } else { - throw new ConfigException.BugOrBroken( - "tried to get value of non-value token " + token); - } - } - - static boolean isValueWithType(Token t, ConfigValueType valueType) { - return isValue(t) && getValue(t).valueType() == valueType; - } - - static boolean isNewline(Token token) { - return token instanceof Line; - } - - static boolean isProblem(Token token) { - return token instanceof Problem; - } - - static String getProblemMessage(Token token) { - if (token instanceof Problem) { - return ((Problem) token).message(); - } else { - throw new ConfigException.BugOrBroken("tried to get problem message from " + 
token); - } - } - - static boolean getProblemSuggestQuotes(Token token) { - if (token instanceof Problem) { - return ((Problem) token).suggestQuotes(); - } else { - throw new ConfigException.BugOrBroken("tried to get problem suggestQuotes from " - + token); - } - } - - static Throwable getProblemCause(Token token) { - if (token instanceof Problem) { - return ((Problem) token).cause(); - } else { - throw new ConfigException.BugOrBroken("tried to get problem cause from " + token); - } - } - - static boolean isComment(Token token) { - return token instanceof Comment; - } - - static String getCommentText(Token token) { - if (token instanceof Comment) { - return ((Comment) token).text(); - } else { - throw new ConfigException.BugOrBroken("tried to get comment text from " + token); - } - } - - static boolean isUnquotedText(Token token) { - return token instanceof UnquotedText; - } - - static String getUnquotedText(Token token) { - if (token instanceof UnquotedText) { - return ((UnquotedText) token).value(); - } else { - throw new ConfigException.BugOrBroken( - "tried to get unquoted text from " + token); - } - } - - static boolean isSubstitution(Token token) { - return token instanceof Substitution; - } - - static List getSubstitutionPathExpression(Token token) { - if (token instanceof Substitution) { - return ((Substitution) token).value(); - } else { - throw new ConfigException.BugOrBroken( - "tried to get substitution from " + token); - } - } - - static boolean getSubstitutionOptional(Token token) { - if (token instanceof Substitution) { - return ((Substitution) token).optional(); - } else { - throw new ConfigException.BugOrBroken("tried to get substitution optionality from " - + token); - } - } - - final static Token START = Token.newWithoutOrigin(TokenType.START, "start of file"); - final static Token END = Token.newWithoutOrigin(TokenType.END, "end of file"); - final static Token COMMA = Token.newWithoutOrigin(TokenType.COMMA, "','"); - final static Token EQUALS = 
Token.newWithoutOrigin(TokenType.EQUALS, "'='"); - final static Token COLON = Token.newWithoutOrigin(TokenType.COLON, "':'"); - final static Token OPEN_CURLY = Token.newWithoutOrigin(TokenType.OPEN_CURLY, "'{'"); - final static Token CLOSE_CURLY = Token.newWithoutOrigin(TokenType.CLOSE_CURLY, "'}'"); - final static Token OPEN_SQUARE = Token.newWithoutOrigin(TokenType.OPEN_SQUARE, "'['"); - final static Token CLOSE_SQUARE = Token.newWithoutOrigin(TokenType.CLOSE_SQUARE, "']'"); - - static Token newLine(ConfigOrigin origin) { - return new Line(origin); - } - - static Token newProblem(ConfigOrigin origin, String what, String message, - boolean suggestQuotes, Throwable cause) { - return new Problem(origin, what, message, suggestQuotes, cause); - } - - static Token newComment(ConfigOrigin origin, String text) { - return new Comment(origin, text); - } - - static Token newUnquotedText(ConfigOrigin origin, String s) { - return new UnquotedText(origin, s); - } - - static Token newSubstitution(ConfigOrigin origin, boolean optional, List expression) { - return new Substitution(origin, optional, expression); - } - - static Token newValue(AbstractConfigValue value) { - return new Value(value); - } - - static Token newString(ConfigOrigin origin, String value) { - return newValue(new ConfigString(origin, value)); - } - - static Token newInt(ConfigOrigin origin, int value, String originalText) { - return newValue(ConfigNumber.newNumber(origin, value, - originalText)); - } - - static Token newDouble(ConfigOrigin origin, double value, - String originalText) { - return newValue(ConfigNumber.newNumber(origin, value, - originalText)); - } - - static Token newLong(ConfigOrigin origin, long value, String originalText) { - return newValue(ConfigNumber.newNumber(origin, value, - originalText)); - } - - static Token newNull(ConfigOrigin origin) { - return newValue(new ConfigNull(origin)); - } - - static Token newBoolean(ConfigOrigin origin, boolean value) { - return newValue(new 
ConfigBoolean(origin, value)); - } -} diff --git a/akka-actor/src/main/java/com/typesafe/config/impl/Unmergeable.java b/akka-actor/src/main/java/com/typesafe/config/impl/Unmergeable.java deleted file mode 100755 index 0028f2e023..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/impl/Unmergeable.java +++ /dev/null @@ -1,16 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe Inc. - */ -package com.typesafe.config.impl; - -import java.util.Collection; - -/** - * Interface that tags a ConfigValue that is not mergeable until after - * substitutions are resolved. Basically these are special ConfigValue that - * never appear in a resolved tree, like {@link ConfigSubstitution} and - * {@link ConfigDelayedMerge}. - */ -interface Unmergeable { - Collection unmergedValues(); -} diff --git a/akka-actor/src/main/java/com/typesafe/config/package.html b/akka-actor/src/main/java/com/typesafe/config/package.html deleted file mode 100755 index 1e1c78bfb2..0000000000 --- a/akka-actor/src/main/java/com/typesafe/config/package.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - -

-An API for loading and using configuration files, see the project site -for more information. -

- -

-Typically you would load configuration with a static method from {@link com.typesafe.config.ConfigFactory} and then use -it with methods in the {@link com.typesafe.config.Config} interface. -

- -

-An application can simply call {@link com.typesafe.config.ConfigFactory#load()} and place -its configuration in "application.conf" on the classpath. -If you use the default configuration from {@link com.typesafe.config.ConfigFactory#load()} -there's no need to pass a configuration to your libraries -and frameworks, as long as they all default to this same default, which they should. -

- -

-A library or framework should ship a file "reference.conf" in its jar, and allow an application to pass in a -{@link com.typesafe.config.Config} to be used for the library. If no {@link com.typesafe.config.Config} is provided, -call {@link com.typesafe.config.ConfigFactory#load()} -to get the default one. Typically a library might offer two constructors, one with a Config parameter -and one which uses {@link com.typesafe.config.ConfigFactory#load()}. -

- -

-You can find an example app and library on GitHub. -

- - - diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 85bba37987..8ec0c2dd48 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -52,7 +52,8 @@ object AkkaBuild extends Build { packagedArtifact in (Compile, packageBin) <<= (artifact in (Compile, packageBin), OsgiKeys.bundle).identityMap, artifact in (Compile, packageBin) ~= (_.copy(`type` = "bundle")), // to fix scaladoc generation - fullClasspath in doc in Compile <<= fullClasspath in Compile + fullClasspath in doc in Compile <<= fullClasspath in Compile, + libraryDependencies ++= Dependencies.actor ) ) @@ -392,6 +393,10 @@ object AkkaBuild extends Build { object Dependencies { import Dependency._ + val actor = Seq( + config + ) + val testkit = Seq(Test.scalatest, Test.junit) val actorTests = Seq( @@ -442,7 +447,7 @@ object Dependency { } // Compile - + val config = "com.typesafe" % "config" % "0.4.1" val camelCore = "org.apache.camel" % "camel-core" % V.Camel // ApacheV2 val netty = "io.netty" % "netty" % V.Netty // ApacheV2 val protobuf = "com.google.protobuf" % "protobuf-java" % V.Protobuf // New BSD @@ -502,7 +507,7 @@ object OSGi { ) def akkaImport(packageName: String = "akka.*") = "%s;version=\"[2.1,2.2)\"".format(packageName) - def configImport(packageName: String = "com.typesafe.config.*") = "%s;version=\"[0.4,0.5)\"".format(packageName) + def configImport(packageName: String = "com.typesafe.config.*") = "%s;version=\"[0.4.1,0.5)\"".format(packageName) def scalaImport(packageName: String = "scala.*") = "%s;version=\"[2.9.2,2.10)\"".format(packageName) } From 44b6764a40b245f0560c64c0f7c3e5a522a4f457 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 29 May 2012 08:46:00 +0200 Subject: [PATCH 200/538] Reduce sleep and use awaitCond for the failure case, see #2110 --- .../GossipingAccrualFailureDetectorSpec.scala | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index fa70b9a134..9d388622db 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -16,7 +16,7 @@ object GossipingAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { val third = role("third") commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString("akka.cluster.failure-detector.threshold=5")). + withFallback(ConfigFactory.parseString("akka.cluster.failure-detector.threshold=4")). withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -40,7 +40,7 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "A Gossip-driven Failure Detector" must { - "receive gossip heartbeats so that all healthy systems in the cluster are marked 'available'" taggedAs LongRunningTest in { + "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { cluster.self @@ -49,25 +49,24 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi cluster.join(firstAddress) - log.info("Let the systems gossip for a while...") - 10.seconds.dilated.sleep // let them gossip + 5.seconds.dilated.sleep // let them gossip cluster.failureDetector.isAvailable(firstAddress) must be(true) cluster.failureDetector.isAvailable(secondAddress) must be(true) cluster.failureDetector.isAvailable(thirdAddress) must be(true) } - "mark system as 'unavailable' if a system in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { + "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { 
runOn(first) { testConductor.shutdown(third, 0) testConductor.removeNode(third) } runOn(first, second) { - log.info("Give the remaning systems time to detect failure...") - 15.seconds.dilated.sleep // give them time to detect failure + // remaning nodes should detect failure... + awaitCond(!cluster.failureDetector.isAvailable(thirdAddress), 10.seconds) + // other connections still ok cluster.failureDetector.isAvailable(firstAddress) must be(true) cluster.failureDetector.isAvailable(secondAddress) must be(true) - cluster.failureDetector.isAvailable(thirdAddress) must be(false) } } } From 80fabe240e6514b9b2a95778b813c7d77cb62658 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 29 May 2012 09:05:46 +0200 Subject: [PATCH 201/538] tags.exclude should add to default excludes, not override, see #2145 --- project/AkkaBuild.scala | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 4f322fdc2f..0d84386578 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -295,11 +295,11 @@ object AkkaBuild extends Build { ) val excludeTestNames = SettingKey[Seq[String]]("exclude-test-names") - val excludeTestTags = SettingKey[Seq[String]]("exclude-test-tags") - val includeTestTags = SettingKey[Seq[String]]("include-test-tags") - val onlyTestTags = SettingKey[Seq[String]]("only-test-tags") + val excludeTestTags = SettingKey[Set[String]]("exclude-test-tags") + val includeTestTags = SettingKey[Set[String]]("include-test-tags") + val onlyTestTags = SettingKey[Set[String]]("only-test-tags") - val defaultExcludedTags = Seq("timing", "long-running") + val defaultExcludedTags = Set("timing", "long-running") lazy val defaultMultiJvmOptions: Seq[String] = { (System.getProperty("akka.test.timefactor") match { @@ -315,28 +315,28 @@ object AkkaBuild extends Build { // for excluding tests by tag use system property: -Dakka.test.tags.exclude= // note that it will not be used if 
you specify -Dakka.test.tags.only - lazy val useExcludeTestTags: Seq[String] = { - if (useOnlyTestTags.isEmpty) systemPropertyAsSeq("akka.test.tags.exclude", defaultExcludedTags) - else Seq.empty + lazy val useExcludeTestTags: Set[String] = { + if (useOnlyTestTags.isEmpty) defaultExcludedTags ++ systemPropertyAsSeq("akka.test.tags.exclude").toSet + else Set.empty } // for including tests by tag use system property: -Dakka.test.tags.include= // note that it will not be used if you specify -Dakka.test.tags.only - lazy val useIncludeTestTags: Seq[String] = { - if (useOnlyTestTags.isEmpty) systemPropertyAsSeq("akka.test.tags.include") - else Seq.empty + lazy val useIncludeTestTags: Set[String] = { + if (useOnlyTestTags.isEmpty) systemPropertyAsSeq("akka.test.tags.include").toSet + else Set.empty } // for running only tests by tag use system property: -Dakka.test.tags.only= - lazy val useOnlyTestTags: Seq[String] = systemPropertyAsSeq("akka.test.tags.only") + lazy val useOnlyTestTags: Set[String] = systemPropertyAsSeq("akka.test.tags.only").toSet - def systemPropertyAsSeq(name: String, default: Seq[String] = Seq.empty): Seq[String] = { + def systemPropertyAsSeq(name: String): Seq[String] = { val prop = System.getProperty(name, "") - if (prop.isEmpty) default else prop.split(",").toSeq + if (prop.isEmpty) Seq.empty else prop.split(",").toSeq } lazy val defaultMultiJvmScalatestOptions: Seq[String] = { - val excludeTags = (useExcludeTestTags.toSet -- useIncludeTestTags.toSet).toSeq + val excludeTags = (useExcludeTestTags -- useIncludeTestTags).toSeq Seq("-r", "org.scalatest.akka.QuietReporter") ++ (if (excludeTags.isEmpty) Seq.empty else Seq("-l", excludeTags.mkString(" "))) ++ (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", useOnlyTestTags.mkString(" "))) @@ -364,7 +364,7 @@ object AkkaBuild extends Build { // add arguments for tests excluded by tag - includes override excludes (opposite to scalatest) testOptions in Test <++= (excludeTestTags, includeTestTags) map 
{ (excludes, includes) => - val tags = (excludes.toSet -- includes.toSet).toSeq + val tags = (excludes -- includes) if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-l", tags.mkString(" "))) }, From d1e8fb0b9a84e6719c622e003564b0e7fbd2cdc0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 09:06:03 +0200 Subject: [PATCH 202/538] Update docs to reflect that there isn't 0 deps for akka-actor --- akka-docs/intro/getting-started.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/akka-docs/intro/getting-started.rst b/akka-docs/intro/getting-started.rst index adc580c6e2..f96bb42b9d 100644 --- a/akka-docs/intro/getting-started.rst +++ b/akka-docs/intro/getting-started.rst @@ -31,7 +31,7 @@ Modules Akka is very modular and has many JARs for containing different features. -- ``akka-actor-2.1-SNAPSHOT.jar`` -- Classic Actors, Typed Actors, IO Actor etc. Has ZERO dependencies. +- ``akka-actor-2.1-SNAPSHOT.jar`` -- Classic Actors, Typed Actors, IO Actor etc. - ``akka-remote-2.1-SNAPSHOT.jar`` -- Remote Actors - ``akka-testkit-2.1-SNAPSHOT.jar`` -- Toolkit for testing Actor systems - ``akka-kernel-2.1-SNAPSHOT.jar`` -- Akka microkernel for running a bare-bones mini application server @@ -43,8 +43,7 @@ Akka is very modular and has many JARs for containing different features. - ``akka--mailbox-2.1-SNAPSHOT.jar`` -- Akka durable mailboxes How to see the JARs dependencies of each Akka module is described in the -:ref:`dependencies` section. Worth noting is that ``akka-actor`` has zero -external dependencies (apart from the ``scala-library.jar`` JAR). +:ref:`dependencies` section. 
Using a release distribution ---------------------------- From b9a6ccaf419568834452975a063812a833b1bdef Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 29 May 2012 08:28:54 +0200 Subject: [PATCH 203/538] Port all MultiJvm tests to MultiNode, see #1935 * Moved akka-remote/multi-jvm test to akka-remote-tests/multi-jvm * Removed old test utilities that are replaced by testconductor * Removed multi-jvm from akka-remote build, these tests are now in akka-remote-tests * Removed test dependencies in build that are not needed any longer * DirectRoutedRemoteActorMultiJvmSpec replaced with NewRemoteActorMultiJvmSpec, same thing --- ...Spec.scala => LookupRemoteActorSpec.scala} | 10 +- ...JvmSpec.scala => NewRemoteActorSpec.scala} | 14 +-- .../scala/akka/remote/QuietReporter.scala | 0 .../router/RandomRoutedRemoteActorSpec.scala | 92 ++++++++++++++ .../RoundRobinRoutedRemoteActorSpec.scala | 92 ++++++++++++++ .../ScatterGatherRoutedRemoteActorSpec.scala | 93 +++++++++++++++ .../testconductor/TestConductorSpec.scala | 4 +- .../AbstractRemoteActorMultiJvmSpec.scala | 29 ----- .../scala/akka/remote/AkkaRemoteSpec.scala | 33 ------ .../multi-jvm/scala/akka/remote/Barrier.scala | 19 --- .../DirectRoutedRemoteActorMultiJvmSpec.scala | 64 ---------- .../scala/akka/remote/FileBasedBarrier.scala | 83 ------------- .../scala/akka/remote/MultiJvmSync.scala | 49 -------- .../remote/NewRemoteActorMultiJvmSpec.scala | 65 ---------- .../RandomRoutedRemoteActorMultiJvmSpec.scala | 110 ----------------- ...ndRobinRoutedRemoteActorMultiJvmSpec.scala | 112 ------------------ ...rGatherRoutedRemoteActorMultiJvmSpec.scala | 107 ----------------- .../scala/akka/remote/ZKClient.scala | 71 ----------- project/AkkaBuild.scala | 19 +-- 19 files changed, 295 insertions(+), 771 deletions(-) rename akka-remote-tests/src/multi-jvm/scala/akka/remote/{SimpleRemoteSpec.scala => LookupRemoteActorSpec.scala} (77%) rename 
akka-remote-tests/src/multi-jvm/scala/akka/remote/{router/DirectRoutedRemoteActorMultiJvmSpec.scala => NewRemoteActorSpec.scala} (83%) rename {akka-remote => akka-remote-tests}/src/multi-jvm/scala/akka/remote/QuietReporter.scala (100%) create mode 100644 akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala create mode 100644 akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala create mode 100644 akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/AkkaRemoteSpec.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/DirectRoutedRemoteActorMultiJvmSpec.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/FileBasedBarrier.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/MultiJvmSync.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/NewRemoteActorMultiJvmSpec.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/RandomRoutedRemoteActorMultiJvmSpec.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/RoundRobinRoutedRemoteActorMultiJvmSpec.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/ScatterGatherRoutedRemoteActorMultiJvmSpec.scala delete mode 100644 akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala similarity index 77% rename from akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala rename to akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala index 
dcc4b60526..797ff97ecd 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/SimpleRemoteSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala @@ -11,7 +11,7 @@ import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -object SimpleRemoteMultiJvmSpec extends MultiNodeConfig { +object LookupRemoteActorMultiJvmSpec extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { @@ -26,12 +26,12 @@ object SimpleRemoteMultiJvmSpec extends MultiNodeConfig { } -class SimpleRemoteMultiJvmNode1 extends SimpleRemoteSpec -class SimpleRemoteMultiJvmNode2 extends SimpleRemoteSpec +class LookupRemoteActorMultiJvmNode1 extends LookupRemoteActorSpec +class LookupRemoteActorMultiJvmNode2 extends LookupRemoteActorSpec -class SimpleRemoteSpec extends MultiNodeSpec(SimpleRemoteMultiJvmSpec) +class LookupRemoteActorSpec extends MultiNodeSpec(LookupRemoteActorMultiJvmSpec) with ImplicitSender with DefaultTimeout { - import SimpleRemoteMultiJvmSpec._ + import LookupRemoteActorMultiJvmSpec._ def initialParticipants = 2 diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala similarity index 83% rename from akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala rename to akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala index 294bc80884..4342a20178 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/DirectRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala @@ -14,7 +14,7 @@ import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -object DirectRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { +object NewRemoteActorMultiJvmSpec 
extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { @@ -28,20 +28,20 @@ object DirectRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { val slave = role("slave") deployOn(master, """/service-hello.remote = "@slave@" """) - + deployOnAll("""/service-hello2.remote = "@slave@" """) } -class DirectRoutedRemoteActorMultiJvmNode1 extends DirectRoutedRemoteActorSpec -class DirectRoutedRemoteActorMultiJvmNode2 extends DirectRoutedRemoteActorSpec +class NewRemoteActorMultiJvmNode1 extends NewRemoteActorSpec +class NewRemoteActorMultiJvmNode2 extends NewRemoteActorSpec -class DirectRoutedRemoteActorSpec extends MultiNodeSpec(DirectRoutedRemoteActorMultiJvmSpec) +class NewRemoteActorSpec extends MultiNodeSpec(NewRemoteActorMultiJvmSpec) with ImplicitSender with DefaultTimeout { - import DirectRoutedRemoteActorMultiJvmSpec._ + import NewRemoteActorMultiJvmSpec._ def initialParticipants = 2 - "A new remote actor configured with a Direct router" must { + "A new remote actor" must { "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { runOn(master) { diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/QuietReporter.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/QuietReporter.scala similarity index 100% rename from akka-remote/src/multi-jvm/scala/akka/remote/QuietReporter.scala rename to akka-remote-tests/src/multi-jvm/scala/akka/remote/QuietReporter.scala diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala new file mode 100644 index 0000000000..87bae463ce --- /dev/null +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala @@ -0,0 +1,92 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.remote.router + +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.dispatch.Await +import akka.pattern.ask +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.routing.Broadcast +import akka.routing.RandomRouter +import akka.routing.RoutedActorRef +import akka.testkit._ + +object RandomRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { + + class SomeActor extends Actor with Serializable { + def receive = { + case "hit" ⇒ sender ! self + case "end" ⇒ context.stop(self) + } + } + + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(debugConfig(on = false)) + + deployOnAll(""" + /service-hello.router = "random" + /service-hello.nr-of-instances = 3 + /service-hello.target.nodes = ["@first@", "@second@", "@third@"] + """) +} + +class RandomRoutedRemoteActorMultiJvmNode1 extends RandomRoutedRemoteActorSpec +class RandomRoutedRemoteActorMultiJvmNode2 extends RandomRoutedRemoteActorSpec +class RandomRoutedRemoteActorMultiJvmNode3 extends RandomRoutedRemoteActorSpec +class RandomRoutedRemoteActorMultiJvmNode4 extends RandomRoutedRemoteActorSpec + +class RandomRoutedRemoteActorSpec extends MultiNodeSpec(RandomRoutedRemoteActorMultiJvmSpec) + with ImplicitSender with DefaultTimeout { + import RandomRoutedRemoteActorMultiJvmSpec._ + + def initialParticipants = 4 + + "A new remote actor configured with a Random router" must { + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { + + runOn(first, second, third) { + testConductor.enter("start", "broadcast-end", "end", "done") + } + + runOn(fourth) { + testConductor.enter("start") + val actor = system.actorOf(Props[SomeActor].withRouter(RandomRouter()), "service-hello") + actor.isInstanceOf[RoutedActorRef] must be(true) + + val connectionCount = 3 + val iterationCount = 10 + + var replies = Map( 
+ node(first).address -> 0, + node(second).address -> 0, + node(third).address -> 0) + + for (i ← 0 until iterationCount) { + for (k ← 0 until connectionCount) { + val nodeAddress = Await.result(actor ? "hit", timeout.duration).asInstanceOf[ActorRef].path.address + replies = replies + (nodeAddress -> (replies(nodeAddress) + 1)) + } + } + + testConductor.enter("broadcast-end") + actor ! Broadcast("end") + + testConductor.enter("end") + replies.values foreach { _ must be > (0) } + + // shut down the actor before we let the other node(s) shut down so we don't try to send + // "Terminate" to a shut down node + system.stop(actor) + testConductor.enter("done") + } + } + } +} diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala new file mode 100644 index 0000000000..48026af375 --- /dev/null +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala @@ -0,0 +1,92 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.remote.router + +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.dispatch.Await +import akka.pattern.ask +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.routing.Broadcast +import akka.routing.RoundRobinRouter +import akka.routing.RoutedActorRef +import akka.testkit._ + +object RoundRobinRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { + + class SomeActor extends Actor with Serializable { + def receive = { + case "hit" ⇒ sender ! 
self + case "end" ⇒ context.stop(self) + } + } + + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(debugConfig(on = false)) + + deployOnAll(""" + /service-hello.router = "round-robin" + /service-hello.nr-of-instances = 3 + /service-hello.target.nodes = ["@first@", "@second@", "@third@"] + """) +} + +class RoundRobinRoutedRemoteActorMultiJvmNode1 extends RoundRobinRoutedRemoteActorSpec +class RoundRobinRoutedRemoteActorMultiJvmNode2 extends RoundRobinRoutedRemoteActorSpec +class RoundRobinRoutedRemoteActorMultiJvmNode3 extends RoundRobinRoutedRemoteActorSpec +class RoundRobinRoutedRemoteActorMultiJvmNode4 extends RoundRobinRoutedRemoteActorSpec + +class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemoteActorMultiJvmSpec) + with ImplicitSender with DefaultTimeout { + import RoundRobinRoutedRemoteActorMultiJvmSpec._ + + def initialParticipants = 4 + + "A new remote actor configured with a RoundRobin router" must { + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { + + runOn(first, second, third) { + testConductor.enter("start", "broadcast-end", "end", "done") + } + + runOn(fourth) { + testConductor.enter("start") + val actor = system.actorOf(Props[SomeActor].withRouter(RoundRobinRouter()), "service-hello") + actor.isInstanceOf[RoutedActorRef] must be(true) + + val connectionCount = 3 + val iterationCount = 10 + + var replies = Map( + node(first).address -> 0, + node(second).address -> 0, + node(third).address -> 0) + + for (i ← 0 until iterationCount) { + for (k ← 0 until connectionCount) { + val nodeAddress = Await.result(actor ? "hit", timeout.duration).asInstanceOf[ActorRef].path.address + replies = replies + (nodeAddress -> (replies(nodeAddress) + 1)) + } + } + + testConductor.enter("broadcast-end") + actor ! 
Broadcast("end") + + testConductor.enter("end") + replies.values foreach { _ must be(10) } + + // shut down the actor before we let the other node(s) shut down so we don't try to send + // "Terminate" to a shut down node + system.stop(actor) + testConductor.enter("done") + } + } + } +} diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala new file mode 100644 index 0000000000..7afa86d22e --- /dev/null +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala @@ -0,0 +1,93 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.remote.router + +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.dispatch.Await +import akka.pattern.ask +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.routing.Broadcast +import akka.routing.ScatterGatherFirstCompletedRouter +import akka.routing.RoutedActorRef +import akka.testkit._ +import akka.util.duration._ + +object ScatterGatherRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { + + class SomeActor extends Actor with Serializable { + def receive = { + case "hit" ⇒ sender ! 
self + case "end" ⇒ context.stop(self) + } + } + + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(debugConfig(on = false)) + + deployOnAll(""" + /service-hello.router = "scatter-gather" + /service-hello.nr-of-instances = 3 + /service-hello.target.nodes = ["@first@", "@second@", "@third@"] + """) +} + +class ScatterGatherRoutedRemoteActorMultiJvmNode1 extends ScatterGatherRoutedRemoteActorSpec +class ScatterGatherRoutedRemoteActorMultiJvmNode2 extends ScatterGatherRoutedRemoteActorSpec +class ScatterGatherRoutedRemoteActorMultiJvmNode3 extends ScatterGatherRoutedRemoteActorSpec +class ScatterGatherRoutedRemoteActorMultiJvmNode4 extends ScatterGatherRoutedRemoteActorSpec + +class ScatterGatherRoutedRemoteActorSpec extends MultiNodeSpec(ScatterGatherRoutedRemoteActorMultiJvmSpec) + with ImplicitSender with DefaultTimeout { + import ScatterGatherRoutedRemoteActorMultiJvmSpec._ + + def initialParticipants = 4 + + "A new remote actor configured with a ScatterGatherFirstCompleted router" must { + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { + + runOn(first, second, third) { + testConductor.enter("start", "broadcast-end", "end", "done") + } + + runOn(fourth) { + testConductor.enter("start") + val actor = system.actorOf(Props[SomeActor].withRouter(ScatterGatherFirstCompletedRouter(within = 10 seconds)), "service-hello") + actor.isInstanceOf[RoutedActorRef] must be(true) + + val connectionCount = 3 + val iterationCount = 10 + + for (i ← 0 until iterationCount) { + for (k ← 0 until connectionCount) { + actor ! 
"hit" + } + } + + val replies = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { + case ref: ActorRef ⇒ (ref.path.address, 1) + }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + case (m, (n, c)) ⇒ m + (n -> (m(n) + c)) + } + + testConductor.enter("broadcast-end") + actor ! Broadcast("end") + + testConductor.enter("end") + replies.values.sum must be === connectionCount * iterationCount + + // shut down the actor before we let the other node(s) shut down so we don't try to send + // "Terminate" to a shut down node + system.stop(actor) + testConductor.enter("done") + } + } + } +} diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index df6388d562..1d58b48a00 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -3,9 +3,7 @@ */ package akka.remote.testconductor -import akka.remote.AkkaRemoteSpec import com.typesafe.config.ConfigFactory -import akka.remote.AbstractRemoteActorMultiJvmSpec import akka.actor.Props import akka.actor.Actor import akka.dispatch.Await @@ -20,7 +18,7 @@ import akka.remote.testkit.MultiNodeConfig object TestConductorMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false)) - + val master = role("master") val slave = role("slave") } diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala deleted file mode 100644 index ca4313b56b..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala +++ /dev/null @@ -1,29 +0,0 @@ -package akka.remote - -import com.typesafe.config.{Config, ConfigFactory} -import akka.actor.Address 
- -trait AbstractRemoteActorMultiJvmSpec { - def NrOfNodes: Int - def commonConfig: Config - - def PortRangeStart = 1990 - def NodeRange = 1 to NrOfNodes - - private[this] val remotes: IndexedSeq[String] = { - val nodesOpt = Option(AkkaRemoteSpec.testNodes).map(_.split(",").toIndexedSeq) - nodesOpt getOrElse IndexedSeq.fill(NrOfNodes)("localhost") - } - - val nodeConfigs = (NodeRange.toList zip remotes) map { - case (port, host) => - ConfigFactory.parseString(""" - akka { - remote.netty.hostname="%s" - remote.netty.port = "%d" - }""".format(host, PortRangeStart + port, port)) withFallback commonConfig - } - - def akkaSpec(port: Int) = "AkkaRemoteSpec@%s:%d".format(remotes(port), PortRangeStart + 1 + port) - def akkaURIs(count: Int): String = 0 until count map {idx => "\"akka://" + akkaSpec(idx) + "\""} mkString "," -} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/AkkaRemoteSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/AkkaRemoteSpec.scala deleted file mode 100644 index c1a2109bc0..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/AkkaRemoteSpec.scala +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.remote - -import akka.testkit._ -import com.typesafe.config.Config -import com.typesafe.config.ConfigFactory -import com.typesafe.config.ConfigParseOptions -import com.typesafe.config.ConfigResolveOptions -import java.io.File -import akka.actor.{ActorSystem, ActorSystemImpl} - -object AkkaRemoteSpec { - private def configParseOptions = ConfigParseOptions.defaults.setAllowMissing(false) - - val testConf: Config = { - System.getProperty("akka.config") match { - case null ⇒ AkkaSpec.testConf - case location ⇒ - ConfigFactory.systemProperties - .withFallback(ConfigFactory.parseFileAnySyntax(new File(location), configParseOptions)) - .withFallback(ConfigFactory.defaultReference(ActorSystem.findClassLoader())).resolve(ConfigResolveOptions.defaults) - } - } - - val testNodes = System.getProperty("test.hosts") -} - -abstract class AkkaRemoteSpec(config: Config) - extends AkkaSpec(config.withFallback(AkkaRemoteSpec.testConf)) - with MultiJvmSync diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala b/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala deleted file mode 100644 index e99fca2a45..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.remote - -trait Barrier { - def await() = { enter(); leave() } - - def apply(body: ⇒ Unit) { - enter() - body - leave() - } - - def enter(): Unit - - def leave(): Unit -} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/DirectRoutedRemoteActorMultiJvmSpec.scala deleted file mode 100644 index 3026ddd613..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/DirectRoutedRemoteActorMultiJvmSpec.scala +++ /dev/null @@ -1,64 +0,0 @@ -package akka.remote - -import akka.actor.{ Actor, ActorRef, Props } -import akka.testkit._ -import akka.dispatch.Await -import akka.pattern.ask - -object DirectRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ sender ! self - } - } - - import com.typesafe.config.ConfigFactory - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = "WARNING" - actor { - provider = "akka.remote.RemoteActorRefProvider" - deployment { - /service-hello.remote = %s - } - } - }""" format akkaURIs(1)) -} - -import DirectRoutedRemoteActorMultiJvmSpec._ - -class DirectRoutedRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(nodeConfigs(0)) { - import DirectRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - - "___" must { - "___" in { - barrier("start") - barrier("done") - } - } -} - -class DirectRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(nodeConfigs(1)) with DefaultTimeout { - - import DirectRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - - "A new remote actor configured with a Direct router" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { - barrier("start") - - val actor = system.actorOf(Props[SomeActor], "service-hello") - actor.isInstanceOf[RemoteActorRef] must be(true) - - 
Await.result(actor ? "identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0)) - - // shut down the actor before we let the other node(s) shut down so we don't try to send - // "Terminate" to a shut down node - system.stop(actor) - barrier("done") - } - } -} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/FileBasedBarrier.scala b/akka-remote/src/multi-jvm/scala/akka/remote/FileBasedBarrier.scala deleted file mode 100644 index a1773fc86e..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/FileBasedBarrier.scala +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.remote - -import akka.util.duration._ -import akka.util.Duration -import System.{ currentTimeMillis ⇒ now } - -import java.io.File - -class BarrierTimeoutException(message: String) extends RuntimeException(message) - -object FileBasedBarrier { - val HomeDir = ".multi-jvm" - val DefaultTimeout = 30.seconds - val DefaultSleep = 100.millis -} - -import FileBasedBarrier._ - -class FileBasedBarrier( - name: String, - count: Int, - group: String, - node: String, - timeout: Duration = FileBasedBarrier.DefaultTimeout, - sleep: Duration = FileBasedBarrier.DefaultSleep) extends Barrier { - - val barrierDir = { - val dir = new File(new File(new File(FileBasedBarrier.HomeDir), group), name) - dir.mkdirs() - dir - } - - val nodeFile = new File(barrierDir, node) - - val readyFile = new File(barrierDir, "ready") - - def enter() = { - createNode() - if (nodesPresent >= count) createReady() - val ready = waitFor(readyFile.exists, timeout, sleep) - if (!ready) expire("entry") - } - - def leave() = { - removeNode() - val empty = waitFor(nodesPresent <= 1, timeout, sleep) - removeReady() - if (!empty) expire("exit") - } - - def nodesPresent = barrierDir.list.size - - def createNode() = nodeFile.createNewFile() - - def removeNode() = nodeFile.delete() - - def createReady() = readyFile.createNewFile() - - def removeReady() = 
readyFile.delete() - - def waitFor(test: ⇒ Boolean, timeout: Duration, sleep: Duration): Boolean = { - val start = now - val limit = start + timeout.toMillis - var passed = test - var expired = false - while (!passed && !expired) { - if (now > limit) expired = true - else { - Thread.sleep(sleep.toMillis) - passed = test - } - } - passed - } - - def expire(barrier: String) = { - throw new BarrierTimeoutException("Timeout (%s) waiting for %s barrier" format (timeout, barrier)) - } -} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/MultiJvmSync.scala b/akka-remote/src/multi-jvm/scala/akka/remote/MultiJvmSync.scala deleted file mode 100644 index c1e6080e6e..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/MultiJvmSync.scala +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.remote - -import akka.testkit.AkkaSpec -import akka.util.Duration - -trait MultiJvmSync extends AkkaSpec { - def nodes: Int - - override def atStartup() = { - onStart() - MultiJvmSync.start(getClass.getName, nodes) - } - - def onStart() {} - - override def atTermination() = { - MultiJvmSync.end(getClass.getName, nodes) - onEnd() - } - - def onEnd() {} - - def barrier(name: String, timeout: Duration = FileBasedBarrier.DefaultTimeout) = { - MultiJvmSync.barrier(name, nodes, getClass.getName, timeout) - } -} - -object MultiJvmSync { - val TestMarker = "MultiJvm" - val StartBarrier = "multi-jvm-start" - val EndBarrier = "multi-jvm-end" - - def start(className: String, count: Int) = barrier(StartBarrier, count, className) - - def end(className: String, count: Int) = barrier(EndBarrier, count, className) - - def barrier(name: String, count: Int, className: String, timeout: Duration = FileBasedBarrier.DefaultTimeout) = { - val Array(testName, nodeName) = className split TestMarker - val barrier = if (AkkaRemoteSpec.testNodes eq null) - new FileBasedBarrier(name, count, testName, nodeName, timeout) - else - new ZkClient.ZkBarrier(nodeName, 
count, "/" + testName + "_" + name) - barrier.await() - } -} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/NewRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/NewRemoteActorMultiJvmSpec.scala deleted file mode 100644 index c3dc1ae9de..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/NewRemoteActorMultiJvmSpec.scala +++ /dev/null @@ -1,65 +0,0 @@ -package akka.remote - -import akka.actor.{ Actor, ActorRef, Props } -import akka.testkit._ -import akka.dispatch.Await -import akka.pattern.ask - -object NewRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ sender ! self - } - } - - import com.typesafe.config.ConfigFactory - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = "WARNING" - actor { - provider = "akka.remote.RemoteActorRefProvider" - deployment { - /service-hello.remote = %s - } - } - }""" format akkaURIs(1)) -} - -class NewRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(NewRemoteActorMultiJvmSpec.nodeConfigs(0)) { - - import NewRemoteActorMultiJvmSpec._ - - val nodes = NrOfNodes - - "___" must { - "___" in { - barrier("start") - - barrier("done") - } - } -} - -class NewRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(NewRemoteActorMultiJvmSpec.nodeConfigs(1)) with DefaultTimeout { - - import NewRemoteActorMultiJvmSpec._ - - val nodes = NrOfNodes - - "A new remote actor" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { - barrier("start") - - val actor = system.actorOf(Props[SomeActor], "service-hello") - Await.result(actor ? 
"identify", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort must equal(akkaSpec(0)) - - // shut down the actor before we let the other node(s) shut down so we don't try to send - // "Terminate" to a shut down node - system.stop(actor) - barrier("done") - } - } -} - diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/RandomRoutedRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/RandomRoutedRemoteActorMultiJvmSpec.scala deleted file mode 100644 index 2b2b233dee..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/RandomRoutedRemoteActorMultiJvmSpec.scala +++ /dev/null @@ -1,110 +0,0 @@ -package akka.remote - -import akka.actor.{ Actor, ActorRef, Props } -import akka.routing._ -import akka.testkit.DefaultTimeout -import akka.dispatch.Await -import akka.pattern.ask - -object RandomRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 4 - class SomeActor extends Actor with Serializable { - def receive = { - case "hit" ⇒ sender ! 
self - case "end" ⇒ context.stop(self) - } - } - - import com.typesafe.config.ConfigFactory - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = "WARNING" - actor { - provider = "akka.remote.RemoteActorRefProvider" - deployment { - /service-hello.router = "random" - /service-hello.nr-of-instances = %d - /service-hello.target.nodes = [%s] - } - } - }""" format (3, akkaURIs(3))) -} - -class RandomRoutedRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(RandomRoutedRemoteActorMultiJvmSpec.nodeConfigs(0)) { - import RandomRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class RandomRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(RandomRoutedRemoteActorMultiJvmSpec.nodeConfigs(1)) { - import RandomRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class RandomRoutedRemoteActorMultiJvmNode3 extends AkkaRemoteSpec(RandomRoutedRemoteActorMultiJvmSpec.nodeConfigs(2)) { - import RandomRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class RandomRoutedRemoteActorMultiJvmNode4 extends AkkaRemoteSpec(RandomRoutedRemoteActorMultiJvmSpec.nodeConfigs(3)) with DefaultTimeout { - import RandomRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "A new remote actor configured with a Random router" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { - - barrier("start") - val actor = system.actorOf(Props[SomeActor].withRouter(RandomRouter()), "service-hello") - actor.isInstanceOf[RoutedActorRef] must be(true) - - val connectionCount = NrOfNodes - 1 - val iterationCount = 10 - - var replies = Map( - akkaSpec(0) 
-> 0, - akkaSpec(1) -> 0, - akkaSpec(2) -> 0) - - for (i ← 0 until iterationCount) { - for (k ← 0 until connectionCount) { - val nodeName = Await.result(actor ? "hit", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort - replies = replies + (nodeName -> (replies(nodeName) + 1)) - } - } - - barrier("broadcast-end") - actor ! Broadcast("end") - - barrier("end") - replies.values foreach { _ must be > (0) } - - // shut down the actor before we let the other node(s) shut down so we don't try to send - // "Terminate" to a shut down node - system.stop(actor) - barrier("done") - } - } -} - diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/RoundRobinRoutedRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/RoundRobinRoutedRemoteActorMultiJvmSpec.scala deleted file mode 100644 index c84aa46366..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/RoundRobinRoutedRemoteActorMultiJvmSpec.scala +++ /dev/null @@ -1,112 +0,0 @@ -package akka.remote - -import akka.actor.{ Actor, ActorRef, Props } -import akka.routing._ -import akka.testkit.DefaultTimeout -import akka.dispatch.Await -import akka.pattern.ask - -object RoundRobinRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 4 - - class SomeActor extends Actor with Serializable { - def receive = { - case "hit" ⇒ sender ! 
self - case "end" ⇒ context.stop(self) - } - } - - import com.typesafe.config.ConfigFactory - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = "WARNING" - actor { - provider = "akka.remote.RemoteActorRefProvider" - deployment { - /service-hello.router = "round-robin" - /service-hello.nr-of-instances = %d - /service-hello.target.nodes = [%s] - } - } - }""" format (3, akkaURIs(3))) -} - -class RoundRobinRoutedRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(RoundRobinRoutedRemoteActorMultiJvmSpec.nodeConfigs(0)) { - import RoundRobinRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class RoundRobinRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(RoundRobinRoutedRemoteActorMultiJvmSpec.nodeConfigs(1)) { - import RoundRobinRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class RoundRobinRoutedRemoteActorMultiJvmNode3 extends AkkaRemoteSpec(RoundRobinRoutedRemoteActorMultiJvmSpec.nodeConfigs(2)) { - import RoundRobinRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class RoundRobinRoutedRemoteActorMultiJvmNode4 extends AkkaRemoteSpec(RoundRobinRoutedRemoteActorMultiJvmSpec.nodeConfigs(3)) with DefaultTimeout { - import RoundRobinRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "A new remote actor configured with a RoundRobin router" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { - - barrier("start") - val actor = system.actorOf(Props[SomeActor].withRouter(RoundRobinRouter()), "service-hello") - actor.isInstanceOf[RoutedActorRef] must be(true) - - val connectionCount = NrOfNodes - 1 - 
val iterationCount = 10 - - var replies = Map( - akkaSpec(0) -> 0, - akkaSpec(1) -> 0, - akkaSpec(2) -> 0) - - for (i ← 0 until iterationCount) { - for (k ← 0 until connectionCount) { - val nodeName = Await.result(actor ? "hit", timeout.duration).asInstanceOf[ActorRef].path.address.hostPort - - replies = replies + (nodeName -> (replies(nodeName) + 1)) - } - } - - barrier("broadcast-end") - actor ! Broadcast("end") - - barrier("end") - replies.values foreach { _ must be(10) } - - // shut down the actor before we let the other node(s) shut down so we don't try to send - // "Terminate" to a shut down node - system.stop(actor) - barrier("done") - } - } -} - diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/ScatterGatherRoutedRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/ScatterGatherRoutedRemoteActorMultiJvmSpec.scala deleted file mode 100644 index b618300ff2..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/ScatterGatherRoutedRemoteActorMultiJvmSpec.scala +++ /dev/null @@ -1,107 +0,0 @@ -package akka.remote - -import akka.actor.{ Actor, ActorRef, Props } -import akka.routing._ -import akka.testkit._ -import akka.util.duration._ - -object ScatterGatherRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { - override def NrOfNodes = 4 - class SomeActor extends Actor with Serializable { - def receive = { - case "hit" ⇒ sender ! 
self - case "end" ⇒ context.stop(self) - } - } - - import com.typesafe.config.ConfigFactory - override def commonConfig = ConfigFactory.parseString(""" - akka { - loglevel = "WARNING" - actor { - provider = "akka.remote.RemoteActorRefProvider" - deployment { - /service-hello.router = "scatter-gather" - /service-hello.nr-of-instances = %d - /service-hello.target.nodes = [%s] - } - } - }""" format (3, akkaURIs(3))) -} - -class ScatterGatherRoutedRemoteActorMultiJvmNode1 extends AkkaRemoteSpec(ScatterGatherRoutedRemoteActorMultiJvmSpec.nodeConfigs(0)) { - import ScatterGatherRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class ScatterGatherRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(ScatterGatherRoutedRemoteActorMultiJvmSpec.nodeConfigs(1)) { - import ScatterGatherRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class ScatterGatherRoutedRemoteActorMultiJvmNode3 extends AkkaRemoteSpec(ScatterGatherRoutedRemoteActorMultiJvmSpec.nodeConfigs(2)) { - import ScatterGatherRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "___" must { - "___" in { - barrier("start") - barrier("broadcast-end") - barrier("end") - barrier("done") - } - } -} - -class ScatterGatherRoutedRemoteActorMultiJvmNode4 extends AkkaRemoteSpec(ScatterGatherRoutedRemoteActorMultiJvmSpec.nodeConfigs(3)) - with DefaultTimeout with ImplicitSender { - import ScatterGatherRoutedRemoteActorMultiJvmSpec._ - val nodes = NrOfNodes - "A new remote actor configured with a ScatterGather router" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { - - barrier("start") - val actor = system.actorOf(Props[SomeActor].withRouter(ScatterGatherFirstCompletedRouter(within = 10 seconds)), 
"service-hello") - actor.isInstanceOf[RoutedActorRef] must be(true) - - val connectionCount = NrOfNodes - 1 - val iterationCount = 10 - - for (i ← 0 until iterationCount) { - for (k ← 0 until connectionCount) { - actor ! "hit" - } - } - - val replies = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { - case ref: ActorRef ⇒ (ref.path.address.hostPort, 1) - }).foldLeft(Map(akkaSpec(0) -> 0, akkaSpec(1) -> 0, akkaSpec(2) -> 0)) { - case (m, (n, c)) ⇒ m + (n -> (m(n) + c)) - } - - barrier("broadcast-end") - actor ! Broadcast("end") - - barrier("end") - replies.values.sum must be === connectionCount * iterationCount - - barrier("done") - } - } -} - diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala b/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala deleted file mode 100644 index 611478babb..0000000000 --- a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright (C) 2011-2012 Typesafe - */ -package akka.remote - -import org.apache.zookeeper._ -import ZooDefs.Ids - -object ZkClient extends Watcher { - // Don't forget to close! - lazy val zk: ZooKeeper = { - val remoteNodes = AkkaRemoteSpec.testNodes split ',' - - // ZkServers are configured to listen on a specific port. 
- val connectString = remoteNodes map (_+":2181") mkString "," - new ZooKeeper(connectString, 3000, this) - } - - def process(ev: WatchedEvent) { - synchronized { notify() } - } - - class ZkBarrier(name: String, count: Int, root: String) extends Barrier { - @annotation.tailrec - private def waitForServer() { - // SI-1672 - val r = try { - zk.exists("/", false) - true - } catch { - case _: KeeperException.ConnectionLossException => - Thread.sleep(10000) - false - } - if (!r) waitForServer() - } - waitForServer() - - try zk.create(root, Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT) catch { - case _: KeeperException.NodeExistsException => - } - - val timeoutMs = 300*1000 - - private def block(num: Int) { - val start = System.currentTimeMillis - while (true) { - if (System.currentTimeMillis - start > timeoutMs) throw new InterruptedException("Timed out blocking in zk") - ZkClient.this.synchronized { - val children = zk.getChildren(root, true) - if (children.size < num) { - ZkClient.this.wait(timeoutMs) - } else - return - } - } - } - - def enter() { - zk.create(root + "/" + name, Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL) - block(count) - } - - final def leave() { - zk.create(root + "/" + name + ".leave", Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL) - block(2*count) - } - } - - def barrier(name: String, count: Int, root: String) = new ZkBarrier(name, count, root) -} diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 7962bb6a29..e427255782 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -82,23 +82,17 @@ object AkkaBuild extends Build { id = "akka-remote", base = file("akka-remote"), dependencies = Seq(actor, actorTests % "test->test", testkit % "test->test"), - settings = defaultSettings ++ multiJvmSettings ++ OSGi.remote ++ Seq( + settings = defaultSettings ++ OSGi.remote ++ Seq( libraryDependencies ++= Dependencies.remote, // disable parallel tests - parallelExecution in Test := false, - 
extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => - (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq - }, - scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, - jvmOptions in MultiJvm := defaultMultiJvmOptions, - test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } + parallelExecution in Test := false ) - ) configs (MultiJvm) + ) lazy val remoteTests = Project( id = "akka-remote-tests", base = file("akka-remote-tests"), - dependencies = Seq(remote % "compile;test->test;multi-jvm->multi-jvm", actorTests % "test->test", testkit % "test->test"), + dependencies = Seq(remote, actorTests % "test->test", testkit % "test->test"), settings = defaultSettings ++ multiJvmSettings ++ Seq( // disable parallel tests parallelExecution in Test := false, @@ -415,8 +409,7 @@ object Dependencies { ) val remote = Seq( - netty, protobuf, Test.junit, Test.scalatest, - Test.zookeeper, Test.log4j // needed for ZkBarrier in multi-jvm tests + netty, protobuf, Test.junit, Test.scalatest ) val cluster = Seq(Test.junit, Test.scalatest) @@ -482,8 +475,6 @@ object Dependency { val scalatest = "org.scalatest" % "scalatest_2.9.1" % V.Scalatest % "test" // ApacheV2 val scalacheck = "org.scala-tools.testing" % "scalacheck_2.9.1" % "1.9" % "test" // New BSD val specs2 = "org.specs2" % "specs2_2.9.1" % "1.9" % "test" // Modified BSD / ApacheV2 - val zookeeper = "org.apache.hadoop.zookeeper" % "zookeeper" % "3.4.0" % "test" // ApacheV2 - val log4j = "log4j" % "log4j" % "1.2.14" % "test" // ApacheV2 } } From 9936fab6203b56f73c585b7ef5f5d1632477022b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 10:34:41 +0200 Subject: [PATCH 204/538] Added Typesafe Config's license in AkkaBuild --- project/AkkaBuild.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 8ec0c2dd48..0b5744c5cc 100644 --- 
a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -447,7 +447,7 @@ object Dependency { } // Compile - val config = "com.typesafe" % "config" % "0.4.1" + val config = "com.typesafe" % "config" % "0.4.1" // ApacheV2 val camelCore = "org.apache.camel" % "camel-core" % V.Camel // ApacheV2 val netty = "io.netty" % "netty" % V.Netty // ApacheV2 val protobuf = "com.google.protobuf" % "protobuf-java" % V.Protobuf // New BSD From 88b611ebcb6e1917caf7055333d137c6f1c43ad3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 29 May 2012 10:38:42 +0200 Subject: [PATCH 205/538] Enable running tests on multiple nodes with -Dakka.test.multi-node=true --- project/AkkaBuild.scala | 25 +++++++++++++------------ project/plugins.sbt | 2 +- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 7962bb6a29..c0e09d9d91 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -7,7 +7,7 @@ package akka import sbt._ import sbt.Keys._ import com.typesafe.sbtmultijvm.MultiJvmPlugin -import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions } +import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions, multiNodeTest } import com.typesafe.sbtscalariform.ScalariformPlugin import com.typesafe.sbtscalariform.ScalariformPlugin.ScalariformKeys import com.typesafe.sbtosgi.OsgiPlugin.{ OsgiKeys, osgiSettings } @@ -90,8 +90,7 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, - jvmOptions in MultiJvm := defaultMultiJvmOptions, - test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } + jvmOptions in MultiJvm := defaultMultiJvmOptions ) ) configs (MultiJvm) @@ -106,8 +105,7 @@ object AkkaBuild extends Build { (name: String) => (src ** 
(name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, - jvmOptions in MultiJvm := defaultMultiJvmOptions, - test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } + jvmOptions in MultiJvm := defaultMultiJvmOptions ) ) configs (MultiJvm) @@ -123,8 +121,7 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, - jvmOptions in MultiJvm := defaultMultiJvmOptions, - test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } + jvmOptions in MultiJvm := defaultMultiJvmOptions ) ) configs (MultiJvm) @@ -340,8 +337,8 @@ object AkkaBuild extends Build { lazy val defaultMultiJvmScalatestOptions: Seq[String] = { val excludeTags = (useExcludeTestTags -- useIncludeTestTags).toSeq Seq("-r", "org.scalatest.akka.QuietReporter") ++ - (if (excludeTags.isEmpty) Seq.empty else Seq("-l", excludeTags.mkString(" "))) ++ - (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", useOnlyTestTags.mkString(" "))) + (if (excludeTags.isEmpty) Seq.empty else Seq("-l", excludeTags.mkString("\"", " ", "\""))) ++ + (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", useOnlyTestTags.mkString("\"", " ", "\""))) } lazy val defaultSettings = baseSettings ++ formatSettings ++ Seq( @@ -367,12 +364,12 @@ object AkkaBuild extends Build { // add arguments for tests excluded by tag - includes override excludes (opposite to scalatest) testOptions in Test <++= (excludeTestTags, includeTestTags) map { (excludes, includes) => val tags = (excludes -- includes) - if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-l", tags.mkString(" "))) + if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-l", tags.mkString("\"", " ", "\""))) }, // add arguments for running only tests by tag testOptions in Test <++= onlyTestTags map { tags => - if 
(tags.isEmpty) Seq.empty else Seq(Tests.Argument("-n", tags.mkString(" "))) + if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-n", tags.mkString("\"", " ", "\""))) }, // show full stack traces @@ -394,7 +391,11 @@ object AkkaBuild extends Build { lazy val multiJvmSettings = MultiJvmPlugin.settings ++ inConfig(MultiJvm)(ScalariformPlugin.scalariformSettings) ++ Seq( compileInputs in MultiJvm <<= (compileInputs in MultiJvm) dependsOn (ScalariformKeys.format in MultiJvm), - ScalariformKeys.preferences in MultiJvm := formattingPreferences + ScalariformKeys.preferences in MultiJvm := formattingPreferences, + if (java.lang.Boolean.getBoolean("akka.test.multi-node")) + test in Test <<= ((test in Test), (multiNodeTest in MultiJvm)) map { case x => x } + else + test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } ) } diff --git a/project/plugins.sbt b/project/plugins.sbt index 45c8e41913..754b9eefa2 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,7 +1,7 @@ resolvers += Classpaths.typesafeResolver -addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-M1") +addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-M2") addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.4.0") From 66efe504da5463c7292be6d364b89711a67652ae Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 29 May 2012 11:41:22 +0200 Subject: [PATCH 206/538] Workaround for Initialization race in MultiNodeSpec, see #2143 --- .../testconductor/TestConductorSpec.scala | 20 +++++++++---------- .../akka/remote/testkit/MultiNodeSpec.scala | 17 ++++++++++++++-- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index df6388d562..1ae78ba0da 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -20,7 +20,7 @@ import akka.remote.testkit.MultiNodeConfig object TestConductorMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false)) - + val master = role("master") val slave = role("slave") } @@ -34,19 +34,19 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im def initialParticipants = 2 - runOn(master) { - system.actorOf(Props(new Actor { - def receive = { - case x ⇒ testActor ! x; sender ! x - } - }), "echo") - } - - val echo = system.actorFor(node(master) / "user" / "echo") + lazy val echo = system.actorFor(node(master) / "user" / "echo") "A TestConductor" must { "enter a barrier" in { + runOn(master) { + system.actorOf(Props(new Actor { + def receive = { + case x ⇒ testActor ! x; sender ! x + } + }), "echo") + } + testConductor.enter("name") } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 8217120fd8..88c77f07fc 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -14,6 +14,7 @@ import com.typesafe.config.ConfigFactory import akka.dispatch.Await.Awaitable import akka.dispatch.Await import akka.util.Duration +import akka.util.NonFatal import akka.actor.ActorPath import akka.actor.RootActorPath import akka.remote.testconductor.RoleName @@ -214,8 +215,20 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: val deployString = (str /: replacements) { case (base, r @ Replacement(tag, _)) ⇒ base.indexOf(tag) match { - case -1 ⇒ base - case start ⇒ base.replace(tag, r.addr) + case -1 ⇒ base + case start ⇒ + val replaceWith = try + r.addr + catch { + case NonFatal(e) ⇒ + // might happen if all test cases are ignored (excluded) and + // controller node is 
finished/exited before r.addr is run + // on the other nodes + val unresolved = "akka://unresolved-replacement-" + r.role.name + log.warning(unresolved + " due to: " + e.getMessage) + unresolved + } + base.replace(tag, replaceWith) } } import scala.collection.JavaConverters._ From dabe0a87e5148f31b48d590a99d238e57e1dddc5 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 12:21:58 +0200 Subject: [PATCH 207/538] Switching to logging warnings instead of throwing exceptions in untrustedMode --- .../src/main/scala/akka/remote/RemoteTransport.scala | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index d912d1d878..7cb622ab00 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -287,13 +287,10 @@ abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: Re case l: LocalRef ⇒ if (provider.remoteSettings.LogReceive) log.debug("received local message {}", remoteMessage) remoteMessage.payload match { - case msg: SystemMessage ⇒ - if (useUntrustedMode) - throw new SecurityException("RemoteModule server is operating is untrusted mode, can not send system message") - else l.sendSystemMessage(msg) - case _: AutoReceivedMessage if (useUntrustedMode) ⇒ - throw new SecurityException("RemoteModule server is operating is untrusted mode, can not pass on a AutoReceivedMessage to the remote actor") - case m ⇒ l.!(m)(remoteMessage.sender) + case _: SystemMessage if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound system message") + case _: AutoReceivedMessage if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound AutoReceivedMessage") + case msg: SystemMessage ⇒ l.sendSystemMessage(msg) + case msg ⇒ l.!(msg)(remoteMessage.sender) } case r: RemoteRef ⇒ if 
(provider.remoteSettings.LogReceive) log.debug("received remote-destined message {}", remoteMessage) From c5e6a3204c6b2de108e04637128cd4ae09eef765 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 12:30:17 +0200 Subject: [PATCH 208/538] Adding a FIXME for handleSupervisorFailing in ActorCell --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 0955595640..d4d5239e84 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -725,7 +725,7 @@ private[akka] class ActorCell( dispatcher.reportFailure(new LogEventException(Error(e, self.path.toString, clazz(actor), "error while creating actor"), e)) // prevent any further messages to be processed until the actor has been restarted dispatcher.suspend(this) - actor.supervisorStrategy.handleSupervisorFailing(self, children) + actor.supervisorStrategy.handleSupervisorFailing(self, children) // FIXME Should this be called on actor or failedActor? clearActorFields(actor) // If this fails, we need to ensure that preRestart isn't called. 
} finally { parent.tell(Failed(new ActorInitializationException(self, "exception during re-creation", e)), self) From 07dd65484922ca7328edf5b6027cfd0c04830157 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Tue, 29 May 2012 12:41:09 +0200 Subject: [PATCH 209/538] Adding a convenience BundleActivator implementation to bootstrap Akka from an OSGi bundle --- .../akka/osgi/ActorSystemActivator.scala | 78 +++++++++++++++++++ .../akka/osgi/ActorSystemActivatorTest.scala | 75 ++++++++++++++++++ project/AkkaBuild.scala | 16 ++++ 3 files changed, 169 insertions(+) create mode 100644 akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala create mode 100644 akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala new file mode 100644 index 0000000000..d63404334a --- /dev/null +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -0,0 +1,78 @@ +package akka.osgi + +import com.typesafe.config.{ Config, ConfigFactory } +import akka.actor.ActorSystem +import org.osgi.framework.{ BundleContext, BundleActivator } +import java.util.Properties + +/** + * Abstract {@link BundleActivator} implementation to bootstrap and configure an {@link ActorSystem} in an + * OSGi environment. 
+ */ +abstract class ActorSystemActivator extends BundleActivator { + + var system: ActorSystem = null + + /** + * Implement this method to add your own actors to the ActorSystem + * + * @param system the ActorSystem that was created by the activator + */ + def configure(system: ActorSystem) + + /** + * Sets up a new ActorSystem and registers it in the OSGi Service Registry + * + * @param context the BundleContext + */ + def start(context: BundleContext) { + system = createActorSystem(context) + configure(system) + + val properties = new Properties(); + properties.put("name", getActorSystemName(context)) + context.registerService(classOf[ActorSystem].getName, system, properties) + } + + /** + * Shuts down the ActorSystem when the bundle is stopped. + * + * @param context the BundleContext + */ + def stop(context: BundleContext) { + if (system != null) { + system.shutdown() + system.shutdown() + system = null + } + } + + /** + * Strategy method to create the ActorSystem. + */ + def createActorSystem(context: BundleContext) = + ActorSystem(getActorSystemName(context), getActorSystemConfig(context), getClass.getClassLoader) + + + /** + * Strategy method to create the Config for the ActorSystem, ensuring that the default/reference configuration is + * loaded from the akka-actor bundle. + */ + def getActorSystemConfig(context: BundleContext): Config = { + val reference = ConfigFactory.defaultReference(classOf[ActorSystem].getClassLoader) + ConfigFactory.load(getClass.getClassLoader).withFallback(reference) + } + + /** + * Strategy method to determine the ActorSystem name - override this method to define the ActorSytem name yourself. + * + * The default implementation will use 'bundle--ActorSystem' where matches the bundle id for the containing bundle. 
+ * + * @param context the BundleContext + * @return the ActorSystem name + */ + def getActorSystemName(context: BundleContext): String = { + "bundle-%s-ActorSystem".format(context.getBundle().getBundleId) + } + +} diff --git a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala new file mode 100644 index 0000000000..ffcc3cc0e7 --- /dev/null +++ b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala @@ -0,0 +1,75 @@ +package akka.osgi + +import java.util.{ ServiceLoader, HashMap } +import de.kalpatec.pojosr.framework.launch.{ ClasspathScanner, PojoServiceRegistryFactory } +import org.scalatest.FlatSpec +import org.osgi.framework.BundleContext +import akka.actor.{ Actor, Props, ActorSystem } +import akka.pattern.ask +import akka.dispatch.Await +import akka.util.duration._ +import akka.util.Timeout + +/** + * Test cases for {@link ActorSystemActivator} + */ +class ActorSystemActivatorTest extends FlatSpec { + + abstract class TestMessage + + case object Ping extends TestMessage + case object Pong extends TestMessage + + class PongActor extends Actor { + def receive = { + case Ping ⇒ + sender ! 
Pong + } + } + + lazy val context: BundleContext = { + val config = new HashMap[String, AnyRef](); + config.put(PojoServiceRegistryFactory.BUNDLE_DESCRIPTORS, new ClasspathScanner().scanForBundles()); + + val loader = ServiceLoader.load(classOf[PojoServiceRegistryFactory]); + + val registry = loader.iterator().next().newPojoServiceRegistry(config); + registry.getBundleContext + } + + val activator = new ActorSystemActivator { + def configure(system: ActorSystem) { + system.actorOf(Props(new PongActor), name = "pong") + } + } + + "ActorSystemActivator" should "start and register the ActorSystem on start" in { + + activator.start(context) + + val reference = context.getServiceReference(classOf[ActorSystem].getName) + assert(reference != null) + + val system = context.getService(reference).asInstanceOf[ActorSystem] + val actor = system.actorFor("/user/pong") + + implicit val timeout = Timeout(5 seconds) + val future = actor ? Ping + val result = Await.result(future, timeout.duration) + assert(result != null) + } + + it should "stop the ActorSystem on bundle stop" in { + val reference = context.getServiceReference(classOf[ActorSystem].getName) + assert(reference != null) + + val system = context.getService(reference).asInstanceOf[ActorSystem] + assert(!system.isTerminated) + + activator.stop(context) + + system.awaitTermination() + assert(system.isTerminated) + } + +} diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index dbe9fbae9e..f0e6446879 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -209,6 +209,15 @@ object AkkaBuild extends Build { ) ) + lazy val osgi = Project( + id = "akka-osgi", + base = file("akka-osgi"), + dependencies = Seq(actor), + settings = defaultSettings ++ OSGi.osgi ++ Seq( + libraryDependencies ++= Dependencies.osgi + ) + ) + lazy val akkaSbtPlugin = Project( id = "akka-sbt-plugin", base = file("akka-sbt-plugin"), @@ -419,6 +428,8 @@ object Dependencies { val camel = Seq(camelCore, Test.scalatest, Test.junit, 
Test.mockito) + val osgi = Seq(osgiCore, Test.pojosr, Test.scalatest, Test.junit) + val tutorials = Seq(Test.scalatest, Test.junit) val docs = Seq(Test.scalatest, Test.junit, Test.specs2) @@ -434,6 +445,7 @@ object Dependency { val Camel = "2.8.0" val Logback = "0.9.28" val Netty = "3.3.0.Final" + val OSGi = "4.2.0" val Protobuf = "2.4.1" val ScalaStm = "0.5" val Scalatest = "1.6.1" @@ -444,6 +456,7 @@ object Dependency { val camelCore = "org.apache.camel" % "camel-core" % V.Camel // ApacheV2 val netty = "io.netty" % "netty" % V.Netty // ApacheV2 + val osgiCore = "org.osgi" % "org.osgi.core" % V.OSGi // ApacheV2 val protobuf = "com.google.protobuf" % "protobuf-java" % V.Protobuf // New BSD val scalaStm = "org.scala-tools" % "scala-stm_2.9.1" % V.ScalaStm // Modified BSD (Scala) val slf4jApi = "org.slf4j" % "slf4j-api" % V.Slf4j // MIT @@ -463,6 +476,7 @@ object Dependency { val junit = "junit" % "junit" % "4.5" % "test" // Common Public License 1.0 val logback = "ch.qos.logback" % "logback-classic" % V.Logback % "test" // EPL 1.0 / LGPL 2.1 val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" // MIT + val pojosr = "com.googlecode.pojosr" % "de.kalpatec.pojosr.framework" % "0.1.8" % "test" // ApacheV2 val scalatest = "org.scalatest" % "scalatest_2.9.1" % V.Scalatest % "test" // ApacheV2 val scalacheck = "org.scala-tools.testing" % "scalacheck_2.9.1" % "1.9" % "test" // New BSD val specs2 = "org.specs2" % "specs2_2.9.1" % "1.9" % "test" // Modified BSD / ApacheV2 @@ -487,6 +501,8 @@ object OSGi { val mailboxesCommon = exports(Seq("akka.actor.mailbox.*")) + val osgi = exports(Seq("akka.osgi.*")) + val remote = exports(Seq("akka.remote.*", "akka.routing.*", "akka.serialization.*")) val slf4j = exports(Seq("akka.event.slf4j.*")) From 7456bf595a374719c06dfe60b2a6825ee7cdaa50 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 29 May 2012 12:50:50 +0200 Subject: [PATCH 210/538] Add doc note about using testconductor from constructor, see #2143 --- 
.../test/scala/akka/remote/testkit/MultiNodeSpec.scala | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 88c77f07fc..1745d15b61 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -129,6 +129,14 @@ object MultiNodeSpec { } +/** + * Note: To be able to run tests with everything ignored or excluded by tags + * you must not use `testconductor`, or helper methods that use `testconductor`, + * from the constructor of your test class. Otherwise the controller node might + * be shutdown before other nodes have completed and you will see errors like: + * `AskTimeoutException: sending to terminated ref breaks promises`. Using lazy + * val is fine. + */ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) extends AkkaSpec(_system) { From 06f86e1091cd2e1342d06b4e1969824fa527eaca Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 29 May 2012 13:18:46 +0200 Subject: [PATCH 211/538] Boy scouting based on feedback, see #1935 --- .../akka/remote/NewRemoteActorSpec.scala | 3 +-- .../router/RandomRoutedRemoteActorSpec.scala | 23 +++++++++-------- .../RoundRobinRoutedRemoteActorSpec.scala | 25 ++++++++++--------- .../ScatterGatherRoutedRemoteActorSpec.scala | 18 ++++++------- 4 files changed, 35 insertions(+), 34 deletions(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala index 4342a20178..0f193f7a71 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 
Typesafe Inc. */ -package akka.remote.router +package akka.remote import com.typesafe.config.ConfigFactory @@ -9,7 +9,6 @@ import akka.actor.Actor import akka.actor.ActorRef import akka.actor.Props import akka.pattern.ask -import akka.remote.RemoteActorRef import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala index 87bae463ce..464b683601 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala @@ -6,6 +6,8 @@ package akka.remote.router import akka.actor.Actor import akka.actor.ActorRef import akka.actor.Props +import akka.actor.PoisonPill +import akka.actor.Address import akka.dispatch.Await import akka.pattern.ask import akka.remote.testkit.MultiNodeConfig @@ -14,13 +16,13 @@ import akka.routing.Broadcast import akka.routing.RandomRouter import akka.routing.RoutedActorRef import akka.testkit._ +import akka.util.duration._ object RandomRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { case "hit" ⇒ sender ! self - case "end" ⇒ context.stop(self) } } @@ -64,23 +66,22 @@ class RandomRoutedRemoteActorSpec extends MultiNodeSpec(RandomRoutedRemoteActorM val connectionCount = 3 val iterationCount = 10 - var replies = Map( - node(first).address -> 0, - node(second).address -> 0, - node(third).address -> 0) + for (i ← 0 until iterationCount; k ← 0 until connectionCount) { + actor ! "hit" + } - for (i ← 0 until iterationCount) { - for (k ← 0 until connectionCount) { - val nodeAddress = Await.result(actor ? 
"hit", timeout.duration).asInstanceOf[ActorRef].path.address - replies = replies + (nodeAddress -> (replies(nodeAddress) + 1)) - } + val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { + case ref: ActorRef ⇒ ref.path.address + }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } testConductor.enter("broadcast-end") - actor ! Broadcast("end") + actor ! Broadcast(PoisonPill) testConductor.enter("end") replies.values foreach { _ must be > (0) } + replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala index 48026af375..3442cc08d4 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala @@ -6,6 +6,8 @@ package akka.remote.router import akka.actor.Actor import akka.actor.ActorRef import akka.actor.Props +import akka.actor.PoisonPill +import akka.actor.Address import akka.dispatch.Await import akka.pattern.ask import akka.remote.testkit.MultiNodeConfig @@ -14,13 +16,13 @@ import akka.routing.Broadcast import akka.routing.RoundRobinRouter import akka.routing.RoutedActorRef import akka.testkit._ +import akka.util.duration._ object RoundRobinRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { case "hit" ⇒ sender ! 
self - case "end" ⇒ context.stop(self) } } @@ -64,23 +66,22 @@ class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemo val connectionCount = 3 val iterationCount = 10 - var replies = Map( - node(first).address -> 0, - node(second).address -> 0, - node(third).address -> 0) + for (i ← 0 until iterationCount; k ← 0 until connectionCount) { + actor ! "hit" + } - for (i ← 0 until iterationCount) { - for (k ← 0 until connectionCount) { - val nodeAddress = Await.result(actor ? "hit", timeout.duration).asInstanceOf[ActorRef].path.address - replies = replies + (nodeAddress -> (replies(nodeAddress) + 1)) - } + val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { + case ref: ActorRef ⇒ ref.path.address + }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } testConductor.enter("broadcast-end") - actor ! Broadcast("end") + actor ! 
Broadcast(PoisonPill) testConductor.enter("end") - replies.values foreach { _ must be(10) } + replies.values foreach { _ must be(iterationCount) } + replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala index 7afa86d22e..eab148feeb 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala @@ -15,13 +15,14 @@ import akka.routing.ScatterGatherFirstCompletedRouter import akka.routing.RoutedActorRef import akka.testkit._ import akka.util.duration._ +import akka.actor.PoisonPill +import akka.actor.Address object ScatterGatherRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { class SomeActor extends Actor with Serializable { def receive = { case "hit" ⇒ sender ! self - case "end" ⇒ context.stop(self) } } @@ -65,23 +66,22 @@ class ScatterGatherRoutedRemoteActorSpec extends MultiNodeSpec(ScatterGatherRout val connectionCount = 3 val iterationCount = 10 - for (i ← 0 until iterationCount) { - for (k ← 0 until connectionCount) { - actor ! "hit" - } + for (i ← 0 until iterationCount; k ← 0 until connectionCount) { + actor ! 
"hit" } - val replies = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { - case ref: ActorRef ⇒ (ref.path.address, 1) + val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { + case ref: ActorRef ⇒ ref.path.address }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { - case (m, (n, c)) ⇒ m + (n -> (m(n) + c)) + case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } testConductor.enter("broadcast-end") - actor ! Broadcast("end") + actor ! Broadcast(PoisonPill) testConductor.enter("end") replies.values.sum must be === connectionCount * iterationCount + replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node From e3e391e5aa1b4a6345aed769f3b53457f15ea08c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 14:09:22 +0200 Subject: [PATCH 212/538] Removing DaemonMsgWatch, oh yeah baby. 
However, still no cigar --- .../scala/akka/actor/DeathWatchSpec.scala | 2 +- .../src/main/scala/akka/actor/ActorCell.scala | 116 ++-- .../src/main/scala/akka/actor/ActorRef.scala | 23 +- .../main/scala/akka/pattern/AskSupport.scala | 17 +- .../main/java/akka/remote/RemoteProtocol.java | 640 +----------------- .../src/main/protocol/RemoteProtocol.proto | 10 +- akka-remote/src/main/resources/reference.conf | 2 - .../main/scala/akka/remote/RemoteDaemon.scala | 7 +- .../DaemonMsgWatchSerializer.scala | 43 -- .../DaemonMsgWatchSerializerSpec.scala | 49 -- .../test/scala/akka/testkit/AkkaSpec.scala | 4 +- 11 files changed, 102 insertions(+), 811 deletions(-) delete mode 100644 akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala delete mode 100644 akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index 7a1aa35485..97eec5be01 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -36,7 +36,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout "notify with one Terminated message when an Actor is stopped" in { val terminal = system.actorOf(Props.empty) startWatching(terminal) ! "hallo" - expectMsg("hallo") // this ensures that the DaemonMsgWatch has been received before we send the PoisonPill + expectMsg("hallo") terminal ! 
PoisonPill diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index cb804703ed..c09f40cebd 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -460,28 +460,22 @@ private[akka] class ActorCell( // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ final def stop(): Unit = dispatcher.systemDispatch(this, Terminate()) - override final def watch(subject: ActorRef): ActorRef = { - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - subject match { - case a: InternalActorRef ⇒ - if (!watching.contains(a)) { - watching += a - a.sendSystemMessage(Watch(a, self)) - } - } - subject + override final def watch(subject: ActorRef): ActorRef = subject match { + case a: InternalActorRef ⇒ + if (!watching.contains(a)) { + a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + watching += a + } + a } - override final def unwatch(subject: ActorRef): ActorRef = { - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - subject match { - case a: InternalActorRef ⇒ - if (watching.contains(a)) { - watching -= a - a.sendSystemMessage(Unwatch(a, self)) - } - } - subject + override final def unwatch(subject: ActorRef): ActorRef = subject match { + case a: InternalActorRef ⇒ + if (watching.contains(a)) { + a.sendSystemMessage(Unwatch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + watching -= a + } + a } final def children: Iterable[ActorRef] = childrenRefs.children @@ -579,18 +573,26 @@ private[akka] class ActorCell( def resume(): Unit = if (isNormal) dispatcher resume this - def addWatcher(watcher: ActorRef): Unit = if (!isTerminating) { - if (!watchedBy.contains(watcher)) { - watchedBy += watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " watched by " + 
watcher)) - } + def addWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { + if (watchee == self) { + if (!watchedBy.contains(watcher)) { + watchedBy += watcher + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " watched by " + watcher)) + } + } else if (watcher == self) { + watch(watchee) + } else println("addNOOOOOOOOO: " + watchee + " => " + watcher) } - def remWatcher(watcher: ActorRef): Unit = if (!isTerminating) { - if (watchedBy.contains(watcher)) { - watchedBy -= watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " unwatched by " + watcher)) - } + def remWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { + if (watchee == self) { + if (watchedBy.contains(watcher)) { + watchedBy -= watcher + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " unwatched by " + watcher)) + } + } else if (watcher == self) { + unwatch(watchee) + } else println("remNOOOOOOOOO: " + watchee + " => " + watcher) } def terminate() { @@ -617,17 +619,15 @@ private[akka] class ActorCell( try { message match { - case Create() ⇒ create() - case Recreate(cause) ⇒ recreate(cause) - case Watch(`self`, watcher) ⇒ addWatcher(watcher) - case Watch(watchee, `self`) ⇒ watch(watchee) - case Unwatch(`self`, watcher) ⇒ remWatcher(watcher) - case Unwatch(watchee, `self`) ⇒ unwatch(watchee) - case Suspend() ⇒ suspend() - case Resume() ⇒ resume() - case Terminate() ⇒ terminate() - case Supervise(child) ⇒ supervise(child) - case ChildTerminated(child) ⇒ handleChildTerminated(child) + case Create() ⇒ create() + case Recreate(cause) ⇒ recreate(cause) + case Watch(watchee, watcher) ⇒ addWatcher(watchee, watcher) + case Unwatch(watchee, watcher) ⇒ remWatcher(watchee, watcher) + case Suspend() ⇒ suspend() + case Resume() ⇒ resume() + case Terminate() ⇒ terminate() + case Supervise(child) ⇒ supervise(child) + case 
ChildTerminated(child) ⇒ handleChildTerminated(child) } } catch { case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, "error while processing " + message) @@ -714,27 +714,33 @@ private[akka] class ActorCell( } finally { try { parent.sendSystemMessage(ChildTerminated(self)) + if (!watchedBy.isEmpty) { val terminated = Terminated(self)(stopped = true) - watchedBy foreach { - watcher ⇒ - try watcher.tell(terminated) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } - } + try { + watchedBy foreach { + watcher ⇒ + try watcher.tell(terminated, self) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } + } + } finally watchedBy = emptyActorRefSet } + if (!watching.isEmpty) { - watching foreach { - watchee ⇒ - try watchee.tell(Unwatch(watchee, self)) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } - } + try { + watching foreach { + case watchee: InternalActorRef ⇒ + try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } + } + } finally watching = emptyActorRefSet } if (system.settings.DebugLifecycle) - system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) + system.eventStream.publish(Debug(self.path.toString, clazz(a), "stopped")) } finally { - behaviorStack = ActorCell.behaviorStackPlaceHolder + behaviorStack = behaviorStackPlaceHolder clearActorFields(a) actor = null } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 460bd02076..ad45f6ad09 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -409,13 +409,17 @@ private[akka] object DeadLetterActorRef { * * INTERNAL API */ 
-private[akka] class EmptyLocalActorRef( - override val provider: ActorRefProvider, - override val path: ActorPath, - val eventStream: EventStream) extends MinimalActorRef { +private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, + override val path: ActorPath, + val eventStream: EventStream) extends MinimalActorRef { override def isTerminated(): Boolean = true + override def sendSystemMessage(message: SystemMessage): Unit = message match { + case Watch(maybeThis, watcher) if maybeThis == this ⇒ watcher ! Terminated(this)(stopped = false) + case _ ⇒ + } + override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { case d: DeadLetter ⇒ // do NOT form endless loops, since deadLetters will resend! case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) @@ -428,8 +432,15 @@ private[akka] class EmptyLocalActorRef( * * INTERNAL API */ -private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: ActorPath, _eventStream: EventStream) - extends EmptyLocalActorRef(_provider, _path, _eventStream) { +private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, + _path: ActorPath, + _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { + + override def sendSystemMessage(message: SystemMessage): Unit = message match { + case Watch(maybeThis, watcher) if maybeThis == this ⇒ + case Watch(other, watcher) ⇒ watcher ! 
Terminated(other)(stopped = false) + case _ ⇒ + } override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { case d: DeadLetter ⇒ eventStream.publish(d) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 634299248d..2837bd6546 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -228,14 +228,11 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide if (!completedJustNow) provider.deadLetters ! message } - override def sendSystemMessage(message: SystemMessage): Unit = { - val self = this - message match { - case _: Terminate ⇒ stop() - case Watch(`self`, watcher) ⇒ //FIXME IMPLEMENT - case Unwatch(`self`, watcher) ⇒ //FIXME IMPLEMENT - case _ ⇒ - } + override def sendSystemMessage(message: SystemMessage): Unit = message match { + case _: Terminate ⇒ stop() + case Watch(watchee, watcher) ⇒ //FIXME IMPLEMENT + case Unwatch(watchee, watcher) ⇒ //FIXME IMPLEMENT + case _ ⇒ } override def isTerminated: Boolean = state match { @@ -254,8 +251,8 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide try { ensureCompleted() val termination = Terminated(this)(stopped = true) - // watchedBy foreach { w => w.tell(termination) } - // watching foreach { w.sendSystemMessage(Unwatch(w, self)) } + // FIXME watchedBy foreach { w => w.tell(termination) } + // FIXME watching foreach { w.sendSystemMessage(Unwatch(w, self)) } } finally { provider.unregisterTempActor(p) } diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java index 0794e54364..8f3ab4e1fb 100644 --- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java +++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java @@ -309,7 +309,7 @@ public final class RemoteProtocol { 
maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -981,7 +981,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1977,7 +1977,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2527,7 +2527,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2936,7 +2936,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3410,7 +3410,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3909,7 +3909,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -4487,7 +4487,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private 
Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -5367,7 +5367,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6067,7 +6067,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6351,605 +6351,6 @@ public final class RemoteProtocol { // @@protoc_insertion_point(class_scope:DeployProtocol) } - public interface DaemonMsgWatchProtocolOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ActorRefProtocol watcher = 1; - boolean hasWatcher(); - akka.remote.RemoteProtocol.ActorRefProtocol getWatcher(); - akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder(); - - // required .ActorRefProtocol watched = 2; - boolean hasWatched(); - akka.remote.RemoteProtocol.ActorRefProtocol getWatched(); - akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder(); - } - public static final class DaemonMsgWatchProtocol extends - com.google.protobuf.GeneratedMessage - implements DaemonMsgWatchProtocolOrBuilder { - // Use DaemonMsgWatchProtocol.newBuilder() to construct. 
- private DaemonMsgWatchProtocol(Builder builder) { - super(builder); - } - private DaemonMsgWatchProtocol(boolean noInit) {} - - private static final DaemonMsgWatchProtocol defaultInstance; - public static DaemonMsgWatchProtocol getDefaultInstance() { - return defaultInstance; - } - - public DaemonMsgWatchProtocol getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_fieldAccessorTable; - } - - private int bitField0_; - // required .ActorRefProtocol watcher = 1; - public static final int WATCHER_FIELD_NUMBER = 1; - private akka.remote.RemoteProtocol.ActorRefProtocol watcher_; - public boolean hasWatcher() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public akka.remote.RemoteProtocol.ActorRefProtocol getWatcher() { - return watcher_; - } - public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder() { - return watcher_; - } - - // required .ActorRefProtocol watched = 2; - public static final int WATCHED_FIELD_NUMBER = 2; - private akka.remote.RemoteProtocol.ActorRefProtocol watched_; - public boolean hasWatched() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public akka.remote.RemoteProtocol.ActorRefProtocol getWatched() { - return watched_; - } - public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder() { - return watched_; - } - - private void initFields() { - watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = 
memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasWatcher()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasWatched()) { - memoizedIsInitialized = 0; - return false; - } - if (!getWatcher().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getWatched().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, watcher_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, watched_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, watcher_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, watched_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(akka.remote.RemoteProtocol.DaemonMsgWatchProtocol prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements akka.remote.RemoteProtocol.DaemonMsgWatchProtocolOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_fieldAccessorTable; - } - - // Construct using akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getWatcherFieldBuilder(); - getWatchedFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (watcherBuilder_ == null) { - watcher_ = 
akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - } else { - watcherBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (watchedBuilder_ == null) { - watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - } else { - watchedBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDescriptor(); - } - - public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol getDefaultInstanceForType() { - return akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDefaultInstance(); - } - - public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol build() { - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private akka.remote.RemoteProtocol.DaemonMsgWatchProtocol buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol buildPartial() { - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = new akka.remote.RemoteProtocol.DaemonMsgWatchProtocol(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (watcherBuilder_ == null) { - result.watcher_ = watcher_; - } else { - result.watcher_ = watcherBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (watchedBuilder_ == null) { - 
result.watched_ = watched_; - } else { - result.watched_ = watchedBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof akka.remote.RemoteProtocol.DaemonMsgWatchProtocol) { - return mergeFrom((akka.remote.RemoteProtocol.DaemonMsgWatchProtocol)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(akka.remote.RemoteProtocol.DaemonMsgWatchProtocol other) { - if (other == akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDefaultInstance()) return this; - if (other.hasWatcher()) { - mergeWatcher(other.getWatcher()); - } - if (other.hasWatched()) { - mergeWatched(other.getWatched()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasWatcher()) { - - return false; - } - if (!hasWatched()) { - - return false; - } - if (!getWatcher().isInitialized()) { - - return false; - } - if (!getWatched().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - akka.remote.RemoteProtocol.ActorRefProtocol.Builder subBuilder = akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(); - if (hasWatcher()) { - subBuilder.mergeFrom(getWatcher()); - } - input.readMessage(subBuilder, 
extensionRegistry); - setWatcher(subBuilder.buildPartial()); - break; - } - case 18: { - akka.remote.RemoteProtocol.ActorRefProtocol.Builder subBuilder = akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(); - if (hasWatched()) { - subBuilder.mergeFrom(getWatched()); - } - input.readMessage(subBuilder, extensionRegistry); - setWatched(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ActorRefProtocol watcher = 1; - private akka.remote.RemoteProtocol.ActorRefProtocol watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> watcherBuilder_; - public boolean hasWatcher() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public akka.remote.RemoteProtocol.ActorRefProtocol getWatcher() { - if (watcherBuilder_ == null) { - return watcher_; - } else { - return watcherBuilder_.getMessage(); - } - } - public Builder setWatcher(akka.remote.RemoteProtocol.ActorRefProtocol value) { - if (watcherBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - watcher_ = value; - onChanged(); - } else { - watcherBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setWatcher( - akka.remote.RemoteProtocol.ActorRefProtocol.Builder builderForValue) { - if (watcherBuilder_ == null) { - watcher_ = builderForValue.build(); - onChanged(); - } else { - watcherBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeWatcher(akka.remote.RemoteProtocol.ActorRefProtocol value) { - if (watcherBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - watcher_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) { - watcher_ = - 
akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(watcher_).mergeFrom(value).buildPartial(); - } else { - watcher_ = value; - } - onChanged(); - } else { - watcherBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearWatcher() { - if (watcherBuilder_ == null) { - watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - onChanged(); - } else { - watcherBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getWatcherBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getWatcherFieldBuilder().getBuilder(); - } - public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder() { - if (watcherBuilder_ != null) { - return watcherBuilder_.getMessageOrBuilder(); - } else { - return watcher_; - } - } - private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> - getWatcherFieldBuilder() { - if (watcherBuilder_ == null) { - watcherBuilder_ = new com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder>( - watcher_, - getParentForChildren(), - isClean()); - watcher_ = null; - } - return watcherBuilder_; - } - - // required .ActorRefProtocol watched = 2; - private akka.remote.RemoteProtocol.ActorRefProtocol watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> watchedBuilder_; - public boolean hasWatched() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public 
akka.remote.RemoteProtocol.ActorRefProtocol getWatched() { - if (watchedBuilder_ == null) { - return watched_; - } else { - return watchedBuilder_.getMessage(); - } - } - public Builder setWatched(akka.remote.RemoteProtocol.ActorRefProtocol value) { - if (watchedBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - watched_ = value; - onChanged(); - } else { - watchedBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setWatched( - akka.remote.RemoteProtocol.ActorRefProtocol.Builder builderForValue) { - if (watchedBuilder_ == null) { - watched_ = builderForValue.build(); - onChanged(); - } else { - watchedBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeWatched(akka.remote.RemoteProtocol.ActorRefProtocol value) { - if (watchedBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - watched_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) { - watched_ = - akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(watched_).mergeFrom(value).buildPartial(); - } else { - watched_ = value; - } - onChanged(); - } else { - watchedBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearWatched() { - if (watchedBuilder_ == null) { - watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - onChanged(); - } else { - watchedBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getWatchedBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getWatchedFieldBuilder().getBuilder(); - } - public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder() { - if (watchedBuilder_ != null) { - return watchedBuilder_.getMessageOrBuilder(); - } else { - return watched_; - } - } - private com.google.protobuf.SingleFieldBuilder< - 
akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> - getWatchedFieldBuilder() { - if (watchedBuilder_ == null) { - watchedBuilder_ = new com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder>( - watched_, - getParentForChildren(), - isClean()); - watched_ = null; - } - return watchedBuilder_; - } - - // @@protoc_insertion_point(builder_scope:DaemonMsgWatchProtocol) - } - - static { - defaultInstance = new DaemonMsgWatchProtocol(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DaemonMsgWatchProtocol) - } - private static com.google.protobuf.Descriptors.Descriptor internal_static_AkkaRemoteProtocol_descriptor; private static @@ -7000,11 +6401,6 @@ public final class RemoteProtocol { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_DeployProtocol_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DaemonMsgWatchProtocol_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DaemonMsgWatchProtocol_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -7039,11 +6435,9 @@ public final class RemoteProtocol { "ssCreator\030\003 \001(\t\022\017\n\007creator\030\004 \001(\014\022\024\n\014rout" + "erConfig\030\005 \001(\014\"S\n\016DeployProtocol\022\014\n\004path" + "\030\001 \002(\t\022\016\n\006config\030\002 \001(\014\022\024\n\014routerConfig\030\003" + - " \001(\014\022\r\n\005scope\030\004 \001(\014\"`\n\026DaemonMsgWatchPro" + - "tocol\022\"\n\007watcher\030\001 \002(\0132\021.ActorRefProtoco" + - "l\022\"\n\007watched\030\002 \002(\0132\021.ActorRefProtocol*7\n" + - 
"\013CommandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020\002\022" + - "\r\n\tHEARTBEAT\020\003B\017\n\013akka.remoteH\001" + " \001(\014\022\r\n\005scope\030\004 \001(\014*7\n\013CommandType\022\013\n\007CO" + + "NNECT\020\001\022\014\n\010SHUTDOWN\020\002\022\r\n\tHEARTBEAT\020\003B\017\n\013" + + "akka.remoteH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -7130,14 +6524,6 @@ public final class RemoteProtocol { new java.lang.String[] { "Path", "Config", "RouterConfig", "Scope", }, akka.remote.RemoteProtocol.DeployProtocol.class, akka.remote.RemoteProtocol.DeployProtocol.Builder.class); - internal_static_DaemonMsgWatchProtocol_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_DaemonMsgWatchProtocol_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DaemonMsgWatchProtocol_descriptor, - new java.lang.String[] { "Watcher", "Watched", }, - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.class, - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.Builder.class); return null; } }; diff --git a/akka-remote/src/main/protocol/RemoteProtocol.proto b/akka-remote/src/main/protocol/RemoteProtocol.proto index 72b04caa57..7d86d8a82b 100644 --- a/akka-remote/src/main/protocol/RemoteProtocol.proto +++ b/akka-remote/src/main/protocol/RemoteProtocol.proto @@ -107,12 +107,4 @@ message DeployProtocol { optional bytes config = 2; optional bytes routerConfig = 3; optional bytes scope = 4; -} - -/** - * Serialization of akka.remote.DaemonMsgWatch - */ -message DaemonMsgWatchProtocol { - required ActorRefProtocol watcher = 1; - required ActorRefProtocol watched = 2; -} +} \ No newline at end of file diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 97b85895ed..a56ea16c9a 100644 --- 
a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -15,7 +15,6 @@ akka { serializers { proto = "akka.serialization.ProtobufSerializer" daemon-create = "akka.serialization.DaemonMsgCreateSerializer" - daemon-watch = "akka.serialization.DaemonMsgWatchSerializer" } @@ -24,7 +23,6 @@ akka { # does, need to use the more specific one here in order to avoid ambiguity "com.google.protobuf.GeneratedMessage" = proto "akka.remote.DaemonMsgCreate" = daemon-create - "akka.remote.DaemonMsgWatch" = daemon-watch } deployment { diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index 1e81cfaac6..ddab54b2ad 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -12,7 +12,6 @@ import akka.dispatch.Watch private[akka] sealed trait DaemonMsg private[akka] case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) extends DaemonMsg -private[akka] case class DaemonMsgWatch(watcher: ActorRef, watched: ActorRef) extends DaemonMsg /** * Internal system "daemon" actor for remote internal communication. 
@@ -67,15 +66,11 @@ private[akka] class RemoteSystemDaemon(system: ActorSystemImpl, _path: ActorPath case _ ⇒ log.error("remote path does not match path from message [{}]", message) } - case DaemonMsgWatch(watcher, watched) ⇒ - system.actorFor(watcher.path.root / "remote") match { - case a: InternalActorRef ⇒ a.sendSystemMessage(Watch(watched, a)) - } } case Terminated(child: LocalActorRef) ⇒ removeChild(child.path.elements.drop(1).mkString("/")) - case t: Terminated ⇒ //FIXME system.deathWatch.publish(t) + case t: Terminated ⇒ case unknown ⇒ log.warning("Unknown message {} received by {}", unknown, this) } diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala deleted file mode 100644 index 016d7f14cb..0000000000 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.serialization - -import akka.actor.ActorRef -import akka.remote.DaemonMsgWatch -import akka.remote.RemoteProtocol.ActorRefProtocol -import akka.remote.RemoteProtocol.DaemonMsgWatchProtocol -import akka.actor.ExtendedActorSystem - -/** - * Serializes akka's internal DaemonMsgWatch using protobuf. - * - * INTERNAL API - */ -private[akka] class DaemonMsgWatchSerializer(val system: ExtendedActorSystem) extends Serializer { - import ProtobufSerializer.serializeActorRef - import ProtobufSerializer.deserializeActorRef - - def includeManifest: Boolean = false - def identifier = 4 - - def toBinary(obj: AnyRef): Array[Byte] = obj match { - case DaemonMsgWatch(watcher, watched) ⇒ - DaemonMsgWatchProtocol.newBuilder. - setWatcher(serializeActorRef(watcher)). - setWatched(serializeActorRef(watched)). 
- build.toByteArray - case _ ⇒ - throw new IllegalArgumentException( - "Can't serialize a non-DaemonMsgWatch message using DaemonMsgWatchSerializer [%s]".format(obj)) - } - - def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { - val proto = DaemonMsgWatchProtocol.parseFrom(bytes) - DaemonMsgWatch( - watcher = deserializeActorRef(system, proto.getWatcher), - watched = deserializeActorRef(system, proto.getWatched)) - } - -} \ No newline at end of file diff --git a/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala b/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala deleted file mode 100644 index a6069beac1..0000000000 --- a/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.serialization - -import akka.testkit.AkkaSpec -import akka.remote.DaemonMsgWatch -import akka.actor.Actor -import akka.actor.Props - -object DaemonMsgWatchSerializerSpec { - class MyActor extends Actor { - def receive = { - case _ ⇒ - } - } -} - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class DaemonMsgWatchSerializerSpec extends AkkaSpec { - - import DaemonMsgWatchSerializerSpec._ - - val ser = SerializationExtension(system) - - "Serialization" must { - - "resolve DaemonMsgWatchSerializer" in { - ser.serializerFor(classOf[DaemonMsgWatch]).getClass must be(classOf[DaemonMsgWatchSerializer]) - } - - "serialize and de-serialize DaemonMsgWatch" in { - val watcher = system.actorOf(Props[MyActor], "watcher") - val watched = system.actorOf(Props[MyActor], "watched") - val msg = DaemonMsgWatch(watcher, watched) - val bytes = ser.serialize(msg) match { - case Left(exception) ⇒ fail(exception) - case Right(bytes) ⇒ bytes - } - ser.deserialize(bytes.asInstanceOf[Array[Byte]], classOf[DaemonMsgWatch]) match { - case Left(exception) ⇒ fail(exception) - case Right(m) ⇒ assert(m 
=== msg) - } - } - - } -} - diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index f24ea49b8c..d2eeeee776 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -111,9 +111,7 @@ class AkkaSpecSpec extends WordSpec with MustMatchers { "akka.actor.debug.lifecycle" -> true, "akka.actor.debug.event-stream" -> true, "akka.loglevel" -> "DEBUG", "akka.stdout-loglevel" -> "DEBUG") val system = ActorSystem("AkkaSpec1", ConfigFactory.parseMap(conf.asJava).withFallback(AkkaSpec.testConf)) - val spec = new AkkaSpec(system) { - val ref = Seq(testActor, system.actorOf(Props.empty, "name")) - } + val spec = new AkkaSpec(system) { val ref = Seq(testActor, system.actorOf(Props.empty, "name")) } spec.ref foreach (_.isTerminated must not be true) system.shutdown() spec.awaitCond(spec.ref forall (_.isTerminated), 2 seconds) From 2dfa560bbf1807d41a18d6b27539dc67550f8d3c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 14:27:36 +0200 Subject: [PATCH 213/538] Switched back to the old debug messages --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index c09f40cebd..135f30f7e6 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -577,7 +577,7 @@ private[akka] class ActorCell( if (watchee == self) { if (!watchedBy.contains(watcher)) { watchedBy += watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " watched by " + watcher)) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + watcher)) } } else if (watcher == self) { 
watch(watchee) @@ -588,7 +588,7 @@ private[akka] class ActorCell( if (watchee == self) { if (watchedBy.contains(watcher)) { watchedBy -= watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " unwatched by " + watcher)) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + watcher)) } } else if (watcher == self) { unwatch(watchee) From f918ae6c324984c92526cf3b4a3b054c20fb2131 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 29 May 2012 14:30:15 +0200 Subject: [PATCH 214/538] Tag all multi node tests as long-running, since slow jenkins sometimes has problems running them. --- .../multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala | 2 +- .../src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala | 4 ++-- .../akka/remote/router/RandomRoutedRemoteActorSpec.scala | 2 +- .../akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala | 2 +- .../remote/router/ScatterGatherRoutedRemoteActorSpec.scala | 2 +- .../scala/akka/remote/testconductor/TestConductorSpec.scala | 5 +++-- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala index 797ff97ecd..cfbbae67dc 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala @@ -40,7 +40,7 @@ class LookupRemoteActorSpec extends MultiNodeSpec(LookupRemoteActorMultiJvmSpec) } "Remoting" must { - "lookup remote actor" in { + "lookup remote actor" taggedAs LongRunningTest in { runOn(slave) { val hello = system.actorFor(node(master) / "user" / "service-hello") hello.isInstanceOf[RemoteActorRef] must be(true) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala index 0f193f7a71..5aa79eb775 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala @@ -41,7 +41,7 @@ class NewRemoteActorSpec extends MultiNodeSpec(NewRemoteActorMultiJvmSpec) def initialParticipants = 2 "A new remote actor" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest in { runOn(master) { val actor = system.actorOf(Props[SomeActor], "service-hello") @@ -59,7 +59,7 @@ class NewRemoteActorSpec extends MultiNodeSpec(NewRemoteActorMultiJvmSpec) testConductor.enter("done") } - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef (with deployOnAll)" in { + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef (with deployOnAll)" taggedAs LongRunningTest in { runOn(master) { val actor = system.actorOf(Props[SomeActor], "service-hello2") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala index 464b683601..58f230e487 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala @@ -52,7 +52,7 @@ class RandomRoutedRemoteActorSpec extends MultiNodeSpec(RandomRoutedRemoteActorM def initialParticipants = 4 "A new remote actor configured with a Random router" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { + "be locally instantiated on a remote node and be able to communicate 
through its RemoteActorRef" taggedAs LongRunningTest in { runOn(first, second, third) { testConductor.enter("start", "broadcast-end", "end", "done") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala index 3442cc08d4..c72644899e 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala @@ -52,7 +52,7 @@ class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemo def initialParticipants = 4 "A new remote actor configured with a RoundRobin router" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest in { runOn(first, second, third) { testConductor.enter("start", "broadcast-end", "end", "done") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala index eab148feeb..10a007e772 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala @@ -52,7 +52,7 @@ class ScatterGatherRoutedRemoteActorSpec extends MultiNodeSpec(ScatterGatherRout def initialParticipants = 4 "A new remote actor configured with a ScatterGatherFirstCompleted router" must { - "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" in { + "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest 
in { runOn(first, second, third) { testConductor.enter("start", "broadcast-end", "end", "done") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 3be194675a..624347be69 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -11,6 +11,7 @@ import akka.dispatch.Await.Awaitable import akka.util.Duration import akka.util.duration._ import akka.testkit.ImplicitSender +import akka.testkit.LongRunningTest import java.net.InetSocketAddress import java.net.InetAddress import akka.remote.testkit.MultiNodeSpec @@ -36,7 +37,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im "A TestConductor" must { - "enter a barrier" in { + "enter a barrier" taggedAs LongRunningTest in { runOn(master) { system.actorOf(Props(new Actor { def receive = { @@ -48,7 +49,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im testConductor.enter("name") } - "support throttling of network connections" in { + "support throttling of network connections" taggedAs LongRunningTest in { runOn(slave) { // start remote network connection so that it can be throttled From 46098562ab84dc957a409225e8c05f8bca11ed24 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 14:34:25 +0200 Subject: [PATCH 215/538] Making sure that you cannot watch or unwatch yourself --- .../src/main/scala/akka/actor/ActorCell.scala | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 135f30f7e6..e8f9b64e5f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ 
-574,23 +574,29 @@ private[akka] class ActorCell( def resume(): Unit = if (isNormal) dispatcher resume this def addWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { - if (watchee == self) { + val watcheeSelf = watchee == self + val watcherSelf = watcher == self + + if (watcheeSelf && !watcherSelf) { if (!watchedBy.contains(watcher)) { watchedBy += watcher if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + watcher)) } - } else if (watcher == self) { + } else if (!watcheeSelf && watcherSelf) { watch(watchee) } else println("addNOOOOOOOOO: " + watchee + " => " + watcher) } def remWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { - if (watchee == self) { + val watcheeSelf = watchee == self + val watcherSelf = watcher == self + + if (watcheeSelf && !watcherSelf) { if (watchedBy.contains(watcher)) { watchedBy -= watcher if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + watcher)) } - } else if (watcher == self) { + } else if (!watcheeSelf && watcherSelf) { unwatch(watchee) } else println("remNOOOOOOOOO: " + watchee + " => " + watcher) } From f0cac7c1808d1ad786e3f96add571b036b11ee49 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 14:46:34 +0200 Subject: [PATCH 216/538] Removing some boiler --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index e8f9b64e5f..4e8a54d7fb 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -483,10 +483,8 @@ private[akka] class ActorCell( /** * Impl UntypedActorContext */ - final def getChildren(): java.lang.Iterable[ActorRef] = { - import scala.collection.JavaConverters.asJavaIterableConverter - 
asJavaIterableConverter(children).asJava - } + final def getChildren(): java.lang.Iterable[ActorRef] = + scala.collection.JavaConverters.asJavaIterableConverter(children).asJava final def tell(message: Any, sender: ActorRef): Unit = dispatcher.dispatch(this, Envelope(message, if (sender eq null) system.deadLetters else sender)(system)) From b38ce0274d0ea7a6a3f282bdd4c0874d50ba7a65 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 15:04:04 +0200 Subject: [PATCH 217/538] Making sure that Terminated messages don't go to the same guy --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index ad45f6ad09..5e3de885ea 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -416,8 +416,8 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, override def isTerminated(): Boolean = true override def sendSystemMessage(message: SystemMessage): Unit = message match { - case Watch(maybeThis, watcher) if maybeThis == this ⇒ watcher ! Terminated(this)(stopped = false) - case _ ⇒ + case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this) watcher ! Terminated(watchee)(stopped = false) + case _ ⇒ } override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { @@ -437,9 +437,8 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { override def sendSystemMessage(message: SystemMessage): Unit = message match { - case Watch(maybeThis, watcher) if maybeThis == this ⇒ - case Watch(other, watcher) ⇒ watcher ! Terminated(other)(stopped = false) - case _ ⇒ + case Watch(watchee, watcher) ⇒ if (watchee != this && watcher != this) watcher ! 
Terminated(watchee)(stopped = false) + case _ ⇒ } override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { From 8903abb23014f7fabe4ce4172a7ef97b4066f808 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 15:20:26 +0200 Subject: [PATCH 218/538] Making sure Watch messages are handled by DeadLetterActorRef + EmptyLocalActorRef --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 10 ++++++---- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 6 ++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 5e3de885ea..042a5cdd6a 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -421,8 +421,9 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, } override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { - case d: DeadLetter ⇒ // do NOT form endless loops, since deadLetters will resend! - case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) + case DeadLetter(w: Watch, _, _) ⇒ sendSystemMessage(w) + case d: DeadLetter ⇒ // do NOT form endless loops, since deadLetters will resend! 
+ case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) } } @@ -442,8 +443,9 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, } override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { - case d: DeadLetter ⇒ eventStream.publish(d) - case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) + case DeadLetter(w: Watch, _, _) ⇒ sendSystemMessage(w) + case d: DeadLetter ⇒ eventStream.publish(d) + case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) } @throws(classOf[java.io.ObjectStreamException]) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 94ee24336a..993e7e98e4 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -542,7 +542,8 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, //FIXME Why do we need this at all? val deadLetterQueue: MessageQueue = new MessageQueue { - def enqueue(receiver: ActorRef, envelope: Envelope) { deadLetters ! DeadLetter(envelope.message, envelope.sender, receiver) } + def enqueue(receiver: ActorRef, envelope: Envelope): Unit = + deadLetters ! DeadLetter(envelope.message, envelope.sender, receiver) def dequeue() = null def hasMessages = false def numberOfMessages = 0 @@ -551,7 +552,8 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, //FIXME Why do we need this at all? val deadLetterMailbox: Mailbox = new Mailbox(null, deadLetterQueue) { becomeClosed() - def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = deadLetters ! DeadLetter(handle, receiver, receiver) + def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = + deadLetters ! 
DeadLetter(handle, receiver, receiver) def systemDrain(): SystemMessage = null def hasSystemMessages = false } From 4b6f00f42418992cbd7b85c4ab7b395553108803 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 29 May 2012 15:31:57 +0200 Subject: [PATCH 219/538] Quoted strings confuses ScalaTest (space seoarated argument strings suck) --- project/AkkaBuild.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index e20144b418..d21f510cd4 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -332,8 +332,8 @@ object AkkaBuild extends Build { lazy val defaultMultiJvmScalatestOptions: Seq[String] = { val excludeTags = (useExcludeTestTags -- useIncludeTestTags).toSeq Seq("-r", "org.scalatest.akka.QuietReporter") ++ - (if (excludeTags.isEmpty) Seq.empty else Seq("-l", excludeTags.mkString("\"", " ", "\""))) ++ - (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", useOnlyTestTags.mkString("\"", " ", "\""))) + (if (excludeTags.isEmpty) Seq.empty else Seq("-l", excludeTags.mkString(" "))) ++ + (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", useOnlyTestTags.mkString(" "))) } lazy val defaultSettings = baseSettings ++ formatSettings ++ Seq( @@ -359,12 +359,12 @@ object AkkaBuild extends Build { // add arguments for tests excluded by tag - includes override excludes (opposite to scalatest) testOptions in Test <++= (excludeTestTags, includeTestTags) map { (excludes, includes) => val tags = (excludes -- includes) - if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-l", tags.mkString("\"", " ", "\""))) + if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-l", tags.mkString(" "))) }, // add arguments for running only tests by tag testOptions in Test <++= onlyTestTags map { tags => - if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-n", tags.mkString("\"", " ", "\""))) + if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-n", tags.mkString(" "))) 
}, // show full stack traces From a413a9394b3d54c9e26bb5e58dcd03a22bc1ddb5 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 16:36:24 +0200 Subject: [PATCH 220/538] Switching approaches for EmptyLocalActorRef and DeadLetterActorRef --- .../src/main/scala/akka/actor/ActorRef.scala | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 042a5cdd6a..a713a61ddc 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -415,15 +415,20 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, override def isTerminated(): Boolean = true - override def sendSystemMessage(message: SystemMessage): Unit = message match { - case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this) watcher ! Terminated(watchee)(stopped = false) - case _ ⇒ - } + override def sendSystemMessage(message: SystemMessage): Unit = specialHandle(message) override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { - case DeadLetter(w: Watch, _, _) ⇒ sendSystemMessage(w) - case d: DeadLetter ⇒ // do NOT form endless loops, since deadLetters will resend! - case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) + case d: DeadLetter ⇒ specialHandle(d.message) // do NOT form endless loops, since deadLetters will resend! + case _ ⇒ if (!specialHandle(message)) eventStream.publish(DeadLetter(message, sender, this)) + } + + protected def specialHandle(msg: Any): Boolean = msg match { + case w: Watch ⇒ + if (w.watchee == this && w.watcher != this) w.watcher ! 
Terminated(w.watchee)(stopped = false) + + true + case w: Unwatch ⇒ true // Just ignore + case _ ⇒ false } } @@ -437,15 +442,18 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: ActorPath, _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { - override def sendSystemMessage(message: SystemMessage): Unit = message match { - case Watch(watchee, watcher) ⇒ if (watchee != this && watcher != this) watcher ! Terminated(watchee)(stopped = false) - case _ ⇒ + override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { + case d: DeadLetter ⇒ if (!specialHandle(d.message)) eventStream.publish(d) + case _ ⇒ if (!specialHandle(message)) eventStream.publish(DeadLetter(message, sender, this)) } - override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { - case DeadLetter(w: Watch, _, _) ⇒ sendSystemMessage(w) - case d: DeadLetter ⇒ eventStream.publish(d) - case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) + override protected def specialHandle(msg: Any): Boolean = msg match { + case w: Watch ⇒ + if (w.watchee != this && w.watcher != this) w.watcher ! 
Terminated(w.watchee)(stopped = false) + + true + case w: Unwatch ⇒ true // Just ignore + case _ ⇒ false } @throws(classOf[java.io.ObjectStreamException]) From 35aaa220aa0c65333e75a7c199fe9ebc782c1b89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 29 May 2012 17:16:28 +0200 Subject: [PATCH 221/538] Quoting strings differently for multi-node --- project/AkkaBuild.scala | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index d21f510cd4..4b8f72e424 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -329,11 +329,13 @@ object AkkaBuild extends Build { if (prop.isEmpty) Seq.empty else prop.split(",").toSeq } + val multiNodeEnabled = java.lang.Boolean.getBoolean("akka.test.multi-node") + lazy val defaultMultiJvmScalatestOptions: Seq[String] = { val excludeTags = (useExcludeTestTags -- useIncludeTestTags).toSeq Seq("-r", "org.scalatest.akka.QuietReporter") ++ - (if (excludeTags.isEmpty) Seq.empty else Seq("-l", excludeTags.mkString(" "))) ++ - (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", useOnlyTestTags.mkString(" "))) + (if (excludeTags.isEmpty) Seq.empty else Seq("-l", if (multiNodeEnabled) excludeTags.mkString("\"", " ", "\"") else excludeTags.mkString(" "))) ++ + (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", if (multiNodeEnabled) useOnlyTestTags.mkString("\"", " ", "\"") else useOnlyTestTags.mkString(" "))) } lazy val defaultSettings = baseSettings ++ formatSettings ++ Seq( @@ -387,7 +389,7 @@ object AkkaBuild extends Build { lazy val multiJvmSettings = MultiJvmPlugin.settings ++ inConfig(MultiJvm)(ScalariformPlugin.scalariformSettings) ++ Seq( compileInputs in MultiJvm <<= (compileInputs in MultiJvm) dependsOn (ScalariformKeys.format in MultiJvm), ScalariformKeys.preferences in MultiJvm := formattingPreferences, - if (java.lang.Boolean.getBoolean("akka.test.multi-node")) + if (multiNodeEnabled) test in Test <<= ((test in Test), 
(multiNodeTest in MultiJvm)) map { case x => x } else test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } From dbc3d91395fa79a06bb74f659118fb63cbc1ddba Mon Sep 17 00:00:00 2001 From: Peter Badenhorst Date: Fri, 25 May 2012 00:59:17 +0200 Subject: [PATCH 222/538] Added changes to Netty pipelines to support SSL/TLS. Fixes #1978 1) Netty server and client pipelines updated to conditionally load keystore/truststore if SSL is enabled in the config 2) Supports any available encryption protocol via 'ssl-protocol' 3) Supported encryption algorithms are specified via 'ssl-encryption-protocol' config key Conflicts: akka-remote/src/main/scala/akka/remote/netty/Client.scala akka-remote/src/main/scala/akka/remote/netty/Server.scala akka-remote/src/main/scala/akka/remote/netty/Settings.scala --- akka-remote/src/main/resources/reference.conf | 27 ++++++ .../main/scala/akka/remote/netty/Client.scala | 88 +++++++++++++++++++ .../main/scala/akka/remote/netty/Server.scala | 84 +++++++++++++++++- .../scala/akka/remote/netty/Settings.scala | 43 ++++++++- .../akka/remote/Ticket1978ConfigSpec.scala | 46 ++++++++++ 5 files changed, 286 insertions(+), 2 deletions(-) create mode 100644 akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 97b85895ed..5c7b802f1c 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -155,6 +155,33 @@ akka { # (O) Maximum time window that a client should try to reconnect for reconnection-time-window = 600s + + # (I&O) Enable SSL/TLS encryption. + # This must be enabled on both the client and server to work. 
+ enable-ssl = off + + # (I) This is the Java Key Store used by the server connection + ssl-key-store = "keystore" + + # This password is used for decrypting the key store + ssl-key-store-password = "changeme" + + # (O) This is the Java Key Store used by the client connection + ssl-trust-store = "truststore" + + # This password is used for decrypting the trust store + ssl-trust-store-password = "changeme" + + # (I&O) Protocol to use for SSL encryption, choose from: + # Java 6 & 7: + # SSLv3, TLSv1, + # Java 7: + # TLSv1.1, TLSv1.2 + ssl-protocol = "TLSv1" + + # You need to install the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256 + # More info here: http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider + ssl-supported-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index c1737831da..b1b37a08f8 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -18,6 +18,19 @@ import akka.actor.{ Address, ActorRef } import akka.AkkaException import akka.event.Logging import akka.util.Switch +import akka.actor.ActorRef +import org.jboss.netty.channel.ChannelFutureListener +import akka.remote.RemoteClientWriteFailed +import java.net.InetAddress +import java.security.{ SecureRandom, KeyStore, GeneralSecurityException } +import org.jboss.netty.util.TimerTask +import org.jboss.netty.util.Timeout +import java.util.concurrent.TimeUnit +import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } +import java.security.cert.X509Certificate +import javax.net.ssl.{ SSLContext, X509TrustManager, TrustManagerFactory, TrustManager } +import org.jboss.netty.handler.ssl.SslHandler +import java.io.FileInputStream /** * This is the 
abstract baseclass for netty remote clients, currently there's only an @@ -310,6 +323,81 @@ private[akka] class PassiveRemoteClient(val currentChannel: Channel, netty: NettyRemoteTransport, remoteAddress: Address) extends RemoteClient(netty, remoteAddress) { + import client.netty.settings + + def initTLS(trustStorePath: String, trustStorePassword: String): Option[SSLContext] = { + if (trustStorePath != null && trustStorePassword != null) + try { + val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) + val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) + val stream = new FileInputStream(trustStorePath) + trustStore.load(stream, trustStorePassword.toCharArray) + trustManagerFactory.init(trustStore); + val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers + + val sslContext = SSLContext.getInstance("TLS") + sslContext.init(null, trustManagers, new SecureRandom()) + Some(sslContext) + } catch { + case e: GeneralSecurityException ⇒ { + client.log.error(e, "TLS connection could not be established. TLS is not used!"); + None + } + } + else { + client.log.error("TLS connection could not be established because trust store details are missing") + None + } + } + + def getSSLHandler_? : Option[SslHandler] = { + val sslContext: Option[SSLContext] = { + if (settings.EnableSSL) { + client.log.debug("Client SSL is enabled, initialising ...") + initTLS(settings.SSLTrustStore.get, settings.SSLTrustStorePassword.get) + } else { + None + } + } + if (sslContext.isDefined) { + client.log.debug("Client Using SSL context to create SSLEngine ...") + val sslEngine = sslContext.get.createSSLEngine + sslEngine.setUseClientMode(true) + sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) + Some(new SslHandler(sslEngine)) + } else { + None + } + } + + def getPipeline: ChannelPipeline = { + val sslHandler = getSSLHandler_? 
+ val timeout = new IdleStateHandler(client.netty.timer, + settings.ReadTimeout.toSeconds.toInt, + settings.WriteTimeout.toSeconds.toInt, + settings.AllTimeout.toSeconds.toInt) + val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) + val lenPrep = new LengthFieldPrepender(4) + val messageDec = new RemoteMessageDecoder + val messageEnc = new RemoteMessageEncoder(client.netty) + val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, localAddress, client.netty.timer, client) + + val stages: List[ChannelHandler] = timeout :: lenDec :: messageDec :: lenPrep :: messageEnc :: executionHandler :: remoteClient :: Nil + if (sslHandler.isDefined) { + client.log.debug("Client creating pipeline with SSL handler...") + new StaticChannelPipeline(sslHandler.get :: stages: _*) + } else { + client.log.debug("Client creating pipeline without SSL handler...") + new StaticChannelPipeline(stages: _*) + } + } +} + +class PassiveRemoteClient(val currentChannel: Channel, + netty: NettyRemoteTransport, + remoteAddress: Address) + extends RemoteClient(netty, remoteAddress) { + def connect(reconnectIfAlreadyConnected: Boolean = false): Boolean = runSwitch switchOn { netty.notifyListeners(RemoteClientStarted(netty, remoteAddress)) log.debug("Starting remote client connection to [{}]", remoteAddress) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index cc3310fada..ace45677f1 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -5,6 +5,7 @@ package akka.remote.netty import java.net.InetSocketAddress import java.util.concurrent.Executors +import java.io.FileNotFoundException import scala.Option.option2Iterable import org.jboss.netty.bootstrap.ServerBootstrap import org.jboss.netty.channel.ChannelHandler.Sharable @@ -12,13 +13,17 @@ import 
org.jboss.netty.channel.group.ChannelGroup import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler -import akka.event.Logging import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } import akka.remote.{ RemoteServerShutdown, RemoteServerError, RemoteServerClientDisconnected, RemoteServerClientConnected, RemoteServerClientClosed, RemoteProtocol, RemoteMessage } import akka.actor.Address import java.net.InetAddress import akka.actor.ActorSystemImpl import org.jboss.netty.channel._ +import org.jboss.netty.handler.ssl.SslHandler +import java.security.{ SecureRandom, KeyStore, GeneralSecurityException } +import javax.net.ssl.{ KeyManagerFactory, SSLContext } +import java.io.FileInputStream +import akka.event.{ LoggingAdapter, Logging } private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { @@ -26,6 +31,8 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { val ip = InetAddress.getByName(settings.Hostname) + lazy val log = Logging(netty.system, "NettyRemoteServer(" + ip + ")") + private val factory = settings.UseDispatcherForIO match { case Some(id) ⇒ @@ -80,6 +87,81 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { } } +class RemoteServerPipelineFactory( + val openChannels: ChannelGroup, + val executionHandler: ExecutionHandler, + val netty: NettyRemoteTransport, + val log: LoggingAdapter) extends ChannelPipelineFactory { + + import netty.settings + + def initTLS(keyStorePath: String, keyStorePassword: String): Option[SSLContext] = { + if (keyStorePath != null && keyStorePassword != null) { + try { + val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) + val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) + val stream = new FileInputStream(keyStorePath) + 
keyStore.load(stream, keyStorePassword.toCharArray) + factory.init(keyStore, keyStorePassword.toCharArray) + val sslContext = SSLContext.getInstance(settings.SSLProtocol.get) + sslContext.init(factory.getKeyManagers, null, new SecureRandom()) + Some(sslContext) + } catch { + case e: FileNotFoundException ⇒ { + log.error(e, "TLS connection could not be established because keystore could not be loaded") + None + } + case e: GeneralSecurityException ⇒ { + log.error(e, "TLS connection could not be established") + None + } + } + } else { + log.error("TLS connection could not be established because key store details are missing") + None + } + } + + def getSSLHandler_? : Option[SslHandler] = { + val sslContext: Option[SSLContext] = { + if (settings.EnableSSL) { + log.debug("SSL is enabled, initialising...") + initTLS(settings.SSLKeyStore.get, settings.SSLKeyStorePassword.get) + } else { + None + } + } + if (sslContext.isDefined) { + log.debug("Using SSL context to create SSLEngine...") + val sslEngine = sslContext.get.createSSLEngine + sslEngine.setUseClientMode(false) + sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) + Some(new SslHandler(sslEngine)) + } else { + None + } + } + + def getPipeline: ChannelPipeline = { + val sslHandler = getSSLHandler_? 
+ val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) + val lenPrep = new LengthFieldPrepender(4) + val messageDec = new RemoteMessageDecoder + val messageEnc = new RemoteMessageEncoder(netty) + + val authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil + val remoteServer = new RemoteServerHandler(openChannels, netty) + val stages: List[ChannelHandler] = lenDec :: messageDec :: lenPrep :: messageEnc :: executionHandler :: authenticator ::: remoteServer :: Nil + if (sslHandler.isDefined) { + log.debug("Creating pipeline with SSL handler...") + new StaticChannelPipeline(sslHandler.get :: stages: _*) + } else { + log.debug("Creating pipeline without SSL handler...") + new StaticChannelPipeline(stages: _*) + } + } +} + @ChannelHandler.Sharable private[akka] class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler { val authenticated = new AnyRef diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 64bc184408..d753a743b6 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -73,4 +73,45 @@ private[akka] class NettySettings(config: Config, val systemName: String) { case sz ⇒ sz } -} + val SSLKeyStore = getString("ssl-key-store") match { + case "" ⇒ None + case keyStore ⇒ Some(keyStore) + } + + val SSLTrustStore = getString("ssl-trust-store") match { + case "" ⇒ None + case trustStore ⇒ Some(trustStore) + } + + val SSLKeyStorePassword = getString("ssl-key-store-password") match { + case "" ⇒ None + case password ⇒ Some(password) + } + + val SSLTrustStorePassword = getString("ssl-trust-store-password") match { + case "" ⇒ None + case password ⇒ Some(password) + } + + val SSLSupportedAlgorithms = getStringList("ssl-supported-algorithms") + + val 
SSLProtocol = getString("ssl-protocol") match { + case "" ⇒ None + case protocol ⇒ Some(protocol) + } + + val EnableSSL = { + val enableSSL = getBoolean("enable-ssl") + if (enableSSL) { + if (SSLProtocol.isEmpty) throw new ConfigurationException( + "Configuration option 'akka.remote.netty.enable-ssl is turned on but no protocol is defined in 'akka.remote.netty.ssl-protocol'.") + if (SSLKeyStore.isEmpty && SSLTrustStore.isEmpty) throw new ConfigurationException( + "Configuration option 'akka.remote.netty.enable-ssl is turned on but no key/trust store is defined in 'akka.remote.netty.ssl-key-store' / 'akka.remote.netty.ssl-trust-store'.") + if (SSLKeyStore.isDefined && SSLKeyStorePassword.isEmpty) throw new ConfigurationException( + "Configuration option 'akka.remote.netty.ssl-key-store' is defined but no key-store password is defined in 'akka.remote.netty.ssl-key-store-password'.") + if (SSLTrustStore.isDefined && SSLTrustStorePassword.isEmpty) throw new ConfigurationException( + "Configuration option 'akka.remote.netty.ssl-trust-store' is defined but no trust-store password is defined in 'akka.remote.netty.ssl-trust-store-password'.") + } + enableSSL + } +} \ No newline at end of file diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala new file mode 100644 index 0000000000..0d429043c2 --- /dev/null +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -0,0 +1,46 @@ +package akka.remote + +import akka.testkit._ +import akka.actor._ +import com.typesafe.config._ +import akka.actor.ExtendedActorSystem +import akka.util.duration._ +import akka.util.Duration +import akka.remote.netty.NettyRemoteTransport +import java.util.ArrayList + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978ConfigSpec extends AkkaSpec(""" +akka { + actor.provider = "akka.remote.RemoteActorRefProvider" + remote.netty { + hostname = localhost + 
port = 12345 + } + actor.deployment { + /blub.remote = "akka://remote-sys@localhost:12346" + /looker/child.remote = "akka://remote-sys@localhost:12346" + /looker/child/grandchild.remote = "akka://RemoteCommunicationSpec@localhost:12345" + } +} +""") with ImplicitSender with DefaultTimeout { + + "SSL Remoting" must { + "be able to parse these extra Netty config elements" in { + val settings = + system.asInstanceOf[ExtendedActorSystem] + .provider.asInstanceOf[RemoteActorRefProvider] + .transport.asInstanceOf[NettyRemoteTransport] + .settings + import settings._ + + EnableSSL must be(false) + SSLKeyStore must be(Some("keystore")) + SSLKeyStorePassword must be(Some("changeme")) + SSLTrustStore must be(Some("truststore")) + SSLTrustStorePassword must be(Some("changeme")) + SSLProtocol must be(Some("TLSv1")) + SSLSupportedAlgorithms must be(java.util.Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) + } + } +} From 56cd9692edab6a308855e6af7c5c1cc0670a04b2 Mon Sep 17 00:00:00 2001 From: Peter Badenhorst Date: Mon, 28 May 2012 23:51:47 +0200 Subject: [PATCH 223/538] Reverted changes to client and server files and moved the code to NettySSLSupport.scala Updated configuration file to reflect new netty.ssl hierarchy. 
--- .../TestConductorTransport.scala | 4 +- akka-remote/src/main/resources/reference.conf | 42 +++--- .../main/scala/akka/remote/netty/Client.scala | 90 +------------ .../remote/netty/NettyRemoteSupport.scala | 13 +- .../akka/remote/netty/NettySSLSupport.scala | 122 ++++++++++++++++++ .../main/scala/akka/remote/netty/Server.scala | 85 +----------- .../scala/akka/remote/netty/Settings.scala | 22 ++-- 7 files changed, 166 insertions(+), 212 deletions(-) create mode 100644 akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala index dbf17fa5a7..f7b7943275 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/TestConductorTransport.scala @@ -16,9 +16,9 @@ import org.jboss.netty.channel.ChannelPipelineFactory private[akka] class TestConductorTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) extends NettyRemoteTransport(_system, _provider) { - override def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory = + override def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean, isClient: Boolean): ChannelPipelineFactory = new ChannelPipelineFactory { - def getPipeline = PipelineFactory(new NetworkFailureInjector(system) +: PipelineFactory.defaultStack(withTimeout) :+ endpoint) + def getPipeline = PipelineFactory(new NetworkFailureInjector(system) +: PipelineFactory.defaultStack(withTimeout, isClient) :+ endpoint) } } \ No newline at end of file diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 5c7b802f1c..d20a57d1a5 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ 
-156,32 +156,34 @@ akka { # (O) Maximum time window that a client should try to reconnect for reconnection-time-window = 600s - # (I&O) Enable SSL/TLS encryption. - # This must be enabled on both the client and server to work. - enable-ssl = off + ssl { + # (I&O) Enable SSL/TLS encryption. + # This must be enabled on both the client and server to work. + enable = off - # (I) This is the Java Key Store used by the server connection - ssl-key-store = "keystore" + # (I) This is the Java Key Store used by the server connection + key-store = "keystore" - # This password is used for decrypting the key store - ssl-key-store-password = "changeme" + # This password is used for decrypting the key store + key-store-password = "changeme" - # (O) This is the Java Key Store used by the client connection - ssl-trust-store = "truststore" + # (O) This is the Java Key Store used by the client connection + trust-store = "truststore" - # This password is used for decrypting the trust store - ssl-trust-store-password = "changeme" + # This password is used for decrypting the trust store + trust-store-password = "changeme" - # (I&O) Protocol to use for SSL encryption, choose from: - # Java 6 & 7: - # SSLv3, TLSv1, - # Java 7: - # TLSv1.1, TLSv1.2 - ssl-protocol = "TLSv1" + # (I&O) Protocol to use for SSL encryption, choose from: + # Java 6 & 7: + # SSLv3, TLSv1, + # Java 7: + # TLSv1.1, TLSv1.2 + protocol = "TLSv1" - # You need to install the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256 - # More info here: http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider - ssl-supported-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] + # You need to install the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256 + # More info here: http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider + supported-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA", 
"TLS_RSA_WITH_AES_256_CBC_SHA"] + } } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index b1b37a08f8..e3037b71ad 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -18,19 +18,6 @@ import akka.actor.{ Address, ActorRef } import akka.AkkaException import akka.event.Logging import akka.util.Switch -import akka.actor.ActorRef -import org.jboss.netty.channel.ChannelFutureListener -import akka.remote.RemoteClientWriteFailed -import java.net.InetAddress -import java.security.{ SecureRandom, KeyStore, GeneralSecurityException } -import org.jboss.netty.util.TimerTask -import org.jboss.netty.util.Timeout -import java.util.concurrent.TimeUnit -import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } -import java.security.cert.X509Certificate -import javax.net.ssl.{ SSLContext, X509TrustManager, TrustManagerFactory, TrustManager } -import org.jboss.netty.handler.ssl.SslHandler -import java.io.FileInputStream /** * This is the abstract baseclass for netty remote clients, currently there's only an @@ -156,7 +143,7 @@ private[akka] class ActiveRemoteClient private[akka] ( openChannels = new DefaultDisposableChannelGroup(classOf[RemoteClient].getName) val b = new ClientBootstrap(netty.clientChannelFactory) - b.setPipelineFactory(netty.createPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), true)) + b.setPipelineFactory(netty.createPipeline(new ActiveRemoteClientHandler(name, b, remoteAddress, localAddress, netty.timer, this), withTimeout = true, isClient = true)) b.setOption("tcpNoDelay", true) b.setOption("keepAlive", true) b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis) @@ -323,81 +310,6 @@ private[akka] class PassiveRemoteClient(val currentChannel: Channel, netty: 
NettyRemoteTransport, remoteAddress: Address) extends RemoteClient(netty, remoteAddress) { - import client.netty.settings - - def initTLS(trustStorePath: String, trustStorePassword: String): Option[SSLContext] = { - if (trustStorePath != null && trustStorePassword != null) - try { - val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) - val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) - val stream = new FileInputStream(trustStorePath) - trustStore.load(stream, trustStorePassword.toCharArray) - trustManagerFactory.init(trustStore); - val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers - - val sslContext = SSLContext.getInstance("TLS") - sslContext.init(null, trustManagers, new SecureRandom()) - Some(sslContext) - } catch { - case e: GeneralSecurityException ⇒ { - client.log.error(e, "TLS connection could not be established. TLS is not used!"); - None - } - } - else { - client.log.error("TLS connection could not be established because trust store details are missing") - None - } - } - - def getSSLHandler_? : Option[SslHandler] = { - val sslContext: Option[SSLContext] = { - if (settings.EnableSSL) { - client.log.debug("Client SSL is enabled, initialising ...") - initTLS(settings.SSLTrustStore.get, settings.SSLTrustStorePassword.get) - } else { - None - } - } - if (sslContext.isDefined) { - client.log.debug("Client Using SSL context to create SSLEngine ...") - val sslEngine = sslContext.get.createSSLEngine - sslEngine.setUseClientMode(true) - sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) - Some(new SslHandler(sslEngine)) - } else { - None - } - } - - def getPipeline: ChannelPipeline = { - val sslHandler = getSSLHandler_? 
- val timeout = new IdleStateHandler(client.netty.timer, - settings.ReadTimeout.toSeconds.toInt, - settings.WriteTimeout.toSeconds.toInt, - settings.AllTimeout.toSeconds.toInt) - val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) - val lenPrep = new LengthFieldPrepender(4) - val messageDec = new RemoteMessageDecoder - val messageEnc = new RemoteMessageEncoder(client.netty) - val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, localAddress, client.netty.timer, client) - - val stages: List[ChannelHandler] = timeout :: lenDec :: messageDec :: lenPrep :: messageEnc :: executionHandler :: remoteClient :: Nil - if (sslHandler.isDefined) { - client.log.debug("Client creating pipeline with SSL handler...") - new StaticChannelPipeline(sslHandler.get :: stages: _*) - } else { - client.log.debug("Client creating pipeline without SSL handler...") - new StaticChannelPipeline(stages: _*) - } - } -} - -class PassiveRemoteClient(val currentChannel: Channel, - netty: NettyRemoteTransport, - remoteAddress: Address) - extends RemoteClient(netty, remoteAddress) { - def connect(reconnectIfAlreadyConnected: Boolean = false): Boolean = runSwitch switchOn { netty.notifyListeners(RemoteClientStarted(netty, remoteAddress)) log.debug("Starting remote client connection to [{}]", remoteAddress) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index b42239f470..32aba84893 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -61,17 +61,18 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider * * @param withTimeout determines whether an IdleStateHandler shall be included */ - def apply(endpoint: ⇒ Seq[ChannelHandler], withTimeout: Boolean): ChannelPipelineFactory = + def apply(endpoint: ⇒ 
Seq[ChannelHandler], withTimeout: Boolean, isClient: Boolean): ChannelPipelineFactory = new ChannelPipelineFactory { - def getPipeline = apply(defaultStack(withTimeout) ++ endpoint) + def getPipeline = apply(defaultStack(withTimeout, isClient) ++ endpoint) } /** * Construct a default protocol stack, excluding the “head” handler (i.e. the one which * actually dispatches the received messages to the local target actors). */ - def defaultStack(withTimeout: Boolean): Seq[ChannelHandler] = - (if (withTimeout) timeout :: Nil else Nil) ::: + def defaultStack(withTimeout: Boolean, isClient: Boolean): Seq[ChannelHandler] = + (if (settings.EnableSSL) NettySSLSupport(settings, NettyRemoteTransport.this, isClient) :: Nil else Nil) ::: + (if (withTimeout) timeout :: Nil else Nil) ::: msgFormat ::: authenticator ::: executionHandler :: @@ -119,8 +120,8 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider * This method is factored out to provide an extension point in case the * pipeline shall be changed. It is recommended to use */ - def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean): ChannelPipelineFactory = - PipelineFactory(Seq(endpoint), withTimeout) + def createPipeline(endpoint: ⇒ ChannelHandler, withTimeout: Boolean, isClient: Boolean): ChannelPipelineFactory = + PipelineFactory(Seq(endpoint), withTimeout, isClient) private val remoteClients = new HashMap[Address, RemoteClient] private val clientsLock = new ReentrantReadWriteLock diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala new file mode 100644 index 0000000000..d830c87a07 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -0,0 +1,122 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.remote.netty + +import org.jboss.netty.handler.ssl.SslHandler +import com.sun.xml.internal.bind.v2.model.core.NonElement +import com.sun.xml.internal.ws.resources.SoapMessages +import javax.net.ssl.{ KeyManagerFactory, TrustManager, TrustManagerFactory, SSLContext } +import akka.remote.{ RemoteClientError, RemoteTransportException, RemoteServerError } +import java.security.{ GeneralSecurityException, SecureRandom, KeyStore } +import java.io.{ IOException, FileNotFoundException, FileInputStream } + +object NettySSLSupport { + /** + * Construct a SSLHandler which can be inserted into a Netty server/client pipeline + */ + def apply(settings: NettySettings, netty: NettyRemoteTransport, isClient: Boolean): SslHandler = { + if (isClient) initialiseClientSSL(settings, netty) + else initialiseServerSSL(settings, netty) + } + + private def initialiseClientSSL(settings: NettySettings, netty: NettyRemoteTransport): SslHandler = { + netty.log.debug("Client SSL is enabled, initialising ...") + val sslContext: Option[SSLContext] = { + (settings.SSLTrustStore, settings.SSLTrustStorePassword, settings.SSLProtocol) match { + case (Some(trustStore), Some(password), Some(protocol)) ⇒ constructClientContext(settings, netty, trustStore, password, protocol) + case _ ⇒ throw new GeneralSecurityException("Could not find all SSL trust store settings") + } + } + sslContext match { + case Some(context) ⇒ { + netty.log.debug("Using client SSL context to create SSLEngine ...") + val sslEngine = context.createSSLEngine + sslEngine.setUseClientMode(true) + sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) + new SslHandler(sslEngine) + } + case None ⇒ throw new GeneralSecurityException("Failed to initialise client SSL") + } + } + + private def constructClientContext(settings: NettySettings, netty: NettyRemoteTransport, trustStorePath: String, trustStorePassword: String, protocol: String): Option[SSLContext] = { + try { + val 
trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) + val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) + val stream = new FileInputStream(trustStorePath) + trustStore.load(stream, trustStorePassword.toCharArray) + trustManagerFactory.init(trustStore) + val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers + val sslContext = SSLContext.getInstance(protocol) + sslContext.init(null, trustManagers, new SecureRandom()) + Some(sslContext) + } catch { + case e: FileNotFoundException ⇒ { + val exception = new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) + netty.notifyListeners(RemoteClientError(exception, netty, netty.address)) + throw exception + } + case e: IOException ⇒ { + val exception = new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) + netty.notifyListeners(RemoteClientError(exception, netty, netty.address)) + throw exception + } + case e: GeneralSecurityException ⇒ { + val exception = new RemoteTransportException("Client SSL connection could not be established because SSL context could not be constructed", e) + netty.notifyListeners(RemoteClientError(exception, netty, netty.address)) + throw exception + } + } + } + + private def initialiseServerSSL(settings: NettySettings, netty: NettyRemoteTransport): SslHandler = { + netty.log.debug("Server SSL is enabled, initialising ...") + val sslContext: Option[SSLContext] = { + (settings.SSLKeyStore, settings.SSLKeyStorePassword, settings.SSLProtocol) match { + case (Some(keyStore), Some(password), Some(protocol)) ⇒ constructServerContext(settings, netty, keyStore, password, protocol) + case _ ⇒ throw new GeneralSecurityException("Could not find all SSL key store settings") + } + } + sslContext match { + case Some(context) ⇒ { + netty.log.debug("Using server SSL context to create SSLEngine ...") + val sslEngine = 
context.createSSLEngine + sslEngine.setUseClientMode(false) + sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) + new SslHandler(sslEngine) + } + case None ⇒ throw new GeneralSecurityException("Failed to initialise server SSL") + } + } + + private def constructServerContext(settings: NettySettings, netty: NettyRemoteTransport, keyStorePath: String, keyStorePassword: String, protocol: String): Option[SSLContext] = { + try { + val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) + val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) + val stream = new FileInputStream(keyStorePath) + keyStore.load(stream, keyStorePassword.toCharArray) + factory.init(keyStore, keyStorePassword.toCharArray) + val sslContext = SSLContext.getInstance(protocol) + sslContext.init(factory.getKeyManagers, null, new SecureRandom()) + Some(sslContext) + } catch { + case e: FileNotFoundException ⇒ { + val exception = new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) + netty.notifyListeners(RemoteServerError(exception, netty)) + throw exception + } + case e: IOException ⇒ { + val exception = new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) + netty.notifyListeners(RemoteServerError(exception, netty)) + throw exception + } + case e: GeneralSecurityException ⇒ { + val exception = new RemoteTransportException("Server SSL connection could not be established because SSL context could not be constructed", e) + netty.notifyListeners(RemoteServerError(exception, netty)) + throw exception + } + } + } +} diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index ace45677f1..789cc71b6c 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -5,7 
+5,6 @@ package akka.remote.netty import java.net.InetSocketAddress import java.util.concurrent.Executors -import java.io.FileNotFoundException import scala.Option.option2Iterable import org.jboss.netty.bootstrap.ServerBootstrap import org.jboss.netty.channel.ChannelHandler.Sharable @@ -19,11 +18,6 @@ import akka.actor.Address import java.net.InetAddress import akka.actor.ActorSystemImpl import org.jboss.netty.channel._ -import org.jboss.netty.handler.ssl.SslHandler -import java.security.{ SecureRandom, KeyStore, GeneralSecurityException } -import javax.net.ssl.{ KeyManagerFactory, SSLContext } -import java.io.FileInputStream -import akka.event.{ LoggingAdapter, Logging } private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { @@ -31,8 +25,6 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { val ip = InetAddress.getByName(settings.Hostname) - lazy val log = Logging(netty.system, "NettyRemoteServer(" + ip + ")") - private val factory = settings.UseDispatcherForIO match { case Some(id) ⇒ @@ -47,7 +39,7 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { private val bootstrap = { val b = new ServerBootstrap(factory) - b.setPipelineFactory(netty.createPipeline(new RemoteServerHandler(openChannels, netty), false)) + b.setPipelineFactory(netty.createPipeline(new RemoteServerHandler(openChannels, netty), withTimeout = false, isClient = false)) b.setOption("backlog", settings.Backlog) b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) @@ -87,81 +79,6 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { } } -class RemoteServerPipelineFactory( - val openChannels: ChannelGroup, - val executionHandler: ExecutionHandler, - val netty: NettyRemoteTransport, - val log: LoggingAdapter) extends ChannelPipelineFactory { - - import netty.settings - - def initTLS(keyStorePath: String, keyStorePassword: String): Option[SSLContext] = { - if (keyStorePath != null && keyStorePassword 
!= null) { - try { - val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) - val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) - val stream = new FileInputStream(keyStorePath) - keyStore.load(stream, keyStorePassword.toCharArray) - factory.init(keyStore, keyStorePassword.toCharArray) - val sslContext = SSLContext.getInstance(settings.SSLProtocol.get) - sslContext.init(factory.getKeyManagers, null, new SecureRandom()) - Some(sslContext) - } catch { - case e: FileNotFoundException ⇒ { - log.error(e, "TLS connection could not be established because keystore could not be loaded") - None - } - case e: GeneralSecurityException ⇒ { - log.error(e, "TLS connection could not be established") - None - } - } - } else { - log.error("TLS connection could not be established because key store details are missing") - None - } - } - - def getSSLHandler_? : Option[SslHandler] = { - val sslContext: Option[SSLContext] = { - if (settings.EnableSSL) { - log.debug("SSL is enabled, initialising...") - initTLS(settings.SSLKeyStore.get, settings.SSLKeyStorePassword.get) - } else { - None - } - } - if (sslContext.isDefined) { - log.debug("Using SSL context to create SSLEngine...") - val sslEngine = sslContext.get.createSSLEngine - sslEngine.setUseClientMode(false) - sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) - Some(new SslHandler(sslEngine)) - } else { - None - } - } - - def getPipeline: ChannelPipeline = { - val sslHandler = getSSLHandler_? 
- val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) - val lenPrep = new LengthFieldPrepender(4) - val messageDec = new RemoteMessageDecoder - val messageEnc = new RemoteMessageEncoder(netty) - - val authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil - val remoteServer = new RemoteServerHandler(openChannels, netty) - val stages: List[ChannelHandler] = lenDec :: messageDec :: lenPrep :: messageEnc :: executionHandler :: authenticator ::: remoteServer :: Nil - if (sslHandler.isDefined) { - log.debug("Creating pipeline with SSL handler...") - new StaticChannelPipeline(sslHandler.get :: stages: _*) - } else { - log.debug("Creating pipeline without SSL handler...") - new StaticChannelPipeline(stages: _*) - } - } -} - @ChannelHandler.Sharable private[akka] class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler { val authenticated = new AnyRef diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index d753a743b6..5d829127f8 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -73,44 +73,44 @@ private[akka] class NettySettings(config: Config, val systemName: String) { case sz ⇒ sz } - val SSLKeyStore = getString("ssl-key-store") match { + val SSLKeyStore = getString("ssl.key-store") match { case "" ⇒ None case keyStore ⇒ Some(keyStore) } - val SSLTrustStore = getString("ssl-trust-store") match { + val SSLTrustStore = getString("ssl.trust-store") match { case "" ⇒ None case trustStore ⇒ Some(trustStore) } - val SSLKeyStorePassword = getString("ssl-key-store-password") match { + val SSLKeyStorePassword = getString("ssl.key-store-password") match { case "" ⇒ None case password ⇒ Some(password) } - val SSLTrustStorePassword = 
getString("ssl-trust-store-password") match { + val SSLTrustStorePassword = getString("ssl.trust-store-password") match { case "" ⇒ None case password ⇒ Some(password) } - val SSLSupportedAlgorithms = getStringList("ssl-supported-algorithms") + val SSLSupportedAlgorithms = getStringList("ssl.supported-algorithms") - val SSLProtocol = getString("ssl-protocol") match { + val SSLProtocol = getString("ssl.protocol") match { case "" ⇒ None case protocol ⇒ Some(protocol) } val EnableSSL = { - val enableSSL = getBoolean("enable-ssl") + val enableSSL = getBoolean("ssl.enable") if (enableSSL) { if (SSLProtocol.isEmpty) throw new ConfigurationException( - "Configuration option 'akka.remote.netty.enable-ssl is turned on but no protocol is defined in 'akka.remote.netty.ssl-protocol'.") + "Configuration option 'akka.remote.netty.ssl.enable is turned on but no protocol is defined in 'akka.remote.netty.ssl.protocol'.") if (SSLKeyStore.isEmpty && SSLTrustStore.isEmpty) throw new ConfigurationException( - "Configuration option 'akka.remote.netty.enable-ssl is turned on but no key/trust store is defined in 'akka.remote.netty.ssl-key-store' / 'akka.remote.netty.ssl-trust-store'.") + "Configuration option 'akka.remote.netty.ssl.enable is turned on but no key/trust store is defined in 'akka.remote.netty.ssl.key-store' / 'akka.remote.netty.ssl.trust-store'.") if (SSLKeyStore.isDefined && SSLKeyStorePassword.isEmpty) throw new ConfigurationException( - "Configuration option 'akka.remote.netty.ssl-key-store' is defined but no key-store password is defined in 'akka.remote.netty.ssl-key-store-password'.") + "Configuration option 'akka.remote.netty.ssl.key-store' is defined but no key-store password is defined in 'akka.remote.netty.ssl.key-store-password'.") if (SSLTrustStore.isDefined && SSLTrustStorePassword.isEmpty) throw new ConfigurationException( - "Configuration option 'akka.remote.netty.ssl-trust-store' is defined but no trust-store password is defined in 
'akka.remote.netty.ssl-trust-store-password'.") + "Configuration option 'akka.remote.netty.ssl.trust-store' is defined but no trust-store password is defined in 'akka.remote.netty.ssl.trust-store-password'.") } enableSSL } From 8bdb8702463d036e4a4353f899ec3f710df9fd94 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 13:24:38 +0200 Subject: [PATCH 224/538] Implementing death watch for PromiseActorRef --- .../akka/pattern/AbstractPromiseActorRef.java | 2 + .../scala/akka/actor/ActorRefProvider.scala | 12 ++-- .../main/scala/akka/pattern/AskSupport.scala | 63 ++++++++++++++----- .../akka/pattern/GracefulStopSupport.scala | 16 ++--- 4 files changed, 62 insertions(+), 31 deletions(-) diff --git a/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java b/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java index e21d58204e..bb0f03861c 100644 --- a/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java +++ b/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java @@ -8,10 +8,12 @@ import akka.util.Unsafe; final class AbstractPromiseActorRef { final static long stateOffset; + final static long watchedByOffset; static { try { stateOffset = Unsafe.instance.objectFieldOffset(PromiseActorRef.class.getDeclaredField("_stateDoNotCallMeDirectly")); + watchedByOffset = Unsafe.instance.objectFieldOffset(PromiseActorRef.class.getDeclaredField("_watchedByDoNotCallMeDirectly")); } catch(Throwable t){ throw new ExceptionInInitializerError(t); } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index ca971de40e..d4e9595f62 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -8,8 +8,8 @@ import java.util.concurrent.atomic.AtomicLong import akka.dispatch._ import akka.routing._ import akka.AkkaException -import akka.util.{ Switch, Helpers } import 
akka.event._ +import akka.util.{ NonFatal, Switch, Helpers } /** * Interface for all ActorRef providers to implement. @@ -373,9 +373,9 @@ class LocalActorRefProvider( override def sendSystemMessage(message: SystemMessage): Unit = stopped ifOff { message match { - case Supervise(child) ⇒ // TODO register child in some map to keep track of it and enable shutdown after all dead - case ChildTerminated(child) ⇒ stop() - case _ ⇒ log.error(this + " received unexpected system message [" + message + "]") + case Supervise(_) ⇒ // TODO register child in some map to keep track of it and enable shutdown after all dead + case ChildTerminated(_) ⇒ stop() + case _ ⇒ log.error(this + " received unexpected system message [" + message + "]") } } } @@ -403,8 +403,8 @@ class LocalActorRefProvider( def receive = { case Terminated(_) ⇒ context.stop(self) - case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case NonFatal(e) ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case NonFatal(e) ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! 
DeadLetter(m, sender, self) } diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 2837bd6546..47154f7853 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -4,11 +4,10 @@ package akka.pattern import java.util.concurrent.TimeoutException -import akka.util.Timeout import annotation.tailrec -import akka.util.Unsafe import akka.actor._ import akka.dispatch._ +import akka.util.{ NonFatal, Timeout, Unsafe } /** * This is what is used to complete a Future that is returned from an ask/? call, @@ -163,6 +162,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide extends MinimalActorRef { import PromiseActorRef._ import AbstractPromiseActorRef.stateOffset + import AbstractPromiseActorRef.watchedByOffset /** * As an optimization for the common (local) case we only register this PromiseActorRef @@ -179,14 +179,43 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @volatile private[this] var _stateDoNotCallMeDirectly: AnyRef = _ - @inline - private def state: AnyRef = Unsafe.instance.getObjectVolatile(this, stateOffset) + @volatile + private[this] var _watchedByDoNotCallMeDirectly: Set[ActorRef] = ActorCell.emptyActorRefSet @inline - private def updateState(oldState: AnyRef, newState: AnyRef): Boolean = Unsafe.instance.compareAndSwapObject(this, stateOffset, oldState, newState) + private[this] def watchedBy: Set[ActorRef] = Unsafe.instance.getObjectVolatile(this, watchedByOffset).asInstanceOf[Set[ActorRef]] @inline - private def setState(newState: AnyRef): Unit = Unsafe.instance.putObjectVolatile(this, stateOffset, newState) + private[this] def updateWatchedBy(oldWatchedBy: Set[ActorRef], newWatchedBy: Set[ActorRef]): Boolean = + Unsafe.instance.compareAndSwapObject(this, watchedByOffset, oldWatchedBy, newWatchedBy) + + @tailrec // Returns false if the 
Promise is already completed + private[this] final def addWatcher(watcher: ActorRef): Boolean = watchedBy match { + case null ⇒ false + case other ⇒ if (updateWatchedBy(other, other + watcher)) true else addWatcher(watcher) + } + + @tailrec + private[this] final def remWatcher(watcher: ActorRef): Unit = watchedBy match { + case null ⇒ () + case other ⇒ if (!updateWatchedBy(other, other - watcher)) remWatcher(watcher) + } + + @tailrec + private[this] final def clearWatchers(): Set[ActorRef] = watchedBy match { + case null ⇒ ActorCell.emptyActorRefSet + case other ⇒ if (!updateWatchedBy(other, null)) clearWatchers() else other + } + + @inline + private[this] def state: AnyRef = Unsafe.instance.getObjectVolatile(this, stateOffset) + + @inline + private[this] def updateState(oldState: AnyRef, newState: AnyRef): Boolean = + Unsafe.instance.compareAndSwapObject(this, stateOffset, oldState, newState) + + @inline + private[this] def setState(newState: AnyRef): Unit = Unsafe.instance.putObjectVolatile(this, stateOffset, newState) override def getParent: InternalActorRef = provider.tempContainer @@ -230,8 +259,8 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide override def sendSystemMessage(message: SystemMessage): Unit = message match { case _: Terminate ⇒ stop() - case Watch(watchee, watcher) ⇒ //FIXME IMPLEMENT - case Unwatch(watchee, watcher) ⇒ //FIXME IMPLEMENT + case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this && !addWatcher(watcher)) watcher ! 
Terminated(watchee)(stopped = true) + case Unwatch(watchee, watcher) ⇒ if (watchee == this && watcher != this) remWatcher(watcher) case _ ⇒ } @@ -242,20 +271,20 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @tailrec override def stop(): Unit = { - def ensureCompleted(): Unit = if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) + def ensureCompleted(): Unit = { + if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) + val watchers = clearWatchers() + if (!watchers.isEmpty) { + val termination = Terminated(this)(stopped = true) + watchers foreach { w ⇒ try w.tell(termination, this) catch { case NonFatal(t) ⇒ /* FIXME LOG THIS */ } } + } + } state match { case null ⇒ // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either if (updateState(null, Stopped)) ensureCompleted() else stop() case p: ActorPath ⇒ if (updateState(p, StoppedWithPath(p))) { - try { - ensureCompleted() - val termination = Terminated(this)(stopped = true) - // FIXME watchedBy foreach { w => w.tell(termination) } - // FIXME watching foreach { w.sendSystemMessage(Unwatch(w, self)) } - } finally { - provider.unregisterTempActor(p) - } + try ensureCompleted() finally provider.unregisterTempActor(p) } else stop() case Stopped | _: StoppedWithPath ⇒ // already stopped case Registering ⇒ stop() // spin until registration is completed before stopping diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index d1e7fab327..35004e637d 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -34,18 +34,18 @@ trait GracefulStopSupport { * is completed with failure [[akka.pattern.AskTimeoutException]]. 
*/ def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { - if (target.isTerminated) { - Promise.successful(true) - } else system match { + if (target.isTerminated) Promise.successful(true) + else system match { case e: ExtendedActorSystem ⇒ + val internalTarget = target.asInstanceOf[InternalActorRef] val ref = PromiseActorRef(e.provider, Timeout(timeout)) - ref.sendSystemMessage(Watch(target, ref)) - ref.result onComplete { - case Right(Terminated(`target`)) ⇒ () // Ignore - case _ ⇒ ref.sendSystemMessage(Unwatch(target, ref)) // Just making sure we're not leaking here + internalTarget.sendSystemMessage(Watch(target, ref)) + val result = ref.result map { + case Terminated(`target`) ⇒ true + case _ ⇒ internalTarget.sendSystemMessage(Unwatch(target, ref)); false // Just making sure we're not leaking here } target ! PoisonPill - ref.result map { case Terminated(`target`) ⇒ true } + result case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") } } From f4a3bdff336ff3580e22ca949098ab648a538ecf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 13:29:48 +0200 Subject: [PATCH 225/538] Adding PossiblyHarmful to RemoteTransport untrusted mode filtering --- akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- .../src/main/scala/akka/remote/RemoteTransport.scala | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index cb34f82b78..c8962e819f 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -59,7 +59,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. 
*/ -case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty stopped: Boolean) extends PossiblyHarmful +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty stopped: Boolean) abstract class ReceiveTimeout extends PossiblyHarmful diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 7cb622ab00..249c23e968 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -287,10 +287,9 @@ abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: Re case l: LocalRef ⇒ if (provider.remoteSettings.LogReceive) log.debug("received local message {}", remoteMessage) remoteMessage.payload match { - case _: SystemMessage if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound system message") - case _: AutoReceivedMessage if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound AutoReceivedMessage") - case msg: SystemMessage ⇒ l.sendSystemMessage(msg) - case msg ⇒ l.!(msg)(remoteMessage.sender) + case msg: PossiblyHarmful if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound PossiblyHarmful message of type {}", msg.getClass) + case msg: SystemMessage ⇒ l.sendSystemMessage(msg) + case msg ⇒ l.!(msg)(remoteMessage.sender) } case r: RemoteRef ⇒ if (provider.remoteSettings.LogReceive) log.debug("received remote-destined message {}", remoteMessage) From 6c1ca7fcdbbb4ecc51cfe71d9813f7cca41d37f1 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 14:26:09 +0200 Subject: [PATCH 226/538] Unborkening the top-level surveillance --- .../src/main/scala/akka/actor/ActorCell.scala | 8 ++++---- .../scala/akka/actor/ActorRefProvider.scala | 20 +++++++------------ 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala 
b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 4e8a54d7fb..1f5fa72c68 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -462,7 +462,7 @@ private[akka] class ActorCell( override final def watch(subject: ActorRef): ActorRef = subject match { case a: InternalActorRef ⇒ - if (!watching.contains(a)) { + if (a != self && !watching.contains(a)) { a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ watching += a } @@ -471,7 +471,7 @@ private[akka] class ActorCell( override final def unwatch(subject: ActorRef): ActorRef = subject match { case a: InternalActorRef ⇒ - if (watching.contains(a)) { + if (a != self && watching.contains(a)) { a.sendSystemMessage(Unwatch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ watching -= a } @@ -582,7 +582,7 @@ private[akka] class ActorCell( } } else if (!watcheeSelf && watcherSelf) { watch(watchee) - } else println("addNOOOOOOOOO: " + watchee + " => " + watcher) + } } def remWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { @@ -596,7 +596,7 @@ private[akka] class ActorCell( } } else if (!watcheeSelf && watcherSelf) { unwatch(watchee) - } else println("remNOOOOOOOOO: " + watchee + " => " + watcher) + } } def terminate() { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index d4e9595f62..3d9563b987 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -474,18 +474,10 @@ class LocalActorRefProvider( lazy val rootGuardian: InternalActorRef = new LocalActorRef(system, guardianProps, theOneWhoWalksTheBubblesOfSpaceTime, rootPath, true) { - object Extra { - def unapply(s: String): Option[InternalActorRef] = extraNames.get(s) - } - override def getParent: InternalActorRef = this - - override def 
getSingleChild(name: String): InternalActorRef = { - name match { - case "temp" ⇒ tempContainer - case Extra(e) ⇒ e - case _ ⇒ super.getSingleChild(name) - } + override def getSingleChild(name: String): InternalActorRef = name match { + case "temp" ⇒ tempContainer + case other ⇒ extraNames.get(other).getOrElse(super.getSingleChild(other)) } } @@ -510,8 +502,10 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system // chain death watchers so that killing guardian stops the application - guardian.sendSystemMessage(Watch(systemGuardian, guardian)) - rootGuardian.sendSystemMessage(Watch(rootGuardian, systemGuardian)) + //guardian.sendSystemMessage(Watch(systemGuardian, guardian)) + //rootGuardian.sendSystemMessage(Watch(rootGuardian, systemGuardian)) + guardian.sendSystemMessage(Watch(guardian, systemGuardian)) + rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) eventStream.startDefaultLoggers(_system) } From d42109ff5f04a5e266a7b671bd67ef213985aafc Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 14:43:18 +0200 Subject: [PATCH 227/538] Adding warning logging for erronous settings --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 1f5fa72c68..23cd796ad2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -582,6 +582,8 @@ private[akka] class ActorCell( } } else if (!watcheeSelf && watcherSelf) { watch(watchee) + } else { + system.eventStream.publish(Warning(self.path.toString, clazz(actor), "BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, self))) } } @@ -596,6 +598,8 @@ private[akka] class ActorCell( } } else if (!watcheeSelf && watcherSelf) { unwatch(watchee) + } else { + system.eventStream.publish(Warning(self.path.toString, clazz(actor), "BUG: 
illegal Unwatch(%s,%s) for %s".format(watchee, watcher, self))) } } From c64db4b00d1298708d2d7ad90c1c7be9c42d5bc4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 15:37:29 +0200 Subject: [PATCH 228/538] Code formatting --- akka-actor/src/main/scala/akka/pattern/AskSupport.scala | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 47154f7853..3805521ae4 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -283,9 +283,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide case null ⇒ // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either if (updateState(null, Stopped)) ensureCompleted() else stop() case p: ActorPath ⇒ - if (updateState(p, StoppedWithPath(p))) { - try ensureCompleted() finally provider.unregisterTempActor(p) - } else stop() + if (updateState(p, StoppedWithPath(p))) { try ensureCompleted() finally provider.unregisterTempActor(p) } else stop() case Stopped | _: StoppedWithPath ⇒ // already stopped case Registering ⇒ stop() // spin until registration is completed before stopping } From cd8e0ab3b1c8a62ca44b0fba6e932dbcb9ca3374 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 30 May 2012 17:16:46 +0200 Subject: [PATCH 229/538] Change to ThreadLocalRandom, see #2123 --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index c21bcf50c2..78bd91c8e1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -18,7 +18,7 @@ import akka.ConfigurationException import 
java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } import java.util.concurrent.TimeUnit._ import java.util.concurrent.TimeoutException -import java.security.SecureRandom +import akka.jsr166y.ThreadLocalRandom import java.lang.management.ManagementFactory import javax.management._ @@ -402,7 +402,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private val isRunning = new AtomicBoolean(true) private val log = Logging(system, "Node") - private val random = SecureRandom.getInstance("SHA1PRNG") private val mBeanServer = ManagementFactory.getPlatformMBeanServer private val clusterMBeanName = new ObjectName("akka:type=Cluster") @@ -842,7 +841,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // 2. gossip to unreachable members if (localUnreachableSize > 0) { val probability: Double = localUnreachableSize / (localMembersSize + 1) - if (random.nextDouble() < probability) gossipToRandomNodeOf(localUnreachableMembers.map(_.address)) + if (ThreadLocalRandom.current.nextDouble() < probability) gossipToRandomNodeOf(localUnreachableMembers.map(_.address)) } // 3. 
gossip to a deputy nodes for facilitating partition healing @@ -851,7 +850,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (localMembersSize == 0) gossipToRandomNodeOf(deputies) else { val probability = 1.0 / localMembersSize + localUnreachableSize - if (random.nextDouble() <= probability) gossipToRandomNodeOf(deputies) + if (ThreadLocalRandom.current.nextDouble() <= probability) gossipToRandomNodeOf(deputies) } } } @@ -1052,7 +1051,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take nrOfDeputyNodes filter (_ != remoteAddress) - private def selectRandomNode(addresses: Iterable[Address]): Address = addresses.toSeq(random nextInt addresses.size) + private def selectRandomNode(addresses: Iterable[Address]): Address = addresses.toSeq(ThreadLocalRandom.current nextInt addresses.size) private def isSingletonCluster(currentState: State): Boolean = currentState.latestGossip.members.size == 1 From e469561543c2af1aea1d33ccd77b112d136c3f55 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 30 May 2012 17:17:09 +0200 Subject: [PATCH 230/538] Minor corrections and cleanup --- .../cluster/ClientDowningNodeThatIsUpSpec.scala | 3 ++- ...LeaderDowningNodeThatIsUnreachableSpec.scala | 8 ++++---- .../akka/cluster/MultiNodeClusterSpec.scala | 17 +++++++++-------- .../scala/akka/cluster/NodeMembershipSpec.scala | 4 ++-- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index ee798d5a8a..6b0bbae22e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -43,7 +43,6 @@ class 
ClientDowningNodeThatIsUpSpec testConductor.enter("all-up") // mark 'third' node as DOWN - testConductor.removeNode(third) cluster.down(thirdAddress) testConductor.enter("down-third-node") @@ -56,6 +55,8 @@ class ClientDowningNodeThatIsUpSpec cluster.join(node(first).address) awaitUpConvergence(numberOfMembers = 4) testConductor.enter("all-up") + testConductor.enter("down-third-node") + testConductor.enter("await-completion") } runOn(second, fourth) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index cda794fe21..0424c6d399 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -57,7 +57,7 @@ class LeaderDowningNodeThatIsUnreachableSpec // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- - awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds.dilated) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) testConductor.enter("await-completion") } @@ -77,7 +77,7 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("down-fourth-node") - awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds.dilated) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) testConductor.enter("await-completion") } } @@ -97,7 +97,7 @@ class LeaderDowningNodeThatIsUnreachableSpec // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- - awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds.dilated) + awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) 
testConductor.enter("await-completion") } @@ -108,7 +108,7 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") } - runOn(second, third) { + runOn(third) { cluster.join(node(first).address) awaitUpConvergence(numberOfMembers = 3) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index ae9d3e9fb7..cb679c12b7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -60,14 +60,15 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ def awaitUpConvergence( numberOfMembers: Int, canNotBePartOfMemberRing: Seq[Address] = Seq.empty[Address], - timeout: Duration = 10.seconds.dilated): Unit = { - awaitCond(cluster.latestGossip.members.size == numberOfMembers, timeout) - awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up), timeout) - awaitCond(cluster.convergence.isDefined, timeout) - if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set - awaitCond( - canNotBePartOfMemberRing forall (address ⇒ !(cluster.latestGossip.members exists (_.address == address))), - timeout) + timeout: Duration = 20.seconds): Unit = { + within(timeout) { + awaitCond(cluster.latestGossip.members.size == numberOfMembers) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitCond(cluster.convergence.isDefined) + if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set + awaitCond( + canNotBePartOfMemberRing forall (address ⇒ !(cluster.latestGossip.members exists (_.address == address)))) + } } def roleOfLeader(nodesInCluster: Seq[RoleName]): RoleName = { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 312ac6dbe8..fecb53c898 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -37,7 +37,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp "A set of connected cluster systems" must { - "(when two systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { + "(when two nodes) start gossiping to each other so that both nodes gets the same gossip info" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { @@ -57,7 +57,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp } - "(when three systems) start gossiping to each other so that both systems gets the same gossip info" taggedAs LongRunningTest in { + "(when three nodes) start gossiping to each other so that all nodes gets the same gossip info" taggedAs LongRunningTest in { runOn(third) { cluster.join(firstAddress) From 7257dc5d86171d4f922e748a26a209aa29f4db3e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 17:37:44 +0200 Subject: [PATCH 231/538] Minor code formatting --- .../src/main/scala/akka/actor/ActorCell.scala | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 23cd796ad2..3db70d5735 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -727,10 +727,9 @@ private[akka] class ActorCell( val terminated = Terminated(self)(stopped = true) try { watchedBy foreach { - watcher ⇒ - try watcher.tell(terminated, self) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } + watcher ⇒ try watcher.tell(terminated, self) catch { + case NonFatal(t) ⇒ 
system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } } } finally watchedBy = emptyActorRefSet } @@ -738,10 +737,9 @@ private[akka] class ActorCell( if (!watching.isEmpty) { try { watching foreach { - case watchee: InternalActorRef ⇒ - try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } + case watchee: InternalActorRef ⇒ try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } } } finally watching = emptyActorRefSet } From 36385f981b91cc7418567f5da25a55557ab55410 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 30 May 2012 19:32:10 +0200 Subject: [PATCH 232/538] Add missing barrier --- .../scala/akka/cluster/JoinTwoClustersSpec.scala | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 1017c8a33a..c736018806 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -4,7 +4,6 @@ package akka.cluster -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -29,15 +28,11 @@ class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec -abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with 
MultiNodeClusterSpec with ImplicitSender { import JoinTwoClustersMultiJvmSpec._ override def initialParticipants = 6 - after { - testConductor.enter("after") - } - lazy val a1Address = node(a1).address lazy val b1Address = node(b1).address lazy val c1Address = node(c1).address @@ -67,6 +62,8 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm assertLeader(b1, b2) assertLeader(c1, c2) + testConductor.enter("two-members") + runOn(b2) { cluster.join(a1Address) } @@ -78,6 +75,8 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm assertLeader(a1, a2, b1, b2) assertLeader(c1, c2) + testConductor.enter("four-members") + } "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { @@ -89,6 +88,8 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm awaitUpConvergence(numberOfMembers = 6) assertLeader(a1, a2, b1, b2, c1, c2) + + testConductor.enter("six-members") } } From 90344b657e03cb5576b9959066aa15cc2693aa0f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 23:32:22 +0200 Subject: [PATCH 233/538] Removing doc references to onResult and onTimeout --- akka-docs/java/untyped-actors.rst | 2 +- akka-docs/scala/actors.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index 7df286d7f7..31a0df9674 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -370,7 +370,7 @@ specified as parameter to the ``ask`` method; this will complete the See :ref:`futures-java` for more information on how to await or query a future. -The ``onComplete``, ``onResult``, or ``onTimeout`` methods of the ``Future`` can be +The ``onComplete``, ``onSuccess``, or ``onFailure`` methods of the ``Future`` can be used to register a callback to get a notification when the Future completes. Gives you a way to avoid blocking. 
diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 291d06e567..4a556cf6c2 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -415,7 +415,7 @@ taken from one of the following locations in order of precedence: See :ref:`futures-scala` for more information on how to await or query a future. -The ``onComplete``, ``onResult``, or ``onTimeout`` methods of the ``Future`` can be +The ``onComplete``, ``onSuccess``, or ``onFailure`` methods of the ``Future`` can be used to register a callback to get a notification when the Future completes. Gives you a way to avoid blocking. From 5bc4391e0476eb758259a8a3cb440967898ddd99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 31 May 2012 14:02:36 +0200 Subject: [PATCH 234/538] Make multi-jvm run all tests even when there are failures see #2154 --- project/AkkaBuild.scala | 15 ++++++++++++--- project/plugins.sbt | 2 +- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 4b8f72e424..d687bff9e8 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -7,11 +7,12 @@ package akka import sbt._ import sbt.Keys._ import com.typesafe.sbtmultijvm.MultiJvmPlugin -import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions, multiNodeTest } +import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions, multiNodeExecuteTests } import com.typesafe.sbtscalariform.ScalariformPlugin import com.typesafe.sbtscalariform.ScalariformPlugin.ScalariformKeys import com.typesafe.sbtosgi.OsgiPlugin.{ OsgiKeys, osgiSettings } import java.lang.Boolean.getBoolean +import sbt.Tests import Sphinx.{ sphinxDocs, sphinxHtml, sphinxLatex, sphinxPdf, sphinxPygments, sphinxTags } object AkkaBuild extends Build { @@ -390,9 +391,17 @@ object AkkaBuild extends Build { compileInputs in MultiJvm <<= (compileInputs in MultiJvm) dependsOn 
(ScalariformKeys.format in MultiJvm), ScalariformKeys.preferences in MultiJvm := formattingPreferences, if (multiNodeEnabled) - test in Test <<= ((test in Test), (multiNodeTest in MultiJvm)) map { case x => x } + executeTests in Test <<= ((executeTests in Test), (multiNodeExecuteTests in MultiJvm)) map { + case (tr, mr) => + val r = tr._2 ++ mr._2 + (Tests.overall(r.values), r) + } else - test in Test <<= ((test in Test), (test in MultiJvm)) map { case x => x } + executeTests in Test <<= ((executeTests in Test), (executeTests in MultiJvm)) map { + case (tr, mr) => + val r = tr._2 ++ mr._2 + (Tests.overall(r.values), r) + } ) } diff --git a/project/plugins.sbt b/project/plugins.sbt index 754b9eefa2..59f2154537 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,7 +1,7 @@ resolvers += Classpaths.typesafeResolver -addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-M2") +addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.2.0-M3") addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.4.0") From dfcdbc52219cce102684e4be4c28a8e4eca6120f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 31 May 2012 14:38:44 +0200 Subject: [PATCH 235/538] Boy scouting --- .../src/main/scala/akka/cluster/Cluster.scala | 51 ++++++++----------- 1 file changed, 22 insertions(+), 29 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 78bd91c8e1..a26befb875 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -500,7 +500,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. 
*/ - def shutdown() { + def shutdown(): Unit = { if (isRunning.compareAndSet(true, false)) { log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", remoteAddress) gossipCanceller.cancel() @@ -519,7 +519,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Registers a listener to subscribe to cluster membership changes. */ @tailrec - final def registerListener(listener: MembershipChangeListener) { + final def registerListener(listener: MembershipChangeListener): Unit = { val localState = state.get val newListeners = localState.memberMembershipChangeListeners + listener val newState = localState copy (memberMembershipChangeListeners = newListeners) @@ -530,7 +530,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Unsubscribes to cluster membership changes. */ @tailrec - final def unregisterListener(listener: MembershipChangeListener) { + final def unregisterListener(listener: MembershipChangeListener): Unit = { val localState = state.get val newListeners = localState.memberMembershipChangeListeners - listener val newState = localState copy (memberMembershipChangeListeners = newListeners) @@ -541,7 +541,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Try to join this cluster node with the node specified by 'address'. * A 'Join(thisNodeAddress)' command is sent to the node to join. */ - def join(address: Address) { + def join(address: Address): Unit = { val connection = clusterCommandConnectionFor(address) val command = ClusterAction.Join(remoteAddress) log.info("Cluster Node [{}] - Trying to send JOIN to [{}] through connection [{}]", remoteAddress, address, connection) @@ -551,21 +551,21 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Send command to issue state transition to LEAVING for the node specified by 'address'. 
*/ - def leave(address: Address) { + def leave(address: Address): Unit = { clusterCommandDaemon ! ClusterAction.Leave(address) } /** * Send command to issue state transition to from DOWN to EXITING for the node specified by 'address'. */ - def down(address: Address) { + def down(address: Address): Unit = { clusterCommandDaemon ! ClusterAction.Down(address) } /** * Send command to issue state transition to REMOVED for the node specified by 'address'. */ - def remove(address: Address) { + def remove(address: Address): Unit = { clusterCommandDaemon ! ClusterAction.Remove(address) } @@ -578,7 +578,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * New node joining. */ @tailrec - private[cluster] final def joining(node: Address) { + private[cluster] final def joining(node: Address): Unit = { log.info("Cluster Node [{}] - Node [{}] is JOINING", remoteAddress, node) val localState = state.get @@ -611,28 +611,28 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * State transition to UP. */ - private[cluster] final def up(address: Address) { + private[cluster] final def up(address: Address): Unit = { log.info("Cluster Node [{}] - Marking node [{}] as UP", remoteAddress, address) } /** * State transition to LEAVING. */ - private[cluster] final def leaving(address: Address) { + private[cluster] final def leaving(address: Address): Unit = { log.info("Cluster Node [{}] - Marking node [{}] as LEAVING", remoteAddress, address) } /** * State transition to EXITING. */ - private[cluster] final def exiting(address: Address) { + private[cluster] final def exiting(address: Address): Unit = { log.info("Cluster Node [{}] - Marking node [{}] as EXITING", remoteAddress, address) } /** * State transition to REMOVED. 
*/ - private[cluster] final def removing(address: Address) { + private[cluster] final def removing(address: Address): Unit = { log.info("Cluster Node [{}] - Marking node [{}] as REMOVED", remoteAddress, address) } @@ -644,7 +644,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * to this node and it will then go through the normal JOINING procedure. */ @tailrec - final private[cluster] def downing(address: Address) { + final private[cluster] def downing(address: Address): Unit = { val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members @@ -705,7 +705,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Receive new gossip. */ @tailrec - final private[cluster] def receive(sender: Member, remoteGossip: Gossip) { + final private[cluster] def receive(sender: Member, remoteGossip: Gossip): Unit = { val localState = state.get val localGossip = localState.latestGossip @@ -746,14 +746,9 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } /** - * Joins the pre-configured contact point and retrieves current gossip state. + * Joins the pre-configured contact point. */ - private def autoJoin() = nodeToJoin foreach { address ⇒ - val connection = clusterCommandConnectionFor(address) - val command = ClusterAction.Join(remoteAddress) - log.info("Cluster Node [{}] - Sending [{}] to [{}] through connection [{}]", remoteAddress, command, address, connection) - connection ! command - } + private def autoJoin(): Unit = nodeToJoin foreach join /** * Switches the member status. @@ -793,7 +788,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Gossips latest gossip to an address. 
*/ - private def gossipTo(address: Address) { + private def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) log.debug("Cluster Node [{}] - Gossiping to [{}]", remoteAddress, connection) connection ! GossipEnvelope(self, latestGossip) @@ -818,10 +813,8 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Initates a new round of gossip. */ - private def gossip() { + private def gossip(): Unit = { val localState = state.get - val localGossip = localState.latestGossip - val localMembers = localGossip.members if (!isSingletonCluster(localState) && isAvailable(localState)) { // only gossip if we are a non-singleton cluster and available @@ -860,7 +853,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Reaps the unreachable members (moves them to the 'unreachable' list in the cluster overview) according to the failure detector's verdict. */ @tailrec - final private def reapUnreachableMembers() { + final private def reapUnreachableMembers(): Unit = { val localState = state.get if (!isSingletonCluster(localState) && isAvailable(localState)) { @@ -905,7 +898,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Runs periodic leader actions, such as auto-downing unreachable nodes, assigning partitions etc. */ @tailrec - final private def leaderActions() { + final private def leaderActions(): Unit = { val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members @@ -917,7 +910,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localOverview = localGossip.overview val localSeen = localOverview.seen - val localUnreachableMembers = localGossip.overview.unreachable + val localUnreachableMembers = localOverview.unreachable // Leader actions are as follows: // 1. 
Move JOINING => UP @@ -986,7 +979,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) leaderActions() // recur else { if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners map { _ notify newGossip.members } + newState.memberMembershipChangeListeners foreach { _ notify newGossip.members } } } } From 9d1cbdc60112ea74b328f73e8665af9cba3ae7a0 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 31 May 2012 14:47:43 +0200 Subject: [PATCH 236/538] Singleton cluster convergence, see #2117 --- .../src/main/scala/akka/cluster/Cluster.scala | 7 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 69 +++++++++++++++++++ .../scala/akka/cluster/NodeStartupSpec.scala | 14 +--- akka-docs/cluster/cluster.rst | 10 --- 4 files changed, 75 insertions(+), 25 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index a26befb875..fb3c45bec8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -816,9 +816,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private def gossip(): Unit = { val localState = state.get - if (!isSingletonCluster(localState) && isAvailable(localState)) { - // only gossip if we are a non-singleton cluster and available + if (isSingletonCluster(localState)) { + // gossip to myself + // TODO could perhaps be optimized, no need to gossip to myself when Up? 
+ gossipTo(remoteAddress) + } else if (isAvailable(localState)) { log.debug("Cluster Node [{}] - Initiating new round of gossip", remoteAddress) val localGossip = localState.latestGossip diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala new file mode 100644 index 0000000000..61a9c08ceb --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -0,0 +1,69 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object NodeShutdownMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster { + auto-down = on + failure-detector.threshold = 4 + } + """)). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec +class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec + +abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import NodeShutdownMultiJvmSpec._ + + override def initialParticipants = 2 + + after { + testConductor.enter("after") + } + + "A cluster of 2 nodes" must { + + "not be singleton cluster when joined" taggedAs LongRunningTest in { + // make sure that the node-to-join is started before other join + runOn(first) { + cluster.self + } + testConductor.enter("first-started") + + runOn(second) { + cluster.join(node(first).address) + } + awaitUpConvergence(numberOfMembers = 2) + cluster.isSingletonCluster must be(false) + } + + "become singleton cluster when one node is shutdown" in { + runOn(first) { + val secondAddress = node(second).address + testConductor.shutdown(first, 0) + testConductor.removeNode(first) + awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) + cluster.isSingletonCluster must be(true) + cluster.isLeader must be(true) + } + + } + } + +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index fcbcce746f..b198d1d72d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -37,19 +37,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi "be a singleton cluster when started up" taggedAs LongRunningTest in { runOn(first) { awaitCond(cluster.isSingletonCluster) - // FIXME #2117 singletonCluster should reach convergence - //awaitCond(cluster.convergence.isDefined) - } - } - - "be in 'Joining' phase when started up" taggedAs LongRunningTest in { - runOn(first) { - 
val members = cluster.latestGossip.members - members.size must be(1) - - val joiningMember = members find (_.address == firstAddress) - joiningMember must not be (None) - joiningMember.get.status must be(MemberStatus.Joining) + awaitUpConvergence(numberOfMembers = 1) } } } diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index 231830cecb..a0aca11114 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -81,16 +81,6 @@ can later explicitly send a ``Join`` message to another node to form a N-node cluster. It is also possible to link multiple N-node clusters by ``joining`` them. -Singleton Cluster ------------------ - -If a node does not have a preconfigured contact point to join in the Akka -configuration, then it is considered a singleton cluster (single node cluster) -and will automatically transition from ``joining`` to ``up``. Singleton clusters -can later explicitly send a ``Join`` message to another node to form a N-node -cluster. It is also possible to link multiple N-node clusters by ``joining`` them. 
- - Gossip ------ From 13a93dfb50c4443adc7521959bdb30e3207cc6e1 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 31 May 2012 17:19:49 +0200 Subject: [PATCH 237/538] Fix regression IllegalStateException: Can't find this Member, see #2117 * Gossiping to myself caused the failure detector to mark myself as unreachable * Don't heartbeat for myself failure detector --- .../src/main/scala/akka/cluster/Cluster.scala | 103 +++++++++--------- .../scala/akka/cluster/NodeStartupSpec.scala | 1 + 2 files changed, 53 insertions(+), 51 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index fb3c45bec8..98d0a3f11e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -203,7 +203,7 @@ case class Gossip( } /** - * Marks the gossip as seen by this node (remoteAddress) by updating the address entry in the 'gossip.overview.seen' + * Marks the gossip as seen by this node (selfAddress) by updating the address entry in the 'gossip.overview.seen' * Map with the VectorClock for the new gossip. 
*/ def seen(address: Address): Gossip = { @@ -380,11 +380,11 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val remoteSettings = new RemoteSettings(system.settings.config, system.name) val clusterSettings = new ClusterSettings(system.settings.config, system.name) - val remoteAddress = remote.transport.address + val selfAddress = remote.transport.address val failureDetector = new AccrualFailureDetector( - system, remoteAddress, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) + system, selfAddress, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) - private val vclockNode = VectorClock.Node(remoteAddress.toString) + private val vclockNode = VectorClock.Node(selfAddress.toString) private val periodicTasksInitialDelay = clusterSettings.PeriodicTasksInitialDelay private val gossipFrequency = clusterSettings.GossipFrequency @@ -396,7 +396,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private val autoDown = clusterSettings.AutoDown private val nrOfDeputyNodes = clusterSettings.NrOfDeputyNodes private val nrOfGossipDaemons = clusterSettings.NrOfGossipDaemons - private val nodeToJoin: Option[Address] = clusterSettings.NodeToJoin filter (_ != remoteAddress) + private val nodeToJoin: Option[Address] = clusterSettings.NodeToJoin filter (_ != selfAddress) private val serialization = remote.serialization @@ -406,7 +406,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private val mBeanServer = ManagementFactory.getPlatformMBeanServer private val clusterMBeanName = new ObjectName("akka:type=Cluster") - log.info("Cluster Node [{}] - is starting up...", remoteAddress) + log.info("Cluster Node [{}] - is starting up...", selfAddress) // create superisor for daemons under path "/system/cluster" private val clusterDaemons = { @@ -418,7 +418,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { 
clusterNode ⇒ } private val state = { - val member = Member(remoteAddress, MemberStatus.Joining) + val member = Member(selfAddress, MemberStatus.Joining) val gossip = Gossip(members = SortedSet.empty[Member] + member) + vclockNode // add me as member and update my vector clock new AtomicReference[State](State(gossip)) } @@ -447,15 +447,15 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ createMBean() - log.info("Cluster Node [{}] - has started up successfully", remoteAddress) + log.info("Cluster Node [{}] - has started up successfully", selfAddress) // ====================================================== // ===================== PUBLIC API ===================== // ====================================================== def self: Member = latestGossip.members - .find(_.address == remoteAddress) - .getOrElse(throw new IllegalStateException("Can't find 'this' Member (" + remoteAddress + ") in the cluster membership ring")) + .find(_.address == selfAddress) + .getOrElse(throw new IllegalStateException("Can't find 'this' Member (" + selfAddress + ") in the cluster membership ring")) /** * Latest gossip. 
@@ -472,7 +472,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ def isLeader: Boolean = { val members = latestGossip.members - !members.isEmpty && (remoteAddress == members.head.address) + members.nonEmpty && (selfAddress == members.head.address) } /** @@ -502,7 +502,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ def shutdown(): Unit = { if (isRunning.compareAndSet(true, false)) { - log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", remoteAddress) + log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) gossipCanceller.cancel() failureDetectorReaperCanceller.cancel() leaderActionsCanceller.cancel() @@ -543,8 +543,8 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ def join(address: Address): Unit = { val connection = clusterCommandConnectionFor(address) - val command = ClusterAction.Join(remoteAddress) - log.info("Cluster Node [{}] - Trying to send JOIN to [{}] through connection [{}]", remoteAddress, address, connection) + val command = ClusterAction.Join(selfAddress) + log.info("Cluster Node [{}] - Trying to send JOIN to [{}] through connection [{}]", selfAddress, address, connection) connection ! 
command } @@ -579,7 +579,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ @tailrec private[cluster] final def joining(node: Address): Unit = { - log.info("Cluster Node [{}] - Node [{}] is JOINING", remoteAddress, node) + log.info("Cluster Node [{}] - Node [{}] is JOINING", selfAddress, node) val localState = state.get val localGossip = localState.latestGossip @@ -595,13 +595,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val newGossip = localGossip copy (overview = newOverview, members = newMembers) val versionedGossip = newGossip + vclockNode - val seenVersionedGossip = versionedGossip seen remoteAddress + val seenVersionedGossip = versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update else { - failureDetector heartbeat node // update heartbeat in failure detector + if (node != selfAddress) failureDetector heartbeat node + if (convergence(newState.latestGossip).isDefined) { newState.memberMembershipChangeListeners foreach { _ notify newMembers } } @@ -612,28 +613,28 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * State transition to UP. */ private[cluster] final def up(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as UP", remoteAddress, address) + log.info("Cluster Node [{}] - Marking node [{}] as UP", selfAddress, address) } /** * State transition to LEAVING. */ private[cluster] final def leaving(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as LEAVING", remoteAddress, address) + log.info("Cluster Node [{}] - Marking node [{}] as LEAVING", selfAddress, address) } /** * State transition to EXITING. 
*/ private[cluster] final def exiting(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as EXITING", remoteAddress, address) + log.info("Cluster Node [{}] - Marking node [{}] as EXITING", selfAddress, address) } /** * State transition to REMOVED. */ private[cluster] final def removing(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as REMOVED", remoteAddress, address) + log.info("Cluster Node [{}] - Marking node [{}] as REMOVED", selfAddress, address) } /** @@ -658,7 +659,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ localMembers .map { member ⇒ if (member.address == address) { - log.info("Cluster Node [{}] - Marking node [{}] as DOWN", remoteAddress, member.address) + log.info("Cluster Node [{}] - Marking node [{}] as DOWN", selfAddress, member.address) val newMember = member copy (status = MemberStatus.Down) downedMember = Some(newMember) newMember @@ -672,7 +673,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ .filter(_.status != MemberStatus.Down) // no need to DOWN members already DOWN .map { member ⇒ if (member.address == address) { - log.info("Cluster Node [{}] - Marking unreachable node [{}] as DOWN", remoteAddress, member.address) + log.info("Cluster Node [{}] - Marking unreachable node [{}] as DOWN", selfAddress, member.address) member copy (status = MemberStatus.Down) } else member } @@ -691,7 +692,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachablePlusNewlyDownedMembers) // update gossip overview val newGossip = localGossip copy (overview = newOverview, members = newMembers) // update gossip val versionedGossip = newGossip + vclockNode - val newState = localState copy (latestGossip = versionedGossip seen remoteAddress) + val newState = localState copy (latestGossip = versionedGossip seen selfAddress) if 
(!state.compareAndSet(localState, newState)) downing(address) // recur if we fail the update else { @@ -730,14 +731,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ remoteGossip } - val newState = localState copy (latestGossip = winningGossip seen remoteAddress) + val newState = localState copy (latestGossip = winningGossip seen selfAddress) // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) receive(sender, remoteGossip) // recur if we fail the update else { - log.debug("Cluster Node [{}] - Receiving gossip from [{}]", remoteAddress, sender.address) + log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address) - failureDetector heartbeat sender.address // update heartbeat in failure detector + if (sender.address != selfAddress) failureDetector heartbeat sender.address if (convergence(newState.latestGossip).isDefined) { newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } @@ -758,7 +759,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * @return the updated new state with the new member status */ private def switchMemberStatusTo(newStatus: MemberStatus, state: State): State = { - log.info("Cluster Node [{}] - Switching membership status to [{}]", remoteAddress, newStatus) + log.info("Cluster Node [{}] - Switching membership status to [{}]", selfAddress, newStatus) val localSelf = self @@ -770,7 +771,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // change my state in 'gossip.members' val newMembersSet = localMembers map { member ⇒ - if (member.address == remoteAddress) newSelf + if (member.address == selfAddress) newSelf else member } @@ -780,7 +781,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // version my changes val versionedGossip = newGossip + vclockNode - val seenVersionedGossip = versionedGossip seen 
remoteAddress + val seenVersionedGossip = versionedGossip seen selfAddress state copy (latestGossip = seenVersionedGossip) } @@ -790,7 +791,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ private def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) - log.debug("Cluster Node [{}] - Gossiping to [{}]", remoteAddress, connection) + log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection) connection ! GossipEnvelope(self, latestGossip) } @@ -800,10 +801,10 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * @return 'true' if it gossiped to a "deputy" member. */ private def gossipToRandomNodeOf(addresses: Iterable[Address]): Boolean = { - log.debug("Cluster Node [{}] - Selecting random node to gossip to [{}]", remoteAddress, addresses.mkString(", ")) + log.debug("Cluster Node [{}] - Selecting random node to gossip to [{}]", selfAddress, addresses.mkString(", ")) if (addresses.isEmpty) false else { - val peers = addresses filter (_ != remoteAddress) // filter out myself + val peers = addresses filter (_ != selfAddress) // filter out myself val peer = selectRandomNode(peers) gossipTo(peer) deputyNodes exists (peer == _) @@ -819,10 +820,10 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (isSingletonCluster(localState)) { // gossip to myself // TODO could perhaps be optimized, no need to gossip to myself when Up? - gossipTo(remoteAddress) + gossipTo(selfAddress) } else if (isAvailable(localState)) { - log.debug("Cluster Node [{}] - Initiating new round of gossip", remoteAddress) + log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) val localGossip = localState.latestGossip val localMembers = localGossip.members @@ -842,7 +843,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // 3. 
gossip to a deputy nodes for facilitating partition healing val deputies = deputyNodes - if ((!gossipedToDeputy || localMembersSize < 1) && !deputies.isEmpty) { + if ((!gossipedToDeputy || localMembersSize < 1) && deputies.nonEmpty) { if (localMembersSize == 0) gossipToRandomNodeOf(deputies) else { val probability = 1.0 / localMembersSize + localUnreachableSize @@ -870,7 +871,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val newlyDetectedUnreachableMembers = localMembers filterNot { member ⇒ failureDetector.isAvailable(member.address) } - if (!newlyDetectedUnreachableMembers.isEmpty) { // we have newly detected members marked as unavailable + if (newlyDetectedUnreachableMembers.nonEmpty) { // we have newly detected members marked as unavailable val newMembers = localMembers diff newlyDetectedUnreachableMembers val newUnreachableMembers: Set[Member] = localUnreachableMembers ++ newlyDetectedUnreachableMembers @@ -880,14 +881,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // updating vclock and 'seen' table val versionedGossip = newGossip + vclockNode - val seenVersionedGossip = versionedGossip seen remoteAddress + val seenVersionedGossip = versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) reapUnreachableMembers() // recur else { - log.info("Cluster Node [{}] - Marking node(s) as UNREACHABLE [{}]", remoteAddress, newlyDetectedUnreachableMembers.mkString(", ")) + log.info("Cluster Node [{}] - Marking node(s) as UNREACHABLE [{}]", selfAddress, newlyDetectedUnreachableMembers.mkString(", ")) if (convergence(newState.latestGossip).isDefined) { newState.memberMembershipChangeListeners foreach { _ notify newMembers } @@ -906,7 +907,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localGossip = localState.latestGossip val 
localMembers = localGossip.members - val isLeader = !localMembers.isEmpty && (remoteAddress == localMembers.head.address) + val isLeader = localMembers.nonEmpty && (selfAddress == localMembers.head.address) if (isLeader && isAvailable(localState)) { // only run the leader actions if we are the LEADER and available @@ -932,14 +933,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ localMembers map { member ⇒ // 1. Move JOINING => UP if (member.status == MemberStatus.Joining) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", remoteAddress, member.address) + log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) hasChangedState = true member copy (status = MemberStatus.Up) } else member } map { member ⇒ // 2. Move EXITING => REMOVED if (member.status == MemberStatus.Exiting) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", remoteAddress, member.address) + log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", selfAddress, member.address) hasChangedState = true member copy (status = MemberStatus.Removed) } else member @@ -955,7 +956,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ localUnreachableMembers .filter(_.status != MemberStatus.Down) // no need to DOWN members already DOWN .map { member ⇒ - log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", remoteAddress, member.address) + log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address) hasChangedState = true member copy (status = MemberStatus.Down) } @@ -974,7 +975,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val versionedGossip = newGossip + vclockNode // 5. 
Updating the 'seen' table - val seenVersionedGossip = versionedGossip seen remoteAddress + val seenVersionedGossip = versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) @@ -1009,7 +1010,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val views = Set.empty[VectorClock] ++ seen.values if (views.size == 1) { - log.debug("Cluster Node [{}] - Cluster convergence reached", remoteAddress) + log.debug("Cluster Node [{}] - Cluster convergence reached", selfAddress) Some(gossip) } else None } else None @@ -1022,7 +1023,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localOverview = localGossip.overview val localMembers = localGossip.members val localUnreachableMembers = localOverview.unreachable - val isUnreachable = localUnreachableMembers exists { _.address == remoteAddress } + val isUnreachable = localUnreachableMembers exists { _.address == selfAddress } val hasUnavailableMemberStatus = localMembers exists { m ⇒ (m == self) && MemberStatus.isUnavailable(m.status) } isUnreachable || hasUnavailableMemberStatus } @@ -1030,7 +1031,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Looks up and returns the local cluster command connection. */ - private def clusterCommandDaemon = system.actorFor(RootActorPath(remoteAddress) / "system" / "cluster" / "commands") + private def clusterCommandDaemon = system.actorFor(RootActorPath(selfAddress) / "system" / "cluster" / "commands") /** * Looks up and returns the remote cluster command connection for the specific address. @@ -1045,7 +1046,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Gets an Iterable with the addresses of a all the 'deputy' nodes - excluding this node if part of the group. 
*/ - private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take nrOfDeputyNodes filter (_ != remoteAddress) + private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take nrOfDeputyNodes filter (_ != selfAddress) private def selectRandomNode(addresses: Iterable[Address]): Address = addresses.toSeq(ThreadLocalRandom.current nextInt addresses.size) @@ -1074,8 +1075,8 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val unreachable = gossip.overview.unreachable val metaData = gossip.meta "\nMembers:\n\t" + gossip.members.mkString("\n\t") + - { if (!unreachable.isEmpty) "\nUnreachable:\n\t" + unreachable.mkString("\n\t") else "" } + - { if (!metaData.isEmpty) "\nMeta Data:\t" + metaData.toString else "" } + { if (unreachable.nonEmpty) "\nUnreachable:\n\t" + unreachable.mkString("\n\t") else "" } + + { if (metaData.nonEmpty) "\nMeta Data:\t" + metaData.toString else "" } } def getMemberStatus: String = clusterNode.status.toString @@ -1100,7 +1101,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ def shutdown() = clusterNode.shutdown() } - log.info("Cluster Node [{}] - registering cluster JMX MBean [{}]", remoteAddress, clusterMBeanName) + log.info("Cluster Node [{}] - registering cluster JMX MBean [{}]", selfAddress, clusterMBeanName) try { mBeanServer.registerMBean(mbean, clusterMBeanName) } catch { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index b198d1d72d..44682b81f7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -38,6 +38,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi runOn(first) { awaitCond(cluster.isSingletonCluster) 
awaitUpConvergence(numberOfMembers = 1) + cluster.isLeader must be(true) } } } From 5578aef13acee4d7f075e9d65828a4c0810f6efd Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 31 May 2012 17:39:35 +0200 Subject: [PATCH 238/538] Changed loglevel to INFO for debugConfig(false) --- .../src/test/scala/akka/remote/testkit/MultiNodeSpec.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 1745d15b61..35a9cc14e7 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -44,7 +44,7 @@ abstract class MultiNodeConfig { /** * Include for verbose debug logging - * @param on when `true` debug Config is returned, otherwise empty Config + * @param on when `true` debug Config is returned, otherwise config with info logging */ def debugConfig(on: Boolean): Config = if (on) @@ -59,7 +59,8 @@ abstract class MultiNodeConfig { fsm = on } """) - else ConfigFactory.empty + else + ConfigFactory.parseString("akka.loglevel = INFO") /** * Construct a RoleName and return it, to be used as an identifier in the From 27dbe85a70a4cd6da131cc6911fdc78d7b17f117 Mon Sep 17 00:00:00 2001 From: viktorklang Date: Thu, 31 May 2012 21:14:19 +0300 Subject: [PATCH 239/538] Correcting typo --- akka-docs/java/typed-actors.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-docs/java/typed-actors.rst b/akka-docs/java/typed-actors.rst index 90bdc5616c..6ad870b309 100644 --- a/akka-docs/java/typed-actors.rst +++ b/akka-docs/java/typed-actors.rst @@ -99,7 +99,7 @@ Methods returning: * ``void`` will be dispatched with ``fire-and-forget`` semantics, exactly like ``ActorRef.tell`` * ``akka.dispatch.Future`` will use ``send-request-reply`` semantics, exactly like ``ActorRef.ask`` * ``scala.Option`` or 
``akka.japi.Option`` will use ``send-request-reply`` semantics, but *will* block to wait for an answer, - and return None if no answer was produced within the timout, or scala.Some/akka.japi.Some containing the result otherwise. + and return None if no answer was produced within the timeout, or scala.Some/akka.japi.Some containing the result otherwise. Any exception that was thrown during this call will be rethrown. * Any other type of value will use ``send-request-reply`` semantics, but *will* block to wait for an answer, throwing ``java.util.concurrent.TimeoutException`` if there was a timeout or rethrow any exception that was thrown during this call. From 346f8be047e647505c5fc40735c37e2efa68663a Mon Sep 17 00:00:00 2001 From: viktorklang Date: Thu, 31 May 2012 21:14:43 +0300 Subject: [PATCH 240/538] Correcting typo --- akka-docs/scala/typed-actors.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-docs/scala/typed-actors.rst b/akka-docs/scala/typed-actors.rst index 349b574888..bd7d92f924 100644 --- a/akka-docs/scala/typed-actors.rst +++ b/akka-docs/scala/typed-actors.rst @@ -99,7 +99,7 @@ Methods returning: * ``Unit`` will be dispatched with ``fire-and-forget`` semantics, exactly like ``ActorRef.tell`` * ``akka.dispatch.Future[_]`` will use ``send-request-reply`` semantics, exactly like ``ActorRef.ask`` * ``scala.Option[_]`` or ``akka.japi.Option`` will use ``send-request-reply`` semantics, but *will* block to wait for an answer, - and return None if no answer was produced within the timout, or scala.Some/akka.japi.Some containing the result otherwise. + and return None if no answer was produced within the timeout, or scala.Some/akka.japi.Some containing the result otherwise. Any exception that was thrown during this call will be rethrown. 
* Any other type of value will use ``send-request-reply`` semantics, but *will* block to wait for an answer, throwing ``java.util.concurrent.TimeoutException`` if there was a timeout or rethrow any exception that was thrown during this call. From fca5b9afbdfbdce957b18c243ce8be2371f7704a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 31 May 2012 21:58:24 +0200 Subject: [PATCH 241/538] #2166 - Adding link to Kryo serialization by Roman Levenstein --- akka-docs/java/serialization.rst | 3 +++ akka-docs/scala/serialization.rst | 3 +++ 2 files changed, 6 insertions(+) diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index d9aff609d8..b973a1d18c 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -185,3 +185,6 @@ External Akka Serializers `Akka-quickser by Roman Levenstein `_ + + +`Akka-kryo by Roman Levenstein `_ diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index 404847affc..10283b441f 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -192,3 +192,6 @@ External Akka Serializers `Akka-quickser by Roman Levenstein `_ + + +`Akka-kryo by Roman Levenstein `_ From 85c263e07782feb5c77e1be34adfdeff4ce1c5dd Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 31 May 2012 22:17:34 +0200 Subject: [PATCH 242/538] Add missing long-running tag --- .../src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 61a9c08ceb..10d2daeeac 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -53,7 +53,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) cluster.isSingletonCluster must be(false) } 
- "become singleton cluster when one node is shutdown" in { + "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { runOn(first) { val secondAddress = node(second).address testConductor.shutdown(first, 0) From 205b8ee7c13ad570429492081d6277e7c0882f69 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Thu, 31 May 2012 22:53:15 +0200 Subject: [PATCH 243/538] Initial stab at a Blueprint namespace handler --- .../additional/code/osgi/Activator.scala | 16 +++ akka-docs/additional/code/osgi/blueprint.xml | 10 ++ akka-docs/additional/osgi.rst | 17 +++ .../blueprint/akka-namespacehandler.xml | 15 +++ .../akka/osgi/blueprint/aries/akka.xsd | 67 ++++++++++++ .../akka/osgi/ActorSystemActivator.scala | 39 +------ .../akka/osgi/OsgiActorSystemFactory.scala | 56 ++++++++++ .../BlueprintActorSystemFactory.scala | 34 ++++++ .../blueprint/aries/NamespaceHandler.scala | 100 ++++++++++++++++++ .../impl/BundleDelegatingClassLoader.scala | 74 +++++++++++++ .../akka/osgi/blueprint/aries/simple.xml | 9 ++ .../akka/osgi/ActorSystemActivatorTest.scala | 3 - project/AkkaBuild.scala | 4 +- 13 files changed, 406 insertions(+), 38 deletions(-) create mode 100644 akka-docs/additional/code/osgi/Activator.scala create mode 100644 akka-docs/additional/code/osgi/blueprint.xml create mode 100644 akka-osgi/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml create mode 100644 akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd create mode 100644 akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala create mode 100644 akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala create mode 100644 akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala create mode 100644 akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala create mode 100644 akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml diff --git a/akka-docs/additional/code/osgi/Activator.scala 
b/akka-docs/additional/code/osgi/Activator.scala new file mode 100644 index 0000000000..0e3a5c82ee --- /dev/null +++ b/akka-docs/additional/code/osgi/Activator.scala @@ -0,0 +1,16 @@ +import akka.actor.{Props, ActorSystem} +import akka.osgi.ActorSystemActivator +import org.apache.servicemix.examples.akka.Listener +import org.apache.servicemix.examples.akka.Master + +//#Activator +class Activator extends ActorSystemActivator("PiSystem") { + + def configure(system: ActorSystem) { + val listener = system.actorOf(Props[Listener], name = "listener") + val master = system.actorOf(Props(new Master(4, 10000, 10000, listener)), name = "master") + master ! Calculate + } + +} +//#Activator \ No newline at end of file diff --git a/akka-docs/additional/code/osgi/blueprint.xml b/akka-docs/additional/code/osgi/blueprint.xml new file mode 100644 index 0000000000..f817da85b0 --- /dev/null +++ b/akka-docs/additional/code/osgi/blueprint.xml @@ -0,0 +1,10 @@ + + + + + + + + + diff --git a/akka-docs/additional/osgi.rst b/akka-docs/additional/osgi.rst index aea554ef9c..3bedc8c7dd 100644 --- a/akka-docs/additional/osgi.rst +++ b/akka-docs/additional/osgi.rst @@ -8,3 +8,20 @@ To use Akka in an OSGi environment, the ``org.osgi.framework.bootdelegation`` property must be set to always delegate the ``sun.misc`` package to the boot classloader instead of resolving it through the normal OSGi class space. + +Activator +--------- + +To bootstrap Akka inside an OSGi environment, you can use the akka.osgi.AkkaSystemActivator class +to conveniently set up the ActorSystem. + +.. includecode:: code/osgi/Activator.scala#Activator + + +Blueprint +--------- + +For the Apache Aries Blueprint implementation, there's also a namespace handler available. The namespace URI +is http://akka.io/xmlns/blueprint/v1.0.0 and it can be used to set up an ActorSystem. + +.. 
includecode:: code/osgi/blueprint.xml diff --git a/akka-osgi/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml b/akka-osgi/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml new file mode 100644 index 0000000000..650738b10a --- /dev/null +++ b/akka-osgi/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml @@ -0,0 +1,15 @@ + + + + + + + + http://akka.io/xmlns/blueprint/v1.0.0 + + + + + + + diff --git a/akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd b/akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd new file mode 100644 index 0000000000..256dff22e9 --- /dev/null +++ b/akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd @@ -0,0 +1,67 @@ + + + + + + + + + + + + Defines the configuration elements for setting up Akka with Blueprint + + + + + + + + Defines an Akka ActorSystem + + + + + + + + + + + + + + + + Defines an Akka Actor + + + + + + + + + diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index d63404334a..c6d24e8262 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -9,7 +9,10 @@ import java.util.Properties * Abstract {@link BundleActivator} implementation to bootstrap and configure an {@link ActorSystem} in an * OSGi environment. 
*/ -abstract class ActorSystemActivator extends BundleActivator { +abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ String) extends BundleActivator { + + def this() = this({ context: BundleContext ⇒ null }) + def this(name: String) = this({ context: BundleContext ⇒ name }) var system: ActorSystem = null @@ -26,12 +29,8 @@ abstract class ActorSystemActivator extends BundleActivator { * @param context the BundleContext */ def start(context: BundleContext) { - system = createActorSystem(context) + system = OsgiActorSystemFactory(context).createActorSystem(nameFor(context)) configure(system) - - val properties = new Properties(); - properties.put("name", getActorSystemName(context)) - context.registerService(classOf[ActorSystem].getName, system, properties) } /** @@ -47,32 +46,4 @@ abstract class ActorSystemActivator extends BundleActivator { } } - /** - * Strategy method to create the ActorSystem. - */ - def createActorSystem(context: BundleContext) = - ActorSystem(getActorSystemName(context), getActorSystemConfig(context), getClass.getClassLoader) - - - /** - * Strategy method to create the Config for the ActorSystem, ensuring that the default/reference configuration is - * loaded from the akka-actor bundle. - */ - def getActorSystemConfig(context: BundleContext): Config = { - val reference = ConfigFactory.defaultReference(classOf[ActorSystem].getClassLoader) - ConfigFactory.load(getClass.getClassLoader).withFallback(reference) - } - - /** - * Strategy method to determine the ActorSystem name - override this method to define the ActorSytem name yourself. - * - * The default implementation will use 'bundle--ActorSystem' where matches the bundle id for the containing bundle. 
- * - * @param context the BundleContext - * @return the ActorSystem name - */ - def getActorSystemName(context: BundleContext): String = { - "bundle-%s-ActorSystem".format(context.getBundle().getBundleId) - } - } diff --git a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala new file mode 100644 index 0000000000..8c41521964 --- /dev/null +++ b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala @@ -0,0 +1,56 @@ +package akka.osgi + +import impl.BundleDelegatingClassLoader +import org.osgi.framework.BundleContext +import java.util.Properties +import akka.actor.ActorSystem +import com.typesafe.config.{ ConfigFactory, Config } + +/** + * Factory class to create ActorSystem implementations in an OSGi environment. This mainly involves dealing with + * bundle classloaders appropriately to ensure that configuration files and classes get loaded properly + */ +class OsgiActorSystemFactory(val context: BundleContext) { + + /* + * Classloader that delegates to the bundle for which the factory is creating an ActorSystem + */ + val classloader = BundleDelegatingClassLoader.createFor(context) + + /** + * Creates the ActorSystem and registers it in the OSGi Service Registry + */ + def createActorSystem(name: String) = { + val system = ActorSystem(actorSystemName(name), actorSystemConfig(context), classloader) + registerService(system) + system + } + + def registerService(system: ActorSystem) { + val properties = new Properties(); + properties.put("name", system.name) + context.registerService(classOf[ActorSystem].getName, system, properties) + } + + /** + * Strategy method to create the Config for the ActorSystem, ensuring that the default/reference configuration is + * loaded from the akka-actor bundle. 
+ */ + def actorSystemConfig(context: BundleContext): Config = { + val reference = ConfigFactory.defaultReference(classOf[ActorSystem].getClassLoader) + ConfigFactory.load(classloader).withFallback(reference) + } + + /** + * Determine a the ActorSystem name + */ + def actorSystemName(name: String): String = + Option(name).getOrElse("bundle-%s-ActorSystem".format(context.getBundle().getBundleId)) + +} + +object OsgiActorSystemFactory { + + def apply(context: BundleContext) = new OsgiActorSystemFactory(context) + +} diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala new file mode 100644 index 0000000000..92e7e8a099 --- /dev/null +++ b/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala @@ -0,0 +1,34 @@ +package akka.osgi.blueprint + +import org.osgi.framework.BundleContext +import akka.osgi.OsgiActorSystemFactory +import collection.mutable.Buffer +import akka.actor.{ Actor, Props, ActorSystem } + +/** + * A set of helper/factory classes to build a Akka system using Blueprint + */ +class BlueprintActorSystemFactory(context: BundleContext, name: String) extends OsgiActorSystemFactory(context) { + + val systems: Buffer[ActorSystem] = Buffer() + + def this(context: BundleContext) = this(context, null) + + def create: ActorSystem = create(null) + def create(name: String): ActorSystem = { + val system = super.createActorSystem(name) + systems += system + system + } + + def destroy = for (system ← systems) { + system.shutdown() + } +} + +class BlueprintActorSystem(context: BundleContext, system: ActorSystem) { + + def createActor(name: String) = system.actorOf(Props(context.getBundle.loadClass(name).asInstanceOf[Class[Actor]])) + +} + diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala b/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala new file mode 100644 index 
0000000000..b1412eae91 --- /dev/null +++ b/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala @@ -0,0 +1,100 @@ +package akka.osgi.blueprint.aries + +import org.apache.aries.blueprint.ParserContext +import org.osgi.service.blueprint.container.ComponentDefinitionException +import org.apache.aries.blueprint.mutable.MutableBeanMetadata + +import collection.JavaConversions.setAsJavaSet +import org.osgi.framework.BundleContext +import org.apache.aries.blueprint.reflect.{ ValueMetadataImpl, RefMetadataImpl, BeanArgumentImpl } +import org.w3c.dom.{ NodeList, Element, Node } +import org.osgi.service.blueprint.reflect.{ BeanMetadata, ComponentMetadata } +import akka.actor.{ ActorRef, ActorSystem } +import akka.osgi.blueprint.{ BlueprintActorSystem, BlueprintActorSystemFactory } + +/** + * Aries Blueprint namespace handler implementation + */ +class NamespaceHandler extends org.apache.aries.blueprint.NamespaceHandler { + + val CLASS_ATTRIBUTE = "class"; + val ID_ATTRIBUTE = "id"; + val NAME_ATTRIBUTE = "name"; + + var idCounter = 1 + + def getSchemaLocation(namespace: String) = getClass().getResource("akka.xsd") + + def getManagedClasses = setAsJavaSet(Set(classOf[BlueprintActorSystemFactory])) + + def parse(element: Element, context: ParserContext) = { + val factory = context.createMetadata(classOf[MutableBeanMetadata]) + factory.setId(getId(context, element)) + factory.setScope(BeanMetadata.SCOPE_SINGLETON) + factory.setProcessor(true) + factory.setClassName(classOf[BlueprintActorSystemFactory].getName) + factory.setDestroyMethod("destroy") + factory.addArgument(new BeanArgumentImpl(new RefMetadataImpl("blueprintBundleContext"), classOf[BundleContext].getName, -1)) + + val system = context.createMetadata(classOf[MutableBeanMetadata]) + system.setId(getId(context, element)) + system.setFactoryComponent(factory) + system.setFactoryMethod("create") + system.setRuntimeClass(classOf[ActorSystem]) + if (element.hasAttribute(NAME_ATTRIBUTE)) { + 
system.addArgument(new BeanArgumentImpl(new ValueMetadataImpl(element.getAttribute(NAME_ATTRIBUTE)), classOf[String].getName, -1)) + } + + val actorsystem = context.createMetadata(classOf[MutableBeanMetadata]) + actorsystem.setId(getId(context, element)) + actorsystem.setClassName(classOf[BlueprintActorSystem].getName) + actorsystem.addArgument(new BeanArgumentImpl(new RefMetadataImpl("blueprintBundleContext"), classOf[BundleContext].getName, -1)) + actorsystem.addArgument(new BeanArgumentImpl(system, classOf[ActorSystem].getName, -1)) + context.getComponentDefinitionRegistry.registerComponentDefinition(actorsystem) + + val nodelist = element.getChildNodes + var i = 0 + while (i < nodelist.getLength) { + val node = nodelist.item(i) + node.getLocalName match { + case "actor" if node.isInstanceOf[Element] ⇒ parseActor(node.asInstanceOf[Element], context, actorsystem) + case _ ⇒ + } + i += 1 + } + factory + } + + def parseActor(node: Element, context: ParserContext, actorsystem: MutableBeanMetadata) = { + val actor = context.createMetadata(classOf[MutableBeanMetadata]) + actor.setFactoryComponent(actorsystem) + if (node.hasAttribute(CLASS_ATTRIBUTE)) { + actor.addArgument(new BeanArgumentImpl(new ValueMetadataImpl(node.getAttribute(CLASS_ATTRIBUTE)), classOf[String].getName, -1)) + } + actor.setId(getId(context, node)) + actor.setFactoryMethod("createActor") + // actor.setRuntimeClass(classOf[ActorRef]) + context.getComponentDefinitionRegistry.registerComponentDefinition(actor) + } + + def decorate(node: Node, component: ComponentMetadata, context: ParserContext) = + throw new ComponentDefinitionException("Bad xml syntax: node decoration is not supported"); + + def getId(context: ParserContext, element: Element) = { + if (element.hasAttribute(ID_ATTRIBUTE)) { + element.getAttribute(ID_ATTRIBUTE); + } else { + generateId(context); + } + } + + def generateId(context: ParserContext): String = { + var id = ""; + do { + idCounter += 1 + id = ".akka-" + idCounter; + } while 
(context.getComponentDefinitionRegistry().containsComponentDefinition(id)); + id; + } + +} diff --git a/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala b/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala new file mode 100644 index 0000000000..74592392d9 --- /dev/null +++ b/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala @@ -0,0 +1,74 @@ +package akka.osgi.impl + +import java.net.URL +import java.util.Enumeration + +import org.osgi.framework.{ BundleContext, Bundle } + +/* + * Companion object to create bundle delegating classloader instances + */ +object BundleDelegatingClassLoader { + + /* + * Create a bundle delegating classloader for the bundle context's bundle + */ + def createFor(context: BundleContext) = new BundleDelegatingClassLoader(context.getBundle) + +} + +/* + * A bundle delegating classloader implemenation - this will try to load classes and resources from the bundle + * specified first and if there's a classloader specified, that will be used as a fallback + */ +class BundleDelegatingClassLoader(bundle: Bundle, classLoader: Option[ClassLoader]) extends ClassLoader { + + def this(bundle: Bundle) = this(bundle, None) + + protected override def findClass(name: String): Class[_] = bundle.loadClass(name) + + protected override def findResource(name: String): URL = { + val resource: URL = bundle.getResource(name) + classLoader match { + case Some(loader) if resource == null ⇒ loader.getResource(name) + case _ ⇒ resource + } + } + + @SuppressWarnings(Array("unchecked", "rawtypes")) + protected override def findResources(name: String): Enumeration[URL] = + bundle.getResources(name).asInstanceOf[Enumeration[URL]] + + protected override def loadClass(name: String, resolve: Boolean): Class[_] = { + val clazz: Class[_] = try { + findClass(name) + } catch { + case cnfe: ClassNotFoundException ⇒ { + classLoader match { + case Some(loader) ⇒ loadClass(name, loader) + case None ⇒ 
rethrowClassNotFoundException(name, cnfe) + } + } + } + if (resolve) { + resolveClass(clazz) + } + clazz + } + + private def loadClass(name: String, classLoader: ClassLoader) = + try { + classLoader.loadClass(name) + } catch { + case cnfe: ClassNotFoundException ⇒ rethrowClassNotFoundException(name, cnfe) + } + + def rethrowClassNotFoundException(name: String, cnfe: ClassNotFoundException): Nothing = + throw new ClassNotFoundException(name + " from bundle " + bundle.getBundleId + " (" + bundle.getSymbolicName + ")", cnfe) + + def getBundle: Bundle = bundle + + override def toString = String.format("BundleDelegatingClassLoader(%s)", bundle) + +} + diff --git a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml new file mode 100644 index 0000000000..d276ee86a0 --- /dev/null +++ b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml @@ -0,0 +1,9 @@ + + + + + + + + diff --git a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala index ffcc3cc0e7..34472e3537 100644 --- a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala +++ b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala @@ -29,10 +29,7 @@ class ActorSystemActivatorTest extends FlatSpec { lazy val context: BundleContext = { val config = new HashMap[String, AnyRef](); - config.put(PojoServiceRegistryFactory.BUNDLE_DESCRIPTORS, new ClasspathScanner().scanForBundles()); - val loader = ServiceLoader.load(classOf[PojoServiceRegistryFactory]); - val registry = loader.iterator().next().newPojoServiceRegistry(config); registry.getBundleContext } diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index f0e6446879..629d0475f3 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -428,7 +428,7 @@ object Dependencies { val camel = Seq(camelCore, Test.scalatest, Test.junit, Test.mockito) - val 
osgi = Seq(osgiCore, Test.pojosr, Test.scalatest, Test.junit) + val osgi = Seq(osgiCore, ariesBlueprint, Runtime.logback, Test.pojosr, Test.tinybundles, Test.scalatest, Test.junit) val tutorials = Seq(Test.scalatest, Test.junit) @@ -454,6 +454,7 @@ object Dependency { // Compile + val ariesBlueprint = "org.apache.aries.blueprint" % "org.apache.aries.blueprint" % "0.3.1" // ApacheV2 val camelCore = "org.apache.camel" % "camel-core" % V.Camel // ApacheV2 val netty = "io.netty" % "netty" % V.Netty // ApacheV2 val osgiCore = "org.osgi" % "org.osgi.core" % V.OSGi // ApacheV2 @@ -480,6 +481,7 @@ object Dependency { val scalatest = "org.scalatest" % "scalatest_2.9.1" % V.Scalatest % "test" // ApacheV2 val scalacheck = "org.scala-tools.testing" % "scalacheck_2.9.1" % "1.9" % "test" // New BSD val specs2 = "org.specs2" % "specs2_2.9.1" % "1.9" % "test" // Modified BSD / ApacheV2 + val tinybundles = "org.ops4j.pax.tinybundles" % "tinybundles" % "1.0.0" % "test" // ApacheV2 val zookeeper = "org.apache.hadoop.zookeeper" % "zookeeper" % "3.4.0" % "test" // ApacheV2 val log4j = "log4j" % "log4j" % "1.2.14" % "test" // ApacheV2 } From 8f31850ae29f099799902e59861d2d9a966a75c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Fri, 25 May 2012 10:02:35 +0200 Subject: [PATCH 244/538] Add Migration Manager for binary compatibility checks. 
see #1953 --- project/AkkaBuild.scala | 47 ++++++++++++++++++++++++++++++----------- project/plugins.sbt | 2 ++ 2 files changed, 37 insertions(+), 12 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index d687bff9e8..23d51fe77c 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -11,6 +11,8 @@ import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOpti import com.typesafe.sbtscalariform.ScalariformPlugin import com.typesafe.sbtscalariform.ScalariformPlugin.ScalariformKeys import com.typesafe.sbtosgi.OsgiPlugin.{ OsgiKeys, osgiSettings } +import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings +import com.typesafe.tools.mima.plugin.MimaKeys.previousArtifact import java.lang.Boolean.getBoolean import sbt.Tests import Sphinx.{ sphinxDocs, sphinxHtml, sphinxLatex, sphinxPdf, sphinxPygments, sphinxTags } @@ -27,7 +29,8 @@ object AkkaBuild extends Build { lazy val akka = Project( id = "akka", base = file("."), - settings = parentSettings ++ Release.settings ++ Unidoc.settings ++ Sphinx.settings ++ Publish.versionSettings ++ Dist.settings ++ Seq( + settings = parentSettings ++ Release.settings ++ Unidoc.settings ++ Sphinx.settings ++ Publish.versionSettings ++ + Dist.settings ++ mimaSettings ++ Seq( testMailbox in GlobalScope := System.getProperty("akka.testMailbox", "false").toBoolean, parallelExecution in GlobalScope := System.getProperty("akka.parallelExecution", "false").toBoolean, Publish.defaultPublishTo in ThisBuild <<= crossTarget / "repository", @@ -54,7 +57,8 @@ object AkkaBuild extends Build { artifact in (Compile, packageBin) ~= (_.copy(`type` = "bundle")), // to fix scaladoc generation fullClasspath in doc in Compile <<= fullClasspath in Compile, - libraryDependencies ++= Dependencies.actor + libraryDependencies ++= Dependencies.actor, + previousArtifact := akkaPreviousArtifact("akka-actor") ) ) @@ -63,7 +67,8 @@ object AkkaBuild extends Build { base = file("akka-testkit"), 
dependencies = Seq(actor), settings = defaultSettings ++ Seq( - libraryDependencies ++= Dependencies.testkit + libraryDependencies ++= Dependencies.testkit, + previousArtifact := akkaPreviousArtifact("akka-testkit") ) ) @@ -101,7 +106,8 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, - jvmOptions in MultiJvm := defaultMultiJvmOptions + jvmOptions in MultiJvm := defaultMultiJvmOptions, + previousArtifact := akkaPreviousArtifact("akka-remote") ) ) configs (MultiJvm) @@ -117,7 +123,8 @@ object AkkaBuild extends Build { (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq }, scalatestOptions in MultiJvm := defaultMultiJvmScalatestOptions, - jvmOptions in MultiJvm := defaultMultiJvmOptions + jvmOptions in MultiJvm := defaultMultiJvmOptions, + previousArtifact := akkaPreviousArtifact("akka-remote") ) ) configs (MultiJvm) @@ -135,7 +142,8 @@ object AkkaBuild extends Build { base = file("akka-agent"), dependencies = Seq(actor, testkit % "test->test"), settings = defaultSettings ++ OSGi.agent ++ Seq( - libraryDependencies ++= Dependencies.agent + libraryDependencies ++= Dependencies.agent, + previousArtifact := akkaPreviousArtifact("akka-agent") ) ) @@ -144,7 +152,8 @@ object AkkaBuild extends Build { base = file("akka-transactor"), dependencies = Seq(actor, testkit % "test->test"), settings = defaultSettings ++ OSGi.transactor ++ Seq( - libraryDependencies ++= Dependencies.transactor + libraryDependencies ++= Dependencies.transactor, + previousArtifact := akkaPreviousArtifact("akka-transactor") ) ) @@ -163,7 +172,8 @@ object AkkaBuild extends Build { dependencies = Seq(remote, testkit % "compile;test->test"), settings = defaultSettings ++ OSGi.mailboxesCommon ++ Seq( libraryDependencies ++= Dependencies.mailboxes, - // DurableMailboxSpec published in 
akka-mailboxes-common-test + previousArtifact := akkaPreviousArtifact("akka-mailboxes-common"), + // DurableMailboxSpec published in akka-mailboxes-common-test publishArtifact in Test := true ) ) @@ -173,7 +183,8 @@ object AkkaBuild extends Build { base = file("akka-durable-mailboxes/akka-file-mailbox"), dependencies = Seq(mailboxesCommon % "compile;test->test", testkit % "test"), settings = defaultSettings ++ OSGi.fileMailbox ++ Seq( - libraryDependencies ++= Dependencies.fileMailbox + libraryDependencies ++= Dependencies.fileMailbox, + previousArtifact := akkaPreviousArtifact("akka-file-mailbox") ) ) @@ -182,7 +193,8 @@ object AkkaBuild extends Build { base = file("akka-zeromq"), dependencies = Seq(actor, testkit % "test;test->test"), settings = defaultSettings ++ OSGi.zeroMQ ++ Seq( - libraryDependencies ++= Dependencies.zeroMQ + libraryDependencies ++= Dependencies.zeroMQ, + previousArtifact := akkaPreviousArtifact("akka-zeromq") ) ) @@ -191,7 +203,8 @@ object AkkaBuild extends Build { base = file("akka-kernel"), dependencies = Seq(actor, testkit % "test->test"), settings = defaultSettings ++ Seq( - libraryDependencies ++= Dependencies.kernel + libraryDependencies ++= Dependencies.kernel, + previousArtifact := akkaPreviousArtifact("akka-kernel") ) ) @@ -339,7 +352,7 @@ object AkkaBuild extends Build { (if (useOnlyTestTags.isEmpty) Seq.empty else Seq("-n", if (multiNodeEnabled) useOnlyTestTags.mkString("\"", " ", "\"") else useOnlyTestTags.mkString(" "))) } - lazy val defaultSettings = baseSettings ++ formatSettings ++ Seq( + lazy val defaultSettings = baseSettings ++ formatSettings ++ mimaSettings ++ Seq( resolvers += "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/", // compile options @@ -403,6 +416,16 @@ object AkkaBuild extends Build { (Tests.overall(r.values), r) } ) + + lazy val mimaSettings = mimaDefaultSettings ++ Seq( + // MiMa + previousArtifact := None + ) + + def akkaPreviousArtifact(id: String, organization: String = 
"com.typesafe.akka", version: String = "2.0"): Option[sbt.ModuleID] = { + // the artifact to compare binary compatibility with + Some(organization % id % version) + } } // Dependencies diff --git a/project/plugins.sbt b/project/plugins.sbt index 59f2154537..e077802cfa 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -7,6 +7,8 @@ addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.4.0") addSbtPlugin("com.typesafe.sbtosgi" % "sbtosgi" % "0.2.0") +addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.1.3") + resolvers ++= Seq( // needed for sbt-assembly, which comes with sbt-multi-jvm Resolver.url("sbtonline", url("http://scalasbt.artifactoryonline.com/scalasbt/sbt-plugin-releases"))(Resolver.ivyStylePatterns), From 17b0f43aea5295ac9c7d32e89c2823e0bdc5c4c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 31 May 2012 15:35:40 +0200 Subject: [PATCH 245/538] Invoke mima as part of the release --dry-run see #1953 --- project/scripts/release | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/project/scripts/release b/project/scripts/release index 058d0d1615..886e6629b1 100755 --- a/project/scripts/release +++ b/project/scripts/release @@ -219,6 +219,13 @@ echolog "Creating gzipped tar download..." try tar -cz -C ${unzipped_dir} -f ${release_dir}/downloads/akka-${version}.tgz akka-${version} echolog "Successfully created local release" +# check binary compatibility for dry run +if [ $dry_run ]; then + echodry "Running migration manager report..." + sbt mima-report-binary-issues + echodry "Finished migration manager report" +fi + # commit and tag this release echolog "Committing and tagging..." try git add . 
From af924384b633e7b63bbfe1307e12a701cc67c7ea Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 1 Jun 2012 10:44:58 +0200 Subject: [PATCH 246/538] Shutdown correct node --- .../src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 10d2daeeac..42e8163f2a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -56,8 +56,8 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { runOn(first) { val secondAddress = node(second).address - testConductor.shutdown(first, 0) - testConductor.removeNode(first) + testConductor.shutdown(second, 0) + testConductor.removeNode(second) awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) cluster.isSingletonCluster must be(true) cluster.isLeader must be(true) From ab1969e93ad0d4f37337dcc7ccb856a271245689 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 1 Jun 2012 10:45:34 +0200 Subject: [PATCH 247/538] Debug log since we had one hard to reproduce failure on this --- .../akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 0424c6d399..63665d3c57 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -17,7 +17,7 @@ object 
LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false). + commonConfig(debugConfig(on = true). withFallback(ConfigFactory.parseString(""" akka.cluster { auto-down = on From 4dbf4a70b379004d66add92b19dd0ed0199c9f06 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 1 Jun 2012 11:37:44 +0200 Subject: [PATCH 248/538] Test leader member states, see 2157 * The only allowed member states for a leader are up or leaving * Added above check in assertLeader so that we always verify that * More usage of assertLeader in the tests --- .../multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala | 7 +++++-- .../scala/akka/cluster/MultiNodeClusterSpec.scala | 7 ++++++- .../multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 3 ++- .../src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala | 3 ++- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 7053ba5b50..ba0471bedb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -49,6 +49,7 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp cluster.join(firstAddress) awaitUpConvergence(numberOfMembers = roles.size) cluster.isLeader must be(myself == roles.head) + assertLeaderIn(roles) } testConductor.enter("after") } @@ -58,6 +59,7 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp currentRoles.size must be >= (2) val leader = currentRoles.head val aUser = currentRoles.last + val remainingRoles = currentRoles.tail myself match { @@ -78,13 +80,14 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp cluster.down(leaderAddress) testConductor.enter("after-down", 
"completed") - case _ if currentRoles.tail.contains(myself) ⇒ + case _ if remainingRoles.contains(myself) ⇒ // remaining cluster nodes, not shutdown testConductor.enter("before-shutdown", "after-shutdown", "after-down") awaitUpConvergence(currentRoles.size - 1) - val nextExpectedLeader = currentRoles.tail.head + val nextExpectedLeader = remainingRoles.head cluster.isLeader must be(myself == nextExpectedLeader) + assertLeaderIn(remainingRoles) testConductor.enter("completed") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index cb679c12b7..4d0c7f4720 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -42,15 +42,20 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ expectedAddresses.sorted.zipWithIndex.foreach { case (a, i) ⇒ members(i).address must be(a) } } + def assertLeader(nodesInCluster: RoleName*): Unit = if (nodesInCluster.contains(myself)) { + assertLeaderIn(nodesInCluster) + } + /** * Assert that the cluster has elected the correct leader * out of all nodes in the cluster. First * member in the cluster ring is expected leader. 
*/ - def assertLeader(nodesInCluster: RoleName*): Unit = if (nodesInCluster.contains(myself)) { + def assertLeaderIn(nodesInCluster: Seq[RoleName]): Unit = if (nodesInCluster.contains(myself)) { nodesInCluster.length must not be (0) val expectedLeader = roleOfLeader(nodesInCluster) cluster.isLeader must be(ifNode(expectedLeader)(true)(false)) + cluster.status must (be(MemberStatus.Up) or be(MemberStatus.Leaving)) } /** diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 42e8163f2a..c0c12f4582 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -51,6 +51,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) } awaitUpConvergence(numberOfMembers = 2) cluster.isSingletonCluster must be(false) + assertLeader(first, second) } "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { @@ -60,7 +61,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) testConductor.removeNode(second) awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) cluster.isSingletonCluster must be(true) - cluster.isLeader must be(true) + assertLeader(first) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index 44682b81f7..b2b98f94fa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -38,7 +38,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi runOn(first) { awaitCond(cluster.isSingletonCluster) awaitUpConvergence(numberOfMembers = 1) - cluster.isLeader must be(true) + assertLeader(first) } } } @@ -57,6 +57,7 @@ 
abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi } cluster.latestGossip.members.size must be(2) awaitCond(cluster.convergence.isDefined) + assertLeader(first, second) } } From 6a415f0e9bf915a366e36ab0bc6d4cb43cb3f15e Mon Sep 17 00:00:00 2001 From: Brian Scully Date: Fri, 1 Jun 2012 08:24:47 -0400 Subject: [PATCH 249/538] * Add Circuit Breaker to akka.pattern for general use. Circuit breaker implementation as described by Michael T. Nygard in Release It!. Fixes #1734 * Uses finite state machine for three states: Closed, Open, Half-Open * Closed state allows calls through, and on sequential failures exceeding the max# set - transitions to Open state. Intervening successes cause the failure count to reset to 0 * Open state throws a CircuitOpenException on every call until the reset timeout is reached which causes a transition to Half-Open state * Half-Open state will allow the next single call through, if it succeeds - transition to Closed state, if it fails - transition back to Open state, starting the reset timer again * Allow configuration for the call and reset timeouts, as well as the maximum number of sequential failures before opening * Supports async or synchronous call protection * Callbacks are supported for state entry into Closed, Open, Half-Open. 
These are run in the supplied execution context * Both thrown exceptions and calls exceeding max call time are considered failures * Uses akka scheduler for timer events * Integrated into File-Based durable mailbox * Sample documented for other durable mailboxes --- .../akka/pattern/CircuitBreakerMTSpec.scala | 121 ++++ .../akka/pattern/CircuitBreakerSpec.scala | 243 ++++++++ .../akka/pattern/AbstractCircuitBreaker.java | 18 + .../scala/akka/pattern/CircuitBreaker.scala | 560 ++++++++++++++++++ akka-docs/common/circuitbreaker.rst | 130 ++++ .../CircuitBreakerDocSpec.scala | 43 ++ .../circuitbreaker/DangerousJavaActor.java | 83 +++ akka-docs/common/index.rst | 1 + akka-docs/conf.py | 2 +- .../actor/mailbox/DurableMailboxDocSpec.scala | 13 +- akka-docs/modules/durable-mailbox.rst | 4 +- .../src/main/resources/reference.conf | 14 +- .../akka/actor/mailbox/FileBasedMailbox.scala | 38 +- .../mailbox/FileBasedMailboxSettings.scala | 3 + .../actor/mailbox/FileBasedMailboxSpec.scala | 18 +- 15 files changed, 1266 insertions(+), 25 deletions(-) create mode 100644 akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala create mode 100644 akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala create mode 100644 akka-actor/src/main/java/akka/pattern/AbstractCircuitBreaker.java create mode 100644 akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala create mode 100644 akka-docs/common/circuitbreaker.rst create mode 100644 akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala create mode 100644 akka-docs/common/code/docs/circuitbreaker/DangerousJavaActor.java diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala new file mode 100644 index 0000000000..fab1cbab7a --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala @@ -0,0 +1,121 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.pattern + +import akka.testkit._ +import akka.util.duration._ +import org.scalatest.BeforeAndAfter +import akka.dispatch.{Promise, Await, Future} + +class CircuitBreakerMTSpec extends AkkaSpec with BeforeAndAfter { + + @volatile + var breakers: BreakerState = null + + class BreakerState { + + val halfOpenLatch = new TestLatch(1) + + val breaker = new CircuitBreaker(system.scheduler,5,100.millis.dilated,500.millis.dilated) + .onHalfOpen(halfOpenLatch.countDown()) + + } + + before { + breakers = new BreakerState() + } + + def unreliableCall(param: String) = { + param match { + case "fail" => throw new RuntimeException("FAIL") + case _ => param + } + } + + def openBreaker: Unit = { + for (i <- 1 to 5) + Await.result(breakers.breaker.withCircuitBreaker(Future(unreliableCall("fail"))) recoverWith { + case _ => Promise.successful("OK") + }, 1.second.dilated) + } + + "A circuit breaker being called by many threads" must { + "allow many calls while in closed state with no errors" in { + + val futures = for (i <- 1 to 100) yield breakers.breaker.withCircuitBreaker(Future {Thread.sleep(10); unreliableCall("succeed")}) + + val futureList = Future.sequence(futures) + + val result = Await.result(futureList, 1.second.dilated) + + result.size must be (100) + result.distinct.size must be (1) + result.distinct must contain ("succeed") + + } + + "transition to open state upon reaching failure limit and fail-fast" in { + + openBreaker + + val futures = for (i <- 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { + Thread.sleep(10); unreliableCall("success") + }) recoverWith { + case _: CircuitBreakerOpenException => Promise.successful("CBO") + } + + val futureList = Future.sequence(futures) + + val result = Await.result(futureList, 1.second.dilated) + + result.size must be (100) + result.distinct.size must be (1) + result.distinct must contain ("CBO") + } + + "allow a single call through in half-open state" in { + openBreaker + + 
Await.ready(breakers.halfOpenLatch, 2.seconds.dilated) + + val futures = for (i <- 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { + Thread.sleep(10); unreliableCall("succeed") + }) recoverWith { + case _: CircuitBreakerOpenException => Promise.successful("CBO") + } + + val futureList = Future.sequence(futures) + + val result = Await.result(futureList, 1.second.dilated) + + result.size must be (100) + result.distinct.size must be (2) + result.distinct must contain ("succeed") + result.distinct must contain ("CBO") + } + + "recover and reset the breaker after the reset timeout" in { + openBreaker + + Await.ready(breakers.halfOpenLatch, 2.seconds.dilated) + + Await.ready(breakers.breaker.withCircuitBreaker(Future(unreliableCall("succeed"))), 1.second.dilated) + + val futures = for (i <- 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { + Thread.sleep(10); unreliableCall("succeed") + }) recoverWith { + case _: CircuitBreakerOpenException => Promise.successful("CBO") + } + + val futureList = Future.sequence(futures) + + val result = Await.result(futureList, 1.second.dilated) + + result.size must be (100) + result.distinct.size must be (1) + result.distinct must contain ("succeed") + } + } + +} \ No newline at end of file diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala new file mode 100644 index 0000000000..2c2a07ee3f --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala @@ -0,0 +1,243 @@ + +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.pattern + +import akka.util.duration._ +import akka.testkit._ +import org.scalatest.BeforeAndAfter +import akka.dispatch.Future +import akka.dispatch.Await + +object CircuitBreakerSpec { + + class TestException extends RuntimeException + +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter { + + import CircuitBreakerSpec.TestException + + val awaitTimeout = 2.seconds.dilated + + @volatile + var breakers: TestCircuitBreakers = null + + class TestCircuitBreakers { + val halfOpenLatch = new TestLatch(1) + val openLatch = new TestLatch(1) + val closedLatch = new TestLatch(1) + + val shortCallTimeoutCb = new CircuitBreaker(system.scheduler, 1, 50.millis.dilated, 500.millis.dilated) + .onClose(closedLatch.countDown()) + .onHalfOpen(halfOpenLatch.countDown()) + .onOpen(openLatch.countDown()) + + val shortResetTimeoutCb = new CircuitBreaker(system.scheduler, 1, 1000.millis.dilated, 50.millis.dilated) + .onClose(closedLatch.countDown()) + .onHalfOpen(halfOpenLatch.countDown()) + .onOpen(openLatch.countDown()) + + val longCallTimeoutCb = new CircuitBreaker(system.scheduler, 1, 5 seconds, 500.millis.dilated) + .onClose(closedLatch.countDown()) + .onHalfOpen(halfOpenLatch.countDown()) + .onOpen(openLatch.countDown()) + + val longResetTimeoutCb = new CircuitBreaker(system.scheduler, 1, 100.millis.dilated, 5 seconds) + .onClose(closedLatch.countDown()) + .onHalfOpen(halfOpenLatch.countDown()) + .onOpen(openLatch.countDown()) + + val multiFailureCb = new CircuitBreaker(system.scheduler, 5, 200.millis.dilated, 500.millis.dilated) + .onClose(closedLatch.countDown()) + .onHalfOpen(halfOpenLatch.countDown()) + .onOpen(openLatch.countDown()) + } + + before { + breakers = new TestCircuitBreakers + } + + def checkLatch(latch: TestLatch) { + Await.ready(latch, awaitTimeout) + } + + def throwException = throw new TestException + + def sayHi = "hi" + + "A synchronous circuit breaker 
that is open" must { + "throw exceptions when called before reset timeout" in { + + intercept[TestException] { + breakers.longResetTimeoutCb.withSyncCircuitBreaker(throwException) + } + checkLatch(breakers.openLatch) + + intercept[CircuitBreakerOpenException] { + breakers.longResetTimeoutCb.withSyncCircuitBreaker(sayHi) + } + } + + "transition to half-open on reset timeout" in { + intercept[TestException] { + breakers.shortResetTimeoutCb.withSyncCircuitBreaker(throwException) + } + checkLatch(breakers.halfOpenLatch) + } + } + + "A synchronous circuit breaker that is half-open" must { + "pass through next call and close on success" in { + intercept[TestException] { + breakers.shortResetTimeoutCb.withSyncCircuitBreaker(throwException) + } + checkLatch(breakers.halfOpenLatch) + assert("hi" == breakers.shortResetTimeoutCb.withSyncCircuitBreaker(sayHi)) + checkLatch(breakers.closedLatch) + } + + "open on exception in call" in { + intercept[TestException] { + breakers.shortResetTimeoutCb.withSyncCircuitBreaker(throwException) + } + checkLatch(breakers.halfOpenLatch) + intercept[TestException] { + breakers.shortResetTimeoutCb.withSyncCircuitBreaker(throwException) + } + checkLatch(breakers.openLatch) + } + } + + "A synchronous circuit breaker that is closed" must { + "allow calls through" in { + breakers.longCallTimeoutCb.withSyncCircuitBreaker(sayHi) must be("hi") + } + + "increment failure count on failure" in { + intercept[TestException] { + breakers.longCallTimeoutCb.withSyncCircuitBreaker(throwException) + } + checkLatch(breakers.openLatch) + breakers.longCallTimeoutCb.currentFailureCount must be(1) + } + + "reset failure count after success" in { + intercept[TestException] { + breakers.multiFailureCb.withSyncCircuitBreaker(throwException) + } + + breakers.multiFailureCb.currentFailureCount must be(1) + breakers.multiFailureCb.withSyncCircuitBreaker(sayHi) + breakers.multiFailureCb.currentFailureCount must be(0) + } + + "increment failure count on callTimeout" in { + 
breakers.shortCallTimeoutCb.withSyncCircuitBreaker({ + 100.millis.dilated.sleep() + }) + breakers.shortCallTimeoutCb.currentFailureCount must be(1) + } + } + + "An asynchronous circuit breaker that is open" must { + "throw exceptions when called before reset timeout" in { + breakers.longResetTimeoutCb.withCircuitBreaker(Future(throwException)) + + checkLatch(breakers.openLatch) + + intercept[CircuitBreakerOpenException] { + Await.result( + breakers.longResetTimeoutCb.withCircuitBreaker(Future(sayHi)), + awaitTimeout) + } + } + + "transition to half-open on reset timeout" in { + breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) + checkLatch(breakers.halfOpenLatch) + } + } + + "An asynchronous circuit breaker that is half-open" must { + "pass through next call and close on success" in { + breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) + checkLatch(breakers.halfOpenLatch) + + Await.result( + breakers.shortResetTimeoutCb.withCircuitBreaker(Future(sayHi)), + awaitTimeout) must be("hi") + checkLatch(breakers.closedLatch) + } + + "re-open on exception in call" in { + breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) + checkLatch(breakers.halfOpenLatch) + + intercept[TestException] { + Await.result( + breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)), + awaitTimeout) + } + checkLatch(breakers.openLatch) + } + + "re-open on async failure" in { + breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) + checkLatch(breakers.halfOpenLatch) + + breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) + checkLatch(breakers.openLatch) + } + } + + "An asynchronous circuit breaker that is closed" must { + "allow calls through" in { + Await.result( + breakers.longCallTimeoutCb.withCircuitBreaker(Future(sayHi)), + awaitTimeout) must be("hi") + } + + "increment failure count on exception" in { + intercept[TestException] { + Await.result( + 
breakers.longCallTimeoutCb.withCircuitBreaker(Future(throwException)), + awaitTimeout) + } + checkLatch(breakers.openLatch) + breakers.longCallTimeoutCb.currentFailureCount must be(1) + } + + "increment failure count on async failure" in { + breakers.longCallTimeoutCb.withCircuitBreaker(Future(throwException)) + checkLatch(breakers.openLatch) + breakers.longCallTimeoutCb.currentFailureCount must be(1) + } + + "reset failure count after success" in { + breakers.multiFailureCb.withCircuitBreaker(Future(sayHi)) + val latch = TestLatch(4) + for (n ← 1 to 4) breakers.multiFailureCb.withCircuitBreaker(Future(throwException)) + awaitCond(breakers.multiFailureCb.currentFailureCount == 4, awaitTimeout) + breakers.multiFailureCb.withCircuitBreaker(Future(sayHi)) + awaitCond(breakers.multiFailureCb.currentFailureCount == 0, awaitTimeout) + } + + "increment failure count on callTimeout" in { + breakers.shortCallTimeoutCb.withCircuitBreaker { + Future { + 100.millis.dilated.sleep() + sayHi + } + } + + checkLatch(breakers.openLatch) + breakers.shortCallTimeoutCb.currentFailureCount must be(1) + } + } + +} diff --git a/akka-actor/src/main/java/akka/pattern/AbstractCircuitBreaker.java b/akka-actor/src/main/java/akka/pattern/AbstractCircuitBreaker.java new file mode 100644 index 0000000000..44482bb357 --- /dev/null +++ b/akka-actor/src/main/java/akka/pattern/AbstractCircuitBreaker.java @@ -0,0 +1,18 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.pattern; + +import akka.util.Unsafe; + +class AbstractCircuitBreaker { + protected final static long stateOffset; + + static { + try { + stateOffset = Unsafe.instance.objectFieldOffset(CircuitBreaker.class.getDeclaredField("_currentStateDoNotCallMeDirectly")); + } catch(Throwable t){ + throw new ExceptionInInitializerError(t); + } + } +} diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala new file mode 100644 index 0000000000..79eba6aa1b --- /dev/null +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -0,0 +1,560 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.pattern + +import java.util.concurrent.atomic.{ AtomicInteger, AtomicLong, AtomicBoolean } +import akka.AkkaException +import akka.actor.Scheduler +import akka.dispatch.{ Future, ExecutionContext, Await, Promise } +import akka.util.{ Deadline, Duration, NonFatal, Unsafe } +import akka.util.duration._ +import util.control.NoStackTrace +import java.util.concurrent.{ Callable, CopyOnWriteArrayList } + +/** + * Companion object providing factory methods for Circuit Breaker which runs callbacks in caller's thread + */ +object CircuitBreaker { + + /** + * Synchronous execution context to run in caller's thread - used by companion object factory methods + */ + private[CircuitBreaker] val syncExecutionContext = new ExecutionContext { + def execute(runnable: Runnable): Unit = runnable.run() + + def reportFailure(t: Throwable): Unit = () + } + + /** + * Callbacks run in caller's thread when using withSyncCircuitBreaker, and in same ExecutionContext as the passed + * in Future when using withCircuitBreaker. To use another ExecutionContext for the callbacks you can specify the + * executor in the constructor. 
+ * + * @param scheduler Reference to Akka scheduler + * @param maxFailures Maximum number of failures before opening the circuit + * @param callTimeout [[akka.util.Duration]] of time after which to consider a call a failure + * @param resetTimeout [[akka.util.Duration]] of time after which to attempt to close the circuit + */ + def apply(scheduler: Scheduler, maxFailures: Int, callTimeout: Duration, resetTimeout: Duration): CircuitBreaker = + new CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Duration, resetTimeout: Duration)(syncExecutionContext) + + /** + * Java API alias for apply + * + * @param scheduler Reference to Akka scheduler + * @param maxFailures Maximum number of failures before opening the circuit + * @param callTimeout [[akka.util.Duration]] of time after which to consider a call a failure + * @param resetTimeout [[akka.util.Duration]] of time after which to attempt to close the circuit + */ + def create(scheduler: Scheduler, maxFailures: Int, callTimeout: Duration, resetTimeout: Duration): CircuitBreaker = + apply(scheduler: Scheduler, maxFailures: Int, callTimeout: Duration, resetTimeout: Duration) +} + +/** + * Provides circuit breaker functionality to provide stability when working with "dangerous" operations, e.g. calls to + * remote systems + * + * Transitions through three states: + * - In *Closed* state, calls pass through until the `maxFailures` count is reached. This causes the circuit breaker + * to open. Both exceptions and calls exceeding `callTimeout` are considered failures. + * - In *Open* state, calls fail-fast with an exception. After `resetTimeout`, circuit breaker transitions to + * half-open state. + * - In *Half-Open* state, the first call will be allowed through, if it succeeds the circuit breaker will reset to + * closed state. If it fails, the circuit breaker will re-open to open state. All calls beyond the first that + * execute while the first is running will fail-fast with an exception. 
+ * + * + * @param scheduler Reference to Akka scheduler + * @param maxFailures Maximum number of failures before opening the circuit + * @param callTimeout [[akka.util.Duration]] of time after which to consider a call a failure + * @param resetTimeout [[akka.util.Duration]] of time after which to attempt to close the circuit + * @param executor [[akka.dispatch.ExecutionContext]] used for execution of state transition listeners + */ +class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Duration, resetTimeout: Duration)(implicit executor: ExecutionContext) extends AbstractCircuitBreaker { + + def this(executor: ExecutionContext, scheduler: Scheduler, maxFailures: Int, callTimeout: Duration, resetTimeout: Duration) = { + this(scheduler, maxFailures, callTimeout, resetTimeout)(executor) + } + + /** + * Holds reference to current state of CircuitBreaker - *access only via helper methods* + */ + @volatile + private[this] var _currentStateDoNotCallMeDirectly: State = Closed + + /** + * Helper method for access to underlying state via Unsafe + * + * @param oldState Previous state on transition + * @param newState Next state on transition + * @return Whether the previous state matched correctly + */ + @inline + private[this] def swapState(oldState: State, newState: State): Boolean = + Unsafe.instance.compareAndSwapObject(this, AbstractCircuitBreaker.stateOffset, oldState, newState) + + /** + * Helper method for accessing underlying state via Unsafe + * + * @return Reference to current state + */ + @inline + private[this] def currentState: State = + Unsafe.instance.getObjectVolatile(this, AbstractCircuitBreaker.stateOffset).asInstanceOf[State] + + /** + * Wraps invocations of asynchronous calls that need to be protected + * + * @param body Call needing protected + * @tparam T return type from call + * @return [[akka.dispatch.Future]] containing the call result + */ + def withCircuitBreaker[T](body: ⇒ Future[T]): Future[T] = { + currentState.invoke(body) 
+ } + + /** + * Java API for withCircuitBreaker + * + * @param body Call needing protected + * @tparam T return type from call + * @return [[akka.dispatch.Future]] containing the call result + */ + def callWithCircuitBreaker[T](body: Callable[Future[T]]): Future[T] = { + withCircuitBreaker(body.call) + } + + /** + * Wraps invocations of synchronous calls that need to be protected + * + * Calls are run in caller's thread + * + * @param body Call needing protected + * @tparam T return type from call + * @return The result of the call + */ + def withSyncCircuitBreaker[T](body: ⇒ T): T = { + Await.result(withCircuitBreaker( + { + try + Promise.successful(body)(CircuitBreaker.syncExecutionContext) + catch { + case NonFatal(t) ⇒ Promise.failed(t)(CircuitBreaker.syncExecutionContext) + } + }),callTimeout) + } + + /** + * Java API for withSyncCircuitBreaker + * + * @param body Call needing protected + * @tparam T return type from call + * @return The result of the call + */ + + def callWithSyncCircuitBreaker[T](body: Callable[T]): T = { + withSyncCircuitBreaker(body.call) + } + + /** + * Adds a callback to execute when circuit breaker opens + * + * The callback is run in the [[akka.dispatch.ExecutionContext]] supplied in the constructor. 
+ * + * @param callback Handler to be invoked on state change + * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation + * @return CircuitBreaker for fluent usage + */ + def onOpen[T](callback: ⇒ T): CircuitBreaker = { + Open.addListener(() ⇒ callback) + this + } + + /** + * Java API for onOpen + * + * @param callback Handler to be invoked on state change + * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation + * @return CircuitBreaker for fluent usage + */ + def onOpen[T](callback: Callable[T]): CircuitBreaker = { + onOpen(callback.call) + } + + /** + * Adds a callback to execute when circuit breaker transitions to half-open + * + * The callback is run in the [[akka.dispatch.ExecutionContext]] supplied in the constructor. + * + * @param callback Handler to be invoked on state change + * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation + * @return CircuitBreaker for fluent usage + */ + def onHalfOpen[T](callback: ⇒ T): CircuitBreaker = { + HalfOpen.addListener(() ⇒ callback) + this + } + + /** + * JavaAPI for onHalfOpen + * + * @param callback Handler to be invoked on state change + * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation + * @return CircuitBreaker for fluent usage + */ + def onHalfOpen[T](callback: Callable[T]): CircuitBreaker = { + onHalfOpen(callback.call) + } + + /** + * Adds a callback to execute when circuit breaker state closes + * + * The callback is run in the [[akka.dispatch.ExecutionContext]] supplied in the constructor. 
+ * + * @param callback Handler to be invoked on state change + * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation + * @return CircuitBreaker for fluent usage + */ + def onClose[T](callback: ⇒ T): CircuitBreaker = { + Closed.addListener(() ⇒ callback) + this + } + + /** + * JavaAPI for onClose + * + * @param callback Handler to be invoked on state change + * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation + * @return CircuitBreaker for fluent usage + */ + def onClose[T](callback: Callable[T]): CircuitBreaker = { + onClose(callback.call) + } + + /** + * Retrieves current failure count. + * + * @return count + */ + private[akka] def currentFailureCount: Int = Closed.get + + /** + * Implements consistent transition between states + * + * @param fromState State being transitioning from + * @param toState State being transitioning from + * @throws IllegalStateException if an invalid transition is attempted + */ + private def transition(fromState: State, toState: State): Unit = { + if (swapState(fromState, toState)) + toState.enter() + else + throw new IllegalStateException("Illegal transition attempted from: " + fromState + " to " + toState) + } + + /** + * Trips breaker to an open state. This is valid from Closed or Half-Open states. + * + * @param fromState State we're coming from (Closed or Half-Open) + */ + private def tripBreaker(fromState: State): Unit = { + transition(fromState, Open) + } + + /** + * Resets breaker to a closed state. This is valid from an Half-Open state only. + * + */ + private def resetBreaker(): Unit = { + transition(HalfOpen, Closed) + } + + /** + * Attempts to reset breaker by transitioning to a half-open state. This is valid from an Open state only. 
+ * + */ + private def attemptReset(): Unit = { + transition(Open, HalfOpen) + } + + /** + * Internal state abstraction + */ + private sealed trait State { + private val listeners = new CopyOnWriteArrayList[() ⇒ _] + + /** + * Add a listener function which is invoked on state entry + * + * @param listener listener implementation + * @tparam T return type of listener, not used - but supplied for type inference purposes + */ + def addListener[T](listener: () ⇒ T) { + listeners add listener + } + + /** + * Test for whether listeners exist + * + * @return whether listeners exist + */ + private def hasListeners: Boolean = !listeners.isEmpty + + /** + * Notifies the listeners of the transition event via a Future executed in implicit parameter ExecutionContext + * + * @return Promise which executes listener in supplied [[akka.dispatch.ExecutionContext]] + */ + protected def notifyTransitionListeners() { + if (hasListeners) { + val iterator = listeners.iterator + while (iterator.hasNext) { + val listener = iterator.next + //FIXME per @viktorklang: it's a bit wasteful to create Futures for one-offs, just use EC.execute instead + Future(listener()) + } + } + } + + /** + * Shared implementation of call across all states. 
Thrown exception or execution of the call beyond the allowed + * call timeout is counted as a failed call, otherwise a successful call + * + * @param body Implementation of the call + * @tparam T Return type of the call's implementation + * @return Future containing the result of the call + */ + def callThrough[T](body: ⇒ Future[T]): Future[T] = { + val deadline = callTimeout.fromNow + val bodyFuture = try body catch { + case NonFatal(t) ⇒ Promise.failed(t) + } + bodyFuture onFailure { + case _ ⇒ callFails() + } onSuccess { + case _ ⇒ + if (deadline.isOverdue()) callFails() + else callSucceeds() + } + } + + /** + * Abstract entry point for all states + * + * @param body Implementation of the call that needs protected + * @tparam T Return type of protected call + * @return Future containing result of protected call + */ + def invoke[T](body: ⇒ Future[T]): Future[T] + + /** + * Invoked when call succeeds + * + */ + def callSucceeds(): Unit + + /** + * Invoked when call fails + * + */ + def callFails(): Unit + + /** + * Invoked on the transitioned-to state during transition. Notifies listeners after invoking subclass template + * method _enter + * + */ + final def enter(): Unit = { + _enter() + notifyTransitionListeners() + } + + /** + * Template method for concrete traits + * + */ + def _enter(): Unit + } + + /** + * Concrete implementation of Closed state + */ + private object Closed extends AtomicInteger with State { + + /** + * Implementation of invoke, which simply attempts the call + * + * @param body Implementation of the call that needs protected + * @tparam T Return type of protected call + * @return Future containing result of protected call + */ + override def invoke[T](body: ⇒ Future[T]): Future[T] = { + callThrough(body) + } + + /** + * On successful call, the failure count is reset to 0 + * + * @return + */ + override def callSucceeds(): Unit = { set(0) } + + /** + * On failed call, the failure count is incremented. 
The count is checked against the configured maxFailures, and + * the breaker is tripped if we have reached maxFailures. + * + * @return + */ + override def callFails(): Unit = { + if (incrementAndGet() == maxFailures) tripBreaker(Closed) + } + + /** + * On entry of this state, failure count is reset. + * + * @return + */ + override def _enter(): Unit = { + set(0) + } + + /** + * Override for more descriptive toString + * + * @return + */ + override def toString: String = { + "Closed with failure count = " + get() + } + } + + /** + * Concrete implementation of half-open state + */ + private object HalfOpen extends AtomicBoolean(true) with State { + + /** + * Allows a single call through, during which all other callers fail-fast. If the call fails, the breaker reopens. + * If the call succeeds the breaker closes. + * + * @param body Implementation of the call that needs protected + * @tparam T Return type of protected call + * @return Future containing result of protected call + */ + override def invoke[T](body: ⇒ Future[T]): Future[T] = { + if (compareAndSet(true, false)) + callThrough(body) + else + Promise.failed[T](new CircuitBreakerOpenException(Duration.Zero)) + } + + /** + * Reset breaker on successful call. + * + * @return + */ + override def callSucceeds(): Unit = { resetBreaker() } + + /** + * Reopen breaker on failed call. 
+ * + * @return + */ + override def callFails(): Unit = { tripBreaker(HalfOpen) } + + /** + * On entry, guard should be reset for that first call to get in + * + * @return + */ + override def _enter(): Unit = { + set(true) + } + + /** + * Override for more descriptive toString + * + * @return + */ + override def toString: String = { + "Half-Open currently testing call for success = " + get() + } + } + + /** + * Concrete implementation of Open state + */ + private object Open extends AtomicLong with State { + + /** + * Fail-fast on any invocation + * + * @param body Implementation of the call that needs protected + * @tparam T Return type of protected call + * @return Future containing result of protected call + */ + override def invoke[T](body: ⇒ Future[T]): Future[T] = { + Promise.failed[T](new CircuitBreakerOpenException(remainingTimeout().timeLeft)) + } + + /** + * Calculate remaining timeout to inform the caller in case a backoff algorithm is useful + * + * @return [[akka.util.Deadline]] to when the breaker will attempt a reset by transitioning to half-open + */ + private def remainingTimeout(): Deadline = get match { + case 0L ⇒ Deadline.now + case t ⇒ (t.millis + resetTimeout).fromNow + } + + /** + * No-op for open, calls are never executed so cannot succeed or fail + * + * @return + */ + override def callSucceeds(): Unit = {} + + /** + * No-op for open, calls are never executed so cannot succeed or fail + * + * @return + */ + override def callFails(): Unit = {} + + /** + * On entering this state, schedule an attempted reset via [[akka.actor.Scheduler]] and store the entry time to + * calculate remaining time before attempted reset. 
+ * + * @return + */ + override def _enter(): Unit = { + set(System.currentTimeMillis) + scheduler.scheduleOnce(resetTimeout) { + attemptReset() + } + } + + /** + * Override for more descriptive toString + * + * @return + */ + override def toString: String = { + "Open" + } + } + +} + +/** + * Exception thrown when Circuit Breaker is open. + * + * @param remainingDuration Stores remaining time before attempting a reset. Zero duration means the breaker is + * currently in half-open state. + * @param message Defaults to "Circuit Breaker is open; calls are failing fast" + */ +class CircuitBreakerOpenException( + val remainingDuration: Duration, + message: String = "Circuit Breaker is open; calls are failing fast") + extends AkkaException(message) with NoStackTrace diff --git a/akka-docs/common/circuitbreaker.rst b/akka-docs/common/circuitbreaker.rst new file mode 100644 index 0000000000..bd13927c8e --- /dev/null +++ b/akka-docs/common/circuitbreaker.rst @@ -0,0 +1,130 @@ +.. _circuit-breaker: + +############### +Circuit Breaker +############### + +================== +Why are they used? +================== +A circuit breaker is used to provide stability and prevent cascading failures in distributed +systems. These should be used in conjunction with judicious timeouts at the interfaces between +remote systems to prevent the failure of a single component from bringing down all components. + +As an example, we have a web application interacting with a remote third party web service. +Let's say the third party has oversold their capacity and their database melts down under load. +Assume that the database fails in such a way that it takes a very long time to hand back an +error to the third party web service. This in turn makes calls fail after a long period of +time. Back to our web application, the users have noticed that their form submissions take +much longer seeming to hang. 
Well the users do what they know to do which is use the refresh +button, adding more requests to their already running requests. This eventually causes the +failure of the web application due to resource exhaustion. This will affect all users, even +those who are not using functionality dependent on this third party web service. + +Introducing circuit breakers on the web service call would cause the requests to begin to +fail-fast, letting the user know that something is wrong and that they need not refresh +their request. This also confines the failure behavior to only those users that are using +functionality dependent on the third party, other users are no longer affected as there is no +resource exhaustion. Circuit breakers can also allow savvy developers to mark portions of +the site that use the functionality unavailable, or perhaps show some cached content as +appropriate while the breaker is open. + +The Akka library provides an implementation of a circuit breaker called +:class:`akka.pattern.CircuitBreaker` which has the behavior described below. + +================= +What do they do? 
+================= +* During normal operation, a circuit breaker is in the `Closed` state: + * Exceptions or calls exceeding the configured `callTimeout` increment a failure counter + * Successes reset the failure count to zero + * When the failure counter reaches a `maxFailures` count, the breaker is tripped into `Open` state +* While in `Open` state: + * All calls fail-fast with a :class:`CircuitBreakerOpenException` + * After the configured `resetTimeout`, the circuit breaker enters a `Half-Open` state +* In `Half-Open` state: + * The first call attempted is allowed through without failing fast + * All other calls fail-fast with an exception just as in `Open` state + * If the first call succeeds, the breaker is reset back to `Closed` state + * If the first call fails, the breaker is tripped again into the `Open` state for another full `resetTimeout` +* State transition listeners: + * Callbacks can be provided for every state entry via `onOpen`, `onClose`, and `onHalfOpen` + * These are executed in the :class:`ExecutionContext` provided. + +.. graphviz:: + + digraph circuit_breaker { + rankdir = "LR"; + size = "6,5"; + graph [ bgcolor = "transparent" ] + node [ fontname = "Helvetica", + fontsize = 14, + shape = circle, + color = white, + style = filled ]; + edge [ fontname = "Helvetica", fontsize = 12 ] + Closed [ fillcolor = green2 ]; + "Half-Open" [fillcolor = yellow2 ]; + Open [ fillcolor = red2 ]; + Closed -> Closed [ label = "Success" ]; + "Half-Open" -> Open [ label = "Trip Breaker" ]; + "Half-Open" -> Closed [ label = "Reset Breaker" ]; + Closed -> Open [ label = "Trip Breaker" ]; + Open -> Open [ label = "Calls failing fast" ]; + Open -> "Half-Open" [ label = "Attempt Reset" ]; + } + +======== +Examples +======== + +-------------- +Initialization +-------------- + +Here's how a :class:`CircuitBreaker` would be configured for: + * 5 maximum failures + * a call timeout of 10 seconds + * a reset timeout of 1 minute + +^^^^^^^ +Scala +^^^^^^^ + +.. 
includecode:: code/docs/circuitbreaker/CircuitBreakerDocSpec.scala + :include: imports1,circuit-breaker-initialization + +^^^^^^^ +Java +^^^^^^^ + +.. includecode:: code/docs/circuitbreaker/DangerousJavaActor.java + :include: imports1,circuit-breaker-initialization + +--------------- +Call Protection +--------------- + +Here's how the :class:`CircuitBreaker` would be used to protect an asynchronous +call as well as a synchronous one: + +^^^^^^^ +Scala +^^^^^^^ + +.. includecode:: code/docs/circuitbreaker/CircuitBreakerDocSpec.scala + :include: circuit-breaker-usage + +^^^^^^ +Java +^^^^^^ + +.. includecode:: code/docs/circuitbreaker/DangerousJavaActor.java + :include: circuit-breaker-usage + +.. note:: + + Using the :class:`CircuitBreaker` companion object's `apply` or `create` methods + will return a :class:`CircuitBreaker` where callbacks are executed in the caller's thread. + This can be useful if the asynchronous :class:`Future` behavior is unnecessary, for + example invoking a synchronous-only API. diff --git a/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala new file mode 100644 index 0000000000..bd6c1447ad --- /dev/null +++ b/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala @@ -0,0 +1,43 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package docs.circuitbreaker + +//#imports1 +import akka.util.duration._ // small d is important here +import akka.pattern.CircuitBreaker +import akka.actor.Actor +import akka.dispatch.Future +import akka.event.Logging + +//#imports1 + +class CircuitBreakerDocSpec { } + +//#circuit-breaker-initialization +class DangerousActor extends Actor { + + val log = Logging(context.system, this) + implicit val executionContext = context.dispatcher + val breaker = + new CircuitBreaker(context.system.scheduler, 5, 10.seconds, 1.minute) + .onOpen(notifyMeOnOpen) + + def notifyMeOnOpen = + log.warning("My CircuitBreaker is now open, and will not close for one minute") +//#circuit-breaker-initialization + +//#circuit-breaker-usage + def dangerousCall: String = "This really isn't that dangerous of a call after all" + + def receive = { + case "is my middle name" => + sender ! breaker.withCircuitBreaker(Future(dangerousCall)) + case "block for me" => + sender ! breaker.withSyncCircuitBreaker(dangerousCall) + } +//#circuit-breaker-usage + +} + diff --git a/akka-docs/common/code/docs/circuitbreaker/DangerousJavaActor.java b/akka-docs/common/code/docs/circuitbreaker/DangerousJavaActor.java new file mode 100644 index 0000000000..1562338e04 --- /dev/null +++ b/akka-docs/common/code/docs/circuitbreaker/DangerousJavaActor.java @@ -0,0 +1,83 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package docs.circuitbreaker; + +//#imports1 + +import akka.actor.UntypedActor; +import akka.dispatch.Future; +import akka.event.LoggingAdapter; +import akka.util.Duration; +import akka.pattern.CircuitBreaker; +import akka.event.Logging; + +import static akka.dispatch.Futures.future; + +import java.util.concurrent.Callable; + +//#imports1 + +//#circuit-breaker-initialization +public class DangerousJavaActor extends UntypedActor { + + private final CircuitBreaker breaker; + private final LoggingAdapter log = Logging.getLogger(getContext().system(), this); + + public DangerousJavaActor() { + this.breaker = new CircuitBreaker( + getContext().dispatcher(), getContext().system().scheduler(), + 5, Duration.parse("10s"), Duration.parse("1m")) + .onOpen(new Callable() { + public Object call() throws Exception { + notifyMeOnOpen(); + return null; + } + }); + } + + public void notifyMeOnOpen() { + log.warning("My CircuitBreaker is now open, and will not close for one minute"); + } +//#circuit-breaker-initialization + + //#circuit-breaker-usage + public String dangerousCall() { + return "This really isn't that dangerous of a call after all"; + } + + @Override + public void onReceive(Object message) { + if (message instanceof String) { + String m = (String) message; + if ("is my middle name".equals(m)) { + final Future f = future( + new Callable() { + public String call() { + return dangerousCall(); + } + }, getContext().dispatcher()); + + getSender().tell(breaker + .callWithCircuitBreaker( + new Callable>() { + public Future call() throws Exception { + return f; + } + })); + } + if ("block for me".equals(m)) { + getSender().tell(breaker + .callWithSyncCircuitBreaker( + new Callable() { + @Override + public String call() throws Exception { + return dangerousCall(); + } + })); + } + } + } +//#circuit-breaker-usage + +} \ No newline at end of file diff --git a/akka-docs/common/index.rst b/akka-docs/common/index.rst index 4e19d1a1aa..de9c7016fc 100644 --- 
a/akka-docs/common/index.rst +++ b/akka-docs/common/index.rst @@ -5,3 +5,4 @@ Common utilities :maxdepth: 2 duration + circuitbreaker diff --git a/akka-docs/conf.py b/akka-docs/conf.py index b632430b59..77b7c80be0 100644 --- a/akka-docs/conf.py +++ b/akka-docs/conf.py @@ -8,7 +8,7 @@ import sys, os # -- General configuration ----------------------------------------------------- sys.path.append(os.path.abspath('_sphinx/exts')) -extensions = ['sphinx.ext.todo', 'includecode'] +extensions = ['sphinx.ext.todo', 'includecode', 'sphinx.ext.graphviz'] templates_path = ['_templates'] source_suffix = '.rst' diff --git a/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala index ac6c58ad08..54349f73e0 100644 --- a/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -50,6 +50,8 @@ import akka.dispatch.MailboxType import akka.dispatch.MessageQueue import akka.actor.mailbox.DurableMessageQueue import akka.actor.mailbox.DurableMessageSerialization +import akka.pattern.CircuitBreaker +import akka.util.duration._ class MyMailboxType(systemSettings: ActorSystem.Settings, config: Config) extends MailboxType { @@ -65,20 +67,23 @@ class MyMessageQueue(_owner: ActorContext) extends DurableMessageQueue(_owner) with DurableMessageSerialization { val storage = new QueueStorage + // A real-world implmentation would use configuration to set the last + // three parameters below + val breaker = CircuitBreaker(_owner.system.scheduler,5,30.seconds,1.minute) - def enqueue(receiver: ActorRef, envelope: Envelope) { + def enqueue(receiver: ActorRef, envelope: Envelope): Unit = breaker.withSyncCircuitBreaker { val data: Array[Byte] = serialize(envelope) storage.push(data) } - def dequeue(): Envelope = { + def dequeue(): Envelope = breaker.withSyncCircuitBreaker { val data: Option[Array[Byte]] = storage.pull() 
data.map(deserialize).orNull } - def hasMessages: Boolean = !storage.isEmpty + def hasMessages: Boolean = breaker.withSyncCircuitBreaker { !storage.isEmpty } - def numberOfMessages: Int = storage.size + def numberOfMessages: Int = breaker.withSyncCircuitBreaker { storage.size } /** * Called when the mailbox is disposed. diff --git a/akka-docs/modules/durable-mailbox.rst b/akka-docs/modules/durable-mailbox.rst index 2a9ca174cf..5be40320d0 100644 --- a/akka-docs/modules/durable-mailbox.rst +++ b/akka-docs/modules/durable-mailbox.rst @@ -80,7 +80,9 @@ a configurator (MailboxType) and a queue implementation (DurableMessageQueue). The envelope contains the message sent to the actor, and information about sender. It is the envelope that needs to be stored. As a help utility you can mixin DurableMessageSerialization to serialize and deserialize the envelope using the ordinary :ref:`serialization-scala` -mechanism. This optional and you may store the envelope data in any way you like. +mechanism. This optional and you may store the envelope data in any way you like. Durable +mailboxes are an excellent fit for usage of circuit breakers. These are described in the +:ref:`circuit-breaker` documentation. .. 
includecode:: code/docs/actor/mailbox/DurableMailboxDocSpec.scala :include: custom-mailbox diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf b/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf index 1a1b7b721b..f454716af0 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf @@ -45,7 +45,19 @@ akka { keep-journal = on # whether to sync the journal after each transaction - sync-journal = off + sync-journal = off + + # circuit breaker configuration + circuit-breaker { + # maximum number of failures before opening breaker + max-failures = 3 + + # duration of time beyond which a call is assumed to be timed out and considered a failure + call-timeout = 3 seconds + + # duration of time to wait until attempting to reset the breaker during which all calls fail-fast + reset-timeout = 30 seconds + } } } } diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala index c595fdcdd3..fccb6b5aea 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala @@ -5,14 +5,14 @@ package akka.actor.mailbox import akka.actor.ActorContext -import akka.dispatch.{ Envelope, MessageQueue } import akka.event.Logging import akka.actor.ActorRef -import akka.dispatch.MailboxType import com.typesafe.config.Config -import akka.util.NonFatal import akka.ConfigurationException import akka.actor.ActorSystem +import akka.dispatch._ +import akka.util.{ Duration, NonFatal } +import akka.pattern.{ CircuitBreakerOpenException, CircuitBreaker } class FileBasedMailboxType(systemSettings: ActorSystem.Settings, config: Config) 
extends MailboxType { private val settings = new FileBasedMailboxSettings(systemSettings, config) @@ -26,6 +26,8 @@ class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailbox // TODO Is it reasonable for all FileBasedMailboxes to have their own logger? private val log = Logging(system, "FileBasedMessageQueue") + val breaker = CircuitBreaker(_owner.system.scheduler, settings.CircuitBreakerMaxFailures, settings.CircuitBreakerCallTimeout, settings.CircuitBreakerResetTimeout) + private val queue = try { (new java.io.File(settings.QueuePath)) match { case dir if dir.exists && !dir.isDirectory ⇒ throw new IllegalStateException("Path already occupied by non-directory " + dir) @@ -42,18 +44,28 @@ class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailbox throw e } - def enqueue(receiver: ActorRef, envelope: Envelope): Unit = queue.add(serialize(envelope)) - - def dequeue(): Envelope = try { - queue.remove.map(item ⇒ { queue.confirmRemove(item.xid); deserialize(item.data) }).orNull - } catch { - case _: java.util.NoSuchElementException ⇒ null - case NonFatal(e) ⇒ - log.error(e, "Couldn't dequeue from file-based mailbox") - throw e + def enqueue(receiver: ActorRef, envelope: Envelope) { + breaker.withSyncCircuitBreaker(queue.add(serialize(envelope))) } - def numberOfMessages: Int = queue.length.toInt + def dequeue(): Envelope = { + breaker.withSyncCircuitBreaker( + try { + queue.remove.map(item ⇒ { queue.confirmRemove(item.xid); deserialize(item.data) }).orNull + } catch { + case _: java.util.NoSuchElementException ⇒ null + case e: CircuitBreakerOpenException ⇒ + log.debug(e.getMessage()) + throw e + case NonFatal(e) ⇒ + log.error(e, "Couldn't dequeue from file-based mailbox, due to [{}]", e.getMessage()) + throw e + }) + } + + def numberOfMessages: Int = { + breaker.withSyncCircuitBreaker(queue.length.toInt) + } def hasMessages: Boolean = numberOfMessages > 0 diff --git 
a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala index 87dc25840f..dff4021d96 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala @@ -29,4 +29,7 @@ class FileBasedMailboxSettings(val systemSettings: ActorSystem.Settings, val use val KeepJournal: Boolean = getBoolean("keep-journal") val SyncJournal: Boolean = getBoolean("sync-journal") + val CircuitBreakerMaxFailures = getInt("circuit-breaker.max-failures") + val CircuitBreakerCallTimeout = Duration.fromNanos(getNanoseconds("circuit-breaker.call-timeout")) + val CircuitBreakerResetTimeout = Duration.fromNanos(getNanoseconds("circuit-breaker.reset-timeout")) } \ No newline at end of file diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala b/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala index 6c97142068..e3ad811b52 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala @@ -1,7 +1,6 @@ package akka.actor.mailbox import org.apache.commons.io.FileUtils -import com.typesafe.config.ConfigFactory import akka.dispatch.Mailbox object FileBasedMailboxSpec { @@ -10,23 +9,32 @@ object FileBasedMailboxSpec { mailbox-type = akka.actor.mailbox.FileBasedMailboxType throughput = 1 file-based.directory-path = "file-based" + file-based.circuit-breaker.max-failures = 5 + file-based.circuit-breaker.call-timeout = 5 seconds } - """ + """ } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class 
FileBasedMailboxSpec extends DurableMailboxSpec("File", FileBasedMailboxSpec.config) { - val queuePath = new FileBasedMailboxSettings(system.settings, system.settings.config.getConfig("File-dispatcher")).QueuePath + val settings = new FileBasedMailboxSettings(system.settings, system.settings.config.getConfig("File-dispatcher")) "FileBasedMailboxSettings" must { "read the file-based section" in { - queuePath must be("file-based") + settings.QueuePath must be("file-based") + settings.CircuitBreakerMaxFailures must be(5) + + import akka.util.duration._ + + settings.CircuitBreakerCallTimeout must be(5 seconds) } } + def isDurableMailbox(m: Mailbox): Boolean = m.messageQueue.isInstanceOf[FileBasedMessageQueue] + def clean() { - FileUtils.deleteDirectory(new java.io.File(queuePath)) + FileUtils.deleteDirectory(new java.io.File(settings.QueuePath)) } override def atStartup() { From aef05497f57736caaabe2f1ded638ff3fef2b5b1 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 1 Jun 2012 14:49:12 +0200 Subject: [PATCH 250/538] Adding some error ouput for violated invariants in ASkSupport --- .../scala/akka/actor/ActorRefProvider.scala | 2 -- .../main/scala/akka/pattern/AskSupport.scala | 27 ++++++++++--------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 3d9563b987..eede9e1bef 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -502,8 +502,6 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system // chain death watchers so that killing guardian stops the application - //guardian.sendSystemMessage(Watch(systemGuardian, guardian)) - //rootGuardian.sendSystemMessage(Watch(rootGuardian, systemGuardian)) guardian.sendSystemMessage(Watch(guardian, systemGuardian)) rootGuardian.sendSystemMessage(Watch(systemGuardian, 
rootGuardian)) eventStream.startDefaultLoggers(_system) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 3805521ae4..42154ff522 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -246,22 +246,25 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide override def !(message: Any)(implicit sender: ActorRef = null): Unit = state match { case Stopped | _: StoppedWithPath ⇒ provider.deadLetters ! message - case _ ⇒ - val completedJustNow = result.tryComplete { - message match { - case Status.Success(r) ⇒ Right(r) - case Status.Failure(f) ⇒ Left(f) - case other ⇒ Right(other) - } + case _ ⇒ if (!(result.tryComplete { + message match { + case Status.Success(r) ⇒ Right(r) + case Status.Failure(f) ⇒ Left(f) + case other ⇒ Right(other) } - if (!completedJustNow) provider.deadLetters ! message + })) provider.deadLetters ! message } override def sendSystemMessage(message: SystemMessage): Unit = message match { - case _: Terminate ⇒ stop() - case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this && !addWatcher(watcher)) watcher ! Terminated(watchee)(stopped = true) - case Unwatch(watchee, watcher) ⇒ if (watchee == this && watcher != this) remWatcher(watcher) - case _ ⇒ + case _: Terminate ⇒ stop() + case Watch(watchee, watcher) ⇒ + if (watchee == this && watcher != this) { + if (!addWatcher(watcher)) watcher ! 
Terminated(watchee)(stopped = true) + } else System.err.println("BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, this)) + case Unwatch(watchee, watcher) ⇒ + if (watchee == this && watcher != this) remWatcher(watcher) + else System.err.println("BUG: illegal Unwatch(%s,%s) for %s".format(watchee, watcher, this)) + case _ ⇒ } override def isTerminated: Boolean = state match { From 33f14f9bf691ccff6ec5d38378e1e55550059e67 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 1 Jun 2012 15:15:53 +0200 Subject: [PATCH 251/538] Test gossip convergence, see #2164 --- .../src/main/scala/akka/cluster/Cluster.scala | 1 - .../scala/akka/cluster/ConvergenceSpec.scala | 131 ++++++++++++++++++ .../akka/cluster/MultiNodeClusterSpec.scala | 11 ++ 3 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 98d0a3f11e..3729a0b3b4 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -865,7 +865,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localGossip = localState.latestGossip val localOverview = localGossip.overview - val localSeen = localOverview.seen val localMembers = localGossip.members val localUnreachableMembers = localGossip.overview.unreachable diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala new file mode 100644 index 0000000000..eeb9b864ed --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -0,0 +1,131 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import akka.actor.Address + +object ConvergenceMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster { + failure-detector.threshold = 4 + } + """)). + withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class ConvergenceMultiJvmNode1 extends ConvergenceSpec +class ConvergenceMultiJvmNode2 extends ConvergenceSpec +class ConvergenceMultiJvmNode3 extends ConvergenceSpec +class ConvergenceMultiJvmNode4 extends ConvergenceSpec + +abstract class ConvergenceSpec + extends MultiNodeSpec(ConvergenceMultiJvmSpec) + with MultiNodeClusterSpec with BeforeAndAfter { + import ConvergenceMultiJvmSpec._ + + override def initialParticipants = 4 + + after { + testConductor.enter("after") + } + + "A cluster of 3 members" must { + + "reach initial convergence" taggedAs LongRunningTest in { + runOn(first) { + cluster.self + awaitUpConvergence(numberOfMembers = 3) + } + + runOn(second, third) { + cluster.join(node(first).address) + awaitUpConvergence(numberOfMembers = 3) + } + + runOn(fourth) { + // doesn't join immediately + } + } + + "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest in { + val thirdAddress = node(third).address + testConductor.enter("before-shutdown") + + runOn(first) { + // kill 'third' node + testConductor.shutdown(third, 0) + testConductor.removeNode(third) + } + + runOn(first, second) { + val firstAddress = node(first).address + val secondAddress = node(second).address + + within(30 seconds) { + // third becomes unreachable + awaitCond(cluster.latestGossip.overview.unreachable.size == 1) + 
awaitCond(cluster.latestGossip.members.size == 2) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitSeenSameState(Seq(firstAddress, secondAddress)) + // still one unreachable + cluster.latestGossip.overview.unreachable.size must be(1) + cluster.latestGossip.overview.unreachable.head.address must be(thirdAddress) + // and therefore no convergence + cluster.convergence.isDefined must be(false) + + } + } + + } + + "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest in { + runOn(fourth) { + // try to join + cluster.join(node(first).address) + } + + val firstAddress = node(first).address + val secondAddress = node(second).address + val fourthAddress = node(fourth).address + + def memberStatus(address: Address): Option[MemberStatus] = + cluster.latestGossip.members.collectFirst { case m if m.address == address ⇒ m.status } + + def assertNotMovedUp: Unit = { + within(20 seconds) { + awaitCond(cluster.latestGossip.members.size == 3) + awaitSeenSameState(Seq(firstAddress, secondAddress, fourthAddress)) + memberStatus(firstAddress) must be(Some(MemberStatus.Up)) + memberStatus(secondAddress) must be(Some(MemberStatus.Up)) + // leader is not allowed to move the new node to Up + memberStatus(fourthAddress) must be(Some(MemberStatus.Joining)) + // still no convergence + cluster.convergence.isDefined must be(false) + } + } + + runOn(first, second, fourth) { + for (n ← 1 to 5) { + log.debug("assertNotMovedUp#" + n) + assertNotMovedUp + // wait and then check again + 1.second.dilated.sleep + } + } + + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index cb679c12b7..4c0232cf9e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -71,6 +71,17 @@ trait MultiNodeClusterSpec { 
self: MultiNodeSpec ⇒ } } + /** + * Wait until the specified nodes have seen the same gossip overview. + */ + def awaitSeenSameState(addresses: Seq[Address]): Unit = { + awaitCond { + val seen = cluster.latestGossip.overview.seen + val seenVectorClocks = addresses.flatMap(seen.get(_)) + seenVectorClocks.size == addresses.size && seenVectorClocks.toSet.size == 1 + } + } + def roleOfLeader(nodesInCluster: Seq[RoleName]): RoleName = { nodesInCluster.length must not be (0) nodesInCluster.sorted.head From 0449f85a8696c2ef4a20977801e59f162d12706d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 1 Jun 2012 16:49:50 +0200 Subject: [PATCH 252/538] Added implementation of the LEAVE command for a cluster node. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added implementation of the LEAVE command for a cluster node * Changed the meaning of Member.isUnavailable to only DOWN and REMOVED * Removed EXIT and UP as user commands * Fixed Cluster.self to fall back to checking for itself in the gossip.overview.unreachable set. * Added Leader action transitioning from LEAVING -> EXITING * Improved comments Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 126 ++++++++++++------ .../akka/cluster/JoinTwoClustersSpec.scala | 1 - akka-docs/cluster/cluster.rst | 3 +- 3 files changed, 88 insertions(+), 42 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 98d0a3f11e..c5ad773989 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -58,11 +58,6 @@ object ClusterAction { */ case class Join(address: Address) extends ClusterMessage - /** - * Command to set a node to Up (from Joining). - */ - case class Up(address: Address) extends ClusterMessage - /** * Command to leave the cluster. 
*/ @@ -73,15 +68,16 @@ object ClusterAction { */ case class Down(address: Address) extends ClusterMessage - /** - * Command to mark a node to be removed from the cluster immediately. - */ - case class Exit(address: Address) extends ClusterMessage - /** * Command to remove a node from the cluster immediately. */ case class Remove(address: Address) extends ClusterMessage + + /** + * Command to mark a node to be removed from the cluster immediately. + * Can only be sent by the leader. + */ + private[akka] case class Exit(address: Address) extends ClusterMessage } /** @@ -158,12 +154,10 @@ object MemberStatus { case object Down extends MemberStatus case object Removed extends MemberStatus - def isUnavailable(status: MemberStatus): Boolean = { - status == MemberStatus.Down || - status == MemberStatus.Exiting || - status == MemberStatus.Removed || - status == MemberStatus.Leaving - } + /** + * Using the same notion for 'unavailable' as 'non-convergence': DOWN and REMOVED. + */ + def isUnavailable(status: MemberStatus): Boolean = status == MemberStatus.Down || status == MemberStatus.Removed } /** @@ -266,7 +260,6 @@ final class ClusterCommandDaemon extends Actor { def receive = { case Join(address) ⇒ cluster.joining(address) - case Up(address) ⇒ cluster.up(address) case Down(address) ⇒ cluster.downing(address) case Leave(address) ⇒ cluster.leaving(address) case Exit(address) ⇒ cluster.exiting(address) @@ -453,9 +446,16 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // ===================== PUBLIC API ===================== // ====================================================== - def self: Member = latestGossip.members - .find(_.address == selfAddress) - .getOrElse(throw new IllegalStateException("Can't find 'this' Member (" + selfAddress + ") in the cluster membership ring")) + def self: Member = { + val gossip = latestGossip + gossip.members + .find(_.address == selfAddress) + .getOrElse { + gossip.overview.unreachable + .find(_.address 
== selfAddress) + .getOrElse(throw new IllegalStateException("Can't find 'this' Member [" + selfAddress + "] in the cluster membership ring or in the unreachable set")) + } + } /** * Latest gossip. @@ -609,18 +609,32 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } } - /** - * State transition to UP. - */ - private[cluster] final def up(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as UP", selfAddress, address) - } - /** * State transition to LEAVING. */ - private[cluster] final def leaving(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as LEAVING", selfAddress, address) + @tailrec + private[cluster] final def leaving(address: Address) { + log.info("Cluster Node [{}] - Marking address [{}] as LEAVING", selfAddress, address) + + val localState = state.get + val localGossip = localState.latestGossip + val localMembers = localGossip.members + + val newMembers = localMembers + Member(address, MemberStatus.Leaving) // mark node as LEAVING + val newGossip = localGossip copy (members = newMembers) + + val versionedGossip = newGossip + vclockNode + val seenVersionedGossip = versionedGossip seen selfAddress + + val newState = localState copy (latestGossip = seenVersionedGossip) + + if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update + else { + failureDetector heartbeat address // update heartbeat in failure detector + if (convergence(newState.latestGossip).isDefined) { + newState.memberMembershipChangeListeners foreach { _ notify newMembers } + } + } } /** @@ -909,6 +923,11 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val isLeader = localMembers.nonEmpty && (selfAddress == localMembers.head.address) + // FIXME implement partion handoff and a check if it is completed - now just returns TRUE - e.g. 
has completed successfully + def hasPartionHandoffCompletedSuccessfully(gossip: Gossip): Boolean = { + true + } + if (isLeader && isAvailable(localState)) { // only run the leader actions if we are the LEADER and available @@ -917,11 +936,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localUnreachableMembers = localOverview.unreachable // Leader actions are as follows: - // 1. Move JOINING => UP - // 2. Move EXITING => REMOVED - // 3. Move UNREACHABLE => DOWN (auto-downing by leader) - // 4. Updating the vclock version for the changes - // 5. Updating the 'seen' table + // 1. Move JOINING => UP -- When a node joins the cluster + // 2. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) + // 3. Move LEAVING => EXITING -- When all partition handoff has completed + // 4. Move UNREACHABLE => DOWN -- When the node is in the UNREACHABLE set it can be auto-down by leader + // 5. Updating the vclock version for the changes + // 6. Updating the 'seen' table var hasChangedState = false val newGossip = @@ -930,20 +950,37 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // we have convergence - so we can't have unreachable nodes val newMembers = + localMembers map { member ⇒ - // 1. Move JOINING => UP + // ---------------------- + // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) + // ---------------------- if (member.status == MemberStatus.Joining) { log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) hasChangedState = true member copy (status = MemberStatus.Up) } else member + } map { member ⇒ - // 2. Move EXITING => REMOVED + // ---------------------- + // 2. Move EXITING => REMOVED (once all nodes have seen that this node is EXITING e.g. 
we have a convergence) + // ---------------------- if (member.status == MemberStatus.Exiting) { log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", selfAddress, member.address) hasChangedState = true member copy (status = MemberStatus.Removed) } else member + + } map { member ⇒ + // ---------------------- + // 3. Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) + // ---------------------- + if (member.status == MemberStatus.Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { + log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address) + hasChangedState = true + member copy (status = MemberStatus.Exiting) + } else member + } localGossip copy (members = newMembers) // update gossip @@ -951,7 +988,9 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // we don't have convergence - so we might have unreachable nodes // if 'auto-down' is turned on, then try to auto-down any unreachable nodes - // 3. Move UNREACHABLE => DOWN (auto-downing by leader) + // ---------------------- + // 4. Move UNREACHABLE => DOWN (auto-downing by leader) + // ---------------------- val newUnreachableMembers = localUnreachableMembers .filter(_.status != MemberStatus.Down) // no need to DOWN members already DOWN @@ -971,10 +1010,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (hasChangedState) { // we have a change of state - version it and try to update - // 4. Updating the vclock version for the changes + // ---------------------- + // 5. Updating the vclock version for the changes + // ---------------------- val versionedGossip = newGossip + vclockNode - // 5. Updating the 'seen' table + // ---------------------- + // 6. 
Updating the 'seen' table + // ---------------------- val seenVersionedGossip = versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) @@ -1005,7 +1048,10 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // 2. all unreachable members in the set have status DOWN // Else we can't continue to check for convergence // When that is done we check that all the entries in the 'seen' table have the same vector clock version - if (unreachable.isEmpty || !unreachable.exists(m ⇒ (m.status != MemberStatus.Down) && (m.status != MemberStatus.Removed))) { + if (unreachable.isEmpty || !unreachable.exists { m ⇒ + m.status != MemberStatus.Down && + m.status != MemberStatus.Removed + }) { val seen = gossip.overview.seen val views = Set.empty[VectorClock] ++ seen.values diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index c736018806..9f1395b5dd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -18,7 +18,6 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { val c2 = role("c2") commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) - } class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index a0aca11114..fb53f13131 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -5,7 +5,8 @@ Cluster Specification ###################### -.. note:: *This document describes the new clustering coming in Akka 2.1 (not 2.0)* +.. 
note:: *This document describes the new clustering coming in Akka Coltrane and +is not available in the latest stable release)* Intro ===== From de8bb7d96f07b4e191faf2c70a285f22df43deac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 1 Jun 2012 16:50:34 +0200 Subject: [PATCH 253/538] Added test for testing cluster node transitioning from UP -> LEAVING. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../scala/akka/cluster/NodeLeaving.scala | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala new file mode 100644 index 0000000000..a6ddccb806 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala @@ -0,0 +1,71 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object NodeLeavingMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster.unreachable-nodes-reaper-frequency = 30000 # turn "off" reaping to unreachable node set + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec +class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec +class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec + +abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import NodeLeavingMultiJvmSpec._ + + override def initialParticipants = 3 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + + "A node that is LEAVING a non-singleton cluster" must { + + "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { + + runOn(first) { + cluster.self + } + testConductor.enter("first-started") + + runOn(second, third) { + cluster.join(firstAddress) + } + awaitUpConvergence(numberOfMembers = 3) + testConductor.enter("rest-started") + + runOn(first) { + cluster.leave(secondAddress) + } + testConductor.enter("second-left") + + runOn(first, third) { + awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Leaving)) + + val hasLeft = cluster.latestGossip.members.find(_.status == MemberStatus.Leaving) + hasLeft must be('defined) + hasLeft.get.address must be(secondAddress) + } + + testConductor.enter("finished") + } + } +} From 
3ac39cce95b39279728282cfde36945b2cc36235 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 1 Jun 2012 16:50:56 +0200 Subject: [PATCH 254/538] Added test for testing cluster node transitioning from UP -> LEAVING -> EXITING. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/cluster/NodeLeavingAndExiting.scala | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala new file mode 100644 index 0000000000..8a1815a5dd --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala @@ -0,0 +1,82 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + leader-actions-frequency = 5000 ms # increase the leader action task frequency + unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set + } + """) + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec +class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec +class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec + +abstract class 
NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import NodeLeavingAndExitingMultiJvmSpec._ + + override def initialParticipants = 3 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + + "A node that is LEAVING a non-singleton cluster" must { + + "be moved to EXITING by the leader" taggedAs LongRunningTest in { + + runOn(first) { + cluster.self + } + testConductor.enter("first-started") + + runOn(second, third) { + cluster.join(firstAddress) + } + awaitUpConvergence(numberOfMembers = 3) + testConductor.enter("rest-started") + + runOn(first) { + cluster.leave(secondAddress) + } + testConductor.enter("second-left") + + runOn(first, third) { + + // 1. Verify that 'second' node is set to LEAVING + awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Leaving)) // wait on LEAVING + val hasLeft = cluster.latestGossip.members.find(_.status == MemberStatus.Leaving) // verify node that left + hasLeft must be('defined) + hasLeft.get.address must be(secondAddress) + + // 2. Verify that 'second' node is set to EXITING + awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Exiting)) // wait on EXITING + val hasExited = cluster.latestGossip.members.find(_.status == MemberStatus.Exiting) // verify node that exited + hasExited must be('defined) + hasExited.get.address must be(secondAddress) + } + + testConductor.enter("finished") + } + } +} From a3d48339ca4b2c62196fe40b9b1c8b286cfa3447 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 1 Jun 2012 16:51:18 +0200 Subject: [PATCH 255/538] Added test for testing cluster node transitioning from UP -> LEAVING -> EXITING -> REMOVED. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...NodeLeavingAndExitingAndBeingRemoved.scala | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala new file mode 100644 index 0000000000..76856df236 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala @@ -0,0 +1,74 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec + +abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec._ + + override def initialParticipants = 3 + + lazy val firstAddress = node(first).address + lazy val secondAddress 
= node(second).address + lazy val thirdAddress = node(third).address + + val reaperWaitingTime = 30 seconds + + "A node that is LEAVING a non-singleton cluster" must { + + "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { + + runOn(first) { + cluster.self + } + testConductor.enter("first-started") + + runOn(second, third) { + cluster.join(firstAddress) + } + awaitUpConvergence(numberOfMembers = 3) + testConductor.enter("rest-started") + + runOn(first) { + cluster.leave(secondAddress) + } + testConductor.enter("second-left") + + runOn(first, third) { + // verify that the 'second' node is no longer part of the 'members' set + awaitCond(cluster.latestGossip.members.forall(_.address != secondAddress), reaperWaitingTime) + + // verify that the 'second' node is part of the 'unreachable' set + awaitCond(cluster.latestGossip.overview.unreachable.exists(_.status == MemberStatus.Removed), reaperWaitingTime) + + // verify node that got removed is 'second' node + val isRemoved = cluster.latestGossip.overview.unreachable.find(_.status == MemberStatus.Removed) + isRemoved must be('defined) + isRemoved.get.address must be(secondAddress) + } + + testConductor.enter("finished") + } + } +} From 505e0717b910ded32a9d4b2e2f8ff7839731f6ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 1 Jun 2012 17:25:27 +0200 Subject: [PATCH 256/538] Made the timeout for test dilated --- .../akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala index 76856df236..7c1037a624 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala @@ -33,7 +33,7 @@ abstract 
class NodeLeavingAndExitingAndBeingRemovedSpec extends MultiNodeSpec(No lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address - val reaperWaitingTime = 30 seconds + val reaperWaitingTime = 30.seconds.dilated "A node that is LEAVING a non-singleton cluster" must { From e0fbf2f3246bf25d859557605b2366dc7a13de1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 1 Jun 2012 18:06:43 +0200 Subject: [PATCH 257/538] Renamed the NodeStartupSpec to NodeJoinAndUpSpec and added tests for both JOINING and UP. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/cluster/NodeJoinAndUpSpec.scala | 76 +++++++++++++++++++ .../scala/akka/cluster/NodeStartupSpec.scala | 63 --------------- 2 files changed, 76 insertions(+), 63 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala new file mode 100644 index 0000000000..5415df1b4a --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala @@ -0,0 +1,76 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object NodeJoinAndUpMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + gossip-frequency = 1000 ms + leader-actions-frequency = 5000 ms # increase the leader action task frequency + } + """) + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class NodeJoinAndUpMultiJvmNode1 extends NodeJoinAndUpSpec +class NodeJoinAndUpMultiJvmNode2 extends NodeJoinAndUpSpec + +abstract class NodeJoinAndUpSpec + extends MultiNodeSpec(NodeJoinAndUpMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender + with BeforeAndAfter { + + import NodeJoinAndUpMultiJvmSpec._ + + override def initialParticipants = 2 + + after { + testConductor.enter("after") + } + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + + "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { + + "be a singleton cluster when started up" taggedAs LongRunningTest in { + runOn(first) { + awaitCond(cluster.isSingletonCluster) + awaitUpConvergence(numberOfMembers = 1) + cluster.isLeader must be(true) + } + } + } + + "A second cluster node" must { + "join the cluster as JOINING - when sending a 'Join' command - and then be moved to UP by the leader" taggedAs LongRunningTest in { + + runOn(second) { + cluster.join(firstAddress) + } + + awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Joining }) + + awaitCond( + cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Up }, + 30.seconds.dilated) // 
waiting for the leader to move from JOINING -> UP (frequency set to 5 sec in config) + + cluster.latestGossip.members.size must be(2) + awaitCond(cluster.convergence.isDefined) + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala deleted file mode 100644 index 44682b81f7..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object NodeStartupMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) - -} - -class NodeStartupMultiJvmNode1 extends NodeStartupSpec -class NodeStartupMultiJvmNode2 extends NodeStartupSpec - -abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { - import NodeStartupMultiJvmSpec._ - - override def initialParticipants = 2 - - after { - testConductor.enter("after") - } - - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - - "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - - "be a singleton cluster when started up" taggedAs LongRunningTest in { - runOn(first) { - awaitCond(cluster.isSingletonCluster) - awaitUpConvergence(numberOfMembers = 1) - cluster.isLeader must be(true) - } - } - } - - "A second cluster node" must { - "join the other node cluster when sending a Join command" taggedAs LongRunningTest in { - - runOn(second) { - cluster.join(firstAddress) - } - - awaitCond { - 
cluster.latestGossip.members.exists { member ⇒ - member.address == secondAddress && member.status == MemberStatus.Up - } - } - cluster.latestGossip.members.size must be(2) - awaitCond(cluster.convergence.isDefined) - } - } - -} From 12b9af25cfbb1c65ccf2b678afa5942e049e4c8b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 1 Jun 2012 21:29:47 +0200 Subject: [PATCH 258/538] #2168 - Exposing more Netty options in remtoe config --- .../akka/pattern/CircuitBreakerMTSpec.scala | 72 +++++++++---------- .../scala/akka/pattern/CircuitBreaker.scala | 2 +- .../CircuitBreakerDocSpec.scala | 14 ++-- .../actor/mailbox/DurableMailboxDocSpec.scala | 6 +- akka-remote/src/main/resources/reference.conf | 9 +++ .../main/scala/akka/remote/netty/Client.scala | 6 ++ .../main/scala/akka/remote/netty/Server.scala | 6 ++ .../scala/akka/remote/netty/Settings.scala | 12 ++++ .../scala/akka/remote/RemoteConfigSpec.scala | 3 + 9 files changed, 83 insertions(+), 47 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala index fab1cbab7a..35f55d703d 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala @@ -6,7 +6,7 @@ package akka.pattern import akka.testkit._ import akka.util.duration._ import org.scalatest.BeforeAndAfter -import akka.dispatch.{Promise, Await, Future} +import akka.dispatch.{ Promise, Await, Future } class CircuitBreakerMTSpec extends AkkaSpec with BeforeAndAfter { @@ -17,8 +17,8 @@ class CircuitBreakerMTSpec extends AkkaSpec with BeforeAndAfter { val halfOpenLatch = new TestLatch(1) - val breaker = new CircuitBreaker(system.scheduler,5,100.millis.dilated,500.millis.dilated) - .onHalfOpen(halfOpenLatch.countDown()) + val breaker = new CircuitBreaker(system.scheduler, 5, 100.millis.dilated, 500.millis.dilated) + .onHalfOpen(halfOpenLatch.countDown()) } @@ 
-28,30 +28,30 @@ class CircuitBreakerMTSpec extends AkkaSpec with BeforeAndAfter { def unreliableCall(param: String) = { param match { - case "fail" => throw new RuntimeException("FAIL") - case _ => param + case "fail" ⇒ throw new RuntimeException("FAIL") + case _ ⇒ param } } def openBreaker: Unit = { - for (i <- 1 to 5) + for (i ← 1 to 5) Await.result(breakers.breaker.withCircuitBreaker(Future(unreliableCall("fail"))) recoverWith { - case _ => Promise.successful("OK") + case _ ⇒ Promise.successful("OK") }, 1.second.dilated) } "A circuit breaker being called by many threads" must { "allow many calls while in closed state with no errors" in { - val futures = for (i <- 1 to 100) yield breakers.breaker.withCircuitBreaker(Future {Thread.sleep(10); unreliableCall("succeed")}) + val futures = for (i ← 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { Thread.sleep(10); unreliableCall("succeed") }) val futureList = Future.sequence(futures) val result = Await.result(futureList, 1.second.dilated) - result.size must be (100) - result.distinct.size must be (1) - result.distinct must contain ("succeed") + result.size must be(100) + result.distinct.size must be(1) + result.distinct must contain("succeed") } @@ -59,19 +59,19 @@ class CircuitBreakerMTSpec extends AkkaSpec with BeforeAndAfter { openBreaker - val futures = for (i <- 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { - Thread.sleep(10); unreliableCall("success") - }) recoverWith { - case _: CircuitBreakerOpenException => Promise.successful("CBO") - } + val futures = for (i ← 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { + Thread.sleep(10); unreliableCall("success") + }) recoverWith { + case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO") + } val futureList = Future.sequence(futures) val result = Await.result(futureList, 1.second.dilated) - result.size must be (100) - result.distinct.size must be (1) - result.distinct must contain ("CBO") + result.size must be(100) + 
result.distinct.size must be(1) + result.distinct must contain("CBO") } "allow a single call through in half-open state" in { @@ -79,20 +79,20 @@ class CircuitBreakerMTSpec extends AkkaSpec with BeforeAndAfter { Await.ready(breakers.halfOpenLatch, 2.seconds.dilated) - val futures = for (i <- 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { - Thread.sleep(10); unreliableCall("succeed") - }) recoverWith { - case _: CircuitBreakerOpenException => Promise.successful("CBO") - } + val futures = for (i ← 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { + Thread.sleep(10); unreliableCall("succeed") + }) recoverWith { + case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO") + } val futureList = Future.sequence(futures) val result = Await.result(futureList, 1.second.dilated) - result.size must be (100) - result.distinct.size must be (2) - result.distinct must contain ("succeed") - result.distinct must contain ("CBO") + result.size must be(100) + result.distinct.size must be(2) + result.distinct must contain("succeed") + result.distinct must contain("CBO") } "recover and reset the breaker after the reset timeout" in { @@ -102,19 +102,19 @@ class CircuitBreakerMTSpec extends AkkaSpec with BeforeAndAfter { Await.ready(breakers.breaker.withCircuitBreaker(Future(unreliableCall("succeed"))), 1.second.dilated) - val futures = for (i <- 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { - Thread.sleep(10); unreliableCall("succeed") - }) recoverWith { - case _: CircuitBreakerOpenException => Promise.successful("CBO") - } + val futures = for (i ← 1 to 100) yield breakers.breaker.withCircuitBreaker(Future { + Thread.sleep(10); unreliableCall("succeed") + }) recoverWith { + case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO") + } val futureList = Future.sequence(futures) val result = Await.result(futureList, 1.second.dilated) - result.size must be (100) - result.distinct.size must be (1) - result.distinct must contain ("succeed") + 
result.size must be(100) + result.distinct.size must be(1) + result.distinct must contain("succeed") } } diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index 79eba6aa1b..ac8fd1c5ed 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -142,7 +142,7 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati catch { case NonFatal(t) ⇒ Promise.failed(t)(CircuitBreaker.syncExecutionContext) } - }),callTimeout) + }), callTimeout) } /** diff --git a/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala index bd6c1447ad..c4603017e3 100644 --- a/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala +++ b/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala @@ -5,7 +5,7 @@ package docs.circuitbreaker //#imports1 -import akka.util.duration._ // small d is important here +import akka.util.duration._ // small d is important here import akka.pattern.CircuitBreaker import akka.actor.Actor import akka.dispatch.Future @@ -13,7 +13,7 @@ import akka.event.Logging //#imports1 -class CircuitBreakerDocSpec { } +class CircuitBreakerDocSpec {} //#circuit-breaker-initialization class DangerousActor extends Actor { @@ -26,18 +26,18 @@ class DangerousActor extends Actor { def notifyMeOnOpen = log.warning("My CircuitBreaker is now open, and will not close for one minute") -//#circuit-breaker-initialization + //#circuit-breaker-initialization -//#circuit-breaker-usage + //#circuit-breaker-usage def dangerousCall: String = "This really isn't that dangerous of a call after all" def receive = { - case "is my middle name" => + case "is my middle name" ⇒ sender ! breaker.withCircuitBreaker(Future(dangerousCall)) - case "block for me" => + case "block for me" ⇒ sender ! 
breaker.withSyncCircuitBreaker(dangerousCall) } -//#circuit-breaker-usage + //#circuit-breaker-usage } diff --git a/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala index 54349f73e0..b51c7bb170 100644 --- a/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -69,21 +69,21 @@ class MyMessageQueue(_owner: ActorContext) val storage = new QueueStorage // A real-world implmentation would use configuration to set the last // three parameters below - val breaker = CircuitBreaker(_owner.system.scheduler,5,30.seconds,1.minute) + val breaker = CircuitBreaker(_owner.system.scheduler, 5, 30.seconds, 1.minute) def enqueue(receiver: ActorRef, envelope: Envelope): Unit = breaker.withSyncCircuitBreaker { val data: Array[Byte] = serialize(envelope) storage.push(data) } - def dequeue(): Envelope = breaker.withSyncCircuitBreaker { + def dequeue(): Envelope = breaker.withSyncCircuitBreaker { val data: Option[Array[Byte]] = storage.pull() data.map(deserialize).orNull } def hasMessages: Boolean = breaker.withSyncCircuitBreaker { !storage.isEmpty } - def numberOfMessages: Int = breaker.withSyncCircuitBreaker { storage.size } + def numberOfMessages: Int = breaker.withSyncCircuitBreaker { storage.size } /** * Called when the mailbox is disposed. 
diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 97b85895ed..1ed3a274e9 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -133,6 +133,15 @@ akka { # (I) Maximum total size of all channels, 0 for off max-total-memory-size = 0b + # (I&O) Sets the high water mark for the in and outbound sockets, set to 0b for platform default + write-buffer-high-water-mark = 0b + + # (I&O) Sets the send buffer size of the Sockets, set to 0b for platform default + send-buffer-size = 0b + + # (I&O) Sets the receive buffer size of the Sockets, set to 0b for platform default + receive-buffer-size = 0b + # (O) Time between reconnect attempts for active clients reconnect-delay = 5s diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index c1737831da..cbd49fc202 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -147,6 +147,12 @@ private[akka] class ActiveRemoteClient private[akka] ( b.setOption("tcpNoDelay", true) b.setOption("keepAlive", true) b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis) + if (settings.ReceiveBufferSize.isDefined) + b.setOption("receiveBufferSize", settings.ReceiveBufferSize.get) + if (settings.SendBufferSize.isDefined) + b.setOption("sendBufferSize", settings.SendBufferSize.get) + if (settings.WriteBufferHighWaterMark.isDefined) + b.setOption("writeBufferHighWaterMark", settings.WriteBufferHighWaterMark.get) settings.OutboundLocalAddress.foreach(s ⇒ b.setOption("localAddress", new InetSocketAddress(s, 0))) bootstrap = b diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index cc3310fada..170f7ebae1 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ 
b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -45,6 +45,12 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) b.setOption("reuseAddress", true) + if (settings.ReceiveBufferSize.isDefined) + b.setOption("receiveBufferSize", settings.ReceiveBufferSize.get) + if (settings.SendBufferSize.isDefined) + b.setOption("sendBufferSize", settings.SendBufferSize.get) + if (settings.WriteBufferHighWaterMark.isDefined) + b.setOption("writeBufferHighWaterMark", settings.WriteBufferHighWaterMark.get) b } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 64bc184408..a78703ebf5 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -37,8 +37,20 @@ private[akka] class NettySettings(config: Config, val systemName: String) { val WriteTimeout: Duration = Duration(getMilliseconds("write-timeout"), MILLISECONDS) val AllTimeout: Duration = Duration(getMilliseconds("all-timeout"), MILLISECONDS) val ReconnectDelay: Duration = Duration(getMilliseconds("reconnect-delay"), MILLISECONDS) + val MessageFrameSize: Int = getBytes("message-frame-size").toInt + private[this] def optionSize(s: String): Option[Int] = getBytes(s).toInt match { + case 0 ⇒ None + case x if x < 0 ⇒ + throw new ConfigurationException("Setting '%s' must be 0 or positive (and fit in an Int)" format s) + case other ⇒ Some(other) + } + + val WriteBufferHighWaterMark: Option[Int] = optionSize("write-buffer-high-water-mark") + val SendBufferSize: Option[Int] = optionSize("send-buffer-size") + val ReceiveBufferSize: Option[Int] = optionSize("receive-buffer-size") + val Hostname: String = getString("hostname") match { case "" ⇒ InetAddress.getLocalHost.getHostAddress case value ⇒ value diff --git 
a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index f1809d42a5..5a9e79d67f 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -56,6 +56,9 @@ class RemoteConfigSpec extends AkkaSpec( WriteTimeout must be(10 seconds) AllTimeout must be(0 millis) ReconnectionTimeWindow must be(10 minutes) + WriteBufferHighWaterMark must be(None) + SendBufferSize must be(None) + ReceiveBufferSize must be(None) } } From 5ec38e82e5c356385760c52a686a609e5db6c704 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 1 Jun 2012 21:42:18 +0200 Subject: [PATCH 259/538] Switching over to foreach, anticipating the use of -optimize for 2.10 --- .../src/main/scala/akka/remote/netty/Client.scala | 9 +++------ .../src/main/scala/akka/remote/netty/Server.scala | 9 +++------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index cbd49fc202..3a7287d33a 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -147,12 +147,9 @@ private[akka] class ActiveRemoteClient private[akka] ( b.setOption("tcpNoDelay", true) b.setOption("keepAlive", true) b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis) - if (settings.ReceiveBufferSize.isDefined) - b.setOption("receiveBufferSize", settings.ReceiveBufferSize.get) - if (settings.SendBufferSize.isDefined) - b.setOption("sendBufferSize", settings.SendBufferSize.get) - if (settings.WriteBufferHighWaterMark.isDefined) - b.setOption("writeBufferHighWaterMark", settings.WriteBufferHighWaterMark.get) + settings.ReceiveBufferSize.foreach(sz ⇒ b.setOption("receiveBufferSize", sz)) + settings.SendBufferSize.foreach(sz ⇒ b.setOption("sendBufferSize", sz)) + 
settings.WriteBufferHighWaterMark.foreach(sz ⇒ b.setOption("writeBufferHighWaterMark", sz)) settings.OutboundLocalAddress.foreach(s ⇒ b.setOption("localAddress", new InetSocketAddress(s, 0))) bootstrap = b diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 170f7ebae1..f9cfcf2355 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -45,12 +45,9 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) b.setOption("reuseAddress", true) - if (settings.ReceiveBufferSize.isDefined) - b.setOption("receiveBufferSize", settings.ReceiveBufferSize.get) - if (settings.SendBufferSize.isDefined) - b.setOption("sendBufferSize", settings.SendBufferSize.get) - if (settings.WriteBufferHighWaterMark.isDefined) - b.setOption("writeBufferHighWaterMark", settings.WriteBufferHighWaterMark.get) + settings.ReceiveBufferSize.foreach(sz ⇒ b.setOption("receiveBufferSize", sz)) + settings.SendBufferSize.foreach(sz ⇒ b.setOption("sendBufferSize", sz)) + settings.WriteBufferHighWaterMark.foreach(sz ⇒ b.setOption("writeBufferHighWaterMark", sz)) b } From 0330a5fd2902d403e2db9b5e1c67f694347c307c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 1 Jun 2012 21:47:14 +0200 Subject: [PATCH 260/538] Adding the low watermark option --- akka-remote/src/main/resources/reference.conf | 3 +++ akka-remote/src/main/scala/akka/remote/netty/Client.scala | 1 + akka-remote/src/main/scala/akka/remote/netty/Server.scala | 1 + akka-remote/src/main/scala/akka/remote/netty/Settings.scala | 1 + akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala | 1 + 5 files changed, 7 insertions(+) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 1ed3a274e9..b9f57458ce 100644 --- 
a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -136,6 +136,9 @@ akka { # (I&O) Sets the high water mark for the in and outbound sockets, set to 0b for platform default write-buffer-high-water-mark = 0b + # (I&O) Sets the low water mark for the in and outbound sockets, set to 0b for platform default + write-buffer-low-water-mark = 0b + # (I&O) Sets the send buffer size of the Sockets, set to 0b for platform default send-buffer-size = 0b diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 3a7287d33a..c6d23e71f3 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -150,6 +150,7 @@ private[akka] class ActiveRemoteClient private[akka] ( settings.ReceiveBufferSize.foreach(sz ⇒ b.setOption("receiveBufferSize", sz)) settings.SendBufferSize.foreach(sz ⇒ b.setOption("sendBufferSize", sz)) settings.WriteBufferHighWaterMark.foreach(sz ⇒ b.setOption("writeBufferHighWaterMark", sz)) + settings.WriteBufferLowWaterMark.foreach(sz ⇒ b.setOption("writeBufferLowWaterMark", sz)) settings.OutboundLocalAddress.foreach(s ⇒ b.setOption("localAddress", new InetSocketAddress(s, 0))) bootstrap = b diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index f9cfcf2355..04dfbe525e 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -48,6 +48,7 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { settings.ReceiveBufferSize.foreach(sz ⇒ b.setOption("receiveBufferSize", sz)) settings.SendBufferSize.foreach(sz ⇒ b.setOption("sendBufferSize", sz)) settings.WriteBufferHighWaterMark.foreach(sz ⇒ b.setOption("writeBufferHighWaterMark", sz)) + settings.WriteBufferLowWaterMark.foreach(sz ⇒ 
b.setOption("writeBufferLowWaterMark", sz)) b } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index a78703ebf5..0d105eda1d 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -48,6 +48,7 @@ private[akka] class NettySettings(config: Config, val systemName: String) { } val WriteBufferHighWaterMark: Option[Int] = optionSize("write-buffer-high-water-mark") + val WriteBufferLowWaterMark: Option[Int] = optionSize("write-buffer-low-water-mark") val SendBufferSize: Option[Int] = optionSize("send-buffer-size") val ReceiveBufferSize: Option[Int] = optionSize("receive-buffer-size") diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index 5a9e79d67f..8ac11e2440 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -57,6 +57,7 @@ class RemoteConfigSpec extends AkkaSpec( AllTimeout must be(0 millis) ReconnectionTimeWindow must be(10 minutes) WriteBufferHighWaterMark must be(None) + WriteBufferLowWaterMark must be(None) SendBufferSize must be(None) ReceiveBufferSize must be(None) } From 17ee47079a2430c199b4154f176f22216b9fba9b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sat, 2 Jun 2012 14:49:28 +0200 Subject: [PATCH 261/538] Incorporating Roland's feedback --- .../src/test/scala/akka/routing/RoutingSpec.scala | 4 ++-- akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- .../src/main/scala/akka/actor/ActorCell.scala | 11 ++++++----- .../src/main/scala/akka/actor/ActorRef.scala | 7 ++++--- .../main/scala/akka/actor/ActorRefProvider.scala | 12 +++++++----- .../src/main/scala/akka/actor/ActorSystem.scala | 15 +++------------ .../scala/akka/dispatch/AbstractDispatcher.scala | 4 ++-- 
.../src/main/scala/akka/pattern/AskSupport.scala | 6 +++--- .../scala/akka/pattern/GracefulStopSupport.scala | 11 +++++++---- .../code/docs/actor/FaultHandlingDocSpec.scala | 4 ++-- .../src/test/scala/akka/testkit/AkkaSpec.scala | 1 - 11 files changed, 37 insertions(+), 40 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 98d3e71384..f1952b8f79 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -73,7 +73,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with watch(router) watch(c2) system.stop(c2) - expectMsg(Terminated(c2)(stopped = true)) + expectMsg(Terminated(c2)(existenceConfirmed = true)) // it might take a while until the Router has actually processed the Terminated message awaitCond { router ! "" @@ -84,7 +84,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with res == Seq(c1, c1) } system.stop(c1) - expectMsg(Terminated(router)(stopped = true)) + expectMsg(Terminated(router)(existenceConfirmed = true)) } "be able to send their routees" in { diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index c8962e819f..c795534cdf 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -59,7 +59,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. 
*/ -case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty stopped: Boolean) +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty existenceConfirmed: Boolean) abstract class ReceiveTimeout extends PossiblyHarmful diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 3db70d5735..736e004c6e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -724,19 +724,20 @@ private[akka] class ActorCell( parent.sendSystemMessage(ChildTerminated(self)) if (!watchedBy.isEmpty) { - val terminated = Terminated(self)(stopped = true) + val terminated = Terminated(self)(existenceConfirmed = true) try { watchedBy foreach { - watcher ⇒ try watcher.tell(terminated, self) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } + watcher ⇒ + try watcher.tell(terminated, self) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } } } finally watchedBy = emptyActorRefSet } if (!watching.isEmpty) { try { - watching foreach { + watching foreach { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ case watchee: InternalActorRef ⇒ try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index a713a61ddc..7368ae434a 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -424,10 +424,11 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, protected def specialHandle(msg: Any): Boolean = msg match { case w: Watch ⇒ - if (w.watchee == this && w.watcher != this) w.watcher ! 
Terminated(w.watchee)(stopped = false) + if (w.watchee == this && w.watcher != this) + w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) true - case w: Unwatch ⇒ true // Just ignore + case _: Unwatch ⇒ true // Just ignore case _ ⇒ false } } @@ -449,7 +450,7 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, override protected def specialHandle(msg: Any): Boolean = msg match { case w: Watch ⇒ - if (w.watchee != this && w.watcher != this) w.watcher ! Terminated(w.watchee)(stopped = false) + if (w.watchee != this && w.watcher != this) w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) true case w: Unwatch ⇒ true // Just ignore diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index eede9e1bef..6807e34c55 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -403,8 +403,8 @@ class LocalActorRefProvider( def receive = { case Terminated(_) ⇒ context.stop(self) - case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case NonFatal(e) ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case NonFatal(e) ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case NonFatal(e) ⇒ Status.Failure(e) }) + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case NonFatal(e) ⇒ Status.Failure(e) }) case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! DeadLetter(m, sender, self) } @@ -435,8 +435,8 @@ class LocalActorRefProvider( def receive = { case Terminated(_) ⇒ eventStream.stopDefaultLoggers(); context.stop(self) - case CreateChild(child, name) ⇒ sender ! 
(try context.actorOf(child, name) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case NonFatal(e) ⇒ Status.Failure(e) }) + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case NonFatal(e) ⇒ Status.Failure(e) }) case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! DeadLetter(m, sender, self) } @@ -502,8 +502,10 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system // chain death watchers so that killing guardian stops the application - guardian.sendSystemMessage(Watch(guardian, systemGuardian)) + systemGuardian.sendSystemMessage(Watch(guardian, systemGuardian)) rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) + //guardian.sendSystemMessage(Watch(guardian, systemGuardian)) + //rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) eventStream.startDefaultLoggers(_system) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 993e7e98e4..008610c333 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -480,26 +480,17 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, private[akka] def systemActorOf(props: Props, name: String): ActorRef = { implicit val timeout = settings.CreationTimeout - Await.result(systemGuardian ? CreateChild(props, name), timeout.duration) match { - case ref: ActorRef ⇒ ref - case ex: Exception ⇒ throw ex - } + Await.result((systemGuardian ? 
CreateChild(props, name)).mapTo[ActorRef], timeout.duration) } def actorOf(props: Props, name: String): ActorRef = { implicit val timeout = settings.CreationTimeout - Await.result(guardian ? CreateChild(props, name), timeout.duration) match { - case ref: ActorRef ⇒ ref - case ex: Exception ⇒ throw ex - } + Await.result((guardian ? CreateChild(props, name)).mapTo[ActorRef], timeout.duration) } def actorOf(props: Props): ActorRef = { implicit val timeout = settings.CreationTimeout - Await.result(guardian ? CreateRandomNameChild(props), timeout.duration) match { - case ref: ActorRef ⇒ ref - case ex: Exception ⇒ throw ex - } + Await.result((guardian ? CreateRandomNameChild(props)).mapTo[ActorRef], timeout.duration) } def stop(actor: ActorRef): Unit = { diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 8e160276e8..9517a59b7c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -102,11 +102,11 @@ private[akka] case class ChildTerminated(child: ActorRef) extends SystemMessage /** * INTERNAL API */ -private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to self from ActorCell.watch +private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to establish a DeathWatch /** * INTERNAL API */ -private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch +private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to tear down a DeathWatch final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Runnable { def run(): Unit = diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala 
b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 42154ff522..c66fa4178d 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -192,7 +192,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @tailrec // Returns false if the Promise is already completed private[this] final def addWatcher(watcher: ActorRef): Boolean = watchedBy match { case null ⇒ false - case other ⇒ if (updateWatchedBy(other, other + watcher)) true else addWatcher(watcher) + case other ⇒ updateWatchedBy(other, other + watcher) || addWatcher(watcher) } @tailrec @@ -259,7 +259,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide case _: Terminate ⇒ stop() case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this) { - if (!addWatcher(watcher)) watcher ! Terminated(watchee)(stopped = true) + if (!addWatcher(watcher)) watcher ! Terminated(watchee)(existenceConfirmed = true) } else System.err.println("BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, this)) case Unwatch(watchee, watcher) ⇒ if (watchee == this && watcher != this) remWatcher(watcher) @@ -278,7 +278,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) val watchers = clearWatchers() if (!watchers.isEmpty) { - val termination = Terminated(this)(stopped = true) + val termination = Terminated(this)(existenceConfirmed = true) watchers foreach { w ⇒ try w.tell(termination, this) catch { case NonFatal(t) ⇒ /* FIXME LOG THIS */ } } } } diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 35004e637d..91293cb0d1 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala 
@@ -40,12 +40,15 @@ trait GracefulStopSupport { val internalTarget = target.asInstanceOf[InternalActorRef] val ref = PromiseActorRef(e.provider, Timeout(timeout)) internalTarget.sendSystemMessage(Watch(target, ref)) - val result = ref.result map { - case Terminated(`target`) ⇒ true - case _ ⇒ internalTarget.sendSystemMessage(Unwatch(target, ref)); false // Just making sure we're not leaking here + ref.result onComplete { // Just making sure we're not leaking here + case Right(Terminated(`target`)) ⇒ () + case _ ⇒ internalTarget.sendSystemMessage(Unwatch(target, ref)) } target ! PoisonPill - result + ref.result map { + case Terminated(`target`) ⇒ true + case _ ⇒ false + } case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") } } diff --git a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala index 4e0fdc5ee5..65e03bd2ea 100644 --- a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala @@ -111,7 +111,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { //#stop watch(child) // have testActor watch “child” child ! new IllegalArgumentException // break it - expectMsg(Terminated(child)(stopped = true)) + expectMsg(Terminated(child)(existenceConfirmed = true)) child.isTerminated must be(true) //#stop } @@ -125,7 +125,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { expectMsg(0) child2 ! 
new Exception("CRASH") // escalate failure - expectMsg(Terminated(child2)(stopped = true)) + expectMsg(Terminated(child2)(existenceConfirmed = true)) //#escalate-kill //#escalate-restart val supervisor2 = system.actorOf(Props[Supervisor2], "supervisor2") diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index d2eeeee776..5eb0c0538a 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -12,7 +12,6 @@ import akka.util.duration._ import com.typesafe.config.Config import com.typesafe.config.ConfigFactory import akka.actor.PoisonPill -import akka.actor.CreateChild import akka.actor.DeadLetter import java.util.concurrent.TimeoutException import akka.dispatch.{ Await, MessageDispatcher } From d1fb1b925259bf52024be294593bfcc75f55feb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 2 Jun 2012 17:36:10 +0200 Subject: [PATCH 262/538] Changed name of test files to end with *Spec. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../MembershipChangeListenerSpec.scala | 83 ------------------- ...eavingAndExitingAndBeingRemovedSpec.scala} | 0 ....scala => NodeLeavingAndExitingSpec.scala} | 0 ...odeLeaving.scala => NodeLeavingSpec.scala} | 3 +- 4 files changed, 2 insertions(+), 84 deletions(-) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala rename akka-cluster/src/multi-jvm/scala/akka/cluster/{NodeLeavingAndExitingAndBeingRemoved.scala => NodeLeavingAndExitingAndBeingRemovedSpec.scala} (100%) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{NodeLeavingAndExiting.scala => NodeLeavingAndExitingSpec.scala} (100%) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{NodeLeaving.scala => NodeLeavingSpec.scala} (94%) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala deleted file mode 100644 index f818c97744..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.cluster - -import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - val third = role("third") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) - -} - -class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec -class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec -class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec - -abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { - import MembershipChangeListenerMultiJvmSpec._ - - override def initialParticipants = 3 - - after { - testConductor.enter("after") - } - - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - - "A set of connected cluster systems" must { - - "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - - // make sure that the node-to-join is started before other join - runOn(first) { - cluster.self - } - testConductor.enter("first-started") - - runOn(first, second) { - cluster.join(firstAddress) - val latch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) - latch.countDown() - } - }) - latch.await - cluster.convergence.isDefined must be(true) - } - - } - - "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners 
should be triggered" taggedAs LongRunningTest in { - - runOn(third) { - cluster.join(firstAddress) - } - - val latch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) - latch.countDown() - } - }) - latch.await - cluster.convergence.isDefined must be(true) - - } - } - -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala similarity index 100% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala similarity index 100% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala similarity index 94% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index a6ddccb806..c4cf3fc12c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -18,7 +18,8 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.unreachable-nodes-reaper-frequency = 30000 # turn "off" reaping to unreachable node set + akka.cluster.leader-actions-frequency = 
5000 ms + akka.cluster.unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } From bcc6e4c11f4fe9fc98d9c8a6ab2893c15552aafd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 2 Jun 2012 17:37:13 +0200 Subject: [PATCH 263/538] Added test for testing that MemberChangeListener is triggered by node EXITING event. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../MembershipChangeListenerExitingSpec.scala | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala new file mode 100644 index 0000000000..0145628bd5 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -0,0 +1,76 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + leader-actions-frequency = 5000 ms # increase the leader action task frequency + unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set + } + """) + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec +class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec +class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec + +abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import MembershipChangeListenerExitingMultiJvmSpec._ + + override def initialParticipants = 3 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + + "A registered MembershipChangeListener" must { + "be notified when new node is EXITING" taggedAs LongRunningTest in { + + runOn(first) { + cluster.self + } + testConductor.enter("first-started") + + runOn(second, third) { + cluster.join(firstAddress) + } + awaitUpConvergence(numberOfMembers = 3) + testConductor.enter("rest-started") + + runOn(third) { + val exitingLatch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: 
SortedSet[Member]) { + if (members.size == 3 && members.exists(_.status == MemberStatus.Exiting)) + exitingLatch.countDown() + } + }) + exitingLatch.await + } + + runOn(first) { + cluster.leave(secondAddress) + } + + testConductor.enter("finished") + } + } +} From 5dc039b0f1b9479e674d843fa8c28a443bbc85eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 2 Jun 2012 17:37:29 +0200 Subject: [PATCH 264/538] Added test for testing that MemberChangeListener is triggered by node JOINING and UP events. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...embershipChangeListenerJoinAndUpSpec.scala | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala new file mode 100644 index 0000000000..81e32d1491 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala @@ -0,0 +1,85 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object MembershipChangeListenerJoinAndUpMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + gossip-frequency = 1000 ms + leader-actions-frequency = 5000 ms # increase the leader action task frequency + } + """) + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class MembershipChangeListenerJoinAndUpMultiJvmNode1 extends MembershipChangeListenerJoinAndUpSpec +class MembershipChangeListenerJoinAndUpMultiJvmNode2 extends MembershipChangeListenerJoinAndUpSpec + +abstract class MembershipChangeListenerJoinAndUpSpec + extends MultiNodeSpec(MembershipChangeListenerJoinAndUpMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender + with BeforeAndAfter { + + import MembershipChangeListenerJoinAndUpMultiJvmSpec._ + + override def initialParticipants = 2 + + after { + testConductor.enter("after") + } + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + + "A registered MembershipChangeListener" must { + "be notified when new node is JOINING and node is marked as UP by the leader" taggedAs LongRunningTest in { + + runOn(first) { + cluster.self + } + + runOn(second) { + cluster.join(firstAddress) + } + + runOn(first) { + // JOINING + val joinLatch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 2 && members.exists(_.status == MemberStatus.Joining)) // second node is not part of node ring anymore + joinLatch.countDown() + } + }) + joinLatch.await + cluster.convergence.isDefined must be(true) 
+ + // UP + val upLatch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) + upLatch.countDown() + } + }) + upLatch.await + awaitCond(cluster.convergence.isDefined) + } + } + } +} From ead5bf8695c26b207066ec32b5ea1e4fbc5a1b42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 2 Jun 2012 17:37:41 +0200 Subject: [PATCH 265/538] Added test for testing that MemberChangeListener is triggered by node LEAVING event. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../MembershipChangeListenerLeavingSpec.scala | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala new file mode 100644 index 0000000000..f8b083c4d8 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -0,0 +1,73 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster.leader-actions-frequency = 5000 ms + akka.cluster.unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec +class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec +class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec + +abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import MembershipChangeListenerLeavingMultiJvmSpec._ + + override def initialParticipants = 3 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + + "A registered MembershipChangeListener" must { + "be notified when new node is LEAVING" taggedAs LongRunningTest in { + + runOn(first) { + cluster.self + } + testConductor.enter("first-started") + + runOn(second, third) { + cluster.join(firstAddress) + } + awaitUpConvergence(numberOfMembers = 3) + testConductor.enter("rest-started") + + runOn(third) { + val latch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 3 && members.exists(_.status == 
MemberStatus.Leaving)) + latch.countDown() + } + }) + latch.await + } + + runOn(first) { + cluster.leave(secondAddress) + } + + testConductor.enter("finished") + } + } +} From 5848c88cbaad23b1429a54b7d5443da8d1e7e3c5 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sat, 2 Jun 2012 23:07:51 +0200 Subject: [PATCH 266/538] Making sure we use vals for the fields in Terminated --- akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index c795534cdf..2721ccffa0 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -59,7 +59,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. */ -case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty existenceConfirmed: Boolean) +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty val existenceConfirmed: Boolean) abstract class ReceiveTimeout extends PossiblyHarmful From 3c7ade3cdb2bdfde90648a480a837ff18e1875ed Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 08:49:05 +0200 Subject: [PATCH 267/538] Utility to replace jvm and host:port with role in logs, see 2173 --- .../akka/remote/testkit/LogRoleReplace.scala | 148 ++++++++++++++++++ .../akka/remote/testkit/MultiNodeSpec.scala | 3 + 2 files changed, 151 insertions(+) create mode 100644 akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala new file mode 100644 index 0000000000..3b3527240e --- /dev/null +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala @@ -0,0 +1,148 @@ +package akka.remote.testkit + +import 
java.awt.Toolkit +import java.awt.datatransfer.Clipboard +import java.awt.datatransfer.ClipboardOwner +import java.awt.datatransfer.DataFlavor +import java.awt.datatransfer.StringSelection +import java.awt.datatransfer.Transferable +import java.io.BufferedReader +import java.io.FileReader +import java.io.FileWriter +import java.io.InputStreamReader +import java.io.OutputStreamWriter +import java.io.PrintWriter +import java.io.StringReader +import java.io.StringWriter +import scala.annotation.tailrec + +/** + * Utility to make log files from multi-node tests easier to analyze. + * Replaces jvm names and host:port with corresponding logical role name. + */ +object LogRoleReplace extends ClipboardOwner { + + /** + * Main program. Use with 0, 1 or 2 arguments. + * + * When using 0 arguments it reads from standard input + * (System.in) and writes to standard output (System.out). + * + * With 1 argument it reads from the file specified in the first argument + * and writes to standard output. + * + * With 2 arguments it reads the file specified in the first argument + * and writes to the file specified in the second argument. 
+ * + * You can also replace the contents of the clipboard instead of using files + * by supplying `clipboard` as argument + */ + def main(args: Array[String]): Unit = { + val replacer = new LogRoleReplace + + if (args.length == 0) { + replacer.process( + new BufferedReader(new InputStreamReader(System.in)), + new PrintWriter(new OutputStreamWriter(System.out))) + + } else if (args(0) == "clipboard") { + val clipboard = Toolkit.getDefaultToolkit.getSystemClipboard + val contents = clipboard.getContents(null) + if (contents != null && contents.isDataFlavorSupported(DataFlavor.stringFlavor)) { + val text = contents.getTransferData(DataFlavor.stringFlavor).asInstanceOf[String] + val result = new StringWriter + replacer.process( + new BufferedReader(new StringReader(text)), + new PrintWriter(result)) + clipboard.setContents(new StringSelection(result.toString), this) + println("Replaced clipboard contents") + } + + } else if (args.length == 1) { + val inputFile = new BufferedReader(new FileReader(args(0))) + try { + replacer.process( + inputFile, + new PrintWriter(new OutputStreamWriter(System.out))) + } finally { + inputFile.close() + } + + } else if (args.length == 2) { + val outputFile = new PrintWriter(new FileWriter(args(1))) + val inputFile = new BufferedReader(new FileReader(args(0))) + try { + replacer.process(inputFile, outputFile) + } finally { + outputFile.close() + inputFile.close() + } + } + } + + /** + * Empty implementation of the ClipboardOwner interface + */ + def lostOwnership(clipboard: Clipboard, contents: Transferable): Unit = () +} + +class LogRoleReplace { + + private val RoleStarted = """\[([\w\-]+)\].*Role \[([\w]+)\] started""".r + private val RemoteServerStarted = """\[([\w\-]+)\].*RemoteServerStarted@akka://.*@([\w\-\.]+):([0-9]+)""".r + + private var replacements: Map[String, String] = Map.empty + private var jvmToAddress: Map[String, String] = Map.empty + + def process(in: BufferedReader, out: PrintWriter): Unit = { + + @tailrec + def 
processLines(line: String): Unit = if (line ne null) { + out.println(processLine(line)) + processLines(in.readLine) + } + + processLines(in.readLine()) + } + + def processLine(line: String): String = { + if (updateReplacements(line)) + replaceLine(line) + else + line + } + + private def updateReplacements(line: String): Boolean = { + if (line.startsWith("[info] * ")) { + // reset when new test begins + replacements = Map.empty + jvmToAddress = Map.empty + } + + line match { + case RemoteServerStarted(jvm, host, port) ⇒ + jvmToAddress += (jvm -> (host + ":" + port)) + false + + case RoleStarted(jvm, role) ⇒ + jvmToAddress.get(jvm) match { + case Some(address) ⇒ + replacements += (jvm -> role) + replacements += (address -> role) + false + case None ⇒ false + } + + case _ ⇒ true + } + } + + private def replaceLine(line: String): String = { + var result = line + for ((from, to) ← replacements) { + result = result.replaceAll(from, to) + } + result + } + +} \ No newline at end of file diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 35a9cc14e7..8ab65aa2c3 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -249,4 +249,7 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: } } + // useful to see which jvm is running which role + log.info("Role [{}] started", myself.name) + } \ No newline at end of file From de59444795e257fb9a310e202204b2c8159168ac Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 10:03:41 +0200 Subject: [PATCH 268/538] offer TestKitBase trait, see #2174 --- .../code/docs/testkit/TestkitDocSpec.scala | 20 +++++++++++++++++++ akka-docs/scala/testing.rst | 14 +++++++++++++ .../src/main/scala/akka/testkit/TestKit.scala | 6 ++++-- 3 files changed, 38 insertions(+), 2 deletions(-) diff --git 
a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala index ddb3eeaf1d..96c7857990 100644 --- a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala @@ -14,6 +14,8 @@ import akka.dispatch.Futures import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout import akka.testkit.ImplicitSender +import akka.util.NonFatal + object TestkitDocSpec { case object Say42 case object Unknown @@ -251,5 +253,23 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } //#event-filter } + + "demonstrate TestKitBase" in { + //#test-kit-base + import akka.testkit.TestKitBase + + class MyTest extends TestKitBase { + implicit lazy val system = ActorSystem() + + //#put-your-test-code-here + val probe = TestProbe() + probe.send(testActor, "hello") + try expectMsg("hello") catch { case NonFatal(e) => system.shutdown(); throw e } + //#put-your-test-code-here + + system.shutdown() + } + //#test-kit-base + } } diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index a98ee14917..d2875ed62a 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -671,6 +671,20 @@ This section contains a collection of known gotchas with some other frameworks, which is by no means exhaustive and does not imply endorsement or special support. +When you need it to be a trait +------------------------------ + +If for some reason it is a problem to inherit from :class:`TestKit` due to it +being a concrete class instead of a trait, there’s :class:`TestKitBase`: + +.. includecode:: code/docs/testkit/TestkitDocSpec.scala + :include: test-kit-base + :exclude: put-your-test-code-here + +The ``implicit lazy val system`` must be declared exactly like that (you can of +course pass arguments to the actor system factory as needed) because trait +:class:`TestKitBase` needs the system during its construction. 
+ Specs2 ------ diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 156a9d8612..6d8f73e7b8 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -101,11 +101,11 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor { * @author Roland Kuhn * @since 1.1 */ -class TestKit(_system: ActorSystem) { +trait TestKitBase { import TestActor.{ Message, RealMessage, NullMessage } - implicit val system = _system + implicit val system: ActorSystem val testKitSettings = TestKitExtension(system) private val queue = new LinkedBlockingDeque[Message]() @@ -579,6 +579,8 @@ class TestKit(_system: ActorSystem) { private def format(u: TimeUnit, d: Duration) = "%.3f %s".format(d.toUnit(u), u.toString.toLowerCase) } +class TestKit(_system: ActorSystem) extends { implicit val system = _system } with TestKitBase + object TestKit { private[testkit] val testActorId = new AtomicInteger(0) From beac5c5cff08ea78e453566da62cfdcda49aafad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 4 Jun 2012 10:12:31 +0200 Subject: [PATCH 269/538] Fixed typo in config --- .../src/multi-jvm/scala/akka/cluster/NodeLeaving.scala | 2 +- .../multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala index a6ddccb806..058bfca7e9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala @@ -18,7 +18,7 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.unreachable-nodes-reaper-frequency = 30000 # turn "off" reaping to unreachable node 
set + akka.cluster.unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala index 8a1815a5dd..f2032f28d3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala @@ -20,8 +20,8 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" akka.cluster { - leader-actions-frequency = 5000 ms # increase the leader action task frequency - unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set + leader-actions-frequency = 5 s # increase the leader action task frequency + unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) From a515377592b6daae707a9b49c21717763edb554e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 10:35:27 +0200 Subject: [PATCH 270/538] Formatting --- .../src/main/scala/akka/dispatch/ThreadPoolBuilder.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index b6fd432296..25125ae149 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -157,7 +157,8 @@ case class ThreadPoolConfigBuilder(config: ThreadPoolConfig) { def setQueueFactory(newQueueFactory: QueueFactory): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = newQueueFactory)) - def configure(fs: Option[Function[ThreadPoolConfigBuilder, 
ThreadPoolConfigBuilder]]*): ThreadPoolConfigBuilder = fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) + def configure(fs: Option[Function[ThreadPoolConfigBuilder, ThreadPoolConfigBuilder]]*): ThreadPoolConfigBuilder = + fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) } object MonitorableThreadFactory { From 8297e0132b482136bca021074df91d702a7a4a56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 4 Jun 2012 10:38:10 +0200 Subject: [PATCH 271/538] Added comment to explain the increase in leader action frequency. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala index f2032f28d3..3fe9e220f6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala @@ -20,7 +20,7 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" akka.cluster { - leader-actions-frequency = 5 s # increase the leader action task frequency + leader-actions-frequency = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set } """) @@ -64,6 +64,8 @@ abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExi runOn(first, third) { // 1. 
Verify that 'second' node is set to LEAVING + // We have set the 'leader-actions-frequency' to 5 seconds to make sure that we get a + // chance to test the LEAVING state before the leader moves the node to EXITING awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Leaving)) // wait on LEAVING val hasLeft = cluster.latestGossip.members.find(_.status == MemberStatus.Leaving) // verify node that left hasLeft must be('defined) From df479a0bf09c0cda9b646a341f9903674cd23f7a Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 11:29:56 +0200 Subject: [PATCH 272/538] add back TestProbe.reply, see #2172 --- .../scala/code/docs/testkit/TestkitDocSpec.scala | 12 ++++++------ .../src/main/scala/akka/testkit/TestKit.scala | 13 +++++++------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala index 96c7857990..564b7929ce 100644 --- a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala @@ -210,7 +210,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val probe = TestProbe() val future = probe.ref ? "hello" probe.expectMsg(0 millis, "hello") // TestActor runs on CallingThreadDispatcher - probe.sender ! 
"world" + probe.reply("world") assert(future.isCompleted && future.value == Some(Right("world"))) //#test-probe-reply } @@ -253,20 +253,20 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } //#event-filter } - + "demonstrate TestKitBase" in { //#test-kit-base import akka.testkit.TestKitBase - + class MyTest extends TestKitBase { implicit lazy val system = ActorSystem() - + //#put-your-test-code-here val probe = TestProbe() probe.send(testActor, "hello") - try expectMsg("hello") catch { case NonFatal(e) => system.shutdown(); throw e } + try expectMsg("hello") catch { case NonFatal(e) ⇒ system.shutdown(); throw e } //#put-your-test-code-here - + system.shutdown() } //#test-kit-base diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 6d8f73e7b8..9dfa40a5ee 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -642,22 +642,23 @@ class TestProbe(_application: ActorSystem) extends TestKit(_application) { * Replies will be available for inspection with all of TestKit's assertion * methods. */ - def send(actor: ActorRef, msg: AnyRef) = { - actor.!(msg)(testActor) - } + def send(actor: ActorRef, msg: Any): Unit = actor.!(msg)(testActor) /** * Forward this message as if in the TestActor's receive method with self.forward. */ - def forward(actor: ActorRef, msg: AnyRef = lastMessage.msg) { - actor.!(msg)(lastMessage.sender) - } + def forward(actor: ActorRef, msg: Any = lastMessage.msg): Unit = actor.!(msg)(lastMessage.sender) /** * Get sender of last received message. */ def sender = lastMessage.sender + /** + * Send message to the sender of the last dequeued message. 
+ */ + def reply(msg: Any): Unit = sender.!(msg)(ref) + } object TestProbe { From 2e788c970450cbd5e9efdab13f88c08e6812980e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 11:46:59 +0200 Subject: [PATCH 273/538] Adding some sanity and some CAS-magic --- .../java/akka/actor/AbstractActorCell.java | 19 +++++++++++++ .../src/main/scala/akka/actor/ActorCell.scala | 28 +++++++++++++++---- .../akka/dispatch/AbstractDispatcher.scala | 15 ++++------ .../akka/dispatch/BalancingDispatcher.scala | 4 +-- .../main/scala/akka/dispatch/Mailbox.scala | 2 +- 5 files changed, 50 insertions(+), 18 deletions(-) create mode 100644 akka-actor/src/main/java/akka/actor/AbstractActorCell.java diff --git a/akka-actor/src/main/java/akka/actor/AbstractActorCell.java b/akka-actor/src/main/java/akka/actor/AbstractActorCell.java new file mode 100644 index 0000000000..d6005f463c --- /dev/null +++ b/akka-actor/src/main/java/akka/actor/AbstractActorCell.java @@ -0,0 +1,19 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.actor; + +import akka.util.Unsafe; + +final class AbstractActorCell { + final static long mailboxOffset; + + static { + try { + mailboxOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_mailboxDoNotCallMeDirectly")); + } catch(Throwable t){ + throw new ExceptionInInitializerError(t); + } + } +} diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 23cd796ad2..c74010668b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -9,13 +9,12 @@ import scala.annotation.tailrec import java.util.concurrent.TimeUnit import java.util.concurrent.TimeUnit.MILLISECONDS import akka.event.Logging.{ Debug, Warning, Error } -import akka.util.{ Duration, Helpers } import akka.japi.Procedure import java.io.{ NotSerializableException, ObjectOutputStream } import akka.serialization.SerializationExtension -import akka.util.NonFatal import akka.event.Logging.LogEventException import collection.immutable.{ TreeSet, Stack, TreeMap } +import akka.util.{ Unsafe, Duration, Helpers, NonFatal } //TODO: everything here for current compatibility - could be limited more @@ -319,7 +318,7 @@ private[akka] class ActorCell( val props: Props, @volatile var parent: InternalActorRef, /*no member*/ _receiveTimeout: Option[Duration]) extends UntypedActorContext { - + import AbstractActorCell.mailboxOffset import ActorCell._ final def systemImpl = system @@ -412,8 +411,7 @@ private[akka] class ActorCell( var currentMessage: Envelope = _ var actor: Actor = _ private var behaviorStack: Stack[Actor.Receive] = Stack.empty - @volatile //This must be volatile since it isn't protected by the mailbox status - var mailbox: Mailbox = _ + @volatile var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status var nextNameSequence: Long = 0 var watching: Set[ActorRef] = 
emptyActorRefSet var watchedBy: Set[ActorRef] = emptyActorRefSet @@ -428,6 +426,24 @@ private[akka] class ActorCell( @inline final val dispatcher: MessageDispatcher = system.dispatchers.lookup(props.dispatcher) + /** + * INTERNAL API + * + * Returns a reference to the current mailbox + */ + @inline final def mailbox: Mailbox = Unsafe.instance.getObjectVolatile(this, mailboxOffset).asInstanceOf[Mailbox] + + /** + * INTERNAL API + * + * replaces the current mailbox using getAndSet semantics + */ + @tailrec final def swapMailbox(newMailbox: Mailbox): Mailbox = { + val oldMailbox = mailbox + if (!Unsafe.instance.compareAndSwapObject(this, mailboxOffset, oldMailbox, newMailbox)) swapMailbox(newMailbox) + else oldMailbox + } + /** * UntypedActorContext impl */ @@ -440,7 +456,7 @@ private[akka] class ActorCell( * Create the mailbox and enqueue the Create() message to ensure that * this is processed before anything else. */ - mailbox = dispatcher.createMailbox(this) + swapMailbox(dispatcher.createMailbox(this)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ mailbox.systemEnqueue(self, Create()) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 8e160276e8..4692486307 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -310,16 +310,14 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext case 0 ⇒ shutdownSchedule match { case UNSCHEDULED ⇒ - if (updateShutdownSchedule(UNSCHEDULED, SCHEDULED)) { - scheduleShutdownAction() - () - } else ifSensibleToDoSoThenScheduleShutdown() + if (updateShutdownSchedule(UNSCHEDULED, SCHEDULED)) scheduleShutdownAction() + else ifSensibleToDoSoThenScheduleShutdown() case SCHEDULED ⇒ if (updateShutdownSchedule(SCHEDULED, RESCHEDULED)) () else ifSensibleToDoSoThenScheduleShutdown() - case RESCHEDULED 
⇒ () + case RESCHEDULED ⇒ } - case _ ⇒ () + case _ ⇒ } private def scheduleShutdownAction(): Unit = { @@ -349,9 +347,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext protected[akka] def unregister(actor: ActorCell) { if (debug) actors.remove(this, actor.self) addInhabitants(-1) - val mailBox = actor.mailbox + val mailBox = actor.swapMailbox(deadLetterMailbox) mailBox.becomeClosed() // FIXME reschedule in tell if possible race with cleanUp is detected in order to properly clean up - actor.mailbox = deadLetterMailbox mailBox.cleanUp() } @@ -359,7 +356,6 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext @tailrec final def run() { shutdownSchedule match { - case UNSCHEDULED ⇒ () case SCHEDULED ⇒ try { if (inhabitants == 0) shutdown() //Warning, racy @@ -369,6 +365,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext case RESCHEDULED ⇒ if (updateShutdownSchedule(RESCHEDULED, SCHEDULED)) scheduleShutdownAction() else run() + case UNSCHEDULED ⇒ } } } diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 43e8944105..e50f9150a4 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -50,9 +50,9 @@ class BalancingDispatcher( private class SharingMailbox(_actor: ActorCell, _messageQueue: MessageQueue) extends Mailbox(_actor, _messageQueue) with DefaultSystemMessageQueue { override def cleanUp(): Unit = { + val dlq = actor.systemImpl.deadLetterMailbox //Don't call the original implementation of this since it scraps all messages, and we don't want to do that - if (hasSystemMessages) { - val dlq = actor.systemImpl.deadLetterMailbox + while (hasSystemMessages) { var message = systemDrain() while (message ne null) { // message must be “virgin” before being able to systemEnqueue again 
diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 35b1e35012..b81a2fc0ba 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -235,7 +235,7 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes protected[dispatch] def cleanUp(): Unit = if (actor ne null) { // actor is null for the deadLetterMailbox val dlm = actor.systemImpl.deadLetterMailbox - if (hasSystemMessages) { + while (hasSystemMessages) { var message = systemDrain() while (message ne null) { // message must be “virgin” before being able to systemEnqueue again From 52f122107c04e88d1a9ef9dee4fe002b5653c05c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 11:38:39 +0200 Subject: [PATCH 274/538] Fix shutdown/remove race as described by @rkuhn, see #2137 * Skip nodes removal * Ignore removed client when enter barrier * Change order of testConductor.shutdown and testConductor.removeNode --- .../cluster/ClientDowningNodeThatIsUnreachableSpec.scala | 2 +- .../akka/cluster/GossipingAccrualFailureDetectorSpec.scala | 2 +- .../cluster/LeaderDowningNodeThatIsUnreachableSpec.scala | 4 ++-- .../multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala | 2 +- .../src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 2 +- .../src/main/scala/akka/remote/testconductor/Conductor.scala | 5 ++--- 6 files changed, 8 insertions(+), 9 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 6ab4d1a39e..948791167e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -43,8 +43,8 @@ class 
ClientDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'third' node - testConductor.shutdown(third, 0) testConductor.removeNode(third) + testConductor.shutdown(third, 0) // mark 'third' node as DOWN cluster.down(thirdAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 9d388622db..790c0e07fd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -57,8 +57,8 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { runOn(first) { - testConductor.shutdown(third, 0) testConductor.removeNode(third) + testConductor.shutdown(third, 0) } runOn(first, second) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 63665d3c57..d04a97c9f1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -51,8 +51,8 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'fourth' node - testConductor.shutdown(fourth, 0) testConductor.removeNode(fourth) + testConductor.shutdown(fourth, 0) testConductor.enter("down-fourth-node") // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- @@ -91,8 +91,8 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'second' node - testConductor.shutdown(second, 0) testConductor.removeNode(second) + 
testConductor.shutdown(second, 0) testConductor.enter("down-second-node") // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index ba0471bedb..932eb91e15 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -65,8 +65,8 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp case `controller` ⇒ testConductor.enter("before-shutdown") - testConductor.shutdown(leader, 0) testConductor.removeNode(leader) + testConductor.shutdown(leader, 0) testConductor.enter("after-shutdown", "after-down", "completed") case `leader` ⇒ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index c0c12f4582..e72c8325f2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -57,8 +57,8 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { runOn(first) { val secondAddress = node(second).address - testConductor.shutdown(second, 0) testConductor.removeNode(second) + testConductor.shutdown(second, 0) awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) cluster.isSingletonCluster must be(true) assertLeader(first) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 8fa8eeff21..f8f16a4d9c 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ 
b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -444,7 +444,6 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP nodes(node).fsm forward ToClient(TerminateMsg(exitValueOrKill)) } case Remove(node) ⇒ - nodes -= node barrier ! BarrierCoordinator.RemoveClient(node) } case GetNodes ⇒ sender ! nodes.keys @@ -540,8 +539,8 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor when(Waiting) { case Event(EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ - if (name != barrier || clients.find(_.fsm == sender).isEmpty) throw WrongBarrier(name, sender, d) - val together = sender :: arrived + if (name != barrier) throw WrongBarrier(name, sender, d) + val together = if (clients.find(_.fsm == sender).isDefined) sender :: arrived else arrived handleBarrier(d.copy(arrived = together)) case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒ clients find (_.name == name) match { From e4104cfd0687ca09943a64e8a42706e1a97ebf1c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 11:58:09 +0200 Subject: [PATCH 275/538] Replace 'after' barrier with explicit barrier inside test method. * It's no problem using after, but scalatest will output the test method as completed (green) before running after, so it looks confusing in the logs * Using unique barrier names adds extra traceability in case of failures. 
--- .../ClientDowningNodeThatIsUnreachableSpec.scala | 4 +--- .../akka/cluster/ClientDowningNodeThatIsUpSpec.scala | 4 +--- .../cluster/GossipingAccrualFailureDetectorSpec.scala | 11 +++++------ .../LeaderDowningNodeThatIsUnreachableSpec.scala | 4 +--- .../akka/cluster/MembershipChangeListenerSpec.scala | 11 +++++------ .../multi-jvm/scala/akka/cluster/NodeLeaving.scala | 7 +++---- .../scala/akka/cluster/NodeLeavingAndExiting.scala | 7 +++---- .../NodeLeavingAndExitingAndBeingRemoved.scala | 3 +-- .../scala/akka/cluster/NodeMembershipSpec.scala | 11 +++++------ .../scala/akka/cluster/NodeShutdownSpec.scala | 10 ++++------ .../scala/akka/cluster/NodeStartupSpec.scala | 9 +++------ 11 files changed, 32 insertions(+), 49 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 948791167e..f657bcee3e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -26,8 +25,7 @@ class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeT class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import ClientDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 
6b0bbae22e..666c3e207a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -26,8 +25,7 @@ class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSp class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import ClientDowningNodeThatIsUpMultiJvmSpec._ override def initialParticipants = 4 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 790c0e07fd..16113519da 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -3,7 +3,6 @@ */ package akka.cluster -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -25,7 +24,7 @@ class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailu class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import GossipingAccrualFailureDetectorMultiJvmSpec._ override def initialParticipants = 3 @@ -34,10 +33,6 @@ abstract class GossipingAccrualFailureDetectorSpec 
extends MultiNodeSpec(Gossipi lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address - after { - testConductor.enter("after") - } - "A Gossip-driven Failure Detector" must { "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { @@ -53,6 +48,8 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi cluster.failureDetector.isAvailable(firstAddress) must be(true) cluster.failureDetector.isAvailable(secondAddress) must be(true) cluster.failureDetector.isAvailable(thirdAddress) must be(true) + + testConductor.enter("after-1") } "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { @@ -68,6 +65,8 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi cluster.failureDetector.isAvailable(firstAddress) must be(true) cluster.failureDetector.isAvailable(secondAddress) must be(true) } + + testConductor.enter("after-2") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index d04a97c9f1..fda3046e4c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -34,8 +33,7 @@ class LeaderDowningNodeThatIsUnreachableMultiJvmNode4 extends LeaderDowningNodeT class LeaderDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(LeaderDowningNodeThatIsUnreachableMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with 
MultiNodeClusterSpec { import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index f818c97744..070fb80553 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -24,15 +23,11 @@ class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import MembershipChangeListenerMultiJvmSpec._ override def initialParticipants = 3 - after { - testConductor.enter("after") - } - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address @@ -59,6 +54,8 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan cluster.convergence.isDefined must be(true) } + testConductor.enter("after-1") + } "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { @@ -77,6 +74,8 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan latch.await cluster.convergence.isDefined must be(true) + testConductor.enter("after-2") + } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala index 058bfca7e9..39fee8acfa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala @@ -4,7 +4,6 @@ package akka.cluster import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -17,10 +16,10 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster.unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set """)) - .withFallback(MultiNodeClusterSpec.clusterConfig)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec @@ -28,7 +27,7 @@ class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import NodeLeavingMultiJvmSpec._ override def initialParticipants = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala index 3fe9e220f6..448d57d6e7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala @@ -4,7 +4,6 @@ package akka.cluster import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -18,13 +17,13 @@ object NodeLeavingAndExitingMultiJvmSpec 
extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-frequency = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set } """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec @@ -32,7 +31,7 @@ class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import NodeLeavingAndExitingMultiJvmSpec._ override def initialParticipants = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala index 7c1037a624..8ea16dfa8a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala @@ -4,7 +4,6 @@ package akka.cluster import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -24,7 +23,7 @@ class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndEx class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends 
MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec._ override def initialParticipants = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index fecb53c898..cf6839dd83 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -22,15 +21,11 @@ class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec -abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec { import NodeMembershipMultiJvmSpec._ override def initialParticipants = 3 - after { - testConductor.enter("after") - } - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address @@ -55,6 +50,8 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp awaitCond(cluster.convergence.isDefined) } + testConductor.enter("after-1") + } "(when three nodes) start gossiping to each other so that all nodes gets the same gossip info" taggedAs LongRunningTest in { @@ -70,6 +67,8 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp } awaitCond(cluster.convergence.isDefined) + 
testConductor.enter("after-2") + } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index e72c8325f2..e59382341f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -28,15 +27,11 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec -abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { import NodeShutdownMultiJvmSpec._ override def initialParticipants = 2 - after { - testConductor.enter("after") - } - "A cluster of 2 nodes" must { "not be singleton cluster when joined" taggedAs LongRunningTest in { @@ -52,6 +47,8 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) awaitUpConvergence(numberOfMembers = 2) cluster.isSingletonCluster must be(false) assertLeader(first, second) + + testConductor.enter("after-1") } "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { @@ -64,6 +61,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) assertLeader(first) } + testConductor.enter("after-2") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index b2b98f94fa..7e3fdb3323 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -20,15 +19,11 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { class NodeStartupMultiJvmNode1 extends NodeStartupSpec class NodeStartupMultiJvmNode2 extends NodeStartupSpec -abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec { import NodeStartupMultiJvmSpec._ override def initialParticipants = 2 - after { - testConductor.enter("after") - } - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address @@ -40,6 +35,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi awaitUpConvergence(numberOfMembers = 1) assertLeader(first) } + testConductor.enter("after-1") } } @@ -58,6 +54,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi cluster.latestGossip.members.size must be(2) awaitCond(cluster.convergence.isDefined) assertLeader(first, second) + testConductor.enter("after-2") } } From fd1d0ce1212637084eeba004408fa9ba70eef21e Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 12:18:30 +0200 Subject: [PATCH 276/538] make cleanUp of systemMessages atomic - extend systemDrain to take the new contents which shall be switched in - make NoMessage placeholder which will signal final closing of the mailbox - put that in when cleaning up, and check it when enqueuing --- .../akka/actor/dispatch/ActorModelSpec.scala | 2 +- .../main/scala/akka/actor/ActorSystem.scala | 2 +- 
.../akka/dispatch/AbstractDispatcher.scala | 4 ++ .../akka/dispatch/BalancingDispatcher.scala | 16 +++--- .../main/scala/akka/dispatch/Mailbox.scala | 51 ++++++++++--------- 5 files changed, 40 insertions(+), 35 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index acc416f04f..4d83c85b82 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -374,7 +374,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa def compare(l: AnyRef, r: AnyRef) = (l, r) match { case (ll: ActorCell, rr: ActorCell) ⇒ ll.self.path compareTo rr.self.path } } foreach { case cell: ActorCell ⇒ - System.err.println(" - " + cell.self.path + " " + cell.isTerminated + " " + cell.mailbox.status + " " + cell.mailbox.numberOfMessages + " " + SystemMessage.size(cell.mailbox.systemDrain())) + System.err.println(" - " + cell.self.path + " " + cell.isTerminated + " " + cell.mailbox.status + " " + cell.mailbox.numberOfMessages + " " + SystemMessage.size(cell.mailbox.systemDrain(null))) } System.err.println("Mailbox: " + mq.numberOfMessages + " " + mq.hasMessages) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 008610c333..af7313b41e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -545,7 +545,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, becomeClosed() def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = deadLetters ! 
DeadLetter(handle, receiver, receiver) - def systemDrain(): SystemMessage = null + def systemDrain(newContents: SystemMessage): SystemMessage = null def hasSystemMessages = false } diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 48a91dd00c..12eea14ffc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -107,6 +107,10 @@ private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends Sys * INTERNAL API */ private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to tear down a DeathWatch +/** + * INTERNAL API + */ +private[akka] case object NoMessage extends SystemMessage // switched into the mailbox to signal termination final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Runnable { def run(): Unit = diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index e50f9150a4..6beee3c9da 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -52,15 +52,13 @@ class BalancingDispatcher( override def cleanUp(): Unit = { val dlq = actor.systemImpl.deadLetterMailbox //Don't call the original implementation of this since it scraps all messages, and we don't want to do that - while (hasSystemMessages) { - var message = systemDrain() - while (message ne null) { - // message must be “virgin” before being able to systemEnqueue again - val next = message.next - message.next = null - dlq.systemEnqueue(actor.self, message) - message = next - } + var message = systemDrain(NoMessage) + while (message ne null) { + // message must be “virgin” before being able to systemEnqueue again + val 
next = message.next + message.next = null + dlq.systemEnqueue(actor.self, message) + message = next } } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index b81a2fc0ba..d26e7b2afc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -169,6 +169,7 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes */ protected final def systemQueueGet: SystemMessage = Unsafe.instance.getObjectVolatile(this, AbstractMailbox.systemMessageOffset).asInstanceOf[SystemMessage] + protected final def systemQueuePut(_old: SystemMessage, _new: SystemMessage): Boolean = Unsafe.instance.compareAndSwapObject(this, AbstractMailbox.systemMessageOffset, _old, _new) @@ -208,14 +209,14 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } final def processAllSystemMessages() { - var nextMessage = systemDrain() + var nextMessage = systemDrain(null) try { while ((nextMessage ne null) && !isClosed) { if (debug) println(actor.self + " processing system message " + nextMessage + " with " + actor.childrenRefs) actor systemInvoke nextMessage nextMessage = nextMessage.next // don’t ever execute normal message when system message present! 
- if (nextMessage eq null) nextMessage = systemDrain() + if (nextMessage eq null) nextMessage = systemDrain(null) } } catch { case NonFatal(e) ⇒ @@ -235,15 +236,13 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes protected[dispatch] def cleanUp(): Unit = if (actor ne null) { // actor is null for the deadLetterMailbox val dlm = actor.systemImpl.deadLetterMailbox - while (hasSystemMessages) { - var message = systemDrain() - while (message ne null) { - // message must be “virgin” before being able to systemEnqueue again - val next = message.next - message.next = null - dlm.systemEnqueue(actor.self, message) - message = next - } + var message = systemDrain(NoMessage) + while (message ne null) { + // message must be “virgin” before being able to systemEnqueue again + val next = message.next + message.next = null + dlm.systemEnqueue(actor.self, message) + message = next } if (messageQueue ne null) // needed for CallingThreadDispatcher, which never calls Mailbox.run() @@ -300,7 +299,7 @@ private[akka] trait SystemMessageQueue { /** * Dequeue all messages from system queue and return them as single-linked list. */ - def systemDrain(): SystemMessage + def systemDrain(newContents: SystemMessage): SystemMessage def hasSystemMessages: Boolean } @@ -315,26 +314,30 @@ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ assert(message.next eq null) if (Mailbox.debug) println(actor.self + " having enqueued " + message) val head = systemQueueGet - /* - * this write is safely published by the compareAndSet contained within - * systemQueuePut; “Intra-Thread Semantics” on page 12 of the JSR133 spec - * guarantees that “head” uses the value obtained from systemQueueGet above. - * Hence, SystemMessage.next does not need to be volatile. 
- */ - message.next = head - if (!systemQueuePut(head, message)) { - message.next = null - systemEnqueue(receiver, message) + if (head == NoMessage) actor.system.deadLetterMailbox.systemEnqueue(receiver, message) + else { + /* + * this write is safely published by the compareAndSet contained within + * systemQueuePut; “Intra-Thread Semantics” on page 12 of the JSR133 spec + * guarantees that “head” uses the value obtained from systemQueueGet above. + * Hence, SystemMessage.next does not need to be volatile. + */ + message.next = head + if (!systemQueuePut(head, message)) { + message.next = null + systemEnqueue(receiver, message) + } } } @tailrec - final def systemDrain(): SystemMessage = { + final def systemDrain(newContents: SystemMessage): SystemMessage = { val head = systemQueueGet - if (systemQueuePut(head, null)) SystemMessage.reverse(head) else systemDrain() + if (systemQueuePut(head, newContents)) SystemMessage.reverse(head) else systemDrain(newContents) } def hasSystemMessages: Boolean = systemQueueGet ne null + } /** From b45305a61e29f4d755b048b7dcecefdef2662914 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 13:34:30 +0200 Subject: [PATCH 277/538] More formatting --- .../akka/dispatch/ThreadPoolBuilder.scala | 26 ++++++------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 25125ae149..963299debc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -29,30 +29,20 @@ object ThreadPoolConfig { val defaultTimeout: Duration = Duration(60000L, TimeUnit.MILLISECONDS) val defaultRejectionPolicy: RejectedExecutionHandler = new SaneRejectedExecutionHandler() - def scaledPoolSize(floor: Int, multiplier: Double, ceiling: Int): Int = { - import scala.math.{ min, max } - 
min(max((Runtime.getRuntime.availableProcessors * multiplier).ceil.toInt, floor), ceiling) - } + def scaledPoolSize(floor: Int, multiplier: Double, ceiling: Int): Int = + math.min(math.max((Runtime.getRuntime.availableProcessors * multiplier).ceil.toInt, floor), ceiling) - def arrayBlockingQueue(capacity: Int, fair: Boolean): QueueFactory = - () ⇒ new ArrayBlockingQueue[Runnable](capacity, fair) + def arrayBlockingQueue(capacity: Int, fair: Boolean): QueueFactory = () ⇒ new ArrayBlockingQueue[Runnable](capacity, fair) - def synchronousQueue(fair: Boolean): QueueFactory = - () ⇒ new SynchronousQueue[Runnable](fair) + def synchronousQueue(fair: Boolean): QueueFactory = () ⇒ new SynchronousQueue[Runnable](fair) - def linkedBlockingQueue(): QueueFactory = - () ⇒ new LinkedBlockingQueue[Runnable]() + def linkedBlockingQueue(): QueueFactory = () ⇒ new LinkedBlockingQueue[Runnable]() - def linkedBlockingQueue(capacity: Int): QueueFactory = - () ⇒ new LinkedBlockingQueue[Runnable](capacity) + def linkedBlockingQueue(capacity: Int): QueueFactory = () ⇒ new LinkedBlockingQueue[Runnable](capacity) - def reusableQueue(queue: BlockingQueue[Runnable]): QueueFactory = - () ⇒ queue + def reusableQueue(queue: BlockingQueue[Runnable]): QueueFactory = () ⇒ queue - def reusableQueue(queueFactory: QueueFactory): QueueFactory = { - val queue = queueFactory() - () ⇒ queue - } + def reusableQueue(queueFactory: QueueFactory): QueueFactory = reusableQueue(queueFactory()) } /** From e592cebe20682e0f789bd145e79922c70e267c0a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 13:35:08 +0200 Subject: [PATCH 278/538] Add script to run LogRoleReplace, see #2173 --- project/scripts/multi-node-log-replace | 11 +++++++++++ scripts/multi-node-log-replace.sh | 25 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100755 project/scripts/multi-node-log-replace create mode 100755 scripts/multi-node-log-replace.sh diff --git a/project/scripts/multi-node-log-replace 
b/project/scripts/multi-node-log-replace new file mode 100755 index 0000000000..83f1b8a136 --- /dev/null +++ b/project/scripts/multi-node-log-replace @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# +# Utility to make log files from multi-node tests easier to analyze. +# Replaces jvm names and host:port with corresponding logical role name. +# + + +# check for an sbt command +type -P sbt &> /dev/null || fail "sbt command not found" + +sbt "project akka-remote-tests" "test:run-main akka.remote.testkit.LogRoleReplace $1 $2" \ No newline at end of file diff --git a/scripts/multi-node-log-replace.sh b/scripts/multi-node-log-replace.sh new file mode 100755 index 0000000000..8e8af7112a --- /dev/null +++ b/scripts/multi-node-log-replace.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Utility to make log files from multi-node tests easier to analyze. +# Replaces jvm names and host:port with corresponding logical role name. +# +# Use with 0, 1 or 2 arguments. +# +# When using 0 arguments it reads from standard input +# and writes to standard output. +# +# With 1 argument it reads from the file specified in the first argument +# and writes to standard output. +# +# With 2 arguments it reads the file specified in the first argument +# and writes to the file specified in the second argument. 
+# +# You can also replace the contents of the clipboard instead of using files +# by supplying `clipboard` as argument +# + + +# check for an sbt command +type -P sbt &> /dev/null || fail "sbt command not found" + +sbt "project akka-remote-tests" "test:run-main akka.remote.testkit.LogRoleReplace $1 $2" \ No newline at end of file From c990fee724b96a9467b323f8eea99e06b118061b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 13:46:29 +0200 Subject: [PATCH 279/538] Switching to the appropriate check for confirmed existence --- .../src/test/scala/akka/routing/RoutingSpec.scala | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index f1952b8f79..35631924cf 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -73,7 +73,9 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with watch(router) watch(c2) system.stop(c2) - expectMsg(Terminated(c2)(existenceConfirmed = true)) + expectMsgPF() { + case t @ Terminated(`c2`) if t.existenceConfirmed == true ⇒ t + } // it might take a while until the Router has actually processed the Terminated message awaitCond { router ! 
"" @@ -84,7 +86,9 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with res == Seq(c1, c1) } system.stop(c1) - expectMsg(Terminated(router)(existenceConfirmed = true)) + expectMsgPF() { + case t @ Terminated(`router`) if t.existenceConfirmed == true ⇒ t + } } "be able to send their routees" in { From 5810f7353039a88371d54c4f85a850860a74ae17 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 13:51:30 +0200 Subject: [PATCH 280/538] Minor improvement from review feedback, see 2137 --- .../src/main/scala/akka/remote/testconductor/Conductor.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index f8f16a4d9c..3aed112b55 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -540,7 +540,7 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor when(Waiting) { case Event(EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ if (name != barrier) throw WrongBarrier(name, sender, d) - val together = if (clients.find(_.fsm == sender).isDefined) sender :: arrived else arrived + val together = if (clients.exists(_.fsm == sender)) sender :: arrived else arrived handleBarrier(d.copy(arrived = together)) case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒ clients find (_.name == name) match { From f30a1a0b1f7678fb66eef0e8509e6ddeb1899e8d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 14:29:32 +0200 Subject: [PATCH 281/538] Always removeNode when shutdown, see 2137 --- .../akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala | 1 - .../akka/cluster/GossipingAccrualFailureDetectorSpec.scala | 1 - .../akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala | 2 -- 
.../src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala | 1 - .../src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 1 - .../src/main/scala/akka/remote/testconductor/Conductor.scala | 4 +++- 6 files changed, 3 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index f657bcee3e..a5ce2d4258 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -41,7 +41,6 @@ class ClientDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'third' node - testConductor.removeNode(third) testConductor.shutdown(third, 0) // mark 'third' node as DOWN diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 16113519da..afaeac747b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -54,7 +54,6 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { runOn(first) { - testConductor.removeNode(third) testConductor.shutdown(third, 0) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index fda3046e4c..dfd8dde310 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -49,7 +49,6 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'fourth' node - testConductor.removeNode(fourth) testConductor.shutdown(fourth, 0) testConductor.enter("down-fourth-node") @@ -89,7 +88,6 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'second' node - testConductor.removeNode(second) testConductor.shutdown(second, 0) testConductor.enter("down-second-node") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 932eb91e15..e5972b7d7c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -65,7 +65,6 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp case `controller` ⇒ testConductor.enter("before-shutdown") - testConductor.removeNode(leader) testConductor.shutdown(leader, 0) testConductor.enter("after-shutdown", "after-down", "completed") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index e59382341f..1179f89d76 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -54,7 +54,6 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { runOn(first) { val secondAddress = node(second).address - testConductor.removeNode(second) testConductor.shutdown(second, 0) awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) cluster.isSingletonCluster must be(true) diff --git 
a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 3aed112b55..17a2bfcd5f 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -168,7 +168,8 @@ trait Conductor { this: TestConductorExt ⇒ /** * Tell the remote node to shut itself down using System.exit with the given - * exitValue. + * exitValue. The node will also be removed, so that the remaining nodes may still + * pass subsequent barriers. * * @param node is the symbolic name of the node which is to be affected * @param exitValue is the return code which shall be given to System.exit @@ -441,6 +442,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP if (exitValueOrKill < 0) { // TODO: kill via SBT } else { + barrier ! BarrierCoordinator.RemoveClient(node) nodes(node).fsm forward ToClient(TerminateMsg(exitValueOrKill)) } case Remove(node) ⇒ From b1c507f3b95bd69eb75d8fa2ee13adb494c16d23 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 11:37:23 +0200 Subject: [PATCH 282/538] Shutdown does removeNode, see #2137 --- .../scala/akka/cluster/ConvergenceSpec.scala | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index eeb9b864ed..a76083b0fc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -33,15 +32,11 @@ class ConvergenceMultiJvmNode4 extends 
ConvergenceSpec abstract class ConvergenceSpec extends MultiNodeSpec(ConvergenceMultiJvmSpec) - with MultiNodeClusterSpec with BeforeAndAfter { + with MultiNodeClusterSpec { import ConvergenceMultiJvmSpec._ override def initialParticipants = 4 - after { - testConductor.enter("after") - } - "A cluster of 3 members" must { "reach initial convergence" taggedAs LongRunningTest in { @@ -58,6 +53,8 @@ abstract class ConvergenceSpec runOn(fourth) { // doesn't join immediately } + + testConductor.enter("after-1") } "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest in { @@ -67,14 +64,13 @@ abstract class ConvergenceSpec runOn(first) { // kill 'third' node testConductor.shutdown(third, 0) - testConductor.removeNode(third) } runOn(first, second) { val firstAddress = node(first).address val secondAddress = node(second).address - within(30 seconds) { + within(25 seconds) { // third becomes unreachable awaitCond(cluster.latestGossip.overview.unreachable.size == 1) awaitCond(cluster.latestGossip.members.size == 2) @@ -89,6 +85,7 @@ abstract class ConvergenceSpec } } + testConductor.enter("after-2") } "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest in { @@ -126,6 +123,7 @@ abstract class ConvergenceSpec } } + testConductor.enter("after-3") } } } From 9ee971ee794feff5944b880a21a54aa5ddec8948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Mon, 4 Jun 2012 16:22:10 +0200 Subject: [PATCH 283/538] We need to reregister a client conection when we know the actor system address. 
see #2175 --- .../NetworkFailureInjector.scala | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index d30872cd6e..a0f53b5a9b 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -28,7 +28,7 @@ private[akka] class FailureInjector extends Actor with ActorLogging { ctx: Option[ChannelHandlerContext] = None, throttleSend: Option[SetRate] = None, throttleReceive: Option[SetRate] = None) - case class Injectors(sender: ActorRef, receiver: ActorRef) + case class Injectors(sender: ActorRef, receiver: ActorRef, known: Boolean) var channels = Map[ChannelHandlerContext, Injectors]() var settings = Map[Address, ChannelSettings]() @@ -37,12 +37,13 @@ private[akka] class FailureInjector extends Actor with ActorLogging { /** * Only for a NEW ctx, start ThrottleActors, prime them and update all maps. 
*/ - def ingestContextAddress(ctx: ChannelHandlerContext, addr: Address): Injectors = { + def ingestContextAddress(ctx: ChannelHandlerContext, addr: Address, known: Boolean, + snd: Option[ActorRef] = None, rcv: Option[ActorRef] = None): Injectors = { val gen = generation.next val name = addr.host.get + ":" + addr.port.get - val thrSend = context.actorOf(Props(new ThrottleActor(ctx)), name + "-snd" + gen) - val thrRecv = context.actorOf(Props(new ThrottleActor(ctx)), name + "-rcv" + gen) - val injectors = Injectors(thrSend, thrRecv) + val thrSend = snd getOrElse context.actorOf(Props(new ThrottleActor(ctx)), name + "-snd" + gen) + val thrRecv = rcv getOrElse context.actorOf(Props(new ThrottleActor(ctx)), name + "-rcv" + gen) + val injectors = Injectors(thrSend, thrRecv, known) channels += ctx -> injectors settings += addr -> (settings get addr map { case c @ ChannelSettings(prevCtx, ts, tr) ⇒ @@ -134,7 +135,10 @@ private[akka] class FailureInjector extends Actor with ActorLogging { */ case s @ Send(ctx, direction, future, msg) ⇒ channels get ctx match { - case Some(Injectors(snd, rcv)) ⇒ + case Some(Injectors(snd, rcv, known)) ⇒ + // if the system registered with an empty name then check if we know it now + if (!known) ChannelAddress.get(ctx.getChannel).foreach(addr ⇒ + ingestContextAddress(ctx, addr, true, Some(snd), Some(rcv))) if (direction includes Direction.Send) snd ! s if (direction includes Direction.Receive) rcv ! 
s case None ⇒ @@ -142,21 +146,24 @@ private[akka] class FailureInjector extends Actor with ActorLogging { ctx.getChannel.getRemoteAddress match { case sockAddr: InetSocketAddress ⇒ val (ipaddr, ip, port) = (sockAddr.getAddress, sockAddr.getAddress.getHostAddress, sockAddr.getPort) - val addr = ChannelAddress.get(ctx.getChannel) orElse { + val (addr, known) = ChannelAddress.get(ctx.getChannel) orElse { settings collect { case (a @ Address("akka", _, Some(`ip`), Some(`port`)), _) ⇒ a } headOption } orElse { // only if raw IP failed, try with hostname val name = ipaddr.getHostName if (name == ip) None else settings collect { case (a @ Address("akka", _, Some(`name`), Some(`port`)), _) ⇒ a } headOption - } getOrElse Address("akka", "", ip, port) + } match { + case Some(a) ⇒ (a, true) + case None ⇒ (Address("akka", "", ip, port), false) + } /* * ^- the above last resort will not match later requests directly, but be * picked up by retrieveTargetSettings, so that throttle ops are * applied to the right throttle actors, assuming that there can * be only one actor system per host:port. */ - val inj = ingestContextAddress(ctx, addr) + val inj = ingestContextAddress(ctx, addr, known) if (direction includes Direction.Send) inj.sender ! s if (direction includes Direction.Receive) inj.receiver ! 
s case null ⇒ From 54febffb283129cf84a1de3dffba5b36691f24a0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 17:07:44 +0200 Subject: [PATCH 284/538] #2093 - Adding support for setting the sender when using TestActorRef.receive --- .../src/main/scala/akka/testkit/TestActorRef.scala | 12 +++++++++++- .../test/scala/akka/testkit/TestActorRefSpec.scala | 9 ++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 0a5d6163e8..279c728e80 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -56,7 +56,17 @@ class TestActorRef[T <: Actor]( * thrown will be available to you, while still being able to use * become/unbecome. */ - def receive(o: Any): Unit = underlying.receiveMessage(o) + def receive(o: Any): Unit = receive(o, underlying.system.deadLetters) + + /** + * Directly inject messages into actor receive behavior. Any exceptions + * thrown will be available to you, while still being able to use + * become/unbecome. 
+ */ + def receive(o: Any, sender: ActorRef): Unit = try { + underlying.currentMessage = Envelope(o, if (sender eq null) underlying.system.deadLetters else sender)(underlying.system) + underlying.receiveMessage(o) + } finally underlying.currentMessage = null /** * Retrieve reference to the underlying actor, where the static type matches the factory used inside the diff --git a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala index 7c977884fc..492c44408c 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala @@ -246,11 +246,18 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA a.underlying.dispatcher.getClass must be(classOf[Dispatcher]) } - "proxy receive for the underlying actor" in { + "proxy receive for the underlying actor without sender" in { val ref = TestActorRef[WorkerActor] ref.receive("work") ref.isTerminated must be(true) } + "proxy receive for the underlying actor with sender" in { + val ref = TestActorRef[WorkerActor] + ref.receive("work", testActor) + ref.isTerminated must be(true) + expectMsg("workDone") + } + } } From b840624b7844ff4a8427a4e069b9cd8bdc3a5447 Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 19:28:58 +0200 Subject: [PATCH 285/538] warn against using TestKitBase trait --- akka-docs/scala/testing.rst | 5 ++ .../src/main/scala/akka/testkit/TestKit.scala | 90 +++++++++++-------- 2 files changed, 59 insertions(+), 36 deletions(-) diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index d2875ed62a..d19a1ab753 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -685,6 +685,11 @@ The ``implicit lazy val system`` must be declared exactly like that (you can course pass arguments to the actor system factory as needed) because trait :class:`TestKitBase` needs the system during its 
construction. +.. warning:: + + Use of the trait is discouraged because of potential issues with binary + backwards compatibility in the future, use at own risk. + Specs2 ------ diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 9dfa40a5ee..373f4c1fff 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -62,44 +62,22 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor { } /** - * Test kit for testing actors. Inheriting from this trait enables reception of - * replies from actors, which are queued by an internal actor and can be - * examined using the `expectMsg...` methods. Assertions and bounds concerning - * timing are available in the form of `within` blocks. + * Implementation trait behind the [[akka.testkit.TestKit]] class: you may use + * this if inheriting from a concrete class is not possible. * - *
- * class Test extends TestKit(ActorSystem()) {
- *     try {
- *
- *       val test = system.actorOf(Props[SomeActor]
- *
- *       within (1 second) {
- *         test ! SomeWork
- *         expectMsg(Result1) // bounded to 1 second
- *         expectMsg(Result2) // bounded to the remainder of the 1 second
- *       }
- *
- *     } finally {
- *       system.shutdown()
- *     }
+ * Use of the trait is discouraged because of potential issues with binary 
+ * backwards compatibility in the future; use it at your own risk.
+ * 
+ * This trait requires the concrete class mixing it in to provide an 
+ * [[akka.actor.ActorSystem]] which is available before this trait’s
+ * constructor is run. The recommended way is this:
+ * 
+ * {{{
+ * class MyTest extends TestKitBase {
+ *   implicit lazy val system = ActorSystem() // may add arguments here
+ *   ...
  * }
- * 
- * - * Beware of two points: - * - * - the ActorSystem passed into the constructor needs to be shutdown, - * otherwise thread pools and memory will be leaked - * - this trait is not thread-safe (only one actor with one queue, one stack - * of `within` blocks); it is expected that the code is executed from a - * constructor as shown above, which makes this a non-issue, otherwise take - * care not to run tests within a single test class instance in parallel. - * - * It should be noted that for CI servers and the like all maximum Durations - * are scaled using their Duration.dilated method, which uses the - * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry "akka.test.timefactor". - * - * @author Roland Kuhn - * @since 1.1 + * }}} */ trait TestKitBase { @@ -579,6 +557,46 @@ trait TestKitBase { private def format(u: TimeUnit, d: Duration) = "%.3f %s".format(d.toUnit(u), u.toString.toLowerCase) } +/** + * Test kit for testing actors. Inheriting from this trait enables reception of + * replies from actors, which are queued by an internal actor and can be + * examined using the `expectMsg...` methods. Assertions and bounds concerning + * timing are available in the form of `within` blocks. + * + *
+ * class Test extends TestKit(ActorSystem()) {
+ *     try {
+ *
+ *       val test = system.actorOf(Props[SomeActor]
+ *
+ *       within (1 second) {
+ *         test ! SomeWork
+ *         expectMsg(Result1) // bounded to 1 second
+ *         expectMsg(Result2) // bounded to the remainder of the 1 second
+ *       }
+ *
+ *     } finally {
+ *       system.shutdown()
+ *     }
+ * }
+ * 
+ * + * Beware of two points: + * + * - the ActorSystem passed into the constructor needs to be shutdown, + * otherwise thread pools and memory will be leaked + * - this trait is not thread-safe (only one actor with one queue, one stack + * of `within` blocks); it is expected that the code is executed from a + * constructor as shown above, which makes this a non-issue, otherwise take + * care not to run tests within a single test class instance in parallel. + * + * It should be noted that for CI servers and the like all maximum Durations + * are scaled using their Duration.dilated method, which uses the + * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry "akka.test.timefactor". + * + * @author Roland Kuhn + * @since 1.1 + */ class TestKit(_system: ActorSystem) extends { implicit val system = _system } with TestKitBase object TestKit { From b98fb0e37a132b2b2a29278f3d5ae47abf2919dd Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 23:10:03 +0200 Subject: [PATCH 286/538] clarify deployment using anonymous factories --- akka-docs/java/remoting.rst | 8 ++++++++ akka-docs/scala/remoting.rst | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index ae2ac9c246..910ec5fbb2 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -92,6 +92,14 @@ As you can see from the example above the following pattern is used to find an ` akka://@:/ +.. note:: + + In order to ensure serializability of ``Props`` when passing constructor + arguments to the actor being created, do not make the factory a non-static + inner class: this will inherently capture a reference to its enclosing + object, which in most cases is not serializable. It is best to make a static + inner class which implements :class:`UntypedActorFactory`. 
+ Programmatic Remote Deployment ------------------------------ diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 0f55ccdff4..0863d80b55 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -105,6 +105,14 @@ Once you have configured the properties above you would do the following in code ``SampleActor`` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. +.. note:: + + In order to ensure serializability of ``Props`` when passing constructor + arguments to the actor being created, do not make the factory an inner class: + this will inherently capture a reference to its enclosing object, which in + most cases is not serializable. It is best to create a factory method in the + companion object of the actor’s class. + Programmatic Remote Deployment ------------------------------ From 391fed65941c29aa7d139011b0a97fb7c37f768e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 4 Jun 2012 23:21:28 +0200 Subject: [PATCH 287/538] Misc changes, fixes and improvements after review. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Renamed all 'frequency' to 'interval' - Split up NodeJoinAndUpSpec and into NodeJoinSpec and NodeUpSpec. - Split up MembershipChangeListenerJoinAndUpSpec and into MembershipChangeListenerJoinSpec and MembershipChangeListenerUpSpec. 
- Added utility method 'startClusterNode()' - Fixed race in register listener and telling node to leave - Removed 'after' blocks - Cleaned up unused code - Improved comments Signed-off-by: Jonas Bonér --- .../src/main/resources/reference.conf | 6 +- .../src/main/scala/akka/cluster/Cluster.scala | 12 +-- .../scala/akka/cluster/ClusterSettings.scala | 6 +- ...ientDowningNodeThatIsUnreachableSpec.scala | 6 +- .../ClientDowningNodeThatIsUpSpec.scala | 6 +- .../GossipingAccrualFailureDetectorSpec.scala | 2 +- .../akka/cluster/JoinTwoClustersSpec.scala | 7 +- ...aderDowningNodeThatIsUnreachableSpec.scala | 6 +- .../akka/cluster/LeaderElectionSpec.scala | 7 +- .../MembershipChangeListenerExitingSpec.scala | 28 ++++--- ...=> MembershipChangeListenerJoinSpec.scala} | 44 ++++------- .../MembershipChangeListenerLeavingSpec.scala | 28 ++++--- .../MembershipChangeListenerUpSpec.scala | 64 ++++++++++++++++ .../akka/cluster/MultiNodeClusterSpec.scala | 18 +++-- .../akka/cluster/NodeJoinAndUpSpec.scala | 76 ------------------- .../scala/akka/cluster/NodeJoinSpec.scala | 57 ++++++++++++++ ...LeavingAndExitingAndBeingRemovedSpec.scala | 2 +- .../cluster/NodeLeavingAndExitingSpec.scala | 14 ++-- .../scala/akka/cluster/NodeLeavingSpec.scala | 6 +- .../akka/cluster/NodeMembershipSpec.scala | 8 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 4 +- .../scala/akka/cluster/NodeUpSpec.scala | 50 ++++++++++++ .../akka/cluster/ClusterConfigSpec.scala | 6 +- 23 files changed, 289 insertions(+), 174 deletions(-) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{MembershipChangeListenerJoinAndUpSpec.scala => MembershipChangeListenerJoinSpec.scala} (52%) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala create mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 7dd511e34a..8c905d5b29 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -25,13 +25,13 @@ akka { periodic-tasks-initial-delay = 1s # how often should the node send out gossip information? - gossip-frequency = 1s + gossip-interval = 1s # how often should the leader perform maintenance tasks? - leader-actions-frequency = 1s + leader-actions-interval = 1s # how often should the node move nodes, marked as unreachable by the failure detector, out of the membership ring? - unreachable-nodes-reaper-frequency = 1s + unreachable-nodes-reaper-interval = 1s # accrual failure detection config failure-detector { diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index c5ad773989..8beb7f4164 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -380,9 +380,9 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private val vclockNode = VectorClock.Node(selfAddress.toString) private val periodicTasksInitialDelay = clusterSettings.PeriodicTasksInitialDelay - private val gossipFrequency = clusterSettings.GossipFrequency - private val leaderActionsFrequency = clusterSettings.LeaderActionsFrequency - private val unreachableNodesReaperFrequency = clusterSettings.UnreachableNodesReaperFrequency + private val gossipInterval = clusterSettings.GossipInterval + private val leaderActionsInterval = clusterSettings.LeaderActionsInterval + private val unreachableNodesReaperInterval = clusterSettings.UnreachableNodesReaperInterval implicit private val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) @@ -424,17 +424,17 @@ class Cluster(system: 
ExtendedActorSystem) extends Extension { clusterNode ⇒ // ======================================================== // start periodic gossip to random nodes in cluster - private val gossipCanceller = system.scheduler.schedule(periodicTasksInitialDelay, gossipFrequency) { + private val gossipCanceller = system.scheduler.schedule(periodicTasksInitialDelay, gossipInterval) { gossip() } // start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list) - private val failureDetectorReaperCanceller = system.scheduler.schedule(periodicTasksInitialDelay, unreachableNodesReaperFrequency) { + private val failureDetectorReaperCanceller = system.scheduler.schedule(periodicTasksInitialDelay, unreachableNodesReaperInterval) { reapUnreachableMembers() } // start periodic leader action management (only applies for the current leader) - private val leaderActionsCanceller = system.scheduler.schedule(periodicTasksInitialDelay, leaderActionsFrequency) { + private val leaderActionsCanceller = system.scheduler.schedule(periodicTasksInitialDelay, leaderActionsInterval) { leaderActions() } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 8e9b9c770d..0e7dac06ab 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -20,9 +20,9 @@ class ClusterSettings(val config: Config, val systemName: String) { case AddressFromURIString(addr) ⇒ Some(addr) } val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) - val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip-frequency"), MILLISECONDS) - val LeaderActionsFrequency = Duration(getMilliseconds("akka.cluster.leader-actions-frequency"), MILLISECONDS) - val UnreachableNodesReaperFrequency = 
Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-frequency"), MILLISECONDS) + val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) + val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS) + val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS) val NrOfGossipDaemons = getInt("akka.cluster.nr-of-gossip-daemons") val NrOfDeputyNodes = getInt("akka.cluster.nr-of-deputy-nodes") val AutoDown = getBoolean("akka.cluster.auto-down") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 6ab4d1a39e..ba34c9b0be 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -26,8 +26,8 @@ class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeT class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { + import ClientDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 @@ -36,7 +36,7 @@ class ClientDowningNodeThatIsUnreachableSpec "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() awaitUpConvergence(numberOfMembers = 4) val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 6b0bbae22e..ac1d68c8af 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -26,8 +26,8 @@ class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSp class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { + import ClientDowningNodeThatIsUpMultiJvmSpec._ override def initialParticipants = 4 @@ -36,7 +36,7 @@ class ClientDowningNodeThatIsUpSpec "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() awaitUpConvergence(numberOfMembers = 4) val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 9d388622db..cec99e9af9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -43,7 +43,7 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 9f1395b5dd..7b7263bbe0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -27,7 +27,10 @@ class 
JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec -abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender { +abstract class JoinTwoClustersSpec + extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) + with MultiNodeClusterSpec { + import JoinTwoClustersMultiJvmSpec._ override def initialParticipants = 6 @@ -41,7 +44,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(a1, b1, c1) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 63665d3c57..7b2536d9d2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -34,8 +34,8 @@ class LeaderDowningNodeThatIsUnreachableMultiJvmNode4 extends LeaderDowningNodeT class LeaderDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(LeaderDowningNodeThatIsUnreachableMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { + import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 @@ -44,7 +44,7 @@ class LeaderDowningNodeThatIsUnreachableSpec "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() awaitUpConvergence(numberOfMembers = 4) val fourthAddress = node(fourth).address diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index ba0471bedb..bf60b6b4ac 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -26,7 +26,10 @@ class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec -abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSpec) with MultiNodeClusterSpec { +abstract class LeaderElectionSpec + extends MultiNodeSpec(LeaderElectionMultiJvmSpec) + with MultiNodeClusterSpec { + import LeaderElectionMultiJvmSpec._ override def initialParticipants = 5 @@ -41,7 +44,7 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp "be able to 'elect' a single leader" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index 0145628bd5..8932eed6ee 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -20,8 +20,8 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" akka.cluster { - leader-actions-frequency = 5000 ms # increase the leader action task frequency - unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set + leader-actions-interval = 5 s # increase the leader action task 
interval + unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to unreachable node set } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) @@ -31,8 +31,10 @@ class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListe class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec -abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class MembershipChangeListenerExitingSpec + extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) + with MultiNodeClusterSpec { + import MembershipChangeListenerExitingMultiJvmSpec._ override def initialParticipants = 3 @@ -45,7 +47,7 @@ abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(Members "be notified when new node is EXITING" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") @@ -55,21 +57,27 @@ abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(Members awaitUpConvergence(numberOfMembers = 3) testConductor.enter("rest-started") + runOn(first) { + testConductor.enter("registered-listener") + cluster.leave(secondAddress) + } + + runOn(second) { + testConductor.enter("registered-listener") + } + runOn(third) { val exitingLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists(_.status == MemberStatus.Exiting)) + if (members.size == 3 && members.exists( m => m.address == secondAddress && m.status == MemberStatus.Exiting)) exitingLatch.countDown() } }) + testConductor.enter("registered-listener") exitingLatch.await } - runOn(first) { - cluster.leave(secondAddress) - } - testConductor.enter("finished") } } 
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala similarity index 52% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 81e32d1491..2f82e12506 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -11,7 +11,7 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.duration._ -object MembershipChangeListenerJoinAndUpMultiJvmSpec extends MultiNodeConfig { +object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") @@ -19,46 +19,39 @@ object MembershipChangeListenerJoinAndUpMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" akka.cluster { - gossip-frequency = 1000 ms - leader-actions-frequency = 5000 ms # increase the leader action task frequency + leader-actions-interval = 5 s # increase the leader action task interval to allow time checking for JOIN before leader moves it to UP } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerJoinAndUpMultiJvmNode1 extends MembershipChangeListenerJoinAndUpSpec -class MembershipChangeListenerJoinAndUpMultiJvmNode2 extends MembershipChangeListenerJoinAndUpSpec +class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec +class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec -abstract class MembershipChangeListenerJoinAndUpSpec - extends MultiNodeSpec(MembershipChangeListenerJoinAndUpMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender - with 
BeforeAndAfter { +abstract class MembershipChangeListenerJoinSpec + extends MultiNodeSpec(MembershipChangeListenerJoinMultiJvmSpec) + with MultiNodeClusterSpec { - import MembershipChangeListenerJoinAndUpMultiJvmSpec._ + import MembershipChangeListenerJoinMultiJvmSpec._ override def initialParticipants = 2 - after { - testConductor.enter("after") - } - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address "A registered MembershipChangeListener" must { - "be notified when new node is JOINING and node is marked as UP by the leader" taggedAs LongRunningTest in { + "be notified when new node is JOINING" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } runOn(second) { + testConductor.enter("registered-listener") cluster.join(firstAddress) } runOn(first) { - // JOINING val joinLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { @@ -66,20 +59,13 @@ abstract class MembershipChangeListenerJoinAndUpSpec joinLatch.countDown() } }) + testConductor.enter("registered-listener") + joinLatch.await cluster.convergence.isDefined must be(true) - - // UP - val upLatch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) - upLatch.countDown() - } - }) - upLatch.await - awaitCond(cluster.convergence.isDefined) } + + testConductor.enter("after") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index f8b083c4d8..089f241849 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -18,8 +18,8 @@ object 
MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.leader-actions-frequency = 5000 ms - akka.cluster.unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set + akka.cluster.leader-actions-interval = 5 s + akka.cluster.unreachable-nodes-reaper-interval = 30 s """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -28,8 +28,10 @@ class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListe class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec -abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class MembershipChangeListenerLeavingSpec + extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) + with MultiNodeClusterSpec { + import MembershipChangeListenerLeavingMultiJvmSpec._ override def initialParticipants = 3 @@ -42,7 +44,7 @@ abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(Members "be notified when new node is LEAVING" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") @@ -52,21 +54,27 @@ abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(Members awaitUpConvergence(numberOfMembers = 3) testConductor.enter("rest-started") + runOn(first) { + testConductor.enter("registered-listener") + cluster.leave(secondAddress) + } + + runOn(second) { + testConductor.enter("registered-listener") + } + runOn(third) { val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists(_.status == 
MemberStatus.Leaving)) + if (members.size == 3 && members.exists( m => m.address == secondAddress && m.status == MemberStatus.Leaving)) latch.countDown() } }) + testConductor.enter("registered-listener") latch.await } - runOn(first) { - cluster.leave(secondAddress) - } - testConductor.enter("finished") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala new file mode 100644 index 0000000000..3df6b876f9 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -0,0 +1,64 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object MembershipChangeListenerUpMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec +class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec + +abstract class MembershipChangeListenerUpSpec + extends MultiNodeSpec(MembershipChangeListenerUpMultiJvmSpec) + with MultiNodeClusterSpec { + + import MembershipChangeListenerUpMultiJvmSpec._ + + override def initialParticipants = 2 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + + "A registered MembershipChangeListener" must { + "be notified when new node is marked as UP by the leader" taggedAs LongRunningTest in { + + runOn(first) { + startClusterNode() + } + + runOn(second) { + testConductor.enter("registered-listener") + 
cluster.join(firstAddress) + } + + runOn(first) { + val upLatch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) + upLatch.countDown() + } + }) + testConductor.enter("registered-listener") + + upLatch.await + awaitUpConvergence(numberOfMembers = 2) + } + + testConductor.enter("after") + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 4d0c7f4720..dd57b4b13f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -15,11 +15,11 @@ import akka.util.Duration object MultiNodeClusterSpec { def clusterConfig: Config = ConfigFactory.parseString(""" akka.cluster { - auto-down = off - gossip-frequency = 200 ms - leader-actions-frequency = 200 ms - unreachable-nodes-reaper-frequency = 200 ms - periodic-tasks-initial-delay = 300 ms + auto-down = off + gossip-interval = 200 ms + leader-actions-interval = 200 ms + unreachable-nodes-reaper-interval = 200 ms + periodic-tasks-initial-delay = 300 ms } akka.test { single-expect-default = 5 s @@ -29,8 +29,16 @@ object MultiNodeClusterSpec { trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ + /** + * Create a cluster node using 'Cluster(system)'. + */ def cluster: Cluster = Cluster(system) + /** + * Use this method instead of 'cluster.self'. + */ + def startClusterNode(): Unit = cluster.self + /** * Assert that the member addresses match the expected addresses in the * sort order used by the cluster. 
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala deleted file mode 100644 index 5415df1b4a..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import akka.util.duration._ - -object NodeJoinAndUpMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - - commonConfig( - debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" - akka.cluster { - gossip-frequency = 1000 ms - leader-actions-frequency = 5000 ms # increase the leader action task frequency - } - """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) -} - -class NodeJoinAndUpMultiJvmNode1 extends NodeJoinAndUpSpec -class NodeJoinAndUpMultiJvmNode2 extends NodeJoinAndUpSpec - -abstract class NodeJoinAndUpSpec - extends MultiNodeSpec(NodeJoinAndUpMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender - with BeforeAndAfter { - - import NodeJoinAndUpMultiJvmSpec._ - - override def initialParticipants = 2 - - after { - testConductor.enter("after") - } - - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - - "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - - "be a singleton cluster when started up" taggedAs LongRunningTest in { - runOn(first) { - awaitCond(cluster.isSingletonCluster) - awaitUpConvergence(numberOfMembers = 1) - cluster.isLeader must be(true) - } - } - } - - "A second cluster node" must { - "join the cluster as JOINING - when sending a 'Join' command - and then be moved to UP by the leader" taggedAs 
LongRunningTest in { - - runOn(second) { - cluster.join(firstAddress) - } - - awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Joining }) - - awaitCond( - cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Up }, - 30.seconds.dilated) // waiting for the leader to move from JOINING -> UP (frequency set to 5 sec in config) - - cluster.latestGossip.members.size must be(2) - awaitCond(cluster.convergence.isDefined) - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala new file mode 100644 index 0000000000..99116ecb25 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -0,0 +1,57 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object NodeJoinMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + leader-actions-interval = 5 s # increase the leader action task interval + } + """) + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class NodeJoinMultiJvmNode1 extends NodeJoinSpec +class NodeJoinMultiJvmNode2 extends NodeJoinSpec + +abstract class NodeJoinSpec + extends MultiNodeSpec(NodeJoinMultiJvmSpec) + with MultiNodeClusterSpec { + + import NodeJoinMultiJvmSpec._ + + override def initialParticipants = 2 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + + "A cluster node" must { + "join another cluster and get status JOINING - when sending a 'Join' 
command" taggedAs LongRunningTest in { + + runOn(first) { + startClusterNode() + } + + runOn(second) { + cluster.join(firstAddress) + } + + awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Joining }) + + testConductor.enter("after") + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 7c1037a624..da500323aa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -40,7 +40,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends MultiNodeSpec(No "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 3fe9e220f6..189cb4c9c6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -20,8 +20,8 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" akka.cluster { - leader-actions-frequency = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state - unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set + leader-actions-interval = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state + unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to 
unreachable node set } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) @@ -31,8 +31,10 @@ class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec -abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeLeavingAndExitingSpec + extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) + with MultiNodeClusterSpec { + import NodeLeavingAndExitingMultiJvmSpec._ override def initialParticipants = 3 @@ -46,7 +48,7 @@ abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExi "be moved to EXITING by the leader" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") @@ -64,7 +66,7 @@ abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExi runOn(first, third) { // 1. 
Verify that 'second' node is set to LEAVING - // We have set the 'leader-actions-frequency' to 5 seconds to make sure that we get a + // We have set the 'leader-actions-interval' to 5 seconds to make sure that we get a // chance to test the LEAVING state before the leader moves the node to EXITING awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Leaving)) // wait on LEAVING val hasLeft = cluster.latestGossip.members.find(_.status == MemberStatus.Leaving) // verify node that left diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 300afdea20..ad445b4c42 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -18,8 +18,8 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.leader-actions-frequency = 5 s - akka.cluster.unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set + akka.cluster.leader-actions-interval = 5 s + akka.cluster.unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to unreachable node set """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -43,7 +43,7 @@ abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index fecb53c898..369dcf56ad 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -22,7 +22,11 @@ class 
NodeMembershipMultiJvmNode1 extends NodeMembershipSpec class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec -abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeMembershipSpec + extends MultiNodeSpec(NodeMembershipMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender with BeforeAndAfter { + import NodeMembershipMultiJvmSpec._ override def initialParticipants = 3 @@ -41,7 +45,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp // make sure that the node-to-join is started before other join runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index c0c12f4582..a9a5ee3233 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -42,7 +42,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "not be singleton cluster when joined" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") @@ -63,8 +63,6 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) cluster.isSingletonCluster must be(true) assertLeader(first) } - } } - } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala new file mode 100644 index 0000000000..7931ce48f1 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -0,0 +1,50 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object NodeUpMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class NodeUpMultiJvmNode1 extends NodeUpSpec +class NodeUpMultiJvmNode2 extends NodeUpSpec + +abstract class NodeUpSpec + extends MultiNodeSpec(NodeUpMultiJvmSpec) + with MultiNodeClusterSpec { + + import NodeUpMultiJvmSpec._ + + override def initialParticipants = 2 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + + "A cluster node that is joining another cluster" must { + "be moved to UP by the leader after a convergence" taggedAs LongRunningTest in { + + runOn(first) { + startClusterNode() + } + + runOn(second) { + cluster.join(firstAddress) + } + + awaitUpConvergence(numberOfMembers = 2) + + testConductor.enter("after") + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 45b0a35521..6b2ff1962c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -20,9 +20,9 @@ class ClusterConfigSpec extends AkkaSpec { FailureDetectorMaxSampleSize must be(1000) NodeToJoin must be(None) PeriodicTasksInitialDelay must be(1 seconds) - GossipFrequency must be(1 second) - LeaderActionsFrequency must be(1 second) - UnreachableNodesReaperFrequency must be(1 second) + GossipInterval must be(1 second) + LeaderActionsInterval must be(1 second) + UnreachableNodesReaperInterval must be(1 second) NrOfGossipDaemons must be(4) NrOfDeputyNodes must be(3) AutoDown must be(true) From 
0a011ee50ea7bd235b4c612968fad163f4f9c6b3 Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 23:35:52 +0200 Subject: [PATCH 288/538] =?UTF-8?q?fix=20a=20few=20doubled=20the=E2=80=99s?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- akka-docs/java/fault-tolerance-sample.rst | 2 +- akka-docs/java/logging.rst | 2 +- akka-docs/java/untyped-actors.rst | 2 +- akka-docs/scala/actors.rst | 2 +- akka-docs/scala/fault-tolerance-sample.rst | 2 +- akka-docs/scala/logging.rst | 2 +- akka-docs/scala/testing.rst | 8 ++++---- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/akka-docs/java/fault-tolerance-sample.rst b/akka-docs/java/fault-tolerance-sample.rst index cb7e1e774d..749cf7ef95 100644 --- a/akka-docs/java/fault-tolerance-sample.rst +++ b/akka-docs/java/fault-tolerance-sample.rst @@ -43,7 +43,7 @@ Step Description 9, 10, 11 and tells the ``Counter`` that there is no ``Storage``. 12 The ``CounterService`` schedules a ``Reconnect`` message to itself. 13, 14 When it receives the ``Reconnect`` message it creates a new ``Storage`` ... -15, 16 and tells the the ``Counter`` to use the new ``Storage`` +15, 16 and tells the ``Counter`` to use the new ``Storage`` =========== ================================================================================== Full Source Code of the Fault Tolerance Sample (Java) diff --git a/akka-docs/java/logging.rst b/akka-docs/java/logging.rst index 0f6f4479e5..03de58de5b 100644 --- a/akka-docs/java/logging.rst +++ b/akka-docs/java/logging.rst @@ -211,7 +211,7 @@ the first case and ``LoggerFactory.getLogger(String s)`` in the second). .. note:: - Beware that the the actor system’s name is appended to a :class:`String` log + Beware that the actor system’s name is appended to a :class:`String` log source if the LoggingAdapter was created giving an :class:`ActorSystem` to the factory. 
If this is not intended, give a :class:`LoggingBus` instead as shown below: diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index 31a0df9674..ac911fd216 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -586,7 +586,7 @@ What happens to the Message --------------------------- If an exception is thrown while a message is being processed (so taken of his -mailbox and handed over the the receive), then this message will be lost. It is +mailbox and handed over to the receive), then this message will be lost. It is important to understand that it is not put back on the mailbox. So if you want to retry processing of a message, you need to deal with it yourself by catching the exception and retry your flow. Make sure that you put a bound on the number diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 4a556cf6c2..9b2cb9a7e5 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -651,7 +651,7 @@ What happens to the Message --------------------------- If an exception is thrown while a message is being processed (so taken of his -mailbox and handed over the the receive), then this message will be lost. It is +mailbox and handed over to the receive), then this message will be lost. It is important to understand that it is not put back on the mailbox. So if you want to retry processing of a message, you need to deal with it yourself by catching the exception and retry your flow. Make sure that you put a bound on the number diff --git a/akka-docs/scala/fault-tolerance-sample.rst b/akka-docs/scala/fault-tolerance-sample.rst index 56ac838b1f..12621e968b 100644 --- a/akka-docs/scala/fault-tolerance-sample.rst +++ b/akka-docs/scala/fault-tolerance-sample.rst @@ -45,7 +45,7 @@ Step Description 9, 10, 11 and tells the ``Counter`` that there is no ``Storage``. 12 The ``CounterService`` schedules a ``Reconnect`` message to itself. 
13, 14 When it receives the ``Reconnect`` message it creates a new ``Storage`` ... -15, 16 and tells the the ``Counter`` to use the new ``Storage`` +15, 16 and tells the ``Counter`` to use the new ``Storage`` =========== ================================================================================== Full Source Code of the Fault Tolerance Sample (Scala) diff --git a/akka-docs/scala/logging.rst b/akka-docs/scala/logging.rst index 66cc6ae398..4ea96722e5 100644 --- a/akka-docs/scala/logging.rst +++ b/akka-docs/scala/logging.rst @@ -253,7 +253,7 @@ the first case and ``LoggerFactory.getLogger(s: String)`` in the second). .. note:: - Beware that the the actor system’s name is appended to a :class:`String` log + Beware that the actor system’s name is appended to a :class:`String` log source if the LoggingAdapter was created giving an :class:`ActorSystem` to the factory. If this is not intended, give a :class:`LoggingBus` instead as shown below: diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index d19a1ab753..0835db18e7 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -194,10 +194,10 @@ is a whole set of examination methods, e.g. receiving all consecutive messages matching certain criteria, receiving a whole sequence of fixed messages or classes, receiving nothing for some time, etc. -The ActorSystem passed in to the constructor of TestKit is accessible with -the the :obj:`system` member. -Remember to shut down the actor system after the test is finished (also in case -of failure) so that all actors—including the test actor—are stopped. +The ActorSystem passed in to the constructor of TestKit is accessible via the +:obj:`system` member. Remember to shut down the actor system after the test is +finished (also in case of failure) so that all actors—including the test +actor—are stopped. 
Built-In Assertions ------------------- From c6b2572623e4ff40a58e780eb60202d2cb40070b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 5 Jun 2012 11:27:37 +0200 Subject: [PATCH 289/538] changed val to lazy after review --- .../akka/remote/testconductor/NetworkFailureInjector.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index a0f53b5a9b..2d5b73216e 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -39,7 +39,7 @@ private[akka] class FailureInjector extends Actor with ActorLogging { */ def ingestContextAddress(ctx: ChannelHandlerContext, addr: Address, known: Boolean, snd: Option[ActorRef] = None, rcv: Option[ActorRef] = None): Injectors = { - val gen = generation.next + lazy val gen = generation.next val name = addr.host.get + ":" + addr.port.get val thrSend = snd getOrElse context.actorOf(Props(new ThrottleActor(ctx)), name + "-snd" + gen) val thrRecv = rcv getOrElse context.actorOf(Props(new ThrottleActor(ctx)), name + "-rcv" + gen) From 46c9cf41cd02146aa9ed6c45c8c338982100e63c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 5 Jun 2012 11:38:41 +0200 Subject: [PATCH 290/538] Review cleanup --- .../src/main/scala/akka/actor/ActorRef.scala | 5 ++--- .../scala/akka/actor/ActorRefProvider.scala | 2 -- .../main/java/akka/remote/RemoteProtocol.java | 20 +++++++++---------- 3 files changed, 12 insertions(+), 15 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 7368ae434a..30b1ccf998 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala 
@@ -426,7 +426,6 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, case w: Watch ⇒ if (w.watchee == this && w.watcher != this) w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) - true case _: Unwatch ⇒ true // Just ignore case _ ⇒ false @@ -450,8 +449,8 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, override protected def specialHandle(msg: Any): Boolean = msg match { case w: Watch ⇒ - if (w.watchee != this && w.watcher != this) w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) - + if (w.watchee != this && w.watcher != this) + w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) true case w: Unwatch ⇒ true // Just ignore case _ ⇒ false diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 6807e34c55..a985a6f8d5 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -504,8 +504,6 @@ class LocalActorRefProvider( // chain death watchers so that killing guardian stops the application systemGuardian.sendSystemMessage(Watch(guardian, systemGuardian)) rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) - //guardian.sendSystemMessage(Watch(guardian, systemGuardian)) - //rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) eventStream.startDefaultLoggers(_system) } diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java index 8f3ab4e1fb..204a68fca5 100644 --- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java +++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java @@ -309,7 +309,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); 
maybeForceBuilderInitialization(); } @@ -981,7 +981,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1977,7 +1977,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2527,7 +2527,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2936,7 +2936,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3410,7 +3410,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3909,7 +3909,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -4487,7 +4487,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -5367,7 +5367,7 @@ public final class RemoteProtocol { 
maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6067,7 +6067,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } From d56c0b4b340e3402025039ccc3c65b8bf81e5f6c Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 13:07:01 +0200 Subject: [PATCH 291/538] update ActorPath.png, see #2147 --- akka-docs/general/ActorPath.png | Bin 50836 -> 42223 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/akka-docs/general/ActorPath.png b/akka-docs/general/ActorPath.png index 08ecb64284ef8f14812f1054f1606dec3bb4dec5..988a2807dd021318737e0f83351e9ecd2368626a 100644 GIT binary patch literal 42223 zcmeAS@N?(olHy`uVBq!ia0y~yU~XYxU_8XZ#=yX^{Q`S30|NtRfk$L90|U1(2s1Lw znj^u$z#v)T8c`CQpH@{ zsB@W6Z%s(i``$o<(tFj)GxlV>>QE0?_DB-i%CxV`O|?)!RK#VUXkyyCe{0_@Pyf1X zf7-JBzyGg(^{RjG_glNe*T+@=z5aR4^Fluk#TEf4yu??J$*UYV6kRMWN`KB4UbKAL zliMdRCcE%=-w?FDrW(I}ztQI(y{ke&<}?LxD7t*@*!R$Lqq(K#zc)LVx-2)Dcw=78 znn$%NV&$){gRD~FXccgp62>r3($?48PHlR?kFQg{EeUwCQA?;}_1A1p#TEgf7AFqH zUuq2MDo5}4Jo(%qwd;?lUZP`+VtFv!nna}!XO=KLG2H35FRVI9L9h47L+)QwTLqjv z1h^Di1nw#oI0Z3myEc*Ij5?qAi6V^_0VlBYl(`R{o}^UZ)*Nfg`qC&Z^#Vc73VXpSXHWD_kauCrLEs)fA^icOJM| zg#nz^)jMolHPSl85y9yK)V7E?5 z`k*zPLuN6@qO0sLHS@wvITTwaD2O<51acYfIHGy5W~;Jz5G)kBv{)kbcbq!ux+N@P zx=5@33au7sh`;ETNZG3pwp{G0oA1G0hcboW4*lZFv#uygKjQQjEz?#3Ck{?0#TEfu z(dOwpT~q=)9Xml>aNJ0F@Wf3OX?9u!ayZxtosNouupn$XvS5M+D1HvbtTH!$xckNksZTUPM-#r}IX=f%pccb?zf{jWdo{^|Np=WP>T zZ>sLDlFk47ALlsnqhIshtT8jMxpgM4+mYqzxBq+ghs{3pWQDHIMSD;-1i8|r zPeP^ROq16f>4zpYx1QRH1>Bw{9xkBd#lB_Ev?}{Y#m3dK*WBOg@4kEfU%0M$=e3km zf{HByi^HBd<7@w4&EgyB`=0b0T6}*t|IP$8fsG7#JM-sBSDh2T z+qZn$ZrO_)WA3-FUtikZYk98r!;Qc9o^DBgulIl6`y+MH%7VW?-mWjcJ?-x6^-KSs 
zSX}e7Bl_FZKexmFTzP-E&^p}q&Hefxx8mtYBOD7a8n6HUDCfk@_zz4R(|v_w^S8ymy#A4Q-k1J=#x1d*A0*bv_wjze zEAiHP;af%X4=tYmjCJ~UG;T86m>lnv^U>nz!Rdeg753%rJ-b(!b3gC>KRx2hIzrZU z7w6xrYTo`sks=KZD3e%>*{O;e5DS_#zem@QZFe(ja7GhbLgbl3j-ZSU*zU$18$ z50rbJF8%!OhqIseBs|%=_rHeL<$cjJ6PglQFIRyQx~hxbVkeG3M^JJ}y?D9DYmW5m z_(PMv7i8HcyfEwXU=nU_P~g(-c(|`!wlro|g3}d62Fw1&9Zf!|3g6WJec`W@U-z>2 zy^p_a_kr7gW43c^tat8S7+b!qi1D>@)Aeni=UORG%zCXRSpRTi1p9B-L*_O8E7mfs zQQzbHUj&+q1M8CnST}76dhT8#`+e^3J)*tGf4u*9C;ETE+7{;M`=6Xv2;A6Sd11qr z^F9e1*9KU$WjjjTGF5ANa`m67n1jos3!C@PuzEHl|L$YfhuX&d<e936i>BeuvK3_CyL)STA~UA~l3VQH$L%MQc)xT;kUJzw*I)Q`x5Nc-w#X{n~un zja{j?uHSLGc}+MlCnJvQ8EOatERxf;Bb1?c^gtfWMm?Ak^1r{0UtbKCQJ}drxtlDFREpBl|TjQfU4k&r> zh#jtLZd>Xiu}IKjo`Qk$va??v8Ql?&SX_Rm{Jr-S>Do4nGkP4tuYc~)6S^#aW7o;u zo7<8meeI0+S3Jq7JvdkBbbg)b*Cc$lq^WU>G65u6o@isqss;(&E2|O2k+0 z;)qG8lr8UE%fPJ^xZ}~(na8zXtq^ePSaflTmVi^or%65z`yEU#7DxP^e)8?+#s~q< zL$9w2K}%?t+%Cs6Id9iFWwU2XHr<^Oy8d8hr^`dPt)SZfs6#f$Z+p&6m3U*h=CAVA zmLm(6`-UpC2rP;?;iS7p0sOTQvCB9u;U_lGfqk(7*lbw2F26 z<~@0*t>z+rx8k;Fmg~zcE4f+(oW6+e?BP;$shknLrm5)Z=hIu)E`L+^ZGTNsMe+5X z{rX}oJ&K*SJ2`k4bxyi%@OJmq%D)Pa%8##lYPbL4K93y!`DT}Uf804^!uapqrQ4T3 z2~T=`;T5~!=|V-}y!dHqMU8X1YrZG%EA`j9p4@l_)TSw(IvrL)gcx32@ORUTl}oR! 
zo4sNl$DEw^-Kx9p{d~#0?~XyjnQ#C1+*h9}Z@qeL$$fpz0{6d{O(nPI*T4DavE;8s z<=ps!{F?GA6|eJOJ>T~(Os_YbDlV8PonEr#?7r*A-{*P>Pj2tE)KfJu|NG%}T+FSX zTrAzi*R2V(vj6_2QpYbQzM;A;86o?=|9K|)zOG_Jcv9zySrMnJGAv&e z&+L$16SmEN*?0M^`|m8yHWn1&TD85dWUch~wbgeTzuzmG{qOVZI%}oeKjroBT!o%@ zPWQKrcxn`>RqU#4S{`oQ_xil6#<@2V>vZ@0dv@?})Ajai&Kb5m{hzOk$NW1m&9gm1 zm^&q}Wd7A%A&M<4^22J&axTA`CEDU?cw^c9K#q$YZ>(>;tWsvX|MTSbyTlZ~y-jOYN-%ot7oo2eGbw|KH z;i=6P$+h##Z3zBT9er|P>eW?ujOImmCnN5IW*!gX6N#V=ng>Y}1} z%jeF|n3j{t+w}Zv%+KW}@q0g>u_9+%u-l7AC#&~KH}x6uTff>dh2i$DGe6(^Nk%aG z|9E)H`N*=!|2wV;>?_veDqX*{X5C8hEqiu8f4uE+yUaTK*#b^i^zX!EM+tB#x*Yy= zW_NXUsf^x~w8CF2YWKz`r?2Zg*0+4x@}08JZckh7Gw<-%b`Cq?=#Srb6wlIL(rJ1` zOs`*0ljH276J;$g7d+Xw@F`znn**rSxI1C<{s-%Mx1S0~X%*Xcu6B3OYp$Pp^}nyB z@9A(ZNz8om`{uXC`?hEIUaM*=|LDTvRO9<-pJ%;SZFzi}$_athGg~B^W@Pttf2w`` z|B3vS^DDg9d9Kc1(|$$te&+qt^+m^PKF?djG||f9o%Q#xyLvrAP2Cr|T}E0h0-+il zzB?4vd@e}raM`qv?@^M$^=&b!N-u)eO__LtOE>Z3cMi29^4F|3&zX7S*6c026c?Pi zbz&ojvP;&EzH@hUox?+pOzqUusaV)xmE*HrVsq_}eKGR))kSKiv>$NID6C_VJY*6+ zC8W<$=2)z~uw(wT-wX5=JEFfmH(dBuFbn;j&yQfSH#&3(-edF1WhBiJ=HdDIWa47 z!PFns`JL9kyW<#ge>_VMQtGaKvnN8~tW<}=Q;DW630IvZn8eIo3!v$^7p5AhE2*{v)tuETGOrv<(=!J&!j%p zox*Ru>&~s*-(T8=q0RQIM|OR;e{;2P&1q+qgvdj$rh0Zn9GL9$NbT}Ar&A`68iVcdH#Q16zYHSF$q$8w~%GZ!MT^NDFmq< z7tb=3o>5}RySBgV?lxY>w$s|LnG2`timWMLxH{J^ah7jJdQ@}A1UHp$t~^rf-`!bn zKL^@GUZM8O$m6|F)Lx;BHpaUxleQjulyWfnQ|{?dwQJ5TGGf|m>fQd>wyayUqgLsG zj|QLZb8fo_-Wo*$c4@9}%rhdQ>>ODXuD5;_SlE{R@?XlAOU|vYmi2r+Q0X35w3RY3gvYbMpCWvlM|w!w!*^mp4k^xw^Lmn)NKS8|3VjUJ6dI572w|Co22=;R;lo}Uw)#IW2?ZSO>M1w z3nyhquQPIfEgrVjpu+c0twuq_<@N|3=A%DWERwvVCRp>PpQC&FbY1?04t~DRal5{r zWpD6|x+5@+Lo3?8^FvbX{LbyQ`u`2L<_T?_v*X(7yYAlCt1rs*Uz!zRZh3F_oU~%* zrF(pyGq2-roTZVRuHy9H&{t*ivWfS#bgZXK=iA-8fB#|Z6nFkBxv?CIF6T3}zH-*> zU)t@ekh>~N(M9>-tabDEeX2WLo3B&Du{PpFx&)j$el32xIeB)T ze(1v@Nl~W`p$oGUjJEPkGgvcGsCla6Yfx7f)V*tAdoId+{j|Z%y&A={Z-1Pq&=!82QG%Ue!J$E)L&AUfsa%EoZvDgXH z#a!g{W^!pjh{W1NP`?t?>l1ZK{157j1$uu87v)fN`CHVLU7r56`X+Dj?>m~l(su*4 zhffVsnfKo6ti8Rs?fzS)2j7-8uYRXs-;9?!Y 
z-EF>w@;!52edD$BoEmtr?A{u)I3cT9?K}1c-o7WhwHVy*D?MnKxBB<>XYF^N&ze_$ zc-FU-PF_cDz7zfbXZ^eIe?QCTrtkd!V$M9Fu0{n#|6iE(KaM+p=4(Hz+D*HkoVvDX>d#-^ukS31e!lDI{#Ezj^)ViMQD%Hv`r9+zx$c4h*i9&NnI400o2Vl1ZBC8~**x-XFiSJ}+*i{352B zefK~A`xx_l<-2Kp^`-B37?ythYnGo?US0O?1M`asThFVo?F&9PJM2^TfuK*F`)lK! zHaPLz*nhNpis3|s`xc31CKY!3pFDR-czbi};a=JMkB+(a=iDydYX7aV;`ZGQb#tYy zYFw^8&baN|)@$)(M@)fk_Wa1ZuixLUDcEjpw(4`wNFC6 ze;0m!_2?0IMU$z&SU|0W=r326rKz=Ku*mJ$efICCjsJ^;^WwwG3@hsYTi;QiZT8B2 z1yflmZwly_)w|Gj&u9xB4c3GC|cy#WWop$y^6K~-0-#0Is zZT?pH=7{Iy+Syy*CQp7l<@)`Rite$lH2ZVI+57IIC0{3W3Y<$ke)o#i;reY~ zPL{4rwiLg9Y^zQhM4-TK->dj-Oq5ki!-}`;hT+dvS`a9c>$KQT< z`}(I9a}zH*-^9ZepTBUo<*z$^?i()VJ^UWMYY)es zU&n6$$<99f<>eG({Ysg)vtJ+Jy%Hb(rPArg)}`;5Iow)aTs7ZT$(Zp}{aoM-i6cfH z($gi@$i2w_;rpWgl`JSlzu2m<=Po~w)>+xJ|EC{GnA#MeU#>8}CR1I)$2jBJ!Phyr zKl_$l_-4HG!&Igt=k)IxbW@6vd#lW8_p?S*l=s+3gc|WIrg<~DgyeS zuh}O5ni{_Jj;-aq>py?JNwtrDaC)K5gWR&hc}8s7h3?<{>zu{s+Z^I&apJHz@H}V#pEuL4I5bOU^=+@MkmYkqI39fZ zZd?Am=W#PD_uT$+c>V6az1H5JY&SS;%<~OVkKp>hGrsbAiJYZDbp^w%mtTV2s#de* zmZx1=H{Xx1WUcLI$=m6%7B_3_{j#T>oM2|JkapqRf8}>|;XWH2PG$w&Wlm%IBO)xE zd-K}6nbMhac071}wuLq9glWJ1q^#>U>wbTFerH~+_2nba&tyNZ@KzE#5%XGryHR5& z%R9lVoPYgS>O2Gu=>$4ToG`kP`b>QLyxr5*eQ1u6mN7PPGtjFnntQ+Q=+@%DXXnq% zzP9kjnXNlHTk{;{x9)%QZSNoV@0hgNDSiF^;QhVQ zx9@A0w1qHC{}vr9dOdMnf%m+K*SfkpZYAD4@b>bNN2gwFRC(Fd-YxsT@ka{#g=d9Z z4?jH@^Vmymm1)4bD!1AV%hv7d-#PJ^{7jeZ7mIF1AG@;i#tL>0li>4LVx|PvtYObw z7SOS(eUIJMd7Hbws;fPzaFbEun)%oD#m9STGb;DKxc2(5LPPCai?k)iYXvQLeKoso zQ?a9-XTI6xx6%KPPpWATo^?w*sC@sAyVdKSu4w#Mcy`hCyMH&o*0#T?TwiCexWm3S zW&UZaZJE)bHOp;1zXz~wNQ>pX`)I=i^Ey@bIVaB1+L+ua@1|FL*Ga zfpgb}-nQEt10LU#iF$rx|N4u0mh+07%0tgJKfQ1&ZBk38$TjXHTb}ovU+)~7$#Un% zy}I&)FAqzv>%IQ_NQf_oQ_bZ<_Xo!(-0l$A%DQm&$=AWFJW6U6?JhiM%M*N*bD*bs z#oFy!QyO)bS1R7nm{b32!>-?c!hKQ!<@Y{$Qw{N9JZ-*rwt_oXrYUCgFK@B72qh0A>I zeRZi&dY2YwZ7!-i{kl)z14pCke@yXajHx@s6GHFL;n6r;s`hREupRIQf5_o8#~L-~aaSmXH6;Y5GoT!Kzrdjcl7H6rZ{^(@wto z$O3__?CV@y7R}!Ct;oFS_hr-KyJAQCj=z3={bysb?t!yaU%jn^`R}c-PVT?Gal7>e 
zk3Y7*KSrt@s;x}B{~=<>tIzJs_nr@tJN{wA>yvJOWZuZ{TGsadu#u%&bZkmd?o+8Z zg8yaI<{KQbKlENi|M)u7;Ga3CzuIcEhVd^lySMX6{Vv6e4@AX-x~FkO*gVQBTH2?e z)#7V2V+Iq`F#}I``JM=ul8br1F&Fg;@28*DzAf0ScFE^WZ!hP@AL(g=>9K3qa&LcN z8gb?4oK2NyH!%rUakyIZr_M21X??+0>BSkL|N9@^NZK(ajQLAu`aFXh(HzEKD`)=6f=3=11PUHA0oE)P7tS zOPW&}a_52YMTzv#QrYwWpU(c5_IK-f`KR%Z-*qf~-S+!_!S=eZKc>v~|1s}x#WB{B zkN3`MUO&&h|NEkH_NULz?X$7}dud(B%{l6(Cw?9K{I0X!{_gwvYxk_b^R?^$oo&4G zcWO?YF=XcZHv8|=6q_nwII4E}2=gL?9+g>Uem%cZnRe}Hoq76nNz5N( z;rhai_i7B>n#%k1!@hg%ik!~$%kr1cuXnEdE7jNjKPKc+@BZI!{;4`Oo}KC+=KY(x zZ=&7g+&%xU`Y!+Vm~Zw+_VT;akNvd!{N52)nShO*+ zmLAyJT6*wjh5Y|Gnd4h;=a_yydf@1(t&F|AEgJm?yQiK{IQHsRpLycReN%;+m-EO^ zxb{w$N#oJE=sodK)2sY{n=*DRZQheBmOK4FOJIdDsERfb^C`RNewqD}`Yqdry8eIn zf7Si6{T1<9=3()#%j@-TJ=lIReDhf~ao&60JYE9J6n-kdOLgk_#ri#eP3uvWV7sjX zi@JnF6k1L!+^je24g1L*vJbx{H?l;yI~_TCja&UUd-F5J!)?o-rX|N;lPR)ce(rVH z_l66P`Ma7V0RiWT-7`L(4VgDJb^YVpRY7vhogXaunmJD$ebf=E_iom#x7O{CCcHoY zm8qFmF2;L@nt@H5V@GFj-)RvYM#s*tg54_j`~N=5=BdnoZu&aG@aK=czI(0uJb9y; zJG!Oc{#X{-8KS*;(%W0NY}OZ^bY|Ez_rJ>y+3?*Lmg@hD>)5wpCs)y~t}_>w-%gns z$&`CY|MFkO;Q!+L)j!_)<--yC#r5Y(t?7X}Z0h^VCfA87N->CC6xPW)y+iw#4vS#Q zjyF$Eu};<0vCDJhS9gB>*h+lY(T9HKPxqARE1eCJC}(_R%bd1ZZI;KM$II7y-p)TG z%JOo@z9*$!yPvq;uReKy$IOkcC!R2UJhEo(*;7fZXIS){BHleR|9)@UbndhXf=wKU zd=?q`X-HYxH}kBU&e$3GuIQkT={38SIJ1e}-NO7wmPUF#{+jqYyNmay_WhqHdGpSk zIdt29M!Uhgd(%9`FHZX!QLK8lCI5eTTD`tY{z0u?g)`6Ow6wV2eXMY4`QCE+{jb&U z#b0J~dpaI*k^H-)(=|@qC{y#4b7&;T@w~{m@P|wFYqlL;d#~<=!j$tj!miw$z1@2A zG|R%T2Yapu^2k-17v4K8Xa7Pz{qMsC`=>d*T6m@A_f$UfU!6xv1KB^mPO`o(W_RuF zt5+Al#XN0~`JB`~d;h{i*)LMElHWHik1o%&Dof81=U7 ziFwpl+;TnF_@bDNnx4x}F>CeT56w<}RXX>^x_C~kzuF38HuH}UEFM}ONINEZq~+;s z?NGy@kd`;G|J}bID_{28W*W2qjF1n7i`l=0KUd0PD)v^}l)HEB&eh^CCcjjE{GBD* z`z(vf-}$U98ynKpWX`JopSHg5yKvUE>6hy-#dp72udeg`D7ptfF+j*Xw3aQ{Hv@#MZqNKJ|X%-@T~n zW!Ja2%iE;hH&}i9k+R-)tB_I0j$*&n(Vu@P|NHwzOWGr0VP$>2`_1#&O03>u=CNWg z{?~kS(HGcRc>7V^5v9#rIi%Ng$Q*tiA+d9!a!!T(E>|P#o)w#nET-q0KV)m2cKm^Y 
z^ql6I>DrTDFZH>1ynpioy={5t)8UO!#wyXNTn!>3N9+%@J_{vB=kX7Mp6i@$!1Ic0pjN)Bb5I>zAxI@1B2GKJ0%{`mMhW3CR)SA6Go6Py4RHy(9H+ z>AK&I8vMRX?GC=*UUFB=cI(sI{ac&U_gJh>)1KsL&-C!j!gJg#!n)i2-b@!tj+pM| z@;&R@;i#0RwBwC>`gTqi+YU&`o&0+8@Fn}%1ufPt1qx^CK9+^MHMTtET9}b~d*>eo zyL#Ij9&zy1B0x##Pb-Hcy-mHE2d`o%H# z&DUHnkp3O_CFjwdZ?;8O{WX*h-&g);cmH2&#}%L7$+9j}HlJN*d`Epv_U(7<-#1om zy07(5cCF2;tmBQ&uQ%MfX`0hj{dCJwR_=pOzw3T@=gOnhGj)C4gv(vwd2)AmT>ks) zY;3CK^}lC7-@W-a?jv*bnwW=+YN9v9cf3x~bvkmvpeoDs^GfFb`H7Dnu6+9L?$YhM z&(+?2`~1lKKL=&&LK2ymO>XBexc~D?VCh$D;g(e*mA4AK-9)0M+^pHqx5eC1KV!L% zP|KmqF>k6~8y#oiZ_b&$;00$yj?Sz+!=Imb?$^(-kNM(x^1!ZlSC*XlQ(Zm9e#5;V zU*_9sy9P0f+&bp%ay$LF+1XIHfPGX5Xj zTltdq?tXUEFaFy(X3mR_H?QxRp(iCMHYfYG>)*WvN2SuP7rzYjz4z(+iE2*4rXL4< zzhoc(_L-^eyr5yc^|q6?zw)NfzI&<=@NF7DHB$~f-$e1E=Eidm}d_MLIJ zHgi7A`I`KEdC!iBYQ>jKvwXiiy*+RJAD?TE>pmYidbfVB^uJp(Mb32YTEN%let&*( zwCzmOX}ca2f17{zl78sDQemOo{b~7l+>F z7m81GW(fR_nk8&^_}9Vgm{&e`ZrVjS{kXG6S#hC&iFo$8~1jIc;Y+%`p(kIj<8n>w|5FQthwDC?P7ejzxI6mp1U(QX-KIh+&uPG^+?M8 zrtQzQ@4xEb)fw$#+&J~ROaAr~CHwXU^zB?et907^6Q^8F{{1jMYx>(hNj7oWo|ZI0 z7oKCS3mp5l7;G#v-2fVoe!cs5SJbl)nbV((u<{NW8&7oZBpxJqgchr4P)h+OMut|*FCh$wGM@L-kwm>0= zb4$pbvo}74%(^Xe%`Hy(oX!T}T;YQf@%5tfk8qxE?^vw!?{tS_+~cZVRRQj$tzY;Z zMWX!7?L8&t&QBKDv*#F7*6aN4z2~;BcM|Bhw`Y@M&}@O3;n}LKEQzv*9PYS?H@&#{ zeOFTRpL0uES6KRryz%Ke=-vNLtT~~Hd7)e7!WCLBU!Dqead4~&oYKu;%pJ@!b8kG~%N5}4j`B+u}k?~Y>=-`8D}7s_T>JE!LM=c)HEZ*A^4c=~#o zTj{Nb@x^kRryL5uwv2tL=Z;@x|F>;Dv!GV`&b!3mp!X*yyZIe3)ec{>vn{Quv}N10 z4f7;TUf$U=ZBfBnH)R3;oOtaCPD#J(@4YWuS7mY5)I|7q+`q~7BBz8+|5OXjSt-*b zAlMebeN-`f#tfT{cjkVx?~4BRrda9nBfG8zPZ|@r?-~fU*T_AKJk9c)du@f|iQ zKikZAj8){2@WZATkE0^vcRY63-rAomkg44CwW{One0P49J5RH%YaWOQ>-VeObrakk zVYE~1j%4N8^o18rrzuLy{kFNIe|sl4D`V-)i=5rjcHFY1Nnz)lwm;Y2uYS$3KDqzZ zpP!#+?ftPdZzI#KpL!l#N!1CCw#6$>NAKTyXrH5SGINXU%EqP@buYHB`Oh66{whjG zso=-1SugJF)?P0lvXog{=AfQ{hmAn9nBlb;lg|h15|`Yqcu{-1?Z>Gpi&Ud6|Lv3L zH0@@I;g}=Nm{Do|c8{8Y?#vx4Y9)j(>_2z^!pu(rtHpxadYGT?U-!e7{ruXj*WXvq zUG-AYYF4)KB{5#U(=#F&1T){}mzz7eT%37$SM!1v=JI==e||U|bIPluLt)d?H`7{5 
zxh_uYo#!UD^?_=~l$*Mp3L0sthnma_O=W$SmnZUC%SUT>2pkN(e1!Y?fxnaAAKc|& zcQ~kZcCun)gn&s$+-9F!TW4Q5S(exAr#@T2NKrH@{_*i!rpxorTr8aFuYIHUc~$bpC2A~IUmLAo{!L%PzG$WTqj!rUxGK)f+pmx;Fmu96@%XT* z7hlHR-FScRuHyQOr>t|%6o&r1eKBw5hK2fE@!?$m{B1t8EzsY3edi*f>C!&F|xiP^(f>SB___fJfo7uvayWfBNC2!TA!i|q^evIjkt=U=WhyVbsbHvUx^?$fg5>io4wPnYJkNIrk|%hJ65*M>Tu z4M)>^&pf@nrtgja-Ocv>)pj#?d;^sg^G{oy_$1x+fqjMvYd9V)@+`7Stj&E0M1|9+YJ{q;XJr|2d6hoqHi zVm1H#f1aSE5b|S}RHmHGo!(!|v;QbZ2h8lN-F;_(ew_Z;+H(kM<<*+8e^)fL#`>_D&M%N(rkKgOyvH38O_^!K1nXR+;nEjt*VNDUnKuc%+*!YTlzG{ zKI-%hiN!)Y=Wae&!It>ziRIPgRyB=9FCP9~aYem@E$i~4<(WUfSr`16XveQuc{0me zz-*^}PhOF%X_vcg^NG9yc}S{&vHcnxF6be<_|XQ?!C(Qo=Ki3w{URt+CFYfN_717&nmLI%yCWUdo>~#l)c^m9bTMCEg+_U^P z|L(qnJCzgpBrGmO^XUJ+wg1QUHHLpG>JRJfPxX)LQ)`>2R-c-1WLE~ag{h@w<&l;%{%WSbUvIen^WW~J&;GyJWAnUh zO&f>vj}?DYua_!Z+-F^u^Odnxp#SHOqSFQU-@W23n#y-$|5A-qUW+{^xtBDyFTbyB zUwbN`{L6{McWz8$PCj`%ZGG?b8KL}lYJa~95{)yhDjduxAW)%qw8fF;xGgqHw zP*W7K-t)Ep{5-#9e+}O=oW43!-#pV&(}aJacw=?>ub7q{lMh{7yM5`AB(px53vcQQ z&KZ{8^q-=|zfeQZ_Uy&B_aASdTfGF^uG3S$ z->}(g(f55?*cqjJ_b2Nd-=neS{n6;tYO`DjFz+~jla_THBFM>olFM|e2colE{XtA65_Mcv0Qod~s$nLWSb<$Qxx z1^t|x8NMv*6`y(RgfqwOV=<48<;6W?dw+YuysbS>6H0q{X1(B0VRNh!knTff~w*zR~=qVoN&@wQF_Ob%ikZEy|vYBvh-qkP}+Fg zbkW3{G2Sl^`~KhgdDdj(19#Wme#o#gO!s*Nb7Znb%`Knr*Z9|e&F1;c>GV`yWGmZi z?vwT&@_o{pVTZF~_~Y^pr7B+g9~reFCP6mY$GyPHI`5ERV##k81^d}5+`rv_CD~j* zv-oY3+>LH0?M1rl6D0%oeN;)BkZPEAGU)4^WoX_Pf+OeYN_RSLu030+&{l!^LD9@PY2}ZM~eQ7Sopd@U}@Ux886>82R{G4 zlIPk|oywxQ{Dpm2H{6<0^F;91!r}ueB6<&(%sS|-7$ZJ;#?!ZbD~`V@n2~;E&4dXY zM?&iVUVq^E{{?9Eq}_rG?)huNqlHV^Cs%jZ?>%*PUj6#>=GsEF@ijAj6$P>zIydJW zS6n0UIgwLE>QA%dDuZ7^Nh%43%5ja6|2}mc{MXq~tr+qA6Q`!UL4HK=y#@Ma2T#%z$*+ zt%OG#T;#5=6*v1WqSxUxo7HSRYfQ_ViDz|J&N~=o@$g&fy890jBfncN+IiAf($<65 z=ZcQl#1NHV`+8kHG#}noPiEX&uu$azm)_(Z{cVc`7Bd`U{1i|u^ZLan$Njzr4zITh z9~7L;wQpX`_x5}D!hZ$X_xgl-B?`AzC+|GjBCz%RvyH~xYHYlVCVpDscFwx0;PAg| 
zhbM17ey1Y$ZhXhnOGf6Z-M4toJc*OzaJf@^=%DxF!XJT;H|Nb(dc9x;vq-85}(%Lc4rE677YX84UPaP8F-WZtA=iEN`d2lu_C%ra0b}PExg3)VlIBh!8LY!JVoLEGymj+JS)Tec+E_`Nb^(b8Mp+HvSZb=t1=5^J2d zzYogzYhn~Cnrb3_mc4xUMENy^%G=j}oOmot*R)lj_tZbe9PwN&-#Woews$PIYN(PQdsb81C^^m&KJE|O>20y#Pz zP90`>q~NL~x~S^^=CjV9o{8nCoU_gfILWdwBWFrS9EYNdGshtxri~NV@;Y_gYHE;o z=ezOn-hKV;?tw8NNk>57kvITF;vUg}^&DIHt`BTJyc?c(3Nn2LiS1uOBRq zJu3b-g8gLiO^yvdPu@OW7nZy#Xv3TgN&ou`{-va@jVpM0H|+VM*rtGxo~`HC1gUnk zEeZ)cBwtpyHYKlf5r1>XI>&d@n*Rq)+9Z9{DkGusP>{wd?oY1&{~oDa)uH5ItM$L^ zYh(GL`%(*fy#>0uJ|4K6E7ZDZp~lpy0_!X*x_`$$k6iUc>*3pV7yp@8Ufn9P_l?^o zzGeI~^1toubS-Ug?XI~PQQ&%iq1;OU&O@&n?>F6l@|f|l3P-C3>-zm$^n{vM-%aM% z4NZl(>u%nL*|!Uuf8RS;|8bgTpk=|sy^kL7dnvIkytt-0)5%(>MStGLBfASaPdPA5 zx$d{`^4_{ru|mzN0bi^XR-W5@ zx4y*h`)ez2)@uhNm&>f=>X+E^z0|zwb+TQ||AeP%dOJB>T7*A-Z4P|C+UXR7<@Vz_ zu`h4$xK;l1mL+3df&Y4^)d`-$m%RnLHf^p9-aWhW8|TXJ;rA<|zXS*i_8$HJ@r`xr zN$(B|8&#p!2qzK+%G=*2GTZ!SZf{Jk925zLZO>W)&FY0=?wWRXBG%e(sUc@{yUfyZU)u2huY znDIF1ylx_U$7EL!Rm<8DvwaD*) z>XbdrKO-tm$AnJHuy|q>b$>&jf%x3tWtD{;-Y=!^&wO|9=gIAVpKsyZ*~JB3P-e=X z&Fox$VdG=<9g2r{rSDPqvoii#b@-RK3YY8Q#@8>l`$@0uT5^>b{+<;=Gi~5@~zDFC!Jg0J$!!auIF-pzqiKE z`&jdyu9n|X$vwTIaL1p0t?yn=)efm#v{2!dM)pyczZY#PDsEXFGTB&hYv()A0CVsE zcjqq$1y5*@D^CJ#U%8^ezfeOn+HC9ZC7+-B&!1RqUnCG$l)Ab;A@E9+)5VSrVL6xY zG3VX=Ex*d>RS%=?@}CVxh2Qr6QQw~bcYe(W$>!2#E;E%DfkiPVoHV?+rX8qBGB~&6 zqi&o8*Ts$v^8_3fUUfI>PUaKrV9RE=uS(uqe|Fcw{CQ?koG)4-dzf65n*bou5+GF-R-qkxKeS2NWU$ecB^8Q>2%=_YC`JMwDwoV;wtd0!^6ExN= zp8fVg%MWLkzMYS%_OCM#X?2mh`?0?Mc<-AV%TEb`Q%{RPFW*Jgbj_y@cM}{_eRvKX z5LVl{I6B|^?dNZ+|DQfP+thigDs;(TOL4Fpe~{cRtHuR}9ftz{m-ya#?jrX05wE?+ zUiJ9b-=A+;bqKZ?u)ywXgG*wq!Mo`1GM^;(ztz3})7<>;sntRA*Vepn-2Nt=J0uah z641#*`&xf(a4K|#BF995R`Yp=ZWrBO2k;!a@KAZr(t;BkTrSMC-Cefp^*)=kZ}07N zo~j1-%JLqG6;nh^yZo){Urm%ex}fmV%gJ{i#jp3;I~}~E3$zqZi8m?YNU{!ebyC~!U3jhR%nKk+a3z1k3HuyZ_=yt043xWsc=Q;(^EPv_XwH0=H= zf2>L|USoR9%~xr0*6LsD&OYwn(YpOiXeuNhIB{ImgZ{e5KocEN|^r?Rs_K8h72- zBNJ=2(!c#--FRa0<~z62!$eEoFqdyn6{)Q;d%fsn-o>?>ETFb_>|!W)xg(@5U)iFy 
z_|S~*48_;1pBO5{`B~pjzJ0u?|1n3<=G4n>|L4_;Y;fa% z>=3-dxI?w>^_GkGjFxIFdD?DuyjEQN;zj1);%j4E9>#iyEm^vFN9MJjIZkPyI-cIqjVTb1dp^n{}*+TE5b}4P`-P2xO_O`%(eau4nK$vT|({xoP?!6{9jc-kp z$}IhDp$5Xw{pS~YSarP@pU~G-D|WYP?{Vly zcG2|X1;#_!(-r+#6$#u-=j#~{CStx1R3ufR3c4b~cm__Y&uoS)|VcIEju64!W` zUJD2|Ke0}^y(f}WTXCYtV^B_6p^-aZ_IAl`mW80bj2;qU3_4BEWo>t8wIpz?R85Pu zxalry8o1-h$x^#R8F~f6t|FJ--}-g1bMmgcp1P(-KRv(2UuY=m#1UBE6ri`*iR0n| z0hZKjn(kHK-PaoWwC^&w9`I_#ts@E%S)E%-MAjOA+EpVohfO?aMrE0|%0Ij3^9#jU zb((F$(h8ixrhpeJw=9h3-~M9H zIc2Sz|I^_EucY0fwKZRv=XW)gc5ryj`}jaY>Gc9gVcC+Q(=o%7i&ZCCT+VojVo+ED z?~~p4jGku9_nu(2!ezsZdre#Ozx}BWelJ+ux@7Tz)Dr(ovrfET>c8&ux+T-rUs%(@ zWu)99@YT0Iv{AsR!y@9?zN$wa|0{~mAAdKq{QIub_w_IC-M=4bu2gVpqWg}M7n4uD zxYfU>>Ztj%UDvO#|D37OknS?Cb5~ANLDiPpYp#?3)~)e$@rkQ_oO^Nc+4qO5oQ|mG zE&DbTTy-h72=E^H=*;(>U$}k!+dJ>CxjmZjJc_CO!bwlT6W>>wd$>0?Y(B9#;unWh z^NiDMf^LG7m&|wInY8|b%M#Y8mwJI1B(RmcQhouWRCF=ziVZF_R)0CJC1@=3bzMb6E3oO%_ICY z{=vJLO-`M!TLl*J1>E9P{Ke3$!lcWg=(4qO#q7$rbFNe?^(Qf&aWymSXcaWv@#DJx z&poZe>b~FRL#mfW3lopcd2-H)Bd}M%b;{NTws#Bqziv3cEAv`UN>|tFJIv+VD_1K# z70O6^YrHz-B)E)r$vnt(T%kpy;!K{gwr|`TgUF&Cy~j7r*R^<3G0#$O&edm|H~6F- z;$`#YDVrC!^AM;NV_%UQG4svg*T+?OS_MK+y!pv}=ZI(evU`u+qE<{;+<$vh=Hh<6 zuH7$heALbKeE(DtQhZLCqZ@bO?z?AaXFa?-)1z;Xx=@2kup7rGhYK6!Z+mR%ad<2! 
z*S{cn+H8SWW)l>9tO{fVzi1q1vxxX7&}}0e(^!0d_SgMOJKYppR+MdcAsCQ$V#C9g z=U?!1zdWt8YK5(`p5?nwo1<&&ZN5Ho={sD|1lpm`vQR*kapCb>VlJ|RUwpPa+~)Eq z)h=23x59%*#t%B|wna`#V^RM7_}3BR+soZ^WOLBwr^b}pz zb~q{ZAG6inBx?>TDTQ;w_-g07>K-jlN>dSdYNrKh6pPI6G!sbZFlJNZFiI#m{6OXM zfg>#oHmbe+mwC-aE<)IRP5kt-z8?=fI25x4-#-;_kyQPZq1c#c(PlTb_TwyrK#$5D zryg4}R)n7j(tsG*6O(ji(F0%m)Fsh%_nVqdCAZahtU2TIpd~DHo4_Ky#H5}Qk%byO z*ICkDR|__L>D^;?vE5<|%eM|UwJRG$POXjOP|V_fswiU0y!m_t^WpDnvNxKx@Pz5; zi(T7S^Ugc@xWC8F8C;4kiyh{>|M!sMdhm0J=VX3Q7kTm4#Q4sRCl*JR*7n_4V!!Oa z>GnFs7J;k%IxUN|TI1Cm#g~;QTxdM4p8c=j1I8W@HiuwBf z;T%mNf!|sLwz3I2d+O!u?E3nvNm+G!oW;%W`=6+sF?OEV|qyQl?$tiRHd)2I3K_x;KmMwreZ4tWf)~ zg5z?aPS>xEPIqiKsyuS?l9y>(pmKg;r_1ZZk=NI5ZFS-Z+>5;Of0dYG$F{BK*L+() zGnJ2Bj7Mg}uZi!u#4oxSG@rjX?azTj+ggn;%Os1rL^GeLo09$XD7${!)XuH^-HUfF zH8M!*O?yNEh9nbrN$-Y{(F%NxNW8#{88n!$H=4R{PWGv@4sF

>@<=%|4@ z6%N;{hov0Zq@6evy_l2uw)-WYecUgyMNi87LV@i<*V<#7o6kq)doV~ZT?8qGt9bLg zcgA?N3OId{JtAw#%6rH5bL6)d^EVnuO?z=q=sNRxmqsyg;m{)B)S<uS|l=^fG8FFN>>*eD#;P z{Qf$Z&v2i(CC0UthWhVoL@?hl1CJ83t=v9Me>|S_QyoDYOXiYH)eFfBb8? zwobK#Fof46 zubW->G<4lT@$1+&=SzVapH@KIC<2QdXV}&|Mm@P`{rpT( z_Wa1DU4qb-q|+5^7dh1vXKs8H4w`@Wik0(AL}c|UalMb)q@*i*)OT|lvT1?}pI*dz zhcCI8e{;*lvYQki$ zAM*+ku_cf}0Z+Ddi+vD1Szx_ew~+L6V3W526tpi&wC>_uK0lQe4nY`ynXS!T=k((89Nx=&qU7Poyx z@ASUDXNr0EA4~?Phb!XB+wI@F)ooC#ihKMkTOhEg%$51s+T>-s`Q7|VW&gIXayoV8 zn0tQg%eJ>stIgurwz=p2ZVn87y{cD#PwKk)9SXc&8x#J%UKIVj%DX&Xa_?8e`|YcH zSA&KPccna9a&qN$=eFK}R*pcGHaF&W;s4)iBUw)KFq_G-&u-tL`*>YJzI^#st)(qa z1&5BgW<5?#yTYFRr`gQ!_p-UunOUyQi?zOU+3&F z*W8TweMZaPty6wF_qx_Sv$7s%J>KdPywd-}Z{67KuKu}MX-*w(4G9v}W$#{GWLOj- zbAR83-rJjE?43k6lwWZ9ek!v&RaeJo{*T+=@6K4O9V*Cs;aIu*-xsCv^PX*UsnzbME=g( z)&;jJN*kUZ&sM)5sr$Dg`bt{9e&|CkE#(#g*A!Le&gkc1DGPKABW!u@Z9h2o*Qe6D zhH(GMvNJC^8a#NWXt=^G&gYqH5`VYCYcIj%)AO^xi`m{<(0O#F`1ucqdft8((UvS^B^|4@1ww(WyoqzsL)6-{Kzwe{j|64~C-p(m{rrKX^Vb%9~L*3l- z!iUzM-Tz$jV28loU&qY!8P@(eV!2H8&)k*%TR*;HT;E^y`nW1btH7cY1_~?NYBG=S z``W-J7;2SZ_j7AysrAi+3-)|Hbn@Nr;;7)})zRsdujd(p zhEwiI$((cPYi@qYqDCx=!+5Rb8TK%bU*C3kXL2MJKHFNnM^&&pVO?oQ>$bwm ziV*!Z+9|blw(hs5wgzxqoKUbiUG2xG&Cz>GnX~1yoq0CbZE$OSYkc1BV6>Bu!>xpe zyY%bN?eB~BxH0`x#l9a;1XVYy^xfL$W}i0Ixa{+$RlDSFZQEJ-$?EpEnv)MDr+iO4 za5p$ba!sAvwMhGvyd@vq)ht zH?}Ws-T%Ep@pi@D@6M)AjZQ7{tDE5XX`|SpSBe*3-#e%ty0!bL==F2HJAZxXS&_Wp z^ygUb%j_)A=1n>6dPTyzt|`7)h|l1ZV2QKHv18xA{eFH@ohNg}``m*@Gwy8ocILqK zTPDph*Em7j)aLUf3fR0}bn@Tlig{+U85YKhE8m|b;G-3>#Q#@vjM2;f%@d+?UMfL1S+B=bz+RQ zz3Mvzn?HMZfHojlZ+K*!bw{YB)|qo!>xyPSwOtoCvUMDPw$ZqD)AgtmXOHC=FblGs zdoo>Vzvyd@?78gq-`7=tIe+_ac3S1mm5=QzkL#Q7H1?NxlU{$`zIMj$SD)Q?U)6tG zvj4~3>VKcb@7;LK&b+;*bWi=+=i+nh?%e%#^>J=YOa13h4jO)sDL$ll{q)uX--zEk*z9&c{AYgWjlS&T z{m(ytFXK1ZUvypjUAXz&SL`M={@-4D)_$Pz3i2 z)VV*0M}40D0k^dY<}ohQ&VDfJ?k?E${mvXg=LQXv?+Z4si+`T~sY7{9PV&Fs^Y>O9 z?(InSy`lK*(K@Fo$x28cCas*AXTKibr=P>MgMKT%_BQ zA+zSa`;OQzd#49AT~ND_^XaVG#~aiCU46&xbE1$(xHED7xpe>iTTV~k{k6U(N;Fn+ 
zmC*{Glj*8#ttz!=1i1|%N^;;M>?ri$hQ=K6p!zONh$4xkUeH6c@@W+nd z0lJ)SE`m(#iHCiq7am)a_u}7S-rH-^a*PEs)LIVxJ^Sbw?>u&;GimL-o43u3&}j(> zDDsWb=1)*KxbuhH!<`Z#H@+HPNT{}$@iM#igY5=4u}6BLXXnhH^Csc%KotKIZ}{N0Kd%x|U4{<=J4&^+gbg0U)b%xlY zeFuBLJh`6tsc}&kBlESICY^nDtP$)@3!HT(DKQylWUs35oxpf)!kw8sdJWyN5?A{e zO(%LD-XgrlzjMC&<@Bjc(>-JKcOIP0ovv0@mF(7GD13%ld%;<*_NTc&ZbsHU*zm)A z_TB}rITy0>A544xXyVzU9oa?;!{7bVP^%S8^=XP=2{`p|@6@Rt|MqI73NAUTzdt`Q zYf|&JbN`imzUaEFJRues5+e62aF6Ml^G8*$sNLpL4Zg3%sNpz6P^j21BR=HY=XJNQ zJ}r}ZVO7E)RC{b&lGDu9+tc+XpEr(m4@|GzR-S6>zwNcw_51@rZ`;3FmGLu2|66{X z##Ebf)dN8lIf6Ef5-t<8Cmi{-GhwdBwyp9@dMo~HmCd~PGQ#?1p+?yRb=Q_(<2Zl{^6J#HL`*Fn0)VHMLR%<7_n%1@}ozZoB6XS#xzdWv}+0eUh zvfU-Mdlh`YQ-06+d&q9?^=B6qwfsXpi|@E-3ueA-+h4XTTK!wG_>p(t_1~1`{cH34 z{>1XEtM&Coy|H=8CLi`EgnU*^dNWJh`c50S*XPNgomAT`tiJzTEM$Dt-~IXTTN{1f z&GY|OtjyGI8UW9%LWD|^dcg^Ns2e@zeMNr9{ zW3@ZGdfvT=V}JfXz3=~hox``E=hOYa=-KbtwzcKJ>w;JvJ ziO=7ocja}@yTx_$`+nQs3zVms0W|^+LiEI&XN8{=~ z-F+9u3_B94e#q}Se|oDz%WeCTmG8@MJZA6K6p^IjJ4 z`)B%bF3#ESzwiF~3!hG_o+>_fAoRnXy?@_!u}*ZqpEL9F;^Ykp0+(hxvLswRVb*b$ z&G^!e#$&<$SESY)Uvs===bG1tO;^UTovpX~{@qqeMcD6t&;o&N`zBtto3g6cxIx$5 z&-7Z9;m3rxl7%OBR^GiWqT#b4z%RcjvuFd4IKAe1{GPj_oV{uFYsGD=FT6ke-I>ELruIVp`VD#-ldTr@e2Cgy8}B0a z{DH)_nJbpc9ZFw!cir(*ecTDBWCD+b-JJYs=eg*nUr%cz?qAX0uX#Q9KJ%mfr4jxQ zf3CGT)fpeXtoZ!A3s2-K*Ps3$*ZM~+MmV?gd(7>bhlQ?-Zu`AKrdUu;?&|Ty7b`#g zEnB)so!R!z^9{3ZC@`Iny&7!D;k8V6RjK0UGWn57QW;N%^;zrP z*!B)rm&;GHt%IBdS4%UUEkASd!Efzs>)_w_1bG)dPA&Fpxuz@0cCFB`;ppijY)=Es znCs$Z&)AS1wZ(9w?q$OU0f&}N=fnR^+R$Sw7-6$tjFaI=!$G?%f=pl6*q1E1$}Q&l zd*2oL`zP1TyE0Qg#r^(87Z)`F^DF%4XD)~r++Wi z9sg3SazyfXkoh*g$yS>`2l;2deWWj&`Shdp!);wxjqdOK$s6ynF8^sw$AOsK^RKS` zH)G#@{XpB>c||#M7eyX;TmCfSR>n@rm;?3m8;!*FzTWr8l6RVGEZ4!Dx6$t|EInAX zs&!%puZt36n}Bc2sdU>Zt|41Ac5&E#JNWmk+{)@7-{ZdgmN$FMzfVV}a3_oU?_=km z|9+9OK>5^ynp0c9?i4G$zD|1&-)^zKrF*W_+EnGgjL^|4*x9P^I_Pmy!HEqIUmb5< zVZbYVul=CW>tkmo89&<4Qo*5X&GSrg-@Y?P5?-Wr9H{y<=N+TXyHYnb^WF0z*f%=m z?cHFR+GxD7aq6=+51ttX4lHNqUC@7S$hkV!$tTJ1(XZmpW#0Yow%yvh;dO!*1Mm5T 
z*2<-=Gj_arQFpQY)jH!fQExpr9{+kzaPy|X`!ip)7jJ0GJCgWLKs%|M|G`?8?u#pD*OrOK5cMx%uAjZ$NtM9P1LreQ#dq{l8tD zvu>Nat&RK^W|oW_@6O9ydnuOx#^-O`7Te#k=VfMPEZ-b$^uKIrOGD7iYl4rYytF!g zC|F)j*mZ9u`}b?}0-Doo z^E)dyOF0V&wXBR{>fyXNq3HtKk%&l%9m%4ro2s1bb9hU{mbVt^I}(%gAbdR$dqU*c9*{RdH&1kN}hKUBi^@t&;NA)=z7_j zrOKtT9bPT#1fy2Ask^eS>x}Q-8TV;k_WzG7eeY#i{pxIv`|xSk^4AK2cjEh5)V>Oc zdwO97xnJ9x?*(Pee-7Sjx5(!C0#%{X_{iH&kIJn(K4s<)TdTbW{H(J&OAUL4_|-U^6jj`&1)RbnrgXj`seN`I5a0~-+S+8A?7hSCm*Mc;@f!_c|H&7w_^jPu}tHZ(PT|3+pHTbK!X)7#NZL zh*|Pl?92)dM-j!6M}a!e)nh+vTt5C7jozUrl+Ry>8ESUylff!|v-Gi`Nymm&)vW z=Xsl1=lH{~BF0&H8*kX=c-^XI$~Y6f!$edsK18h8wr~HsXaDR<*7N_4iCL0+vSF{D z`Ih`kQy%bXo$|LVUZtofSbOutGp|{?@@Zz9o-(>RC|Oj``o4L+far9)8qlzY_X>?m zDQ-n8*-r8^x6V;1Yx&x@T+$+`o7XPC=J;Di zTjYB6vkNUx^+meNb1Womch#Kgs5xH1+rh?W>ms@Lf6tNcRsW3t{@p5Sw7ccP?iV*# z{F%1y-TN(uf_skWgmMJBtS_Fsa%<1S2Rc1@Ug~Ma(P}xHJA=5V2z901`y9)CbKd$t zF=AFtYj14bJim0`Gq=}wQ~Pth)^he-pFE$Zqwcr2u@Bo5>+QZOcdRyQUaiyj^^1Cv zRXwwAcZ=$FOVb(YhZY2A=l!@nwX6B;%=ERofA;Q_SzGhsV9#&E2NwGonS?@&?n~$L zWgGoh&Aat&S5%@?$Nw{-$CuTgGrgRz#B%uh)P_HG(;n-G|5p2zG($c(cTYd_J{`-e z9KHFLS2^UGm-25+%l>v|*Rks2-6rCJ_v6z~u55eC7uX@K9%d!nkY<_^U11mFakBa3 z&qFeECDnN+UvoOmoHfm@xOnNFqx)~ZiFMEIUBu~nE|4#wjJFmO_VF)jWMo?O;pgW|(ke~@;Vx`HI^=~T6OVWaN7VFPNH(}Xd24f` zWxDO;2=3!sAFY!r>W=rmF*EJssZyRKhCl}g!F}do{%c}6T&`Vtdic6`$UV>1!j7dn z)91TJ|2=wJ_Fn9zPk&k+vp38rjAPlp)_rlEP$g5WmWTJA;>}-+ujn^s&HJXbyZZ0q z+V7AJOHRk_%HGB;P_gDX@-O_~rI+>5o2Bkm`6cDXNBHL~_FFTLYp%cxxhrKq&7Rla zZ`KQ4fAdzH+B^Hj^Y4qlw)pBi^P!8A-lP4oE3HoOyZN5AUU6X7q}&H*q_o$b)X?2x zpLcD`&JU)!xBnIUd{Q{nCSR6O65rjG9T6{JZzjiS-jg6xmnUrdFeP5`t+Aov)u*@1 ze(ahQ@jLCW%u2OKZ-0ON`g%`+$kByDmRE0x7pABFst(^2Gw<+c#V044U#hFJn=gO5 zm#05iJAPc=kiX5*r(*5L9}3tCS~s=1ue z@~xk`$tpU?Y1A+{Y|%MthpXD`>Wf++&LYKS%Ra!ZDw6r z`e=v0vEii^M?MQERm}Kd9{BCW?z#6ipFT0I-m)&{hWRg!FZC}ye1nV|&ztT3|2Q)3 z_(lDA^M&zof>nt{MY9Y=YweyI*Ug*o#VrMuF3ZH!b$djd(!k>&2xhfiqBJoE*x9Wf6v)y z0n-RQ`_GeKdTmHBuE_9TwkFZn*!=&|leyR8TXU`Jax6}4Z1k28IK!g+JF#(l(c_;+ zxpRF@*G_-?(t+vp_wUcQRC#+X=AWj@@!DIUZL+*r1>5$E$K!8b+)yem-oo-~bDiOA 
zfo^W6*VTs~ANrfaq$lTj|CbLFUt4XdVu0Nte#f?S*+s7|EpeUj`53RnoBWH{4p%e& z{VnUcUUS>g#s7}~{_?|CVUA1sA)`0HKW;gs+;=kn+vBRWcPeb3o!%RBzG=g%rc(kD zNxx65d=+YZAgBHB%*cKX!-)HH?koIzU2p&KS)6?L?~9+F-})=1ePrPZnL7!&42OKn z(=T}(_c1X45%V{+;?3meCCfb={}}Gr(7-%>{ac@ zu2{3n86mN2|NK2tc`r-h(nJ1p37wI=KdrCZUzOn8a^sKgul4VxYl7yVO339p$RW_a zV^Y^C0WRabUt$|huXpXtJzbi^p?7_!(i5{oOC;XjUa!9=xlzs5<*w5Om6nHXUtb+w z`~LRL?u^+V|B0Eg)vj~(*dCG`A?*CH{j@q;yK%djsEp>74H}=tHrk7HADQFuL6PUN z=9xqLY(=`Pt1I6hs5On>QZ5;iQt0)WH{>9xS6mibJyCB<1_r8wqZ=t3;N>75y4#{fm|2l_}fAgIu z$y<{nFKe_2Y;9QTn9OU|7+TQWb6hmx?EMc*uJUgsFxFOlv?YMw+(qT}iMLTZ!-^JB;Uo2NA0r`&N`Szy4l zwQ!+X+|eV=rzCDJni+k6T4uDwOS@o`LT|gnx+LbWt>^pwDCe)CaJ87Dl}-4% zeK!{!X`lG_ZRxVlX}jJ(>H0QrwamBECHAYIFE9$VPxV?Lxu&)K{rxv?x$%!#A2q!A z`k+?&N;5~`QKkuB8-KI+l*;$adG)!+i{qO9nMG;4H>zKHU9z7)e?#jTv2?Bp0{?ZQ zEp2&EDYOV&b=g*D8h&3)PWZj_s!u!Pc;l{ML&ORDCFdJ{vzPS5vZFKP^!=(P}{ge@@v|Q1FoXlvg_7tjuyEY z3|iRX;>j*FwPgA3R}pt#d~RGCv*OCQJ_&DXtSQEbe9^)@d0{0l$576H>9rY|A( z(cKb(;a}CiNMB91Ung_t>bY%J`Hw+cX95pzI5*35)*`Eqj8l)D3vpf1ySle~PLlJ> zYpcX{9z-AanXUj@C9`)=*{8X60cxxGFRjs75OiO$eC4ejw<>C{KR%oFIP~`aJ2BsL zV?cXpKu1QFy}z@tOJGrd#JjMyk5}3WDo?-n$RWY<)(*BaS&zSRP273)*lO-N?pr_J zv3&@bpdd0;xaGTULAG2xNS8-*r}+7ZN89?ZL{0H(el5QLsM*@pT2<2V=BYcuR(^jx zKT1jbrB>!+jxTPRk3;9rh`XZRZ@cBnUCZ=ovn^iwZ2xkybd`W&7st=2<9Tt<+-f(t zO$~g;$g*;xiN`xj{yTS{ndN7dZ+@;{0NM#69WEVizs@X9P~hTQWl)QT(2ngNGVFHTUcN+POMbI7XwAU>(ZGgspIo`09Dw*R=k`cu~-pWHo@UWQ*e=4$z?aO=lQoetAtOp2tZD~P1B zGw%2{^Xkg)XLyVm1e`>~uZeBi=bl~TvbBjJrsVXUMO!{@51qT{-0#Azg@IaI+5)D? 
zvLx(shR%ETjoHH;mKF6 z0!~-dStcqzo#6Ot;cRie(4!oREi4OHl#AW9G8eT_5TDlV#i8g@>v;d%F303IKRCM# zbU{mi7712VJ?)xOEx_C`^ZL1un-8(QUmjO8v%WWD&&tv^Eng1BR~+(zTTI0QOFET6 z+e%m#uBfi*TN(ZQm#I>B{og6oKX#UxKksABY;O3x{QQTDPs8^8YMgERKkmOIBRgn) z6#E_aLkdQl7ymxsx!gbOD2Jkp=z=3(7A@iFY>`*mARy2QT4YpzA~%FX@fGtlrWVJP zb^AV=u}&0p>dO4}jJuRGt6SusS~{7~)0H{+w4cU0g0+ zyHb6v?|iUJAbgEd?c?SXXS!G?My^sY0*4rAvCXdZI2E?-Ik#q1^-Ej{Vt^|aJv_@^ zuC`I*V)p#VySjYhdWFH~W-mJ>>h$OTo4N55Vy>31@BjCsD7S@W*THG))_RyJZDgLZ zWQu2ABKN}s%!{-f)i{DmS35NK-Zz;f94Fw&!SRUo_@xhNNp4Rx6{NYBbG>9tn!r&i z8x#I0>Cq96Xs&Nld3Gu=ZgcLr@HT&b+*;%GU-!>^IrF8YK0Y+Oe7^nfId*%`+r8d? z{Z_X3^rUHlf`Sw6+6Da8diz%@O1wY5bUiMdr=_HHxmnH$5TgI$h6u5nunkcj85g76FlUAAY7sO*{BSgx|iP z{O#{otJj}3ySw{vx3^ zKaq4bzp`&~U`aP97)3yVcqWuDn@UT5sowDaA=W9;$N?zWMEVMv*N&?;?0H4(QMaxJ(QYvm~{4ZoH%ndQ{BbI z+W&ieeW$xiWjlIWspLwRRW!?8wDsJ~oadCn8Q+C3eryowYyR{TN zy6Svp>R~Iv;)xz?67Dy2I(g)dHK-Wz+x_bik8yEv)KF4Vy353w_~Dn~I(;Y0`)fNa z57~(8M$THhzw~p5&w`F4VUPbi@rW6BDY2c^&{Dc&ZP6Cs;Z)g{lG`c6960ZqWmB!!Mm&=om<@0a&<=bbQgP0(m4&qJpt%m*n`RmT6YQQ|oBv5xfewU8* zw_6i;&2&zde9U^%;8fAQk9=lA%~LExIP6vTh$Lz%DgABqOm%T_N#o$yC!Ke9&Jo!~ z6UvR+-5yCCk-h9(^SV_z^Hs-TeR;1(>n>UxC|@OfO?>hR1r1*%WoX{gQ*p9nUl%iT zZQO1>&V4q8XHJ>KvM3e&{xDA@>{b>}LLg65Qh>{n{{|@`E-oo-O%=&|6c+L2CoV2* z?cI@oxh=@;Vj?^Rq^LDi-2QuL;upz$#m8*jkGVjJ_;9WK?_+&A?-%!9*|&awX>i%u zUg-QAJo-f~c(~pTx3Y_w>O$p@Vmr(`@20aza!~cBlPFuR=wX0%S%yI5!^=A*d{=W20 zx4*`I@wQy57_X*U<}>T};s4L=e7SY=gju%dUY>mQH(2q! 
zZ@ypVb;Bl)B?q`2+YFEFx~u=kL^t{Oqkp^QZ%sUQ?aklo+brjz5o5Ovm{3?&&`GDx9+Mnhcg~;ew^>JWb%b|Ir9Jfx?C3YTE}Hx?|Rg0 z)^7H8>h$v^vmRRJsC?Mg@1?Z)w2BcZ0H(}5{Oi@7Xop*+Jd#z%pPxN{{IrW!**3)` zT#r|Bth^a1eoW~1BoD<)o0Q*N3*%X{Xu84KpP{JdG{w@%fgjtsNa-*v~m@7@soGYT`INyIt|f^E(Mu zYJ2(K+zuA*cRjq45fTxEOZwEV={=zn~+AEtjSqT0bXG zR%vflj`In1xqTm4+BPKLy{DFWo%!3+GmmD-2wfEk-n5;`@zoY#rAwI}U9J)8aR-Wa z)bwcQxCZht{hs3d($&S~i-6a4!AQr`^Q_h4(y}^^dbZfko}_fdOr?6ExvfoyM3DbB zf!3Vn^1=tJLNou*Sk@%>rA+a*rki+hc)`8#W(wh7bwV#j2)gB^14XcRGnGxIzwdM&LsbGuf1_!zqDR#K6qt~&4TZ9 z-)c;qr0N}fNm5X7V%m%lw_Wqn6z3}`?d6)c_h5B- z%J1**R&v@Ny7oGiGnL2WcFze7s~opLLBY&ka2rLhbz}Y8p8vP)v^vk|feLSr=ogA2 zf`TudIQWlR-{4wyK2d7kgK4+lNtuSWf*lSjzY2dJ*p+kofcWe*(bSlShxzZsg6fMC z8+4SEF7dVXbaix;C~{aINm#sQQH8)s7b%_n8Lo$0jFrH}mEc54m(3rWH9OBZXhJPL zal;T?Ft!~m5jh@j9rfGq+r^{a(<&2PAT`9L(|63Z7Qfg4YMOv6;Y+V4xTJy0!pSbn z=4NVw6Q`1rlCgx)nVkD30l)XX+GLvfsY(!1ZFuk(zuBsM^R4u`GrBu6PF~9S{XtH@ zrvp;qR?VCeSmLaSV*KYEebtGQd&)UX z%55P>9(=sscWs|Co49`DvsRef^^5N>WJQdHr&;I`I#Mp|DUi!0O z0oeLT<@3SA{5jGSU7k(5{mux~v%r=M^VaTq!=*o| z0~*zW$GFXsGG2#YmT5Vf0!oj;y^4#Kl$0jBs7%Q`(pc-iL_C=@e0}WnWpgiNLt7CO z3Jyprf;j!D)qN4>HIUHdT z*&oCs9$WqRY#%?gHRG}W0LzyO7WVfy6C+kYoS5=z#^-<5H!JgHPn@g%>`?Y?ecZ`& zRquC*rwUcnpWE{$JUxEK-qL%o-kfb+z4^aQc46*aS?ltbf^$;rKAjHV^fO=n%(<0& zp3SSe&TZtm$F@Fn)#EE)#9p>t}Bc9-lw6AoBmyy~#HZc}e^I zotG~=e_B}#sC{c3#Gz+vHKk?2vBM%ZF51^C>aIpS-j!@2A8R$Ehx_P*Cm*BpZD#6P zeTyjjww|xra2dm^@9K5m3clTo?A~O4ZoiuKzKq(ktap~dML8_jcIVIckgO2De)5p? z>)Chn4dpX#2A|(z@Lb1Y`@_1H+VivP-_5Z3Gsk;zN>H+3Q_TC%_itoBo&BtAPy7ti zo9q?ue!SeRm4ACpUeFUzLO&jFJ;`ozLgL1i*Id=kROY`sZ};_8S!;rL!I~t4|F<{! 
z+Fs8~j>z9t?w%{zSzKm6rT%@3`x4O#7W=QSYPk+(vL$vL{B!NlY4x`f@8{S|%-!>@ zD@9x3#ER-0+=2gp)croNOXY?@X^s4vHQf2~zxDQKqzaX8lneBZZ0l} z^FiOdpS}{iD?*rF{hZdQpB7!l=kK{(F6o2u|2=`uJGm8D{+8G6QLX;kqc^Lb<23r8m`2KnTHWC%~0yhK@#?@LqvL;=s@ zWs92q4R6eK*?l=PutP)njgX=Eg>z!I6#;K5ocFBV_v_XcJr>2AwaRxU|NSX*(X+SW zjkI-;r6SwIw)HuFiwthhoZhVAQlsQ!dGAB8aQ^3)SAu!U(yjm9i|pQgGI)i8h~P`L z7p!vG78gAGo5p5FLEp^pK;z*ndQc`+8fi8eZ;;P!RGDA>=vQz$w|wb@^Pg7F z%h28a_c5dX6j`~u`drnLtP4B7tW?-z-DzudDDlsw%{B#BSIv6i{33Tf$F8f{*Oz{M zA0{mGATg7b&BX27tJT++8tQQzJ1NwfBK+2U-JjFbH>KBCY^FFJ{=r?J250^KIk%iiL60`%f2k9qND1e4cMlvyHc!7T5Xe>}OX# z3h%Gkum9RY(nGXGNy(N&Nx<2xC;r&%mgoHM?oU;hPq3YTzxH+YNwg1O6|INZtwvWrZ*u?RaFru1cF(e8 zpEad5&n|XqusWH%EMays;5?~wE!ai;_JxOTw|D(nB&vDRr^D%)$dY|V_wseVvpniE zIwaipd`?lvhmK7zA1tov;F$O#;-O9V?KXwpzA0rkZ&dVVa@u_2-+!X*-@pIgmR7v^ z>M=*sxNX9ZS2Jp^T@al3Zn?$9?Cs~CHnXqic+^|i_|fu!f}}UUL8n<)a=4n+K9^(1 zw&(xv5nsr6~4_`Uo2(og$CciJme z@-FIs`g8N6TURtCobo@+lim6_fQ`v$)AVPa>1~&{h&o5_pOyYFcVb26vX0)r_BGbk zkADBxzx^wH|J`ffp8r4pK7Ieq&z9QTn+hITSS~+trqy84j59NS)~IRQ{&eAalIF5P z-K}W9ztO+v_y2fS#TS2UdUMPEPyS0|+oOW>BIEWwcw>3VA$T|ugw}qnh$ZGXHC3(rMM@SC+XGB%S6 z`~S9IpL~PsV(W*)8V?lf|Ni|ysYT{C|bERx5^@3`hnqc<|P(--R}jp)Rq2nzg1W2JGVXQ zch`Z%e|yWOew(q)l12HnK=avolHdQ`&3yV~Lx%w{+k7b?0nbAIdytKWw1aTruYzPfsx8y?q>_TIkNCl}9EJGJOWUVPohKT*>DOWa(tr`*1ucAm#2 zse&i<)yq>Al1COiW^t{W?&qI&M#BD()$FsDt${J~T&e?2*V--8e-?7vZch8Z4LW;N zQzmnF-4mX4lqXbJQiS(a9J65ChQ|+k#ox2X$IonunYmF#V8!=A>&SE`YD(q~Uz<2h#K`nT7xrKi;E^N!dpo6PpQtWkfhAt|O@dFjLD_N><{-ybd9 z{`9?)aAQ^abLVoWDISG6X0p{b{4aX4x5v(XVeI_NN8sPLiF3OgC)zCto-BFXChM@j z>G>9qst=W)-TNe6WH#|ITQ2M9C~Tc*DYCF*;-Pf4mwIY<6LdJ;td5md2)&j(X?(Zf z*}}8U^RG|b|EK@U+<<_pj>F!^pKtu~?vdjXnR|z4tVox+@HxijoQsQWs>tb8&nLby zTTx%}MIi0*LLH@*_tzdX3%vhY@|mZEUgn?J6q^et-L?f-i|-QN)0}GP9(}%Izjb{4 zq+7u@?N8I)cFvs#a6&kx*e1b=llD9gN<-7K!wtZ=NsVfW1E49O3p zx7S-$$|&!d@_kdI!}=1L_S%ZAQt3_^!cLobeizyt+Z5m@vpu>u<&bH){g&d8Yf%&9 z8u@H*6>e&AD`_hcuV6p@@avZOTduBAzB2jimbHe*WIi4*ZhBf@^}j#pw)MsIHXpso zc{}aO|9pv^x;^)8PW8k6_w1gX^ffdybesRL*>dr<*q{E(Crykpb5U11W%s*(`KH6! 
z&w7l0?f-IVWlrYNjJy;P|0@OmKY7cinAg^XSslB0s?sJ=lBaBOjX1d3n>BbL0E3oW<3R@BjBdKbE5M_-mS-w?b~M zefqySYpnd-+3)o~+HvJ1Lq2Dlv8~@+xzeZGF8O*{|Kwk<9(ZG&!k;(c{7XM>PG;rzOu4-FChyfN zzaO`5`9JYw`I-qAULM?UKh4H0*!SXR+eLlWWdSKsFQptS;-r%bUYuaRDZGBZpW@_- z<5HEKaW%icXe5iizcfAW*8-&_Mg43K^Z4cdDpbz@;PG(p0gJpRFEiDCojO)JWg_>C zJJ~08IvXyT65!$F5E+&~|Hs+*-)G*4%-a6v=J&}CkG|Lb{{B2<+WD0oM}w7F5-YfN zx_ykk%)H>>``nU!KR5Abr55eAuvYX zzkj&rr~c3LeIbYQyRUctZ^Q=InFV1dq|(4Fd1f6LFlGP)gjU1NE~|3~+JewKgw zJRm0gi@>z^Q+MopTk-t%%b)ov8vaKj-hHoRUHZFKVT&-IZZf;0TZG~JqlLb%2Sw#< zS(KHujI@vK2%jeCbvE7Ui4t>uPILH#rySciz0f%~vGe!@1FODpp4ok8`i`>5O+3=P zX20UQ4@R4EQ*w09{dBARcsuD9!?)i1LI3Vf)?_KS-=eJgs_R5wL(bW5^Uw1-_SIMPv6S-< z{eMe$wl9x;pD^d}+|Yxq3DLhE)|`=7tde*h$d;<~O6qO1;QzbVKMQ!)*&j3CFRv)@ zXsyM+zJQ=Q?%N7h`~F;7`G$S|?Ab>I_D}z7X_wUa`e3QkhAY-)sqfcIeSWij3-3g6 zrzg?-?LS$4+Fy8%L+TjAwAPx{U)P@M&yM`_U!c#`FRaBcM!T*$rdxdV^=BVu?R+^a z@Z8+cgT?uw5yf^>j&EJc_atf8&yC%;m9{C_OfJ@tPd?cG>Hn^Kh8EZDf8KcWTzSE+U%(|8m^1?Yp``M(lrk#Zr=7n_2;!! zmaONGy}4zfzHR5v$C1Goub_8Kt(FKRc?tT>2gJ zl-`fB59`(Ii+6pyEp|p%>T#q{|4S*|Ig^i-GR`|FFnQ;MIg?o=o?m)XGd;{U&{f}m zv4JBC%kzkR|2o?9!&zk>aXU`@Kka7j+WgSEKi^&XW3_kq$xMG+*&6ZiM!A^Hat^~+ zkuA%o*`}ZEJNwn9c<<`RcG}lxpXa#s;nT<2U0-)p@SKYKu+TX5^S|x(=l9MHEN7oS zSMvVxoo(Xl@6}&lp{KmbGVPAM_PLpJ_f&7KexICvEzoX#wzNycM`_QH;6HCChAflx z3H!f#v%ZWXOLsEM?7|;U#eXl_sOMLEk$29DZO(V}kDkBF_S~(u@7#eOHgk`pd^B|1 zAenRdT-mjchF3bfIJDDdxC$7}<6j?jW~(IA#y2;+pZYBE{gGjKG0>1lxbVcMqHjk7 zbJLzyxa!Ao?B~#Q?Ko!P?2=aT)oH;dzKbX7CSDa}JoP?V(#crJ-1UUke8JU|U&`wA z3+DXWKR+kiT5zUK+W9HMPZ|3Zy(S4dsec#m@nN|%JzgsFj5upF$KFK;4oSB@XG$!& zYCkut>P^@^$Cj+M?VC5rCN&t@|L%XUzEJX*&?234?aB8;Y?r6~esH%lr%&SA#1z$D z-8CE%E@^=+SBy`*YJK-qY3a|6w@tU*;BWj~BD=9HFZ4po2d57AA34^qqo>OMKOZsa zr-GKC|NgI6exEcJYifOcuxK5l{xtEu+1k&x*T4Dn_1t8}xt^bz4m>S)tzA*hv(4S( z*v}79zaB>V_FwL*Dz$7C6z@B|YtccQX~{BwzrJ{FGH;2W%7q)7io;K>)40o?dntYX z`P+*gJrBEZ&+N6r0v)SO75Ywh(=FGRSxIyqSDf|a(y5<%A;;8L z2f2^Wna-gS*WoyAso5H-MK?FCU7Zu|v-`HgwEs9{d_B*yAtJeQcR>t`!Xsmr zj0!=)R+;)$&+n^f>*fCV6>YwBLcmO!jMs~D-%R_gxqbbOwNK6V3MM`e;wuYMIg$PK 
z_Xg>WFV(7QcRjW8V>dqL+~N4)Zv2yZb}jP!-=ZEz8YcXIHYxJtjiWzwq;}_={*?4d z)aBTx^v@s88a#V%8FSx3qPyH+!I`8C?Zo!x(`i?2%nj4tv(KM>w`jh=#EO3relK1g zTe^9&go}#Nxiz9wTz6(~T6E)tt>xLiyVsh(zIge>F{^wRZ_BZh-*3m?U-hSKZ}Y_! zM+^^XpZwQxsD6J|-luIvb8pXGw&do<{(rx|tau;up5y$Js{19Lazc0B)GvPHr@A^b zWcur?{U3!=r`G?zBzAqv3t65dy)7J;w|>7+F8zM=!Hf2jJ-am-`4X=QraH3UJ<-wq zu{do`PS|$ey_;O0$J%BX$JeNTpC@h~|9j)hy={g|IX}mmt-hZUzvrR$?B9nJW_DT0 zN!oQYah{isvpk}hWN_@cDf7Wrf$ohBHS5fKJ z{>6Ps{0Z;xefjt;N2~3iLCw=;S^Ij*cGe{R)%kz7tX8BY_rvObo&1~G1(%P1zF?|# z_Fs^GVaHwxX>q0A@_p^%6W$uINz8YBs+hA`L(h_L{n}Q;xa5;MUgtS}3rBSpp2MfkSo+qduD7y7ezQ}v^p-ks5PliQyO`R!ToOJ3>3qo^X^ zBpZe6ov&pUO}O86eD%cd8za*$?qRfv<(~QEs9~hsy?|+g>nekrbv8<@FA0g+XYl?^ z#}N<1YwM>PX#X$oePwj~;jbe#&4zK`Z|}D+Q=HX5^=NVJv+bW}zt5Sp^4s3cVp~nL zJ}fganj@L~bVkq0*56HKpxV^*iQCOi=;E?m_*t`4Pa*sVsBNdVas%@>rUvIr!=y$YqO1E<2 zQ44F;*Js<;D$H@amp8FY&R@cf{ZQLHhPaR?k9-?1?%6Np6=`ziysz95%a!5M`o1k1 z^ZtKX`9#!-qh&c;v(h1k0 zuFXl8=7+tWJ@x(>=XSvpJ2&Z}l^V}imc)F^e$trxbmkK) zr8VEe5Bq)YS=iHjcG7|8RqmhWZ8G1Owxq`5|9ek97mqJZJ1aZ#t@)ci)wg%#giKdn z==-s}zWVBqMJK0+F6ZprJ`T1yDwZlK#qRrJ0TOONZ&?cb2NVwczc4gh) zM_*Q)KiVhv&i-BYtM3mk8OPOGNhjS4vumHwbFTSCPsHYe9R8A9g17oZ@*Y0)Q$Ka= zRYbH^vzgxPyP-`RGECf$vp-Jfe^afbd%R%TW#_;fk{>3C8rNLQ>B%aRaIkT0^KS{| zP&*P*@m3(x|Nm80(~C!+H@xlIpqsRDhUt`(dzv48{1L-r{&Y{)_t?auf)&^Tol)LEcpLqYgWuL0f)?fv$lNwRyEPh2iwf-6}Rld$VD|oix z=ZMcbc9EiXiSq;1w&~}pz5c{fwt4lUcg=w(rEkwouXHW#+Hz;x+5Wjl9==|Fd8hc> z;+*JP`QH+Mr%hD*GtJ{0`^3*p$scxa*e-Rp{SL>GC!trDwEgu9_x1U|GW`ANbM?vj z;a`tUXjl@*s{1{!e<{0ka>tr!e_g)ZF*iCAwKl&*`SD$8Z~w^U8D($6gg2H)$Ii=( z+m*5-??i@nS7&iD2|sm9H+}BS$F}K<`0XE6*B9K}C1E$M+~HhS5lNUg_ch@{x@u4+x_)|#Ia9|hj{Z7 zDm{3f_kA?u)6%lOGR65&&dL9?eW`q<^5;h>QLSc4LmE^?nI{dno?M`HDHf-UKL z%h#-Sa<@KsRz95N+hm0)*OETC@Bf|vn(33(Tpxefb+_QL_k2rLT#J`Ispwg&+jqEj ziPgL%(-$0eUNT*AVUK63)$L`|j5{XBFV?6#=kwcC{r9cqpXV?OW*)3!k=QBv@0W2T z*TtT~#S?jt&ZCI!-0o>il(#7$k}1n@o+&!tw>AkA%DFc%6q@h(h!+w zoK&!9;i5}-FFf{M5zzD6d5xfx$+UKx-}b-L{+c{`TKdK9bAr0e{KCBwlGAT%Dl64q zTlTD2G+kMUS!vIQmWQ>Rjg~ir>24swKbXSbpYtz&Yc_brR11 
zb2qn`S8#sbxx;LZ}2^2mz?&wm+z~tZJp^W z(3&C_Ta}_QyZTjZG|NP}hU#BexwkF(Q1H_C(}f>9w`NA`>9+(VGwA516tBL1`;3`; z{oNY|tG4S+5;92TQw*pSOZd+%`0}lp>5k2Q&n4rw3sg^RXe~3W!*uZ*xYkEth**wtr%V%MZh|H-y(eKc2e(#J@eSEotv#I{THO3){qzm1H}zagKKJ{5N_XG;bLZ+GpM10Cd*5xNjxzy@Cw@FAkMhx1 zPCm}lW89p1&M#e7{B?!+Wu4O*`i>e}3ujg^q$eqDh&yL=>ua&R!Lg3Q);*xP=S$8H z_wOm!vHl(2p6lqcOXQFfCui!VpWDwx-`%*e3A7l3?fj{@@+yO+&S(DK&5m4H-6&}) zDaW?#ai5EeTUY)^d!dOlbvVSo?@^F>z2xIvja$`{s=GQ)@H|`Fx_U3iKli=0f6smX zRcf98OFr`WTF}&^;7qxHfB$4x?R#FdthnYPZ(IxKhhsmpziuk4mW|Jm-QM?I?~nQ3 zxZ|MNCF42atAoFKd@szoSk-RjGvS1)!d=dJ!oqF7d;0!AY0{1B&};p+{QBm_+8nMf zX%_E8mVch`t|{(+ee|}I+!q;N%za+vRsQjGzfSmxl8)_Sae3DFmcFg9uQ>4hS9o{F znVL@*E*`zfy8hmC5!=`U=U3g_xU|^7-mvtKuzmH7?u)DsBwcuBx$fHh_Qy)g&zt=B z?z(HaW4YuZmwr8IpZ}ir7ruTL=<4wJedNyC?mpXPj)uG!X8+l2$Du#VZ$?wr%ZrN7 z)#j*4Y_2JJneZv2^KFGbm!nW)X`;2=%FhB?9Ih@^Q$F9jGk@;)YmH9~uT;#F;h1?i zyOr_boY}$pJoT@8)r0Srx${yz`-dv zF>TJ`jW(KpUMZh``$FJlq2mh)(0b9CA7{J_a!IL~ks9Ix2B38UO3>9jf`ZWHNgdGT zK_C_2p4?R5C(1aozL<_7zXt+cz=){+5!Re`kVj+@>|J z&wqdL`TUabnNP*no(YRBb1gg9RBT;$=Dhx_P|a%JZx0@yk4RjiU-?GbdQ;`p>esWp ztPO=Lel+GTo_2P2ecraJx4SsE7yH}o(w$9(>AL-9M#T`|E`C z@+-OX>TQl6|IBlz;;!h_=dU(z-R1Kaw9rNS{GRtqi(Jz_{Z-;}%6!gNpW*)W;3tn? 
zHK%iLr?n_NaoqZ6k?2--d4Bh4XJ_wM=-kGwANNvT`;ubc-ZB;GTfeTB)UVxFb$3b0 zu97%`XKx)h>K=ITv^?jgrA=OQ+qd-J%NkFW?l9Y({9I4F{cF`J&=RDje9yj`vn2cx zv(m0$J2&ON(mnl4efV;By`Sf%UEXVbsk#69y5;<-prrw) z^W`JV*L||cukv(VqO)$>QnN>QxBFkS5fV;Z_0IH2!WO|j-~PRhUa8FK=ziN=ZqeL3 zL4wT{eqZKAtY>~@Qd(`KxG4Sf0rAbQ-qTWd$6HLA*Ie@Cl4Ya$`wyq}({{_>dN^PE zp8g_*Dc=;IurPNTXdRmWsx|4Hh{3-{ z$v*nK`TE!CdeVZr7vAj+(_4P$+LYUKG*-`j(chYr z`|GSKhuV(k0`HIh3ap7UT`m2z=+t-jpLU&ZAMZT+vv6K^{+^u=Eh2cn6ef`Ikd>u^zUW>RZDjvih4)*wwv6r3ow$L&r&>{_|;0lF}Iy|OT>dAZfUMtPX ziaW^gcv-6awP3c?iz|3ywq~^)EZ_6%!D7P$*`5_*kL#iYd3iX~8r&K`YH^iSyJ&&z>P&CiPtWqM%?n2#nXY1UGCEKlgtfeB^ll++H_phw1}=!}79Eu`%beEc9;a4ED3;G*~n@k;i9eZFgDI=jG-$ zJ)a)^bgLG7{;uNby>-|B{J+9HuUF#mmHva(Ci3Z_+-8WzXG_ z@7Su;9&b10RoRsD+g@uwmlCuQkGoPgH+y+rx6jk!1(&~{u9(McWaguF`L%h)RcncY z&4vjxq*vcKe8M?-MUk?8)FzYOtK2&tK03C)=IyR4#wXL|=2X5owJngBm8t8V>Y>=K zr#vxRjd&;RuX%fdU1aaiw`t#wT(I8ozVk}8R2)N{veWhon+J?1-!Eb8y6@%e?0i*Y zBj3Um&EISGmYuc8uUhxQGFhhGWBozfuX#`Z@iRC4o@1zT$NH(2zlL;Js8_wfv4MaJnqd*Y1c`U@+My0`?|uaLI6erQ2%n0ZB8QN6)6+gEW$UC-yw zp11qy^TIjd8`r$3$=NQv=q@DPoZJzAtCsJ?j|4i_kYnh+f=P}f^kdt;ikaaDU) z!j`StZHJU2>lo$+`zD?-Je8PuH11E;)2AypZdXn0I^q0>_g?X{ns0x99N`d@J}%8~ z((tr+`NBtgT&o0o5|2FZ$Fvd$*K)n_~NV;-8hdhZh&w#|EpH*4?Ro z`XT7uX*V~9=OT&T+4bM|TnSsVzchGT?T<4%k4Mb-y7Zgwt$8zT)tzYaHbo^A} zjGBD&#idg9Ti^~9mM?mc#qwN&Q%=HVf9mtLwv|tk*B!0lJDh&J^j}+> zr@i3gsb%{&WKU7-na-PR{O;aPB+bS@{!=e&Lq>WPt0ue>+(kT|&G z<)e3e%||X|UsY}qP-$sA)_mXlyvA>VO=?QLogcQ$c)x~6*8R(qvf^!Li&s7V*RbW! z#61lqf4>@;ToYcHma`?UoZHY$ZlSqYen)Baub-9*Tuzw`wpJaBtL|+H4K<9A5qCdh z=2OB@HKqB*n&}&s?(kjCaWGou|HSXJv^ZR)1T~LIry3ubuq0UXn1ZjH^u|?N@)qT? 
z*q-PKP5i@oZ<54zp{Igj%L9}5>Ro%ir%vZZ#@==8r!D<1maPqJ!-S6?-V5JX#(44GAGXuqX9q-IJlJi+Wi|@(V z%iQ&fk`azATTG-B&imSh%++63;Ws5VU*VHWN8GYUGnA!fRIQx!xj6n}NuKP)eJ@S4 zmPq7Z^E23|^)8i%N$$#bQK#+~nrfmyYL^x)cKaSBlrgnzL0q)(%s%y58OoFVTs1-# zmu2RAMWw1Vezm$kPiMR9`Qrhzv_EhwAA4w8{jc!OuJ6a&O1FM}do8{v@0+&tnJby^ zR#)^zZC|>3KJ(;yP`xE}_d#h;^mNX}I#sTXYicA4m#j+Tlc-Xhsl8^-)yZ-`Hbr+% z_$Zw!ORU)V?w^-si{u9V*PvT??@d$N8}OJ>(OX}L-{V|s^2HrIVb$wS9m{B$z#CE@ zZ(^hL*TwQ^h4Q0D8Cj=0_7C@VRL`$mqVf9QL)+Z@*;|;mus_|_R@PjkCiQEYyX~5l zm63~Ly`BeWOgEU3dHTzOFXelm{ZBu^==T0s*4|}xpKDSoHwqi`&yJq(DlaEIru=kn zdG9X!zVPR_bIdi_zLx%YY4cLL$>iO{noI9?U$&JvU-5cd$>-t(t#{%tc{Z#4W;icv zUmN~3clPwER|gof)Q>C;wBYG=skjqSUfKL%t5Kwtqgbwmja&EneeZ@z=iK_i?Mk z{T6Rq7I~s-bLqvlg1U=lPgkD%e#J=qy0!ZIpa0fge^amE+g-HiVX^Kj!LPI4zPfrl z$+Gudf7xq^+fSX9KNV~Fzl*h59#*=$=FCRB^rOF1ZO%9P-O*2$+;!z}&hI~qA}2rl z77p4`*3ki4UuObZrtae6vcx|SDyRY4>I7=h4z4ya7pUEitz~}d%-p7Ao8Zda&q3Xw z7JEmJx4*B{X0_jM=3Cd@+8^My@#8M-P0i=+XWQBT`0;&H==9i!u7ZLu8QFh|z9;#;26o;6>Dz4B6h_oT$4$!}aa&Z@$c+CF>s#b##6=`=r(K(NcVA-cw^RK5m4(nn@t61)efam#_U4|S+?T$W-?y82 z*KUJe_46m3`hAe4;x1Klx%J~_-rb+=|MvIC>wU{UK8E%%CfYT$B&tm1 CqNPXZP?H>cOPT8N9O_I_6qNFo44l=wZc*H?UT-ovL<_1+U( z&eVn}Ow!w~&i|$JUx|T%VF5_Z3ylw_57}EQzIk07!8+$!+mS7K(SKL!f6`}$DOl@N zpX+oZzu@?zAKIdo9BaHanB|_|XJ=q=0Qs!pLj2DKJPc4?z{aNkrJM`v+4fF!Dt40G z(B#qtbLy$k7t^=aCpvwQck53{b#@YL2nX2@ir|J#0)LjPxBO$PS{!V0e3`1AoYIEr z@8D*3HSK4)F8Rf6`+^uo?U*~-46ZPZ3@r5`U`H^4L=Ns>B*J+6m4B=k$YikZG}<1- z&ldP2{i0WHp{1FwN~8h<0|VGfC$9hHJ1f%aH~so|e(A3!d4;`r|zYqM;3-@S%&T1=o+%W)SZ4zj^3nsLL?qshCfd;0HH9Q`V3 zaYWWUE5`HN#eetzFaN#bXXyXh7d@7$x6jF?UE205IDQGpOS{;eK;gW*^}ngD?w{_v z{x)yEc-Clo}#k}BA`rrK8zQ50$WAD8`82<0{ z+aKS;wckz(`qxord&CcH@P2K*X1NR_m+%YYgpSp67FX6=|7hC(dH+(23-4pI`;R`C_j%)e z`shXH-$|!mO`cZs_Wj*sOY=$&$G%(p{>}q7-<^l9o{7EmaK~S_-mhF-vZuG1KB-*r zxM%lUcg@efoeMu-D4!eu^?>5K`zH$RdK_-A*qgQ8hDRi9L2dNxCxr>QVZTfEIW6Q+ zPP+Yi&y;=#E|$-30ry#+m$pw@#I4g4n+z_4EXt)B7#yCjT*l47uweC(mJ9hu?b4<@ zMSuJIOnlvc&Bu2+)Bmph|M#Hq>b(N_PU|^e>w2#%P0ovu6JE9c^8|(W9p5$0I;}b}nycyX`#(4L-rsU)YkkoT 
z?yJn_{zPB@ek4%#mU?2%`Tsx5?L9N5I~@>W2;-N2d9U^bUwKWA?A+#$=f39`F8=qi zeur{(*-rE6aj#!ZTVL#~KkMl#cAk&NFMJdFclF=o|MTDfy3f^|(YN*e5$%7PWpejF zZ5ux>V;?pRm{Sy{wJKCz5EL*dAwPv0wf7Djh z7pJx4Ti3kU^n(5R_42cqHz{!0PH0k%S zk|GQY3>TUY?ce!tI?JXz-Vg5oJ$?0_=slr#Z~y;YUz5>LnDBXhl2hqBy|-Pr`=jDN zA2R&AbIRw*2J1gCXfmlx@s2PzujhPo@Y}na7oJSz`uN!QgCa-%9ka{YyiaCM;HwnZ z@Hz0yaq=(MEwWbAv>mQ;oa&I=n4QO1E(b~#3hbbeXjR>~a$ShjVu@dd=J)0+wRlgc zTl+9ZKV^D@bTzx{fs^KS*VR85YwzB}uCgP#T6Ft-9>I>a^|#8e{r)^f$oci0T~99Z z?PTH6bQU=#pz>$8%=(?hv+l)gU;FQhSe`utL&Ki~%%BR_b>+@~r`N__d$WNx&S~?S zH$t;`D_Lu<{P8;X+i&~6XOT^NV>qjnSXjS4X8t|#X#Cr$;Rh~AuH@t?t@|W5LCN8^ zXXS)-XH_5Vx_?(gB!RUYR>f$@^)9_4ar7VmJR#Y``ifD9Hcb5kR%k2nlKZoL9uyviA2iHchiDen(#% zujls^{PEOwK`o1@{Gth}eBCGBv@n4xhffUx3=9k>X1mCg#jf1q&a_qNF>l@urJ|iJ z8LHQbXz~?X)g4v;arymYx87ps^fR8GKg<&z zwVpHm`lep5wd0q|QVxFANka2#9{<&R8zS!&cyWJT(`q?`Bjr`9?SJ{U*r@w6(^e}Tcyr?mN4YMv8T3MCv+$u%R|W3h4>aqiE6u3C@@JLx zzb&1tqATwI{HGhYW9`h{Dz1_ruWipSSS&3@r4~^;UM|RwS|w4{xmFnVdP-_WTi3*=y>{3|^RLt&z6)xOuMSUR7|@ z)ysqNLrwPYl3lV43u<;GKJM#UwRvCt=lWfj_kVt}@&5k*Q>D!o?@Ks7dBS}m;jH8L zzD@XlcUM}c_?q9nGOtyjw10h~C?IlW zamzQK*C#IJU;F>SXzRwwTn)SGtJ1EQ{;xULk@hZzfk8ugmj1exSt~tQ9eQ~G>}Ozj zdT=(o-Mc*t{{H^|MJ$$cQjzj>0n1ObW*g^yKKFg!LpGluIZ@&t9(>CAJ$*rK&D|qY zYBT>b3hVjYe14DD*Yjb_v?|WWMUm52?`kQ^{rvA1`5>`8v$>CJEB^Zq$D#)f_NzvnGlG?icB#XtSiJ0c%9zxFJ!-|#bBGj~$c!702i zw+XxRzx}-K-=-(OS?W0#MsxLOyxuM-vP0B4f8%GNNu@cFZ<=3hf0n&-tLt;=tIHHV zIavIjIQ_C#+7r_Se?2+{!#;Q{n6Bjbe5pZuL1}BuHO@t;%1O@!zifHI6aDX(qPada zmz{|0sQN$Q_lC85y?;+wpdDVq`62L7eYB57Sd*px0e6m-H>aiFt`~@W)ZUi#H8MKM}psf%B%Le^bfrnF4R(cQ9)2pTcPt@07nm$uVH1VC&rU z9rkaw`)5R$AC!OJdpoV_eCN4o7E>;{Jmh~khhdq;hU8P55A2a?GW@%9P4bHSOEgZ3 zsZHXJ4g{60;3Dr4XP}Uc>Ko4$lU48Cxf|SaXT}7rg)vSAH%jepe~S4g?~?kqDXf(9 z<6r078t3AiR*CQzEi}Jg%DMm6wKdm^I{B-Vm)!O{F*o+O(ZlfH7Yeto+vgqNpI-Z3 zC+mIY{mA!zdtOz&zqkI(uepItEB5XT)41?oASB?)=DvToo!9@HZvFGv`?A|xt6!W5 z%mZ~^L8bWw?%2Y`(;U>lS6%HpKkaO6(-xgKMg09iOfI_T)BiioV+s;rS$jk1(cx+3 zy4P)5`8^8kMFeI&ouzkrFH5HC)zhWckz1b5TK_nE=e6M3Z>!CYx=KXb-wFSAd)nz= 
zdPh4%7(ba!P*RZBd82gwQ_IPMW}fH5Tl-Xl-{?KuZ@n%aTBTgb4*2$e-LC)aA3uIy z==1$lOuk1zXwQyU=Vw<}YUJ0DXs`FH(~wreO@QyH*g+65bh1u++P6@9(` zb8mFbw!bmkCl@8WF_$meJxlIG?j$D1$+uMe88>&ew>~L35$*8yhN#x*<@f(GdkU=e zwc01d7WI!Y^#=d9slS{!S(=|KyywbQ*JsVnzp&czb(^TbrwtR1H)=)}#k&7rc`N7m z#rkVEC$uWGfU2&N7A;VN?d_q-YmNzhN!c1c$#&fful@YCxwjVg{LPxpc|Wl5 zzuS(Gg;Tf=?h824c;~a~ifBm(+5b z(#ylZAh2lF4fdLSTldb*V`tFFo%VO()wdQuIAsOH1dgzTa$bl%8^nJ!@#B+=@2)r= zzbMGS5b#Q8=Zz&JtN>zDbz-?LwI7&%YX zx#z+OYDR!MpNkG=afz1NPCgIffSV>y{`U7(tk!fms=*MD!3Y^QY3Td&b+K{@Q^+(X zaMv5$M!6vP=>Nh;A`BHE$ARo`=zGA(FqKUaR3C$CMTMsC{M#>b1{D6uJnvotvH;Y+ z<7fgkham$OFL=2aY#0_WDQ*W@4Uz?!)ez?#zl(u^!DZ(W&-w9RS6S~~b(6QS{m131 z8TQ+*?<&9LzRP~+u1R^%7b?5|+|0yd@ne_tru=T}cOU%BKU^rYTGDPAWP3tZs$!Y7 zNy47n<|&)9Ut7=c(^O_V{%+~NjnymvKMb>4+DcO?g zVS68>eXYFa`*L$HpXO1=@ZSvIeSZIZ_i^#%yWDSuz1G!Q^VMB?{eST*<=?Ts*^{Mr zzUE|^DwOhrU3&8U^7$5LxAyHk_O_qXcWp49sEwj?= zdrOku{FmC9y*GS`<4Z-=hP&<>%;0vI@k)QkT!L%+_Mef7EJ z_Ma~--?6(tpRz=0!StN&lkF(na3#uK1iHt+wB`P+A1Gxhdbq?^7w zZTq(O{|o*H{S9vUZ{!>&wfgtHw|TEWKaMNCxB7kbBfWJiUm3)Uozp%q^=x|P+$5#O zr*p#di<0k0^&SXnJF%3qs+*7qZed+RpUn#P!&-21O6=E3#l*Dfxv-G2YrzrEFe3g7(W)Vsdvwse}hEl1CT zy3(9l@o#tQo_8Ng`x0NEKlOR7Nu~GR@H-ANpn;YHpTEujcU$(}<lnU!9x%chl?q*hj7ZcN#jzUH|mS^#4OPe%WW&{uQ|TY}j|@|Gt!G zIs4bGc_(z~=%3WW@Bf|}CdM)Sn7saNVRO>$%>TFM8*uM^HoGDJ(62>Pdp(b~JX>2? 
zwOU%(df&Fd+g~z|Pd)VDjKBY<%Z;_we~h*sz4m_1hxOH*2lww;nH%|(O{VJZ-={s+ z0SpC}3=CiNass|SIBzy3=C)tiZh?Q=9F@#>p4Gh(+y2*kz0Lox{bsv93ZMVn6ui;# z$9fx8hv=^g-w%H9<|^aie6058wyE~Qn=M7chu;1*v@nc+{bKK?SLa$i9b>j+)z6+i z`L*ng?T2*Z4X#&nXGqH)wqZZ?-Q%*ewpGsh+*ix5{QT|pI{x$4*1X4)U++mu{B$L4 ze{V}&uti3#@9Ou_vuxxJ)xUSEl;-;`|M;FHXT@#Iz4hxp19O_3+G7}v8OMm zmF+R#(%Vvf*L~Bz?hjSTpFT89{ND3T_?+N^YGa8!h4&1VDjtj*|4tRZ+iYLGEc*S% z%l~Wm57oCWG~m1IzE|<28W#gYgOO{qZ1MUjUtODJ3^Psyl;=O*wM0NE(OGQ9*M7T6 zcfwwu@JgB{_V)OOid5biO-*O!rO&!^?*08qe{4&R$&`QUy|4SDa@8LB$bZ_CZ?CF< zGtV+;7Z*cu^|Y;VhnmZ)`_3ANCkuP1Jjjq$T>a=w_WI&-l{=FUv%EL`Ew}GF*D{6- z)oWXY0+WnW=j(@PG>AmsWchkcR(Im*+<9-6;~VW?S9 zR`UAq(mUK5m$bq;H{>2_-uvy3Cld!lfZB<(fzxI@kG>sy`I-41RTs|*XHIE6ZP2k0 zGTL(PmwoL6T?3(zl$}$)2A)6^S1KeCia+Qp9j;kH{a#he|PT$YxqJ9!v}@!<%>WS(Zfoz@+E5cP(C=|kfZVK^@E0wcK=pS;GDWEma*XJ8$PEl z7J;L>-i8T<)7w}kms~qp@@4((2|}OaZ3{9ko_imxx`;vG`CPLJ(o3f+G^XB~x_J-x z$6M`TKYsgko@r`2b4*>n-1Kpl^%Mc)qfwfHXDv92ROSeTUEjN|xN`E$q6o3CJE!{> zrTfi!;(Si$TKs#l;110cx2}xVBTu?*ZmJx3ZE>)A<-fMpyo-1Ce?Mg&T+@Dg(S4yG zqD%|6ZGYF%&g7vf;N!NU_*KuMb$3#`*y}f5zS{Td{kuP(R#xV!FFm-SajLwk-Gd9? 
zHc2rrVkr2yLuS$)28Z+shXO9oxTaRVqrUgH=_jr0QNi&Wj(FX5ow7i1-g3u2HXosg zoClMqe=tbk5clAb{`17TU=>SU)o;ta@8+obRCqs{{CbZ<@=u1QndcUt>JZ(%Mc$!& z(WeU8cde$c_ar&o{I)bCtm91Mj0L%pUsS(%KbS7$@LuY!{H=bSZ_{e+<=5?0oVWd0 zU;52ost2@}-4`j|^}i%0RAs|9omtkm^OL!n3PKrnmzVOqP4{~v(cH#U%>6Dda7w|P zc`k7=v*y)S=EuD^tZX>@|Io?2U(4Hzninu=$cn6A7^Ly^-85!~rHx6cQ+d*!tW&=; z|NM@{OEw>JJ8G6SPc>>1gK~E#TT)T`6$W?7DT^m*Ol7`b@k9UJ>KGE@WQ4*I}Jw zr_Sf1ll32ao8RsJ?B~C(`sV(9Pv-sGUi`N_TCeWk?z`{YKOf)ixt!N)-SWRD&i;RO zp65HWvTXYMgJ%2lBfSshUikCc&ivN@xUb#idrDm<_bDdcoWlBe|F@>2q`p#q8(#bE#QOWba^Al;X!Xh(h11EpZN5~MjMmKPRFa2cdk!hWs-I0Vqbl3Vd}MMe>s=B6vQX}oK^a7 z@A8xuPgaLR&VF1)nhb|-hbUErzx-SB?`-tn&r#>UDa=~_y=&gxs{_^Kq zwaa^kvzMz+IB|YCv%P%Ew&{}`&sVjczG*EHz)+&C{;uxaTsh%?g`e3QoHUr!_Eb%- z&VKV%HCjHbdBK9{#X82TI1=(^tZK{&^Im-A(d6QzEhog~W=s*eV|rsk=%GiSn>{ql zOdK{I?Azyk!+iC&YnKzAuB`vP<^Nj8?r-U*O=oawF`8I#$-STC&+n-v{A_L2UH%@E z0-b!s0f6o4Yz4mGG;y)joGNg-7^X!UxRnr_M&iPrb#=GHqxo??`Y&_F8mcJh= zUu>WDdi%DEmG))7mrrW8<>lHcZ@sOhlJoDuWbU{B|JLs}Sh-_^y-xm9SUShD+eOYq$2Z@-xd%Op~ zmgbx_-%`JIVaOv7_Aj<(cMR7{Og$qa^r=+;WNyTFmphZyzTPvso_19J^}b70U6ZFt zmt=F^vuC(aeQ|Td`O3{7ycu)OSMJ@sM1)_f=4;&L&r$QM*1!C@_I;ImM(ySw+?pa& zq7_!8O!y%G&459KA-29yzUJ;#v-OYeGc_z)VB{TSCDycVibm9yO&7W4gZQraTb|{X zJ1CJF86YBW`Tf($I@z5eTH7pjo@DQ>aN6uW`Snz(#K!w;{_kAi*nQ`o>JGN=6a6j< zhy@)^WJpZB@olc(j`M9Tsk|GWC@$E*7#UxrdLyjX+wU7z$W?EE(SuXO#rWdaN zFYK<`aIe2;yY1P_wRS&ewA8I#ULClU`D{_-<6;&cf6H}!7p3p?F0{GX`DvBX#MXO4 zH(%YO$6-IaW*_VdQGXDM?Q@AO@jS5#rYZQh2zFLs>% zci~l;%paB1zs0ley%A(>V3R5`-*SJaiqWJG@+JP$6>^xTYAjmwjs$aSeSPnNCkEBBp0U+=!gJHL8^ zZtIi9=5dAZ&GbsQdS8FGHP(Q|;k)Y(xy!Z-s17+~k+_H@_`@x9R?N zSH0@8^4+h$r##H6?5qFQVej_j@RPS+-oDD4WuH^Nde-CPmrdR;HLL$)5Vh>ztITgT zm)`B$xK{UOT~seO?x~WKW#BRB%&e zeq>FW`4O(@Wlu^nB16ukT&a5acg}_FC2`!F0=zw@ZIBW9r&}5K>8HeoPucr+e*ayq z?QL7lxy3QOQd8i}Dmk?r-j{2RM&}wi-i!I%YIk{V_=ehp{|vYPU*LNG#Iwi+xuzOt zoEMcOB3*a)*WB1!c-tp_l~KIc%Ux}s94g(Lz6bm$w^UBuViTJ- zY4@T>6RPVjq{g3^8-G5(viRlhd5;g4cgml4)7bL0F6f6t!OU%!za1>F&^T8AH z=SSP*E^c|YeooNSSAHu_Y5bYLf92V^<`(zj{)R>C_j5kik12U`E$e4M?TWuk-ye>- 
z_Vbebt)$mmcOTuD%=I^->dLJ7x0as#<@@`Z-IvVV&aCqAA3yfW<~{$OSJ)o+H+N61@vG1Gj<0O)d)#=f z`9&0G*cuJ7u)@Pyoe^L8Vn5%UG%fPbg_8ZJ1uJVke}sRESlII`!13|V8@u)IZVrF1 zt;tYwS6gw-z0_UuCBCn1mwli3j=MJMo_KBeQ%P;!6I%?_x(1hzS9__moi)^yt;0MS*iM_lGlIaX2-l$ z<6Ze_zpBXZgRdXV_^|uwiv0PSY*(H=c>m7ro|ym9bCz?1j@*(7>+^Ua_p>DW)V7^p z67^oYXsVw3e^#*k>kGSye-oztPtwTOnmO0>>$|hNSYzcfI$gD|muN-ru;CC{uAhB& z2Gez!`d4rNzPq^p&C7}V{=KjJKjBXvvw-xH=x0@x%wMkkI?{DmfBK}Vz&{J#dcR|L zfBt08zNEk9|BJR(?|<3u5;|NnE7D`meg`FHhQwD^~KJXb%>2{F1HY<-gx|ThBk=y00ioRP{e9K2>D|L;^W;=3-u33r_Pa1u zz;ge#RnD^y9l!Xov}c6@)7k5vj^DSP_3h64dve?BynkH(HC;XC&sWXt?}wj%`zZF_ zz-qamzwF204}Z4A@7#ZT;}_c-{V&fsPj0ooGpXS1b^(^?wEe0b?X9nEIrUfG65jRy z*2Rs7rq#*nzlyL5{~G>bfAIg7!jsDt~>8_Z+)#tAOkN<|ci_51i=T_HrZ+r4n zX49KLUP59b|E31+*}U}78keAjt@VOEQ{Q=}Oyr0%I693x2{IRbjgI_6i9!q8LtEIv&DhVvHy}Fr^r%ERZ+%s%3ySKIj+xXFvT<-qgP4e#BIU6+B*1i~*S&|EE2jwm{qa-|Ne( z_j-TVt+$y~cK2s#VflN5sIoYph-n9M_O~BB_I9`4>Ti>$oxNG)qcDy8Z};!Ttbb;| zTBOg+S(tTy&Bf!>RyWba^!e`~K1;dS8ZpYQtbU)TNGe)ZkKX*(tz{N)@`&+qUn8z9D^nH%~7S}euEhskeL0GBGGQA}7W=JLLH5Z@>}eCL(vySek0-rPb9Pv>{dypsGs{n0!JJXVOG za$NW}=YN#{{;hkjU98#pCjGSc!j?=fW24q)RoS&F2kXDqde?G9ovJlSS+P4s?cINd zy`onhxw;&h#S$v4p{jDK);dSm)bZ25;zf5qss8Xhz#ldLz5ViA+_7pOJS+ZwynUSI z-iiy1!XMUKtKNDW<8=Sggx2V$du#kj3Zh5fVeDekY6TYl`A^mxVpnpaK9rzZbS&YhOM zPlP2XBgLx!<>CIf_Ba0jny1I&e2+0`N5ODQ|yRn!@&Ukuh)Ae>I zr6)TV1~hMII#?^q2Z$A6Kw#~Z}7?O2o>CRv8?_PLf zck^r5t*^g$BpwwrJ~{orye9s8ec$>uJy*4KBltwwbe`Rq!thIWi-i_n8Sw{S=F-j*Z0>S%U@i( zN@ux;>X4PH}TZd+9P{e8kiRB5oP$hYEMDt$(tr8U&Tmo5c6JPtl#)r zN$Akg*qgCk1!;#?a`xBlf1h^#kDTg*^k6;@jj07%-utJBhxc1OYfW(4p>t5EsW!vG zXN3trlcvV9oUG5cGw!Z-UmkVp?A<#p#}}O5KTT6H;lidldO41V*4z>lar$~!gk`f^ zx?O+G#wV>oC(iVLSk}?bwWKX@g?w6Xo_`=)YV`4E$Jg_(jhEHTnElm0nCY;AmD{fW z$J;w)I^UOvdS^*^#uOI&BwmW1P*U3%ARneL%gv->p6kKF50 zy}w$8Z{Mf*xEJ?#KioLy+I05@Ri-oT&Kon?rw8;4Z2956Vfxd1lVVQ&(qCvhXQPc) z;DT+NmrQGGDN$9gzHwBPAxPrc6s63aAUt(VKogUnpFC_X(f&G&p! 
zBh#0l6MSJQDms1j+s<;IENpdHbs;6}-SPX~WsLH-%=PB9e`fM92+_3fF5O>M@@#+D z>*K6{UVIHRb=PK3nRw4;>n-lfLi1D&?BoK>PE@TwQjvAIS$AdR+o_WOH=eBB^Lp1R^9pQ`RN4<4|fSo=C|#i=Qu4S_vYlh$yyZ=8T}sN zdB$BUgi>dIOZ@3s#xBD?0KDh8C!bX1K?TxOn-I?FUw`|_R35jS)9-u|-B zV|xD@36{_KthbkckFwlz`qacp-xw@{Vk4)?$DWC=eYrw>(-|$fsWbeWrk*SvHghMeDBHJV|K8^5jsX^PO_o@dt*OY|wd1Je zKA-2I)lIemx9)y>x$^jhqP5lkmp@Yd@+k7l>OBSP|7yI8UnsOm=YFN5{Cmd*rTdDy zyjQQ`P%i%^t}M4_*7Cz`FU z|NFnroML$J^O_|t^*`;}O>P7^+54{FzvpT-Q;V=p>q^P+Xfv*6XZOBcvg~%6D=vil z9ok=vGupAMtqEEf|OLKXv@BX|0|CIWl z(;`d?Y(6^-)+cK)KIq^o{x4n>tyfz2=k@nHm;aaLowDJ)zUh{?VuI7IN|uyEr+k<7 zgeKiys$Hq(y{~WSq*O7b3yEtCR_zaG3ULaZcrrNsN9MmxqU$uj*_{$&S|G!aURO1# z`Q6^>Iws-CU-r6teV@~uy8UVEn(}r#^{)YZ38y!T%Kbm(5moIZDxR4i%!|!c>qilaETv~H7 zN}IKjrG_&<@u^e51)t?I6B6XhrmyEtU3h(4Pzj6fw=g4rIVQ)`TT?ge*PFQcfp_N} z_9~5|zYkSDw3jb0Q$4ynHTsTFURWi!`i7Ruo97mrO)M~YQ@7skdgaQ!=hcLm(t>?n z%(=pMdwNU9vm0gM{p(I;%YKbFJE%OlKxnGV#dRlr{`dcinYUFCXyH5&ixyTr2rz$hsmflHGeOPuzzpg4<;Q!arhneI-nMqe?mtrw z#+-0fi#d9D!Uo-kF{cOb2+N7xn--~0EQ)%s&1P~mrm#9 z{@i5KyOO6C{PIQ6!JdisR@?p0wQZfJFsGZVY9dp@_Yaxtwxx!;+DfQ+XtEq=spG7D zc;=^5(@K`VmeGA>5;;|@j&GJt=-#&Gt<)T^7uR-8=josRHQ22D?t^Jdbk{|$_TuXF zx-sqDL50(`R!usLJ-Li>%eE3=p=X9Yn=WDi| z{XY3Z)xjslJMBzVSWasn`V}U&so>BapXZ5QJe+5y?YU?&XP4Ci!SaM?apL-j>?KuC1Q;nP3zLl?kYx}0kde{4NA&Qo1X=`IP6~+qb5R6jjxE zTWX{()3n$+SChy0VDm2ZckNa8L+3<)4mV(qD z+k@smYtmoZFon76?{udJprJzXXZ3ak+rt?$?9=vMPv7CP;@i?CjR*hUuCZ==dwE*6 zz1oBJsoXQC@GkABTzOL3;r)^Y?hX6A9ScO4GFYoR+%CSc=E}~thPL_N1K+bqMm$-1 z`_IDO`Hz#ms^_Of&*R-1o4mi|(Eh#oP04pxR6hJZOH#79GQBeO_Pa9sXID1uUHG9n zl~Fk@Bz0}hUy0Hh$sK9EYM<_E{Ce^C=e2Kp{w@W4V&Wz%XHQ=Ca-ql< z7LWG4T?t1UKG>yLp1NW-e?oq}`}Bgt$B!OoJEQe<`nuk<$a7cRf@hVq`usjwCU@;x zN$u5}+fr`D>%C6dwJxRi{5PQshL^t9Z~O0lyd#|Zh3w3KPyg*NEt;JDdiH+Sz=e#f z-^I+Bl#zMylxeE*gDPleP4kuE|K0cPr^MK6usEo?iSANelEEAHVgggZsiO~!)hy0? 
zW$4M>S3jzJiY>m1&vRp1v&Y@}VMh4NO9J-jBPJ$djVF>+6|ooBuq2 zU&b@Dw@UxU=J;c~7hH09xG${i);)H|{(#)%dFeN&J#V@vZp5@KIO;|1;=iogy{*qR z-}i)fsou-3O80ckd%q;-z0OI_s_A-uQ*PCn1$y1m%ALt{U{BQNx5qzxZ0)H!^;cBp z{eIJLRo}~Uv=pbs!~}2MBEhcpQ;XqOvD%)hU{~7!HBpV|U3vdDSl5ZnY{~l1aB>s> z>a+=doQeuklw;0MBy;Z+l`Q-AocNyO0bIR9U58d|P{<5XN)TP}?8>5cLT-JOT zuVLb#H*M3VHBoymM8`Z~4bbCa&{5k2}|DEdIm((D_-0Ueg!}ycV)B~AK8|FQ|K6ldZZ$cSg zd-&~^HygeSn`9}`Cs`Ap5$L}s#D}A?M`g(~C>CVA^w|%W%vgkQ9Fa-IS z+wHv?TmL%t-*zu{r@SMp(|1lux%5-v_1=~XKB2Ro#A>b&iTwHaebcu+)5_U;@0vcH zb>rv3XB85KX8IRO-s@EJKNB{am~YQj`AYH4^o>joTdth@-dw2h)^S}-xe4pqFovfc zG5`M;9h05A?O*%ER@ZwBQY9z37sNO;ys`>23U=BTBNuw@#YBgD;*rINI!(UX@bViq zU*oNO_2pP&Z_@wl={B$U%=gbKy(_$G^_?ez%sUtul>*kyVz2+!bNh#iBE!z4$iLRJ zf1jALYv#PWLaVm>ulIY#J~`0$RI>KP>A979E!wFwZ`ayApV-arxT`#D-R<(F@@v;G zI3vm+)sjE^gMt?WgGSef_0P{PY)ltRxnR1vXOf<;tnCDDCI3ZP_On+nT9x)63K#&sgCw_4TKm7qTKg_ojPGM{lZ-D%q-26286vmHP??R)+x232Rlv8646r z@9bZeaA<#^-^Xb!j4AH#W~9|#d>$U3<-J;9f~ZTVZ_CsNA(xbn8n@|M+N;anNFu#r|}1$SC%-p>`C?cr-}!%t2tkn@yrdjtDWU-?^O0{T5z&2r$N$n z;dR?8r@p>j)=)3Nu!~hyCiF~w`Od%LT%VYJ9G(69MTbeohoy{@?g%}dl&R{{t2z7D zpN6NeT!b7#SuW&zAJg>iTciCvzboC~TEa8S_?694C+4z#uV>I;(dyJmyu9hr>&mr9 ziULE|=iR*atnFL(@qMRbW2aRgU8>DEZ?FCokF(^x_5K|$`RtaFcUQh2{qnHz;8m~fQ9pAF(--N? 
z+de_Qll%30|M!#6SWn2UTz8__;ddaX^ILZ=#_IH!f0ul|^!vrRkD+z8+Qzx3&HdMY zI@R%W>&N>-d(KM-&HZfc{a5*5e9nBi_xm4wi+y{1#pBO%zDr+73eC&UeP8zVOksQZ zF2cKmCPPQL8-pOZnr?c?8~_xatmT60!RSod%0k@A1PZNJ~TcbL`wT>bao>{h#* zenKlE%^!tg{&H^*hgWGY-?Q~rZ?A8_&Oh%CJ%8Dv1gc+r)I$tD{i(4wn<@Ys6#Tlj z#Xex#?(gnNdzl$DST?N?b7o|?5PDAP*8Xtr{|pQaHxa6J7kp1-3+$K&bn-*-Ep!9a_C9i+&ObiQFzqxbK zN1Z{WA*q6~=J$*6JMy=)U%ugb6yN`6^Y6*4wSR2YR24lkal(&Vk>7>50vt~yb*8(2 z{CQfk`_93*H(GI>Tdtga|8?p7%4nyf8-MLn)nH`caxmP(aAc2icOX;9gb)8ee|yPl z%XaO5M#!_0+b!&YOd%Z~{!cf4w?MgyLGyBn{6dx$JfM|w-|ZI4YA`I=bYp*cfAs$s z%6V`3!!JlOur82@WB7g7q5SN`d_lf|V{fwMAFQ+AwBx~{{jrlH{$JK++~${esCTE& z#81z*Zl0$6XL^TF!|xR}fgV>@Z~QOtP@a_`M8TK;C@W|kS5+zlLy7*;PfP+_>?{61 ziTnO~@$<_cQ&oQaU+~sD@7cuAcdg$zS?%W;{ZIaTUT@Eyx^H{yK6En#+a2mknzD4y z&x9YUB_5r3-9wTz<=K{rcgyMY5k$wx5o@mA?6R%vJ6@ulc!uI8~3_eyhA^!~UNyS{6n8X*QW- zI_FY-Wl(L(e(R6Ty?H10>8|_#FwFeHgV*3?h|Aa~R_|o-;hZKv$Kq^to*7G!Lf|Gj46-@E4j(?sf-7^bv1Es%1J z>JNSv_HU6U!>^*14oeq2sds$Bsj!qGj)CEB|C)8P@7cbHyIs8_J?>LpLSW39g|FX4 z{cfTkJR&wmo}-*yFu#g9|R0}ns67+97#LWcnr`f9{QH9A#EFJZ@nwIeDLpu=dVSaan$KR_e^2NCZNs=z>Adi% ze~h5TxeakA&dHtyCCrqV=Lh!fJ^87D;lf`N0WO9}37e1ySr$c_2*TuRK4c{(kRMZFU*?+sC{(>S_;h#QbLj zZB};3cUkxUG&57}*&R{kf7g~TU1MDNs>J;5smPP}Z^u_oT*bh^`sCP*hBBv8QCY@` zCl<}`bqrotki4&MzxD5l)9->e&vZ(q4ga5HSKpDh60vX=G53WgIhOdrmD&wm-f>maK3Y*LC4B*Da~e26zH zE?d4$)_C1HZDxiA1t+4cORq3kF$IY-EHC}S@WMF3}rahncMo-!=>UYWEq&?V! 
z3=9FcVycgI-mBskP?^C|@S?|ZUD?USnqCZ{j2W?;kJN`-a2A511ym|8I5P3-w`=<) zTZI^cR`_xJ7W(CW<^G$!Om5~&)C&GS=)9VDXtn>30+6G?Tj5g|u}t7LpUQZ^fWP74 z{Q1xOq)pG&hhN&iLFmEq^1qvJZ^~qrWn*9f?~y*my0hT4FoP;X$E!x?SWrI;w9UsT zo{O=Sg(YySxN0qEeKlzPdV>(dDS@5;yaff)&RzQd^z>FK_HS=Xx1XQ;_S^fVMW797 zT%bMWoD8W)A83YGO#g4NO3Lv2o5^>Y?Qc(5#FBCM;-kD@k9e#=X9j@GT)@&$q_j$8 z0q6fXM}v|XyOUEFOPf5I(SG1tcwEVMos0#b{oA6tXlO9cKkdo{RfUte-2y7qn$Xt{EU40Ij6hPmp2x%JVHkMX{7 zPygZ+BKPUlt%Z`;cfPvC7G8h2X-eg3@iR+}7~rbiWbR+DW}0KV#eVKZ?+MqBUq5eo zGS;Lj!SA*Hj_TIgm2XQmKTi=Uc;1(0B@9js3=C6*nSXsNy#Hr*pq-eE*6hi@!mC@) zKN0--_xk7QYhOgjZD{<|08W4m3=4Dub#~l2w9shc^mZpZkzW(_SMe-Z((v#UL-(_D z%aXyFih;qwQfd2#qm$jMy$wV7yuPXH^H)}vOw&*W4OxpVu~|^WW3#{B;%YVildzhAZTrjGX^qFUMcx`P)>vuK>&<=;GF5rCx zUZPXELjL>+&VOHSpMUeUT~7A$U@sBPtY`B~@BB}1ebzT)!o6LKZ$4)fSjF=o_Pe?H z?;6IJ8)BFor?@*TWcj2sq3Ga@vZp!W_Dj!cvxoEvFfc3-I>5}pz~CjfhtW$s@^9!t z$F5mP9RH7fGJS0|h5gL&r1f97>}#p76k%zt4;DMqbN2SXQ(^y7U|#b%BmLm@^Eoa{ z8-yNin#}$)qCT`h!%XPH``no83vCuW`co|WPt;6ddwt~Z#TFa*zt=-GUP$(lGd#dO zS4-c0(t&x7^CYxx{&;HarxUTJUav5OUOJUy;SY#Tzh{@o(6^hPl?Wc7nnqN0C#3fnkd7 zCcE>tzg51s-dgxhT(|n}vhREsHCY`L#a}(1SMc>bE9Xlut`$P{ZJU019|(8*ub}or z`g%?7Lh&EZGi%s5EM&R7Zq8F}CXQC=p7|ChtGPhyr*z{#%kNWBWMFXE8CY`v zyH~&|Pyjb{IWJ^9CAcB$RYd$Ezm2aaNNRddx%&T}X7Zh0VYeNd+hyaYJy-0m zKfB}Bll+~Zy*tjgU;VE8(US4WEB*e4+50M1`s--8I%!*i&Uoli1Z|iJF%egdxpCj- z!^YcR-Z|RW-f%OSrI@{cS<$91>4&DX#|Zo1Qup4g$@qk`z+GvXy|mVUbB%5CtxOq{ z$`8Ed-Mm-5L4@Ix(E=+528Johpe;KN>m4Frhi>zWV~n?dt`cv%V0#DKiEPuT*t{g3HT7tQ(T#k{bF;gqJbYtuJfedg}};qMqKL5HPGVTPEO z{HcSnZkF`#eNX>7=e>@%FNsbHR;^~!Sa|wre4YHW)_=OQ=am|-->Y}D+o5T{7XOv|8yaSFF)*BBTE*bt>L|*< z@Pdm=xFYv`;(DG7>6VKx)utbBspI_9^*lVTPi07)%Bu22+cBYH z-?!zpyi!JI_7@l3)n{OEXjl}=2+q6;jfYrFV(U&N{QvD=ZC6x(tNO-Uiyxi03m+tZ zzkWDZ^>$h0kEgfv7uLtTk*R8)YA>#`MLVzl@#OyB_fK&z_`{Gr@7}9pJ*!Xes*TxT zzbqh3>40l-`fruS&(m!m6fRHh(4j$olDYbzvf?LS-|`L%kr zbba{52EE3;bzZSvsSQuLrL0uAecE;Y3H=KdGnhLy{vN|B5reu%OV)IO>Qhi1&5+s^ z`SAQQZLcU!0UiDo8yad(F`s(0zvk>NzC(MbFoTM2VOFVAk_$F4g3hZ6@JjFkE!vN3 
zWclH$m>A#rC*+j=^xVZxY-fxe4(%^uwYh)m-L&;T`)|}q?+pS~j;ss}B~rX5#gc27 z7#!U7W?J4>f6u)rsPX*T`0FeG-nA>=rZ2rhs4=^K$02Q&5}sBukDUwdgX$&*h6TYV zPAy>xVPLpmx$fsJC;bh}wHT#7iCtT20%{sOyTA8PyWk0L2d`UmH>Piy!sG-ywl^(U;|$eaR+Te@D#Cg}MKK@kGtulE2ki za@p4SwZ%^(5C8dh=rzm!+xZL(C-^zvOkVEi|L@yW#%Jfgoqrf}^Pg8tVge`QRCxza zrmr&F)AyX>U;@{r8iFZ-({C?Z1}YtH_%39;bFO7+?CB>;THDWohT7uYPv`x5wDjb` zMQlyd^JAWGpIqws`l&;(wQps+WVu>(p0`ah^n7qGb+)5(+pcA2&z^kZC-vv--kM4BcTRex z7=90ry*fbOaRHHK9fJIyQlafG?)M;j)_iTIFeSSkH~AHP=I4Irc+^p1=l5*;PfRa-tQi;8*O_-MHZU;n#)_pm1^fh|7I=Ut|q#N z>;9ze2Ub~&Ka)vgU|_fqE5O9?ASUH+`q|Aq7rQRXi%37vEDO$?>%}%BJ5#+?=tQ!@ z{4Ch`l;=cZZ8wzI3o*JSQ^SMW%`{Yncb^{3-qG-j<)d^Pgm1n2>1qdUGh( z3eO#7S@&N~*mPhO*NY#QlYUPqJ#oh=U2U5C*6qzq53;%UMz3V4->O=p$G~u*RQ15# z>_>TjrtySzg;z2?5!x>Q)rS8W3&%93E7k`m@oNc!njMNOwZ1XV&(Ft?T`5;JoWE=8>T0T3jz#vPfsflU|`Ui zq-r=b1-8GJ!GUGdllHZrR=?Y0shaL%_K1^F|7%=)uYbNT*Z)(Z3J>Gs(zzrj6e-M) z0ktzV7#OTf56qrd@|c~0fn$-tx`Nb(&~EW(F_I2mER{1^o!PgnEjr23%A6s)`QP)X zL^cM71;zqQ45F^-pi`n4HYtk#>-D#r_eXd7-vpidbu7httZ?5>b6MyZ(FEl*-rgwJOg~F`V*U z+{Li^eY8_L6NAIWm)@Xp6NV>!YkshQd-qY5vG*zWvt5RdU)8w%&tcI9Z5kF4wgDN( z@af5{^}25YHjF3w(AcFETptwmAKHtRzSws2*c*=v=&{!BN%Wb)Q(g z9H?yv5oKL?is2Ir$8`2R>gPYfI#LV_UNWI<6Vx39LfI59!#%aaLG{3*so6o#*6z#C zJol6l+M!}#n8K;W9|7*~Y4Llcg8HlAKnh3%6?E;N8oao66uTSGWriMW$Dq~u^K@8= z4*v~@N(XRG0v$#cxWcGm64SMw)xPI~QyW~1(=S`XeROFG!}oQO(}Uy+PA^N>1bGz_ zmRh|PsSU@bRBG|>_|~rdLI!S_XN*yCS^KAk)$jJ)wB24Oi)1(FRCbl~Sk2GV^k=-@mUGMDIkd~n(BSpxSM>A}4gQMi!#SsznIPN585+DE_jR0N z0Cng=-D^-2n}K14$rKTT6)XX%3{%-z;6dTI!e)UD)dT-TH zZe*YLvX-y+(BxlniUE66V>;hyDkHcLk#O~cvng)XaM8~_~g16P!$U5 zH1H=MyZZA}1N*nPg_D<`Ti^_}K18nhdv2j~Qt6Okv6(1`!4>mz5P4; zHa<-yE&dykecgBMKmJ~EwchFcqUsj~Z~k4Z-C}R{RZXWeHGJLkoSd6m4qok9{khBd zp68y)44}L^#e$h{-@B}+>$`-@^Gj-P^IcF>bqT1rvLO5ViPbULzI(G@ThBPBX}GHY zO}TRU+H&PG+lOp&xBS|tXt=Lp-^t(R|M`1RegNB=UB+_zl=`PR6jUBm~ws*e8$ylS} zvQHAX<8RJdld$k>>ora}@8dUj)WsUzIog+e+dcBz+Z#^DxBk6hXeDi?oLZ85T5C_* zDHcsD_GAC_ZGT6-dRCpva0F3DF*11ds|z*nT)i&$#m8-PcKo(1|22QQ@`fpfoA%|* 
z{-7{zx|~bc|IKmHEeC>*olssd<4||Zr~F?@F%HwB_pv-@HcX5bt17-6zq4Xtc2&mG z^=^uZwWpV*?`3oo)$*y>zhl{{tM@l3XqI%pUBBl=T3q#9{q&VTH=Sv({E&WLg|%ew zGrt8FbgnzEo(XhG0KhsBz2(&n6NisX6te@=M* zp}YH?Ca60YPGKlsc4Y0hDt?a@)0Wo0dph~fOkwq(uY}d6tp3%a+3lNYko)Y-k^()} z299;*)rWJoe>{0S`Or^k&(*cNzV~TzG~I6RUt691E_L!B+oju9$`qi!?qGt zmWFbn_e;Ej83Nb@7#2)deBHY&$VG1&&lW9q&iO)4)yzT>hpxE^2c%5B@@Xz-`I97t z)AonzpDuX*=mAr8kgYw z%0TBfTsR$YCuL>Pwe0Pm*+U;RcpJ{xYpCA9QZHB%sK>S`;?cMGPOpaQJKoRNY0mh1 z=^n4xa{1Ofe^Y(=7w3wIEvTKrFfY^o=7fNF<8@{)6%#xv1@=r|5)#nI$SwL}olCt_ zoXK+KKjAV}X0Ie~&Mszp0IK#L&8S{#m%4NRr!|T9mIl6?lGwdyjRNm<-%qD=Wj9Y_ z`JR0LZTXLiZTvf%qw*S(ICQ53uJW1Evd&y7(e>KJB(DEXCnXksy72kqk+>-$p<;F1 zvp0G&{XY{hZ`LnqF}3UO&sF<&N}O~Kp84flx(XKqLxYg}#>1O-o!A@5)EZ+b%B*=X zj%lCbbk9h~l+r++C!k{)L|M8PPl zhYmS-DxYp^spotv#h{rvX_M0I*0`oUBFs_>nX;NpVt4i)(w`i??5KC2=M>*f2SYZr zOnPKL^@!*1?MJ+v^rp3@Jy>}C=VWe|&(-of_3!Rp>weo{LCfQdOTSEFP+mGyB!1UF z(ewG=r?v11KM+6qj(=_X=S!U4H>c#bGF|_FYMFJbn0HL3kN1u@>-j5dU)ohD1$^@N z)NS7QZ(DE6H^aFB3@zWMzO*>Fjf3<5r%jnPRoQ!l9N&~*`M2m<=&!wBe!TzpYX08| z@8({qJ}x6Vk=cRY`Rze8BQE|_|J4>X3zTdd+g1o3MX!N=dCh%a7n?F$vfod zz8c-WTk!^smuBQ`diI1V%=-nW3WMjzuMf^kO}@U5^G({>5KF_#`z*;nx9+L)nk!zj z=uw~P`Igo1dc4oMnd+UMYFqIB*#GONK14>^*uJ0Ya(+_eE2-<&DOcy3t3Uo%dui_t zYxh(7-eMZ>jVo7r8cx%{yl&3h@B00-AN)11zsXfE6RKd~x>fbomTew_3o5hc_3eMY zWKQrUUrk1zN?H43I`#8k&DGRruNC`z_ge6pUq`Li%?M<8p|WcIpMxJV->uzveRu!we$AiX+;}5Q{1utn*2qY`IFYoT zF|u5S`IJ2GGtk!c5*HSsPwc#suRdpMd~szu`JmfeR+_Aak{Sirz}e^&lNHmC1*Ua!yb)sUT5 zrEj+K*;O{#^w2l^LL{R8)U7TB4JU2CxZ97b>F368x2nD8|5{tVKtz03dh0V!%>{p7 z#C=y~KOXw6{`Z^H)pskG^Oo5E`&#|(U-R;viOj{qKSA4*@BVxmT;0mFz&v;w7lW#B zkXxcqF@wW&xrUAD9gj3k->zBtJ@VA)opC#lN^w_mM2YuR+ROazVEh}OZAV$&o&NV#^}e2Lw9Tj2<$s@-&X<0x zYZCSS)p;I&x#(sy-UFLtqkqr+-#Yp3UHh98@5IgDY*!q#$@g^Kr)Bo$ultw(e;Z@} zUz-2FYURFzxm)&K>y28Ldu7}F|9RWvkADk&=aDgCYe8zzg&D6~Mef>9I~pU$H0w;# zEZ+_$4fZ_>GL5b6sk_oew3cbkuk}{wpEW-(zAWD@QL!*_(VmA}=9fH=T6yAc{Clpw z@n2%!&wfxHe7KJc6Ex_)lgby=y3G|z2;CwrU3#3CJ{yHmQS9^AkDf`cowFy|Sy9b89udly_?aIEF?>Hl9-xBqQ_uFmdm)^X9{`JdU_ 
zs~T&si9fpC^ZyoiT*0&pa`&dQ0x4-?qvcJ}<(l1IZgyF`6Gq)0J7yeW`5FLHBZ^0v{&sX(dDbJle zEmOY#?!T3D)_Oi|dZ&F^tUJz=W!OF^@5&Fe%bNZHhqC)4IxfnF3 z9_~u>tvdI5?yBUke7a+kCzA%a-gf%I z-q5tLK*=_}%F1|+&cr~;!)MoCj3`aNb0~i&W79$gr$as#lXp&tUiR(s%q1R6J9I7` zWbEJP9dK3Q*o6nKO&eNL79Q+hyZ+~$YX8^|ngAj7bml74Iflq|Uv#Z_$OSPi7q3&a%}V+?H8=mVcY?+qEXF4T|0i z?{qJ95q@@0U%S3MO`M%ULqzD3>YUj@*Y})$^LNE}?|{bK_X`cGuD9IxuX2B}H|4$L zUhl7QtFDwYaWx1D{PX<4&d}0sNdkTA~tbp!?v@}Z)}@8ZRUY> zjqw*=35T?0d{Eau*HQl`*GR?jcy~}tNR(J6_tc4tbbnY&oX)?W&GdbmR-VSqnG37E zz9()FQD<=QPkJw_`fqY;{N#JSRsAgYGk0>li86onSL6==l$R0PWuz)Wt;avi4uMk{ z()X6_u6}vP^zJO-19v_y-t?6{@LMwH|HtZnUNhJK@Kkl3)V*n%cH~S|OQEjtyjy?T zzKaA*TKY8mn(E}4tdk8hrKA04us8Nex87Nh?PX%QfK6HI#fiD`d402V9v9va((m4P z)bLpsZ=H%ky0n4khH24DSe{va|96pp$C;x>!dc-J4C<@B?T+(lxs>G8rg=R|VRqj7 zarNqL)vk@d)Yfd-ux$!g*k|k5m;${Q_c+x~Y&r9w>;3el@y91@SbyeW+4@)ag33x# zwZkh8%xDqsb;{%5ZYXk*OY+)qeQGtQSAT)TjJ@JZRIcwnd9N{WURL56VLh*d6W;F@ z@#>!Rok3zxes}-rx0(Aku03n}>D$Eyvk?k|j1uN{}Uz_}vr;5Mjb`aMIE&cb?90k9-+IehL zTN)PdP*u@G^-RfH?;F>@Z%A13=0*0rTaTtP?em%WzgFk+4w1}T`4Kax`)y=f+PkNs z>3h_Va?8a9VwD}Pw(l42xoB)HaW3t>%3k@Y%hg`}m8e@kW5W9HOYiv}Or7R1U5gRC zf4-+SyVP^b!>UPjVjc$l+F|Bv-v!}H%AChHtmpBc{b4xbyz3^}$CfrYpF1g}%BmEi(SOa=EYcfmEw)+qie9bLHwUb-J)?vE!V% zi~E#U?`LcIwQfnBlN=+L@rLBcP}c13bF0y|AR(x~5(^;pj!vR8RBn&d~iAYLgb4?poK}X?mIK&e&kx@JLc zFaF=P+q>m!9?x*EMxhUl~=Z|za!>UdO-g6oii_VnYAXzXdM#{wp%%?Ma1W3%eyJ% z`ET}}P;F6rYw@6$*-pldYh8knF{kye_nRu2zs~+Ud&;J?hxctyzSpaxF z&T=QQM9vB!6IC9sjw#}eGfY=$?VQ`9A2~zyMhok$jPYU57JQ9AesP~%z5cr7 ziBn?^O*-d3?fr%aOaA` z%fBt(c4h8^JHCdF7B5s4p0?i7x^mvp(IPf_&H5IdYv=hN)?AeR>r{_Qr( zSNfa;?WRrPJN#>z_>RN>SO0Q;%NNIdurezC`J8vRc)mUVbL&d#u5au5zAjwH`1bif z|E2%Lf4vuDU&p(4vDrNH9a@$4E&mt$eDB(N`=9>pu>GYvxr_fhy)}61bH%Xv`hs%{ zkA9E-Q=OahJ4w^u?@vPVJuU{$e_mo~%({y=Uz=94buy!tXyv&Vc3j6^b+3#1zwk$d zUeAh4GaVCSryWfw<*U5hBFd0a->8UC3WLj=+o_|vPt!Mw!-^Vt;(|FH( z{@T%t5-lt~8{)5}ExB>%qp8Q{vN=i|4-c@rT$MQLHbGxnMP|j%Mcv8ySCyJ{mMCo7 z=5gWa>70&<1y@5@k0d-?>69aSghNovY~Dl->B)wNrZw%I&D@4VCR!%M%sn)9x1*YB5gdjDS+hfY#%{B<*Or`);2?f;j5e13M_Qqe2-r^R^W 
z?Bg$O6jBzgTYdgPNi0LIoBD5&`TS0A#O*B>FMfV%v&KH=kI@k?1p5x3ZYVci*ZM~! zaJ~(oRsD%J*Z+cEz0M}*oR#Gk2eeBhsB%T!@!f8%4)zAq7LDh}SaU@K2; zsZvN$i_L8Q`{UflH;3OJRWS7S7+$YQT(&oM_4ki>K1c@ z&z)daCiSp)8?=^%J-wpU?eHgE^kG|VcT);S9mg3(1!b1{kIlxfuYFIw*%RZzo5(4!$9&-QVw&}~8*1~@BR%_@{*D3Z_6&c?C zQjGcg{myP(@qdSXck&qZfB#%mEzBAx@XXQU*QxuK)iHVJnpM_JiTo4%?A+}6vK2Y` z|0K&IrY@GBqPX&^eNy*jmeePUB0kIOt$BXBBStRbMKC`DODW4Q`8%6;?2|vWx#TMU z6pkZm$q)ILI%;>@9WVYD{_pei&&e8-m%0Qxl+4oEBCeRzX1FpmT4_pK?UMRilTKXF zU|2Tk&eT2AgcM`8^gk23=JxRas`vY+uZ^_WoayuL+^h%UOji%xd1NUi9(P9JeBJ4D z=dHxfJhI`wSD$u*p-Iz8>Zx+b(%Xd}-A?c+b{Prm*}1PcJ)HG(M`ZtnRwGqUj`Wwd z0r9S^Zdnig-ZPq)wA>7dc>4d(F|#!{=EitWY~5*oWaj?lwa0$-itf@;lt2IR7=N!_ zyv~I;^N$@a-{hb3`_sYio6PHqfAt*cm}_%A?ESN8hp$TfShXnm%4cTnYK>4Q&BN8w z4}}+VEik@+J?vD+BjZOuHhe0-6t*wILjL)~9Uqi9cAN|Fimj_#?Cu?S@k+M9^z%gt zS5<%Bd*N{4=KUfkGo3Gwx|lX5xo`2l_j;w5S!9X-yfwcUuRS{BLZ;HylWb2V57|64 zN)Q8bgCrds{@tXe}Zx#Ps|NU>0OQVeYrBiK%Yn7JN z|6C`&DL+m8`qa$3Dw0cl`Z(V=Ta>GCzERKc)_Hwu#uYC?*$w@yRL*BJ9G+BuKc7m@OgPzXbZ(r?me(O}GhTqS3KI-aMYD+(E-O%n?SjsiU zdAojAeE8Zaf}d|+Dc!v-Xqk;xT>8cC#rM^s?{fUVwA;mgV)dJptN%aet&6otPIxDH z)PO7Om)FQ(c2>t z>U$6StWNmSdhF0imY4DCX3gELFFw6?apIr1>1odvy$;x9$mf(B)DPkrTX7g7sj0eNtH7)n%8J z>gK*)aOQNj9P|A@K392X?_qqxxM0r<6RXnGX`d>$sPxUd@3isKo2Xr_p6_n88t?WJ zyWV+qeUAJgZGl zhsNR`J%^mSf>uu6R@CN{SQgqCvgcS`D1*T1`rF^`-kg`-pCY~O{i(Z}OYJ*F{buaB zu>JAgOFBEV^_}%5r0oon()s^+Rb1HQttGkU8=lBTo+!7iGl;G`+`F%bna#UTRQKh} zpcKU=<&)T(Ya^%Wv_77t>)56`zj$L!q_M!4oqRF>FZ=Ev^JeUdZf zB6xF*!dPZ|L@u9E-m^1hb4<$FQ$NCtuGxe~Cw3dInEqhjxu5*}{IHv(kH6 zj%veS)7Lc|-4mbg*I2OO@5=Dic~9+kY`Lg)_W%EFN=rgn|E>Dad7R;zKg%tFC6`Q} zcCThQQg%v@xpDW*g6X?Tm6jCQX9n4vdTzc>|Lm{068^QjX7tOoACR2<#AIFd<3;aU z%_ludyz0O9`NZai+_&pgTRdOoMqZn-Lob$9VP3pO>Zy)h{;wxwc=t5V-7;0=(f{sBPP^ay@2}~seH<+3>uP_+OR(X!f_xD}LTl!;XZt@rE4{f( z_gKugTPYp?W^(ep*XIhlSA6Sr$?oIj-tXpq+xIDVV;9%=s8F?YXGE9mWm+AxAYk{) z*Rwy_F#ML=vH#`1-~Wzht6hG>+_6|+_?+Md!E=F34`dzw*l+wNc)UMAUhl^mYq^I^ zZ@f3{+wekphw`2K|J21z_p+_|DH9x1+hD}$tMH}4=tD?{0ZW~KICID6<;TBmYHASB 
zwo|#Bb9GvbU-60C&-GW|JFs)nJMp{ytP?L-%6d-}zNR;4*N2^D2KT-;EsAD6AW`vq zjcah(x~w^+rqZ^DWG8F3yu4mt{OokIs@Pf~{pwj6`~njMCasRVwEAgK+?vFHbL00- zKfbtKL3lMoifOh+lh`_wvuplGw9e*?;z-Lm-c-8fSzeq^Rr}HJE5qII{%_X*|NYh1 zL|HD~gg1w64HHx3x3w>7DhjA5dHl`!_=oLVH!Z4R$*D_@mUF-VDC!l{2kym^|CJr@ zOgns)kGIuPMO8&@zlVQ++c&VTlPDC_j|@~fT4 zOk=0VXFvWe{+{Fgko*_O)AH1Ae)ihE7U6sXzx$mYeBA%1MDI-f^EctetPD?GoE-m& z#eUc9UecKP^JDcKJ#)^j?-wzqr8vd=7XbyTDVSVY} zzJGbD^Jahf`Epg*QN!hTzkNGYy*NRp>-`S%V=Es0b?IB3*0yefci**Xm2MBu*!;EL zFJm@KXui_U*7)iRI9X)2VZZp zd@nP9QZ{#X{u;x1QE|oPufO;1s&8Lv)w1PE7O$X^fH&i^?0d_9yIamFPV}ufyefK` zQ9S{wbf>5{SzydEuA#& z^2x0iDmU2LsB7q3aenKq%Dwh;>GN(Y?cLM-zGptOI{M&%^@UeEm%aBXIdaE%A^*p7 zh4m_bl%LJ7)=~TF{d%)Rcj%hxbIua(n=f>nH4WczLi6LV-Rbv=&2yK&*(o|d`{L<^ zH}fC96ns9Hi%;+7u-?VzCQni;D*xc+vgT+een5z#my=6CwH>ER}q%} zC4296#8c%pQd{OnoK>Ax+1u&0=9lAqsamVm^IDh`bd_!5I)r^iS$6mr`EA>1?{z_= z|I=Zo=1bj0Gdfy}KAz7OKKVTBt=^u~N!2mQU%0n8{(8UZ$b!jd&)e4of9pQAJa|LW zr8YjBZytZmh05O6{P2|a=C=Ft`S&LNa66+NHShW7y~~rSes(6*jAOT>r^4KR&+pfZ z3|;@r6mMRno_y=L`17vHyjA`ERrZf~B_DG?ow&i;_M3;U%e0?CyWhUOQL|&0lGtOJ z#FJ-?BX8!^`EEPlu_NRE%$=K)WRmW5TPDqNR(scN)M)hKPmk!{q&Z#5U5}*-YFhja zmTmICQe1Pqz_n5$RN3d-!dW(lgD&?x?rGFuy|>~dW9GZ9kGSIQ_IU;$Sa#Yi&BwFs z+`MpxKi{tbrG-5<8O!^P=ZfZaEtug z_i5MEn@4ASx!u42c3{%WG_HmZY<_p2U%sETCu5q!hP&VVo|QLzI6Ffmj^(A%>6IJg zAM8DODn>*1(nock=8Zljh1HuYQww9de{-=qMA|Oa=nTEF!g$KPg#sS01y$WNjs~(U znRmhU@e>1WhAO+u7x%{$_#W5W5yU5TTD$oB>s4-k5962EzCSl>Hb39v#)#BA|2)sQ zoKaoPwm@XUKc%^Ajy_>rDBaK4^4;&(6_bZ*ljm5lt)Krq>txrX!};!$3Ytzwr=-Wl zaGyTZB+QvKeXf0kd0--;JWnsS;Q0>jl&<+ z4t$c6;}u+Xi?8LQ$^CGS|AFVH%CBuFyLrj2hnlq~_gvic_;UWvTD8ghBG?4D z@2J=9QJY=+YKeai&yigp_htxAy4T$IyIcN)t>vzQw=-h?pH|QmW;%1NbahYSJLgND z&byS{Oy{J!T~D2J^I6(8b?t*S;xBU*!tO25-B_X!dEPZ8C->#LyVak|T8z$q%y{$I zF}%2I)`weqo^oNIPHg#~ZeOGSJ4!L#XZPE=1(mwfg+=ZJKs}p+@Ct# zZkOVByIDP+*N@JRdgFcVxXq{47ex&p^frBMjX0WjLO5gJ?Tn=x_UG{(>Pj?Gdd#jm zt82@y&PMaW9%1&{DkY=DJ;v!vl>XeGw%0Ph zbE+cmgc^==R~=IyAEj%Bh4V_*OZD&EV0QC>=;Sr0d`za5PDr1g^QVI0i8gD_ 
zjpG)%C@9{Kc>OaYH+01-qn5dv$^m@_amBhVIstB`6Bov@L|$WG&0z9)jxhUYo$Xpo zE-8DO>`(qvyZlOj?#0!s8C=93Cb2zSkaO}>p67HuS^q`XHk5ry+H_s+%nq9gJGc%! zFuoSbkdkz4NkmnZPHpa&uWS3dug1=L`YvmGtx|_7|M68GdeK6Aq8H6OUOYe0t&sDB zob2RfIid~wO!*U*Dyux$e$R5+-2bl{V-yXRfBL`qzFkhgs**12zw&1>Gv==NqPTwU zc9ZWqLYc0+IpgeBN~9Mp%6j(b9Ke>|Q{U9%xa9p`=98Cd{~UV#xxb_1i~f^}!mZ+hf)iIh*S24yw_W^U-Wn!@6LX{< z?I~j8=<4wBZ!mY$Z!KM#EqwaN!QXCb^M#)oN@m=%%APpYptbheobo>YvwtUfw_RxZ zxa-|_-)N~M{tGkB@&yH7vT~dhn7_WKf4er*B5gI^0&A=PMcRp76{RPB={Ru*)bKZd z@Rzqd<2gT+p`*jY|HCQP)1onKKW@&ORof?O$Qp3I&6RK61*hmqD-V`E*&NEKprmAU zvZ*`r5qofk<*tOaqJ~o^-<~+NErw^s1*dn~Go@C%o^P$jEAyrI%S0^(7Z;bRl&00K z*&ABw!`@u{WxqAxzWvV|u~k=i1io}@c`TQEb;mGs!!ea##}efyhcKKtQ22aT=*`*r zd-7a6zG&M2eUW=}qvG{*{F6Ss51M_M`K#aG)hrS`59ikfF3a}`;(Rb;36sMTcgGL* z8}jqFy>Gl5*5)Dq`8@aaRQsPd!dj=>wfcOq|FbvNZ%dT}XF6DKdt{?92$}2T1y4dt)VS0arqEhX- zHRbouxb?k1saNsz;=-uDmGfEalhW>&XzucSeB-qHCe!#`O1rbK-+S}OU;o{Y9mRe! zXWaU1XUzS7i&1;yaYeVEQ@Pijd_0AB{>!t5o!d-)U-8sl!p~Cg#Qi(P=T^zh6Ft@; z3>_sWOXo+=kgflj@$diI-TUUo?yG5?s$`@v=j*lZo0XyuQ{!AG%=~=LP?X7*A$!t( zFpa}cJjf~wh#3`em?*79@HUSw=*#`JoQi5(zh4dR=s^u@h^N?{DbAP z3mqOWW?E}+@&4-aqNBHN=F6Wut}e=Sng4D3w=Djs$oyEm4G0*bHnaNW-FN(jq zKRsrb&h4KcCWrNObnvh|*>UsxfxlnhSMBcX;Nb~g#$dqSz{B(X?%tIP1a8dNW_59K zVN2}sZvDUW^RwrP3`$B$S_~;K94@#&$X_lZASn3KeBQ0?+l}U#O0`|fyLl~?k3mpS zP?W*Qz(n}VS0(|$iFMD8%FCYGwl4bWhuf!SUoVyYt*@lCh{=JC$>{&|$p6i6x3Awm z$;D;Kdxe+x*RBpvQTN{y_&@jM)$3`mx8-aua&d8CZQyzSWX0Us_BSWV|B+VrR#LhY zpRKx10L(&1#&)klNVM;bHG^?EjVBy-Vu1 zhqnALel|b*X-7u~7lWi-N4JieYZlwP$wwu;1O+F?wOrU{^4R{)*6Q`1{~=bYM|^&- z!|~;a>#_%0C#6mvY&C5}C+U{S=+ov8~^injeucKoH!-R(^$B%uj z%&~W}6JBh4{-4hFz9ljHYFxiv*zCRu6!E(IA0E3?TXg*G>(3{rc64~GFMRV<`Q|Kr zQ?6e<@z?Js8AS^U&Xh=tX1$cZVa@$JmCw_5%h}(UXsy!4V3(g)XKVf8#=Sp_rI&Ve zd=aWV_CK^&@_%5_{j^K=5TEnpO-L?mRVkeGTCKaoLtkiKE@ND_wS3*BE$d@?bEZ;w$+`WfD;rHoam@HZL{y>e;en#-KBebjSSd&B_$=J*bPj)^E;CczY^v3 z{D0!d{)0XLK}E|Bu7-||4iD~YH7~omKJ2&u|Nn$5_om%tz4?|m-)>*Oemc9#-{hV4 zlmB&goJd*B4puBISDR2Sw|al#?{#mMt*=+M{(0>6PN#y=s4j 
z1t)4c@`F95rts(fm#@BGkJNatkDGY>U4HC#tNYh{&(xts)s~J9AI>VUzo#@T{r}^% z`=*_p*>=-@&E>bBss85hr1N$vU$T$SHa*`s<*E6d8L{=JtF)tFLFFSJ@%pDs#hZya z@dxkDkEp%#jgwPhTK)OGx2=T!UGDj>@;7?-O^~k_F*&%nxTHjJdfPWyIoaj?cje%{ z@$*CZ=lyGMY%HvGK2|^JU*I-qN&|%szb0eB_xs-u{9XF)-`hvu<^Rmi{kc(->pwWB zfQ*u|{Pt<$olE@->hIs3U6i>0#k>8#RvoR(a8S3|nxDTd`Ej^(U9u^o@xN|ZG-Wa^ ztou5(Tifa1>^c%SF7rOeiFUeDsDS-?~gwVPu*|zJ$Psk+J|BcRZ#XZ+3h?KB*?+z2Lv=dHdAExkC2mpW6O#-~Dr4`xC!* zwJoVv5xTQ0d%dVGs4Q^-**kf%dfa+#rlgbmo|@l@2Ll$;swsz`SJ6$g_(^0 z-QQNb9T8k7N*Yt=^S}F}EUtUv8+b_HUF*K!t!BT;-xg}zD1QH~?Y>#aDZ`iZVJUg0 zo4_~uJ=>+G`%kjh`M%?wyBxy-<%XqyK5%3#_qkE{`&Zk24n>9hhK>%8^$9`_ptNsw z;+uW>zn!Jt_Jw~rK1O_q{q=U{09A;rwxD`3Wko`! z{hD9J&rOd#oxHUE`QiI#of!gH1isgQTrQTrTh9K9QQLlvKa;<#pL)o_#U*9e1!hnx zVH48%yn21qoamn?pUKNq{`LO(AMrkrsfYE{o$m+!^6uCC15Xf|Y^VRt zOYeW8_tV;}&UE{=$D#}>e{b)4eY;fE;mEIYE=v{-3fV z+1oI7TvtDBFIICPsb47J<<;xYI{)9&Wc;9TV6uYmukBARr9zS&q#W{L{(g6_=ItoK z8-ia>ezKSPZ9hw#LBDZ#+55BG1*I-@t!-UW4~p<@?`~zg!aenpMX~-T@9LuOHAa7w zA8bFxeL<^H{vPATDuxLk|F7%%kPj^%K?Rg+!b|pWp*!eFqK3 zS&ow&oLGX)a_;9MsSXu5-@jDeabCs$2Zy@Fp6UI1nZHtq;XK2n?}|sZZ+f&i;zr;J zUWXkFo-D5L@{pMBJh6`=+dT`uOL+sL$ zdJW!NPq_r(MQ5YJjpT*LyY|2S=vDYgpm-q8PWo!o*xWLp@-To__rA~w);x}kgto$s0WBu!0S8u-37Ju&a z+jQ$xh7SxDfBr1Yd3jYkO6`O4OL?yrkwZKd(>ppm_9t2L2w$MOXmcq6%1Ug;I;%PCE6a?rF*-``|rmt0eQ~~vDe0d*K0j=HrE$SkqcwNSx6#4zC+Abxf!%R&zSqJT>MgQDC;s4X}q;H3h+D~IS zQYBx>2=dI&7Jin>WBn31llDI~U$aVmMN*K|)8c(5^QLg>?4P{VD*oE4P4d?_Zz<9XRE|44 zX3Sd?Ic-MHCjM)xYBLUe3t5nU=;Tl573tP`EC1AenEAIdXjOsx(N9O7Oui9%ZRInW zg#iij`;ObG=AL5LS7PR`eFSQv9gnx0_5J;q!(Y~~+$82G=ihLrD*oku>%a+a|K}Y0 zTW(d?(b3@}Fk8#$q-=1Zk~4#g)|;K>)$<%@xdhe-94Tb&oe|XXsb+tA{}U$5-)rvH z`0JfmuD7!0_^$r}x%O)FeS&7jB%STQA32LF+fHn?$eQRvK06-|`90;)xlX%Q>b)_x zeEOg3zmkzoOSJu|;3*3$7?yNJYv}#`o-w;%rOnIxuZ}IL*Qgi$=jxIY5Xh(y)X16o zYSQ`t8{Zw@bunU+z`e^67YcZ7E0an*HP`qZ&AAA&aNXTKYad1&V(yw8t~R%j@7qJE zzS$rAT~yT1zVP4g^*`#&8+q^l7whBxth}c8MwY_~y`#gVY0*?Wsq*@kAB%R|cQtXG z&pI}@wdcQ}SpuxwspPSfV~m))K}hiQd_TEp$2awcVx^s-T1uA$cf5MPvpm{1Q=5yy 
zX6uFz;nNxaILS?O)d*J?ka9S5V2X!~1~@sV9Iy;~lvb~E{@VTpdVe1CI=uONYWDT# zVoGX@UQP-;o3P0%^4OHFjvn71>vz49U)kyw8+c#r-M^gs8$YhT*?nIA)Zw*Jksn{~ z{wU+2_`X|vRY%7Og9JVXNv#gi)KAA-jGi9Yx0iSSQ}1tYrAsxRN<8wsEb+JObtX6_U7_{=2onNh@1vikL^V*%QI+S4?Zl#Euq zU{r8sjr@M{%Kpy}-!Fpm>Q09KK$?C$=6^sBVy8uE>HD$ zE9Xr2%+UFMT{mULGMm1+*6;89S)aFB$!Nue`xncnMScxzTjZl!&y~nnlb`nYyz#&F zS!aIuTU^ftwG{s_H=x32r_5-$ET$2G;k zF(LoiSC=@3Usoc8woZNh|C-Y!d#63ORJlY11tm?=e7l$4ZgxhsQDT=Ks+iOi*GCn)2^}y3HScYG6dZo+zYw4= zC@AO*YCkR6Zu#NHJ)4L3PYSPpe)_xV;#ptvOAqH~o8M1fF&*4(6BHDD$?m~a@#m@X z%~P|lo8NsNk!tPDx@<$zfv?-wY-fU1#2))EXjRv@a4eDh@A<2IHmJ?yB4q$>o_;a4 zHd}X|&;EbRUu)0*QLS7Zuxjb0@w?sC!OoZV3;a9#1JXqW^?h5Ge|vlO-R|Fg;NH=+ z*gr3if;xPVYE+U1(L<^{1Zp=1g2q#nm>uSQ$uCX5w0~)M{Oq;2H=R8t2&?Ox54ClD zsQ>eH@z?h$f9@w7tA|x_Mix2yYir9s%>4WHJ-kQMv4TNBP;esS!;9=?>DBM<-urMn zcK7#LU;M#+R8a4Y>*|vF)zWp*yMNBhIk_}E!V=s=2Xz?)6kgs3cP~%p?=al`Z`xNR zcdI`!{zQyT#DK#HzuHUSC8{DLX z_8wipvC4zgdo;_rpNixkjSx@<{E_?g?CVY0c@e3gt~bOxjtxud%fnj!L;4S?uzo(c z9e85F21q9%<5CMZ)YDH$n%It)4I_uiJS zyB#3-&llmTUoFk&ZD#7fyxx;_Y5!H;{cG>m_?P|sBpNmko}$!2@jmatdXD{Te_j&h z-4E)hzxfq?EjkkxluPa_tp4BE``WwpKe!=&Ezb zL{B$xr*>vY-`<)f532)z#8-aa^=6iISY#mFMe{A=0vML)>@11gme&nV;NZ~nxL^EX z=3n3Kdw(o;u8d>&*9G@w@GZ`7I*dD>-+O!W)#_;}*K6|<*YQ|fJCqE{pb)+Mlk|RH z?|ZB-fA*X02J73q@y=;S z5B^`aKL6CZl^p++;BnsJE`MX~16XccVtuaa_0czbuJb*qf4-+!$NKl%!(pdPKD}GB z^XBdJ_&Ixx(^aeu^F6=as`TC*y!m|QjrJvV9RDvx{}Fq;{8Hb!Ra+EqU49d+zIICH zleL@0QvX{$KlEm;S9R6qOVipor~Pdyjo#*a%MvtFVe=$o(bNgg=UB`*ySi3l^0Pg~ zccxd)O4P|RRqF(`pkFfA{hsQ+ng9PEJN59rW{oR2dVOjhHidtCS|7jGXGdRrMQvFQ5!Yc^~Uzn@#D%+ldMW$UX`M=O&89_au6 zVE#}3ezW=g3S0a0SLLm@*Uh_eZd-Kt+7;CTESWp!%>M8E$9`WwYfIfNL9Wc!`r}LX zy-Q!iB&jmbuI`qXfTXhF##gPXH~-z8`1WS&@$WwZepOsDU2hav_i(ax`=Kt&RSY{8 zAAHT|nE7>T_v^P=PZqV_47s&%#)rBkN?$(PEb+g6v9tN+#n$Z2^*bNT(zH}(`Z}+A zd-%P_`A@=%kG0B5@1K?b_-++v#S1x!E^?9lOllz>%S8i+nyK@3xwt4v7 z-`m4lDp@`pulrPGw!KK^X|P;NYh7M$sz4uaV(sB?cT115?0;MJD4eBq&mv7|PovZt_ad%sqPY|fE&%L>EW0vR5 zpC2Y)u9EfJ!j{rh_$uXFr~G$X*PgURlJ1Km7)7sZz&rcckJ(PK98Nefc ztN8sk06g`~n(`_huG9 
zTyo4v{>FCm|GW1Xm@P?WW%_-oYeIa@a!a;{a{Y}Tl(gTqi|(6U*j3bJ$@Ia-<1p_B zz5~jPtVbR`pMSc{?$2G$+7d-v;T8u-9kTs3_b?IiHgjxPwtx~ zv|u_%d8*_d&g)H!t*d@X2}X6O_sw{=X~*-hk0(;Mbm!0W>GNdaV=pC4Q&6k9#7%{j7a@_O+VMP5BA+jTQBhIv-dh zBtHo_gGOblWbPOV-q^T7!Qn%_S|n(2aJPflcU3v#HEM4T7e=@&n$xk9hq3+bOBY_p z_S`9rTc7@#ecj(Pj$w_CT9}ki#Wr{GHE$!kPlLKW#n0zP_hJy`nd8USwcHElYazQDBKsg4LzmpHPr2p>bzi+bT`+D|<4v+q3 zX~RPtOzei!Ci?U|agWetZkaZ9{_nXl1?~?bzN|d@)9{GTp_5$#hwc9K{y+XbFu1_*%67uB>AW#rIZEzTPKc{9t92 zhJ&QHUToMd+q&5r+%=m&PnkQtkn5@J1pb6Y7rN654&Ep@Ctx2h%AllF%hYsg-Esb> zO-FKXm#0f_dw&YFNWxZbQs;$F(s^fCg}*s2I2cv=>4@}CI}N@YVv>`7*4OYCEI#tc z^XHVp>+kmd{K#%Kqoa4a<>d``LhU~si(dZ8-?;40_5bF-KgH@_*>C@S^Y%@}=kwk@ zIR0(%{;Kp-?-z%+22IHXRVth33zy5xJS~{wl;Hn(qI02`oYCd2Tjo2RE-;#Se)|p` zxvsSnyTzV#J2LpE@Jjn`e*XJf_u45#gE?|ZFnoY23Bo0Y-y%%+^uyjmBA&You#93K?f zc5?jwCbXGnvE-Z1-z;|VNpHIEY+e28YwL3fFa16HKL~M$|H;l;n6X8)<#X1FGynF; zx_tYacYJFt|DAaI_=*P?zID7je);Xe>g`Y8PWde4CVYKU;@7S}GbA>5t!6s%OIts8ze)AerffCS)t_8~#M;m2ALoAe(`V8QmQdZPzmM1UIkJmaYxq_4 z{draYY}zO9y611$8fV=XuX}KRPGQXEzmH`iK3dQ0Xg#_t=l-T5i~8Lsqhe+^)mw5f z@1Aclh1$Dm2R<@lpR9nQLA!pnn@NDZd`anGwv9 zrxdrbm`pLiLMl z{nPyxZP;V*^HJ5KTl;o?dt@xa{ATmd_x~)bAL-i%|3A0!9oK1R;}TPe7$8&+WzAe z7CJUx7YqOVaWLM7F=(OO!TE)~hK+??hE4yC;|#?%*?Tb`a~9z&w7D(u{$JVKMKwIh zGD$Iuc#fTyzq9#<+ft^4q63fazdp^`=~3Uveo?)Ck6QK9rl+=ZSIV5OD`YyHpJ=G) zsBk}c_0!;Bf!+7lw>b1Jn1AR9$3r`o!ZlJHUwv9;2pkl*udlOvnRvT=-}fxL!mivY z(!y$|4!PQ&O0riAINB*RSywIbkK8n=Q*IIIm!?L&4^&yuk|1PE88M$jj2}1-Jhi;j8@OZbzEw(dLQW~RIJhhcKW27J z_5e47YRM{w1fECVB{oZlOU#zYm)IS^_Cflw(C>Yp=G)(2V6pGry82Um|Iem>{%^eR z&-7^fg-JWlzgVr(AYb{?ueDxqrpX7kA4mGHKY4x8U;W6J$t^2qRLXCEViG8Hh=cQ& z+v6$yPnf({sVG!e3dSfbS6W@azFDESlttTZ$7g1yqCm}t1N{r+m%N)&!M^1FF=d!OTHh-;uqY+rZJ)X-}U637w&gH+A+CsgQ5LW3S6KK&SRxFs=j~grm~5X~%yZjP z-uq|hRHokNDTj^=HHEq!p8PdC>YfXy!>7%Ty>pM8oydQ!=X~wgoiU6J6V9n*MVigJ zpfTxpDYvk`4a+T6_eD7|`s&Un17kk%ulC!omi;JJZvBNzI&L$JZcn|fBPSWKO;j}R z@Qje)D)}iFTlOBQ)^BD$^~OYcSFTALJ$9(dGW`mD&H^5^%3 z^Y5iEZ(RTXS;@BlS1UX?K&OPqBfAUGOCOfCfnP2BU-Wc|-=Eq64hxWG9a^D!w 
z+WY4W&)JRg^KTr>Po2k*dYqr@p;%|AO_JJ7n?zmC*X*Yl)vN@M$1lm?^kmJO#JVi? z<8~dk`J7VM1+GutKYwa*+w9c9j{MtUS(8onNLj>J=}&%|YcTV{Ml{7;Vk{AH*4<~OU}&p%aHT>3M2)7y{t=TFtOu79-tceehX>9rr5 zjW-?roHl3U+vxoku{*b2p8EORETMy0&o6%8r8QXp|+vnVus|e$G>;I?j z=f3a%AMJg!(`)Ysp4(gh`~6}l>f2NH@9ATqsEB@xX^#%a##d!;a^Y(bSm&8C#jmPk z{uBM*u3`TJ1eORS&p-BJu}4SkVx2YmZw~(#JazhkOhoj*KeO%6eowBCF`L;V+W%1M zQt`6e%A1W7ULIeqC?s8!TjSsVwlrWt;!pMau3KeSGQ2SNDCjiGS=GvRD{svs!CXnk z&a#_63&k&(FKG(-SiQgT>ijtd8@0~dsr;obW6@4la!y7|-V zyM<5ozB#Y;eZz;p#zm~G3TyTU3f}v_N@0b5O?~2rbsSwstKufSY5s8Dj$_`G{)C@T zPyIY!d%ybTF<0Z=b?bt8yXM{&|GG`sUu(klThDLtpF1)CV8zT1qo4>`2K|P-sSHOu z!wUYMeJxvB#i3^V(x_TfP%6N!u=mNM?@2{V!%{`pFZa=Ke34OlZqFZ;n1vN}EpwBC z4%an&5IL0Ax4&k~Kk3e&m%g4@YTqKXru*e7skseI>vC*A`5lb%-}dRzrr8`0b9d>9 zi;Dft`r313Q)YC~M6-%1y85&8ytj)V?=0D05|d-5H?`Mx5mSPb#rv(yvTxf}zweB? z^iDuXy{kZR-;+5y;vUwCryo9Rl03wBJwha8!!xBDN(;)gk4%YGE^YcC?__t1p(Xsl zsoIm4#+zUBO^fb7d~}zOrNH%@YwTuS6+i#{&iAeR#an)TiI^hZ|7TU!SKs+>|9n6D z;zi{4ThIONO*RMp%c$L~|2+Gd*}Ydg`@YI%h0ian`MB3X^5FaXf6lB~_IBZ#Gk&+Y zQsw`aKK{0&J?*%Owfa+=?`glk1uYZE4mVG|e)9kHRO4IUMDK1f{ zpKZD|M>#;@i`|wdXWnwKMI61Zvu4-q&HDEbuj;w*Vf%c^QrF{qWnLFv+$*t=rTn+i zL7f)abMxPwuRJ$5Ry1qlv^MEqQ@4H=KmF^C{hnxX7r{^ZEAMh&|9oN6%$r-~=gf@$ zxqj`=Rqfg~hc0d}Vm-2^Mt&XZQHKBhRtw(j;5+9Vt5mb`?P>P=r(ahb{Jr}2|5w6` zzY5-COTGSS-|>5E{~xog4BJtl98&cDj(5bJZl*N74WZ|@mK{7&tu9QYB~3d^IDdA^=PHX;$aNa zQ*Q^>3;t9S_!G#`QZSF}pW+JT8ey#i|C3kx>|r?6%qE^Pg(c_sRLS*C5%qKaYRopW z5Zj&_^KEwJk1NMp_y13Q_pax5$q|?B*30A-0@ad(Hg7YLYu(bZf?-BT!Twy*V?}CeuQTGFUJGbi^BM>|J<4GcQy8N z=gIssU5;9g=BqNyi%Xr)*p}{l@Yh$3Z)RHxKddx`MYYKPhWiAiY4xyY^sBL zu4-*KgQJYA^BrfI$y!dAij^8R&wbt~aL-Gjze-nY$BNlod<*_O+jC~kI**;(iryE> zoS#w9k><6%V)}~iFO>!QkKeX;d~|=YKJ97#+aNCs2Sb$viNv2<9ep)4l9|6hiTJfi zbj!xmZfOyTl5viwKe5c)>zDPFU)5n1!;j6f?VoQvFw6i+GDjVCxSWgQ4*}%1|@YMgJt23YOo2cTa*PY3Cu&3g; zU#skz=^U{#yEiP8;b82|c9pnQ_@%_wQvTY_4S$Ms^8VhDZ_2I8-?{Z$t#MXY!^Qje zGbS3!F!Y>z`SiZG!#%AL7>-PtjMDS;Vm* zt~`t-L+6_2!!Wr@egP%rd;5RfS+|hE~$E{Z0VwoX)9HoB$MA6U99Z-)yT0oM|_)f@nfUpXshF|O;l#@O222` 
zQ+kinnEjzzg`tO0`)?~If5A1^&k9{=ZQXRH>co<+w-FNSf7MkxE}WVWruL>qplL(h z$A7QB7R$ffl#+AY>P-Gc3xO-nADj&KKIjboX}T%CR$zAQ-hX#WcZbEzo1oiy+^xdB zLtb>vDQQn1w{GFUS6@!7-@NVmCEb|x&=Yeaf5n_zd-T8inScCxYtw3;@0fB!ea%IU z%C7h9vPaVYKMZ-t+sL%=&CRV<>(*w3sek18u;75Ho!!nRwLqh(w*3KaCfuDGcFLzZ zo?rKj_O$j|H~F>a`iW&9j_p3%|B~%oyw5g?>$g^&VNJG>e=625?a$;hX+rMZt>&O< zt~*nM*s>2DtCp+P`=3AkxYE+ge?Pv{%5Hx6u_8T*7C=t>mxAyUw?tdA#HOW7Y}%<@u%S8gjRlq;57!TKhTd^tY)~ zJvAIQhgwz5EREilD!uLfxwSVhSsR>RW}oioZGX2o_ltO_5T47 z?SJ1MhgPaRn>6=CfMMTM-LF2fpI)ZfEL#(|=5yS__t*K8C$%`V1o$vn#vNE>Iw5z5 z{rbp)Up=1o?=H^fpMS1(bI5CvkC$Y+HymyE4Anjxs;zA(_G-V@N2v!@1@oV^E?>Mt zPw+?F-AA1tx?UTJoiVSQY`SCP=3f45aymA9-e?F5tP-f%xa5)Aznkd-e^$SHE8{=6 zwZSHEHN%!&)AFnZ-ff>P%C(|DE2jGOUx81T%ay>ip&2`BkmbPLqzFW?-o0z@)t^Ci@{A=d9P3L{%?_UYDIO`k# zzs-2NU55M5mVfW-r`mp;K0k9#e`rhgnr|8`P9FUI4PhJdb*@Vm#7rqXwr9b%HIg6y zo=y7Y`Kw)M!DLaZUyRefJa*2IN(67`JoqhDnx`Q)fA-39k;Llw(?htpkn^Tx+!vzcb(*(=TCjQzvO{!`j*D!c^mHk zOf}7S+MWHf?W}2b_4ck#55F6E^GjAb%!}N;lr!4)dKyQ`a3SOR>Hn%`3OMp|vI#>JX^L!d z!Y5xOOHBp;23UQ}ntp^w(Y;FgmzT)4J@tS7zgX`-eP92>RrgQ%?hlQ(7R>8>7^RS~*-!WK`>*(EyYAio4Mv>DyF8^crv?54P?BB}a z{NQ_uX~UwTMZ&4B`dgOX-uC|7?I2@U21|wrwhxEw0#v>-KaTCyv9$~Lo4V$Yh}*hN z0Zm_i_&FaBni1`0x8a2IHC~AwdHzK@hUqbX_C5RjB<-PqaN?An$_mpbk>CHPeJSAp z?S{6xmhjZ+=)~_u-070i#ogz6Y_9Eo@yaRL^~P~k<5JtsqVLx?U0=T7EVpmSRnz}_ zm*w4@&AR{&z1XIJuR5U*6XXEmrCS6Sn8`-T$gf7zs{<1-JQ$9C;weJS$wk8Z1v<7{#Iu! 
zrv0_}cjxBX*0fKSTY}s+_?yb@cva}|CwNuLizRE9baB6%C@`xcEqd1$FE`nbe-16( zb#+qH9cix#>-QF{s`Hy?dcMY{K>rHsd)Gb9SFXBzb8b+boZ_UiOS_G2fye1`zx^hXaXgb5=b7#ldT>*D=|V>Sd%Mq0-{POK?TLI-r9;Glr}O7cPyeAl z*}m{+;zW1Nh9ZVkCMWZme*LI z%JOGX(ZZ>l7ETvste*EbGBet0L;iaGmn;YV^e(yiW$T&P7q?H}xiR}-VN2Jtymu>K zTRnE~G2duAx4rP+mx^O^^Nxu<%gn6oQCBn$Fnb&wn3u$NwD#DfE0yPFnyoH9y5s0p z!EZ93A5UPtRB&UX9s9be=@a^^^MB=STeLiv3AEp*y8POmoNp^XN$9minD|XRE1f8^ z&|JzdW#)Q*EBWVh5{_I$RT^mXZK1_h6ngz#mZ zP7m^y397KkYiIBIQuANmJ8hn#h@oI?ut(3q(p<|J-p~hjKj!&;n(-%5^!_C7fcICg zhwR_m`E=`yUi-hi=4w(Ra|NCTl=uJtx59Mi-K`($oupqZ(U`oUeN!mI55FCUm05XJ zt~a%_e9*B7zan}3Lv+-Co9ubwbB(T_wv1@f53LDMlXZ!7@7x`@`nl9s?+DL5^`X~) z%`8=QSZ%e??tE>$sh-X}?^z8Se%QRIDVJHPW|+hJ{p@ zj`gx1(N_e2^4<7Wk(=6ZBKhJQIg9i?*;iceu$KnKEHnJ8`?vmM)FR&F))Mz`N4mxD zd@yU>Z#@x4e%t6+MFwu42&0(=f<_8^x!=30oL$@>|KZO@g0qm`GY4nRKrw^Zu`+)v~J3yr(BV-u$fP{pZhN^VIz-YK}Rr@&T>ft~}8@<@3V%_ovtH z_%!k0)9U&UUuN_#l|4FX%WgUQ43D!G$5bZWQ{ZJXB0*fSx{#^>;fPC>nOiqod>Nvcfa?Ee$__SpOx^OrqNyE$j; zpVhn;+{d_nD<)7Hejd>~*dCUFG}(i@C2oi(@#fb!B1fk-6z>I`6xr zyxH^R;_YLxY}dpOF+~_Q>^Lo6Uvi@BZSbL`8|E%eT{CTl-;ZVD(7Pa-=kSr)lU0|0 zH1T%!-~I5@t)f#E`)fZo_g*i4+`dY2ilCXK+%EI79Y=nI^mO<0yxd`NfvX{S^ZAo~ zdNI~h_|uEezHNK+^{>!=&CAZl;&qbN>ti<9RB!J7@xPB{#;GOUE{&J&@A)_TWpotB z^)ri%Dy-J0YdOx`_Pf*h=AECAlXHSz?^U%Zp{I9^d`;Rikzp zLj!1O!0b<<1xPpe}dB1=a%d~t$IYL*}X2-uCVH{dHTAh zrfM}N`~4piQ)jon|Gu-M!^bq@SfTxKt9e}WQ){a>?mu48{6J=PPas>e_bi54_gAgX z>+LgtIqBMz_O14^${g$w25p9nPaRjMZrpj@#^nKXx?XtiB}IR+h}G=tHqV|a5OnG4 zj#CPOinoIs=ZD_2u;2f*Q}X}Iy@ppCxfblZ z8a~HaPdIB`c;T9+lC1L_`=QhN`szwbwgu(ie6PfgVU)jH>VMnI;WQotzscZ;5Xk5C1XD5$Z z#Q*l21#3V%s;bg9nV!+BZ1!n+b@GXG#S?9Do$X3F-bQ{I?pl}GCeP=ubczBkQ@CUd zDtXqlFfyCpdaSIywnMY!%r!2(X3Z@#wv@eEp(cAj!Iej%;v&!P@^<6m zJ+JHqEDA&aU3~vG?8kdX)rPk#_TRpAku_WNUv;X_ub$kM^KVZ)^j{-*--5@NkNeNx zcFWaeN$@$#LWA88rn+m-uMKnKcV;>st~EL3(cif#U(>9AeK`OA^s~@^UhlbeRDN&P z{hAjoW%(_k>|ZwfnjSeeeG?J$iVI&nt#85hq;_;%nep}A?42KquHA0m436*GS4*Z| zViwj}^C#kHTczjHx<~Nc(j7B{fBrXEUwqG-$^LEoHy1gP>1W=|H|+eF5w}+NY~}ME 
zQ;p6Vh%M}SmM>qmU?ph1svu|s71J^w#su)XWzeQUMF;S5QBb-Ag*ZqBbZIGQcLykV zKpVk8dp$u);EQrW>Oe6HQlzv9lm@`cK%oy-2VPGKQUqBD3Q~qeRR?ia;#0Mxd}gj_ zhmY>@IJ-%p#aCuU7Y~7#WGX6MDz@30pI`Rlr}NMM(_^Y!{|QR6+^c!~_056g>dlX* zZj=zK%_{s?H{baG9ZuQn{C1yH{%!o*;jw-pdqc??(B|M+Gb82dkM8Zie%XPRqSnbD z1+BC*kN&(=`%~xpltW7%@7w+UggUoU(tgukNo|$wcGxbpFh2AU36^y{>tm&VZS=vj~l8@?|r>R`gZ>f zJ6G0*FS;B@L7O@L|36sGzIbzF(aTMs{g#XWIXc&WnECf-%7ehxV!_{Q?Otpbt9>=; z&4J|pC#Oz#1~9$3vBS>#&!6I(pQQ87eEZvfebe&(`RnGSPu9OYp>e~W_l>{VSG~R9 z_U+DY+vuvVTW_vA&j0Ma*?iDG*nD@*1sn9X`3Ev81TB7bYn#IV|Bam6CKzihK04=B zqj=-|&fJ=JxAvOf{`sMN_2dV)ulMhadDr~=;XaoXgWqd6?DOBtv|!`?TYrx%jES(X zeAIi#J7JHD{hqSV+X{EOc>munyWsBvm)RctvtCMk&Xm^JCh$-4wf?^^kA7`CJ$?O& zYk4cV0=539-ngA!G28b@g_0J-l77W)yxhWtxe>ExWbnOGIQe82-_+_WLJCRyukyFF z&igH^|Il#Z2m3n-p+|0~yu7OY>i%!hHA^3=$u`vqz7u9X@YQ_%_lMnYgW2RJGk8Ti zUEZYkN`oO$K;-qY7-SlCNl-Z4M9(&xc4W3`vA z8k2upRX+NU z|6*ADeD3V;(MOGT@70%#S@r(^#hNooGDmamzAw0W@81rS%9M|>@5|1t-(?woJimjb z`sm+Vy+8lRvnpKspZ4DH?e54$cBlMqyKnRtkZk!T*{QSGH>@}!Co7{mIdlF|mYFXr zwtRl5Ub{exVM+MHjgOMN)D6wAo2@&i>ud6A)4?fzjN3&gHm%fMwsU%-Bi)mtKCnIq~;4kAKXk6b zTYr|P7tQ=;`<{Nc?0#2~;04S3ce)A!L*2H$-+t(?@wvb<|BK}p?SuIL?9KiC&iLH@ zleH%wY?r+^y~4h7eMS3^y)%upj%|ANd()aPvG3E)JfHjQu4_Sm`Sia(FRZ&(9mp2+F95)sCe!b5_>X_-#mh$s^Z@==~D7R|r(fd=) z*Pni`Ub^?uE#-cO)FXdCXgVGDk}Zev(DJ~Et*68OZ~;` zTf7gnPdNXCZ^N@CIV)m}zq{-gdRmz8BQdjOsiNTAd((N$LbLDw<(>SuV*RYGviFqx z9`5b>*yI0eU2xv=WV_#O1&+VF%cn1y^6_1s{>jUibI$lr-~5J6q)>6q8_#2Pzl3gy z*0!Bku*LtD`&Qk$#b)mR-S*ZloAzttHveaPH2A*R{N7v@EhNftX|apY+L8x7GZt^E zxbuja zV%!+B`{z7kmUuRMwYeFeFY&1+wRafmO1JEe_3O$$FzNWuQ)&+ib>9S+yv<$yw%@d& zTexFUz`dMD_V?DTc4cIJBjNgHd(^~Tscc6Jm^j`yO`LVRe)A@mpnwUY$xkzd|FktU zGECB1@hDbb`g~OH{I&N#zWNf84Ft~v9CuiK?&U1WBc#W$rieDQtT^)Kt*T=q=+8>e#Z^1CH^ z1y^~??YGQ0a%Is=ohpHjj=$GJ1l~<+VMsWcKaGLQpCep!qTh|+)9ITNzsheH=sEvo z!OQlI^URt8`;8WI|_^-nD;oqx|v6Xd}^dt}X{H!i-tP1wmQci-iLc@y;x6}jZh4>ng9 z7iBQ&eBdU{l)svB(wl`5my?w4YRJs@-1RJ9{lZ1pH93)MWM$)y-13`t%vk6D(I={p z&oU|~eT#nlcKeR@SD(Co7pppOSj?Hculvk})Ys<8ys1sgmU=9Dd!S9s^UI7I?sE#a 
zZ2S_dFU(PKJ@UP@;-1S-YWD0doM+T%yJ>FuExAnPig#PPZtAZKmHTZH9l!Uso#T;j zntNL;8J_UG5RlM_PF>Wt>CEC1Gat8?IZB^1qVzTvZ%hfbQfyy7OD9q2y5q6eCT;7l zehLo`bU*rA`yRjWr@SOS*Cq3Rt!1^noPSiM$1`b8_wSTP;$jO$)bj2G?`m1eW4=dt z$L{SyOagKN8xsF|-~V0FUp{5eYn}NgcehQr_;$wm-!UOuE|*!@zuhYRG<40I;GF)K ze}ylJH`#taWoZ$<@vZr5VV9iw$=UObzP?~oNI$S-LT9qil-U#0ejoX2w#J8Ni^{3) zgCe>KQnP34ZDf~eU)q)G{G#Q>kryFJyzy$=H~NTwT(?;Ghet;h=KD$U)A($EoLRLe<{SU%(;?rY zW3s6aGnlGvHt8czP%ad$slIW_#Imwb@qMpU9=?pKtoh$x|FJ$TUehP(E-;T zwVc;4?w0dq#c`PrPX2zkFWKwb^SOps%fEg3czJp4 zK2X2HNH*lUyZxm9`)B`17o6ysayeDmc$v!X9g3hcag>Zq9<)B#^+d^N%?y?Ra36)_ zz6{=83cSaIR~2MW9;j!8w_^nFis4m7oRuLA5v-u#0c{dI|0dJox!USji2qfdpL7gk zxbyJy)RT|Sn#Y|FtIJC)yL{a|_4@ry_qYUKCZDVRy7G;(e(u?{`Bs^!UnJF^>7I=H z^`h-f{PUQMR3GEb_iByv-JkC-J8r;1RZJs+V9^!&o-&GgQwwxQBjS5*+K38 zwBmb~d97a>AN>E!e}Br}$e-Gi4{w~_pOU?=#I@}Dy1h4#&X2qEu@JNcjN3qqscL@t zv!0aoF$KP5H7QG(y!-n+x2TFT{#E=TzyHIpeUg*)q-RQ9jA3oy5drP3EBo_w@z4L; z-~B2B?>6i@(NJ(LT0iZ1Xv_3UQOC4%*2hCQzA)s^-MT_)P0S4cb#b=!E5-hMvgw<* z{6Bo&etP}=4YlEAXL(oe(t5mIE4SS&B-j1;=lw>1ne5+kuu92`X8dK0%hI@ZuI_h% zo!v@-8Fd-aAYl04h`qjJq+uOB0>AjXyu9JD@({7ms9UWifRsQZS{o3{BNvHX%{knU9-O9N; kEf=&+wWsrcd;=px%ASkK`qL%%f$lr?boFyt=akR{0D$o3e*gdg From c64775857900ffa168e6d68a7314ff82f3ad8f10 Mon Sep 17 00:00:00 2001 From: Peter Badenhorst Date: Tue, 5 Jun 2012 13:44:05 +0200 Subject: [PATCH 292/538] Updated to support 3 different random number generators: 1) SecureRandom supported by Java (default) 2) SHA1PRNG (causes problems on Linux) 3) Various versions of the AES Counter RNG (faster than default at generating random data) --- .../provider/AES128CounterRNGFast.java | 51 ++++++ .../provider/AES128CounterRNGSecure.java | 49 ++++++ .../provider/AES256CounterRNGSecure.java | 49 ++++++ .../akka/security/provider/AkkaProvider.java | 37 ++++ akka-remote/src/main/resources/reference.conf | 19 ++- .../remote/netty/NettyRemoteSupport.scala | 2 +- .../akka/remote/netty/NettySSLSupport.scala | 159 ++++++++++++------ .../scala/akka/remote/netty/Settings.scala | 10 ++ 
.../akka/remote/Ticket1978ConfigSpec.scala | 2 + project/AkkaBuild.scala | 4 +- 10 files changed, 324 insertions(+), 58 deletions(-) create mode 100644 akka-remote/src/main/java/akka/security/provider/AES128CounterRNGFast.java create mode 100644 akka-remote/src/main/java/akka/security/provider/AES128CounterRNGSecure.java create mode 100644 akka-remote/src/main/java/akka/security/provider/AES256CounterRNGSecure.java create mode 100644 akka-remote/src/main/java/akka/security/provider/AkkaProvider.java diff --git a/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGFast.java b/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGFast.java new file mode 100644 index 0000000000..a982a6f705 --- /dev/null +++ b/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGFast.java @@ -0,0 +1,51 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.security.provider; + +import org.uncommons.maths.random.SecureRandomSeedGenerator; +import org.uncommons.maths.random.SeedException; + +import java.security.GeneralSecurityException; +import java.security.SecureRandom; + +/** + * Internal API + */ +public class AES128CounterRNGFast extends java.security.SecureRandomSpi { + private org.uncommons.maths.random.AESCounterRNG rng; + + public AES128CounterRNGFast() throws SeedException, GeneralSecurityException { + rng = new org.uncommons.maths.random.AESCounterRNG(new SecureRandomSeedGenerator()); + } + + /** + * This is managed internally only + */ + @Override + protected void engineSetSeed(byte[] seed) { + + } + + /** + * Generates a user-specified number of random bytes. + * + * @param bytes the array to be filled in with random bytes. + */ + @Override + protected void engineNextBytes(byte[] bytes) { + rng.nextBytes(bytes); + } + + /** + * Returns the given number of seed bytes. This call may be used to + * seed other random number generators. + * + * @param numBytes the number of seed bytes to generate. + * @return the seed bytes. 
+ */ + @Override + protected byte[] engineGenerateSeed(int numBytes) { + return (new SecureRandom()).generateSeed(numBytes); + } +} diff --git a/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGSecure.java b/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGSecure.java new file mode 100644 index 0000000000..178a6c392b --- /dev/null +++ b/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGSecure.java @@ -0,0 +1,49 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.security.provider; + +import org.uncommons.maths.random.DefaultSeedGenerator; + +import java.security.GeneralSecurityException; + +/** + * Internal API + */ +public class AES128CounterRNGSecure extends java.security.SecureRandomSpi { + private org.uncommons.maths.random.AESCounterRNG rng; + + public AES128CounterRNGSecure() throws GeneralSecurityException { + rng = new org.uncommons.maths.random.AESCounterRNG(); + } + + /** + * This is managed internally only + */ + @Override + protected void engineSetSeed(byte[] seed) { + + } + + /** + * Generates a user-specified number of random bytes. + * + * @param bytes the array to be filled in with random bytes. + */ + @Override + protected void engineNextBytes(byte[] bytes) { + rng.nextBytes(bytes); + } + + /** + * Returns the given number of seed bytes. This call may be used to + * seed other random number generators. + * + * @param numBytes the number of seed bytes to generate. + * @return the seed bytes. 
+ */ + @Override + protected byte[] engineGenerateSeed(int numBytes) { + return DefaultSeedGenerator.getInstance().generateSeed(numBytes); + } +} diff --git a/akka-remote/src/main/java/akka/security/provider/AES256CounterRNGSecure.java b/akka-remote/src/main/java/akka/security/provider/AES256CounterRNGSecure.java new file mode 100644 index 0000000000..48d651b86b --- /dev/null +++ b/akka-remote/src/main/java/akka/security/provider/AES256CounterRNGSecure.java @@ -0,0 +1,49 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.security.provider; + +import org.uncommons.maths.random.DefaultSeedGenerator; + +import java.security.GeneralSecurityException; + +/** + * Internal API + */ +public class AES256CounterRNGSecure extends java.security.SecureRandomSpi { + private org.uncommons.maths.random.AESCounterRNG rng; + + public AES256CounterRNGSecure() throws GeneralSecurityException { + rng = new org.uncommons.maths.random.AESCounterRNG(32); + } + + /** + * This is managed internally only + */ + @Override + protected void engineSetSeed(byte[] seed) { + + } + + /** + * Generates a user-specified number of random bytes. + * + * @param bytes the array to be filled in with random bytes. + */ + @Override + protected void engineNextBytes(byte[] bytes) { + rng.nextBytes(bytes); + } + + /** + * Returns the given number of seed bytes. This call may be used to + * seed other random number generators. + * + * @param numBytes the number of seed bytes to generate. + * @return the seed bytes. 
+ */ + @Override + protected byte[] engineGenerateSeed(int numBytes) { + return DefaultSeedGenerator.getInstance().generateSeed(numBytes); + } +} diff --git a/akka-remote/src/main/java/akka/security/provider/AkkaProvider.java b/akka-remote/src/main/java/akka/security/provider/AkkaProvider.java new file mode 100644 index 0000000000..9c4a0c2181 --- /dev/null +++ b/akka-remote/src/main/java/akka/security/provider/AkkaProvider.java @@ -0,0 +1,37 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.security.provider; + +import java.security.AccessController; +import java.security.Provider; + +/** + * A provider that for AES128CounterRNGFast, a cryptographically secure random number generator through SecureRandom + */ +public final class AkkaProvider extends Provider { + public AkkaProvider() { + super("Akka", 1.0, "Akka provider 1.0 that implements a secure AES random number generator"); + + AccessController.doPrivileged(new java.security.PrivilegedAction() { + public Object run() { + + /** + * SecureRandom + */ + put("SecureRandom.AES128CounterRNGFast", "akka.security.provider.AES128CounterRNGFast"); + put("SecureRandom.AES128CounterRNGSecure", "akka.security.provider.AES128CounterRNGSecure"); + put("SecureRandom.AES256CounterRNGSecure", "akka.security.provider.AES256CounterRNGSecure"); + + /** + * Implementation type: software or hardware + */ + put("SecureRandom.AES128CounterRNGFast ImplementedIn", "Software"); + put("SecureRandom.AES128CounterRNGSecure ImplementedIn", "Software"); + put("SecureRandom.AES256CounterRNGSecure ImplementedIn", "Software"); + + return null; + } + }); + } +} diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index d20a57d1a5..80719decf4 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -175,14 +175,29 @@ akka { # (I&O) Protocol to use for SSL encryption, choose from: # Java 6 & 7: - # SSLv3, TLSv1, + # 
'SSLv3', 'TLSv1' # Java 7: - # TLSv1.1, TLSv1.2 + # 'TLSv1.1', 'TLSv1.2' protocol = "TLSv1" # You need to install the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256 # More info here: http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider supported-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] + + # Using /dev/./urandom is only necessary when using SHA1PRNG on Linux to prevent blocking + # It is NOT as secure because it reuses the seed + # '' => defaults to /dev/random or whatever is set in java.security for example: securerandom.source=file:/dev/random + # '/dev/./urandom' => NOT '/dev/urandom' as that doesn't work according to: http://bugs.sun.com/view_bug.do;jsessionid=ff625daf459fdffffffffcd54f1c775299e0?bug_id=6202721 + sha1prng-random-source = "" + + # There are three options, in increasing order of security: + # "" or SecureRandom => (default) + # "SHA1PRNG" => Can be slow because of blocking issues on Linux + # "AES128CounterRNGFast" => fastest startup and based on AES encryption algorithm + # The following use one of 3 possible seed sources, depending on availability: /dev/random, random.org and SecureRandom (provided by Java) + # "AES128CounterRNGSecure" + # "AES256CounterRNGSecure" (Install JCE Unlimited Strength Jurisdiction Policy Files first) + random-number-generator = "" } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 32aba84893..84a46f05cd 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -71,7 +71,7 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider * actually dispatches the received messages to the local target actors). 
*/ def defaultStack(withTimeout: Boolean, isClient: Boolean): Seq[ChannelHandler] = - (if (settings.EnableSSL) NettySSLSupport(settings, NettyRemoteTransport.this, isClient) :: Nil else Nil) ::: + (if (settings.EnableSSL) NettySSLSupport(settings, NettyRemoteTransport.this.log, isClient) :: Nil else Nil) ::: (if (withTimeout) timeout :: Nil else Nil) ::: msgFormat ::: authenticator ::: diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index d830c87a07..011aa92233 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -4,44 +4,112 @@ package akka.remote.netty +import _root_.java.security.Provider +import _root_.java.security.SecureRandom +import _root_.java.security.Security import org.jboss.netty.handler.ssl.SslHandler -import com.sun.xml.internal.bind.v2.model.core.NonElement -import com.sun.xml.internal.ws.resources.SoapMessages import javax.net.ssl.{ KeyManagerFactory, TrustManager, TrustManagerFactory, SSLContext } -import akka.remote.{ RemoteClientError, RemoteTransportException, RemoteServerError } -import java.security.{ GeneralSecurityException, SecureRandom, KeyStore } +import akka.remote.{ RemoteTransportException } +import akka.event.LoggingAdapter import java.io.{ IOException, FileNotFoundException, FileInputStream } +import java.security.{ SecureRandom, GeneralSecurityException, KeyStore } +import akka.security.provider.AkkaProvider +import com.sun.xml.internal.bind.v2.model.core.NonElement -object NettySSLSupport { +/** + * Used for adding SSL support to Netty pipeline + * Internal use only + */ +private object NettySSLSupport { /** * Construct a SSLHandler which can be inserted into a Netty server/client pipeline */ - def apply(settings: NettySettings, netty: NettyRemoteTransport, isClient: Boolean): SslHandler = { - if (isClient) 
initialiseClientSSL(settings, netty) - else initialiseServerSSL(settings, netty) + def apply(settings: NettySettings, log: LoggingAdapter, isClient: Boolean): SslHandler = { + if (isClient) initialiseClientSSL(settings, log) + else initialiseServerSSL(settings, log) } - private def initialiseClientSSL(settings: NettySettings, netty: NettyRemoteTransport): SslHandler = { - netty.log.debug("Client SSL is enabled, initialising ...") + private def initialiseCustomSecureRandom(settings: NettySettings, log: LoggingAdapter): SecureRandom = { + /** + * According to this bug report: http://bugs.sun.com/view_bug.do;jsessionid=ff625daf459fdffffffffcd54f1c775299e0?bug_id=6202721 + * Using /dev/./urandom is only necessary when using SHA1PRNG on Linux + * Use 'new SecureRandom()' instead of 'SecureRandom.getInstance("SHA1PRNG")' to avoid having problems + */ + settings.SSLRandomSource match { + case Some(path) ⇒ System.setProperty("java.security.egd", path) + case None ⇒ + } + + val rng = settings.SSLRandomNumberGenerator match { + case Some(generator) ⇒ generator match { + case "AES128CounterRNGFast" ⇒ { + log.debug("SSL random number generator set to: AES128CounterRNGFast") + val akka = new AkkaProvider + Security.addProvider(akka) + SecureRandom.getInstance("AES128CounterRNGFast", akka) + } + case "AES128CounterRNGSecure" ⇒ { + log.debug("SSL random number generator set to: AES128CounterRNGSecure") + val akka = new AkkaProvider + Security.addProvider(akka) + SecureRandom.getInstance("AES128CounterRNGSecure", akka) + } + case "AES256CounterRNGSecure" ⇒ { + log.debug("SSL random number generator set to: AES256CounterRNGSecure") + val akka = new AkkaProvider + Security.addProvider(akka) + SecureRandom.getInstance("AES256CounterRNGSecure", akka) + } + case "SHA1PRNG" ⇒ { + log.debug("SSL random number generator set to: SHA1PRNG") + // This needs /dev/urandom to be the source on Linux to prevent problems with /dev/random blocking + // However, this also makes the seed source 
insecure as the seed is reused to avoid blocking (not a problem on FreeBSD). + SecureRandom.getInstance("SHA1PRNG") + } + case _ ⇒ { + log.debug("SSL random number generator set to default: SecureRandom") + new SecureRandom + } + } + case None ⇒ { + log.debug("SSL random number generator not set. Setting to default: SecureRandom") + new SecureRandom + } + } + // prevent stall on first access + rng.nextInt() + rng + } + + private def initialiseClientSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { + log.debug("Client SSL is enabled, initialising ...") val sslContext: Option[SSLContext] = { (settings.SSLTrustStore, settings.SSLTrustStorePassword, settings.SSLProtocol) match { - case (Some(trustStore), Some(password), Some(protocol)) ⇒ constructClientContext(settings, netty, trustStore, password, protocol) - case _ ⇒ throw new GeneralSecurityException("Could not find all SSL trust store settings") + case (Some(trustStore), Some(password), Some(protocol)) ⇒ constructClientContext(settings, log, trustStore, password, protocol) + case (trustStore, password, protocol) ⇒ + val msg = "SSL trust store settings went missing. [trust-store: %s] [trust-store-password: %s] [protocol: %s]" + .format(trustStore, password, protocol) + throw new GeneralSecurityException(msg) } } sslContext match { case Some(context) ⇒ { - netty.log.debug("Using client SSL context to create SSLEngine ...") + log.debug("Using client SSL context to create SSLEngine ...") val sslEngine = context.createSSLEngine sslEngine.setUseClientMode(true) sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) new SslHandler(sslEngine) } - case None ⇒ throw new GeneralSecurityException("Failed to initialise client SSL") + case None ⇒ { + val msg = "Failed to initialise client SSL because SSL context could not be found. 
" + + "Make sure your settings are correct: [trust-store: %s] [trust-store-password: %s] [protocol: %s]" + .format(settings.SSLTrustStore, settings.SSLTrustStorePassword, settings.SSLProtocol) + throw new GeneralSecurityException(msg) + } } } - private def constructClientContext(settings: NettySettings, netty: NettyRemoteTransport, trustStorePath: String, trustStorePassword: String, protocol: String): Option[SSLContext] = { + private def constructClientContext(settings: NettySettings, log: LoggingAdapter, trustStorePath: String, trustStorePassword: String, protocol: String): Option[SSLContext] = { try { val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) @@ -50,48 +118,43 @@ object NettySSLSupport { trustManagerFactory.init(trustStore) val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers val sslContext = SSLContext.getInstance(protocol) - sslContext.init(null, trustManagers, new SecureRandom()) + sslContext.init(null, trustManagers, initialiseCustomSecureRandom(settings, log)) Some(sslContext) } catch { - case e: FileNotFoundException ⇒ { - val exception = new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) - netty.notifyListeners(RemoteClientError(exception, netty, netty.address)) - throw exception - } - case e: IOException ⇒ { - val exception = new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) - netty.notifyListeners(RemoteClientError(exception, netty, netty.address)) - throw exception - } - case e: GeneralSecurityException ⇒ { - val exception = new RemoteTransportException("Client SSL connection could not be established because SSL context could not be constructed", e) - netty.notifyListeners(RemoteClientError(exception, netty, netty.address)) - throw exception - } + case e: FileNotFoundException ⇒ throw 
new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) + case e: IOException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) + case e: GeneralSecurityException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because SSL context could not be constructed", e) } } - private def initialiseServerSSL(settings: NettySettings, netty: NettyRemoteTransport): SslHandler = { - netty.log.debug("Server SSL is enabled, initialising ...") + private def initialiseServerSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { + log.debug("Server SSL is enabled, initialising ...") val sslContext: Option[SSLContext] = { (settings.SSLKeyStore, settings.SSLKeyStorePassword, settings.SSLProtocol) match { - case (Some(keyStore), Some(password), Some(protocol)) ⇒ constructServerContext(settings, netty, keyStore, password, protocol) - case _ ⇒ throw new GeneralSecurityException("Could not find all SSL key store settings") + case (Some(keyStore), Some(password), Some(protocol)) ⇒ constructServerContext(settings, log, keyStore, password, protocol) + case (keyStore, password, protocol) ⇒ + val msg = "SSL key store settings went missing. [key-store: %s] [key-store-password: %s] [protocol: %s]".format(keyStore, password, protocol) + throw new GeneralSecurityException(msg) } } sslContext match { case Some(context) ⇒ { - netty.log.debug("Using server SSL context to create SSLEngine ...") + log.debug("Using server SSL context to create SSLEngine ...") val sslEngine = context.createSSLEngine sslEngine.setUseClientMode(false) sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) new SslHandler(sslEngine) } - case None ⇒ throw new GeneralSecurityException("Failed to initialise server SSL") + case None ⇒ { + val msg = "Failed to initialise server SSL because SSL context could not be found. 
" + + "Make sure your settings are correct: [key-store: %s] [key-store-password: %s] [protocol: %s]" + .format(settings.SSLKeyStore, settings.SSLKeyStorePassword, settings.SSLProtocol) + throw new GeneralSecurityException(msg) + } } } - private def constructServerContext(settings: NettySettings, netty: NettyRemoteTransport, keyStorePath: String, keyStorePassword: String, protocol: String): Option[SSLContext] = { + private def constructServerContext(settings: NettySettings, log: LoggingAdapter, keyStorePath: String, keyStorePassword: String, protocol: String): Option[SSLContext] = { try { val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) @@ -99,24 +162,12 @@ object NettySSLSupport { keyStore.load(stream, keyStorePassword.toCharArray) factory.init(keyStore, keyStorePassword.toCharArray) val sslContext = SSLContext.getInstance(protocol) - sslContext.init(factory.getKeyManagers, null, new SecureRandom()) + sslContext.init(factory.getKeyManagers, null, initialiseCustomSecureRandom(settings, log)) Some(sslContext) } catch { - case e: FileNotFoundException ⇒ { - val exception = new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) - netty.notifyListeners(RemoteServerError(exception, netty)) - throw exception - } - case e: IOException ⇒ { - val exception = new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) - netty.notifyListeners(RemoteServerError(exception, netty)) - throw exception - } - case e: GeneralSecurityException ⇒ { - val exception = new RemoteTransportException("Server SSL connection could not be established because SSL context could not be constructed", e) - netty.notifyListeners(RemoteServerError(exception, netty)) - throw exception - } + case e: FileNotFoundException ⇒ throw new RemoteTransportException("Server SSL connection could not be 
established because key store could not be loaded", e) + case e: IOException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) + case e: GeneralSecurityException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because SSL context could not be constructed", e) } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 5d829127f8..22a659958c 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -100,6 +100,16 @@ private[akka] class NettySettings(config: Config, val systemName: String) { case protocol ⇒ Some(protocol) } + val SSLRandomSource = getString("ssl.sha1prng-random-source") match { + case "" ⇒ None + case path ⇒ Some(path) + } + + val SSLRandomNumberGenerator = getString("ssl.random-number-generator") match { + case "" ⇒ None + case rng ⇒ Some(rng) + } + val EnableSSL = { val enableSSL = getBoolean("ssl.enable") if (enableSSL) { diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala index 0d429043c2..c6556f0160 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -41,6 +41,8 @@ akka { SSLTrustStorePassword must be(Some("changeme")) SSLProtocol must be(Some("TLSv1")) SSLSupportedAlgorithms must be(java.util.Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) + SSLRandomSource must be(None) + SSLRandomNumberGenerator must be(None) } } } diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 4b8f72e424..3416993f89 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -413,7 +413,7 @@ object Dependencies { ) val remote = Seq( - netty, protobuf, 
Test.junit, Test.scalatest + netty, protobuf, uncommonsMath, Test.junit, Test.scalatest ) val cluster = Seq(Test.junit, Test.scalatest) @@ -451,6 +451,7 @@ object Dependency { val ScalaStm = "0.5" val Scalatest = "1.6.1" val Slf4j = "1.6.4" + val UncommonsMath = "1.2.2a" } // Compile @@ -460,6 +461,7 @@ object Dependency { val protobuf = "com.google.protobuf" % "protobuf-java" % V.Protobuf // New BSD val scalaStm = "org.scala-tools" % "scala-stm_2.9.1" % V.ScalaStm // Modified BSD (Scala) val slf4jApi = "org.slf4j" % "slf4j-api" % V.Slf4j // MIT + val uncommonsMath = "org.uncommons.maths" % "uncommons-maths" % V.UncommonsMath // ApacheV2 val zeroMQ = "org.zeromq" % "zeromq-scala-binding_2.9.1" % "0.0.6" // ApacheV2 // Runtime From 5ccfb2cfeefe7cecf5102c3ac8e0802b01b8c728 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 5 Jun 2012 14:13:28 +0200 Subject: [PATCH 293/538] Remove cluster settings vals --- .../src/main/scala/akka/cluster/Cluster.scala | 25 +++++++------------ 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 0b2b3919f7..b947782a9a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -372,24 +372,17 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val remoteSettings = new RemoteSettings(system.settings.config, system.name) val clusterSettings = new ClusterSettings(system.settings.config, system.name) + import clusterSettings._ val selfAddress = remote.transport.address val failureDetector = new AccrualFailureDetector( - system, selfAddress, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) + system, selfAddress, FailureDetectorThreshold, FailureDetectorMaxSampleSize) private val vclockNode = VectorClock.Node(selfAddress.toString) - private val periodicTasksInitialDelay = 
clusterSettings.PeriodicTasksInitialDelay - private val gossipInterval = clusterSettings.GossipInterval - private val leaderActionsInterval = clusterSettings.LeaderActionsInterval - private val unreachableNodesReaperInterval = clusterSettings.UnreachableNodesReaperInterval - implicit private val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) - private val autoDown = clusterSettings.AutoDown - private val nrOfDeputyNodes = clusterSettings.NrOfDeputyNodes - private val nrOfGossipDaemons = clusterSettings.NrOfGossipDaemons - private val nodeToJoin: Option[Address] = clusterSettings.NodeToJoin filter (_ != selfAddress) + private val nodeToJoin: Option[Address] = NodeToJoin filter (_ != selfAddress) private val serialization = remote.serialization @@ -424,17 +417,17 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // ======================================================== // start periodic gossip to random nodes in cluster - private val gossipCanceller = system.scheduler.schedule(periodicTasksInitialDelay, gossipInterval) { + private val gossipCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, GossipInterval) { gossip() } // start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list) - private val failureDetectorReaperCanceller = system.scheduler.schedule(periodicTasksInitialDelay, unreachableNodesReaperInterval) { + private val failureDetectorReaperCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, UnreachableNodesReaperInterval) { reapUnreachableMembers() } // start periodic leader action management (only applies for the current leader) - private val leaderActionsCanceller = system.scheduler.schedule(periodicTasksInitialDelay, leaderActionsInterval) { + private val leaderActionsCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, LeaderActionsInterval) { leaderActions() } @@ -983,7 +976,7 @@ class Cluster(system: 
ExtendedActorSystem) extends Extension { clusterNode ⇒ } localGossip copy (members = newMembers) // update gossip - } else if (autoDown) { + } else if (AutoDown) { // we don't have convergence - so we might have unreachable nodes // if 'auto-down' is turned on, then try to auto-down any unreachable nodes @@ -1055,7 +1048,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val views = Set.empty[VectorClock] ++ seen.values if (views.size == 1) { - log.debug("Cluster Node [{}] - Cluster convergence reached", selfAddress) + log.debug("Cluster Node [{}] - Cluster convergence reached: [{}]", selfAddress, gossip.members.mkString(", ")) Some(gossip) } else None } else None @@ -1091,7 +1084,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Gets an Iterable with the addresses of a all the 'deputy' nodes - excluding this node if part of the group. */ - private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take nrOfDeputyNodes filter (_ != selfAddress) + private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take NrOfDeputyNodes filter (_ != selfAddress) private def selectRandomNode(addresses: Iterable[Address]): Address = addresses.toSeq(ThreadLocalRandom.current nextInt addresses.size) From f02793ebd6d32c4a23d85d7aec0725600ccd6657 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 5 Jun 2012 14:13:44 +0200 Subject: [PATCH 294/538] Refactor cluster startup join in tests and fix barrier race * Refactored common code to MultiNodeClusterSpec.awaitClusterUp * Fixed some race conditions of barriers --- ...ientDowningNodeThatIsUnreachableSpec.scala | 24 +++------- .../ClientDowningNodeThatIsUpSpec.scala | 23 ++------- .../scala/akka/cluster/ConvergenceSpec.scala | 12 +---- .../GossipingAccrualFailureDetectorSpec.scala | 8 +--- ...aderDowningNodeThatIsUnreachableSpec.scala | 48 +++++-------------- 
.../akka/cluster/LeaderElectionSpec.scala | 13 ++--- .../MembershipChangeListenerExitingSpec.scala | 17 ++----- .../MembershipChangeListenerJoinSpec.scala | 18 +++---- .../MembershipChangeListenerLeavingSpec.scala | 17 ++----- .../MembershipChangeListenerSpec.scala | 10 ++-- .../MembershipChangeListenerUpSpec.scala | 14 ++---- .../akka/cluster/MultiNodeClusterSpec.scala | 28 ++++++++++- .../scala/akka/cluster/NodeJoinSpec.scala | 4 +- ...LeavingAndExitingAndBeingRemovedSpec.scala | 11 +---- .../cluster/NodeLeavingAndExitingSpec.scala | 11 +---- .../scala/akka/cluster/NodeLeavingSpec.scala | 11 +---- .../scala/akka/cluster/NodeShutdownSpec.scala | 11 +---- .../scala/akka/cluster/NodeUpSpec.scala | 13 +---- 18 files changed, 85 insertions(+), 208 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index b241899ad6..d78afcdeb7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -34,13 +34,10 @@ class ClientDowningNodeThatIsUnreachableSpec "Client of a 4 node cluster" must { "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in { + val thirdAddress = node(third).address + awaitClusterUp(first, second, third, fourth) + runOn(first) { - startClusterNode() - awaitUpConvergence(numberOfMembers = 4) - - val thirdAddress = node(third).address - testConductor.enter("all-up") - // kill 'third' node testConductor.shutdown(third, 0) @@ -50,28 +47,19 @@ class ClientDowningNodeThatIsUnreachableSpec awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) - testConductor.enter("await-completion") } runOn(third) { - 
cluster.join(node(first).address) - - awaitUpConvergence(numberOfMembers = 4) - testConductor.enter("all-up") + testConductor.enter("down-third-node") } runOn(second, fourth) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 4) - - val thirdAddress = node(third).address - testConductor.enter("all-up") - testConductor.enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) - testConductor.enter("await-completion") } + + testConductor.enter("await-completion") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index ff048a2eda..5f778c25d1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -34,42 +34,29 @@ class ClientDowningNodeThatIsUpSpec "Client of a 4 node cluster" must { "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { + val thirdAddress = node(third).address + awaitClusterUp(first, second, third, fourth) + runOn(first) { - startClusterNode() - awaitUpConvergence(numberOfMembers = 4) - - val thirdAddress = node(third).address - testConductor.enter("all-up") - // mark 'third' node as DOWN cluster.down(thirdAddress) testConductor.enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) - testConductor.enter("await-completion") } runOn(third) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 4) - testConductor.enter("all-up") testConductor.enter("down-third-node") - testConductor.enter("await-completion") } runOn(second, fourth) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 4) - - val 
thirdAddress = node(third).address - testConductor.enter("all-up") - testConductor.enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) - testConductor.enter("await-completion") } + + testConductor.enter("await-completion") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index a76083b0fc..a7e5712cfa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -40,15 +40,7 @@ abstract class ConvergenceSpec "A cluster of 3 members" must { "reach initial convergence" taggedAs LongRunningTest in { - runOn(first) { - cluster.self - awaitUpConvergence(numberOfMembers = 3) - } - - runOn(second, third) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 3) - } + awaitClusterUp(first, second, third) runOn(fourth) { // doesn't join immediately @@ -70,7 +62,7 @@ abstract class ConvergenceSpec val firstAddress = node(first).address val secondAddress = node(second).address - within(25 seconds) { + within(28 seconds) { // third becomes unreachable awaitCond(cluster.latestGossip.overview.unreachable.size == 1) awaitCond(cluster.latestGossip.members.size == 2) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 27a012d32e..9df3e20d68 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -36,13 +36,7 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "A Gossip-driven Failure Detector" must { "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" 
taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - cluster.join(firstAddress) + awaitClusterUp(first, second, third) 5.seconds.dilated.sleep // let them gossip cluster.failureDetector.isAvailable(firstAddress) must be(true) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index e8b956e87b..ffbd4eb287 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -42,13 +42,10 @@ class LeaderDowningNodeThatIsUnreachableSpec "The Leader in a 4 node cluster" must { "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { + val fourthAddress = node(fourth).address + awaitClusterUp(first, second, third, fourth) + runOn(first) { - startClusterNode() - awaitUpConvergence(numberOfMembers = 4) - - val fourthAddress = node(fourth).address - testConductor.enter("all-up") - // kill 'fourth' node testConductor.shutdown(fourth, 0) testConductor.enter("down-fourth-node") @@ -56,38 +53,26 @@ class LeaderDowningNodeThatIsUnreachableSpec // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) - testConductor.enter("await-completion") } runOn(fourth) { - cluster.join(node(first).address) - - awaitUpConvergence(numberOfMembers = 4) - testConductor.enter("all-up") + testConductor.enter("down-fourth-node") } runOn(second, third) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 4) - - val fourthAddress = node(fourth).address - testConductor.enter("all-up") - testConductor.enter("down-fourth-node") 
awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) - testConductor.enter("await-completion") } + + testConductor.enter("await-completion-1") } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { + val secondAddress = node(second).address + testConductor.enter("before-down-second-node") + runOn(first) { - cluster.self - awaitUpConvergence(numberOfMembers = 3) - - val secondAddress = node(second).address - testConductor.enter("all-up") - // kill 'second' node testConductor.shutdown(second, 0) testConductor.enter("down-second-node") @@ -95,28 +80,19 @@ class LeaderDowningNodeThatIsUnreachableSpec // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) - testConductor.enter("await-completion") } runOn(second) { - cluster.join(node(first).address) - - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("all-up") + testConductor.enter("down-second-node") } runOn(third) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 3) - - val secondAddress = node(second).address - testConductor.enter("all-up") - testConductor.enter("down-second-node") awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30 seconds) - testConductor.enter("await-completion") } + + testConductor.enter("await-completion-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 5a155fc195..ce4d5a8042 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -33,26 +33,19 @@ abstract class LeaderElectionSpec override def initialParticipants = 5 - lazy val firstAddress = node(first).address - // sorted in the order 
used by the cluster lazy val roles = Seq(first, second, third, fourth).sorted "A cluster of four nodes" must { "be able to 'elect' a single leader" taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") + awaitClusterUp(first, second, third, fourth) if (myself != controller) { - cluster.join(firstAddress) - awaitUpConvergence(numberOfMembers = roles.size) cluster.isLeader must be(myself == roles.head) assertLeaderIn(roles) } + testConductor.enter("after") } @@ -71,7 +64,7 @@ abstract class LeaderElectionSpec testConductor.enter("after-shutdown", "after-down", "completed") case `leader` ⇒ - testConductor.enter("before-shutdown") + testConductor.enter("before-shutdown", "after-shutdown") // this node will be shutdown by the controller and doesn't participate in more barriers case `aUser` ⇒ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index 8932eed6ee..cdf809187a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -18,13 +18,13 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-interval = 5 s # increase the leader action task interval unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to unreachable node set } """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec @@ -46,16 +46,7 @@ abstract class 
MembershipChangeListenerExitingSpec "A registered MembershipChangeListener" must { "be notified when new node is EXITING" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { testConductor.enter("registered-listener") @@ -70,7 +61,7 @@ abstract class MembershipChangeListenerExitingSpec val exitingLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists( m => m.address == secondAddress && m.status == MemberStatus.Exiting)) + if (members.size == 3 && members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Exiting)) exitingLatch.countDown() } }) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 2f82e12506..c07ec19f77 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -17,12 +17,12 @@ object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-interval = 5 s # increase the leader action task interval to allow time checking for JOIN before leader moves it to UP } """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec @@ -42,15 +42,6 @@ abstract class MembershipChangeListenerJoinSpec "A registered 
MembershipChangeListener" must { "be notified when new node is JOINING" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - - runOn(second) { - testConductor.enter("registered-listener") - cluster.join(firstAddress) - } - runOn(first) { val joinLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { @@ -65,6 +56,11 @@ abstract class MembershipChangeListenerJoinSpec cluster.convergence.isDefined must be(true) } + runOn(second) { + testConductor.enter("registered-listener") + cluster.join(firstAddress) + } + testConductor.enter("after") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 089f241849..41b69ce7b4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -17,11 +17,11 @@ object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster.leader-actions-interval = 5 s akka.cluster.unreachable-nodes-reaper-interval = 30 s """)) - .withFallback(MultiNodeClusterSpec.clusterConfig)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec @@ -43,16 +43,7 @@ abstract class MembershipChangeListenerLeavingSpec "A registered MembershipChangeListener" must { "be notified when new node is LEAVING" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { 
testConductor.enter("registered-listener") @@ -67,7 +58,7 @@ abstract class MembershipChangeListenerLeavingSpec val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists( m => m.address == secondAddress && m.status == MemberStatus.Leaving)) + if (members.size == 3 && members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Leaving)) latch.countDown() } }) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 352f9de1a4..c87a280e17 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -32,13 +32,9 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "A set of connected cluster systems" must { - "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + "(when two nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - cluster.self - } - testConductor.enter("first-started") + awaitClusterUp(first) runOn(first, second) { cluster.join(firstAddress) @@ -56,7 +52,7 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan testConductor.enter("after-1") } - "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs 
LongRunningTest in { runOn(third) { cluster.join(firstAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 3df6b876f9..7709e9854a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -35,15 +35,6 @@ abstract class MembershipChangeListenerUpSpec "A registered MembershipChangeListener" must { "be notified when new node is marked as UP by the leader" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - - runOn(second) { - testConductor.enter("registered-listener") - cluster.join(firstAddress) - } - runOn(first) { val upLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { @@ -58,6 +49,11 @@ abstract class MembershipChangeListenerUpSpec awaitUpConvergence(numberOfMembers = 2) } + runOn(second) { + testConductor.enter("registered-listener") + cluster.join(firstAddress) + } + testConductor.enter("after") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index bf431f74f6..5fe5e7de37 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -30,15 +30,39 @@ object MultiNodeClusterSpec { trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ /** - * Create a cluster node using 'Cluster(system)'. + * Get or create a cluster node using 'Cluster(system)' extension. */ def cluster: Cluster = Cluster(system) /** - * Use this method instead of 'cluster.self'. + * Use this method instead of 'cluster.self' + * for the initial startup of the cluster node. 
*/ def startClusterNode(): Unit = cluster.self + def startCluster(roles: RoleName*): Unit = { + awaitStartCluster(false, roles.toSeq) + } + + def awaitClusterUp(roles: RoleName*): Unit = { + awaitStartCluster(true, roles.toSeq) + } + + private def awaitStartCluster(upConvergence: Boolean = true, roles: Seq[RoleName]): Unit = { + runOn(roles.head) { + // make sure that the node-to-join is started before other join + startClusterNode() + } + testConductor.enter(roles.head.name + "-started") + if (roles.tail.contains(myself)) { + cluster.join(node(roles.head).address) + } + if (upConvergence && roles.contains(myself)) { + awaitUpConvergence(numberOfMembers = roles.length) + } + testConductor.enter(roles.map(_.name).mkString("-") + "-joined") + } + /** * Assert that the member addresses match the expected addresses in the * sort order used by the cluster. diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 99116ecb25..0d6a50b82a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -16,12 +16,12 @@ object NodeJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-interval = 5 s # increase the leader action task interval } """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class NodeJoinMultiJvmNode1 extends NodeJoinSpec diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index ebab4f6ba3..a974930d0a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -40,16 +40,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { cluster.leave(secondAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 31630f934c..3773ccbd5d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -46,16 +46,7 @@ abstract class NodeLeavingAndExitingSpec "be moved to EXITING by the leader" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { cluster.leave(secondAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 17db90c880..96876cf4cb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -40,16 +40,7 @@ abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - 
cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { cluster.leave(secondAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index c0ac1ee22b..b54c0c1b39 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -35,16 +35,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "A cluster of 2 nodes" must { "not be singleton cluster when joined" taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second) { - cluster.join(node(first).address) - } - awaitUpConvergence(numberOfMembers = 2) + awaitClusterUp(first, second) cluster.isSingletonCluster must be(false) assertLeader(first, second) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 7931ce48f1..eafdf2fffd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -28,21 +28,10 @@ abstract class NodeUpSpec override def initialParticipants = 2 - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - "A cluster node that is joining another cluster" must { "be moved to UP by the leader after a convergence" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - - runOn(second) { - cluster.join(firstAddress) - } - - awaitUpConvergence(numberOfMembers = 2) + awaitClusterUp(first, second) testConductor.enter("after") } From ac98dddfe8432bd6610bceea20c6efcbf1f1e423 Mon Sep 17 00:00:00 2001 From: Patrik 
Nordwall Date: Tue, 5 Jun 2012 15:53:30 +0200 Subject: [PATCH 295/538] ScalaDoc of awaitClusterUp --- .../scala/akka/cluster/MultiNodeClusterSpec.scala | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 5fe5e7de37..113064e13c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -40,10 +40,21 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ */ def startClusterNode(): Unit = cluster.self + /** + * Initialize the cluster with the specified member + * nodes (roles). First node will be started first + * and others will join the first. + */ def startCluster(roles: RoleName*): Unit = { awaitStartCluster(false, roles.toSeq) } + /** + * Initialize the cluster of the specified member + * nodes (roles) and wait until all joined and `Up`. + * First node will be started first and others will join + * the first. + */ def awaitClusterUp(roles: RoleName*): Unit = { awaitStartCluster(true, roles.toSeq) } From 4d3e9f19fe93db77d184b0fbf2d23baa75d058df Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 5 Jun 2012 18:19:46 +0200 Subject: [PATCH 296/538] Fixing ScalaDoc messup --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 2 +- .../src/main/scala/akka/serialization/ProtobufSerializer.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index b947782a9a..c16a34a2ca 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -481,7 +481,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Checks if we have a cluster convergence. 
* - * @returns Some(convergedGossip) if convergence have been reached and None if not + * @return Some(convergedGossip) if convergence have been reached and None if not */ def convergence: Option[Gossip] = convergence(latestGossip) diff --git a/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala index d9a5c7b0c4..77f6702a77 100644 --- a/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala @@ -27,7 +27,7 @@ object ProtobufSerializer { /** * Helper to materialize (lookup) an [[akka.actor.ActorRef]] * from Akka's protobuf representation in the supplied - * [[akka.actor.ActorSystem]. + * [[akka.actor.ActorSystem]]. */ def deserializeActorRef(system: ActorSystem, refProtocol: ActorRefProtocol): ActorRef = system.actorFor(refProtocol.getPath) From 211435048a7c4c2678ac1abdaeaed9b3f9c42067 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 13:56:46 +0200 Subject: [PATCH 297/538] #2189 - Removing RemoteClientWriteFailed and log it as a RemoteClientError and send the message to DeadLetters --- akka-docs/java/remoting.rst | 4 ---- akka-docs/scala/remoting.rst | 4 ---- .../main/scala/akka/remote/RemoteTransport.scala | 15 --------------- .../src/main/scala/akka/remote/netty/Client.scala | 14 ++++++++------ 4 files changed, 8 insertions(+), 29 deletions(-) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index 910ec5fbb2..82a736973f 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -280,10 +280,6 @@ which holds the transport used (RemoteTransport) and the outbound address that i To intercept when an outbound client is shut down you listen to ``RemoteClientShutdown`` which holds the transport used (RemoteTransport) and the outbound address that it was connected to (Address). 
-To intercept when an outbound message cannot be sent, you listen to ``RemoteClientWriteFailed`` which holds -the payload that was not written (AnyRef), the cause of the failed send (Throwable), -the transport used (RemoteTransport) and the outbound address that was the destination (Address). - For general outbound-related errors, that do not classify as any of the others, you can listen to ``RemoteClientError``, which holds the cause (Throwable), the transport used (RemoteTransport) and the outbound address (Address). diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 0863d80b55..ab49765fad 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -288,10 +288,6 @@ which holds the transport used (RemoteTransport) and the outbound address that i To intercept when an outbound client is shut down you listen to ``RemoteClientShutdown`` which holds the transport used (RemoteTransport) and the outbound address that it was connected to (Address). -To intercept when an outbound message cannot be sent, you listen to ``RemoteClientWriteFailed`` which holds -the payload that was not written (AnyRef), the cause of the failed send (Throwable), -the transport used (RemoteTransport) and the outbound address that was the destination (Address). - For general outbound-related errors, that do not classify as any of the others, you can listen to ``RemoteClientError``, which holds the cause (Throwable), the transport used (RemoteTransport) and the outbound address (Address). 
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 249c23e968..aefd34ec74 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -77,21 +77,6 @@ case class RemoteClientShutdown( override def toString: String = "RemoteClientShutdown@" + remoteAddress } -/** - * RemoteClientWriteFailed is published when a remote send of a message detectably fails (throws an exception). - */ -case class RemoteClientWriteFailed( - @BeanProperty request: AnyRef, - @BeanProperty cause: Throwable, - @transient @BeanProperty remote: RemoteTransport, - @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel: Logging.LogLevel = Logging.WarningLevel - override def toString: String = - "RemoteClientWriteFailed@" + remoteAddress + - ": MessageClass[" + (if (request ne null) request.getClass.getName else "no message") + - "] Error[" + cause + "]" -} - /** * Life-cycle events for RemoteServer. 
*/ diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index c6d23e71f3..76b400dd00 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -13,11 +13,11 @@ import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBa import org.jboss.netty.handler.execution.ExecutionHandler import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } -import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected, RemoteClientWriteFailed } -import akka.actor.{ Address, ActorRef } +import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected } import akka.AkkaException import akka.event.Logging -import akka.util.Switch +import akka.actor.{ DeadLetter, Address, ActorRef } +import akka.util.{ NonFatal, Switch } /** * This is the abstract baseclass for netty remote clients, currently there's only an @@ -65,7 +65,9 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT new ChannelFutureListener { def operationComplete(future: ChannelFuture) { if (future.isCancelled || !future.isSuccess) { - netty.notifyListeners(RemoteClientWriteFailed(request, future.getCause, netty, remoteAddress)) + netty.notifyListeners(RemoteClientError(future.getCause, netty, remoteAddress)) + val (message, sender, recipient) = request + netty.system.deadLetters ! 
DeadLetter(message, sender.getOrElse(netty.system.deadLetters), recipient) } } }) @@ -75,11 +77,11 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT if (backoff.length > 0 && !f.await(backoff.length, backoff.unit)) f.cancel() //Waited as long as we could, now back off } } catch { - case e: Exception ⇒ netty.notifyListeners(RemoteClientError(e, netty, remoteAddress)) + case NonFatal(e) ⇒ netty.notifyListeners(RemoteClientError(e, netty, remoteAddress)) } } - override def toString = name + override def toString: String = name } /** From 1c5d0bdf42e489dc60d9f28432ef5038defec38c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 14:04:02 +0200 Subject: [PATCH 298/538] Adding a FIXME --- akka-remote/src/main/scala/akka/remote/netty/Client.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 76b400dd00..2d3748fb52 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -65,6 +65,7 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT new ChannelFutureListener { def operationComplete(future: ChannelFuture) { if (future.isCancelled || !future.isSuccess) { + //FIXME Should we just _not_ notifyListeners here and just assume that the other error reporting is sufficient? netty.notifyListeners(RemoteClientError(future.getCause, netty, remoteAddress)) val (message, sender, recipient) = request netty.system.deadLetters ! 
DeadLetter(message, sender.getOrElse(netty.system.deadLetters), recipient) From c686622acf190867498a21b25001fd9b366fc20c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 14:23:34 +0200 Subject: [PATCH 299/538] Deciding not to publish errors when a message delivery fails --- akka-remote/src/main/scala/akka/remote/netty/Client.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 2d3748fb52..c9e78902f4 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -65,8 +65,8 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT new ChannelFutureListener { def operationComplete(future: ChannelFuture) { if (future.isCancelled || !future.isSuccess) { - //FIXME Should we just _not_ notifyListeners here and just assume that the other error reporting is sufficient? - netty.notifyListeners(RemoteClientError(future.getCause, netty, remoteAddress)) + // We don't call notifyListeners here since we don't think failed message deliveries are errors + // If the connection goes down we'll get the error reporting done by the pipeline. val (message, sender, recipient) = request netty.system.deadLetters ! 
DeadLetter(message, sender.getOrElse(netty.system.deadLetters), recipient) } From 5ec760680afa6903e884c519321a7b1c101b413f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 14:28:06 +0200 Subject: [PATCH 300/538] Minor restructuring of the send-callback --- .../src/main/scala/akka/remote/netty/Client.scala | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index c9e78902f4..86c534c418 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -63,14 +63,13 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT val f = channel.write(request) f.addListener( new ChannelFutureListener { - def operationComplete(future: ChannelFuture) { - if (future.isCancelled || !future.isSuccess) { + import netty.system.deadLetters + def operationComplete(future: ChannelFuture): Unit = + if (future.isCancelled || !future.isSuccess) request match { + case (msg, sender, recipient) ⇒ deadLetters ! DeadLetter(msg, sender.getOrElse(deadLetters), recipient) // We don't call notifyListeners here since we don't think failed message deliveries are errors - // If the connection goes down we'll get the error reporting done by the pipeline. - val (message, sender, recipient) = request - netty.system.deadLetters ! DeadLetter(message, sender.getOrElse(netty.system.deadLetters), recipient) + /// If the connection goes down we'll get the error reporting done by the pipeline. 
} - } }) // Check if we should back off if (!channel.isWritable) { From 82fbca9241aba4c1b0376d67bdb5a53154b519e6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 16:35:15 +0200 Subject: [PATCH 301/538] Clarifying semantics for ActorSystem.registerOnTermination --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index af7313b41e..721375adda 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -305,8 +305,9 @@ abstract class ActorSystem extends ActorRefFactory { implicit def dispatcher: MessageDispatcher /** - * Register a block of code (callback) to run after all actors in this actor system have - * been stopped. Multiple code blocks may be registered by calling this method multiple times. + * Register a block of code (callback) to run after ActorSystem.shutdown has been issued and + * all actors in this actor system have been stopped. + * Multiple code blocks may be registered by calling this method multiple times. * The callbacks will be run sequentially in reverse order of registration, i.e. * last registration is run first. * @@ -317,8 +318,9 @@ abstract class ActorSystem extends ActorRefFactory { def registerOnTermination[T](code: ⇒ T): Unit /** - * Register a block of code (callback) to run after all actors in this actor system have - * been stopped. Multiple code blocks may be registered by calling this method multiple times. + * Register a block of code (callback) to run after ActorSystem.shutdown has been issued and + * all actors in this actor system have been stopped. + * Multiple code blocks may be registered by calling this method multiple times. * The callbacks will be run sequentially in reverse order of registration, i.e. * last registration is run first. 
* From 60c11cab7bc4e46bb230e00eeb8070f0862339bf Mon Sep 17 00:00:00 2001 From: viktorklang Date: Thu, 7 Jun 2012 00:46:01 +0300 Subject: [PATCH 302/538] Adding missing slash --- akka-docs/general/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-docs/general/configuration.rst b/akka-docs/general/configuration.rst index 3be3704b22..1f3f051614 100644 --- a/akka-docs/general/configuration.rst +++ b/akka-docs/general/configuration.rst @@ -65,7 +65,7 @@ When using JarJar, OneJar, Assembly or any jar-bundler Akka's configuration approach relies heavily on the notion of every module/jar having its own reference.conf file, all of these will be discovered by the configuration and loaded. Unfortunately this also means - that if you put merge multiple jars into the same jar, you need to merge all the + that if you put/merge multiple jars into the same jar, you need to merge all the reference.confs as well. Otherwise all defaults will be lost and Akka will not function. 
Custom application.conf From bc289df018150c7139bc81f3625220a792d306ce Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 5 Jun 2012 22:16:15 +0200 Subject: [PATCH 303/538] Unit tests of Cluster, see 2163 * ClusterSpec - Test gossiping rules for deputies and unreachable - Fix strange/wrong probabilites for gossip to unreachable and deputy nodes - Fix lost order of Members when using map (without .toSeq) on the members SortedSet * MemberSpec - Test equals, hashCode * GossipSpec - Test member merge by status prio - Fix bug in member merge (groupBy was wrong) --- .../src/main/scala/akka/cluster/Cluster.scala | 114 +++++---- .../akka/cluster/MultiNodeClusterSpec.scala | 1 + .../test/scala/akka/cluster/ClusterSpec.scala | 232 ++++++++++++++++++ .../test/scala/akka/cluster/GossipSpec.scala | 42 ++++ .../test/scala/akka/cluster/MemberSpec.scala | 14 ++ 5 files changed, 360 insertions(+), 43 deletions(-) create mode 100644 akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala create mode 100644 akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index c16a34a2ca..935df0acce 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -180,7 +180,7 @@ case class GossipOverview( */ case class Gossip( overview: GossipOverview = GossipOverview(), - members: SortedSet[Member], // sorted set of members with their status, sorted by name + members: SortedSet[Member], // sorted set of members with their status, sorted by address meta: Map[String, Array[Byte]] = Map.empty[String, Array[Byte]], version: VectorClock = VectorClock()) // vector clock version extends ClusterMessage // is a serializable cluster message @@ -214,12 +214,8 @@ case class Gossip( // 1. merge vector clocks val mergedVClock = this.version merge that.version - // 2. 
group all members by Address => Vector[Member] - var membersGroupedByAddress = Map.empty[Address, Vector[Member]] - (this.members ++ that.members) foreach { m ⇒ - val ms = membersGroupedByAddress.get(m.address).getOrElse(Vector.empty[Member]) - membersGroupedByAddress += (m.address -> (ms :+ m)) - } + // 2. group all members by Address => Seq[Member] + val membersGroupedByAddress = (this.members.toSeq ++ that.members.toSeq).groupBy(_.address) // 3. merge members by selecting the single Member with highest MemberStatus out of the Member groups val mergedMembers = @@ -252,10 +248,9 @@ case class Gossip( * Manages routing of the different cluster commands. * Instantiated as a single instance for each Cluster - e.g. commands are serialized to Cluster message after message. */ -final class ClusterCommandDaemon extends Actor { +private[akka] final class ClusterCommandDaemon(cluster: Cluster) extends Actor { import ClusterAction._ - val cluster = Cluster(context.system) val log = Logging(context.system, this) def receive = { @@ -273,9 +268,8 @@ final class ClusterCommandDaemon extends Actor { * Pooled and routed with N number of configurable instances. * Concurrent access to Cluster. */ -final class ClusterGossipDaemon extends Actor { +private[akka] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { val log = Logging(context.system, this) - val cluster = Cluster(context.system) def receive = { case GossipEnvelope(sender, gossip) ⇒ cluster.receive(sender, gossip) @@ -287,13 +281,13 @@ final class ClusterGossipDaemon extends Actor { /** * Supervisor managing the different Cluster daemons. 
*/ -final class ClusterDaemonSupervisor extends Actor { +private[akka] final class ClusterDaemonSupervisor(cluster: Cluster) extends Actor { val log = Logging(context.system, this) - val cluster = Cluster(context.system) - private val commands = context.actorOf(Props[ClusterCommandDaemon], "commands") + private val commands = context.actorOf(Props(new ClusterCommandDaemon(cluster)), "commands") private val gossip = context.actorOf( - Props[ClusterGossipDaemon].withRouter(RoundRobinRouter(cluster.clusterSettings.NrOfGossipDaemons)), "gossip") + Props(new ClusterGossipDaemon(cluster)).withRouter( + RoundRobinRouter(cluster.clusterSettings.NrOfGossipDaemons)), "gossip") def receive = Actor.emptyBehavior @@ -396,7 +390,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // create superisor for daemons under path "/system/cluster" private val clusterDaemons = { - val createChild = CreateChild(Props[ClusterDaemonSupervisor], "cluster") + val createChild = CreateChild(Props(new ClusterDaemonSupervisor(this)), "cluster") Await.result(system.systemGuardian ? createChild, defaultTimeout.duration) match { case a: ActorRef ⇒ a case e: Exception ⇒ throw e @@ -794,9 +788,11 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } /** + * INTERNAL API + * * Gossips latest gossip to an address. */ - private def gossipTo(address: Address): Unit = { + protected def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection) connection ! GossipEnvelope(self, latestGossip) @@ -805,23 +801,43 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Gossips latest gossip to a random member in the set of members passed in as argument. * - * @return 'true' if it gossiped to a "deputy" member. 
+ * @return the used [[akka.actor.Address] if any */ - private def gossipToRandomNodeOf(addresses: Iterable[Address]): Boolean = { + private def gossipToRandomNodeOf(addresses: IndexedSeq[Address]): Option[Address] = { log.debug("Cluster Node [{}] - Selecting random node to gossip to [{}]", selfAddress, addresses.mkString(", ")) - if (addresses.isEmpty) false - else { - val peers = addresses filter (_ != selfAddress) // filter out myself - val peer = selectRandomNode(peers) - gossipTo(peer) - deputyNodes exists (peer == _) + val peers = addresses filterNot (_ == selfAddress) // filter out myself + val peer = selectRandomNode(peers) + peer foreach gossipTo + peer + } + + /** + * INTERNAL API + */ + protected[akka] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = + (membersSize + unreachableSize) match { + case 0 ⇒ 0.0 + case sum ⇒ unreachableSize.toDouble / sum + } + + /** + * INTERNAL API + */ + protected[akka] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { + if (nrOfDeputyNodes > membersSize) 1.0 + else if (nrOfDeputyNodes == 0) 0.0 + else (membersSize + unreachableSize) match { + case 0 ⇒ 0.0 + case sum ⇒ (nrOfDeputyNodes + unreachableSize).toDouble / sum } } /** + * INTERNAL API + * * Initates a new round of gossip. 
*/ - private def gossip(): Unit = { + private[akka] def gossip(): Unit = { val localState = state.get if (isSingletonCluster(localState)) { @@ -833,38 +849,42 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) val localGossip = localState.latestGossip - val localMembers = localGossip.members + // important to not accidentally use `map` of the SortedSet, since the original order is not preserved + val localMembers = localGossip.members.toIndexedSeq val localMembersSize = localMembers.size + val localMemberAddresses = localMembers map { _.address } - val localUnreachableMembers = localGossip.overview.unreachable + val localUnreachableMembers = localGossip.overview.unreachable.toIndexedSeq val localUnreachableSize = localUnreachableMembers.size // 1. gossip to alive members - val gossipedToDeputy = gossipToRandomNodeOf(localMembers map { _.address }) + val gossipedToAlive = gossipToRandomNodeOf(localMemberAddresses) // 2. gossip to unreachable members if (localUnreachableSize > 0) { - val probability: Double = localUnreachableSize / (localMembersSize + 1) - if (ThreadLocalRandom.current.nextDouble() < probability) gossipToRandomNodeOf(localUnreachableMembers.map(_.address)) + val probability = gossipToUnreachableProbablity(localMembersSize, localUnreachableSize) + if (ThreadLocalRandom.current.nextDouble() < probability) + gossipToRandomNodeOf(localUnreachableMembers.map(_.address)) } // 3. 
gossip to a deputy nodes for facilitating partition healing - val deputies = deputyNodes - if ((!gossipedToDeputy || localMembersSize < 1) && deputies.nonEmpty) { - if (localMembersSize == 0) gossipToRandomNodeOf(deputies) - else { - val probability = 1.0 / localMembersSize + localUnreachableSize - if (ThreadLocalRandom.current.nextDouble() <= probability) gossipToRandomNodeOf(deputies) - } + val deputies = deputyNodes(localMemberAddresses) + val alreadyGossipedToDeputy = gossipedToAlive.map(deputies.contains(_)).getOrElse(false) + if ((!alreadyGossipedToDeputy || localMembersSize < NrOfDeputyNodes) && deputies.nonEmpty) { + val probability = gossipToDeputyProbablity(localMembersSize, localUnreachableSize, NrOfDeputyNodes) + if (ThreadLocalRandom.current.nextDouble() < probability) + gossipToRandomNodeOf(deputies) } } } /** + * INTERNAL API + * * Reaps the unreachable members (moves them to the 'unreachable' list in the cluster overview) according to the failure detector's verdict. */ @tailrec - final private def reapUnreachableMembers(): Unit = { + final private[akka] def reapUnreachableMembers(): Unit = { val localState = state.get if (!isSingletonCluster(localState) && isAvailable(localState)) { @@ -905,10 +925,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } /** + * INTERNAL API + * * Runs periodic leader actions, such as auto-downing unreachable nodes, assigning partitions etc. 
*/ @tailrec - final private def leaderActions(): Unit = { + final private[akka] def leaderActions(): Unit = { val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members @@ -1082,11 +1104,17 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private def clusterGossipConnectionFor(address: Address): ActorRef = system.actorFor(RootActorPath(address) / "system" / "cluster" / "gossip") /** - * Gets an Iterable with the addresses of a all the 'deputy' nodes - excluding this node if part of the group. + * Gets the addresses of a all the 'deputy' nodes - excluding this node if part of the group. */ - private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take NrOfDeputyNodes filter (_ != selfAddress) + private def deputyNodes(addresses: IndexedSeq[Address]): IndexedSeq[Address] = + addresses drop 1 take NrOfDeputyNodes filterNot (_ == selfAddress) - private def selectRandomNode(addresses: Iterable[Address]): Address = addresses.toSeq(ThreadLocalRandom.current nextInt addresses.size) + /** + * INTERNAL API + */ + protected def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = + if (addresses.isEmpty) None + else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) private def isSingletonCluster(currentState: State): Boolean = currentState.latestGossip.members.size == 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 113064e13c..7f7d60fcdc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -20,6 +20,7 @@ object MultiNodeClusterSpec { leader-actions-interval = 200 ms unreachable-nodes-reaper-interval = 200 ms periodic-tasks-initial-delay = 300 ms + nr-of-deputy-nodes = 2 } akka.test 
{ single-expect-default = 5 s diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala new file mode 100644 index 0000000000..fdc3095f74 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -0,0 +1,232 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.testkit.AkkaSpec +import akka.util.duration._ +import akka.util.Duration +import akka.actor.ExtendedActorSystem +import akka.actor.Address +import java.util.concurrent.atomic.AtomicInteger +import org.scalatest.BeforeAndAfter + +object ClusterSpec { + val config = """ + akka.cluster { + auto-down = off + nr-of-deputy-nodes = 3 + periodic-tasks-initial-delay = 120 seconds // turn off scheduled tasks + } + akka.actor.provider = "akka.remote.RemoteActorRefProvider" + akka.remote.netty.port = 0 + akka.loglevel = DEBUG + """ + + case class GossipTo(address: Address) +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { + import ClusterSpec._ + + val deterministicRandom = new AtomicInteger + + val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem]) { + + override def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = { + if (addresses.isEmpty) None + else Some(addresses.toSeq(deterministicRandom.getAndIncrement % addresses.size)) + } + + override def gossipTo(address: Address): Unit = { + if (address == self.address) { + super.gossipTo(address) + } + // represent the gossip with a message to be used in asserts + testActor ! 
GossipTo(address) + } + + @volatile + var _gossipToUnreachableProbablity = 0.0 + + override def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = { + if (_gossipToUnreachableProbablity < 0.0) super.gossipToUnreachableProbablity(membersSize, unreachableSize) + else _gossipToUnreachableProbablity + } + + @volatile + var _gossipToDeputyProbablity = 0.0 + + override def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, deputySize: Int): Double = { + if (_gossipToDeputyProbablity < 0.0) super.gossipToDeputyProbablity(membersSize, unreachableSize, deputySize) + else _gossipToDeputyProbablity + } + + @volatile + var _unavailable: Set[Address] = Set.empty + + override val failureDetector = new AccrualFailureDetector( + system, selfAddress, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) { + + override def isAvailable(connection: Address): Boolean = { + if (_unavailable.contains(connection)) false + else super.isAvailable(connection) + } + } + + } + + val selfAddress = cluster.self.address + val addresses = IndexedSeq( + selfAddress, + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 1), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 2), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 3), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 4), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 5)) + + def memberStatus(address: Address): Option[MemberStatus] = + cluster.latestGossip.members.collectFirst { case m if m.address == address ⇒ m.status } + + before { + cluster._gossipToUnreachableProbablity = 0.0 + cluster._gossipToDeputyProbablity = 0.0 + cluster._unavailable = Set.empty + deterministicRandom.set(0) + } + + "A Cluster" must { + + "initially be singleton cluster and reach convergence after first gossip" in { + cluster.isSingletonCluster must be(true) + 
cluster.latestGossip.members.map(_.address) must be(Set(selfAddress)) + memberStatus(selfAddress) must be(Some(MemberStatus.Joining)) + cluster.convergence.isDefined must be(false) + cluster.gossip() + expectMsg(GossipTo(selfAddress)) + awaitCond(cluster.convergence.isDefined) + memberStatus(selfAddress) must be(Some(MemberStatus.Joining)) + cluster.leaderActions() + memberStatus(selfAddress) must be(Some(MemberStatus.Up)) + } + + "accept a joining node" in { + cluster.joining(addresses(1)) + cluster.latestGossip.members.map(_.address) must be(Set(selfAddress, addresses(1))) + memberStatus(addresses(1)) must be(Some(MemberStatus.Joining)) + // FIXME why is it still convergence immediately after joining? + //cluster.convergence.isDefined must be(false) + } + + "accept a few more joining nodes" in { + for (a ← addresses.drop(2)) { + cluster.joining(a) + memberStatus(a) must be(Some(MemberStatus.Joining)) + } + cluster.latestGossip.members.map(_.address) must be(addresses.toSet) + } + + "order members by host and port" in { + // note the importance of using toSeq before map, otherwise it will not preserve the order + cluster.latestGossip.members.toSeq.map(_.address) must be(addresses.toSeq) + } + + "gossip to random live node" in { + cluster.latestGossip.members + cluster.gossip() + cluster.gossip() + cluster.gossip() + cluster.gossip() + + expectMsg(GossipTo(addresses(1))) + expectMsg(GossipTo(addresses(2))) + expectMsg(GossipTo(addresses(3))) + expectMsg(GossipTo(addresses(4))) + + expectNoMsg(1 second) + } + + "use certain probability for gossiping to unreachable node depending on the number of unreachable and live nodes" in { + cluster._gossipToUnreachableProbablity = -1.0 // use real impl + cluster.gossipToUnreachableProbablity(10, 1) must be < (cluster.gossipToUnreachableProbablity(9, 1)) + cluster.gossipToUnreachableProbablity(10, 1) must be < (cluster.gossipToUnreachableProbablity(10, 2)) + cluster.gossipToUnreachableProbablity(10, 5) must be < 
(cluster.gossipToUnreachableProbablity(10, 9)) + cluster.gossipToUnreachableProbablity(0, 10) must be <= (1.0) + cluster.gossipToUnreachableProbablity(1, 10) must be <= (1.0) + cluster.gossipToUnreachableProbablity(10, 0) must be(0.0 plusOrMinus (0.0001)) + cluster.gossipToUnreachableProbablity(0, 0) must be(0.0 plusOrMinus (0.0001)) + } + + "use certain probability for gossiping to deputy node depending on the number of unreachable and live nodes" in { + cluster._gossipToDeputyProbablity = -1.0 // use real impl + cluster.gossipToDeputyProbablity(10, 1, 2) must be < (cluster.gossipToDeputyProbablity(9, 1, 2)) + cluster.gossipToDeputyProbablity(10, 1, 2) must be < (cluster.gossipToDeputyProbablity(10, 2, 2)) + cluster.gossipToDeputyProbablity(10, 1, 2) must be < (cluster.gossipToDeputyProbablity(10, 2, 3)) + cluster.gossipToDeputyProbablity(10, 5, 5) must be < (cluster.gossipToDeputyProbablity(10, 9, 5)) + cluster.gossipToDeputyProbablity(0, 10, 0) must be <= (1.0) + cluster.gossipToDeputyProbablity(1, 10, 1) must be <= (1.0) + cluster.gossipToDeputyProbablity(10, 0, 0) must be(0.0 plusOrMinus (0.0001)) + cluster.gossipToDeputyProbablity(0, 0, 0) must be(0.0 plusOrMinus (0.0001)) + cluster.gossipToDeputyProbablity(4, 0, 4) must be(1.0 plusOrMinus (0.0001)) + cluster.gossipToDeputyProbablity(3, 7, 4) must be(1.0 plusOrMinus (0.0001)) + } + + "gossip to duputy node" in { + cluster._gossipToDeputyProbablity = 1.0 // always + + // we have configured 2 deputy nodes + cluster.gossip() // 1 is deputy + cluster.gossip() // 2 is deputy + cluster.gossip() // 3 is deputy + cluster.gossip() // 4 is not deputy, and therefore a deputy is also used + + expectMsg(GossipTo(addresses(1))) + expectMsg(GossipTo(addresses(2))) + expectMsg(GossipTo(addresses(3))) + expectMsg(GossipTo(addresses(4))) + // and the extra gossip to deputy + expectMsgAnyOf(GossipTo(addresses(1)), GossipTo(addresses(2)), GossipTo(addresses(3))) + + expectNoMsg(1 second) + + } + + "gossip to random unreachable 
node" in { + val dead = Set(addresses(1)) + cluster._unavailable = dead + cluster._gossipToUnreachableProbablity = 1.0 // always + + cluster.reapUnreachableMembers() + cluster.latestGossip.overview.unreachable.map(_.address) must be(dead) + + cluster.gossip() + + expectMsg(GossipTo(addresses(2))) // first available + expectMsg(GossipTo(addresses(1))) // the unavailable + + expectNoMsg(1 second) + } + + "gossip to random deputy node if number of live nodes is less than number of deputy nodes" in { + cluster._gossipToDeputyProbablity = -1.0 // real impl + // 0 and 2 still alive + val dead = Set(addresses(1), addresses(3), addresses(4), addresses(5)) + cluster._unavailable = dead + + cluster.reapUnreachableMembers() + cluster.latestGossip.overview.unreachable.map(_.address) must be(dead) + + for (n ← 1 to 20) { + cluster.gossip() + expectMsg(GossipTo(addresses(2))) // the only available + // and always to one of the 3 deputies + expectMsgAnyOf(GossipTo(addresses(1)), GossipTo(addresses(2)), GossipTo(addresses(3))) + } + + expectNoMsg(1 second) + + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala new file mode 100644 index 0000000000..77cd0c52ba --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -0,0 +1,42 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import akka.actor.Address +import scala.collection.immutable.SortedSet + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class GossipSpec extends WordSpec with MustMatchers { + + "A Gossip" must { + + "merge members by status priority" in { + import MemberStatus._ + val a1 = Member(Address("akka", "sys", "a", 2552), Up) + val a2 = Member(Address("akka", "sys", "a", 2552), Joining) + val b1 = Member(Address("akka", "sys", "b", 2552), Up) + val b2 = Member(Address("akka", "sys", "b", 2552), Removed) + val c1 = Member(Address("akka", "sys", "c", 2552), Leaving) + val c2 = Member(Address("akka", "sys", "c", 2552), Up) + val d1 = Member(Address("akka", "sys", "d", 2552), Leaving) + val d2 = Member(Address("akka", "sys", "d", 2552), Removed) + + val g1 = Gossip(members = SortedSet(a1, b1, c1, d1)) + val g2 = Gossip(members = SortedSet(a2, b2, c2, d2)) + + val merged1 = g1 merge g2 + merged1.members must be(SortedSet(a1, b2, c1, d2)) + merged1.members.toSeq.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + + val merged2 = g2 merge g1 + merged2.members must be(SortedSet(a1, b2, c1, d2)) + merged2.members.toSeq.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + + } + + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala index 050407577e..bc1f70ae86 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala @@ -8,6 +8,7 @@ import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers import akka.actor.Address import scala.util.Random +import scala.collection.immutable.SortedSet @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class MemberSpec extends WordSpec with MustMatchers { @@ -26,6 +27,19 @@ class MemberSpec extends WordSpec with 
MustMatchers { val expected = IndexedSeq(m1, m2, m3, m4, m5) val shuffled = Random.shuffle(expected) shuffled.sorted must be(expected) + (SortedSet.empty[Member] ++ shuffled).toIndexedSeq must be(expected) + } + + "have stable equals and hashCode" in { + val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Joining) + val m2 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) + val m3 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) + + m1 must be(m2) + m1.hashCode must be(m2.hashCode) + + m3 must not be (m2) + m3 must not be (m1) } } } From 502bf5f8d59c091894ee70b8ba1a8981cc3ba93b Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 14:19:38 +0200 Subject: [PATCH 304/538] unbreak config check in RoutedActorRef, which blocked&leaked threads --- .../src/main/scala/akka/routing/Routing.scala | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 2f585a1790..21b14a6a3d 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -29,12 +29,6 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _supervisor, _path) { - // verify that a BalancingDispatcher is not used with a Router - if (_props.routerConfig != NoRouter && _system.dispatchers.isBalancingDispatcher(_props.routerConfig.routerDispatcher)) - throw new ConfigurationException( - "Configuration for actor [" + _path.toString + - "] is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") - /* * CAUTION: RoutedActorRef is PROBLEMATIC * ====================================== @@ -47,6 +41,13 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup * before we are done with them: lock the monitor of the actor cell (hence the * override 
of newActorCell) and use that to block the Router constructor for * as long as it takes to setup the RoutedActorRef itself. + * + * ===> I M P O R T A N T N O T I C E <=== + * + * DO NOT THROW ANY EXCEPTIONS BEFORE THE FOLLOWING TRY-BLOCK WITHOUT + * EXITING THE MONITOR OF THE actorCell! + * + * This is important, just don’t do it! No kidding. */ override def newActorCell( system: ActorSystemImpl, @@ -74,6 +75,14 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup val route = try { + // verify that a BalancingDispatcher is not used with a Router + if (_props.routerConfig != NoRouter && _system.dispatchers.isBalancingDispatcher(_props.routerConfig.routerDispatcher)) { + actorContext.stop(actorContext.self) + throw new ConfigurationException( + "Configuration for actor [" + _path.toString + + "] is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") + } + _routeeProvider = routerConfig.createRouteeProvider(actorContext) val r = routerConfig.createRoute(routeeProps, routeeProvider) // initial resize, before message send From 4e9a658609c4eb3c72d4332589fd109c1be52b93 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 15:07:19 +0200 Subject: [PATCH 305/538] remove unused _receiveTimeout constructor args (ActorCell and LocalActorRef) --- .../src/main/scala/akka/actor/ActorCell.scala | 6 ++---- .../src/main/scala/akka/actor/ActorRef.scala | 14 ++++---------- .../main/scala/akka/actor/ActorRefProvider.scala | 6 ++++++ .../src/main/scala/akka/routing/Routing.scala | 5 ++--- .../src/main/scala/akka/testkit/TestActorRef.scala | 9 ++------- 5 files changed, 16 insertions(+), 24 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 9cb2cb674a..51e223e73f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -316,8 
+316,7 @@ private[akka] class ActorCell( val system: ActorSystemImpl, val self: InternalActorRef, val props: Props, - @volatile var parent: InternalActorRef, - /*no member*/ _receiveTimeout: Option[Duration]) extends UntypedActorContext { + @volatile var parent: InternalActorRef) extends UntypedActorContext { import AbstractActorCell.mailboxOffset import ActorCell._ @@ -351,8 +350,7 @@ private[akka] class ActorCell( /** * In milliseconds */ - var receiveTimeoutData: (Long, Cancellable) = - if (_receiveTimeout.isDefined) (_receiveTimeout.get.toMillis, emptyCancellable) else emptyReceiveTimeoutData + var receiveTimeoutData: (Long, Cancellable) = emptyReceiveTimeoutData @volatile var childrenRefs: ChildrenContainer = EmptyChildrenContainer diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 30b1ccf998..861df570b7 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -228,8 +228,7 @@ private[akka] class LocalActorRef private[akka] ( _props: Props, _supervisor: InternalActorRef, override val path: ActorPath, - val systemService: Boolean = false, - _receiveTimeout: Option[Duration] = None) + val systemService: Boolean = false) extends InternalActorRef with LocalRef { /* @@ -242,16 +241,11 @@ private[akka] class LocalActorRef private[akka] ( * us to use purely factory methods for creating LocalActorRefs. 
*/ @volatile - private var actorCell = newActorCell(_system, this, _props, _supervisor, _receiveTimeout) + private var actorCell = newActorCell(_system, this, _props, _supervisor) actorCell.start() - protected def newActorCell( - system: ActorSystemImpl, - ref: InternalActorRef, - props: Props, - supervisor: InternalActorRef, - receiveTimeout: Option[Duration]): ActorCell = - new ActorCell(system, ref, props, supervisor, receiveTimeout) + protected def newActorCell(system: ActorSystemImpl, ref: InternalActorRef, props: Props, supervisor: InternalActorRef): ActorCell = + new ActorCell(system, ref, props, supervisor) protected def actorContext: ActorContext = actorCell diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index a985a6f8d5..960e8a37e5 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -552,3 +552,9 @@ class LocalActorRefProvider( def getExternalAddressFor(addr: Address): Option[Address] = if (addr == rootPath.address) Some(addr) else None } + +private[akka] class GuardianCell(_system: ActorSystemImpl, _self: InternalActorRef, _props: Props, _parent: InternalActorRef) + extends ActorCell(_system, _self, _props, _parent) { + +} + diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 21b14a6a3d..bcd92794da 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -53,9 +53,8 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup system: ActorSystemImpl, ref: InternalActorRef, props: Props, - supervisor: InternalActorRef, - receiveTimeout: Option[Duration]): ActorCell = { - val cell = super.newActorCell(system, ref, props, supervisor, receiveTimeout) + supervisor: InternalActorRef): ActorCell = { + val cell = 
super.newActorCell(system, ref, props, supervisor) Unsafe.instance.monitorEnter(cell) cell } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 279c728e80..c0442d45d6 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -36,13 +36,8 @@ class TestActorRef[T <: Actor]( import TestActorRef.InternalGetActor - override def newActorCell( - system: ActorSystemImpl, - ref: InternalActorRef, - props: Props, - supervisor: InternalActorRef, - receiveTimeout: Option[Duration]): ActorCell = - new ActorCell(system, ref, props, supervisor, receiveTimeout) { + override def newActorCell(system: ActorSystemImpl, ref: InternalActorRef, props: Props, supervisor: InternalActorRef): ActorCell = + new ActorCell(system, ref, props, supervisor) { override def autoReceiveMessage(msg: Envelope) { msg.message match { case InternalGetActor ⇒ sender ! 
actor From 3271fddd92cd3a0a450fec682aa15b2d2212a2c1 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 16:58:24 +0200 Subject: [PATCH 306/538] do not discard system messages silently after stop --- .../src/main/scala/akka/actor/ActorCell.scala | 2 +- .../main/scala/akka/dispatch/Mailbox.scala | 40 ++++++++++++++----- 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 51e223e73f..9dbe610195 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -671,7 +671,7 @@ private[akka] class ActorCell( checkReceiveTimeout // Reschedule receive timeout } - private final def handleInvokeFailure(t: Throwable, message: String): Unit = try { + final def handleInvokeFailure(t: Throwable, message: String): Unit = try { dispatcher.reportFailure(new LogEventException(Error(t, self.path.toString, clazz(actor), message), t)) // prevent any further messages to be processed until the actor has been restarted dispatcher.suspend(this) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index d26e7b2afc..11e58ede7e 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -209,20 +209,38 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } final def processAllSystemMessages() { + var failure: Throwable = null var nextMessage = systemDrain(null) - try { - while ((nextMessage ne null) && !isClosed) { - if (debug) println(actor.self + " processing system message " + nextMessage + " with " + actor.childrenRefs) - actor systemInvoke nextMessage - nextMessage = nextMessage.next - // don’t ever execute normal message when system message present! 
- if (nextMessage eq null) nextMessage = systemDrain(null) + while ((nextMessage ne null) && !isClosed) { + val msg = nextMessage + nextMessage = nextMessage.next + msg.next = null + if (debug) println(actor.self + " processing system message " + msg + " with " + actor.childrenRefs) + try actor systemInvoke msg + catch { + case NonFatal(e) ⇒ + if (failure eq null) failure = e + actor.system.eventStream.publish(Error(e, actor.self.path.toString, this.getClass, "exception during processing system message " + msg + ": " + e.getMessage)) } - } catch { - case NonFatal(e) ⇒ - actor.system.eventStream.publish(Error(e, actor.self.path.toString, this.getClass, "exception during processing system messages, dropping " + SystemMessage.size(nextMessage) + " messages!")) - throw e + // don’t ever execute normal message when system message present! + if ((nextMessage eq null) && !isClosed) nextMessage = systemDrain(null) } + /* + * if we closed the mailbox, we must dump the remaining system messages + * to deadLetters (this is essential for DeathWatch) + */ + while (nextMessage ne null) { + val msg = nextMessage + nextMessage = nextMessage.next + msg.next = null + try actor.systemImpl.deadLetterMailbox.systemEnqueue(actor.self, msg) + catch { + case NonFatal(e) ⇒ actor.system.eventStream.publish( + Error(e, actor.self.path.toString, this.getClass, "error while enqueuing " + msg + " to deadLetters: " + e.getMessage)) + } + } + // if something happened while processing, fail this actor (most probable: exception in supervisorStrategy) + if (failure ne null) actor.handleInvokeFailure(failure, failure.getMessage) } @inline From bff03676feb08acdf1c3e2cc98d9d1f1ba3159ee Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 10:45:45 +0200 Subject: [PATCH 307/538] =?UTF-8?q?add=20java=20testing=20doc=20chapter=20?= =?UTF-8?q?and=20link=20to=20Munish=E2=80=99s=20blog?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
akka-docs/java/index.rst | 1 + akka-docs/java/testing.rst | 14 ++++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 akka-docs/java/testing.rst diff --git a/akka-docs/java/index.rst b/akka-docs/java/index.rst index 4022092dba..669bd7c362 100644 --- a/akka-docs/java/index.rst +++ b/akka-docs/java/index.rst @@ -24,4 +24,5 @@ Java API extending-akka zeromq microkernel + testing howto diff --git a/akka-docs/java/testing.rst b/akka-docs/java/testing.rst new file mode 100644 index 0000000000..d49ba2512f --- /dev/null +++ b/akka-docs/java/testing.rst @@ -0,0 +1,14 @@ +.. _akka-testkit-java: + +############################## +Testing Actor Systems (Java) +############################## + +Due to the conciseness of test DSLs available for Scala, it may be a good idea +to write the test suite in that language even if the main project is written in +Java. If that is not desirable, you can also use :class:`TestKit` and friends +from Java, albeit with more verbose syntax Munish Gupta has `published a nice +post `_ +showing several patterns you may find useful, and for reference documentation +please refer to :ref:`akka-testkit` until that section has been ported over to +cover Java in full. From 921d900f99c326133b6ab4b3ce552efaaf389569 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 12:03:34 +0200 Subject: [PATCH 308/538] Change protected to private[akka], see #2163 --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 935df0acce..ce2e01cbca 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -792,7 +792,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * * Gossips latest gossip to an address. 
*/ - protected def gossipTo(address: Address): Unit = { + private[akka] def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection) connection ! GossipEnvelope(self, latestGossip) @@ -814,7 +814,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * INTERNAL API */ - protected[akka] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = + private[akka] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = (membersSize + unreachableSize) match { case 0 ⇒ 0.0 case sum ⇒ unreachableSize.toDouble / sum @@ -823,7 +823,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * INTERNAL API */ - protected[akka] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { + private[akka] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { if (nrOfDeputyNodes > membersSize) 1.0 else if (nrOfDeputyNodes == 0) 0.0 else (membersSize + unreachableSize) match { @@ -1112,7 +1112,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * INTERNAL API */ - protected def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = + private[akka] def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) From 9ca794dcc851698a920ef6bd4cd395d1910bacbb Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 13:32:12 +0200 Subject: [PATCH 309/538] Totally skip running multi-jvm tests when long-running is excluded, see #2194 --- .../src/test/scala/akka/cluster/ClusterSpec.scala | 2 +- project/AkkaBuild.scala | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git 
a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index fdc3095f74..d3d1d6d0a2 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -21,7 +21,7 @@ object ClusterSpec { } akka.actor.provider = "akka.remote.RemoteActorRefProvider" akka.remote.netty.port = 0 - akka.loglevel = DEBUG + # akka.loglevel = DEBUG """ case class GossipTo(address: Address) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 23d51fe77c..f2535d8b93 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -338,6 +338,11 @@ object AkkaBuild extends Build { // for running only tests by tag use system property: -Dakka.test.tags.only= lazy val useOnlyTestTags: Set[String] = systemPropertyAsSeq("akka.test.tags.only").toSet + def executeMultiJvmTests: Boolean = { + useOnlyTestTags.contains("long-running") || + !(useExcludeTestTags -- useIncludeTestTags).contains("long-running") + } + def systemPropertyAsSeq(name: String): Seq[String] = { val prop = System.getProperty(name, "") if (prop.isEmpty) Seq.empty else prop.split(",").toSeq @@ -402,20 +407,21 @@ object AkkaBuild extends Build { lazy val multiJvmSettings = MultiJvmPlugin.settings ++ inConfig(MultiJvm)(ScalariformPlugin.scalariformSettings) ++ Seq( compileInputs in MultiJvm <<= (compileInputs in MultiJvm) dependsOn (ScalariformKeys.format in MultiJvm), - ScalariformKeys.preferences in MultiJvm := formattingPreferences, - if (multiNodeEnabled) + ScalariformKeys.preferences in MultiJvm := formattingPreferences) ++ + (if (multiNodeEnabled) executeTests in Test <<= ((executeTests in Test), (multiNodeExecuteTests in MultiJvm)) map { case (tr, mr) => val r = tr._2 ++ mr._2 (Tests.overall(r.values), r) } - else + else if (executeMultiJvmTests) executeTests in Test <<= ((executeTests in Test), (executeTests in MultiJvm)) map { case (tr, mr) => val r = tr._2 ++ mr._2 
(Tests.overall(r.values), r) } - ) + else Seq.empty) + lazy val mimaSettings = mimaDefaultSettings ++ Seq( // MiMa From 0aa81229e5ab9f408082a56d9262f9060d2def63 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 14:12:13 +0200 Subject: [PATCH 310/538] Fix feedback, see #2194 --- project/AkkaBuild.scala | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index f2535d8b93..e4a865b5a7 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -408,20 +408,21 @@ object AkkaBuild extends Build { lazy val multiJvmSettings = MultiJvmPlugin.settings ++ inConfig(MultiJvm)(ScalariformPlugin.scalariformSettings) ++ Seq( compileInputs in MultiJvm <<= (compileInputs in MultiJvm) dependsOn (ScalariformKeys.format in MultiJvm), ScalariformKeys.preferences in MultiJvm := formattingPreferences) ++ - (if (multiNodeEnabled) - executeTests in Test <<= ((executeTests in Test), (multiNodeExecuteTests in MultiJvm)) map { - case (tr, mr) => - val r = tr._2 ++ mr._2 - (Tests.overall(r.values), r) - } - else if (executeMultiJvmTests) - executeTests in Test <<= ((executeTests in Test), (executeTests in MultiJvm)) map { - case (tr, mr) => - val r = tr._2 ++ mr._2 - (Tests.overall(r.values), r) - } - else Seq.empty) - + ((executeMultiJvmTests, multiNodeEnabled) match { + case (true, true) => + executeTests in Test <<= ((executeTests in Test), (multiNodeExecuteTests in MultiJvm)) map { + case ((_, testResults), (_, multiNodeResults)) => + val results = testResults ++ multiNodeResults + (Tests.overall(results.values), results) + } + case (true, false) => + executeTests in Test <<= ((executeTests in Test), (executeTests in MultiJvm)) map { + case ((_, testResults), (_, multiNodeResults)) => + val results = testResults ++ multiNodeResults + (Tests.overall(results.values), results) + } + case (false, _) => Seq.empty + }) lazy val mimaSettings = mimaDefaultSettings ++ Seq( // 
MiMa From 72f678281ed9aad6571d2483425398213ab555c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Thu, 7 Jun 2012 14:21:14 +0200 Subject: [PATCH 311/538] Fixed wrong formatting in docs --- akka-docs/cluster/cluster.rst | 7 ++++--- akka-docs/general/message-send-semantics.rst | 6 +++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index fb53f13131..1368d7835f 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -5,8 +5,7 @@ Cluster Specification ###################### -.. note:: *This document describes the new clustering coming in Akka Coltrane and -is not available in the latest stable release)* +.. note:: *This document describes the new clustering coming in Akka Coltrane and is not available in the latest stable release)* Intro ===== @@ -304,7 +303,9 @@ node from the cluster, marking it as ``removed``. A node can also be removed forcefully by moving it directly to the ``removed`` state using the ``remove`` action. The cluster will rebalance based on the new -cluster membership. +cluster membership. This will also happen if you are shutting the system down +forcefully (through an external ``SIGKILL`` signal, ``System.exit(status)`` or +similar. If a node is unreachable then gossip convergence is not possible and therefore any ``leader`` actions are also not possible (for instance, allowing a node to diff --git a/akka-docs/general/message-send-semantics.rst b/akka-docs/general/message-send-semantics.rst index d9488d1f2b..41eb727358 100644 --- a/akka-docs/general/message-send-semantics.rst +++ b/akka-docs/general/message-send-semantics.rst @@ -48,14 +48,14 @@ At-most-once Actual transports may provide stronger semantics, but at-most-once is the semantics you should expect. 
-The alternatives would be once-and-only-once, which is extremely costly, +The alternatives would be once-and-only-once, which is extremely costly, or at-least-once which essentially requires idempotency of message processing, which is a user-level concern. Ordering is preserved on a per-sender basis ------------------------------------------- -Actor ``A1` sends messages ``M1``, ``M2``, ``M3`` to ``A2`` +Actor ``A1`` sends messages ``M1``, ``M2``, ``M3`` to ``A2`` Actor ``A3`` sends messages ``M4``, ``M5``, ``M6`` to ``A2`` This means that: @@ -66,4 +66,4 @@ This means that: 5) ``A2`` can see messages from ``A1`` interleaved with messages from ``A3`` 6) Since there is no guaranteed delivery, none, some or all of the messages may arrive to ``A2`` -.. _Erlang documentation: http://www.erlang.org/faq/academic.html \ No newline at end of file +.. _Erlang documentation: http://www.erlang.org/faq/academic.html From dbac17621f80d62e7cd9aa0d4adbc964ad2e82a6 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 15:45:10 +0200 Subject: [PATCH 312/538] Node that joins again should be ignored, see #2184 --- .../src/main/scala/akka/cluster/Cluster.scala | 31 ++++++++++--------- .../scala/akka/cluster/NodeUpSpec.scala | 30 +++++++++++++++++- 2 files changed, 45 insertions(+), 16 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index ce2e01cbca..7d1222a7ab 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -571,27 +571,28 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members - val localOverview = localGossip.overview - val localUnreachableMembers = localOverview.unreachable - // remove the node from the 'unreachable' set in case it is a DOWN node that is rejoining 
cluster - val newUnreachableMembers = localUnreachableMembers filterNot { _.address == node } - val newOverview = localOverview copy (unreachable = newUnreachableMembers) + if (!localMembers.exists(_.address == node)) { - val newMembers = localMembers + Member(node, MemberStatus.Joining) // add joining node as Joining - val newGossip = localGossip copy (overview = newOverview, members = newMembers) + // remove the node from the 'unreachable' set in case it is a DOWN node that is rejoining cluster + val newUnreachableMembers = localGossip.overview.unreachable filterNot { _.address == node } + val newOverview = localGossip.overview copy (unreachable = newUnreachableMembers) - val versionedGossip = newGossip + vclockNode - val seenVersionedGossip = versionedGossip seen selfAddress + val newMembers = localMembers + Member(node, MemberStatus.Joining) // add joining node as Joining + val newGossip = localGossip copy (overview = newOverview, members = newMembers) - val newState = localState copy (latestGossip = seenVersionedGossip) + val versionedGossip = newGossip + vclockNode + val seenVersionedGossip = versionedGossip seen selfAddress - if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update - else { - if (node != selfAddress) failureDetector heartbeat node + val newState = localState copy (latestGossip = seenVersionedGossip) - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newMembers } + if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update + else { + if (node != selfAddress) failureDetector heartbeat node + + if (convergence(newState.latestGossip).isDefined) { + newState.memberMembershipChangeListeners foreach { _ notify newMembers } + } } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index eafdf2fffd..b5fc5d626b 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -9,6 +9,8 @@ import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.duration._ +import scala.collection.immutable.SortedSet +import java.util.concurrent.atomic.AtomicReference object NodeUpMultiJvmSpec extends MultiNodeConfig { val first = role("first") @@ -33,7 +35,33 @@ abstract class NodeUpSpec awaitClusterUp(first, second) - testConductor.enter("after") + testConductor.enter("after-1") + } + + "be unaffected when joining again" taggedAs LongRunningTest in { + + val unexpected = new AtomicReference[SortedSet[Member]] + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size != 2 || members.exists(_.status != MemberStatus.Up)) + unexpected.set(members) + } + }) + testConductor.enter("listener-registered") + + runOn(second) { + cluster.join(node(first).address) + } + testConductor.enter("joined-again") + + // let it run for a while to make sure that nothing bad happens + for (n ← 1 to 20) { + 100.millis.dilated.sleep() + unexpected.get must be(null) + cluster.latestGossip.members.forall(_.status == MemberStatus.Up) + } + + testConductor.enter("after-2") } } } From 8c9d40eb00f927353eda94ccf3dcb0dae97ef302 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 15:56:59 +0200 Subject: [PATCH 313/538] Add missing assert --- akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index b5fc5d626b..f8d0a1f6e2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -58,7 +58,7 @@ abstract class NodeUpSpec 
for (n ← 1 to 20) { 100.millis.dilated.sleep() unexpected.get must be(null) - cluster.latestGossip.members.forall(_.status == MemberStatus.Up) + cluster.latestGossip.members.forall(_.status == MemberStatus.Up) must be(true) } testConductor.enter("after-2") From fcd08ed2b95a1ed438b4dcb0009695aa1b73dbc1 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 15:14:38 +0200 Subject: [PATCH 314/538] Test normal healthy cluster, see #2195 * Fix that membership listeners should only notified when something changed --- .../src/main/scala/akka/cluster/Cluster.scala | 32 ++++---- .../scala/akka/cluster/SunnyWeatherSpec.scala | 78 +++++++++++++++++++ .../akka/remote/testkit/MultiNodeSpec.scala | 2 +- 3 files changed, 92 insertions(+), 20 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 7d1222a7ab..e2b5c8e751 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -618,13 +618,16 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update else { - failureDetector heartbeat address // update heartbeat in failure detector - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newMembers } - } + if (address != selfAddress) failureDetector heartbeat address // update heartbeat in failure detector + notifyMembershipChangeListeners(localState, newState) } } + private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = + if (newState.latestGossip != oldState.latestGossip && convergence(newState.latestGossip).isDefined) { + newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } + } + 
/** * State transition to EXITING. */ @@ -698,9 +701,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) downing(address) // recur if we fail the update else { - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } - } + notifyMembershipChangeListeners(localState, newState) } } @@ -741,10 +742,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address) if (sender.address != selfAddress) failureDetector heartbeat sender.address - - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } - } + notifyMembershipChangeListeners(localState, newState) } } @@ -841,14 +839,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private[akka] def gossip(): Unit = { val localState = state.get + log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) + if (isSingletonCluster(localState)) { // gossip to myself // TODO could perhaps be optimized, no need to gossip to myself when Up? 
gossipTo(selfAddress) } else if (isAvailable(localState)) { - log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) - val localGossip = localState.latestGossip // important to not accidentally use `map` of the SortedSet, since the original order is not preserved val localMembers = localGossip.members.toIndexedSeq @@ -917,9 +915,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ else { log.info("Cluster Node [{}] - Marking node(s) as UNREACHABLE [{}]", selfAddress, newlyDetectedUnreachableMembers.mkString(", ")) - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newMembers } - } + notifyMembershipChangeListeners(localState, newState) } } } @@ -1040,9 +1036,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) leaderActions() // recur else { - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newGossip.members } - } + notifyMembershipChangeListeners(localState, newState) } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala new file mode 100644 index 0000000000..b74fdd09db --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -0,0 +1,78 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import java.util.concurrent.atomic.AtomicReference +import scala.collection.immutable.SortedSet + +object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + val fifth = role("fifth") + + commonConfig(ConfigFactory.parseString(""" + akka.cluster { + gossip-interval = 400 ms + nr-of-deputy-nodes = 0 + } + akka.loglevel = DEBUG + """)) +} + +class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec + +abstract class SunnyWeatherSpec + extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) + with MultiNodeClusterSpec { + + import SunnyWeatherMultiJvmSpec._ + + override def initialParticipants = roles.size + + "A normal cluster" must { + "be healthy" taggedAs LongRunningTest in { + + // start some + awaitClusterUp(first, second, third) + runOn(first, second, third) { + log.info("3 joined") + } + + // add a few more + awaitClusterUp(first, second, third, fourth, fifth) + log.info("5 joined") + + val unexpected = new AtomicReference[SortedSet[Member]] + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + // we don't expected any changes to the cluster + unexpected.set(members) + } + }) + + for (n ← 1 to 40) { + testConductor.enter("period-" + n) + unexpected.get must be(null) + awaitUpConvergence(roles.size) + assertLeaderIn(roles) + if (n % 5 == 0) log.info("Passed period [{}]", n) + 1.seconds.sleep + } + + testConductor.enter("after") + } + } 
+} diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 01a08da718..a0d7d5eac4 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -84,7 +84,7 @@ abstract class MultiNodeConfig { private[testkit] def deployments(node: RoleName): Seq[String] = (_deployments get node getOrElse Nil) ++ _allDeploy - private[testkit] def roles: Seq[RoleName] = _roles + def roles: Seq[RoleName] = _roles } From 2cbc04a5abf8ea3731d09a765e67595514f91629 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 7 Jun 2012 18:38:24 +0200 Subject: [PATCH 315/538] #2196 - Updating Logback dep to 1.0.0 --- project/AkkaBuild.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index e4a865b5a7..b59bec9d42 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -484,7 +484,7 @@ object Dependency { object V { val Camel = "2.8.0" - val Logback = "0.9.28" + val Logback = "1.0.0" val Netty = "3.3.0.Final" val Protobuf = "2.4.1" val ScalaStm = "0.5" From f6365e83e74d743a9e4a223d297cdb3c1dbdeb64 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 20:40:14 +0200 Subject: [PATCH 316/538] Change to logback 1.0.4, see #2198 --- akka-docs/java/logging.rst | 2 +- akka-docs/scala/logging.rst | 2 +- project/AkkaBuild.scala | 8 +------- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/akka-docs/java/logging.rst b/akka-docs/java/logging.rst index 03de58de5b..647525ba76 100644 --- a/akka-docs/java/logging.rst +++ b/akka-docs/java/logging.rst @@ -187,7 +187,7 @@ It has one single dependency; the slf4j-api jar. 
In runtime you also need a SLF4 ch.qos.logback logback-classic - 1.0.0 + 1.0.4 runtime diff --git a/akka-docs/scala/logging.rst b/akka-docs/scala/logging.rst index 4ea96722e5..8f765b4f7e 100644 --- a/akka-docs/scala/logging.rst +++ b/akka-docs/scala/logging.rst @@ -230,7 +230,7 @@ It has one single dependency; the slf4j-api jar. In runtime you also need a SLF4 .. code-block:: scala - lazy val logback = "ch.qos.logback" % "logback-classic" % "1.0.0" % "runtime" + lazy val logback = "ch.qos.logback" % "logback-classic" % "1.0.4" % "runtime" You need to enable the Slf4jEventHandler in the 'event-handlers' element in diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index b59bec9d42..736927e7c2 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -484,7 +484,7 @@ object Dependency { object V { val Camel = "2.8.0" - val Logback = "1.0.0" + val Logback = "1.0.4" val Netty = "3.3.0.Final" val Protobuf = "2.4.1" val ScalaStm = "0.5" @@ -501,12 +501,6 @@ object Dependency { val slf4jApi = "org.slf4j" % "slf4j-api" % V.Slf4j // MIT val zeroMQ = "org.zeromq" % "zeromq-scala-binding_2.9.1" % "0.0.6" // ApacheV2 - // Runtime - - object Runtime { - val logback = "ch.qos.logback" % "logback-classic" % V.Logback % "runtime" // MIT - } - // Test object Test { From 1b68ea7c9db76adf66887fd803f0fdabdf9d9cc7 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 15:19:28 +0200 Subject: [PATCH 317/538] document processAllSystemMessages semantics --- .../src/main/scala/akka/dispatch/Mailbox.scala | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 11e58ede7e..b6af478ac7 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -208,6 +208,13 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } } + /** + * 
Will at least try to process all queued system messages: in case of + * failure simply drop and go on to the next, because there is nothing to + * restart here (failure is in ActorCell somewhere …). In case the mailbox + * becomes closed (because of processing a Terminate message), dump all + * already dequeued message to deadLetters. + */ final def processAllSystemMessages() { var failure: Throwable = null var nextMessage = systemDrain(null) @@ -216,8 +223,9 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes nextMessage = nextMessage.next msg.next = null if (debug) println(actor.self + " processing system message " + msg + " with " + actor.childrenRefs) - try actor systemInvoke msg - catch { + try { + actor systemInvoke msg + } catch { case NonFatal(e) ⇒ if (failure eq null) failure = e actor.system.eventStream.publish(Error(e, actor.self.path.toString, this.getClass, "exception during processing system message " + msg + ": " + e.getMessage)) From d4070d36ab7cc1fbfe9fb707e780ef0ab1f1c228 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 23:56:04 +0200 Subject: [PATCH 318/538] remove LocalActorRef.systemService (residue, very old) --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 3 +-- akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala | 4 ++-- akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 861df570b7..0620a73a28 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -227,8 +227,7 @@ private[akka] class LocalActorRef private[akka] ( _system: ActorSystemImpl, _props: Props, _supervisor: InternalActorRef, - override val path: ActorPath, - val systemService: Boolean = false) + override val path: ActorPath) extends InternalActorRef with LocalRef { 
/* diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 960e8a37e5..4c200b204c 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -473,7 +473,7 @@ class LocalActorRefProvider( private val guardianProps = Props(new Guardian) lazy val rootGuardian: InternalActorRef = - new LocalActorRef(system, guardianProps, theOneWhoWalksTheBubblesOfSpaceTime, rootPath, true) { + new LocalActorRef(system, guardianProps, theOneWhoWalksTheBubblesOfSpaceTime, rootPath) { override def getParent: InternalActorRef = this override def getSingleChild(name: String): InternalActorRef = name match { case "temp" ⇒ tempContainer @@ -541,7 +541,7 @@ class LocalActorRefProvider( def actorOf(system: ActorSystemImpl, props: Props, supervisor: InternalActorRef, path: ActorPath, systemService: Boolean, deploy: Option[Deploy], lookupDeploy: Boolean): InternalActorRef = { props.routerConfig match { - case NoRouter ⇒ new LocalActorRef(system, props, supervisor, path, systemService) // create a local actor + case NoRouter ⇒ new LocalActorRef(system, props, supervisor, path) // create a local actor case router ⇒ val lookup = if (lookupDeploy) deployer.lookup(path) else None val fromProps = Iterator(props.deploy.copy(routerConfig = props.deploy.routerConfig withFallback router)) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index c0442d45d6..ed151b6b12 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -31,8 +31,7 @@ class TestActorRef[T <: Actor]( if (_props.dispatcher == Dispatchers.DefaultDispatcherId) CallingThreadDispatcher.Id else _props.dispatcher), _supervisor, - _supervisor.path / name, - false) { + _supervisor.path / name) { import 
TestActorRef.InternalGetActor From df89184b30c5eedd17ee1c131322f6677accabc6 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 19:01:12 +0200 Subject: [PATCH 319/538] make system.actorOf non-blocking (*), see #2031 (*) that actually depends on whether provider.actorOf blocks, which currently MAY happen when actor creation triggers a remote connection - properly CAS childrenRefs within ActorCell, making it safe to update from the outside - add reserve/unreserve to ChildrenContainer and move uniqueness check there - when creating, first reserve, then add the actor; unreserve if provider.actorOf did fail --- .../java/akka/actor/AbstractActorCell.java | 2 + .../src/main/scala/akka/actor/ActorCell.scala | 133 ++++++++++++++---- .../scala/akka/actor/ActorRefProvider.scala | 4 +- .../main/scala/akka/actor/ActorSystem.scala | 18 ++- 4 files changed, 120 insertions(+), 37 deletions(-) diff --git a/akka-actor/src/main/java/akka/actor/AbstractActorCell.java b/akka-actor/src/main/java/akka/actor/AbstractActorCell.java index d6005f463c..8384e67af0 100644 --- a/akka-actor/src/main/java/akka/actor/AbstractActorCell.java +++ b/akka-actor/src/main/java/akka/actor/AbstractActorCell.java @@ -8,10 +8,12 @@ import akka.util.Unsafe; final class AbstractActorCell { final static long mailboxOffset; + final static long childrenOffset; static { try { mailboxOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_mailboxDoNotCallMeDirectly")); + childrenOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_childrenRefsDoNotCallMeDirectly")); } catch(Throwable t){ throw new ExceptionInInitializerError(t); } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 9dbe610195..b0f8063160 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -201,18 +201,22 @@ private[akka] object ActorCell { def 
children: Iterable[ActorRef] def stats: Iterable[ChildRestartStats] def shallDie(actor: ActorRef): ChildrenContainer + def reserve(name: String): ChildrenContainer + def unreserve(name: String): ChildrenContainer } trait EmptyChildrenContainer extends ChildrenContainer { val emptyStats = TreeMap.empty[String, ChildRestartStats] def add(child: ActorRef): ChildrenContainer = - new NormalChildrenContainer(emptyStats.updated(child.path.name, ChildRestartStats(child))) + new NormalChildrenContainer(emptyStats.updated(child.path.name, ChildRestartStats(child)), Set()) def remove(child: ActorRef): ChildrenContainer = this def getByName(name: String): Option[ChildRestartStats] = None def getByRef(actor: ActorRef): Option[ChildRestartStats] = None def children: Iterable[ActorRef] = Nil def stats: Iterable[ChildRestartStats] = Nil def shallDie(actor: ActorRef): ChildrenContainer = this + def reserve(name: String): ChildrenContainer = new NormalChildrenContainer(emptyStats, Set(name)) + def unreserve(name: String): ChildrenContainer = this override def toString = "no children" } @@ -228,6 +232,7 @@ private[akka] object ActorCell { */ object TerminatedChildrenContainer extends EmptyChildrenContainer { override def add(child: ActorRef): ChildrenContainer = this + override def reserve(name: String): ChildrenContainer = this } /** @@ -236,11 +241,12 @@ private[akka] object ActorCell { * calling context.stop(child) and processing the ChildTerminated() system * message). 
*/ - class NormalChildrenContainer(c: TreeMap[String, ChildRestartStats]) extends ChildrenContainer { + class NormalChildrenContainer(c: TreeMap[String, ChildRestartStats], reservedNames: Set[String]) extends ChildrenContainer { - def add(child: ActorRef): ChildrenContainer = new NormalChildrenContainer(c.updated(child.path.name, ChildRestartStats(child))) + def add(child: ActorRef): ChildrenContainer = + new NormalChildrenContainer(c.updated(child.path.name, ChildRestartStats(child)), reservedNames - child.path.name) - def remove(child: ActorRef): ChildrenContainer = NormalChildrenContainer(c - child.path.name) + def remove(child: ActorRef): ChildrenContainer = NormalChildrenContainer(c - child.path.name, reservedNames) def getByName(name: String): Option[ChildRestartStats] = c get name @@ -253,7 +259,14 @@ private[akka] object ActorCell { def stats: Iterable[ChildRestartStats] = c.values - def shallDie(actor: ActorRef): ChildrenContainer = TerminatingChildrenContainer(c, Set(actor), UserRequest) + def shallDie(actor: ActorRef): ChildrenContainer = TerminatingChildrenContainer(c, Set(actor), UserRequest, reservedNames) + + def reserve(name: String): ChildrenContainer = + if ((c contains name) || (reservedNames contains name)) + throw new InvalidActorNameException("actor name " + name + " is not unique!") + else new NormalChildrenContainer(c, reservedNames + name) + + def unreserve(name: String): ChildrenContainer = NormalChildrenContainer(c, reservedNames - name) override def toString = if (c.size > 20) c.size + " children" @@ -261,9 +274,9 @@ private[akka] object ActorCell { } object NormalChildrenContainer { - def apply(c: TreeMap[String, ChildRestartStats]): ChildrenContainer = - if (c.isEmpty) EmptyChildrenContainer - else new NormalChildrenContainer(c) + def apply(c: TreeMap[String, ChildRestartStats], reservedName: Set[String]): ChildrenContainer = + if (c.isEmpty && reservedName.isEmpty) EmptyChildrenContainer + else new NormalChildrenContainer(c, 
reservedName) } /** @@ -276,16 +289,21 @@ private[akka] object ActorCell { * type of container, depending on whether or not children are left and whether or not * the reason was “Terminating”. */ - case class TerminatingChildrenContainer(c: TreeMap[String, ChildRestartStats], toDie: Set[ActorRef], reason: SuspendReason) + case class TerminatingChildrenContainer( + c: TreeMap[String, ChildRestartStats], + toDie: Set[ActorRef], + reason: SuspendReason, + reservedNames: Set[String]) extends ChildrenContainer { - def add(child: ActorRef): ChildrenContainer = copy(c.updated(child.path.name, ChildRestartStats(child))) + def add(child: ActorRef): ChildrenContainer = + copy(c.updated(child.path.name, ChildRestartStats(child)), reservedNames = reservedNames - child.path.name) def remove(child: ActorRef): ChildrenContainer = { val t = toDie - child if (t.isEmpty) reason match { case Termination ⇒ TerminatedChildrenContainer - case _ ⇒ NormalChildrenContainer(c - child.path.name) + case _ ⇒ NormalChildrenContainer(c - child.path.name, reservedNames) } else copy(c - child.path.name, t) } @@ -303,6 +321,13 @@ private[akka] object ActorCell { def shallDie(actor: ActorRef): ChildrenContainer = copy(toDie = toDie + actor) + def reserve(name: String): ChildrenContainer = + if ((c contains name) || (reservedNames contains name)) + throw new InvalidActorNameException("actor name " + name + " is not unique!") + else copy(reservedNames = reservedNames + name) + + def unreserve(name: String): ChildrenContainer = copy(reservedNames = reservedNames - name) + override def toString = if (c.size > 20) c.size + " children" else c.mkString("children (" + toDie.size + " terminating):\n ", "\n ", "\n") + toDie @@ -317,7 +342,8 @@ private[akka] class ActorCell( val self: InternalActorRef, val props: Props, @volatile var parent: InternalActorRef) extends UntypedActorContext { - import AbstractActorCell.mailboxOffset + + import AbstractActorCell.{ mailboxOffset, childrenOffset } import ActorCell._ 
final def systemImpl = system @@ -353,15 +379,20 @@ private[akka] class ActorCell( var receiveTimeoutData: (Long, Cancellable) = emptyReceiveTimeoutData @volatile - var childrenRefs: ChildrenContainer = EmptyChildrenContainer + private var _childrenRefsDoNotCallMeDirectly: ChildrenContainer = EmptyChildrenContainer + + def childrenRefs: ChildrenContainer = Unsafe.instance.getObjectVolatile(this, childrenOffset).asInstanceOf[ChildrenContainer] + + private def swapChildrenRefs(oldChildren: ChildrenContainer, newChildren: ChildrenContainer): Boolean = + Unsafe.instance.compareAndSwapObject(this, childrenOffset, oldChildren, newChildren) private def isTerminating = childrenRefs match { - case TerminatingChildrenContainer(_, _, Termination) ⇒ true + case TerminatingChildrenContainer(_, _, Termination, _) ⇒ true case TerminatedChildrenContainer ⇒ true case _ ⇒ false } private def isNormal = childrenRefs match { - case TerminatingChildrenContainer(_, _, Termination | _: Recreation) ⇒ false + case TerminatingChildrenContainer(_, _, Termination | _: Recreation, _) ⇒ false case _ ⇒ true } @@ -379,8 +410,27 @@ private[akka] class ActorCell( // in case we are currently terminating, swallow creation requests and return EmptyLocalActorRef if (isTerminating) provider.actorFor(self, Seq(name)) else { - val actor = provider.actorOf(systemImpl, props, self, self.path / name, false, None, true) - childrenRefs = childrenRefs.add(actor) + @tailrec def reserve(name: String): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.reserve(name)) || reserve(name) + } + reserve(name) + val actor = + try provider.actorOf(systemImpl, props, self, self.path / name, false, None, true) + catch { + case NonFatal(e) ⇒ + @tailrec def unreserve(name: String): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.unreserve(name)) || unreserve(name) + } + unreserve(name) + throw e + } + @tailrec def add(ref: ActorRef): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.add(ref)) || 
add(ref) + } + add(actor) actor } } @@ -395,14 +445,15 @@ private[akka] class ActorCell( case ElementRegex() ⇒ // this is fine case _ ⇒ throw new InvalidActorNameException("illegal actor name '" + name + "', must conform to " + ElementRegex) } - childrenRefs.getByName(name) match { - case None ⇒ _actorOf(props, name) - case _ ⇒ throw new InvalidActorNameException("actor name " + name + " is not unique!") - } + _actorOf(props, name) } final def stop(actor: ActorRef): Unit = { - if (childrenRefs.getByRef(actor).isDefined) childrenRefs = childrenRefs.shallDie(actor) + @tailrec def shallDie(ref: ActorRef): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.shallDie(ref)) || shallDie(ref) + } + if (childrenRefs.getByRef(actor).isDefined) shallDie(actor) actor.asInstanceOf[InternalActorRef].stop() } @@ -568,7 +619,13 @@ private[akka] class ActorCell( } childrenRefs match { case ct: TerminatingChildrenContainer ⇒ - childrenRefs = ct.copy(reason = Recreation(cause)) + @tailrec def rec(cause: Throwable): Boolean = { + childrenRefs match { + case c: TerminatingChildrenContainer ⇒ swapChildrenRefs(c, c.copy(reason = Recreation(cause))) || rec(cause) + case _ ⇒ true // cannot happen + } + } + rec(cause) dispatcher suspend this case _ ⇒ doRecreate(cause, failedActor) @@ -626,7 +683,13 @@ private[akka] class ActorCell( childrenRefs match { case ct: TerminatingChildrenContainer ⇒ - childrenRefs = ct.copy(reason = Termination) + @tailrec def rec(): Boolean = { + childrenRefs match { + case c: TerminatingChildrenContainer ⇒ swapChildrenRefs(c, c.copy(reason = Termination)) || rec() + case _ ⇒ true // cannot happen + } + } + rec() // do not process normal messages while waiting for all children to terminate dispatcher suspend this if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopping")) @@ -635,7 +698,13 @@ private[akka] class ActorCell( } def supervise(child: ActorRef): Unit = if (!isTerminating) { - if 
(childrenRefs.getByRef(child).isEmpty) childrenRefs = childrenRefs.add(child) + if (childrenRefs.getByRef(child).isEmpty) { + @tailrec def add(ref: ActorRef): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.add(ref)) || add(ref) + } + add(child) + } if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) } @@ -650,6 +719,7 @@ private[akka] class ActorCell( case Terminate() ⇒ terminate() case Supervise(child) ⇒ supervise(child) case ChildTerminated(child) ⇒ handleChildTerminated(child) + case NoMessage ⇒ // only here to suppress warning } } catch { case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, "error while processing " + message) @@ -797,10 +867,15 @@ private[akka] class ActorCell( } final def handleChildTerminated(child: ActorRef): Unit = try { + @tailrec def remove(ref: ActorRef): ChildrenContainer = { + val c = childrenRefs + val n = c.remove(ref) + if (swapChildrenRefs(c, n)) n + else remove(ref) + } childrenRefs match { - case tc @ TerminatingChildrenContainer(_, _, reason) ⇒ - val n = tc.remove(child) - childrenRefs = n + case tc @ TerminatingChildrenContainer(_, _, reason, _) ⇒ + val n = remove(child) actor.supervisorStrategy.handleChildTerminated(this, child, children) if (!n.isInstanceOf[TerminatingChildrenContainer]) reason match { case Recreation(cause) ⇒ doRecreate(cause, actor) // doRecreate since this is the continuation of "recreate" @@ -808,7 +883,7 @@ private[akka] class ActorCell( case _ ⇒ } case _ ⇒ - childrenRefs = childrenRefs.remove(child) + remove(child) actor.supervisorStrategy.handleChildTerminated(this, child, children) } } catch { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 4c200b204c..fd2ce85012 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ 
b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -482,10 +482,10 @@ class LocalActorRefProvider( } lazy val guardian: InternalActorRef = - actorOf(system, guardianProps, rootGuardian, rootPath / "user", true, None, false) + actorOf(system, guardianProps, rootGuardian, rootPath / "user", systemService = true, None, false) lazy val systemGuardian: InternalActorRef = - actorOf(system, guardianProps.withCreator(new SystemGuardian), rootGuardian, rootPath / "system", true, None, false) + actorOf(system, guardianProps.withCreator(new SystemGuardian), rootGuardian, rootPath / "system", systemService = true, None, false) lazy val tempContainer = new VirtualPathContainer(system.provider, tempNode, rootGuardian, log) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 721375adda..da8a6c4734 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -481,18 +481,24 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, protected def systemImpl: ActorSystemImpl = this private[akka] def systemActorOf(props: Props, name: String): ActorRef = { - implicit val timeout = settings.CreationTimeout - Await.result((systemGuardian ? CreateChild(props, name)).mapTo[ActorRef], timeout.duration) + systemGuardian match { + case g: LocalActorRef ⇒ g.underlying.actorOf(props, name) + case s ⇒ throw new UnsupportedOperationException("unknown systemGuardian type " + s.getClass) + } } def actorOf(props: Props, name: String): ActorRef = { - implicit val timeout = settings.CreationTimeout - Await.result((guardian ? 
CreateChild(props, name)).mapTo[ActorRef], timeout.duration) + guardian match { + case g: LocalActorRef ⇒ g.underlying.actorOf(props, name) + case s ⇒ throw new UnsupportedOperationException("unknown guardian type " + s.getClass) + } } def actorOf(props: Props): ActorRef = { - implicit val timeout = settings.CreationTimeout - Await.result((guardian ? CreateRandomNameChild(props)).mapTo[ActorRef], timeout.duration) + guardian match { + case g: LocalActorRef ⇒ g.underlying.actorOf(props) + case s ⇒ throw new UnsupportedOperationException("unknown guardian type " + s.getClass) + } } def stop(actor: ActorRef): Unit = { From 4f862c4328da8145ec72fbdf5b5bf223d1f01bc2 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 10:32:14 +0200 Subject: [PATCH 320/538] add test for waves of top-level actors --- .../scala/akka/actor/ActorSystemSpec.scala | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala index 7ae79fea34..8ffdef1233 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala @@ -10,6 +10,9 @@ import akka.dispatch.Await import akka.util.duration._ import scala.collection.JavaConverters import java.util.concurrent.{ TimeUnit, RejectedExecutionException, CountDownLatch, ConcurrentLinkedQueue } +import akka.pattern.ask +import akka.util.Timeout +import akka.dispatch.Future class JavaExtensionSpec extends JavaExtension with JUnitSuite @@ -21,6 +24,38 @@ object TestExtension extends ExtensionId[TestExtension] with ExtensionIdProvider // Dont't place inside ActorSystemSpec object, since it will not be garbage collected and reference to system remains class TestExtension(val system: ExtendedActorSystem) extends Extension +object ActorSystemSpec { + + class Waves extends Actor { + var master: ActorRef = _ + + def 
receive = { + case n: Int ⇒ + master = sender + for (i ← 1 to n) context.watch(context.system.actorOf(Props[Terminater])) ! "run" + case Terminated(child) if context.actorFor(child.path.parent) == self ⇒ + if (context.children.isEmpty) { + master ! "done" + context stop self + } + } + + override def preRestart(cause: Throwable, msg: Option[Any]) { + if (master ne null) { + master ! "failed with " + cause + " while processing " + msg + } + context stop self + } + } + + class Terminater extends Actor { + def receive = { + case "run" ⇒ context.stop(self) + } + } + +} + @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExtension$"]""") { @@ -112,6 +147,13 @@ class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExt }.getMessage must be("Must be called prior to system shutdown.") } + "reliably create waves of actors" in { + import system.dispatcher + implicit val timeout = Timeout(30 seconds) + val waves = for (i ← 1 to 3) yield system.actorOf(Props[ActorSystemSpec.Waves]) ? 
50000 + Await.result(Future.sequence(waves), timeout.duration + 5.seconds) must be === Seq("done", "done", "done") + } + } -} \ No newline at end of file +} From dd596a20cbba52a4080a259089fb0b01b8c87ccc Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 10:45:14 +0200 Subject: [PATCH 321/538] fold reservedNames into normal ChildStats trees inside ChildrenContainers --- .../scala/akka/actor/ActorSystemSpec.scala | 2 +- .../src/main/scala/akka/actor/ActorCell.scala | 99 ++++++++++--------- .../scala/akka/actor/ActorRefProvider.scala | 5 - .../main/scala/akka/actor/FaultHandling.scala | 13 ++- 4 files changed, 64 insertions(+), 55 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala index 8ffdef1233..33a41c25c8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala @@ -39,7 +39,7 @@ object ActorSystemSpec { context stop self } } - + override def preRestart(cause: Throwable, msg: Option[Any]) { if (master ne null) { master ! 
"failed with " + cause + " while processing " + msg diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index b0f8063160..acebede751 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -15,6 +15,7 @@ import akka.serialization.SerializationExtension import akka.event.Logging.LogEventException import collection.immutable.{ TreeSet, Stack, TreeMap } import akka.util.{ Unsafe, Duration, Helpers, NonFatal } +import java.util.concurrent.atomic.AtomicLong //TODO: everything here for current compatibility - could be limited more @@ -206,16 +207,16 @@ private[akka] object ActorCell { } trait EmptyChildrenContainer extends ChildrenContainer { - val emptyStats = TreeMap.empty[String, ChildRestartStats] + val emptyStats = TreeMap.empty[String, ChildStats] def add(child: ActorRef): ChildrenContainer = - new NormalChildrenContainer(emptyStats.updated(child.path.name, ChildRestartStats(child)), Set()) + new NormalChildrenContainer(emptyStats.updated(child.path.name, ChildRestartStats(child))) def remove(child: ActorRef): ChildrenContainer = this def getByName(name: String): Option[ChildRestartStats] = None def getByRef(actor: ActorRef): Option[ChildRestartStats] = None def children: Iterable[ActorRef] = Nil def stats: Iterable[ChildRestartStats] = Nil def shallDie(actor: ActorRef): ChildrenContainer = this - def reserve(name: String): ChildrenContainer = new NormalChildrenContainer(emptyStats, Set(name)) + def reserve(name: String): ChildrenContainer = new NormalChildrenContainer(emptyStats.updated(name, ChildNameReserved)) def unreserve(name: String): ChildrenContainer = this override def toString = "no children" } @@ -241,32 +242,38 @@ private[akka] object ActorCell { * calling context.stop(child) and processing the ChildTerminated() system * message). 
*/ - class NormalChildrenContainer(c: TreeMap[String, ChildRestartStats], reservedNames: Set[String]) extends ChildrenContainer { + class NormalChildrenContainer(c: TreeMap[String, ChildStats]) extends ChildrenContainer { def add(child: ActorRef): ChildrenContainer = - new NormalChildrenContainer(c.updated(child.path.name, ChildRestartStats(child)), reservedNames - child.path.name) + new NormalChildrenContainer(c.updated(child.path.name, ChildRestartStats(child))) - def remove(child: ActorRef): ChildrenContainer = NormalChildrenContainer(c - child.path.name, reservedNames) + def remove(child: ActorRef): ChildrenContainer = NormalChildrenContainer(c - child.path.name) - def getByName(name: String): Option[ChildRestartStats] = c get name - - def getByRef(actor: ActorRef): Option[ChildRestartStats] = c get actor.path.name match { - case c @ Some(crs) if (crs.child == actor) ⇒ c - case _ ⇒ None + def getByName(name: String): Option[ChildRestartStats] = c get name match { + case s @ Some(_: ChildRestartStats) ⇒ s.asInstanceOf[Option[ChildRestartStats]] + case _ ⇒ None } - def children: Iterable[ActorRef] = c.values.view.map(_.child) + def getByRef(actor: ActorRef): Option[ChildRestartStats] = c get actor.path.name match { + case c @ Some(crs: ChildRestartStats) if (crs.child == actor) ⇒ c.asInstanceOf[Option[ChildRestartStats]] + case _ ⇒ None + } - def stats: Iterable[ChildRestartStats] = c.values + def children: Iterable[ActorRef] = c.values.view.collect { case ChildRestartStats(child, _, _) ⇒ child } - def shallDie(actor: ActorRef): ChildrenContainer = TerminatingChildrenContainer(c, Set(actor), UserRequest, reservedNames) + def stats: Iterable[ChildRestartStats] = c.values.collect { case c: ChildRestartStats ⇒ c } + + def shallDie(actor: ActorRef): ChildrenContainer = TerminatingChildrenContainer(c, Set(actor), UserRequest) def reserve(name: String): ChildrenContainer = - if ((c contains name) || (reservedNames contains name)) + if (c contains name) throw new 
InvalidActorNameException("actor name " + name + " is not unique!") - else new NormalChildrenContainer(c, reservedNames + name) + else new NormalChildrenContainer(c.updated(name, ChildNameReserved)) - def unreserve(name: String): ChildrenContainer = NormalChildrenContainer(c, reservedNames - name) + def unreserve(name: String): ChildrenContainer = c get name match { + case Some(ChildNameReserved) ⇒ NormalChildrenContainer(c - name) + case _ ⇒ this + } override def toString = if (c.size > 20) c.size + " children" @@ -274,9 +281,9 @@ private[akka] object ActorCell { } object NormalChildrenContainer { - def apply(c: TreeMap[String, ChildRestartStats], reservedName: Set[String]): ChildrenContainer = - if (c.isEmpty && reservedName.isEmpty) EmptyChildrenContainer - else new NormalChildrenContainer(c, reservedName) + def apply(c: TreeMap[String, ChildStats]): ChildrenContainer = + if (c.isEmpty) EmptyChildrenContainer + else new NormalChildrenContainer(c) } /** @@ -289,44 +296,45 @@ private[akka] object ActorCell { * type of container, depending on whether or not children are left and whether or not * the reason was “Terminating”. 
*/ - case class TerminatingChildrenContainer( - c: TreeMap[String, ChildRestartStats], - toDie: Set[ActorRef], - reason: SuspendReason, - reservedNames: Set[String]) + case class TerminatingChildrenContainer(c: TreeMap[String, ChildStats], toDie: Set[ActorRef], reason: SuspendReason) extends ChildrenContainer { - def add(child: ActorRef): ChildrenContainer = - copy(c.updated(child.path.name, ChildRestartStats(child)), reservedNames = reservedNames - child.path.name) + def add(child: ActorRef): ChildrenContainer = copy(c.updated(child.path.name, ChildRestartStats(child))) def remove(child: ActorRef): ChildrenContainer = { val t = toDie - child if (t.isEmpty) reason match { case Termination ⇒ TerminatedChildrenContainer - case _ ⇒ NormalChildrenContainer(c - child.path.name, reservedNames) + case _ ⇒ NormalChildrenContainer(c - child.path.name) } else copy(c - child.path.name, t) } - def getByName(name: String): Option[ChildRestartStats] = c get name - - def getByRef(actor: ActorRef): Option[ChildRestartStats] = c get actor.path.name match { - case c @ Some(crs) if (crs.child == actor) ⇒ c - case _ ⇒ None + def getByName(name: String): Option[ChildRestartStats] = c get name match { + case s @ Some(_: ChildRestartStats) ⇒ s.asInstanceOf[Option[ChildRestartStats]] + case _ ⇒ None } - def children: Iterable[ActorRef] = c.values.view.map(_.child) + def getByRef(actor: ActorRef): Option[ChildRestartStats] = c get actor.path.name match { + case c @ Some(crs: ChildRestartStats) if (crs.child == actor) ⇒ c.asInstanceOf[Option[ChildRestartStats]] + case _ ⇒ None + } - def stats: Iterable[ChildRestartStats] = c.values + def children: Iterable[ActorRef] = c.values.view.collect { case ChildRestartStats(child, _, _) ⇒ child } + + def stats: Iterable[ChildRestartStats] = c.values.collect { case c: ChildRestartStats ⇒ c } def shallDie(actor: ActorRef): ChildrenContainer = copy(toDie = toDie + actor) def reserve(name: String): ChildrenContainer = - if ((c contains name) || 
(reservedNames contains name)) + if (c contains name) throw new InvalidActorNameException("actor name " + name + " is not unique!") - else copy(reservedNames = reservedNames + name) + else copy(c = c.updated(name, ChildNameReserved)) - def unreserve(name: String): ChildrenContainer = copy(reservedNames = reservedNames - name) + def unreserve(name: String): ChildrenContainer = c get name match { + case Some(ChildNameReserved) ⇒ copy(c = c - name) + case _ ⇒ this + } override def toString = if (c.size > 20) c.size + " children" @@ -387,12 +395,12 @@ private[akka] class ActorCell( Unsafe.instance.compareAndSwapObject(this, childrenOffset, oldChildren, newChildren) private def isTerminating = childrenRefs match { - case TerminatingChildrenContainer(_, _, Termination, _) ⇒ true + case TerminatingChildrenContainer(_, _, Termination) ⇒ true case TerminatedChildrenContainer ⇒ true case _ ⇒ false } private def isNormal = childrenRefs match { - case TerminatingChildrenContainer(_, _, Termination | _: Recreation, _) ⇒ false + case TerminatingChildrenContainer(_, _, Termination | _: Recreation) ⇒ false case _ ⇒ true } @@ -461,16 +469,11 @@ private[akka] class ActorCell( var actor: Actor = _ private var behaviorStack: Stack[Actor.Receive] = Stack.empty @volatile var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status - var nextNameSequence: Long = 0 var watching: Set[ActorRef] = emptyActorRefSet var watchedBy: Set[ActorRef] = emptyActorRefSet - //Not thread safe, so should only be used inside the actor that inhabits this ActorCell - final protected def randomName(): String = { - val n = nextNameSequence - nextNameSequence = n + 1 - Helpers.base64(n) - } + val nextNameSequence = new AtomicLong + final protected def randomName(): String = Helpers.base64(nextNameSequence.getAndIncrement()) @inline final val dispatcher: MessageDispatcher = system.dispatchers.lookup(props.dispatcher) @@ -874,7 +877,7 @@ private[akka] class 
ActorCell( else remove(ref) } childrenRefs match { - case tc @ TerminatingChildrenContainer(_, _, reason, _) ⇒ + case tc @ TerminatingChildrenContainer(_, _, reason) ⇒ val n = remove(child) actor.supervisorStrategy.handleChildTerminated(this, child, children) if (!n.isInstanceOf[TerminatingChildrenContainer]) reason match { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index fd2ce85012..64ffe6d39d 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -553,8 +553,3 @@ class LocalActorRefProvider( def getExternalAddressFor(addr: Address): Option[Address] = if (addr == rootPath.address) Some(addr) else None } -private[akka] class GuardianCell(_system: ActorSystemImpl, _self: InternalActorRef, _props: Props, _parent: InternalActorRef) - extends ActorCell(_system, _self, _props, _parent) { - -} - diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 27a9f346db..76eed2eca9 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -9,11 +9,22 @@ import scala.collection.JavaConversions._ import java.lang.{ Iterable ⇒ JIterable } import akka.util.Duration +/** + * INTERNAL API + */ +private[akka] sealed trait ChildStats + +/** + * INTERNAL API + */ +private[akka] case object ChildNameReserved extends ChildStats + /** * ChildRestartStats is the statistics kept by every parent Actor for every child Actor * and is used for SupervisorStrategies to know how to deal with problems that occur for the children. 
*/ -case class ChildRestartStats(val child: ActorRef, var maxNrOfRetriesCount: Int = 0, var restartTimeWindowStartNanos: Long = 0L) { +case class ChildRestartStats(val child: ActorRef, var maxNrOfRetriesCount: Int = 0, var restartTimeWindowStartNanos: Long = 0L) + extends ChildStats { //FIXME How about making ChildRestartStats immutable and then move these methods into the actual supervisor strategies? def requestRestartPermission(retriesWindow: (Option[Int], Option[Int])): Boolean = From 8cd11550fad39ce361f418a76f8379e8ca14b18a Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 15:10:19 +0200 Subject: [PATCH 322/538] ActorCell: move out and reuse children updaters, add stress test --- .../scala/akka/actor/ActorSystemSpec.scala | 32 ++++- .../src/main/scala/akka/actor/ActorCell.scala | 111 +++++++++--------- 2 files changed, 86 insertions(+), 57 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala index 33a41c25c8..b9540fbf33 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala @@ -28,13 +28,19 @@ object ActorSystemSpec { class Waves extends Actor { var master: ActorRef = _ + var terminaters = Set[ActorRef]() def receive = { case n: Int ⇒ master = sender - for (i ← 1 to n) context.watch(context.system.actorOf(Props[Terminater])) ! "run" - case Terminated(child) if context.actorFor(child.path.parent) == self ⇒ - if (context.children.isEmpty) { + terminaters = Set() ++ (for (i ← 1 to n) yield { + val man = context.watch(context.system.actorOf(Props[Terminater])) + man ! "run" + man + }) + case Terminated(child) if terminaters contains child ⇒ + terminaters -= child + if (terminaters.isEmpty) { master ! 
"done" context stop self } @@ -57,7 +63,7 @@ object ActorSystemSpec { } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExtension$"]""") { +class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExtension$"]""") with ImplicitSender { "An ActorSystem" must { @@ -154,6 +160,24 @@ class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExt Await.result(Future.sequence(waves), timeout.duration + 5.seconds) must be === Seq("done", "done", "done") } + "reliable deny creation of actors while shutting down" in { + val system = ActorSystem() + system.scheduler.scheduleOnce(200 millis) { system.shutdown() } + var failing = false + var created = Vector.empty[ActorRef] + while (!system.isTerminated) { + try { + val t = system.actorOf(Props[ActorSystemSpec.Terminater]) + failing must not be true // because once failing => always failing (it’s due to shutdown) + created :+= t + } catch { + case e: Exception ⇒ failing = true + } + } + println(created.last) + created filter (!_.isTerminated) must be(Seq()) + } + } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index acebede751..ab8571100f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -202,7 +202,13 @@ private[akka] object ActorCell { def children: Iterable[ActorRef] def stats: Iterable[ChildRestartStats] def shallDie(actor: ActorRef): ChildrenContainer + /** + * reserve that name or throw an exception + */ def reserve(name: String): ChildrenContainer + /** + * cancel a reservation + */ def unreserve(name: String): ChildrenContainer } @@ -233,7 +239,8 @@ private[akka] object ActorCell { */ object TerminatedChildrenContainer extends EmptyChildrenContainer { override def add(child: ActorRef): ChildrenContainer = this - override def 
reserve(name: String): ChildrenContainer = this + override def reserve(name: String): ChildrenContainer = + throw new IllegalStateException("cannot reserve actor name '" + name + "': already terminated") } /** @@ -326,10 +333,13 @@ private[akka] object ActorCell { def shallDie(actor: ActorRef): ChildrenContainer = copy(toDie = toDie + actor) - def reserve(name: String): ChildrenContainer = - if (c contains name) - throw new InvalidActorNameException("actor name " + name + " is not unique!") - else copy(c = c.updated(name, ChildNameReserved)) + def reserve(name: String): ChildrenContainer = reason match { + case Termination ⇒ throw new IllegalStateException("cannot reserve actor name '" + name + "': terminating") + case _ ⇒ + if (c contains name) + throw new InvalidActorNameException("actor name " + name + " is not unique!") + else copy(c = c.updated(name, ChildNameReserved)) + } def unreserve(name: String): ChildrenContainer = c get name match { case Some(ChildNameReserved) ⇒ copy(c = c - name) @@ -394,6 +404,40 @@ private[akka] class ActorCell( private def swapChildrenRefs(oldChildren: ChildrenContainer, newChildren: ChildrenContainer): Boolean = Unsafe.instance.compareAndSwapObject(this, childrenOffset, oldChildren, newChildren) + @tailrec private def reserveChild(name: String): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.reserve(name)) || reserveChild(name) + } + + @tailrec private def unreserveChild(name: String): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.unreserve(name)) || unreserveChild(name) + } + + @tailrec private def addChild(ref: ActorRef): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.add(ref)) || addChild(ref) + } + + @tailrec private def shallDie(ref: ActorRef): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.shallDie(ref)) || shallDie(ref) + } + + @tailrec private def removeChild(ref: ActorRef): ChildrenContainer = { + val c = childrenRefs + val n = c.remove(ref) + if (swapChildrenRefs(c, n)) n 
+ else removeChild(ref) + } + + @tailrec private def setChildrenTerminationReason(reason: SuspendReason): Boolean = { + childrenRefs match { + case c: TerminatingChildrenContainer ⇒ swapChildrenRefs(c, c.copy(reason = reason)) || setChildrenTerminationReason(reason) + case _ ⇒ false + } + } + private def isTerminating = childrenRefs match { case TerminatingChildrenContainer(_, _, Termination) ⇒ true case TerminatedChildrenContainer ⇒ true @@ -418,27 +462,16 @@ private[akka] class ActorCell( // in case we are currently terminating, swallow creation requests and return EmptyLocalActorRef if (isTerminating) provider.actorFor(self, Seq(name)) else { - @tailrec def reserve(name: String): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.reserve(name)) || reserve(name) - } - reserve(name) + reserveChild(name) + // this name will either be unreserved or overwritten with a real child below val actor = try provider.actorOf(systemImpl, props, self, self.path / name, false, None, true) catch { case NonFatal(e) ⇒ - @tailrec def unreserve(name: String): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.unreserve(name)) || unreserve(name) - } - unreserve(name) + unreserveChild(name) throw e } - @tailrec def add(ref: ActorRef): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.add(ref)) || add(ref) - } - add(actor) + addChild(actor) actor } } @@ -457,10 +490,6 @@ private[akka] class ActorCell( } final def stop(actor: ActorRef): Unit = { - @tailrec def shallDie(ref: ActorRef): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.shallDie(ref)) || shallDie(ref) - } if (childrenRefs.getByRef(actor).isDefined) shallDie(actor) actor.asInstanceOf[InternalActorRef].stop() } @@ -622,13 +651,7 @@ private[akka] class ActorCell( } childrenRefs match { case ct: TerminatingChildrenContainer ⇒ - @tailrec def rec(cause: Throwable): Boolean = { - childrenRefs match { - case c: TerminatingChildrenContainer ⇒ swapChildrenRefs(c, c.copy(reason = Recreation(cause))) 
|| rec(cause) - case _ ⇒ true // cannot happen - } - } - rec(cause) + setChildrenTerminationReason(Recreation(cause)) dispatcher suspend this case _ ⇒ doRecreate(cause, failedActor) @@ -686,13 +709,7 @@ private[akka] class ActorCell( childrenRefs match { case ct: TerminatingChildrenContainer ⇒ - @tailrec def rec(): Boolean = { - childrenRefs match { - case c: TerminatingChildrenContainer ⇒ swapChildrenRefs(c, c.copy(reason = Termination)) || rec() - case _ ⇒ true // cannot happen - } - } - rec() + setChildrenTerminationReason(Termination) // do not process normal messages while waiting for all children to terminate dispatcher suspend this if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopping")) @@ -701,13 +718,7 @@ private[akka] class ActorCell( } def supervise(child: ActorRef): Unit = if (!isTerminating) { - if (childrenRefs.getByRef(child).isEmpty) { - @tailrec def add(ref: ActorRef): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.add(ref)) || add(ref) - } - add(child) - } + if (childrenRefs.getByRef(child).isEmpty) addChild(child) if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) } @@ -870,15 +881,9 @@ private[akka] class ActorCell( } final def handleChildTerminated(child: ActorRef): Unit = try { - @tailrec def remove(ref: ActorRef): ChildrenContainer = { - val c = childrenRefs - val n = c.remove(ref) - if (swapChildrenRefs(c, n)) n - else remove(ref) - } childrenRefs match { case tc @ TerminatingChildrenContainer(_, _, reason) ⇒ - val n = remove(child) + val n = removeChild(child) actor.supervisorStrategy.handleChildTerminated(this, child, children) if (!n.isInstanceOf[TerminatingChildrenContainer]) reason match { case Recreation(cause) ⇒ doRecreate(cause, actor) // doRecreate since this is the continuation of "recreate" @@ -886,7 +891,7 @@ private[akka] class ActorCell( case _ ⇒ } case _ ⇒ - remove(child) + 
removeChild(child) actor.supervisorStrategy.handleChildTerminated(this, child, children) } } catch { From 6a380550f90362c1391ef19086fdb0b78024d26d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 08:59:05 +0200 Subject: [PATCH 323/538] Notify MembershipChangeListeners when 'members' change --- .../src/main/scala/akka/cluster/Cluster.scala | 13 ++++++------- .../MembershipChangeListenerSpec.scala | 19 ++++++++++++------- .../scala/akka/cluster/SunnyWeatherSpec.scala | 2 +- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index e2b5c8e751..4ea43d50e4 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -589,10 +589,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update else { if (node != selfAddress) failureDetector heartbeat node - - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newMembers } - } + notifyMembershipChangeListeners(localState, newState) } } } @@ -623,10 +620,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } } - private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = - if (newState.latestGossip != oldState.latestGossip && convergence(newState.latestGossip).isDefined) { + private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = { + val oldMembersStatus = oldState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + val newMembersStatus = newState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + if (newMembersStatus != oldMembersStatus) newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } - } + } /** * 
State transition to EXITING. diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index c87a280e17..9e190050f9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -37,7 +37,6 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan awaitClusterUp(first) runOn(first, second) { - cluster.join(firstAddress) val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { @@ -45,8 +44,13 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan latch.countDown() } }) + testConductor.enter("listener-1-registered") + cluster.join(firstAddress) latch.await - cluster.convergence.isDefined must be(true) + } + + runOn(third) { + testConductor.enter("listener-1-registered") } testConductor.enter("after-1") @@ -54,10 +58,6 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - runOn(third) { - cluster.join(firstAddress) - } - val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { @@ -65,8 +65,13 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan latch.countDown() } }) + testConductor.enter("listener-2-registered") + + runOn(third) { + cluster.join(firstAddress) + } + latch.await - cluster.convergence.isDefined must be(true) testConductor.enter("after-2") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 
b74fdd09db..f4f42f0117 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -52,7 +52,7 @@ abstract class SunnyWeatherSpec } // add a few more - awaitClusterUp(first, second, third, fourth, fifth) + awaitClusterUp(roles: _*) log.info("5 joined") val unexpected = new AtomicReference[SortedSet[Member]] From 56735477b8758c51ed762629ca1afac7dcbbb96d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 09:23:36 +0200 Subject: [PATCH 324/538] initialParticipants default as roles.size in cluster tests --- .../ClientDowningNodeThatIsUnreachableSpec.scala | 2 -- .../akka/cluster/ClientDowningNodeThatIsUpSpec.scala | 2 -- .../multi-jvm/scala/akka/cluster/ConvergenceSpec.scala | 2 -- .../cluster/GossipingAccrualFailureDetectorSpec.scala | 2 -- .../scala/akka/cluster/JoinTwoClustersSpec.scala | 2 -- .../LeaderDowningNodeThatIsUnreachableSpec.scala | 2 -- .../scala/akka/cluster/LeaderElectionSpec.scala | 10 ++++------ .../cluster/MembershipChangeListenerExitingSpec.scala | 2 -- .../cluster/MembershipChangeListenerJoinSpec.scala | 2 -- .../cluster/MembershipChangeListenerLeavingSpec.scala | 2 -- .../akka/cluster/MembershipChangeListenerSpec.scala | 2 -- .../akka/cluster/MembershipChangeListenerUpSpec.scala | 2 -- .../scala/akka/cluster/MultiNodeClusterSpec.scala | 2 ++ .../multi-jvm/scala/akka/cluster/NodeJoinSpec.scala | 2 -- .../NodeLeavingAndExitingAndBeingRemovedSpec.scala | 2 -- .../scala/akka/cluster/NodeLeavingAndExitingSpec.scala | 2 -- .../multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala | 2 -- .../scala/akka/cluster/NodeMembershipSpec.scala | 2 -- .../scala/akka/cluster/NodeShutdownSpec.scala | 2 -- .../src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala | 2 -- .../scala/akka/cluster/SunnyWeatherSpec.scala | 2 -- .../test/scala/akka/remote/testkit/MultiNodeSpec.scala | 9 +++++++-- 22 files changed, 13 insertions(+), 46 deletions(-) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index d78afcdeb7..6d4d09f7cb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -29,8 +29,6 @@ class ClientDowningNodeThatIsUnreachableSpec import ClientDowningNodeThatIsUnreachableMultiJvmSpec._ - override def initialParticipants = 4 - "Client of a 4 node cluster" must { "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 5f778c25d1..db00438c9e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -29,8 +29,6 @@ class ClientDowningNodeThatIsUpSpec import ClientDowningNodeThatIsUpMultiJvmSpec._ - override def initialParticipants = 4 - "Client of a 4 node cluster" must { "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index a7e5712cfa..9963903b90 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -35,8 +35,6 @@ abstract class ConvergenceSpec with MultiNodeClusterSpec { import ConvergenceMultiJvmSpec._ - override def initialParticipants = 4 - "A cluster of 3 members" must { "reach initial convergence" taggedAs LongRunningTest in { diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 9df3e20d68..f75ca3b058 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -27,8 +27,6 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi with MultiNodeClusterSpec { import GossipingAccrualFailureDetectorMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index e01839684a..e86602949f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -33,8 +33,6 @@ abstract class JoinTwoClustersSpec import JoinTwoClustersMultiJvmSpec._ - override def initialParticipants = 6 - lazy val a1Address = node(a1).address lazy val b1Address = node(b1).address lazy val c1Address = node(c1).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index ffbd4eb287..616c412556 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -37,8 +37,6 @@ class LeaderDowningNodeThatIsUnreachableSpec import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ - override def initialParticipants = 4 - "The Leader in a 4 node cluster" must { "be able to DOWN 
a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index ce4d5a8042..43f0fc19eb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -31,10 +31,8 @@ abstract class LeaderElectionSpec import LeaderElectionMultiJvmSpec._ - override def initialParticipants = 5 - // sorted in the order used by the cluster - lazy val roles = Seq(first, second, third, fourth).sorted + lazy val sortedRoles = Seq(first, second, third, fourth).sorted "A cluster of four nodes" must { @@ -42,15 +40,15 @@ abstract class LeaderElectionSpec awaitClusterUp(first, second, third, fourth) if (myself != controller) { - cluster.isLeader must be(myself == roles.head) - assertLeaderIn(roles) + cluster.isLeader must be(myself == sortedRoles.head) + assertLeaderIn(sortedRoles) } testConductor.enter("after") } def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { - val currentRoles = roles.drop(alreadyShutdown) + val currentRoles = sortedRoles.drop(alreadyShutdown) currentRoles.size must be >= (2) val leader = currentRoles.head val aUser = currentRoles.last diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index cdf809187a..d76c3cf689 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -37,8 +37,6 @@ abstract class MembershipChangeListenerExitingSpec import MembershipChangeListenerExitingMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address 
lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index c07ec19f77..bdf8f7d44d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -34,8 +34,6 @@ abstract class MembershipChangeListenerJoinSpec import MembershipChangeListenerJoinMultiJvmSpec._ - override def initialParticipants = 2 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 41b69ce7b4..1ff11465bb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -34,8 +34,6 @@ abstract class MembershipChangeListenerLeavingSpec import MembershipChangeListenerLeavingMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 9e190050f9..c48727b1cd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -25,8 +25,6 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan with MultiNodeClusterSpec { import MembershipChangeListenerMultiJvmSpec._ - override def 
initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 7709e9854a..3e22dd456d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -27,8 +27,6 @@ abstract class MembershipChangeListenerUpSpec import MembershipChangeListenerUpMultiJvmSpec._ - override def initialParticipants = 2 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 7f7d60fcdc..b185067ab0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -30,6 +30,8 @@ object MultiNodeClusterSpec { trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ + override def initialParticipants = roles.size + /** * Get or create a cluster node using 'Cluster(system)' extension. 
*/ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 0d6a50b82a..066e86aae6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -33,8 +33,6 @@ abstract class NodeJoinSpec import NodeJoinMultiJvmSpec._ - override def initialParticipants = 2 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index a974930d0a..8e274be311 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -28,8 +28,6 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec import NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 3773ccbd5d..79fff4770f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -36,8 +36,6 @@ abstract class NodeLeavingAndExitingSpec import NodeLeavingAndExitingMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 96876cf4cb..b834492045 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -30,8 +30,6 @@ abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) with MultiNodeClusterSpec { import NodeLeavingMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index edd3e44121..ef65cefd0f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -26,8 +26,6 @@ abstract class NodeMembershipSpec import NodeMembershipMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index b54c0c1b39..4dc90a5b89 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -30,8 +30,6 @@ class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { import NodeShutdownMultiJvmSpec._ - override def initialParticipants = 2 - "A cluster of 2 nodes" must { "not be singleton cluster when joined" taggedAs LongRunningTest in { diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index f8d0a1f6e2..6cb8bf9e07 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -28,8 +28,6 @@ abstract class NodeUpSpec import NodeUpMultiJvmSpec._ - override def initialParticipants = 2 - "A cluster node that is joining another cluster" must { "be moved to UP by the leader after a convergence" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index f4f42f0117..c2f8e8d3f5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -40,8 +40,6 @@ abstract class SunnyWeatherSpec import SunnyWeatherMultiJvmSpec._ - override def initialParticipants = roles.size - "A normal cluster" must { "be healthy" taggedAs LongRunningTest in { diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index a0d7d5eac4..faaab5cdc4 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -84,7 +84,7 @@ abstract class MultiNodeConfig { private[testkit] def deployments(node: RoleName): Seq[String] = (_deployments get node getOrElse Nil) ++ _allDeploy - def roles: Seq[RoleName] = _roles + private[testkit] def roles: Seq[RoleName] = _roles } @@ -131,7 +131,7 @@ object MultiNodeSpec { * `AskTimeoutException: sending to terminated ref breaks promises`. Using lazy * val is fine. 
*/ -abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) +abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) extends AkkaSpec(_system) { import MultiNodeSpec._ @@ -143,6 +143,11 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: * Test Class Interface */ + /** + * All registered roles + */ + def roles: Seq[RoleName] = _roles + /** * TO BE DEFINED BY USER: Defines the number of participants required for starting the test. This * might not be equals to the number of nodes available to the test. From 233b9a6291a9a06e91907f8fd3291d456d7e846f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 09:41:42 +0200 Subject: [PATCH 325/538] Change loglevel to info, gossiping verification done, see #2195 --- .../src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index c2f8e8d3f5..fcb1393f8a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -24,7 +24,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { gossip-interval = 400 ms nr-of-deputy-nodes = 0 } - akka.loglevel = DEBUG + akka.loglevel = INFO """)) } @@ -61,7 +61,7 @@ abstract class SunnyWeatherSpec } }) - for (n ← 1 to 40) { + for (n ← 1 to 30) { testConductor.enter("period-" + n) unexpected.get must be(null) awaitUpConvergence(roles.size) From 531e675ef9020859c96b130b07ed5301173d2c46 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 11:20:19 +0200 Subject: [PATCH 326/538] Ignore the leaving/exit failing tests --- 
.../cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala | 3 ++- .../scala/akka/cluster/NodeLeavingAndExitingSpec.scala | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 8e274be311..d85016c714 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -36,7 +36,8 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "A node that is LEAVING a non-singleton cluster" must { - "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 79fff4770f..2909362fa7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -42,7 +42,8 @@ abstract class NodeLeavingAndExitingSpec "A node that is LEAVING a non-singleton cluster" must { - "be moved to EXITING by the leader" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be moved to EXITING by the leader" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) From c7af802dc8c9586c549d9a6fd13814c6c70e53bd Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 11:30:44 +0200 Subject: [PATCH 327/538] Turn on debug logging due to failures --- .../src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 4dc90a5b89..37d4b4571e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -13,7 +13,7 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false). + commonConfig(debugConfig(on = true). withFallback(ConfigFactory.parseString(""" akka.cluster { auto-down = on From 57fadc1f7d776738b874f10ff9eb6e49c4055ff4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 8 Jun 2012 11:50:36 +0200 Subject: [PATCH 328/538] Added MembershipChangeListenerRemovedMultiJvmSpec MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../MembershipChangeListenerRemovedSpec.scala | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala new file mode 100644 index 0000000000..6b737a22e2 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala @@ -0,0 +1,71 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object MembershipChangeListenerRemovedMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class MembershipChangeListenerRemovedMultiJvmNode1 extends MembershipChangeListenerRemovedSpec +class MembershipChangeListenerRemovedMultiJvmNode2 extends MembershipChangeListenerRemovedSpec +class MembershipChangeListenerRemovedMultiJvmNode3 extends MembershipChangeListenerRemovedSpec + +abstract class MembershipChangeListenerRemovedSpec extends MultiNodeSpec(MembershipChangeListenerRemovedMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import MembershipChangeListenerRemovedMultiJvmSpec._ + + override def initialParticipants = 3 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + + val reaperWaitingTime = 30.seconds.dilated + + "A registered MembershipChangeListener" must { + "be notified when new node is REMOVED" taggedAs LongRunningTest in { + + runOn(first) { + cluster.self + } + testConductor.enter("first-started") + + runOn(second, third) { + cluster.join(firstAddress) + } + awaitUpConvergence(numberOfMembers = 3) + testConductor.enter("rest-started") + + runOn(third) { + val removedLatch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + println("------- MembershipChangeListener " + members.mkString(", ")) + if (members.size == 3 && members.find(_.address == secondAddress).isEmpty) + removedLatch.countDown() + } + }) + 
removedLatch.await + } + + runOn(first) { + cluster.leave(secondAddress) + } + + testConductor.enter("finished") + } + } +} From 45b2484f62982c19ece9aca1b3301375153db7e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 8 Jun 2012 11:51:34 +0200 Subject: [PATCH 329/538] Implemented/Fixed Cluster.remove() and state transition from LEAVING -> REMOVED. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 91 +++++++++++-------- 1 file changed, 55 insertions(+), 36 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index c5ad773989..b2fe9c7352 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -50,7 +50,7 @@ sealed trait ClusterMessage extends Serializable /** * Cluster commands sent by the USER. */ -object ClusterAction { +object ClusterUserAction { /** * Command to join the cluster. Sent when a node (reprsesented by 'address') @@ -72,6 +72,12 @@ object ClusterAction { * Command to remove a node from the cluster immediately. */ case class Remove(address: Address) extends ClusterMessage +} + +/** + * Cluster commands sent by the LEADER. + */ +object ClusterLeaderAction { /** * Command to mark a node to be removed from the cluster immediately. @@ -197,8 +203,8 @@ case class Gossip( } /** - * Marks the gossip as seen by this node (selfAddress) by updating the address entry in the 'gossip.overview.seen' - * Map with the VectorClock for the new gossip. + * Marks the gossip as seen by this node (address) by updating the address entry in the 'gossip.overview.seen' + * Map with the VectorClock (version) for the new gossip. 
*/ def seen(address: Address): Gossip = { if (overview.seen.contains(address) && overview.seen(address) == version) this @@ -253,7 +259,8 @@ case class Gossip( * Instantiated as a single instance for each Cluster - e.g. commands are serialized to Cluster message after message. */ final class ClusterCommandDaemon extends Actor { - import ClusterAction._ + import ClusterUserAction._ + import ClusterLeaderAction._ val cluster = Cluster(context.system) val log = Logging(context.system, this) @@ -331,8 +338,6 @@ trait ClusterNodeMBean { def leave(address: String) def down(address: String) def remove(address: String) - - def shutdown() } /** @@ -499,10 +504,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. + * + * INTERNAL API: + * Should not called by the user. The user can issue a LEAVE command which will tell the node + * to go through graceful handoff process LEAVE -> EXITING -> REMOVED -> SHUTDOWN. 
*/ - def shutdown(): Unit = { + private[akka] def shutdown(): Unit = { if (isRunning.compareAndSet(true, false)) { - log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) + log.info("Cluster Node [{}] - Shutting down cluster node...", selfAddress) gossipCanceller.cancel() failureDetectorReaperCanceller.cancel() leaderActionsCanceller.cancel() @@ -512,6 +521,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } catch { case e: InstanceNotFoundException ⇒ // ignore - we are running multiple cluster nodes in the same JVM (probably for testing) } + log.info("Cluster Node [{}] - Cluster node successfully shut down", selfAddress) } } @@ -543,7 +553,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ def join(address: Address): Unit = { val connection = clusterCommandConnectionFor(address) - val command = ClusterAction.Join(selfAddress) + val command = ClusterUserAction.Join(selfAddress) log.info("Cluster Node [{}] - Trying to send JOIN to [{}] through connection [{}]", selfAddress, address, connection) connection ! command } @@ -552,21 +562,21 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Send command to issue state transition to LEAVING for the node specified by 'address'. */ def leave(address: Address): Unit = { - clusterCommandDaemon ! ClusterAction.Leave(address) + clusterCommandDaemon ! ClusterUserAction.Leave(address) } /** - * Send command to issue state transition to from DOWN to EXITING for the node specified by 'address'. + * Send command to DOWN the node specified by 'address'. */ def down(address: Address): Unit = { - clusterCommandDaemon ! ClusterAction.Down(address) + clusterCommandDaemon ! ClusterUserAction.Down(address) } /** - * Send command to issue state transition to REMOVED for the node specified by 'address'. + * Send command to REMOVE the node specified by 'address'. 
*/ def remove(address: Address): Unit = { - clusterCommandDaemon ! ClusterAction.Remove(address) + clusterCommandDaemon ! ClusterUserAction.Remove(address) } // ======================================================== @@ -642,13 +652,15 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ private[cluster] final def exiting(address: Address): Unit = { log.info("Cluster Node [{}] - Marking node [{}] as EXITING", selfAddress, address) + // FIXME implement when we implement hand-off } /** * State transition to REMOVED. */ private[cluster] final def removing(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as REMOVED", selfAddress, address) + log.info("Cluster Node [{}] - Node has been REMOVED by the leader - shutting down...", selfAddress) + shutdown() } /** @@ -727,6 +739,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val winningGossip = if (remoteGossip.version <> localGossip.version) { // concurrent + println("=======>>> CONCURRENT") val mergedGossip = remoteGossip merge localGossip val versionedMergedGossip = mergedGossip + vclockNode @@ -737,20 +750,23 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ versionedMergedGossip } else if (remoteGossip.version < localGossip.version) { + println("=======>>> LOCAL") // local gossip is newer localGossip } else { + println("=======>>> REMOTE") // remote gossip is newer remoteGossip } + println("=======>>> WINNING " + winningGossip.members.mkString(", ")) val newState = localState copy (latestGossip = winningGossip seen selfAddress) // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) receive(sender, remoteGossip) // recur if we fail the update else { - log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address) + log.info("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address) if (sender.address != 
selfAddress) failureDetector heartbeat sender.address @@ -772,8 +788,8 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * @param oldState the state to change the member status in * @return the updated new state with the new member status */ - private def switchMemberStatusTo(newStatus: MemberStatus, state: State): State = { - log.info("Cluster Node [{}] - Switching membership status to [{}]", selfAddress, newStatus) + private def switchMemberStatusTo(newStatus: MemberStatus, state: State): State = { // TODO: Removed this method? Currently not used. + log.debug("Cluster Node [{}] - Switching membership status to [{}]", selfAddress, newStatus) val localSelf = self @@ -789,7 +805,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ else member } - // ugly crap to work around bug in scala colletions ('val ss: SortedSet[Member] = SortedSet.empty[Member] ++ aSet' does not compile) + // NOTE: ugly crap to work around bug in scala colletions ('val ss: SortedSet[Member] = SortedSet.empty[Member] ++ aSet' does not compile) val newMembersSortedSet = SortedSet[Member](newMembersSet.toList: _*) val newGossip = localGossip copy (members = newMembersSortedSet) @@ -936,8 +952,8 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localUnreachableMembers = localOverview.unreachable // Leader actions are as follows: - // 1. Move JOINING => UP -- When a node joins the cluster - // 2. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) + // 1. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) - remove the nodes from the node ring + // 2. Move JOINING => UP -- When a node joins the cluster // 3. Move LEAVING => EXITING -- When all partition handoff has completed // 4. Move UNREACHABLE => DOWN -- When the node is in the UNREACHABLE set it can be auto-down by leader // 5. 
Updating the vclock version for the changes @@ -951,9 +967,20 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val newMembers = - localMembers map { member ⇒ + // ---------------------- + // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring + // ---------------------- + localMembers filter { member ⇒ + if (member.status == MemberStatus.Exiting) { + log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - Removing node from node ring", selfAddress, member.address) + hasChangedState = true + clusterCommandConnectionFor(member.address) ! ClusterUserAction.Remove(member.address) // tell the removed node to shut himself down + false + } else true + + } map { member ⇒ // ---------------------- - // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) + // 2. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) // ---------------------- if (member.status == MemberStatus.Joining) { log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) @@ -961,16 +988,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ member copy (status = MemberStatus.Up) } else member - } map { member ⇒ - // ---------------------- - // 2. Move EXITING => REMOVED (once all nodes have seen that this node is EXITING e.g. we have a convergence) - // ---------------------- - if (member.status == MemberStatus.Exiting) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", selfAddress, member.address) - hasChangedState = true - member copy (status = MemberStatus.Removed) - } else member - } map { member ⇒ // ---------------------- // 3. 
Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) @@ -978,10 +995,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (member.status == MemberStatus.Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address) hasChangedState = true + clusterCommandConnectionFor(member.address) ! ClusterLeaderAction.Exit(member.address) // FIXME should use ? to await completion of handoff? member copy (status = MemberStatus.Exiting) } else member } + localGossip copy (members = newMembers) // update gossip } else if (autoDown) { @@ -1045,7 +1064,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // First check that: // 1. we don't have any members that are unreachable (unreachable.isEmpty == true), or - // 2. all unreachable members in the set have status DOWN + // 2. 
all unreachable members in the set have status DOWN or REMOVED // Else we can't continue to check for convergence // When that is done we check that all the entries in the 'seen' table have the same vector clock version if (unreachable.isEmpty || !unreachable.exists { m ⇒ @@ -1055,8 +1074,10 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val seen = gossip.overview.seen val views = Set.empty[VectorClock] ++ seen.values + println("=======>>> VIEWS " + views.size) if (views.size == 1) { log.debug("Cluster Node [{}] - Cluster convergence reached", selfAddress) + println("=======>>> ----------------------- HAS CONVERGENCE") Some(gossip) } else None } else None @@ -1144,8 +1165,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ def down(address: String) = clusterNode.down(AddressFromURIString(address)) def remove(address: String) = clusterNode.remove(AddressFromURIString(address)) - - def shutdown() = clusterNode.shutdown() } log.info("Cluster Node [{}] - registering cluster JMX MBean [{}]", selfAddress, clusterMBeanName) try { From dcae863f7fbe4de0b57c0634daeb4e99de0416a9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 13:44:40 +0200 Subject: [PATCH 330/538] Use all heartbeats in failure detector, see #2182 * Failure detector didn't use hearbeat 1 and 2 * Included heartbeat 2 in ordinary stats * For heartbeat 1 use guess stats, important so that connections with only one heartbeat becomes unavailble, the guess corresponds to 1 second interval which results in phi > 8 after 18 seconds * Improved AccrualFailureDetectorSpec --- .../akka/cluster/AccrualFailureDetector.scala | 60 ++++++------- ...LeavingAndExitingAndBeingRemovedSpec.scala | 3 +- .../cluster/NodeLeavingAndExitingSpec.scala | 3 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 2 +- .../cluster/AccrualFailureDetectorSpec.scala | 84 ++++++++++--------- 5 files changed, 78 insertions(+), 74 deletions(-) diff --git 
a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 3caece392c..c86eb3361e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -36,7 +36,11 @@ class AccrualFailureDetector( /** * Holds the failure statistics for a specific node Address. */ - private case class FailureStats(mean: Double = 0.0D, variance: Double = 0.0D, deviation: Double = 0.0D) + private case class FailureStats(mean: Double = 0.0, variance: Double = 0.0, deviation: Double = 0.0) + + // guess statistics for first heartbeat, + // important so that connections with only one heartbeat becomes unavailble + private val failureStatsFirstHeartbeat = FailureStats(mean = 1000.0) /** * Implement using optimistic lockless concurrency, all state is represented @@ -72,7 +76,7 @@ class AccrualFailureDetector( // add starter records for this new connection val newState = oldState copy ( version = oldState.version + 1, - failureStats = oldState.failureStats + (connection -> FailureStats()), + failureStats = oldState.failureStats + (connection -> failureStatsFirstHeartbeat), intervalHistory = oldState.intervalHistory + (connection -> IndexedSeq.empty[Long]), timestamps = oldState.timestamps + (connection -> timeMachine()), explicitRemovals = oldState.explicitRemovals - connection) @@ -93,30 +97,25 @@ class AccrualFailureDetector( case _ ⇒ IndexedSeq.empty[Long] }) :+ interval - val newFailureStats = - if (newIntervalsForConnection.size > 1) { + val newFailureStats = { + val newMean: Double = newIntervalsForConnection.sum.toDouble / newIntervalsForConnection.size - val newMean: Double = newIntervalsForConnection.sum / newIntervalsForConnection.size.toDouble - - val oldConnectionFailureStats = oldState.failureStats.get(connection).getOrElse { - throw new IllegalStateException("Can't calculate new failure statistics 
due to missing heartbeat history") - } - - val deviationSum = - newIntervalsForConnection - .map(_.toDouble) - .foldLeft(0.0D)((x, y) ⇒ x + (y - newMean)) - - val newVariance: Double = deviationSum / newIntervalsForConnection.size.toDouble - val newDeviation: Double = math.sqrt(newVariance) - - val newFailureStats = oldConnectionFailureStats copy (mean = newMean, deviation = newDeviation, variance = newVariance) - oldState.failureStats + (connection -> newFailureStats) - - } else { - oldState.failureStats + val oldConnectionFailureStats = oldState.failureStats.get(connection).getOrElse { + throw new IllegalStateException("Can't calculate new failure statistics due to missing heartbeat history") } + val deviationSum = + newIntervalsForConnection + .map(_.toDouble) + .foldLeft(0.0)((x, y) ⇒ x + (y - newMean)) + + val newVariance: Double = deviationSum / newIntervalsForConnection.size + val newDeviation: Double = math.sqrt(newVariance) + + val newFailureStats = oldConnectionFailureStats copy (mean = newMean, deviation = newDeviation, variance = newVariance) + oldState.failureStats + (connection -> newFailureStats) + } + val newState = oldState copy (version = oldState.version + 1, failureStats = newFailureStats, intervalHistory = oldState.intervalHistory + (connection -> newIntervalsForConnection), @@ -132,8 +131,7 @@ class AccrualFailureDetector( * Calculates how likely it is that the connection has failed. *

* If a connection does not have any records in failure detector then it is - * considered dead. This is true either if the heartbeat have not started - * yet or the connection have been explicitly removed. + * considered healthy. *

* Implementations of 'Cumulative Distribution Function' for Exponential Distribution. * For a discussion on the math read [https://issues.apache.org/jira/browse/CASSANDRA-2597]. @@ -145,21 +143,22 @@ class AccrualFailureDetector( val phi = // if connection has been removed explicitly if (oldState.explicitRemovals.contains(connection)) Double.MaxValue - else if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections + else if (oldTimestamp.isEmpty) 0.0 // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections else { val timestampDiff = timeMachine() - oldTimestamp.get val mean = oldState.failureStats.get(connection) match { case Some(FailureStats(mean, _, _)) ⇒ mean - case _ ⇒ throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") + case _ ⇒ + if (!oldState.intervalHistory.contains(connection)) 1000.0 + else throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") } - if (mean == 0.0D) 0.0D + if (mean == 0.0) 0.0 else PhiFactor * timestampDiff / mean } - // only log if PHI value is starting to get interesting - if (phi > 0.0D) log.debug("Node [{}] - Phi value [{}] and threshold [{}] for connection [{}] ", address, phi, threshold, connection) + log.debug("Node [{}] - Phi value [{}] and threshold [{}] for connection [{}] ", address, phi, threshold, connection) phi } @@ -168,6 +167,7 @@ class AccrualFailureDetector( */ @tailrec final def remove(connection: Address) { + log.debug("Node [{}] - Remove connection [{}] ", address, connection) val oldState = state.get if (oldState.failureStats.contains(connection)) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index d85016c714..8e274be311 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -36,8 +36,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest ignore { + "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 2909362fa7..79fff4770f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -42,8 +42,7 @@ abstract class NodeLeavingAndExitingSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be moved to EXITING by the leader" taggedAs LongRunningTest ignore { + "be moved to EXITING by the leader" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 37d4b4571e..4dc90a5b89 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -13,7 +13,7 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = true). + commonConfig(debugConfig(on = false). 
withFallback(ConfigFactory.parseString(""" akka.cluster { auto-down = on diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index 173ce799f8..1cf62daf1c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -17,7 +17,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" val conn = Address("akka", "", "localhost", 2552) val conn2 = Address("akka", "", "localhost", 2553) - def fakeTimeGenerator(timeIntervals: List[Long]): () ⇒ Long = { + def fakeTimeGenerator(timeIntervals: Seq[Long]): () ⇒ Long = { var times = timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) ⇒ acc ::: List[Long](acc.last + c)) def timeGenerator(): Long = { val currentTime = times.head @@ -27,22 +27,47 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" timeGenerator } - "return phi value of 0.0D on startup for each address" in { + "return phi value of 0.0 on startup for each address, when no heartbeats" in { val fd = new AccrualFailureDetector(system, conn) - fd.phi(conn) must be(0.0D) - fd.phi(conn2) must be(0.0D) + fd.phi(conn) must be(0.0) + fd.phi(conn2) must be(0.0) + } + + "return phi based on guess when only one heartbeat" in { + // 1 second ticks + val timeInterval = Vector.fill(30)(1000L) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) + + fd.heartbeat(conn) + fd.phi(conn) must be > (0.0) + // let time go + for (n ← 2 to 8) + fd.phi(conn) must be < (4.0) + for (n ← 9 to 18) + fd.phi(conn) must be < (8.0) + + fd.phi(conn) must be > (8.0) + } + + "return phi value using first interval after second heartbeat" in { + val timeInterval = List[Long](0, 100, 100, 100) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) + + fd.heartbeat(conn) + 
fd.phi(conn) must be > (0.0) + fd.heartbeat(conn) + fd.phi(conn) must be > (0.0) } "mark node as available after a series of successful heartbeats" in { val timeInterval = List[Long](0, 1000, 100, 100) - val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) - fd.heartbeat(conn) - fd.heartbeat(conn) fd.isAvailable(conn) must be(true) @@ -50,18 +75,13 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead after explicit removal of connection" in { val timeInterval = List[Long](0, 1000, 100, 100, 100) - val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) - fd.heartbeat(conn) - fd.heartbeat(conn) - fd.isAvailable(conn) must be(true) - fd.remove(conn) fd.isAvailable(conn) must be(false) @@ -69,14 +89,12 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after explicit removal of connection and receiving heartbeat again" in { val timeInterval = List[Long](0, 1000, 100, 1100, 1100, 1100, 1100, 1100, 100) - val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 fd.heartbeat(conn) //1000 - fd.heartbeat(conn) //1100 fd.isAvailable(conn) must be(true) //2200 @@ -87,9 +105,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" // it receives heartbeat from an explicitly removed node fd.heartbeat(conn) //4400 - fd.heartbeat(conn) //5500 - fd.heartbeat(conn) //6600 fd.isAvailable(conn) must be(true) //6700 @@ -98,40 +114,29 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as 
dead if heartbeat are missed" in { val timeInterval = List[Long](0, 1000, 100, 100, 5000) val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, threshold = 3, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, threshold = 3, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 - fd.heartbeat(conn) //1000 - fd.heartbeat(conn) //1100 fd.isAvailable(conn) must be(true) //1200 - fd.isAvailable(conn) must be(false) //6200 } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { val timeInterval = List[Long](0, 1000, 100, 1100, 5000, 100, 1000, 100, 100) - val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, threshold = 3, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, threshold = 3, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 - fd.heartbeat(conn) //1000 - fd.heartbeat(conn) //1100 - fd.isAvailable(conn) must be(true) //1200 - fd.isAvailable(conn) must be(false) //6200 - fd.heartbeat(conn) //6300 - fd.heartbeat(conn) //7300 - fd.heartbeat(conn) //7400 fd.isAvailable(conn) must be(true) //7500 @@ -139,8 +144,8 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "use maxSampleSize heartbeats" in { val timeInterval = List[Long](0, 100, 100, 100, 100, 600, 1000, 1000, 1000, 1000, 1000) - val ft = fakeTimeGenerator(timeInterval) - val fd = new AccrualFailureDetector(system, conn, maxSampleSize = 3, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, maxSampleSize = 3, + timeMachine = fakeTimeGenerator(timeInterval)) // 100 ms interval fd.heartbeat(conn) //0 @@ -156,5 +161,6 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" val phi2 = fd.phi(conn) //5000 phi2 must be(phi1.plusOrMinus(0.001)) } + } } From 18260a3b7bcda1055572eef472cb654d62c25604 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 8 Jun 2012 13:56:53 +0200 
Subject: [PATCH 331/538] #2203 - publish failed message deliveries to DeadLetters when bounded or durable mailbox enqueues fail --- .../actor/ActorWithBoundedStashSpec.scala | 51 ++++++------------- .../akka/dispatch/MailboxConfigSpec.scala | 10 ++-- .../main/scala/akka/dispatch/Mailbox.scala | 35 +++++-------- 3 files changed, 32 insertions(+), 64 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala index 9d411268aa..33283b18cf 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala @@ -6,7 +6,7 @@ package akka.actor import akka.testkit._ import akka.testkit.DefaultTimeout import akka.testkit.TestEvent._ -import akka.dispatch.{ Await, MessageQueueAppendFailedException, BoundedDequeBasedMailbox } +import akka.dispatch.{ Await, BoundedDequeBasedMailbox } import akka.pattern.ask import akka.util.duration._ import akka.actor.ActorSystem.Settings @@ -17,16 +17,8 @@ object ActorWithBoundedStashSpec { class StashingActor(implicit sys: ActorSystem) extends Actor with Stash { def receive = { - case "hello" ⇒ - stash() - sender ! "OK" - case "world" ⇒ - try { - unstashAll() - } catch { - case e: MessageQueueAppendFailedException ⇒ - expectedException.open() - } + case "hello" ⇒ stash() + case "world" ⇒ unstashAll() } } @@ -36,18 +28,10 @@ object ActorWithBoundedStashSpec { def receive = { case "hello" ⇒ numStashed += 1 - try { - stash() - } catch { - case e: StashOverflowException ⇒ - if (numStashed == 21) stashOverflow.open() - } + try stash() catch { case e: StashOverflowException ⇒ if (numStashed == 21) sender ! 
"STASHOVERFLOW" } } } - @volatile var expectedException: TestLatch = null - @volatile var stashOverflow: TestLatch = null - val testConf: Config = ConfigFactory.parseString(""" my-dispatcher { mailbox-type = "akka.actor.ActorWithBoundedStashSpec$Bounded" @@ -56,47 +40,42 @@ object ActorWithBoundedStashSpec { """) // bounded deque-based mailbox with capacity 10 - class Bounded(settings: Settings, config: Config) extends BoundedDequeBasedMailbox(10, 5 seconds) - + class Bounded(settings: Settings, config: Config) extends BoundedDequeBasedMailbox(10, 1 seconds) } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ActorWithBoundedStashSpec extends AkkaSpec(ActorWithBoundedStashSpec.testConf) with DefaultTimeout with BeforeAndAfterEach { +class ActorWithBoundedStashSpec extends AkkaSpec(ActorWithBoundedStashSpec.testConf) with DefaultTimeout with BeforeAndAfterEach with ImplicitSender { import ActorWithBoundedStashSpec._ implicit val sys = system - override def atStartup { - system.eventStream.publish(Mute(EventFilter[Exception]("Crashing..."))) - } + override def atStartup { system.eventStream.publish(Mute(EventFilter[Exception]("Crashing..."))) } def myProps(creator: ⇒ Actor): Props = Props(creator).withDispatcher("my-dispatcher") "An Actor with Stash and BoundedDequeBasedMailbox" must { - "throw a MessageQueueAppendFailedException in case of a capacity violation" in { - ActorWithBoundedStashSpec.expectedException = new TestLatch + "end up in DeadLetters in case of a capacity violation" in { + system.eventStream.subscribe(testActor, classOf[DeadLetter]) + val stasher = system.actorOf(myProps(new StashingActor)) // fill up stash - val futures = for (_ ← 1 to 11) yield { stasher ? "hello" } - futures foreach { Await.ready(_, 10 seconds) } + (1 to 11) foreach { _ ⇒ stasher ! "hello" } // cause unstashAll with capacity violation stasher ! 
"world" - Await.ready(ActorWithBoundedStashSpec.expectedException, 10 seconds) + expectMsg(DeadLetter("hello", testActor, stasher)) + system.eventStream.unsubscribe(testActor, classOf[DeadLetter]) } - } "An Actor with bounded Stash" must { "throw a StashOverflowException in case of a stash capacity violation" in { - ActorWithBoundedStashSpec.stashOverflow = new TestLatch val stasher = system.actorOf(myProps(new StashingActorWithOverflow)) // fill up stash - for (_ ← 1 to 21) { stasher ! "hello" } - Await.ready(ActorWithBoundedStashSpec.stashOverflow, 10 seconds) + (1 to 21) foreach { _ ⇒ stasher ! "hello" } + expectMsg("STASHOVERFLOW") } - } } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index 8759f1aad9..4f2d61de65 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -6,9 +6,8 @@ import java.util.concurrent.ConcurrentLinkedQueue import akka.util._ import akka.util.duration._ import akka.testkit.AkkaSpec -import akka.actor.{ ActorRef, ActorContext, Props, LocalActorRef } import com.typesafe.config.Config -import akka.actor.ActorSystem +import akka.actor._ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAndAfterEach { @@ -39,9 +38,10 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn q.numberOfMessages must be === config.capacity q.hasMessages must be === true - intercept[MessageQueueAppendFailedException] { - q.enqueue(null, exampleMessage) - } + system.eventStream.subscribe(testActor, classOf[DeadLetter]) + q.enqueue(testActor, exampleMessage) + expectMsg(DeadLetter(exampleMessage.message, system.deadLetters, testActor)) + system.eventStream.unsubscribe(testActor, classOf[DeadLetter]) q.dequeue must be === exampleMessage 
q.numberOfMessages must be(config.capacity - 1) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index b6af478ac7..25fc0250af 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -6,18 +6,11 @@ package akka.dispatch import akka.AkkaException import java.util.{ Comparator, PriorityQueue, Queue, Deque } import akka.util._ -import akka.actor.{ ActorCell, ActorRef } import java.util.concurrent._ import annotation.tailrec import akka.event.Logging.Error -import akka.actor.ActorContext import com.typesafe.config.Config -import akka.actor.ActorSystem - -/** - * This exception normally is thrown when a bounded mailbox is over capacity - */ -class MessageQueueAppendFailedException(message: String, cause: Throwable = null) extends AkkaException(message, cause) +import akka.actor._ /** * INTERNAL API @@ -401,13 +394,11 @@ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingQueue[Envelope] - def enqueue(receiver: ActorRef, handle: Envelope) { + def enqueue(receiver: ActorRef, handle: Envelope): Unit = if (pushTimeOut.length > 0) { - queue.offer(handle, pushTimeOut.length, pushTimeOut.unit) || { - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + receiver) - } + if (!queue.offer(handle, pushTimeOut.length, pushTimeOut.unit)) + receiver.asInstanceOf[InternalActorRef].provider.deadLetters ! 
DeadLetter(handle.message, handle.sender, receiver) } else queue put handle - } def dequeue(): Envelope = queue.poll() } @@ -439,18 +430,16 @@ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { override def queue: BlockingDeque[Envelope] def enqueue(receiver: ActorRef, handle: Envelope): Unit = - if (pushTimeOut.length > 0) - queue.offer(handle, pushTimeOut.length, pushTimeOut.unit) || { - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + receiver) - } - else queue put handle + if (pushTimeOut.length > 0) { + if (!queue.offer(handle, pushTimeOut.length, pushTimeOut.unit)) + receiver.asInstanceOf[InternalActorRef].provider.deadLetters ! DeadLetter(handle.message, handle.sender, receiver) + } else queue put handle def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = - if (pushTimeOut.length > 0) - queue.offerFirst(handle, pushTimeOut.length, pushTimeOut.unit) || { - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + receiver) - } - else queue putFirst handle + if (pushTimeOut.length > 0) { + if (!queue.offerFirst(handle, pushTimeOut.length, pushTimeOut.unit)) + receiver.asInstanceOf[InternalActorRef].provider.deadLetters ! 
DeadLetter(handle.message, handle.sender, receiver) + } else queue putFirst handle def dequeue(): Envelope = queue.poll() } From f6fb742fcf2ef9200c4589812e6fd09f53955d66 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 14:13:08 +0200 Subject: [PATCH 332/538] Incorparated improvements/cleanup based on feedback, see #2182 --- .../scala/akka/cluster/AccrualFailureDetector.scala | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index c86eb3361e..c7aaf12fcf 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -104,10 +104,9 @@ class AccrualFailureDetector( throw new IllegalStateException("Can't calculate new failure statistics due to missing heartbeat history") } - val deviationSum = - newIntervalsForConnection - .map(_.toDouble) - .foldLeft(0.0)((x, y) ⇒ x + (y - newMean)) + val deviationSum = (0.0d /: newIntervalsForConnection) { (mean, interval) ⇒ + mean + interval.toDouble - newMean + } val newVariance: Double = deviationSum / newIntervalsForConnection.size val newDeviation: Double = math.sqrt(newVariance) @@ -149,9 +148,7 @@ class AccrualFailureDetector( val mean = oldState.failureStats.get(connection) match { case Some(FailureStats(mean, _, _)) ⇒ mean - case _ ⇒ - if (!oldState.intervalHistory.contains(connection)) 1000.0 - else throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") + case _ ⇒ throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") } if (mean == 0.0) 0.0 From c1d68ecfbf3ca67f3eeceff5879d92e4807c6bfc Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 8 Jun 2012 14:26:10 +0200 Subject: [PATCH 333/538] Minor formatting --- 
.../src/test/scala/akka/actor/SupervisorSpec.scala | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index d295e6db4f..9e14a510e2 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -339,9 +339,7 @@ class SupervisorSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSende OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 10 seconds)(classOf[Exception] :: Nil)))) val dyingProps = Props(new Actor { - inits.incrementAndGet - - if (inits.get % 2 == 0) throw new IllegalStateException("Don't wanna!") + if (inits.incrementAndGet % 2 == 0) throw new IllegalStateException("Don't wanna!") def receive = { case Ping ⇒ sender ! PongMessage From fb62311f49f4e0155d080fcbcf788e932bb75757 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 15:03:11 +0200 Subject: [PATCH 334/538] Rename NodeShutdownSpec to SingletonClusterSpec, see #2182 --- ...deShutdownSpec.scala => SingletonClusterSpec.scala} | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{NodeShutdownSpec.scala => SingletonClusterSpec.scala} (78%) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala similarity index 78% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index 4dc90a5b89..68d20012f5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -9,7 +9,7 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.duration._ -object 
NodeShutdownMultiJvmSpec extends MultiNodeConfig { +object SingletonClusterMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") @@ -24,11 +24,11 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { } -class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec -class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec +class SingletonClusterMultiJvmNode1 extends SingletonClusterSpec +class SingletonClusterMultiJvmNode2 extends SingletonClusterSpec -abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { - import NodeShutdownMultiJvmSpec._ +abstract class SingletonClusterSpec extends MultiNodeSpec(SingletonClusterMultiJvmSpec) with MultiNodeClusterSpec { + import SingletonClusterMultiJvmSpec._ "A cluster of 2 nodes" must { From a1dd4bc23560778fc48cb7440cb3c4f36753a83a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 15:28:03 +0200 Subject: [PATCH 335/538] Remove jenkins color codes in LogRoleReplace script --- .../scala/akka/remote/testkit/LogRoleReplace.scala | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala index 3b3527240e..1e5a53d82e 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala @@ -90,6 +90,7 @@ class LogRoleReplace { private val RoleStarted = """\[([\w\-]+)\].*Role \[([\w]+)\] started""".r private val RemoteServerStarted = """\[([\w\-]+)\].*RemoteServerStarted@akka://.*@([\w\-\.]+):([0-9]+)""".r + private val ColorCode = """\[[0-9]+m""" private var replacements: Map[String, String] = Map.empty private var jvmToAddress: Map[String, String] = Map.empty @@ -106,12 +107,16 @@ class LogRoleReplace { } def processLine(line: String): String = { - if 
(updateReplacements(line)) - replaceLine(line) + val cleanLine = removeColorCodes(line) + if (updateReplacements(cleanLine)) + replaceLine(cleanLine) else - line + cleanLine } + private def removeColorCodes(line: String): String = + line.replaceAll(ColorCode, "") + private def updateReplacements(line: String): Boolean = { if (line.startsWith("[info] * ")) { // reset when new test begins From 8479db7e75dea006658869addc02d22cbe644c52 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 8 Jun 2012 15:30:17 +0200 Subject: [PATCH 336/538] #2186 - Adding test to verify that parent is restarted if decider throws an exception --- .../scala/akka/actor/SupervisorSpec.scala | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index 9e14a510e2..3db5b5b5dc 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -364,5 +364,39 @@ class SupervisorSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSende system.stop(supervisor) } + + "must not lose system messages when a NonFatal exception occurs when processing a system message" in { + val parent = system.actorOf(Props(new Actor { + override val supervisorStrategy = OneForOneStrategy()({ + case e: IllegalStateException if e.getMessage == "OHNOES" ⇒ throw e + case _ ⇒ SupervisorStrategy.Restart + }) + val child = context.watch(context.actorOf(Props(new Actor { + override def postRestart(reason: Throwable): Unit = testActor ! "child restarted" + def receive = { + case "die" ⇒ throw new IllegalStateException("OHNOES") + case "test" ⇒ sender ! "child green" + } + }), "child")) + + override def postRestart(reason: Throwable): Unit = testActor ! "parent restarted" + + def receive = { + case t @ Terminated(`child`) ⇒ testActor ! "child terminated" + case "die" ⇒ child ! 
"die" + case "test" ⇒ sender ! "green" + case "testchild" ⇒ child forward "test" + } + })) + + parent ! "die" + parent ! "testchild" + expectMsg("parent restarted") + expectMsg("child terminated") + parent ! "test" + expectMsg("green") + parent ! "testchild" + expectMsg("child green") + } } } From c9e259d56992a785de70361734c62a3b35b591d0 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 16:51:27 +0200 Subject: [PATCH 337/538] Turn on debug logging due to failures --- .../src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala | 2 +- .../src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index e86602949f..2e27f4c3bd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -17,7 +17,7 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { val c1 = role("c1") val c2 = role("c2") - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = true).withFallback(MultiNodeClusterSpec.clusterConfig)) } class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index fcb1393f8a..e36980d859 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -24,7 +24,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { gossip-interval = 400 ms nr-of-deputy-nodes = 0 } - akka.loglevel = INFO + akka.loglevel = DEBUG """)) } From 2b69f67777fa3b7d73a97a3afc23ac45182e14b4 Mon Sep 17 00:00:00 2001 From: Patrik 
Nordwall Date: Fri, 8 Jun 2012 16:54:40 +0200 Subject: [PATCH 338/538] Ignore due to failures, see #2180 --- .../cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala | 3 ++- .../scala/akka/cluster/NodeLeavingAndExitingSpec.scala | 3 ++- .../src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 8e274be311..d85016c714 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -36,7 +36,8 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "A node that is LEAVING a non-singleton cluster" must { - "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 79fff4770f..2909362fa7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -42,7 +42,8 @@ abstract class NodeLeavingAndExitingSpec "A node that is LEAVING a non-singleton cluster" must { - "be moved to EXITING by the leader" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be moved to EXITING by the leader" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index b834492045..27bc36a3bf 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -36,7 +36,8 @@ abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) "A node that is LEAVING a non-singleton cluster" must { - "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) From fd455d14bff465d51a5abc1cb98a781cd65147c3 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 8 Jun 2012 21:57:39 +0200 Subject: [PATCH 339/538] #2208 - Upgrading to Netty 3.5.0 - remove StaticChannelPipeline since it's deprecated. --- .../akka/remote/testconductor/RemoteConnection.scala | 7 +++++-- .../src/main/scala/akka/remote/netty/Client.scala | 2 +- .../scala/akka/remote/netty/NettyRemoteSupport.scala | 9 ++++++--- project/AkkaBuild.scala | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index 5aeb484c42..1979857bf0 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -3,7 +3,7 @@ */ package akka.remote.testconductor -import org.jboss.netty.channel.{ Channel, ChannelPipeline, ChannelPipelineFactory, ChannelUpstreamHandler, SimpleChannelUpstreamHandler, StaticChannelPipeline } +import org.jboss.netty.channel.{ Channel, ChannelPipeline, ChannelPipelineFactory, ChannelUpstreamHandler, SimpleChannelUpstreamHandler, DefaultChannelPipeline } import org.jboss.netty.channel.socket.nio.{ 
NioClientSocketChannelFactory, NioServerSocketChannelFactory } import org.jboss.netty.bootstrap.{ ClientBootstrap, ServerBootstrap } import org.jboss.netty.handler.codec.frame.{ LengthFieldBasedFrameDecoder, LengthFieldPrepender } @@ -12,6 +12,7 @@ import org.jboss.netty.handler.codec.protobuf.{ ProtobufDecoder, ProtobufEncoder import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutException } import java.net.InetSocketAddress import java.util.concurrent.Executors +import akka.event.Logging /** * INTERNAL API. @@ -21,7 +22,9 @@ private[akka] class TestConductorPipelineFactory(handler: ChannelUpstreamHandler val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) val msg = List(new MsgEncoder, new MsgDecoder) - new StaticChannelPipeline(encap ::: proto ::: msg ::: handler :: Nil: _*) + (encap ::: proto ::: msg ::: handler :: Nil).foldLeft(new DefaultChannelPipeline) { + (pipe, handler) ⇒ pipe.addLast(Logging.simpleName(handler.getClass), handler); pipe + } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 86c534c418..e3a2cea9a7 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -8,7 +8,7 @@ import java.net.{ InetAddress, InetSocketAddress } import org.jboss.netty.util.{ Timeout, TimerTask, HashedWheelTimer } import org.jboss.netty.bootstrap.ClientBootstrap import org.jboss.netty.channel.group.DefaultChannelGroup -import org.jboss.netty.channel.{ ChannelFutureListener, ChannelHandler, StaticChannelPipeline, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } +import org.jboss.netty.channel.{ ChannelFutureListener, ChannelHandler, 
DefaultChannelPipeline, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index b42239f470..61124cfecb 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -12,7 +12,7 @@ import java.util.concurrent.Executors import scala.collection.mutable.HashMap import org.jboss.netty.channel.group.{ DefaultChannelGroup, ChannelGroupFuture } import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory -import org.jboss.netty.channel.{ ChannelHandlerContext, Channel, StaticChannelPipeline, ChannelHandler, ChannelPipelineFactory, ChannelLocal } +import org.jboss.netty.channel.{ ChannelHandlerContext, Channel, DefaultChannelPipeline, ChannelHandler, ChannelPipelineFactory, ChannelLocal } import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.codec.protobuf.{ ProtobufEncoder, ProtobufDecoder } import org.jboss.netty.handler.execution.{ ExecutionHandler, OrderedMemoryAwareThreadPoolExecutor } @@ -50,10 +50,13 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider */ object PipelineFactory { /** - * Construct a StaticChannelPipeline from a sequence of handlers; to be used + * Construct a DefaultChannelPipeline from a sequence of handlers; to be used * in implementations of ChannelPipelineFactory. 
*/ - def apply(handlers: Seq[ChannelHandler]): StaticChannelPipeline = new StaticChannelPipeline(handlers: _*) + def apply(handlers: Seq[ChannelHandler]): DefaultChannelPipeline = + handlers.foldLeft(new DefaultChannelPipeline) { + (pipe, handler) ⇒ pipe.addLast(Logging.simpleName(handler.getClass), handler); pipe + } /** * Constructs the NettyRemoteTransport default pipeline with the give “head” handler, which diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 736927e7c2..d6d23eb56b 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -485,7 +485,7 @@ object Dependency { object V { val Camel = "2.8.0" val Logback = "1.0.4" - val Netty = "3.3.0.Final" + val Netty = "3.5.0.Final" val Protobuf = "2.4.1" val ScalaStm = "0.5" val Scalatest = "1.6.1" From e6ee3e2a953768c982f0150837471041dcf46060 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 10 Jun 2012 16:50:04 +0200 Subject: [PATCH 340/538] Ignoring ConvergenceSpec until fixed. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 9963903b90..65571b97b3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -37,7 +37,7 @@ abstract class ConvergenceSpec "A cluster of 3 members" must { - "reach initial convergence" taggedAs LongRunningTest in { + "reach initial convergence" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) runOn(fourth) { @@ -47,7 +47,7 @@ abstract class ConvergenceSpec testConductor.enter("after-1") } - "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest 
in { + "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest ignore { val thirdAddress = node(third).address testConductor.enter("before-shutdown") @@ -78,7 +78,7 @@ abstract class ConvergenceSpec testConductor.enter("after-2") } - "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest in { + "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest ignore { runOn(fourth) { // try to join cluster.join(node(first).address) From a4499b06bb00945bd63f6b352190e6e0a4560b26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 10 Jun 2012 16:52:33 +0200 Subject: [PATCH 341/538] Abstracted the FailureDetector into a interface trait and added controllable failure detector mock. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Abstracted a FailureDetector trait. - Added a FailureDetectorPuppet mock that can be user controllable - Added option to define a custom failure detector - Misc minor fixes Signed-off-by: Jonas Bonér --- .../akka/cluster/AccrualFailureDetector.scala | 28 ++++-- .../src/main/scala/akka/cluster/Cluster.scala | 21 +++- .../scala/akka/cluster/ClusterSettings.scala | 4 + .../scala/akka/cluster/FailureDetector.scala | 99 +++++++++++++++++++ .../cluster/AccrualFailureDetectorSpec.scala | 18 ++-- .../akka/cluster/ClusterConfigSpec.scala | 1 + .../test/scala/akka/cluster/ClusterSpec.scala | 6 +- 7 files changed, 151 insertions(+), 26 deletions(-) create mode 100644 akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index c7aaf12fcf..cdca8c9503 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -4,7 +4,8 @@ package akka.cluster 
-import akka.actor.{ ActorSystem, Address } +import akka.actor.{ ActorSystem, Address, ExtendedActorSystem } +import akka.remote.RemoteActorRefProvider import akka.event.Logging import scala.collection.immutable.Map @@ -23,11 +24,20 @@ import java.util.concurrent.atomic.AtomicReference * Default threshold is 8, but can be configured in the Akka config. */ class AccrualFailureDetector( - system: ActorSystem, - address: Address, + val system: ActorSystem, val threshold: Int = 8, val maxSampleSize: Int = 1000, - val timeMachine: () ⇒ Long = System.currentTimeMillis) { + val timeMachine: () ⇒ Long = System.currentTimeMillis) extends FailureDetector { + + def this( + system: ActorSystem, + settings: ClusterSettings, + timeMachine: () ⇒ Long = System.currentTimeMillis) = + this( + system, + settings.FailureDetectorThreshold, + settings.FailureDetectorMaxSampleSize, + timeMachine) private final val PhiFactor = 1.0 / math.log(10.0) @@ -65,8 +75,8 @@ class AccrualFailureDetector( * Records a heartbeat for a connection. */ @tailrec - final def heartbeat(connection: Address) { - log.debug("Node [{}] - Heartbeat from connection [{}] ", address, connection) + final def heartbeat(connection: Address): Unit = { + log.debug("Heartbeat from connection [{}] ", connection) val oldState = state.get val latestTimestamp = oldState.timestamps.get(connection) @@ -155,7 +165,7 @@ class AccrualFailureDetector( else PhiFactor * timestampDiff / mean } - log.debug("Node [{}] - Phi value [{}] and threshold [{}] for connection [{}] ", address, phi, threshold, connection) + log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) phi } @@ -163,8 +173,8 @@ class AccrualFailureDetector( * Removes the heartbeat management for a connection. 
*/ @tailrec - final def remove(connection: Address) { - log.debug("Node [{}] - Remove connection [{}] ", address, connection) + final def remove(connection: Address): Unit = { + log.debug("Remove connection [{}] ", connection) val oldState = state.get if (oldState.failureStats.contains(connection)) { diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 4ea43d50e4..e788450148 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -306,7 +306,22 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { override def lookup = Cluster - override def createExtension(system: ExtendedActorSystem): Cluster = new Cluster(system) + override def createExtension(system: ExtendedActorSystem): Cluster = { + val clusterSettings = new ClusterSettings(system.settings.config, system.name) + + def createDefaultFD() = new AccrualFailureDetector(system, clusterSettings) + val failureDetector = clusterSettings.FailureDetectorImplementationClass match { + case None ⇒ createDefaultFD() + case Some(fqcn) ⇒ system.dynamicAccess.createInstanceFor[FailureDetector](fqcn, Seq((classOf[ActorSystem], system), (classOf[ClusterSettings], clusterSettings))) match { + case Right(fd) ⇒ fd + case Left(e) ⇒ + system.log.error(e, "Could not create custom failure detector - falling back to default") + createDefaultFD() + } + } + + new Cluster(system, failureDetector) + } } /** @@ -349,7 +364,7 @@ trait ClusterNodeMBean { * if (Cluster(system).isLeader) { ... } * }}} */ -class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ +class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) extends Extension { clusterNode ⇒ /** * Represents the state for this Cluster. Implemented using optimistic lockless concurrency. 
@@ -369,8 +384,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ import clusterSettings._ val selfAddress = remote.transport.address - val failureDetector = new AccrualFailureDetector( - system, selfAddress, FailureDetectorThreshold, FailureDetectorMaxSampleSize) private val vclockNode = VectorClock.Node(selfAddress.toString) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 0e7dac06ab..b58775e222 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -15,6 +15,10 @@ class ClusterSettings(val config: Config, val systemName: String) { import config._ val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") + val FailureDetectorImplementationClass: Option[String] = getString("akka.cluster.failure-detector.implementation-class") match { + case "" ⇒ None + case fqcn ⇒ Some(fqcn) + } val NodeToJoin: Option[Address] = getString("akka.cluster.node-to-join") match { case "" ⇒ None case AddressFromURIString(addr) ⇒ Some(addr) diff --git a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala new file mode 100644 index 0000000000..897d0413b5 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala @@ -0,0 +1,99 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.actor.{ Address, ActorSystem } +import akka.event.{ Logging, LogSource } + +/** + * Interface for Akka failure detectors. + */ +trait FailureDetector { + + /** + * Returns true if the connection is considered to be up and healthy + * and returns false otherwise. 
+ */ + def isAvailable(connection: Address): Boolean + + /** + * Records a heartbeat for a connection. + */ + def heartbeat(connection: Address): Unit + + /** + * Calculates how likely it is that the connection has failed. + *

+ * If a connection does not have any records in failure detector then it is + * considered healthy. + */ + def phi(connection: Address): Double + + /** + * Removes the heartbeat management for a connection. + */ + def remove(connection: Address): Unit +} + +/** + * User controllable "puppet" failure detector. + */ +class FailureDetectorPuppet(system: ActorSystem, connectionsToStartWith: Address*) extends FailureDetector { + import java.util.concurrent.ConcurrentHashMap + + trait Status + object Up extends Status + object Down extends Status + + implicit val logSource: LogSource[AnyRef] = new LogSource[AnyRef] { + def genString(o: AnyRef): String = o.getClass.getName + override def getClazz(o: AnyRef): Class[_] = o.getClass + } + + val log = Logging(system, this) + + private val connections = { + val cs = new ConcurrentHashMap[Address, Status] + connectionsToStartWith foreach { cs put (_, Up) } + cs + } + + def +(connection: Address): this.type = { + log.debug("Adding cluster node [{}]", connection) + connections.put(connection, Up) + this + } + + def markAsDown(connection: Address): this.type = { + connections.put(connection, Down) + this + } + + def markAsUp(connection: Address): this.type = { + connections.put(connection, Up) + this + } + + def isAvailable(connection: Address): Boolean = connections.get(connection) match { + case null ⇒ + this + connection + true + case Up ⇒ + log.debug("isAvailable: Cluster node IS NOT available [{}]", connection) + true + case Down ⇒ + log.debug("isAvailable: Cluster node IS available [{}]", connection) + false + } + + def heartbeat(connection: Address): Unit = log.debug("Heart beat from cluster node[{}]", connection) + + def phi(connection: Address): Double = 0.1D + + def remove(connection: Address): Unit = { + log.debug("Removing cluster node [{}]", connection) + connections.remove(connection) + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala 
b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index 1cf62daf1c..bd4d5d2c52 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -28,7 +28,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" } "return phi value of 0.0 on startup for each address, when no heartbeats" in { - val fd = new AccrualFailureDetector(system, conn) + val fd = new AccrualFailureDetector(system) fd.phi(conn) must be(0.0) fd.phi(conn2) must be(0.0) } @@ -36,7 +36,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "return phi based on guess when only one heartbeat" in { // 1 second ticks val timeInterval = Vector.fill(30)(1000L) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) @@ -52,7 +52,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "return phi value using first interval after second heartbeat" in { val timeInterval = List[Long](0, 100, 100, 100) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) @@ -63,7 +63,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after a series of successful heartbeats" in { val timeInterval = List[Long](0, 1000, 100, 100) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) @@ -75,7 +75,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead after explicit removal of connection" in { val timeInterval = List[Long](0, 1000, 100, 100, 100) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) @@ -89,7 
+89,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after explicit removal of connection and receiving heartbeat again" in { val timeInterval = List[Long](0, 1000, 100, 1100, 1100, 1100, 1100, 1100, 100) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 @@ -114,7 +114,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead if heartbeat are missed" in { val timeInterval = List[Long](0, 1000, 100, 100, 5000) val ft = fakeTimeGenerator(timeInterval) - val fd = new AccrualFailureDetector(system, conn, threshold = 3, + val fd = new AccrualFailureDetector(system, threshold = 3, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 @@ -127,7 +127,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { val timeInterval = List[Long](0, 1000, 100, 1100, 5000, 100, 1000, 100, 100) - val fd = new AccrualFailureDetector(system, conn, threshold = 3, + val fd = new AccrualFailureDetector(system, threshold = 3, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 @@ -144,7 +144,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "use maxSampleSize heartbeats" in { val timeInterval = List[Long](0, 100, 100, 100, 100, 600, 1000, 1000, 1000, 1000, 1000) - val fd = new AccrualFailureDetector(system, conn, maxSampleSize = 3, + val fd = new AccrualFailureDetector(system, maxSampleSize = 3, timeMachine = fakeTimeGenerator(timeInterval)) // 100 ms interval diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 6b2ff1962c..9bce41a831 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ 
b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -18,6 +18,7 @@ class ClusterConfigSpec extends AkkaSpec { import settings._ FailureDetectorThreshold must be(8) FailureDetectorMaxSampleSize must be(1000) + FailureDetectorImplementationClass must be(None) NodeToJoin must be(None) PeriodicTasksInitialDelay must be(1 seconds) GossipInterval must be(1 second) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index d3d1d6d0a2..5b4bca3379 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -33,7 +33,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { val deterministicRandom = new AtomicInteger - val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem]) { + val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem], new FailureDetectorPuppet(system)) { override def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = { if (addresses.isEmpty) None @@ -67,9 +67,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { @volatile var _unavailable: Set[Address] = Set.empty - override val failureDetector = new AccrualFailureDetector( - system, selfAddress, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) { - + override val failureDetector = new AccrualFailureDetector(system, clusterSettings) { override def isAvailable(connection: Address): Boolean = { if (_unavailable.contains(connection)) false else super.isAvailable(connection) From 0030fa1b528bdac181a34e7d211b4cc26b09c678 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 10 Jun 2012 16:53:17 +0200 Subject: [PATCH 342/538] Made LeaderDowningNodeThatIsUnreachableSpec make use of the new FailureDetectorPuppet as a sample of how to use it. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...aderDowningNodeThatIsUnreachableSpec.scala | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 616c412556..f3f8015ced 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -7,7 +7,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.actor.Address +import akka.actor._ import akka.util.duration._ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { @@ -16,12 +16,9 @@ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = true). + commonConfig(debugConfig(on = false). withFallback(ConfigFactory.parseString(""" - akka.cluster { - auto-down = on - failure-detector.threshold = 4 - } + akka.cluster.auto-down = on """)). 
withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -37,10 +34,20 @@ class LeaderDowningNodeThatIsUnreachableSpec import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ + // Set up the puppet failure detector + lazy val failureDetector = new FailureDetectorPuppet(system = system) + lazy val clusterNode = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) + + override def cluster = clusterNode + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + lazy val fourthAddress = node(fourth).address + "The Leader in a 4 node cluster" must { "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { - val fourthAddress = node(fourth).address awaitClusterUp(first, second, third, fourth) runOn(first) { @@ -48,6 +55,9 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.shutdown(fourth, 0) testConductor.enter("down-fourth-node") + // mark the node as unreachable in the failure detector + failureDetector markAsDown fourthAddress + // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) @@ -67,7 +77,6 @@ class LeaderDowningNodeThatIsUnreachableSpec } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { - val secondAddress = node(second).address testConductor.enter("before-down-second-node") runOn(first) { @@ -75,6 +84,9 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.shutdown(second, 0) testConductor.enter("down-second-node") + // mark the node as unreachable in the failure detector + failureDetector markAsDown secondAddress + // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) From ec7177be740fc070c2a5fe483dbc76a49b35d6fc Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 11 Jun 2012 10:06:53 +0200 Subject: [PATCH 343/538] Misc fixes after FailureDetectorPuppet and abstraction review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Moved FailureDetectorPuppet to its own file in src/test. - Removed 'phi' method from FailureDetector public API. - Throwing exception instead of falling back to default if we can't load the custom FD. - Removed add-connection method in FailureDetectorPuppet. Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 15 ++-- .../scala/akka/cluster/FailureDetector.scala | 75 +------------------ .../test/scala/akka/cluster/ClusterSpec.scala | 2 +- .../akka/cluster/FailureDetectorPuppet.scala | 60 +++++++++++++++ 4 files changed, 70 insertions(+), 82 deletions(-) create mode 100644 akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index e788450148..891c8972b0 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -309,15 +309,14 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): Cluster = { val clusterSettings = new ClusterSettings(system.settings.config, system.name) - def createDefaultFD() = new AccrualFailureDetector(system, clusterSettings) val failureDetector = clusterSettings.FailureDetectorImplementationClass match { - case None ⇒ createDefaultFD() - case Some(fqcn) ⇒ system.dynamicAccess.createInstanceFor[FailureDetector](fqcn, Seq((classOf[ActorSystem], system), (classOf[ClusterSettings], clusterSettings))) match { - case Right(fd) ⇒ fd - case Left(e) ⇒ - system.log.error(e, "Could not create custom failure detector - falling back to default") - createDefaultFD() - } + case None 
⇒ new AccrualFailureDetector(system, clusterSettings) + case Some(fqcn) ⇒ + system.dynamicAccess.createInstanceFor[FailureDetector]( + fqcn, Seq((classOf[ActorSystem], system), (classOf[ClusterSettings], clusterSettings))) match { + case Right(fd) ⇒ fd + case Left(e) ⇒ throw new ConfigurationException("Could not create custom failure detector [" + fqcn + "] due to:" + e.toString) + } } new Cluster(system, failureDetector) diff --git a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala index 897d0413b5..60af0a1c41 100644 --- a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala @@ -4,8 +4,7 @@ package akka.cluster -import akka.actor.{ Address, ActorSystem } -import akka.event.{ Logging, LogSource } +import akka.actor.Address /** * Interface for Akka failure detectors. @@ -13,8 +12,7 @@ import akka.event.{ Logging, LogSource } trait FailureDetector { /** - * Returns true if the connection is considered to be up and healthy - * and returns false otherwise. + * Returns true if the connection is considered to be up and healthy and returns false otherwise. */ def isAvailable(connection: Address): Boolean @@ -23,77 +21,8 @@ trait FailureDetector { */ def heartbeat(connection: Address): Unit - /** - * Calculates how likely it is that the connection has failed. - *

- * If a connection does not have any records in failure detector then it is - * considered healthy. - */ - def phi(connection: Address): Double - /** * Removes the heartbeat management for a connection. */ def remove(connection: Address): Unit } - -/** - * User controllable "puppet" failure detector. - */ -class FailureDetectorPuppet(system: ActorSystem, connectionsToStartWith: Address*) extends FailureDetector { - import java.util.concurrent.ConcurrentHashMap - - trait Status - object Up extends Status - object Down extends Status - - implicit val logSource: LogSource[AnyRef] = new LogSource[AnyRef] { - def genString(o: AnyRef): String = o.getClass.getName - override def getClazz(o: AnyRef): Class[_] = o.getClass - } - - val log = Logging(system, this) - - private val connections = { - val cs = new ConcurrentHashMap[Address, Status] - connectionsToStartWith foreach { cs put (_, Up) } - cs - } - - def +(connection: Address): this.type = { - log.debug("Adding cluster node [{}]", connection) - connections.put(connection, Up) - this - } - - def markAsDown(connection: Address): this.type = { - connections.put(connection, Down) - this - } - - def markAsUp(connection: Address): this.type = { - connections.put(connection, Up) - this - } - - def isAvailable(connection: Address): Boolean = connections.get(connection) match { - case null ⇒ - this + connection - true - case Up ⇒ - log.debug("isAvailable: Cluster node IS NOT available [{}]", connection) - true - case Down ⇒ - log.debug("isAvailable: Cluster node IS available [{}]", connection) - false - } - - def heartbeat(connection: Address): Unit = log.debug("Heart beat from cluster node[{}]", connection) - - def phi(connection: Address): Double = 0.1D - - def remove(connection: Address): Unit = { - log.debug("Removing cluster node [{}]", connection) - connections.remove(connection) - } -} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala 
index 5b4bca3379..f60e6fa7dc 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -67,7 +67,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { @volatile var _unavailable: Set[Address] = Set.empty - override val failureDetector = new AccrualFailureDetector(system, clusterSettings) { + override val failureDetector = new FailureDetectorPuppet(system) { override def isAvailable(connection: Address): Boolean = { if (_unavailable.contains(connection)) false else super.isAvailable(connection) diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala new file mode 100644 index 0000000000..3245a15f97 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -0,0 +1,60 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.actor.{ Address, ActorSystem } +import akka.event.{ Logging, LogSource } + +/** + * User controllable "puppet" failure detector. 
+ */ +class FailureDetectorPuppet(system: ActorSystem, settings: ClusterSettings) extends FailureDetector { + import java.util.concurrent.ConcurrentHashMap + + def this(system: ActorSystem) = this(system, new ClusterSettings(system.settings.config, system.name)) + + trait Status + object Up extends Status + object Down extends Status + + implicit private val logSource: LogSource[AnyRef] = new LogSource[AnyRef] { + def genString(o: AnyRef): String = o.getClass.getName + override def getClazz(o: AnyRef): Class[_] = o.getClass + } + + private val log = Logging(system, this) + + private val connections = new ConcurrentHashMap[Address, Status] + + def markAsDown(connection: Address): this.type = { + connections.put(connection, Down) + this + } + + def markAsUp(connection: Address): this.type = { + connections.put(connection, Up) + this + } + + def isAvailable(connection: Address): Boolean = connections.get(connection) match { + case null ⇒ + log.debug("Adding cluster node [{}]", connection) + connections.put(connection, Up) + true + case Up ⇒ + log.debug("isAvailable: Cluster node IS NOT available [{}]", connection) + true + case Down ⇒ + log.debug("isAvailable: Cluster node IS available [{}]", connection) + false + } + + def heartbeat(connection: Address): Unit = log.debug("Heart beat from cluster node[{}]", connection) + + def remove(connection: Address): Unit = { + log.debug("Removing cluster node [{}]", connection) + connections.remove(connection) + } +} From 44fefb9b55dc682fac79fb0337bd0045b37ec728 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 11 Jun 2012 11:05:19 +0200 Subject: [PATCH 344/538] #2187 - Making Warning sections in docs yellow so they aren't overlooked --- akka-docs/_sphinx/themes/akka/static/docs.css | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/akka-docs/_sphinx/themes/akka/static/docs.css b/akka-docs/_sphinx/themes/akka/static/docs.css index 7b6d3dbf52..3d37718c68 100644 --- 
a/akka-docs/_sphinx/themes/akka/static/docs.css +++ b/akka-docs/_sphinx/themes/akka/static/docs.css @@ -90,6 +90,42 @@ strong {color: #1d3c52; } box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25); } +.warning { + background-image: none; + background-color: #fdf5d9; + filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); + padding: 14px; + border-color: #ffffc4; + -webkit-box-shadow: none; + -moz-box-shadow: none; + box-shadow: none; + margin-bottom: 18px; + position: relative; + padding: 7px 15px; + color: #404040; + background-repeat: repeat-x; + background-image: -khtml-gradient(linear, left top, left bottom, from(#ffffc4), to(#ffff00)); + background-image: -moz-linear-gradient(top, #ffffc4, #ffff00); + background-image: -ms-linear-gradient(top, #ffffc4, #ffff00); + background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #ffffc4), color-stop(100%, #ffff00)); + background-image: -webkit-linear-gradient(top, #ffffc4, #ffff00); + background-image: -o-linear-gradient(top, #ffffc4, #ffff00); + background-image: linear-gradient(top, #ffffc4, #ffff00); + filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffc4', endColorstr='#ffff00', GradientType=0); + text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); + border-color: #dff69a #ffff00 #E4C652; + border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); + text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5); + border-width: 1px; + border-style: solid; + -webkit-border-radius: 4px; + -moz-border-radius: 4px; + border-radius: 4px; + -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25); + -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25); + box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25); +} + .admonition p.admonition-title { color: rgba(0, 0, 0, 0.6); text-shadow: 0 1px 0 rgba(255, 255, 255, .7); From edc0c0d888d88adf90cc9c3201f9707d6fa1c4b5 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 11 Jun 2012 11:26:28 +0200 Subject: [PATCH 345/538] 
#2119 - enforce Java6 for releases --- project/scripts/release | 2 ++ 1 file changed, 2 insertions(+) diff --git a/project/scripts/release b/project/scripts/release index 886e6629b1..13795b3d53 100755 --- a/project/scripts/release +++ b/project/scripts/release @@ -93,6 +93,8 @@ fi declare -r version=$1 declare -r publish_path="${release_server}:${release_path}" +[[ `java -version 2>&1 | grep "java version" | awk '{print $3}' | tr -d \" | awk '{split($0, array, ".")} END{print array[2]}'` -eq 6 ]] || fail "Java version is not 1.6" + # check for a git command type -P git &> /dev/null || fail "git command not found" From 523f433e4b328ab6976d0b4f037432c9772a4825 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 11 Jun 2012 11:36:29 +0200 Subject: [PATCH 346/538] Fixed potential problem in test --- .../cluster/LeaderDowningNodeThatIsUnreachableSpec.scala | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index f3f8015ced..dc383dca43 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -40,16 +40,12 @@ class LeaderDowningNodeThatIsUnreachableSpec override def cluster = clusterNode - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - lazy val fourthAddress = node(fourth).address - "The Leader in a 4 node cluster" must { "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { awaitClusterUp(first, second, third, fourth) + val fourthAddress = node(fourth).address runOn(first) { // kill 'fourth' node testConductor.shutdown(fourth, 0) @@ -77,8 +73,9 @@ class 
LeaderDowningNodeThatIsUnreachableSpec } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { - testConductor.enter("before-down-second-node") + val secondAddress = node(second).address + testConductor.enter("before-down-second-node") runOn(first) { // kill 'second' node testConductor.shutdown(second, 0) From b65cf5c2ec233a9f7952485aee9081498f4fa95c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 11 Jun 2012 14:32:17 +0200 Subject: [PATCH 347/538] Created FailureDetectorStrategy with two implementations: FailureDetectorPuppetStrategy and AccrualFailureDetectorStrategy. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Created FailureDetectorStrategy base trait. - Created FailureDetectorPuppetStrategy. - Created AccrualFailureDetectorStrategy. - Created two versions of LeaderDowningNodeThatIsUnreachableMultiJvmSpec - LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppet - LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetector - Added AccrualFailureDetectorStrategy to all the remaining tests - will be split up into two versions shortly. 
Signed-off-by: Jonas Bonér --- ...ientDowningNodeThatIsUnreachableSpec.scala | 10 +-- .../ClientDowningNodeThatIsUpSpec.scala | 10 +-- .../scala/akka/cluster/ConvergenceSpec.scala | 8 +-- .../cluster/FailureDetectorStrategy.scala | 61 +++++++++++++++++++ .../GossipingAccrualFailureDetectorSpec.scala | 6 +- .../akka/cluster/JoinTwoClustersSpec.scala | 12 ++-- ...aderDowningNodeThatIsUnreachableSpec.scala | 25 ++++---- .../akka/cluster/LeaderElectionSpec.scala | 10 +-- .../MembershipChangeListenerExitingSpec.scala | 6 +- .../MembershipChangeListenerJoinSpec.scala | 4 +- .../MembershipChangeListenerLeavingSpec.scala | 6 +- .../MembershipChangeListenerSpec.scala | 6 +- .../MembershipChangeListenerUpSpec.scala | 4 +- .../akka/cluster/MultiNodeClusterSpec.scala | 17 +++--- .../scala/akka/cluster/NodeJoinSpec.scala | 4 +- ...LeavingAndExitingAndBeingRemovedSpec.scala | 6 +- .../cluster/NodeLeavingAndExitingSpec.scala | 6 +- .../scala/akka/cluster/NodeLeavingSpec.scala | 6 +- .../akka/cluster/NodeMembershipSpec.scala | 6 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 4 +- .../scala/akka/cluster/NodeUpSpec.scala | 4 +- .../scala/akka/cluster/SunnyWeatherSpec.scala | 10 +-- .../akka/cluster/FailureDetectorPuppet.scala | 4 +- 23 files changed, 149 insertions(+), 86 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 6d4d09f7cb..d1a9f756dd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -18,12 +18,12 @@ object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = 
false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec -class ClientDowningNodeThatIsUnreachableMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec -class ClientDowningNodeThatIsUnreachableMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec -class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec +class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUnreachableSpec +abstract class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index db00438c9e..687596745b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -18,12 +18,12 @@ object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec -class ClientDowningNodeThatIsUpMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec -class 
ClientDowningNodeThatIsUpMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec -class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec +class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUpSpec +abstract class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 65571b97b3..df47e19bec 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -25,10 +25,10 @@ object ConvergenceMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ConvergenceMultiJvmNode1 extends ConvergenceSpec -class ConvergenceMultiJvmNode2 extends ConvergenceSpec -class ConvergenceMultiJvmNode3 extends ConvergenceSpec -class ConvergenceMultiJvmNode4 extends ConvergenceSpec +class ConvergenceMultiJvmNode1 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceMultiJvmNode2 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceMultiJvmNode3 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceMultiJvmNode4 extends ConvergenceSpec with AccrualFailureDetectorStrategy abstract class ConvergenceSpec extends MultiNodeSpec(ConvergenceMultiJvmSpec) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala new file mode 100644 index 0000000000..dcbb65d0f1 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala @@ -0,0 +1,61 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import akka.actor.Address +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +/** + * Base trait for all failure detector strategies. + */ +trait FailureDetectorStrategy { + + /** + * Get or create the FailureDetector to be used in the cluster node. + * To be defined by subclass. + */ + def failureDetector: FailureDetector + + /** + * Marks a node as available in the failure detector. + * To be defined by subclass. + */ + def markNodeAsAvailable(address: Address): Unit + + /** + * Marks a node as unavailable in the failure detector. + * To be defined by subclass. + */ + def markNodeAsUnavailable(address: Address): Unit +} + +/** + * Defines a FailureDetectorPuppet-based FailureDetectorStrategy. + */ +trait FailureDetectorPuppetStrategy extends FailureDetectorStrategy { self: MultiNodeSpec ⇒ + + /** + * The puppet instance. Separated from 'failureDetector' field so we don't have to cast when using the puppet specific methods. + */ + private val puppet = new FailureDetectorPuppet(system) + + override def failureDetector: FailureDetector = puppet + + override def markNodeAsAvailable(address: Address): Unit = puppet markNodeAsAvailable address + + override def markNodeAsUnavailable(address: Address): Unit = puppet markNodeAsUnavailable address +} + +/** + * Defines a AccrualFailureDetector-based FailureDetectorStrategy. 
+ */ +trait AccrualFailureDetectorStrategy extends FailureDetectorStrategy { self: MultiNodeSpec ⇒ + + override val failureDetector: FailureDetector = new AccrualFailureDetector(system, new ClusterSettings(system.settings.config, system.name)) + + override def markNodeAsAvailable(address: Address): Unit = { /* no-op */ } + + override def markNodeAsUnavailable(address: Address): Unit = { /* no-op */ } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index f75ca3b058..63090b7a1f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -19,9 +19,9 @@ object GossipingAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class GossipingAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec -class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec -class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec +class GossipingAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index e86602949f..2000e63253 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -20,12 +20,12 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index dc383dca43..7dcb6b20f6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -23,23 +23,22 @@ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class 
LeaderDowningNodeThatIsUnreachableMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec -class LeaderDowningNodeThatIsUnreachableMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec -class LeaderDowningNodeThatIsUnreachableMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec -class LeaderDowningNodeThatIsUnreachableMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy -class LeaderDowningNodeThatIsUnreachableSpec +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy + +abstract class LeaderDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(LeaderDowningNodeThatIsUnreachableMultiJvmSpec) with MultiNodeClusterSpec { import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ - // Set up the puppet failure 
detector - lazy val failureDetector = new FailureDetectorPuppet(system = system) - lazy val clusterNode = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) - - override def cluster = clusterNode - "The Leader in a 4 node cluster" must { "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { @@ -52,7 +51,7 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("down-fourth-node") // mark the node as unreachable in the failure detector - failureDetector markAsDown fourthAddress + markNodeAsUnavailable(fourthAddress) // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- @@ -82,7 +81,7 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("down-second-node") // mark the node as unreachable in the failure detector - failureDetector markAsDown secondAddress + markNodeAsUnavailable(secondAddress) // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 43f0fc19eb..f44b494917 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -19,11 +19,11 @@ object LeaderElectionMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class LeaderElectionMultiJvmNode1 extends LeaderElectionSpec -class LeaderElectionMultiJvmNode2 extends LeaderElectionSpec -class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec -class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec -class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec +class LeaderElectionMultiJvmNode1 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionMultiJvmNode2 extends LeaderElectionSpec with 
AccrualFailureDetectorStrategy +class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec with AccrualFailureDetectorStrategy abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index d76c3cf689..7389a01ffc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -27,9 +27,9 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec -class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec -class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec +class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index bdf8f7d44d..8a940375ef 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -25,8 +25,8 @@ object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec -class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec +class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerJoinSpec extends MultiNodeSpec(MembershipChangeListenerJoinMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 1ff11465bb..d7c79407a2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -24,9 +24,9 @@ object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec -class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec -class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec +class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy +class 
MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index c48727b1cd..914db94acb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -17,9 +17,9 @@ object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec -class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec -class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec +class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 3e22dd456d..4cd81cd0e7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -18,8 +18,8 @@ object MembershipChangeListenerUpMultiJvmSpec 
extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec -class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec +class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerUpSpec extends MultiNodeSpec(MembershipChangeListenerUpMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index b185067ab0..39ecd8b0dc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -5,7 +5,7 @@ package akka.cluster import com.typesafe.config.Config import com.typesafe.config.ConfigFactory -import akka.actor.Address +import akka.actor.{Address, ExtendedActorSystem} import akka.remote.testconductor.RoleName import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -28,14 +28,19 @@ object MultiNodeClusterSpec { """) } -trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ +trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec ⇒ override def initialParticipants = roles.size /** - * Get or create a cluster node using 'Cluster(system)' extension. + * The cluster node instance. Needs to be lazily created. */ - def cluster: Cluster = Cluster(system) + private lazy val clusterNode = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) + + /** + * Get the cluster node to use. 
+ */ + def cluster: Cluster = clusterNode /** * Use this method instead of 'cluster.self' @@ -48,9 +53,7 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ * nodes (roles). First node will be started first * and others will join the first. */ - def startCluster(roles: RoleName*): Unit = { - awaitStartCluster(false, roles.toSeq) - } + def startCluster(roles: RoleName*): Unit = awaitStartCluster(false, roles.toSeq) /** * Initialize the cluster of the specified member diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 066e86aae6..58ed162af7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -24,8 +24,8 @@ object NodeJoinMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class NodeJoinMultiJvmNode1 extends NodeJoinSpec -class NodeJoinMultiJvmNode2 extends NodeJoinSpec +class NodeJoinMultiJvmNode1 extends NodeJoinSpec with AccrualFailureDetectorStrategy +class NodeJoinMultiJvmNode2 extends NodeJoinSpec with AccrualFailureDetectorStrategy abstract class NodeJoinSpec extends MultiNodeSpec(NodeJoinMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 8e274be311..a16ae055f0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -18,9 +18,9 @@ object NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec extends MultiNodeConfig commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec 
-class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 79fff4770f..bb32d8641f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -26,9 +26,9 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec -class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec -class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec +class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index b834492045..eccba596f2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -22,9 +22,9 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec -class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec -class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec +class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec with AccrualFailureDetectorStrategy +class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec with AccrualFailureDetectorStrategy +class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec with AccrualFailureDetectorStrategy abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index ef65cefd0f..c7fa1569f2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -16,9 +16,9 @@ object NodeMembershipMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec -class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec -class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec +class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec with AccrualFailureDetectorStrategy +class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec with AccrualFailureDetectorStrategy +class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec with AccrualFailureDetectorStrategy 
abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 4dc90a5b89..7417ae06d5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -24,8 +24,8 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { } -class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec -class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec +class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec with AccrualFailureDetectorStrategy +class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec with AccrualFailureDetectorStrategy abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { import NodeShutdownMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 6cb8bf9e07..4a2342fca1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -19,8 +19,8 @@ object NodeUpMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeUpMultiJvmNode1 extends NodeUpSpec -class NodeUpMultiJvmNode2 extends NodeUpSpec +class NodeUpMultiJvmNode1 extends NodeUpSpec with AccrualFailureDetectorStrategy +class NodeUpMultiJvmNode2 extends NodeUpSpec with AccrualFailureDetectorStrategy abstract class NodeUpSpec extends MultiNodeSpec(NodeUpMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index fcb1393f8a..cabaf21ab1 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -28,11 +28,11 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { """)) } -class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec -class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec -class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec -class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec -class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy abstract class SunnyWeatherSpec extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala index 3245a15f97..f35bca381d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -28,12 +28,12 @@ class FailureDetectorPuppet(system: ActorSystem, settings: ClusterSettings) exte private val connections = new ConcurrentHashMap[Address, Status] - def markAsDown(connection: Address): this.type = { + def markNodeAsUnavailable(connection: Address): this.type = { connections.put(connection, Down) this } - def markAsUp(connection: Address): this.type = { + def markNodeAsAvailable(connection: Address): this.type = { connections.put(connection, Up) this } From e2551494c41eef95a916b38897bb6ad09aef89a0 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 11 Jun 
2012 14:59:34 +0200 Subject: [PATCH 348/538] Use Use separate heartbeats for FailureDetector, see #2214 * Send Heartbeat message to all members at regular interval * Removed the need to gossip to myself --- .../src/main/resources/reference.conf | 3 + .../akka/cluster/AccrualFailureDetector.scala | 6 +- .../src/main/scala/akka/cluster/Cluster.scala | 76 +++++++++++++------ .../scala/akka/cluster/ClusterSettings.scala | 1 + .../akka/cluster/MultiNodeClusterSpec.scala | 1 + .../scala/akka/cluster/SunnyWeatherSpec.scala | 2 +- .../akka/cluster/ClusterConfigSpec.scala | 1 + .../test/scala/akka/cluster/ClusterSpec.scala | 8 +- 8 files changed, 65 insertions(+), 33 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 8c905d5b29..1e7c0e4c08 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -27,6 +27,9 @@ akka { # how often should the node send out gossip information? gossip-interval = 1s + # how often should the node send out heartbeats? + heartbeat-interval = 1s + # how often should the leader perform maintenance tasks? 
leader-actions-interval = 1s diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index c7aaf12fcf..76c773f759 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -66,7 +66,8 @@ class AccrualFailureDetector( */ @tailrec final def heartbeat(connection: Address) { - log.debug("Node [{}] - Heartbeat from connection [{}] ", address, connection) + // FIXME change to debug log level, when failure detector is stable + log.info("Node [{}] - Heartbeat from connection [{}] ", address, connection) val oldState = state.get val latestTimestamp = oldState.timestamps.get(connection) @@ -155,7 +156,8 @@ class AccrualFailureDetector( else PhiFactor * timestampDiff / mean } - log.debug("Node [{}] - Phi value [{}] and threshold [{}] for connection [{}] ", address, phi, threshold, connection) + // FIXME change to debug log level, when failure detector is stable + log.info("Node [{}] - Phi value [{}] and threshold [{}] for connection [{}] ", address, phi, threshold, connection) phi } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 4ea43d50e4..df8f2ec89b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -138,7 +138,7 @@ object Member { /** * Envelope adding a sender address to the gossip. */ -case class GossipEnvelope(sender: Member, gossip: Gossip) extends ClusterMessage +case class GossipEnvelope(from: Address, gossip: Gossip) extends ClusterMessage /** * Defines the current status of a cluster member node @@ -244,6 +244,8 @@ case class Gossip( ")" } +case class Heartbeat(from: Address) + /** * Manages routing of the different cluster commands. * Instantiated as a single instance for each Cluster - e.g. 
commands are serialized to Cluster message after message. @@ -272,7 +274,8 @@ private[akka] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { val log = Logging(context.system, this) def receive = { - case GossipEnvelope(sender, gossip) ⇒ cluster.receive(sender, gossip) + case Heartbeat(from) ⇒ cluster.receiveHeartbeat(from) + case GossipEnvelope(from, gossip) ⇒ cluster.receiveGossip(from, gossip) } override def unhandled(unknown: Any) = log.error("[/system/cluster/gossip] can not respond to messages - received [{}]", unknown) @@ -388,7 +391,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ log.info("Cluster Node [{}] - is starting up...", selfAddress) - // create superisor for daemons under path "/system/cluster" + // create supervisor for daemons under path "/system/cluster" private val clusterDaemons = { val createChild = CreateChild(Props(new ClusterDaemonSupervisor(this)), "cluster") Await.result(system.systemGuardian ? createChild, defaultTimeout.duration) match { @@ -399,8 +402,9 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private val state = { val member = Member(selfAddress, MemberStatus.Joining) - val gossip = Gossip(members = SortedSet.empty[Member] + member) + vclockNode // add me as member and update my vector clock - new AtomicReference[State](State(gossip)) + val versionedGossip = Gossip(members = SortedSet.empty[Member] + member) + vclockNode // add me as member and update my vector clock + val seenVersionedGossip = versionedGossip seen selfAddress + new AtomicReference[State](State(seenVersionedGossip)) } // try to join the node defined in the 'akka.cluster.node-to-join' option @@ -415,6 +419,11 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ gossip() } + // start periodic heartbeat to all nodes in cluster + private val heartbeatCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, HeartbeatInterval) { + heartbeat() + } + // 
start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list) private val failureDetectorReaperCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, UnreachableNodesReaperInterval) { reapUnreachableMembers() @@ -491,6 +500,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (isRunning.compareAndSet(true, false)) { log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) gossipCanceller.cancel() + heartbeatCanceller.cancel() failureDetectorReaperCanceller.cancel() leaderActionsCanceller.cancel() system.stop(clusterDaemons) @@ -588,6 +598,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update else { + // treat join as initial heartbeat, so that it becomes unavailable if nothing more happens if (node != selfAddress) failureDetector heartbeat node notifyMembershipChangeListeners(localState, newState) } @@ -615,7 +626,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update else { - if (address != selfAddress) failureDetector heartbeat address // update heartbeat in failure detector notifyMembershipChangeListeners(localState, newState) } } @@ -708,7 +718,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Receive new gossip. 
*/ @tailrec - final private[cluster] def receive(sender: Member, remoteGossip: Gossip): Unit = { + final private[cluster] def receiveGossip(from: Address, remoteGossip: Gossip): Unit = { val localState = state.get val localGossip = localState.latestGossip @@ -718,8 +728,9 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val mergedGossip = remoteGossip merge localGossip val versionedMergedGossip = mergedGossip + vclockNode - log.debug( - "Can't establish a causal relationship between \"remote\" gossip [{}] and \"local\" gossip [{}] - merging them into [{}]", + // FIXME change to debug log level, when failure detector is stable + log.info( + """Can't establish a causal relationship between "remote" gossip [{}] and "local" gossip [{}] - merging them into [{}]""", remoteGossip, localGossip, versionedMergedGossip) versionedMergedGossip @@ -736,15 +747,20 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val newState = localState copy (latestGossip = winningGossip seen selfAddress) // if we won the race then update else try again - if (!state.compareAndSet(localState, newState)) receive(sender, remoteGossip) // recur if we fail the update + if (!state.compareAndSet(localState, newState)) receiveGossip(from, remoteGossip) // recur if we fail the update else { - log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address) - - if (sender.address != selfAddress) failureDetector heartbeat sender.address + log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, from) notifyMembershipChangeListeners(localState, newState) } } + /** + * INTERNAL API + */ + private[cluster] def receiveHeartbeat(from: Address): Unit = { + failureDetector heartbeat from + } + /** * Joins the pre-configured contact point. 
*/ @@ -769,14 +785,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val newSelf = localSelf copy (status = newStatus) // change my state in 'gossip.members' - val newMembersSet = localMembers map { member ⇒ + val newMembers = localMembers map { member ⇒ if (member.address == selfAddress) newSelf else member } - // ugly crap to work around bug in scala colletions ('val ss: SortedSet[Member] = SortedSet.empty[Member] ++ aSet' does not compile) - val newMembersSortedSet = SortedSet[Member](newMembersSet.toList: _*) - val newGossip = localGossip copy (members = newMembersSortedSet) + val newGossip = localGossip copy (members = newMembers) // version my changes val versionedGossip = newGossip + vclockNode @@ -793,7 +807,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private[akka] def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection) - connection ! GossipEnvelope(self, latestGossip) + connection ! GossipEnvelope(selfAddress, latestGossip) } /** @@ -840,12 +854,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) - if (isSingletonCluster(localState)) { - // gossip to myself - // TODO could perhaps be optimized, no need to gossip to myself when Up? 
- gossipTo(selfAddress) - - } else if (isAvailable(localState)) { + if (!isSingletonCluster(localState) && isAvailable(localState)) { val localGossip = localState.latestGossip // important to not accidentally use `map` of the SortedSet, since the original order is not preserved val localMembers = localGossip.members.toIndexedSeq @@ -876,6 +885,25 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } } + /** + * INTERNAL API + */ + private[akka] def heartbeat(): Unit = { + val localState = state.get + + if (!isSingletonCluster(localState)) { + val liveMembers = localState.latestGossip.members.toIndexedSeq + val unreachableMembers = localState.latestGossip.overview.unreachable + + // FIXME use unreachable? + for (member ← (liveMembers ++ unreachableMembers); if member.address != selfAddress) { + val connection = clusterGossipConnectionFor(member.address) + log.debug("Cluster Node [{}] - Heartbeat to [{}]", selfAddress, connection) + connection ! Heartbeat(selfAddress) + } + } + } + /** * INTERNAL API * diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 0e7dac06ab..90831db2e6 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -21,6 +21,7 @@ class ClusterSettings(val config: Config, val systemName: String) { } val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) + val HeartbeatInterval = Duration(getMilliseconds("akka.cluster.heartbeat-interval"), MILLISECONDS) val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS) val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS) val 
NrOfGossipDaemons = getInt("akka.cluster.nr-of-gossip-daemons") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index b185067ab0..729923699d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -17,6 +17,7 @@ object MultiNodeClusterSpec { akka.cluster { auto-down = off gossip-interval = 200 ms + heartbeat-interval = 400 ms leader-actions-interval = 200 ms unreachable-nodes-reaper-interval = 200 ms periodic-tasks-initial-delay = 300 ms diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index e36980d859..fcb1393f8a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -24,7 +24,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { gossip-interval = 400 ms nr-of-deputy-nodes = 0 } - akka.loglevel = DEBUG + akka.loglevel = INFO """)) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 6b2ff1962c..3a96451466 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -21,6 +21,7 @@ class ClusterConfigSpec extends AkkaSpec { NodeToJoin must be(None) PeriodicTasksInitialDelay must be(1 seconds) GossipInterval must be(1 second) + HeartbeatInterval must be(1 second) LeaderActionsInterval must be(1 second) UnreachableNodesReaperInterval must be(1 second) NrOfGossipDaemons must be(4) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index d3d1d6d0a2..9b1a9706af 100644 
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -99,15 +99,11 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { "A Cluster" must { - "initially be singleton cluster and reach convergence after first gossip" in { + "initially be singleton cluster and reach convergence immediately" in { cluster.isSingletonCluster must be(true) cluster.latestGossip.members.map(_.address) must be(Set(selfAddress)) memberStatus(selfAddress) must be(Some(MemberStatus.Joining)) - cluster.convergence.isDefined must be(false) - cluster.gossip() - expectMsg(GossipTo(selfAddress)) - awaitCond(cluster.convergence.isDefined) - memberStatus(selfAddress) must be(Some(MemberStatus.Joining)) + cluster.convergence.isDefined must be(true) cluster.leaderActions() memberStatus(selfAddress) must be(Some(MemberStatus.Up)) } From 2dcceb58ce688b9fec6174126bfe6d3b774d0f74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 11 Jun 2012 16:48:19 +0200 Subject: [PATCH 349/538] Split up all tests that are related to failure detection into two versions: Accrual FD and FD Puppet. Also moved all tests that are not failure detection tests to use FD Puppet. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...ientDowningNodeThatIsUnreachableSpec.scala | 14 +++- .../ClientDowningNodeThatIsUpSpec.scala | 15 +++- .../scala/akka/cluster/ConvergenceSpec.scala | 21 ++--- .../GossipingAccrualFailureDetectorSpec.scala | 10 ++- .../akka/cluster/JoinTwoClustersSpec.scala | 12 +-- ...aderDowningNodeThatIsUnreachableSpec.scala | 4 +- .../akka/cluster/LeaderElectionSpec.scala | 19 +++-- .../MembershipChangeListenerExitingSpec.scala | 6 +- .../MembershipChangeListenerJoinSpec.scala | 12 +-- .../MembershipChangeListenerLeavingSpec.scala | 6 +- .../MembershipChangeListenerSpec.scala | 77 ------------------- .../MembershipChangeListenerUpSpec.scala | 53 +++++++++---- .../scala/akka/cluster/NodeJoinSpec.scala | 12 +-- .../cluster/NodeLeavingAndExitingSpec.scala | 6 +- .../scala/akka/cluster/NodeLeavingSpec.scala | 14 ++-- .../akka/cluster/NodeMembershipSpec.scala | 6 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 17 +++- .../scala/akka/cluster/NodeUpSpec.scala | 4 +- .../scala/akka/cluster/SunnyWeatherSpec.scala | 12 +-- 19 files changed, 145 insertions(+), 175 deletions(-) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index d1a9f756dd..343f0c7c17 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -18,10 +18,15 @@ object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends 
ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUnreachableMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUnreachableMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy + +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy abstract class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) @@ -38,6 +43,7 @@ abstract class ClientDowningNodeThatIsUnreachableSpec 
runOn(first) { // kill 'third' node testConductor.shutdown(third, 0) + markNodeAsUnavailable(thirdAddress) // mark 'third' node as DOWN cluster.down(thirdAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 687596745b..95eeefd982 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -18,10 +18,15 @@ object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUpMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUpMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec with FailureDetectorPuppetStrategy + +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class 
ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy abstract class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) @@ -40,6 +45,8 @@ abstract class ClientDowningNodeThatIsUpSpec cluster.down(thirdAddress) testConductor.enter("down-third-node") + markNodeAsUnavailable(thirdAddress) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index df47e19bec..bdc0a1ae8b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -17,22 +17,24 @@ object ConvergenceMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" - akka.cluster { - failure-detector.threshold = 4 - } - """)). + withFallback(ConfigFactory.parseString("akka.cluster.failure-detector.threshold = 4")). 
withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ConvergenceMultiJvmNode1 extends ConvergenceSpec with AccrualFailureDetectorStrategy -class ConvergenceMultiJvmNode2 extends ConvergenceSpec with AccrualFailureDetectorStrategy -class ConvergenceMultiJvmNode3 extends ConvergenceSpec with AccrualFailureDetectorStrategy -class ConvergenceMultiJvmNode4 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceWithFailureDetectorPuppetMultiJvmNode1 extends ConvergenceSpec with FailureDetectorPuppetStrategy +class ConvergenceWithFailureDetectorPuppetMultiJvmNode2 extends ConvergenceSpec with FailureDetectorPuppetStrategy +class ConvergenceWithFailureDetectorPuppetMultiJvmNode3 extends ConvergenceSpec with FailureDetectorPuppetStrategy +class ConvergenceWithFailureDetectorPuppetMultiJvmNode4 extends ConvergenceSpec with FailureDetectorPuppetStrategy + +class ConvergenceWithAccrualFailureDetectorMultiJvmNode1 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceWithAccrualFailureDetectorMultiJvmNode2 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceWithAccrualFailureDetectorMultiJvmNode3 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceWithAccrualFailureDetectorMultiJvmNode4 extends ConvergenceSpec with AccrualFailureDetectorStrategy abstract class ConvergenceSpec extends MultiNodeSpec(ConvergenceMultiJvmSpec) with MultiNodeClusterSpec { + import ConvergenceMultiJvmSpec._ "A cluster of 3 members" must { @@ -54,6 +56,7 @@ abstract class ConvergenceSpec runOn(first) { // kill 'third' node testConductor.shutdown(third, 0) + markNodeAsUnavailable(thirdAddress) } runOn(first, second) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 63090b7a1f..b14c0d927c 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -19,12 +19,14 @@ object GossipingAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class GossipingAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingWithAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingWithAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingWithAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) +abstract class GossipingAccrualFailureDetectorSpec + extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) with MultiNodeClusterSpec { + import GossipingAccrualFailureDetectorMultiJvmSpec._ lazy val firstAddress = node(first).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 2000e63253..4b64bb6e58 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -20,12 +20,12 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = 
false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 7dcb6b20f6..5e2545394d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -17,9 +17,7 @@ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false). 
- withFallback(ConfigFactory.parseString(""" - akka.cluster.auto-down = on - """)). + withFallback(ConfigFactory.parseString("akka.cluster.auto-down = on")). withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index f44b494917..e161206ba0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -19,11 +19,17 @@ object LeaderElectionMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class LeaderElectionMultiJvmNode1 extends LeaderElectionSpec with AccrualFailureDetectorStrategy -class LeaderElectionMultiJvmNode2 extends LeaderElectionSpec with AccrualFailureDetectorStrategy -class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec with AccrualFailureDetectorStrategy -class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec with AccrualFailureDetectorStrategy -class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode1 extends LeaderElectionSpec with FailureDetectorPuppetStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode2 extends LeaderElectionSpec with FailureDetectorPuppetStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode3 extends LeaderElectionSpec with FailureDetectorPuppetStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode4 extends LeaderElectionSpec with FailureDetectorPuppetStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode5 extends LeaderElectionSpec with FailureDetectorPuppetStrategy + +class LeaderElectionWithAccrualFailureDetectorMultiJvmNode1 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class 
LeaderElectionWithAccrualFailureDetectorMultiJvmNode2 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionWithAccrualFailureDetectorMultiJvmNode3 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionWithAccrualFailureDetectorMultiJvmNode4 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionWithAccrualFailureDetectorMultiJvmNode5 extends LeaderElectionSpec with AccrualFailureDetectorStrategy abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSpec) @@ -57,9 +63,11 @@ abstract class LeaderElectionSpec myself match { case `controller` ⇒ + val leaderAddress = node(leader).address testConductor.enter("before-shutdown") testConductor.shutdown(leader, 0) testConductor.enter("after-shutdown", "after-down", "completed") + markNodeAsUnavailable(leaderAddress) case `leader` ⇒ testConductor.enter("before-shutdown", "after-shutdown") @@ -71,6 +79,7 @@ abstract class LeaderElectionSpec // user marks the shutdown leader as DOWN cluster.down(leaderAddress) testConductor.enter("after-down", "completed") + markNodeAsUnavailable(leaderAddress) case _ if remainingRoles.contains(myself) ⇒ // remaining cluster nodes, not shutdown diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index 7389a01ffc..d9b2c7b876 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -27,9 +27,9 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerExitingMultiJvmNode2 
extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec with FailureDetectorPuppetStrategy abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 8a940375ef..2809ae820b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -17,16 +17,12 @@ object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" - akka.cluster { - leader-actions-interval = 5 s # increase the leader action task interval to allow time checking for JOIN before leader moves it to UP - } - """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(ConfigFactory.parseString("akka.cluster.leader-actions-interval = 5 s") // increase the leader action task interval to allow time checking for JOIN before leader moves it to UP + .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec with 
AccrualFailureDetectorStrategy +class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec with FailureDetectorPuppetStrategy abstract class MembershipChangeListenerJoinSpec extends MultiNodeSpec(MembershipChangeListenerJoinMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index d7c79407a2..57cec4f389 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -24,9 +24,9 @@ object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec with FailureDetectorPuppetStrategy abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala deleted file mode 100644 index 914db94acb..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import scala.collection.immutable.SortedSet -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - val third = role("third") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) -} - -class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy - -abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) - with MultiNodeClusterSpec { - import MembershipChangeListenerMultiJvmSpec._ - - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - - "A set of connected cluster systems" must { - - "(when two nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - - awaitClusterUp(first) - - runOn(first, second) { - val latch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) - latch.countDown() - } - }) - testConductor.enter("listener-1-registered") - cluster.join(firstAddress) - latch.await 
- } - - runOn(third) { - testConductor.enter("listener-1-registered") - } - - testConductor.enter("after-1") - } - - "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - - val latch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) - latch.countDown() - } - }) - testConductor.enter("listener-2-registered") - - runOn(third) { - cluster.join(firstAddress) - } - - latch.await - - testConductor.enter("after-2") - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 4cd81cd0e7..c89bbe1f0a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -5,21 +5,21 @@ package akka.cluster import scala.collection.immutable.SortedSet import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ object MembershipChangeListenerUpMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") + val third = role("third") commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec with FailureDetectorPuppetStrategy +class 
MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerUpMultiJvmNode3 extends MembershipChangeListenerUpSpec with FailureDetectorPuppetStrategy abstract class MembershipChangeListenerUpSpec extends MultiNodeSpec(MembershipChangeListenerUpMultiJvmSpec) @@ -30,29 +30,50 @@ abstract class MembershipChangeListenerUpSpec lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address - "A registered MembershipChangeListener" must { - "be notified when new node is marked as UP by the leader" taggedAs LongRunningTest in { + "A set of connected cluster systems" must { - runOn(first) { - val upLatch = TestLatch() + "(when two nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + + awaitClusterUp(first) + + runOn(first, second) { + val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) - upLatch.countDown() + latch.countDown() } }) - testConductor.enter("registered-listener") - - upLatch.await - awaitUpConvergence(numberOfMembers = 2) + testConductor.enter("listener-1-registered") + cluster.join(firstAddress) + latch.await } - runOn(second) { - testConductor.enter("registered-listener") + runOn(third) { + testConductor.enter("listener-1-registered") + } + + testConductor.enter("after-1") + } + + "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + + val latch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) + latch.countDown() + } + }) + testConductor.enter("listener-2-registered") + + 
runOn(third) { cluster.join(firstAddress) } - testConductor.enter("after") + latch.await + + testConductor.enter("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 58ed162af7..6cf5fc220d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -16,16 +16,12 @@ object NodeJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" - akka.cluster { - leader-actions-interval = 5 s # increase the leader action task interval - } - """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(ConfigFactory.parseString("akka.cluster.leader-actions-interval = 5 s") // increase the leader action task interval + .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class NodeJoinMultiJvmNode1 extends NodeJoinSpec with AccrualFailureDetectorStrategy -class NodeJoinMultiJvmNode2 extends NodeJoinSpec with AccrualFailureDetectorStrategy +class NodeJoinMultiJvmNode1 extends NodeJoinSpec with FailureDetectorPuppetStrategy +class NodeJoinMultiJvmNode2 extends NodeJoinSpec with FailureDetectorPuppetStrategy abstract class NodeJoinSpec extends MultiNodeSpec(NodeJoinMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index bb32d8641f..ef285b5070 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -26,9 +26,9 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy 
-class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy -class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec with FailureDetectorPuppetStrategy +class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec with FailureDetectorPuppetStrategy +class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec with FailureDetectorPuppetStrategy abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index eccba596f2..8f637d87e5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -16,18 +16,18 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" - akka.cluster.unreachable-nodes-reaper-frequency = 30 s - """)) + .withFallback(ConfigFactory.parseString("akka.cluster.unreachable-nodes-reaper-frequency = 30 s")) .withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec with AccrualFailureDetectorStrategy -class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec with AccrualFailureDetectorStrategy -class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec with AccrualFailureDetectorStrategy +class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec with FailureDetectorPuppetStrategy +class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec with FailureDetectorPuppetStrategy +class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec with FailureDetectorPuppetStrategy -abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) 
+abstract class NodeLeavingSpec + extends MultiNodeSpec(NodeLeavingMultiJvmSpec) with MultiNodeClusterSpec { + import NodeLeavingMultiJvmSpec._ lazy val firstAddress = node(first).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index c7fa1569f2..fb0573f77f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -16,9 +16,9 @@ object NodeMembershipMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec with AccrualFailureDetectorStrategy -class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec with AccrualFailureDetectorStrategy -class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec with AccrualFailureDetectorStrategy +class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec with FailureDetectorPuppetStrategy +class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec with FailureDetectorPuppetStrategy +class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec with FailureDetectorPuppetStrategy abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 7417ae06d5..69b0a43a20 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -16,7 +16,7 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false). withFallback(ConfigFactory.parseString(""" akka.cluster { - auto-down = on + auto-down = on failure-detector.threshold = 4 } """)). 
@@ -24,10 +24,16 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { } -class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec with AccrualFailureDetectorStrategy -class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec with AccrualFailureDetectorStrategy +class NodeShutdownWithFailureDetectorPuppetMultiJvmNode1 extends NodeShutdownSpec with FailureDetectorPuppetStrategy +class NodeShutdownWithFailureDetectorPuppetMultiJvmNode2 extends NodeShutdownSpec with FailureDetectorPuppetStrategy + +class NodeShutdownWithAccrualFailureDetectorMultiJvmNode1 extends NodeShutdownSpec with AccrualFailureDetectorStrategy +class NodeShutdownWithAccrualFailureDetectorMultiJvmNode2 extends NodeShutdownSpec with AccrualFailureDetectorStrategy + +abstract class NodeShutdownSpec + extends MultiNodeSpec(NodeShutdownMultiJvmSpec) + with MultiNodeClusterSpec { -abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { import NodeShutdownMultiJvmSpec._ "A cluster of 2 nodes" must { @@ -44,6 +50,9 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) runOn(first) { val secondAddress = node(second).address testConductor.shutdown(second, 0) + + markNodeAsUnavailable(secondAddress) + awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) cluster.isSingletonCluster must be(true) assertLeader(first) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 4a2342fca1..0fdc3c89b8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -19,8 +19,8 @@ object NodeUpMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeUpMultiJvmNode1 extends NodeUpSpec with AccrualFailureDetectorStrategy -class 
NodeUpMultiJvmNode2 extends NodeUpSpec with AccrualFailureDetectorStrategy +class NodeUpMultiJvmNode1 extends NodeUpSpec with FailureDetectorPuppetStrategy +class NodeUpMultiJvmNode2 extends NodeUpSpec with FailureDetectorPuppetStrategy abstract class NodeUpSpec extends MultiNodeSpec(NodeUpMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index cabaf21ab1..b8486841c6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -21,18 +21,18 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" akka.cluster { - gossip-interval = 400 ms + gossip-interval = 400 ms nr-of-deputy-nodes = 0 } akka.loglevel = INFO """)) } -class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy -class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy -class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy -class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy -class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy abstract class SunnyWeatherSpec extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) From 36b040cfab7164079af75f6cf5ebbd00a279a245 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: 
Mon, 11 Jun 2012 18:11:02 +0200 Subject: [PATCH 350/538] Unbreaking master --- akka-cluster/src/main/resources/reference.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 8c905d5b29..cdaf8c729c 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -33,7 +33,6 @@ akka { # how often should the node move nodes, marked as unreachable by the failure detector, out of the membership ring? unreachable-nodes-reaper-interval = 1s - # accrual failure detection config failure-detector { # defines the failure detector threshold @@ -43,6 +42,8 @@ akka { # actual crashes threshold = 8 + implementation-class = "" + max-sample-size = 1000 } } From 399a08b8b37990827e03f41d4fd68f7a5d82a3f0 Mon Sep 17 00:00:00 2001 From: Peter Badenhorst Date: Mon, 11 Jun 2012 18:33:05 +0200 Subject: [PATCH 351/538] Used RemoteCommunicationSpec as a template to implement a functional spec to test SSL communication. 1) Converted provider and related RNG's from Java to Scala 2) Added trust/key stores for testing purposes 3) As stated in the test comments, Internet access is required for the 2 'Secure' RNG variants to function within the time limit. 
4) Fixed unnecessary imports --- .../provider/AES128CounterRNGFast.java | 51 ------ .../provider/AES128CounterRNGSecure.java | 49 ------ .../provider/AES256CounterRNGSecure.java | 49 ------ .../akka/security/provider/AkkaProvider.java | 37 ---- akka-remote/src/main/resources/reference.conf | 2 +- .../akka/remote/netty/NettySSLSupport.scala | 63 +++---- .../provider/AES128CounterRNGFast.scala | 41 +++++ .../provider/AES128CounterRNGSecure.scala | 40 +++++ .../provider/AES256CounterRNGSecure.scala | 40 +++++ .../akka/security/provider/AkkaProvider.scala | 31 ++++ akka-remote/src/test/resources/keystore | Bin 0 -> 1342 bytes akka-remote/src/test/resources/truststore | Bin 0 -> 637 bytes .../remote/Ticket1978CommunicationSpec.scala | 161 ++++++++++++++++++ 13 files changed, 333 insertions(+), 231 deletions(-) delete mode 100644 akka-remote/src/main/java/akka/security/provider/AES128CounterRNGFast.java delete mode 100644 akka-remote/src/main/java/akka/security/provider/AES128CounterRNGSecure.java delete mode 100644 akka-remote/src/main/java/akka/security/provider/AES256CounterRNGSecure.java delete mode 100644 akka-remote/src/main/java/akka/security/provider/AkkaProvider.java create mode 100644 akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala create mode 100644 akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala create mode 100644 akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala create mode 100644 akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala create mode 100644 akka-remote/src/test/resources/keystore create mode 100644 akka-remote/src/test/resources/truststore create mode 100644 akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala diff --git a/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGFast.java b/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGFast.java deleted file mode 100644 index a982a6f705..0000000000 
--- a/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGFast.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.security.provider; - -import org.uncommons.maths.random.SecureRandomSeedGenerator; -import org.uncommons.maths.random.SeedException; - -import java.security.GeneralSecurityException; -import java.security.SecureRandom; - -/** - * Internal API - */ -public class AES128CounterRNGFast extends java.security.SecureRandomSpi { - private org.uncommons.maths.random.AESCounterRNG rng; - - public AES128CounterRNGFast() throws SeedException, GeneralSecurityException { - rng = new org.uncommons.maths.random.AESCounterRNG(new SecureRandomSeedGenerator()); - } - - /** - * This is managed internally only - */ - @Override - protected void engineSetSeed(byte[] seed) { - - } - - /** - * Generates a user-specified number of random bytes. - * - * @param bytes the array to be filled in with random bytes. - */ - @Override - protected void engineNextBytes(byte[] bytes) { - rng.nextBytes(bytes); - } - - /** - * Returns the given number of seed bytes. This call may be used to - * seed other random number generators. - * - * @param numBytes the number of seed bytes to generate. - * @return the seed bytes. - */ - @Override - protected byte[] engineGenerateSeed(int numBytes) { - return (new SecureRandom()).generateSeed(numBytes); - } -} diff --git a/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGSecure.java b/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGSecure.java deleted file mode 100644 index 178a6c392b..0000000000 --- a/akka-remote/src/main/java/akka/security/provider/AES128CounterRNGSecure.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.security.provider; - -import org.uncommons.maths.random.DefaultSeedGenerator; - -import java.security.GeneralSecurityException; - -/** - * Internal API - */ -public class AES128CounterRNGSecure extends java.security.SecureRandomSpi { - private org.uncommons.maths.random.AESCounterRNG rng; - - public AES128CounterRNGSecure() throws GeneralSecurityException { - rng = new org.uncommons.maths.random.AESCounterRNG(); - } - - /** - * This is managed internally only - */ - @Override - protected void engineSetSeed(byte[] seed) { - - } - - /** - * Generates a user-specified number of random bytes. - * - * @param bytes the array to be filled in with random bytes. - */ - @Override - protected void engineNextBytes(byte[] bytes) { - rng.nextBytes(bytes); - } - - /** - * Returns the given number of seed bytes. This call may be used to - * seed other random number generators. - * - * @param numBytes the number of seed bytes to generate. - * @return the seed bytes. - */ - @Override - protected byte[] engineGenerateSeed(int numBytes) { - return DefaultSeedGenerator.getInstance().generateSeed(numBytes); - } -} diff --git a/akka-remote/src/main/java/akka/security/provider/AES256CounterRNGSecure.java b/akka-remote/src/main/java/akka/security/provider/AES256CounterRNGSecure.java deleted file mode 100644 index 48d651b86b..0000000000 --- a/akka-remote/src/main/java/akka/security/provider/AES256CounterRNGSecure.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.security.provider; - -import org.uncommons.maths.random.DefaultSeedGenerator; - -import java.security.GeneralSecurityException; - -/** - * Internal API - */ -public class AES256CounterRNGSecure extends java.security.SecureRandomSpi { - private org.uncommons.maths.random.AESCounterRNG rng; - - public AES256CounterRNGSecure() throws GeneralSecurityException { - rng = new org.uncommons.maths.random.AESCounterRNG(32); - } - - /** - * This is managed internally only - */ - @Override - protected void engineSetSeed(byte[] seed) { - - } - - /** - * Generates a user-specified number of random bytes. - * - * @param bytes the array to be filled in with random bytes. - */ - @Override - protected void engineNextBytes(byte[] bytes) { - rng.nextBytes(bytes); - } - - /** - * Returns the given number of seed bytes. This call may be used to - * seed other random number generators. - * - * @param numBytes the number of seed bytes to generate. - * @return the seed bytes. - */ - @Override - protected byte[] engineGenerateSeed(int numBytes) { - return DefaultSeedGenerator.getInstance().generateSeed(numBytes); - } -} diff --git a/akka-remote/src/main/java/akka/security/provider/AkkaProvider.java b/akka-remote/src/main/java/akka/security/provider/AkkaProvider.java deleted file mode 100644 index 9c4a0c2181..0000000000 --- a/akka-remote/src/main/java/akka/security/provider/AkkaProvider.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.security.provider; - -import java.security.AccessController; -import java.security.Provider; - -/** - * A provider that for AES128CounterRNGFast, a cryptographically secure random number generator through SecureRandom - */ -public final class AkkaProvider extends Provider { - public AkkaProvider() { - super("Akka", 1.0, "Akka provider 1.0 that implements a secure AES random number generator"); - - AccessController.doPrivileged(new java.security.PrivilegedAction() { - public Object run() { - - /** - * SecureRandom - */ - put("SecureRandom.AES128CounterRNGFast", "akka.security.provider.AES128CounterRNGFast"); - put("SecureRandom.AES128CounterRNGSecure", "akka.security.provider.AES128CounterRNGSecure"); - put("SecureRandom.AES256CounterRNGSecure", "akka.security.provider.AES256CounterRNGSecure"); - - /** - * Implementation type: software or hardware - */ - put("SecureRandom.AES128CounterRNGFast ImplementedIn", "Software"); - put("SecureRandom.AES128CounterRNGSecure ImplementedIn", "Software"); - put("SecureRandom.AES256CounterRNGSecure ImplementedIn", "Software"); - - return null; - } - }); - } -} diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 80719decf4..0172b14e38 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -187,7 +187,7 @@ akka { # Using /dev/./urandom is only necessary when using SHA1PRNG on Linux to prevent blocking # It is NOT as secure because it reuses the seed # '' => defaults to /dev/random or whatever is set in java.security for example: securerandom.source=file:/dev/random - # '/dev/./urandom' => NOT '/dev/urandom' as that doesn't work according to: http://bugs.sun.com/view_bug.do;jsessionid=ff625daf459fdffffffffcd54f1c775299e0?bug_id=6202721 + # '/dev/./urandom' => NOT '/dev/urandom' as that doesn't work according to: http://bugs.sun.com/view_bug.do?bug_id=6202721 sha1prng-random-source = "" # There are 
three options, in increasing order of security: diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 011aa92233..99f56bf301 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -4,17 +4,13 @@ package akka.remote.netty -import _root_.java.security.Provider -import _root_.java.security.SecureRandom -import _root_.java.security.Security import org.jboss.netty.handler.ssl.SslHandler import javax.net.ssl.{ KeyManagerFactory, TrustManager, TrustManagerFactory, SSLContext } -import akka.remote.{ RemoteTransportException } +import akka.remote.RemoteTransportException import akka.event.LoggingAdapter import java.io.{ IOException, FileNotFoundException, FileInputStream } -import java.security.{ SecureRandom, GeneralSecurityException, KeyStore } +import java.security.{ SecureRandom, GeneralSecurityException, KeyStore, Security } import akka.security.provider.AkkaProvider -import com.sun.xml.internal.bind.v2.model.core.NonElement /** * Used for adding SSL support to Netty pipeline @@ -31,50 +27,29 @@ private object NettySSLSupport { private def initialiseCustomSecureRandom(settings: NettySettings, log: LoggingAdapter): SecureRandom = { /** - * According to this bug report: http://bugs.sun.com/view_bug.do;jsessionid=ff625daf459fdffffffffcd54f1c775299e0?bug_id=6202721 + * According to this bug report: http://bugs.sun.com/view_bug.do?bug_id=6202721 * Using /dev/./urandom is only necessary when using SHA1PRNG on Linux * Use 'new SecureRandom()' instead of 'SecureRandom.getInstance("SHA1PRNG")' to avoid having problems */ - settings.SSLRandomSource match { - case Some(path) ⇒ System.setProperty("java.security.egd", path) - case None ⇒ - } + settings.SSLRandomSource foreach { path ⇒ System.setProperty("java.security.egd", path) } val rng = settings.SSLRandomNumberGenerator match { 
- case Some(generator) ⇒ generator match { - case "AES128CounterRNGFast" ⇒ { - log.debug("SSL random number generator set to: AES128CounterRNGFast") - val akka = new AkkaProvider - Security.addProvider(akka) - SecureRandom.getInstance("AES128CounterRNGFast", akka) - } - case "AES128CounterRNGSecure" ⇒ { - log.debug("SSL random number generator set to: AES128CounterRNGSecure") - val akka = new AkkaProvider - Security.addProvider(akka) - SecureRandom.getInstance("AES128CounterRNGSecure", akka) - } - case "AES256CounterRNGSecure" ⇒ { - log.debug("SSL random number generator set to: AES256CounterRNGSecure") - val akka = new AkkaProvider - Security.addProvider(akka) - SecureRandom.getInstance("AES256CounterRNGSecure", akka) - } - case "SHA1PRNG" ⇒ { - log.debug("SSL random number generator set to: SHA1PRNG") - // This needs /dev/urandom to be the source on Linux to prevent problems with /dev/random blocking - // However, this also makes the seed source insecure as the seed is reused to avoid blocking (not a problem on FreeBSD). - SecureRandom.getInstance("SHA1PRNG") - } - case _ ⇒ { - log.debug("SSL random number generator set to default: SecureRandom") - new SecureRandom - } - } - case None ⇒ { - log.debug("SSL random number generator not set. Setting to default: SecureRandom") + case Some(r @ ("AES128CounterRNGFast" | "AES128CounterRNGSecure" | "AES256CounterRNGSecure")) ⇒ + log.debug("SSL random number generator set to: {}", r) + val akka = new AkkaProvider + Security.addProvider(akka) + SecureRandom.getInstance(r, akka) + case Some("SHA1PRNG") ⇒ + log.debug("SSL random number generator set to: SHA1PRNG") + // This needs /dev/urandom to be the source on Linux to prevent problems with /dev/random blocking + // However, this also makes the seed source insecure as the seed is reused to avoid blocking (not a problem on FreeBSD). 
+ SecureRandom.getInstance("SHA1PRNG") + case Some(unknown) ⇒ + log.debug("Unknown SSLRandomNumberGenerator [{}] falling back to SecureRandom", unknown) + new SecureRandom + case None ⇒ + log.debug("SSLRandomNumberGenerator not specified, falling back to SecureRandom") new SecureRandom - } } // prevent stall on first access rng.nextInt() diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala new file mode 100644 index 0000000000..12f0d2a83e --- /dev/null +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala @@ -0,0 +1,41 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.security.provider + +import org.uncommons.maths.random.{ AESCounterRNG, SecureRandomSeedGenerator } +import java.security.SecureRandom + +/** + * Internal API + */ +class AES128CounterRNGFast extends java.security.SecureRandomSpi { + private val rng = new AESCounterRNG(new SecureRandomSeedGenerator()) + + /** + * This is managed internally only + */ + protected def engineSetSeed(seed: Array[Byte]) { + } + + /** + * Generates a user-specified number of random bytes. + * + * @param bytes the array to be filled in with random bytes. + */ + protected def engineNextBytes(bytes: Array[Byte]) { + rng.nextBytes(bytes) + } + + /** + * Returns the given number of seed bytes. This call may be used to + * seed other random number generators. + * + * @param numBytes the number of seed bytes to generate. + * @return the seed bytes. 
+ */ + protected def engineGenerateSeed(numBytes: Int): Array[Byte] = { + (new SecureRandom).generateSeed(numBytes) + } +} + diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala new file mode 100644 index 0000000000..4859a8ea4b --- /dev/null +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala @@ -0,0 +1,40 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.security.provider + +import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } + +/** + * Internal API + */ +class AES128CounterRNGSecure extends java.security.SecureRandomSpi { + private val rng = new AESCounterRNG() + + /** + * This is managed internally only + */ + protected def engineSetSeed(seed: Array[Byte]) { + } + + /** + * Generates a user-specified number of random bytes. + * + * @param bytes the array to be filled in with random bytes. + */ + protected def engineNextBytes(bytes: Array[Byte]) { + rng.nextBytes(bytes) + } + + /** + * Returns the given number of seed bytes. This call may be used to + * seed other random number generators. + * + * @param numBytes the number of seed bytes to generate. + * @return the seed bytes. + */ + protected def engineGenerateSeed(numBytes: Int): Array[Byte] = { + DefaultSeedGenerator.getInstance.generateSeed(numBytes) + } +} + diff --git a/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala new file mode 100644 index 0000000000..3aeda2b1a1 --- /dev/null +++ b/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala @@ -0,0 +1,40 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.security.provider + +import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } + +/** + * Internal API + */ +class AES256CounterRNGSecure extends java.security.SecureRandomSpi { + private val rng = new AESCounterRNG(32) + + /** + * This is managed internally only + */ + protected def engineSetSeed(seed: Array[Byte]) { + } + + /** + * Generates a user-specified number of random bytes. + * + * @param bytes the array to be filled in with random bytes. + */ + protected def engineNextBytes(bytes: Array[Byte]) { + rng.nextBytes(bytes) + } + + /** + * Returns the given number of seed bytes. This call may be used to + * seed other random number generators. + * + * @param numBytes the number of seed bytes to generate. + * @return the seed bytes. + */ + protected def engineGenerateSeed(numBytes: Int): Array[Byte] = { + DefaultSeedGenerator.getInstance.generateSeed(numBytes) + } +} + diff --git a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala new file mode 100644 index 0000000000..705afa37ba --- /dev/null +++ b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala @@ -0,0 +1,31 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.security.provider + +import java.security.{ PrivilegedAction, AccessController, Provider } + +/** + * A provider that for AES128CounterRNGFast, a cryptographically secure random number generator through SecureRandom + */ +final class AkkaProvider extends Provider("Akka", 1.0, "Akka provider 1.0 that implements a secure AES random number generator") { + AccessController.doPrivileged(new PrivilegedAction[AkkaProvider] { + def run = { + /** + * SecureRandom + */ + put("SecureRandom.AES128CounterRNGFast", "akka.security.provider.AES128CounterRNGFast") + put("SecureRandom.AES128CounterRNGSecure", "akka.security.provider.AES128CounterRNGSecure") + put("SecureRandom.AES256CounterRNGSecure", "akka.security.provider.AES256CounterRNGSecure") + + /** + * Implementation type: software or hardware + */ + put("SecureRandom.AES128CounterRNGFast ImplementedIn", "Software") + put("SecureRandom.AES128CounterRNGSecure ImplementedIn", "Software") + put("SecureRandom.AES256CounterRNGSecure ImplementedIn", "Software") + null + } + }) +} + diff --git a/akka-remote/src/test/resources/keystore b/akka-remote/src/test/resources/keystore new file mode 100644 index 0000000000000000000000000000000000000000..ee5581d930a1cb38981f2a547aab3acf24861e71 GIT binary patch literal 1342 zcmezO_TO6u1_mYu1_nkjX3ee4POW5MU^GA7;L^;%z_in#iD|0=9~+l88zT#&7Ly<& zBP#<-6Vt*Lr#iO3d!phiO(!{)GWxCBqR4KxSN-)mxxc<^>*`br4K@BHt~ki3Iko1i zFn8Nbu2)`EZ_Af-S6!S9{%lD6ax4vqIuM+Etxqil0#b86~A#=eO1S5e{%P` zhYnuWFE;T9el?upf9KpL2M6scKEBl)M^>G$c$Iv(L^i>I!QWzb!&R<^cdxnpEHmO( z8K}k0SiDi;JC}m__6Fv zLMoe`r*C1^2g5)1?{|Du3sS23oqUC_hIh`US(9us=SS{HdR0C@@tH=BqR!UWPX(7B zc(K)gj>j3T1Dh}Os_XUsR>{b^%=Y@v!@M(BcO70}bu#5n%8SYCluveUTh=L8%Co}! 
ziN?j`{(_9t4(j}lldH}KYrgy6xz4Ts!i&f6l%-A8epId97}3*F%Bq{?*?z~a?Uee| z>=?rnTQ!?fvab9)lz02}_wuuP$6Bu3YU7$zcjVX3{m-Jq=DX`WySH-R8&3IH@nbxb z!yl?F4PSZT-p`n*S+*5LO^akEIn`c2Z@kTLRm{)xt2V`I&+7gar>VgG)K>i8$(yCG zYpW)|Uia<8N70(OcT;D&UkVZV&3%DOI#oYIXiZPJQweYQUItB7d8acv+a~mEI`{98=_Og#`sGaDy|wHLA{Iza z>N)g8dddg$GixUvnOn#F=Y3Cdo#t-7gp2G(I{#xNPF?Bsn-F+P6_TDK^h^yb85o%C z4Vsv&4VoC2EMR70WMX3Rzj*3`0WTY;R+~rLcV0$DR#pasL_=-^PB!LH7B*p~C`Usc z11=DULzuZdH3`OJhwuaq_(5`9!WG&C}Y3edw5jq{Ox&dAEZ+}O)t z(Ade;*vQbe%rjTX@(7Re$BD;t4m_~q`sUtt=l0fllUD55_+Zv%*=G|Zd;gv3ICyPh z$PTVI^Rt*QIf$K+oB6h{YVS?~4+ZwyU0L!~i!XU<>#G$+{5`r|Eze=8p45+HOG3MD z1q;|e++`MRP-bc;e<>nd*K6Ut+1^vWHbk%aQtlt)ee=|(4ZTdvj0}v(&SM2RuaO}n z{@bs4%#V`Vwz&V>%<+7Jc9XZ=O@ZUCPcEk=o?m+74v(IM-nyGQtKAkoOGN8U;EPUNAwEWKh{Z;W<0QMEHk{lvQ`-Y DEz%@Z literal 0 HcmV?d00001 diff --git a/akka-remote/src/test/resources/truststore b/akka-remote/src/test/resources/truststore new file mode 100644 index 0000000000000000000000000000000000000000..cc07616dad6cd4bb2833468ee5b4e6bf79b62b97 GIT binary patch literal 637 zcmezO_TO6u1_mYu1_nkj&6-=8om$Djz-WHDxleOoi}O4j*SmyZI*pDL9+MXnT~_kCWh?bdNV(Z`I3X! z8M&En`>OWt6!1`BzulE3U$yv>r?$RYLB!vq+tusGLU{li^m(FSFv zcJh}Z!gakC&YSH$Dq51hHg}QgTH^`-7oL^i+^@&VxzKTvSwMBYx2U(gzo&W#< literal 0 HcmV?d00001 diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala new file mode 100644 index 0000000000..ff41e369ff --- /dev/null +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -0,0 +1,161 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.remote + +import akka.testkit._ +import akka.actor._ +import com.typesafe.config._ +import akka.dispatch.{ Await, Future } +import akka.pattern.ask +import java.io.File +import java.security.{ PrivilegedAction, AccessController } + +object Configuration { + // set this in your JAVA_OPTS to see all ssl debug info: "-Djavax.net.debug=ssl,keymanager" + // The certificate will expire in 2109 + private val trustStore = getPath("truststore") + private val keyStore = getPath("keystore") + private def getPath(name: String): String = (new File("akka-remote/src/test/resources/" + name)).getAbsolutePath.replace("\\", "\\\\") + private val conf = """ + akka { + actor.provider = "akka.remote.RemoteActorRefProvider" + remote.netty { + hostname = localhost + port = 12345 + ssl { + enable = on + trust-store = "%s" + key-store = "%s" + random-number-generator = "%s" + } + } + actor.deployment { + /blub.remote = "akka://remote-sys@localhost:12346" + /looker/child.remote = "akka://remote-sys@localhost:12346" + /looker/child/grandchild.remote = "akka://Ticket1978CommunicationSpec@localhost:12345" + } + } + """ + + def getConfig(rng: String): String = { + conf.format(trustStore, keyStore, rng) + } +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978SHA1PRNG extends Ticket1978CommunicationSpec(Configuration.getConfig("SHA1PRNG")) + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978AES128CounterRNGFast extends Ticket1978CommunicationSpec(Configuration.getConfig("AES128CounterRNGFast")) + +/** + * Both of the Secure variants require access to the Internet to access random.org. + */ +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978AES128CounterRNGSecure extends Ticket1978CommunicationSpec(Configuration.getConfig("AES128CounterRNGSecure")) + +/** + * Both of the Secure variants require access to the Internet to access random.org. 
+ */ +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978AES256CounterRNGSecure extends Ticket1978CommunicationSpec(Configuration.getConfig("AES256CounterRNGSecure")) + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978CommunicationSpec(val configuration: String) + extends AkkaSpec(configuration) with ImplicitSender with DefaultTimeout { + + import RemoteCommunicationSpec._ + + // default SecureRandom RNG + def this() = this(Configuration.getConfig("")) + + val conf = ConfigFactory.parseString("akka.remote.netty.port=12346").withFallback(system.settings.config) + val other = ActorSystem("remote-sys", conf) + + val remote = other.actorOf(Props(new Actor { + def receive = { + case "ping" ⇒ sender ! (("pong", sender)) + } + }), "echo") + + val here = system.actorFor("akka://remote-sys@localhost:12346/user/echo") + + override def atTermination() { + other.shutdown() + } + + "SSL Remoting" must { + + "support remote look-ups" in { + here ! "ping" + expectMsgPF() { + case ("pong", s: AnyRef) if s eq testActor ⇒ true + } + } + + "send error message for wrong address" in { + EventFilter.error(start = "dropping", occurrences = 1).intercept { + system.actorFor("akka://remotesys@localhost:12346/user/echo") ! "ping" + }(other) + } + + "support ask" in { + Await.result(here ? "ping", timeout.duration) match { + case ("pong", s: akka.pattern.PromiseActorRef) ⇒ // good + case m ⇒ fail(m + " was not (pong, AskActorRef)") + } + } + + "send dead letters on remote if actor does not exist" in { + EventFilter.warning(pattern = "dead.*buh", occurrences = 1).intercept { + system.actorFor("akka://remote-sys@localhost:12346/does/not/exist") ! "buh" + }(other) + } + + "create and supervise children on remote node" in { + val r = system.actorOf(Props[Echo], "blub") + r.path.toString must be === "akka://remote-sys@localhost:12346/remote/Ticket1978CommunicationSpec@localhost:12345/user/blub" + r ! 
42 + expectMsg(42) + EventFilter[Exception]("crash", occurrences = 1).intercept { + r ! new Exception("crash") + }(other) + expectMsg("preRestart") + r ! 42 + expectMsg(42) + system.stop(r) + expectMsg("postStop") + } + + "look-up actors across node boundaries" in { + val l = system.actorOf(Props(new Actor { + def receive = { + case (p: Props, n: String) ⇒ sender ! context.actorOf(p, n) + case s: String ⇒ sender ! context.actorFor(s) + } + }), "looker") + l ! (Props[Echo], "child") + val r = expectMsgType[ActorRef] + r ! (Props[Echo], "grandchild") + val remref = expectMsgType[ActorRef] + remref.isInstanceOf[LocalActorRef] must be(true) + val myref = system.actorFor(system / "looker" / "child" / "grandchild") + myref.isInstanceOf[RemoteActorRef] must be(true) + myref ! 43 + expectMsg(43) + lastSender must be theSameInstanceAs remref + r.asInstanceOf[RemoteActorRef].getParent must be(l) + system.actorFor("/user/looker/child") must be theSameInstanceAs r + Await.result(l ? "child/..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l + Await.result(system.actorFor(system / "looker" / "child") ? "..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l + } + + "not fail ask across node boundaries" in { + val f = for (_ ← 1 to 1000) yield here ? 
"ping" mapTo manifest[(String, ActorRef)] + Await.result(Future.sequence(f), remaining).map(_._1).toSet must be(Set("pong")) + } + + } + +} From d957c686390b9799e568bbc5a0c5b17621a3d87b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 11 Jun 2012 21:12:57 +0200 Subject: [PATCH 352/538] Incorporate feedback from review, see #2214 --- .../src/main/scala/akka/cluster/Cluster.scala | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index df8f2ec89b..b463c3b0ea 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -14,18 +14,14 @@ import akka.pattern.ask import akka.util._ import akka.util.duration._ import akka.ConfigurationException - import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } import java.util.concurrent.TimeUnit._ import java.util.concurrent.TimeoutException import akka.jsr166y.ThreadLocalRandom - import java.lang.management.ManagementFactory import javax.management._ - import scala.collection.immutable.{ Map, SortedSet } import scala.annotation.tailrec - import com.google.protobuf.ByteString /** @@ -44,6 +40,8 @@ trait MetaDataChangeListener { /** * Base trait for all cluster messages. All ClusterMessage's are serializable. + * + * FIXME Protobuf all ClusterMessages */ sealed trait ClusterMessage extends Serializable @@ -82,6 +80,7 @@ object ClusterAction { /** * Represents the address and the current status of a cluster member node. 
+ * */ class Member(val address: Address, val status: MemberStatus) extends ClusterMessage { override def hashCode = address.## @@ -175,6 +174,10 @@ case class GossipOverview( "])" } +object Gossip { + val emptyMembers: SortedSet[Member] = SortedSet.empty +} + /** * Represents the state of the cluster; cluster ring membership, ring convergence, meta data - all versioned by a vector clock. */ @@ -219,7 +222,7 @@ case class Gossip( // 3. merge members by selecting the single Member with highest MemberStatus out of the Member groups val mergedMembers = - SortedSet.empty[Member] ++ + Gossip.emptyMembers ++ membersGroupedByAddress.values.foldLeft(Vector.empty[Member]) { (acc, members) ⇒ acc :+ members.reduceLeft(Member.highestPriorityOf(_, _)) } @@ -244,7 +247,10 @@ case class Gossip( ")" } -case class Heartbeat(from: Address) +/** + * Sent at regular intervals for failure detection. + */ +case class Heartbeat(from: Address) extends ClusterMessage /** * Manages routing of the different cluster commands. 
@@ -372,6 +378,8 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ import clusterSettings._ val selfAddress = remote.transport.address + private val selfHeartbeat = Heartbeat(selfAddress) + val failureDetector = new AccrualFailureDetector( system, selfAddress, FailureDetectorThreshold, FailureDetectorMaxSampleSize) @@ -402,7 +410,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private val state = { val member = Member(selfAddress, MemberStatus.Joining) - val versionedGossip = Gossip(members = SortedSet.empty[Member] + member) + vclockNode // add me as member and update my vector clock + val versionedGossip = Gossip(members = Gossip.emptyMembers + member) + vclockNode // add me as member and update my vector clock val seenVersionedGossip = versionedGossip seen selfAddress new AtomicReference[State](State(seenVersionedGossip)) } @@ -757,9 +765,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * INTERNAL API */ - private[cluster] def receiveHeartbeat(from: Address): Unit = { - failureDetector heartbeat from - } + private[cluster] def receiveHeartbeat(from: Address): Unit = failureDetector heartbeat from /** * Joins the pre-configured contact point. 
@@ -785,10 +791,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val newSelf = localSelf copy (status = newStatus) // change my state in 'gossip.members' - val newMembers = localMembers map { member ⇒ - if (member.address == selfAddress) newSelf - else member - } + val newMembers = localMembers map { member ⇒ if (member.address == selfAddress) newSelf else member } val newGossip = localGossip copy (members = newMembers) @@ -893,13 +896,11 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!isSingletonCluster(localState)) { val liveMembers = localState.latestGossip.members.toIndexedSeq - val unreachableMembers = localState.latestGossip.overview.unreachable - // FIXME use unreachable? - for (member ← (liveMembers ++ unreachableMembers); if member.address != selfAddress) { + for (member ← liveMembers; if member.address != selfAddress) { val connection = clusterGossipConnectionFor(member.address) log.debug("Cluster Node [{}] - Heartbeat to [{}]", selfAddress, connection) - connection ! Heartbeat(selfAddress) + connection ! 
selfHeartbeat } } } From 34c9e49ee0915318e4f36c7ce33248c3072ab50c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 11 Jun 2012 22:12:45 +0200 Subject: [PATCH 353/538] Schedule cluster tasks with more accurate, see #2114 * Use scheduler with more accurate settings * New FixedRateTask that compensates for inaccuracy --- .../src/main/scala/akka/cluster/Cluster.scala | 29 ++++++---- .../scala/akka/cluster/FixedRateTask.scala | 54 +++++++++++++++++++ .../akka/cluster/FixedRateTaskSpec.scala | 36 +++++++++++++ 3 files changed, 110 insertions(+), 9 deletions(-) create mode 100644 akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala create mode 100644 akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index b463c3b0ea..8be6b21d25 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -23,6 +23,8 @@ import javax.management._ import scala.collection.immutable.{ Map, SortedSet } import scala.annotation.tailrec import com.google.protobuf.ByteString +import akka.util.internal.HashedWheelTimer +import akka.dispatch.MonitorableThreadFactory /** * Interface for membership change listener. 
@@ -422,28 +424,35 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // ===================== WORK DAEMONS ===================== // ======================================================== + private def hwt = new HashedWheelTimer(log, + MonitorableThreadFactory(system.name + "-cluster-scheduler", system.settings.Daemonicity, None), 50.millis, + system.settings.SchedulerTicksPerWheel) + private val clusterScheduler = new DefaultScheduler(hwt, log, system.dispatcher) + // start periodic gossip to random nodes in cluster - private val gossipCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, GossipInterval) { + private val gossipTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, GossipInterval) { gossip() } // start periodic heartbeat to all nodes in cluster - private val heartbeatCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, HeartbeatInterval) { + private val heartbeatTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, HeartbeatInterval) { heartbeat() } // start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list) - private val failureDetectorReaperCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, UnreachableNodesReaperInterval) { + private val failureDetectorReaperTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, UnreachableNodesReaperInterval) { reapUnreachableMembers() } // start periodic leader action management (only applies for the current leader) - private val leaderActionsCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, LeaderActionsInterval) { + private val leaderActionsTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, LeaderActionsInterval) { leaderActions() } createMBean() + system.registerOnTermination(shutdown()) + log.info("Cluster Node [{}] - has started up successfully", selfAddress) // ====================================================== @@ 
-507,11 +516,13 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ def shutdown(): Unit = { if (isRunning.compareAndSet(true, false)) { log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) - gossipCanceller.cancel() - heartbeatCanceller.cancel() - failureDetectorReaperCanceller.cancel() - leaderActionsCanceller.cancel() - system.stop(clusterDaemons) + gossipTask.cancel() + heartbeatTask.cancel() + failureDetectorReaperTask.cancel() + leaderActionsTask.cancel() + clusterScheduler.close() + if (!clusterDaemons.isTerminated) + system.stop(clusterDaemons) try { mBeanServer.unregisterMBean(clusterMBeanName) } catch { diff --git a/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala b/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala new file mode 100644 index 0000000000..0f594316d9 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala @@ -0,0 +1,54 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import java.util.concurrent.TimeUnit +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicLong + +import akka.actor.Scheduler +import akka.util.Duration + +/** + * INTERNAL API + */ +private[akka] object FixedRateTask { + def apply(scheduler: Scheduler, initalDelay: Duration, delay: Duration)(f: ⇒ Unit): FixedRateTask = { + new FixedRateTask(scheduler, initalDelay, delay, new Runnable { def run(): Unit = f }) + } +} + +/** + * INTERNAL API + * + * Task to be scheduled periodically at a fixed rate, compensating, on average, + * for inaccuracy in scheduler. It will start when constructed, using the + * initialDelay. 
+ */ +private[akka] class FixedRateTask(scheduler: Scheduler, initalDelay: Duration, delay: Duration, task: Runnable) extends Runnable { + + private val delayMillis = delay.toMillis + private val minDelayMillis = 1L + private val cancelled = new AtomicBoolean(false) + private val counter = new AtomicLong(0L) + private val startTime = System.currentTimeMillis + initalDelay.toMillis + scheduler.scheduleOnce(initalDelay, this) + + def cancel(): Unit = cancelled.set(true) + + override final def run(): Unit = if (!cancelled.get) try { + task.run() + } finally if (!cancelled.get) { + val nextTime = startTime + delayMillis * counter.incrementAndGet + val nextDelayMillis = nextTime - System.currentTimeMillis + val nextDelay = Duration( + (if (nextDelayMillis <= minDelayMillis) minDelayMillis else nextDelayMillis), + TimeUnit.MILLISECONDS) + try { + scheduler.scheduleOnce(nextDelay, this) + } catch { case e: IllegalStateException ⇒ /* will happen when scheduler is closed, nothing wrong */ } + } + +} diff --git a/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala b/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala new file mode 100644 index 0000000000..3efa3ab3ab --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala @@ -0,0 +1,36 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster + +import java.util.concurrent.atomic.AtomicInteger +import akka.testkit.AkkaSpec +import akka.util.duration._ +import akka.testkit.TimingTest + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class FixedRateTaskSpec extends AkkaSpec { + + "Task scheduled at fixed rate" must { + "adjust for scheduler inaccuracy" taggedAs TimingTest in { + val counter = new AtomicInteger + FixedRateTask(system.scheduler, 150.millis, 150.millis) { + counter.incrementAndGet() + } + 5000.millis.sleep() + counter.get must (be(33) or be(34)) + } + + "compensate for long running task" taggedAs TimingTest in { + val counter = new AtomicInteger + FixedRateTask(system.scheduler, 225.millis, 225.millis) { + counter.incrementAndGet() + 80.millis.sleep() + } + 5000.millis.sleep() + counter.get must (be(22) or be(23)) + } + } +} + From 649b9d51816f3be5c035178ca6daee01cbbfd0af Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 11 Jun 2012 22:53:45 +0200 Subject: [PATCH 354/538] Switching to Mr Pink's Java6 detector --- project/scripts/release | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/scripts/release b/project/scripts/release index 13795b3d53..9e418317bd 100755 --- a/project/scripts/release +++ b/project/scripts/release @@ -93,7 +93,7 @@ fi declare -r version=$1 declare -r publish_path="${release_server}:${release_path}" -[[ `java -version 2>&1 | grep "java version" | awk '{print $3}' | tr -d \" | awk '{split($0, array, ".")} END{print array[2]}'` -eq 6 ]] || fail "Java version is not 1.6" +[[ `java -version 2>&1 | head -1 | cut -d ' ' -f3 | cut -d '.' 
-f2` -eq 6 ]] || fail "Java version is not 1.6" # check for a git command type -P git &> /dev/null || fail "git command not found" From b27bae655404f9635b206b3a194008a3a7f3f221 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 12 Jun 2012 13:34:59 +0200 Subject: [PATCH 355/538] Use dedicated cluster scheduler only when default scheduler resolution isn't good enough, see #2214 * Config properties for scheduler * Commented shutdown considerations --- .../src/main/resources/reference.conf | 9 ++++++ .../src/main/scala/akka/cluster/Cluster.scala | 30 +++++++++++++++---- .../scala/akka/cluster/ClusterSettings.scala | 26 ++++++++-------- .../akka/cluster/ClusterConfigSpec.scala | 2 ++ 4 files changed, 50 insertions(+), 17 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 7fb930eaef..b9104fe6cf 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -49,5 +49,14 @@ akka { max-sample-size = 1000 } + + # If the tick-duration of the default scheduler is longer than the tick-duration + # configured here a dedicated scheduler will be used for periodic tasks of the cluster, + # otherwise the default scheduler is used. + # See akka.scheduler settings for more details about the HashedWheelTimer. 
+ scheduler { + tick-duration = 33ms + ticks-per-wheel = 512 + } } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 46c6919cc1..dda05bf6b0 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -19,6 +19,7 @@ import java.util.concurrent.TimeUnit._ import java.util.concurrent.TimeoutException import akka.jsr166y.ThreadLocalRandom import java.lang.management.ManagementFactory +import java.io.Closeable import javax.management._ import scala.collection.immutable.{ Map, SortedSet } import scala.annotation.tailrec @@ -435,10 +436,21 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // ===================== WORK DAEMONS ===================== // ======================================================== - private def hwt = new HashedWheelTimer(log, - MonitorableThreadFactory(system.name + "-cluster-scheduler", system.settings.Daemonicity, None), 50.millis, - system.settings.SchedulerTicksPerWheel) - private val clusterScheduler = new DefaultScheduler(hwt, log, system.dispatcher) + private def useDedicatedScheduler: Boolean = system.settings.SchedulerTickDuration > SchedulerTickDuration + + private val clusterScheduler: Scheduler = { + if (useDedicatedScheduler) { + val threadFactory = system.threadFactory match { + case tf: MonitorableThreadFactory ⇒ tf.copy(name = tf.name + "-cluster-scheduler") + case tf ⇒ tf + } + val hwt = new HashedWheelTimer(log, + threadFactory, + SchedulerTickDuration, SchedulerTicksPerWheel) + new DefaultScheduler(hwt, log, system.dispatcher) + } else + system.scheduler + } // start periodic gossip to random nodes in cluster private val gossipTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, GossipInterval) { @@ -527,13 +539,21 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) def shutdown(): Unit = { if 
(isRunning.compareAndSet(true, false)) { log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) + + // cancel the periodic tasks, note that otherwise they will be run when scheduler is shutdown gossipTask.cancel() heartbeatTask.cancel() failureDetectorReaperTask.cancel() leaderActionsTask.cancel() - clusterScheduler.close() + if (useDedicatedScheduler) clusterScheduler match { + case x: Closeable ⇒ x.close() + case _ ⇒ + } + // FIXME isTerminated check can be removed when ticket #2221 is fixed + // now it prevents logging if system is shutdown (or in progress of shutdown) if (!clusterDaemons.isTerminated) system.stop(clusterDaemons) + try { mBeanServer.unregisterMBean(clusterMBeanName) } catch { diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 9a17f2a0eb..ee4f6a03d2 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -13,22 +13,24 @@ import akka.actor.AddressFromURIString class ClusterSettings(val config: Config, val systemName: String) { import config._ - val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") - val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") - val FailureDetectorImplementationClass: Option[String] = getString("akka.cluster.failure-detector.implementation-class") match { + final val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") + final val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") + final val FailureDetectorImplementationClass: Option[String] = getString("akka.cluster.failure-detector.implementation-class") match { case "" ⇒ None case fqcn ⇒ Some(fqcn) } - val NodeToJoin: Option[Address] = getString("akka.cluster.node-to-join") match { + final val NodeToJoin: Option[Address] = 
getString("akka.cluster.node-to-join") match { case "" ⇒ None case AddressFromURIString(addr) ⇒ Some(addr) } - val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) - val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) - val HeartbeatInterval = Duration(getMilliseconds("akka.cluster.heartbeat-interval"), MILLISECONDS) - val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS) - val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS) - val NrOfGossipDaemons = getInt("akka.cluster.nr-of-gossip-daemons") - val NrOfDeputyNodes = getInt("akka.cluster.nr-of-deputy-nodes") - val AutoDown = getBoolean("akka.cluster.auto-down") + final val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) + final val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) + final val HeartbeatInterval = Duration(getMilliseconds("akka.cluster.heartbeat-interval"), MILLISECONDS) + final val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS) + final val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS) + final val NrOfGossipDaemons = getInt("akka.cluster.nr-of-gossip-daemons") + final val NrOfDeputyNodes = getInt("akka.cluster.nr-of-deputy-nodes") + final val AutoDown = getBoolean("akka.cluster.auto-down") + final val SchedulerTickDuration = Duration(getMilliseconds("akka.cluster.scheduler.tick-duration"), MILLISECONDS) + final val SchedulerTicksPerWheel = getInt("akka.cluster.scheduler.ticks-per-wheel") } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 
6c9023d410..481d9f7e5a 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -28,6 +28,8 @@ class ClusterConfigSpec extends AkkaSpec { NrOfGossipDaemons must be(4) NrOfDeputyNodes must be(3) AutoDown must be(true) + SchedulerTickDuration must be(33 millis) + SchedulerTicksPerWheel must be(512) } } } From 7b6ae2f5c91e266a2202c192d00f3f1baeb8b22d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 12 Jun 2012 13:37:21 +0200 Subject: [PATCH 356/538] Use nanoTime in FixedRateTask, see #2214 * Rewrote test to use latch and assert rate instead --- .../scala/akka/cluster/FixedRateTask.scala | 13 ++++------ .../akka/cluster/FixedRateTaskSpec.scala | 25 ++++++++++++------- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala b/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala index 0f594316d9..25ef058465 100644 --- a/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala +++ b/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala @@ -29,11 +29,10 @@ private[akka] object FixedRateTask { */ private[akka] class FixedRateTask(scheduler: Scheduler, initalDelay: Duration, delay: Duration, task: Runnable) extends Runnable { - private val delayMillis = delay.toMillis - private val minDelayMillis = 1L + private val delayNanos = delay.toNanos private val cancelled = new AtomicBoolean(false) private val counter = new AtomicLong(0L) - private val startTime = System.currentTimeMillis + initalDelay.toMillis + private val startTime = System.nanoTime + initalDelay.toNanos scheduler.scheduleOnce(initalDelay, this) def cancel(): Unit = cancelled.set(true) @@ -41,11 +40,9 @@ private[akka] class FixedRateTask(scheduler: Scheduler, initalDelay: Duration, d override final def run(): Unit = if (!cancelled.get) try { task.run() } finally if (!cancelled.get) { - val nextTime = startTime + delayMillis * 
counter.incrementAndGet - val nextDelayMillis = nextTime - System.currentTimeMillis - val nextDelay = Duration( - (if (nextDelayMillis <= minDelayMillis) minDelayMillis else nextDelayMillis), - TimeUnit.MILLISECONDS) + val nextTime = startTime + delayNanos * counter.incrementAndGet + // it's ok to schedule with negative duration, will run asap + val nextDelay = Duration(nextTime - System.nanoTime, TimeUnit.NANOSECONDS) try { scheduler.scheduleOnce(nextDelay, this) } catch { case e: IllegalStateException ⇒ /* will happen when scheduler is closed, nothing wrong */ } diff --git a/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala b/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala index 3efa3ab3ab..d259a5310b 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala @@ -4,32 +4,39 @@ package akka.cluster -import java.util.concurrent.atomic.AtomicInteger import akka.testkit.AkkaSpec import akka.util.duration._ import akka.testkit.TimingTest +import akka.testkit.TestLatch +import akka.dispatch.Await @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class FixedRateTaskSpec extends AkkaSpec { "Task scheduled at fixed rate" must { "adjust for scheduler inaccuracy" taggedAs TimingTest in { - val counter = new AtomicInteger + val startTime = System.nanoTime + val n = 33 + val latch = new TestLatch(n) FixedRateTask(system.scheduler, 150.millis, 150.millis) { - counter.incrementAndGet() + latch.countDown() } - 5000.millis.sleep() - counter.get must (be(33) or be(34)) + Await.ready(latch, 6.seconds) + val rate = n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis + rate must be(6.66 plusOrMinus (0.4)) } "compensate for long running task" taggedAs TimingTest in { - val counter = new AtomicInteger + val startTime = System.nanoTime + val n = 22 + val latch = new TestLatch(n) FixedRateTask(system.scheduler, 225.millis, 225.millis) { - 
counter.incrementAndGet() 80.millis.sleep() + latch.countDown() } - 5000.millis.sleep() - counter.get must (be(22) or be(23)) + Await.ready(latch, 6.seconds) + val rate = n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis + rate must be(4.4 plusOrMinus (0.3)) } } } From 40d9b27e735a092997391a0685ce1e790bd48ab2 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 12 Jun 2012 14:16:30 +0200 Subject: [PATCH 357/538] Info log about dedicated scheduler, and refactoring, see #2214 * Refactoring with wrapping of Scheduler according to @viktorklang's wish --- .../src/main/scala/akka/cluster/Cluster.scala | 37 ++++++++++++++----- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index dda05bf6b0..571a8eaf68 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -436,10 +436,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // ===================== WORK DAEMONS ===================== // ======================================================== - private def useDedicatedScheduler: Boolean = system.settings.SchedulerTickDuration > SchedulerTickDuration - - private val clusterScheduler: Scheduler = { - if (useDedicatedScheduler) { + private val clusterScheduler: Scheduler with Closeable = { + if (system.settings.SchedulerTickDuration > SchedulerTickDuration) { + log.info("Using a dedicated scheduler for cluster. 
Default scheduler can be used if configured " + + "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].", + system.settings.SchedulerTickDuration.toMillis, SchedulerTickDuration.toMillis) val threadFactory = system.threadFactory match { case tf: MonitorableThreadFactory ⇒ tf.copy(name = tf.name + "-cluster-scheduler") case tf ⇒ tf @@ -448,8 +449,26 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) threadFactory, SchedulerTickDuration, SchedulerTicksPerWheel) new DefaultScheduler(hwt, log, system.dispatcher) - } else - system.scheduler + } else { + // delegate to system.scheduler, but don't close + val systemScheduler = system.scheduler + new Scheduler with Closeable { + // we are using system.scheduler, which we are not responsible for closing + def close(): Unit = () + def schedule(initialDelay: Duration, frequency: Duration, receiver: ActorRef, message: Any): Cancellable = + systemScheduler.schedule(initialDelay, frequency, receiver, message) + def schedule(initialDelay: Duration, frequency: Duration)(f: ⇒ Unit): Cancellable = + systemScheduler.schedule(initialDelay, frequency)(f) + def schedule(initialDelay: Duration, frequency: Duration, runnable: Runnable): Cancellable = + systemScheduler.schedule(initialDelay, frequency, runnable) + def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = + systemScheduler.scheduleOnce(delay, runnable) + def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = + systemScheduler.scheduleOnce(delay, receiver, message) + def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = + systemScheduler.scheduleOnce(delay)(f) + } + } } // start periodic gossip to random nodes in cluster @@ -545,10 +564,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) heartbeatTask.cancel() failureDetectorReaperTask.cancel() leaderActionsTask.cancel() - if (useDedicatedScheduler) clusterScheduler match { 
- case x: Closeable ⇒ x.close() - case _ ⇒ - } + clusterScheduler.close() + // FIXME isTerminated check can be removed when ticket #2221 is fixed // now it prevents logging if system is shutdown (or in progress of shutdown) if (!clusterDaemons.isTerminated) From 13f3cddbfb86633f8b2abd90233a345255a546ae Mon Sep 17 00:00:00 2001 From: Dale Date: Tue, 12 Jun 2012 16:16:25 +0300 Subject: [PATCH 358/538] Minor markup fix. --- akka-docs/java/typed-actors.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-docs/java/typed-actors.rst b/akka-docs/java/typed-actors.rst index 6ad870b309..4d36872f1a 100644 --- a/akka-docs/java/typed-actors.rst +++ b/akka-docs/java/typed-actors.rst @@ -130,7 +130,7 @@ if needed. It will return ``None`` if a timeout occurs. .. includecode:: code/docs/actor/TypedActorDocTestBase.java :include: typed-actor-call-strict -This will block for as long as the timeout that was set in the ``Props` of the Typed Actor, +This will block for as long as the timeout that was set in the ``Props`` of the Typed Actor, if needed. It will throw a ``java.util.concurrent.TimeoutException`` if a timeout occurs. 
Request-reply-with-future message send From 8d12385a3edb1b6eedc4ad756296fd542cc6743e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 12 Jun 2012 15:48:23 +0200 Subject: [PATCH 359/538] Prolonging wait time for TypedActorSpec to avoid problems on slower machines --- akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala index 502712872a..b7a5a8f64b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala @@ -307,7 +307,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) "be able to call methods returning Scala Options" in { val t = newFooBar(Duration(500, "ms")) t.optionPigdog(200).get must be("Pigdog") - t.optionPigdog(700) must be(None) + t.optionPigdog(1000) must be(None) mustStop(t) } From de1ad302172ff82b60b7cfbdefa2f6c5d295b811 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 12 Jun 2012 16:07:20 +0200 Subject: [PATCH 360/538] Fix false convergence when singleton cluster, see #2222 * All members must be in seen table for convergence * Added extra debug logging due to convergence issues * Enabled test of convergence for node joining singleton cluster --- .../src/main/scala/akka/cluster/Cluster.scala | 34 +++++++++++++------ .../MembershipChangeListenerJoinSpec.scala | 7 ++-- .../test/scala/akka/cluster/ClusterSpec.scala | 3 +- 3 files changed, 29 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 571a8eaf68..c090995e4c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -1138,24 +1138,38 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: 
FailureDetector) private def convergence(gossip: Gossip): Option[Gossip] = { val overview = gossip.overview val unreachable = overview.unreachable + val seen = overview.seen // First check that: - // 1. we don't have any members that are unreachable (unreachable.isEmpty == true), or + // 1. we don't have any members that are unreachable, or // 2. all unreachable members in the set have status DOWN // Else we can't continue to check for convergence // When that is done we check that all the entries in the 'seen' table have the same vector clock version - if (unreachable.isEmpty || !unreachable.exists { m ⇒ - m.status != MemberStatus.Down && - m.status != MemberStatus.Removed - }) { - val seen = gossip.overview.seen - val views = Set.empty[VectorClock] ++ seen.values + // and that all members exists in seen table + val hasUnreachable = unreachable.nonEmpty && unreachable.exists { m ⇒ + m.status != MemberStatus.Down && m.status != MemberStatus.Removed + } + val allMembersInSeen = gossip.members.forall(m ⇒ seen.contains(m.address)) - if (views.size == 1) { + if (hasUnreachable) { + log.debug("Cluster Node [{}] - No cluster convergence, due to unreachable [{}].", selfAddress, unreachable) + None + } else if (!allMembersInSeen) { + log.debug("Cluster Node [{}] - No cluster convergence, due to members not in seen table [{}].", selfAddress, + gossip.members.map(_.address) -- seen.keySet) + None + } else { + + val views = (Set.empty[VectorClock] ++ seen.values).size + + if (views == 1) { log.debug("Cluster Node [{}] - Cluster convergence reached: [{}]", selfAddress, gossip.members.mkString(", ")) Some(gossip) - } else None - } else None + } else { + log.debug("Cluster Node [{}] - No cluster convergence, due to [{}] different views.", selfAddress, views) + None + } + } } private def isAvailable(state: State): Boolean = !isUnavailable(state) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 2809ae820b..1b296c58f1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -18,7 +18,7 @@ object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString("akka.cluster.leader-actions-interval = 5 s") // increase the leader action task interval to allow time checking for JOIN before leader moves it to UP - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec with FailureDetectorPuppetStrategy @@ -40,14 +40,13 @@ abstract class MembershipChangeListenerJoinSpec val joinLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.exists(_.status == MemberStatus.Joining)) // second node is not part of node ring anymore + if (members.size == 2 && members.exists(_.status == MemberStatus.Joining)) joinLatch.countDown() } }) testConductor.enter("registered-listener") joinLatch.await - cluster.convergence.isDefined must be(true) } runOn(second) { @@ -55,6 +54,8 @@ abstract class MembershipChangeListenerJoinSpec cluster.join(firstAddress) } + awaitUpConvergence(2) + testConductor.enter("after") } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 112da9d0c0..03f6460ea1 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -110,8 +110,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { cluster.joining(addresses(1)) 
cluster.latestGossip.members.map(_.address) must be(Set(selfAddress, addresses(1))) memberStatus(addresses(1)) must be(Some(MemberStatus.Joining)) - // FIXME why is it still convergence immediately after joining? - //cluster.convergence.isDefined must be(false) + cluster.convergence.isDefined must be(false) } "accept a few more joining nodes" in { From 92cab53b1e3b635f0e28ea96d82316d33710188b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 12 Jun 2012 16:15:05 +0200 Subject: [PATCH 361/538] Rename + operator of VectorClock and Versioned to :+ * + is kind of reserved for string concatination --- .../src/main/scala/akka/cluster/Cluster.scala | 20 +-- .../main/scala/akka/cluster/VectorClock.scala | 4 +- .../scala/akka/cluster/VectorClockSpec.scala | 166 +++++++++--------- 3 files changed, 95 insertions(+), 95 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index c090995e4c..5bc968920a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -195,9 +195,9 @@ case class Gossip( /** * Increments the version for this 'Node'. 
*/ - def +(node: VectorClock.Node): Gossip = copy(version = version + node) + def :+(node: VectorClock.Node): Gossip = copy(version = version :+ node) - def +(member: Member): Gossip = { + def :+(member: Member): Gossip = { if (members contains member) this else this copy (members = members + member) } @@ -424,7 +424,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private val state = { val member = Member(selfAddress, MemberStatus.Joining) - val versionedGossip = Gossip(members = Gossip.emptyMembers + member) + vclockNode // add me as member and update my vector clock + val versionedGossip = Gossip(members = Gossip.emptyMembers + member) :+ vclockNode // add me as member and update my vector clock val seenVersionedGossip = versionedGossip seen selfAddress new AtomicReference[State](State(seenVersionedGossip)) } @@ -658,7 +658,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newMembers = localMembers + Member(node, MemberStatus.Joining) // add joining node as Joining val newGossip = localGossip copy (overview = newOverview, members = newMembers) - val versionedGossip = newGossip + vclockNode + val versionedGossip = newGossip :+ vclockNode val seenVersionedGossip = versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) @@ -686,7 +686,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newMembers = localMembers + Member(address, MemberStatus.Leaving) // mark node as LEAVING val newGossip = localGossip copy (members = newMembers) - val versionedGossip = newGossip + vclockNode + val versionedGossip = newGossip :+ vclockNode val seenVersionedGossip = versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) @@ -772,7 +772,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newOverview = localOverview copy (seen = newSeen, 
unreachable = newUnreachablePlusNewlyDownedMembers) // update gossip overview val newGossip = localGossip copy (overview = newOverview, members = newMembers) // update gossip - val versionedGossip = newGossip + vclockNode + val versionedGossip = newGossip :+ vclockNode val newState = localState copy (latestGossip = versionedGossip seen selfAddress) if (!state.compareAndSet(localState, newState)) downing(address) // recur if we fail the update @@ -793,7 +793,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (remoteGossip.version <> localGossip.version) { // concurrent val mergedGossip = remoteGossip merge localGossip - val versionedMergedGossip = mergedGossip + vclockNode + val versionedMergedGossip = mergedGossip :+ vclockNode // FIXME change to debug log level, when failure detector is stable log.info( @@ -855,7 +855,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newGossip = localGossip copy (members = newMembers) // version my changes - val versionedGossip = newGossip + vclockNode + val versionedGossip = newGossip :+ vclockNode val seenVersionedGossip = versionedGossip seen selfAddress state copy (latestGossip = seenVersionedGossip) @@ -992,7 +992,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newGossip = localGossip copy (overview = newOverview, members = newMembers) // updating vclock and 'seen' table - val versionedGossip = newGossip + vclockNode + val versionedGossip = newGossip :+ vclockNode val seenVersionedGossip = versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) @@ -1111,7 +1111,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // ---------------------- // 5. 
Updating the vclock version for the changes // ---------------------- - val versionedGossip = newGossip + vclockNode + val versionedGossip = newGossip :+ vclockNode // ---------------------- // 6. Updating the 'seen' table diff --git a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala index 82c1b9881d..ed6724058f 100644 --- a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala +++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala @@ -19,7 +19,7 @@ class VectorClockException(message: String) extends AkkaException(message) */ trait Versioned[T] { def version: VectorClock - def +(node: VectorClock.Node): T + def :+(node: VectorClock.Node): T } /** @@ -142,7 +142,7 @@ case class VectorClock( /** * Increment the version for the node passed as argument. Returns a new VectorClock. */ - def +(node: Node): VectorClock = copy(versions = versions + (node -> Timestamp())) + def :+(node: Node): VectorClock = copy(versions = versions + (node -> Timestamp())) /** * Returns true if this and that are concurrent else false. 
diff --git a/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala index de1142b668..19ad9410c4 100644 --- a/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala @@ -27,67 +27,67 @@ class VectorClockSpec extends AkkaSpec { "pass misc comparison test 1" in { val clock1_1 = VectorClock() - val clock2_1 = clock1_1 + Node("1") - val clock3_1 = clock2_1 + Node("2") - val clock4_1 = clock3_1 + Node("1") + val clock2_1 = clock1_1 :+ Node("1") + val clock3_1 = clock2_1 :+ Node("2") + val clock4_1 = clock3_1 :+ Node("1") val clock1_2 = VectorClock() - val clock2_2 = clock1_2 + Node("1") - val clock3_2 = clock2_2 + Node("2") - val clock4_2 = clock3_2 + Node("1") + val clock2_2 = clock1_2 :+ Node("1") + val clock3_2 = clock2_2 :+ Node("2") + val clock4_2 = clock3_2 :+ Node("1") clock4_1 <> clock4_2 must be(false) } "pass misc comparison test 2" in { val clock1_1 = VectorClock() - val clock2_1 = clock1_1 + Node("1") - val clock3_1 = clock2_1 + Node("2") - val clock4_1 = clock3_1 + Node("1") + val clock2_1 = clock1_1 :+ Node("1") + val clock3_1 = clock2_1 :+ Node("2") + val clock4_1 = clock3_1 :+ Node("1") val clock1_2 = VectorClock() - val clock2_2 = clock1_2 + Node("1") - val clock3_2 = clock2_2 + Node("2") - val clock4_2 = clock3_2 + Node("1") - val clock5_2 = clock4_2 + Node("3") + val clock2_2 = clock1_2 :+ Node("1") + val clock3_2 = clock2_2 :+ Node("2") + val clock4_2 = clock3_2 :+ Node("1") + val clock5_2 = clock4_2 :+ Node("3") clock4_1 < clock5_2 must be(true) } "pass misc comparison test 3" in { var clock1_1 = VectorClock() - val clock2_1 = clock1_1 + Node("1") + val clock2_1 = clock1_1 :+ Node("1") val clock1_2 = VectorClock() - val clock2_2 = clock1_2 + Node("2") + val clock2_2 = clock1_2 :+ Node("2") clock2_1 <> clock2_2 must be(true) } "pass misc comparison test 4" in { val clock1_3 = VectorClock() - val clock2_3 
= clock1_3 + Node("1") - val clock3_3 = clock2_3 + Node("2") - val clock4_3 = clock3_3 + Node("1") + val clock2_3 = clock1_3 :+ Node("1") + val clock3_3 = clock2_3 :+ Node("2") + val clock4_3 = clock3_3 :+ Node("1") val clock1_4 = VectorClock() - val clock2_4 = clock1_4 + Node("1") - val clock3_4 = clock2_4 + Node("1") - val clock4_4 = clock3_4 + Node("3") + val clock2_4 = clock1_4 :+ Node("1") + val clock3_4 = clock2_4 :+ Node("1") + val clock4_4 = clock3_4 :+ Node("3") clock4_3 <> clock4_4 must be(true) } "pass misc comparison test 5" in { val clock1_1 = VectorClock() - val clock2_1 = clock1_1 + Node("2") - val clock3_1 = clock2_1 + Node("2") + val clock2_1 = clock1_1 :+ Node("2") + val clock3_1 = clock2_1 :+ Node("2") val clock1_2 = VectorClock() - val clock2_2 = clock1_2 + Node("1") - val clock3_2 = clock2_2 + Node("2") - val clock4_2 = clock3_2 + Node("2") - val clock5_2 = clock4_2 + Node("3") + val clock2_2 = clock1_2 :+ Node("1") + val clock3_2 = clock2_2 :+ Node("2") + val clock4_2 = clock3_2 :+ Node("2") + val clock5_2 = clock4_2 :+ Node("3") clock3_1 < clock5_2 must be(true) clock5_2 > clock3_1 must be(true) @@ -95,12 +95,12 @@ class VectorClockSpec extends AkkaSpec { "pass misc comparison test 6" in { val clock1_1 = VectorClock() - val clock2_1 = clock1_1 + Node("1") - val clock3_1 = clock2_1 + Node("2") + val clock2_1 = clock1_1 :+ Node("1") + val clock3_1 = clock2_1 :+ Node("2") val clock1_2 = VectorClock() - val clock2_2 = clock1_2 + Node("1") - val clock3_2 = clock2_2 + Node("1") + val clock2_2 = clock1_2 :+ Node("1") + val clock3_2 = clock2_2 :+ Node("1") clock3_1 <> clock3_2 must be(true) clock3_2 <> clock3_1 must be(true) @@ -108,14 +108,14 @@ class VectorClockSpec extends AkkaSpec { "pass misc comparison test 7" in { val clock1_1 = VectorClock() - val clock2_1 = clock1_1 + Node("1") - val clock3_1 = clock2_1 + Node("2") - val clock4_1 = clock3_1 + Node("2") - val clock5_1 = clock4_1 + Node("3") + val clock2_1 = clock1_1 :+ Node("1") + val 
clock3_1 = clock2_1 :+ Node("2") + val clock4_1 = clock3_1 :+ Node("2") + val clock5_1 = clock4_1 :+ Node("3") val clock1_2 = VectorClock() - val clock2_2 = clock1_2 + Node("2") - val clock3_2 = clock2_2 + Node("2") + val clock2_2 = clock1_2 :+ Node("2") + val clock3_2 = clock2_2 :+ Node("2") clock5_1 <> clock3_2 must be(true) clock3_2 <> clock5_1 must be(true) @@ -127,14 +127,14 @@ class VectorClockSpec extends AkkaSpec { val node3 = Node("3") val clock1_1 = VectorClock() - val clock2_1 = clock1_1 + node1 - val clock3_1 = clock2_1 + node2 - val clock4_1 = clock3_1 + node2 - val clock5_1 = clock4_1 + node3 + val clock2_1 = clock1_1 :+ node1 + val clock3_1 = clock2_1 :+ node2 + val clock4_1 = clock3_1 :+ node2 + val clock5_1 = clock4_1 :+ node3 val clock1_2 = VectorClock() - val clock2_2 = clock1_2 + node2 - val clock3_2 = clock2_2 + node2 + val clock2_2 = clock1_2 :+ node2 + val clock3_2 = clock2_2 :+ node2 val merged1 = clock3_2 merge clock5_1 merged1.versions.size must be(3) @@ -164,14 +164,14 @@ class VectorClockSpec extends AkkaSpec { val node4 = Node("4") val clock1_1 = VectorClock() - val clock2_1 = clock1_1 + node1 - val clock3_1 = clock2_1 + node2 - val clock4_1 = clock3_1 + node2 - val clock5_1 = clock4_1 + node3 + val clock2_1 = clock1_1 :+ node1 + val clock3_1 = clock2_1 :+ node2 + val clock4_1 = clock3_1 :+ node2 + val clock5_1 = clock4_1 :+ node3 val clock1_2 = VectorClock() - val clock2_2 = clock1_2 + node4 - val clock3_2 = clock2_2 + node4 + val clock2_2 = clock1_2 :+ node4 + val clock3_2 = clock2_2 :+ node4 val merged1 = clock3_2 merge clock5_1 merged1.versions.size must be(4) @@ -204,8 +204,8 @@ class VectorClockSpec extends AkkaSpec { val v1 = VectorClock() val v2 = VectorClock() - val vv1 = v1 + node1 - val vv2 = v2 + node2 + val vv1 = v1 :+ node1 + val vv2 = v2 :+ node2 (vv1 > v1) must equal(true) (vv2 > v2) must equal(true) @@ -225,12 +225,12 @@ class VectorClockSpec extends AkkaSpec { val a = VectorClock() val b = VectorClock() - val a1 = a + 
node1 - val b1 = b + node2 + val a1 = a :+ node1 + val b1 = b :+ node2 - var a2 = a1 + node1 + var a2 = a1 :+ node1 var c = a2.merge(b1) - var c1 = c + node3 + var c1 = c :+ node3 (c1 > a2) must equal(true) (c1 > b1) must equal(true) @@ -239,7 +239,7 @@ class VectorClockSpec extends AkkaSpec { "An instance of Versioned" must { class TestVersioned(val version: VectorClock = VectorClock()) extends Versioned[TestVersioned] { - def +(node: Node): TestVersioned = new TestVersioned(version + node) + def :+(node: Node): TestVersioned = new TestVersioned(version :+ node) } import Versioned.latestVersionOf @@ -251,67 +251,67 @@ class VectorClockSpec extends AkkaSpec { "happen before an identical versioned with a single additional event" in { val versioned1_1 = new TestVersioned() - val versioned2_1 = versioned1_1 + Node("1") - val versioned3_1 = versioned2_1 + Node("2") - val versioned4_1 = versioned3_1 + Node("1") + val versioned2_1 = versioned1_1 :+ Node("1") + val versioned3_1 = versioned2_1 :+ Node("2") + val versioned4_1 = versioned3_1 :+ Node("1") val versioned1_2 = new TestVersioned() - val versioned2_2 = versioned1_2 + Node("1") - val versioned3_2 = versioned2_2 + Node("2") - val versioned4_2 = versioned3_2 + Node("1") - val versioned5_2 = versioned4_2 + Node("3") + val versioned2_2 = versioned1_2 :+ Node("1") + val versioned3_2 = versioned2_2 :+ Node("2") + val versioned4_2 = versioned3_2 :+ Node("1") + val versioned5_2 = versioned4_2 :+ Node("3") latestVersionOf[TestVersioned](versioned4_1, versioned5_2) must be(versioned5_2) } "pass misc comparison test 1" in { var versioned1_1 = new TestVersioned() - val versioned2_1 = versioned1_1 + Node("1") + val versioned2_1 = versioned1_1 :+ Node("1") val versioned1_2 = new TestVersioned() - val versioned2_2 = versioned1_2 + Node("2") + val versioned2_2 = versioned1_2 :+ Node("2") latestVersionOf[TestVersioned](versioned2_1, versioned2_2) must be(versioned2_2) } "pass misc comparison test 2" in { val versioned1_3 = new 
TestVersioned() - val versioned2_3 = versioned1_3 + Node("1") - val versioned3_3 = versioned2_3 + Node("2") - val versioned4_3 = versioned3_3 + Node("1") + val versioned2_3 = versioned1_3 :+ Node("1") + val versioned3_3 = versioned2_3 :+ Node("2") + val versioned4_3 = versioned3_3 :+ Node("1") val versioned1_4 = new TestVersioned() - val versioned2_4 = versioned1_4 + Node("1") - val versioned3_4 = versioned2_4 + Node("1") - val versioned4_4 = versioned3_4 + Node("3") + val versioned2_4 = versioned1_4 :+ Node("1") + val versioned3_4 = versioned2_4 :+ Node("1") + val versioned4_4 = versioned3_4 :+ Node("3") latestVersionOf[TestVersioned](versioned4_3, versioned4_4) must be(versioned4_4) } "pass misc comparison test 3" in { val versioned1_1 = new TestVersioned() - val versioned2_1 = versioned1_1 + Node("2") - val versioned3_1 = versioned2_1 + Node("2") + val versioned2_1 = versioned1_1 :+ Node("2") + val versioned3_1 = versioned2_1 :+ Node("2") val versioned1_2 = new TestVersioned() - val versioned2_2 = versioned1_2 + Node("1") - val versioned3_2 = versioned2_2 + Node("2") - val versioned4_2 = versioned3_2 + Node("2") - val versioned5_2 = versioned4_2 + Node("3") + val versioned2_2 = versioned1_2 :+ Node("1") + val versioned3_2 = versioned2_2 :+ Node("2") + val versioned4_2 = versioned3_2 :+ Node("2") + val versioned5_2 = versioned4_2 :+ Node("3") latestVersionOf[TestVersioned](versioned3_1, versioned5_2) must be(versioned5_2) } "pass misc comparison test 4" in { val versioned1_1 = new TestVersioned() - val versioned2_1 = versioned1_1 + Node("1") - val versioned3_1 = versioned2_1 + Node("2") - val versioned4_1 = versioned3_1 + Node("2") - val versioned5_1 = versioned4_1 + Node("3") + val versioned2_1 = versioned1_1 :+ Node("1") + val versioned3_1 = versioned2_1 :+ Node("2") + val versioned4_1 = versioned3_1 :+ Node("2") + val versioned5_1 = versioned4_1 :+ Node("3") val versioned1_2 = new TestVersioned() - val versioned2_2 = versioned1_2 + Node("2") - val versioned3_2 
= versioned2_2 + Node("2") + val versioned2_2 = versioned1_2 :+ Node("2") + val versioned3_2 = versioned2_2 :+ Node("2") latestVersionOf[TestVersioned](versioned5_1, versioned3_2) must be(versioned3_2) } From cc79aae1a41a36b3eaa3f2f96a1154e6b6cf9092 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Tue, 12 Jun 2012 16:08:19 +0200 Subject: [PATCH 362/538] Adding PojoSR tests and a lot of code cleanup --- .../additional/code/osgi/Activator.scala | 2 +- .../akka/osgi/blueprint/aries/akka.xsd | 41 +---- .../akka/osgi/ActorSystemActivator.scala | 1 - .../akka/osgi/OsgiActorSystemFactory.scala | 14 +- .../BlueprintActorSystemFactory.scala | 38 +++-- .../blueprint/aries/NamespaceHandler.scala | 154 +++++++++++------- .../osgi/blueprint/aries/ParserHelper.scala | 16 ++ .../akka/osgi/blueprint/aries/config.xml | 13 ++ .../akka/osgi/blueprint/aries/injection.xml | 13 ++ .../akka/osgi/blueprint/aries/simple.xml | 6 +- akka-osgi/src/test/resources/logback-test.xml | 23 +++ .../akka/osgi/ActorSystemActivatorTest.scala | 56 ++----- .../scala/akka/osgi/PojoSRTestSupport.scala | 150 +++++++++++++++++ .../aries/NamespaceHandlerTest.scala | 94 +++++++++++ .../akka/osgi/test/ActorSystemAwareBean.scala | 11 ++ .../test/scala/akka/osgi/test/PingPong.scala | 22 +++ .../osgi/test/TestActorSystemActivator.scala | 16 ++ project/AkkaBuild.scala | 17 +- 18 files changed, 517 insertions(+), 170 deletions(-) create mode 100644 akka-osgi/src/main/scala/akka/osgi/blueprint/aries/ParserHelper.scala create mode 100644 akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml create mode 100644 akka-osgi/src/test/resources/akka/osgi/blueprint/aries/injection.xml create mode 100644 akka-osgi/src/test/resources/logback-test.xml create mode 100644 akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala create mode 100644 akka-osgi/src/test/scala/akka/osgi/blueprint/aries/NamespaceHandlerTest.scala create mode 100644 akka-osgi/src/test/scala/akka/osgi/test/ActorSystemAwareBean.scala 
create mode 100644 akka-osgi/src/test/scala/akka/osgi/test/PingPong.scala create mode 100644 akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala diff --git a/akka-docs/additional/code/osgi/Activator.scala b/akka-docs/additional/code/osgi/Activator.scala index 0e3a5c82ee..06a538d242 100644 --- a/akka-docs/additional/code/osgi/Activator.scala +++ b/akka-docs/additional/code/osgi/Activator.scala @@ -1,4 +1,4 @@ -import akka.actor.{Props, ActorSystem} +import akka.actor.{ Props, ActorSystem } import akka.osgi.ActorSystemActivator import org.apache.servicemix.examples.akka.Listener import org.apache.servicemix.examples.akka.Master diff --git a/akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd b/akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd index 256dff22e9..d7d0f77a2c 100644 --- a/akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd +++ b/akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd @@ -1,25 +1,4 @@ - - - - - + + - - - - - Defines an Akka Actor - - - - - - + + + + Defines an Akka ActorSystem configuration + + diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index c6d24e8262..ef04607976 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -40,7 +40,6 @@ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ String) extends */ def stop(context: BundleContext) { if (system != null) { - system.shutdown() system.shutdown() system = null } diff --git a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala index 8c41521964..cddf797d07 100644 --- a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala +++ b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala @@ -2,9 +2,9 @@ package akka.osgi import 
impl.BundleDelegatingClassLoader import org.osgi.framework.BundleContext -import java.util.Properties import akka.actor.ActorSystem import com.typesafe.config.{ ConfigFactory, Config } +import java.util.{ Dictionary, Properties } /** * Factory class to create ActorSystem implementations in an OSGi environment. This mainly involves dealing with @@ -20,16 +20,18 @@ class OsgiActorSystemFactory(val context: BundleContext) { /** * Creates the ActorSystem and registers it in the OSGi Service Registry */ - def createActorSystem(name: String) = { + def createActorSystem(name: String): ActorSystem = createActorSystem(Option(name)) + + def createActorSystem(name: Option[String]): ActorSystem = { val system = ActorSystem(actorSystemName(name), actorSystemConfig(context), classloader) registerService(system) system } def registerService(system: ActorSystem) { - val properties = new Properties(); + val properties = new Properties() properties.put("name", system.name) - context.registerService(classOf[ActorSystem].getName, system, properties) + context.registerService(classOf[ActorSystem].getName, system, properties.asInstanceOf[Dictionary[String, Any]]) } /** @@ -44,8 +46,8 @@ class OsgiActorSystemFactory(val context: BundleContext) { /** * Determine a the ActorSystem name */ - def actorSystemName(name: String): String = - Option(name).getOrElse("bundle-%s-ActorSystem".format(context.getBundle().getBundleId)) + def actorSystemName(name: Option[String]): String = + name.getOrElse("bundle-%s-ActorSystem".format(context.getBundle().getBundleId)) } diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala index 92e7e8a099..51c3e7291f 100644 --- a/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala +++ b/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala @@ -4,31 +4,39 @@ import org.osgi.framework.BundleContext 
import akka.osgi.OsgiActorSystemFactory import collection.mutable.Buffer import akka.actor.{ Actor, Props, ActorSystem } +import com.typesafe.config.ConfigFactory /** * A set of helper/factory classes to build a Akka system using Blueprint */ class BlueprintActorSystemFactory(context: BundleContext, name: String) extends OsgiActorSystemFactory(context) { - val systems: Buffer[ActorSystem] = Buffer() + var config: Option[String] = None - def this(context: BundleContext) = this(context, null) + lazy val system = super.createActorSystem(stringToOption(name)) - def create: ActorSystem = create(null) - def create(name: String): ActorSystem = { - val system = super.createActorSystem(name) - systems += system - system + def setConfig(config: String) = { this.config = Some(config) } + + def create = system + + def destroy = system.shutdown() + + def stringToOption(original: String) = if (original == null || original.isEmpty) { + None + } else { + Some(original) } - def destroy = for (system ← systems) { - system.shutdown() + /** + * Strategy method to create the Config for the ActorSystem, ensuring that the default/reference configuration is + * loaded from the akka-actor bundle. 
+ */ + override def actorSystemConfig(context: BundleContext) = { + config match { + case Some(value) ⇒ ConfigFactory.parseString(value).withFallback(super.actorSystemConfig(context)) + case None ⇒ super.actorSystemConfig(context) + } + } } -class BlueprintActorSystem(context: BundleContext, system: ActorSystem) { - - def createActor(name: String) = system.actorOf(Props(context.getBundle.loadClass(name).asInstanceOf[Class[Actor]])) - -} - diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala b/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala index b1412eae91..245ea538b6 100644 --- a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala +++ b/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala @@ -9,92 +9,122 @@ import org.osgi.framework.BundleContext import org.apache.aries.blueprint.reflect.{ ValueMetadataImpl, RefMetadataImpl, BeanArgumentImpl } import org.w3c.dom.{ NodeList, Element, Node } import org.osgi.service.blueprint.reflect.{ BeanMetadata, ComponentMetadata } -import akka.actor.{ ActorRef, ActorSystem } -import akka.osgi.blueprint.{ BlueprintActorSystem, BlueprintActorSystemFactory } +import akka.actor.{ ActorSystem } +import akka.osgi.blueprint.{ BlueprintActorSystemFactory } +import java.util.concurrent.atomic.AtomicInteger + +import ParserHelper.childElements /** * Aries Blueprint namespace handler implementation */ class NamespaceHandler extends org.apache.aries.blueprint.NamespaceHandler { - val CLASS_ATTRIBUTE = "class"; - val ID_ATTRIBUTE = "id"; - val NAME_ATTRIBUTE = "name"; + import NamespaceHandler._ - var idCounter = 1 + val idCounter = new AtomicInteger(0) def getSchemaLocation(namespace: String) = getClass().getResource("akka.xsd") def getManagedClasses = setAsJavaSet(Set(classOf[BlueprintActorSystemFactory])) - def parse(element: Element, context: ParserContext) = { - val factory = context.createMetadata(classOf[MutableBeanMetadata]) - 
factory.setId(getId(context, element)) - factory.setScope(BeanMetadata.SCOPE_SINGLETON) - factory.setProcessor(true) - factory.setClassName(classOf[BlueprintActorSystemFactory].getName) - factory.setDestroyMethod("destroy") - factory.addArgument(new BeanArgumentImpl(new RefMetadataImpl("blueprintBundleContext"), classOf[BundleContext].getName, -1)) - - val system = context.createMetadata(classOf[MutableBeanMetadata]) - system.setId(getId(context, element)) - system.setFactoryComponent(factory) - system.setFactoryMethod("create") - system.setRuntimeClass(classOf[ActorSystem]) - if (element.hasAttribute(NAME_ATTRIBUTE)) { - system.addArgument(new BeanArgumentImpl(new ValueMetadataImpl(element.getAttribute(NAME_ATTRIBUTE)), classOf[String].getName, -1)) - } - - val actorsystem = context.createMetadata(classOf[MutableBeanMetadata]) - actorsystem.setId(getId(context, element)) - actorsystem.setClassName(classOf[BlueprintActorSystem].getName) - actorsystem.addArgument(new BeanArgumentImpl(new RefMetadataImpl("blueprintBundleContext"), classOf[BundleContext].getName, -1)) - actorsystem.addArgument(new BeanArgumentImpl(system, classOf[ActorSystem].getName, -1)) - context.getComponentDefinitionRegistry.registerComponentDefinition(actorsystem) - - val nodelist = element.getChildNodes - var i = 0 - while (i < nodelist.getLength) { - val node = nodelist.item(i) - node.getLocalName match { - case "actor" if node.isInstanceOf[Element] ⇒ parseActor(node.asInstanceOf[Element], context, actorsystem) - case _ ⇒ - } - i += 1 - } - factory - } - - def parseActor(node: Element, context: ParserContext, actorsystem: MutableBeanMetadata) = { - val actor = context.createMetadata(classOf[MutableBeanMetadata]) - actor.setFactoryComponent(actorsystem) - if (node.hasAttribute(CLASS_ATTRIBUTE)) { - actor.addArgument(new BeanArgumentImpl(new ValueMetadataImpl(node.getAttribute(CLASS_ATTRIBUTE)), classOf[String].getName, -1)) - } - actor.setId(getId(context, node)) - 
actor.setFactoryMethod("createActor") - // actor.setRuntimeClass(classOf[ActorRef]) - context.getComponentDefinitionRegistry.registerComponentDefinition(actor) + def parse(element: Element, context: ParserContext) = element.getLocalName match { + case ACTORSYSTEM_ELEMENT_NAME ⇒ parseActorSystem(element, context) + case _ ⇒ throw new ComponentDefinitionException("Unexpected element for Akka namespace: %s".format(element)) } def decorate(node: Node, component: ComponentMetadata, context: ParserContext) = throw new ComponentDefinitionException("Bad xml syntax: node decoration is not supported"); + /* + * Parse + */ + def parseActorSystem(element: Element, context: ParserContext) = { + val factory = createFactoryBean(context, element.getAttribute(NAME_ATTRIBUTE)) + + for (child ← childElements(element)) { + child.getLocalName match { + case CONFIG_ELEMENT_NAME ⇒ parseConfig(child, context, factory) + case _ ⇒ throw new ComponentDefinitionException("Unexpected child element %s found in %s".format(child, element)) + } + } + + createActorSystemBean(context, element, factory) + } + + /* + * Parse + */ + def parseConfig(node: Element, context: ParserContext, factory: MutableBeanMetadata) = { + factory.addProperty("config", new ValueMetadataImpl(node.getTextContent)) + } + + /* + * Create the bean definition for the ActorSystem + */ + def createActorSystemBean(context: ParserContext, element: Element, factory: MutableBeanMetadata): MutableBeanMetadata = { + val system = context.createMetadata(classOf[MutableBeanMetadata]) + system.setId(getId(context, element)) + system.setFactoryComponent(factory) + + system.setFactoryMethod(FACTORY_METHOD_NAME) + system.setRuntimeClass(classOf[ActorSystem]) + system + } + + /* + * Create the bean definition for the BlueprintActorSystemFactory + */ + def createFactoryBean(context: ParserContext, name: String): MutableBeanMetadata = { + val factory = context.createMetadata(classOf[MutableBeanMetadata]) + 
factory.setId(findAvailableId(context)) + factory.setScope(BeanMetadata.SCOPE_SINGLETON) + factory.setProcessor(true) + factory.setClassName(classOf[BlueprintActorSystemFactory].getName) + + factory.setDestroyMethod(DESTROY_METHOD_NAME) + + factory.addArgument(new BeanArgumentImpl(new RefMetadataImpl(BUNDLE_CONTEXT_REFID), classOf[BundleContext].getName, -1)) + factory.addArgument(new BeanArgumentImpl(new ValueMetadataImpl(name), classOf[String].getName, -1)) + factory.setProcessor(true) + context.getComponentDefinitionRegistry.registerComponentDefinition(factory) + factory + } + + /* + * Get the assigned id or generate a suitable id + */ def getId(context: ParserContext, element: Element) = { if (element.hasAttribute(ID_ATTRIBUTE)) { element.getAttribute(ID_ATTRIBUTE); } else { - generateId(context); + findAvailableId(context); } } - def generateId(context: ParserContext): String = { - var id = ""; - do { - idCounter += 1 - id = ".akka-" + idCounter; - } while (context.getComponentDefinitionRegistry().containsComponentDefinition(id)); - id; + /* + * Find the next available component id + */ + def findAvailableId(context: ParserContext): String = { + val id = ".akka-" + idCounter.incrementAndGet() + if (context.getComponentDefinitionRegistry.containsComponentDefinition(id)) { + // id already exists, let's try the next one + findAvailableId(context) + } else id } +} + +object NamespaceHandler { + + private val ID_ATTRIBUTE = "id"; + private val NAME_ATTRIBUTE = "name"; + + private val BUNDLE_CONTEXT_REFID = "blueprintBundleContext" + + private val ACTORSYSTEM_ELEMENT_NAME = "actor-system" + private val CONFIG_ELEMENT_NAME = "config" + + private val DESTROY_METHOD_NAME = "destroy" + private val FACTORY_METHOD_NAME = "create" } diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/ParserHelper.scala b/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/ParserHelper.scala new file mode 100644 index 0000000000..82fb7bc113 --- /dev/null +++ 
b/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/ParserHelper.scala @@ -0,0 +1,16 @@ +package akka.osgi.blueprint.aries + +import org.w3c.dom.{ Node, Element } + +/** + * Helper class to deal with the W3C DOM types + */ +object ParserHelper { + + def childElements(element: Element) = children(element).filter(_.getNodeType == Node.ELEMENT_NODE).asInstanceOf[Seq[Element]] + + private[this] def children(element: Element) = { + val nodelist = element.getChildNodes + for (index ← 0 until nodelist.getLength) yield nodelist.item(index) + } +} diff --git a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml new file mode 100644 index 0000000000..6bd3d49c9d --- /dev/null +++ b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml @@ -0,0 +1,13 @@ + + + + + + some.config { + key=value + } + + + + diff --git a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/injection.xml b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/injection.xml new file mode 100644 index 0000000000..9712ee6d1f --- /dev/null +++ b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/injection.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + diff --git a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml index d276ee86a0..a46834f74b 100644 --- a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml +++ b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml @@ -1,9 +1,7 @@ + xmlns:akka="http://akka.io/xmlns/blueprint/v1.0.0"> - - - + diff --git a/akka-osgi/src/test/resources/logback-test.xml b/akka-osgi/src/test/resources/logback-test.xml new file mode 100644 index 0000000000..9c441a6fb6 --- /dev/null +++ b/akka-osgi/src/test/resources/logback-test.xml @@ -0,0 +1,23 @@ + + + + + + + %date{ISO8601} %-5level %logger %X{akkaSource} %X{sourceThread} - %msg%n + + + + + target/akka-osgi.log + true 
+ + %date{ISO8601} %-5level %logger %X{akkaSource} %X{sourceThread} - %msg%n + + + + + + + + diff --git a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala index 34472e3537..0b2fdd19ac 100644 --- a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala +++ b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala @@ -1,53 +1,28 @@ package akka.osgi -import java.util.{ ServiceLoader, HashMap } -import de.kalpatec.pojosr.framework.launch.{ ClasspathScanner, PojoServiceRegistryFactory } import org.scalatest.FlatSpec -import org.osgi.framework.BundleContext -import akka.actor.{ Actor, Props, ActorSystem } +import akka.actor.ActorSystem import akka.pattern.ask import akka.dispatch.Await import akka.util.duration._ import akka.util.Timeout +import de.kalpatec.pojosr.framework.launch.BundleDescriptor +import test.TestActorSystemActivator +import test.PingPong._ +import PojoSRTestSupport.bundle /** * Test cases for {@link ActorSystemActivator} */ -class ActorSystemActivatorTest extends FlatSpec { +class ActorSystemActivatorTest extends FlatSpec with PojoSRTestSupport { - abstract class TestMessage + val TEST_BUNDLE_NAME = "akka.osgi.test.activator" - case object Ping extends TestMessage - case object Pong extends TestMessage + val testBundles: Seq[BundleDescriptor] = Seq( + bundle(TEST_BUNDLE_NAME).withActivator(classOf[TestActorSystemActivator])) - class PongActor extends Actor { - def receive = { - case Ping ⇒ - sender ! 
Pong - } - } - - lazy val context: BundleContext = { - val config = new HashMap[String, AnyRef](); - val loader = ServiceLoader.load(classOf[PojoServiceRegistryFactory]); - val registry = loader.iterator().next().newPojoServiceRegistry(config); - registry.getBundleContext - } - - val activator = new ActorSystemActivator { - def configure(system: ActorSystem) { - system.actorOf(Props(new PongActor), name = "pong") - } - } - - "ActorSystemActivator" should "start and register the ActorSystem on start" in { - - activator.start(context) - - val reference = context.getServiceReference(classOf[ActorSystem].getName) - assert(reference != null) - - val system = context.getService(reference).asInstanceOf[ActorSystem] + "ActorSystemActivator" should "start and register the ActorSystem when bundle starts" in { + val system = serviceForType[ActorSystem] val actor = system.actorFor("/user/pong") implicit val timeout = Timeout(5 seconds) @@ -56,14 +31,11 @@ class ActorSystemActivatorTest extends FlatSpec { assert(result != null) } - it should "stop the ActorSystem on bundle stop" in { - val reference = context.getServiceReference(classOf[ActorSystem].getName) - assert(reference != null) - - val system = context.getService(reference).asInstanceOf[ActorSystem] + it should "stop the ActorSystem when bundle stops" in { + val system = serviceForType[ActorSystem] assert(!system.isTerminated) - activator.stop(context) + bundleForName(TEST_BUNDLE_NAME).stop() system.awaitTermination() assert(system.isTerminated) diff --git a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala new file mode 100644 index 0000000000..cbed282c18 --- /dev/null +++ b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala @@ -0,0 +1,150 @@ +package akka.osgi + +import de.kalpatec.pojosr.framework.launch.{ BundleDescriptor, PojoServiceRegistryFactory, ClasspathScanner } + +import scala.collection.JavaConversions.seqAsJavaList +import 
scala.collection.JavaConversions.collectionAsScalaIterable +import org.apache.commons.io.IOUtils.copy + +import org.osgi.framework._ +import java.net.URL + +import java.util.jar.JarInputStream +import java.io.{ FileInputStream, FileOutputStream, File } +import java.util.{ Date, ServiceLoader, HashMap } +import org.scalatest.{ BeforeAndAfterAll, Suite } + +/** + * Trait that provides support for building akka-osgi tests using PojoSR + */ +trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { + + val MAX_WAIT_TIME = 8000; + val START_WAIT_TIME = 100; + + implicit def buildBundleDescriptor(builder: BundleDescriptorBuilder) = builder.build + + /** + * All bundles being found on the test classpath are automatically installed and started in the PojoSR runtime. + * Implement this to define the extra bundles that should be available for testing. + */ + val testBundles: Seq[BundleDescriptor] + + lazy val context: BundleContext = { + val config = new HashMap[String, AnyRef](); + System.setProperty("org.osgi.framework.storage", "target/akka-osgi/" + System.currentTimeMillis) + + val bundles = new ClasspathScanner().scanForBundles() + bundles.addAll(testBundles) + config.put(PojoServiceRegistryFactory.BUNDLE_DESCRIPTORS, bundles); + + val loader: ServiceLoader[PojoServiceRegistryFactory] = ServiceLoader.load(classOf[PojoServiceRegistryFactory]) + + val registry = loader.iterator.next.newPojoServiceRegistry(config) + registry.getBundleContext + } + + // Ensure bundles get stopped at the end of the test to release resources and stop threads + override protected def afterAll() = context.getBundles.foreach(_.stop) + + /** + * Convenience method to find a bundle by symbolic name + */ + def bundleForName(name: String) = context.getBundles.find(_.getSymbolicName == name) match { + case Some(bundle) ⇒ bundle + case None ⇒ fail("Unable to find bundle with symbolic name %s".format(name)) + } + + /** + * Convenience method to find a service by interface. 
If the service is not already available in the OSGi Service + * Registry, this method will wait for a few seconds for the service to appear. + */ + def serviceForType[T](implicit manifest: Manifest[T]): T = { + val reference = awaitReference(manifest.erasure) + context.getService(reference).asInstanceOf[T] + } + + def awaitReference(serviceType: Class[_]): ServiceReference = awaitReference(serviceType, START_WAIT_TIME) + + def awaitReference(serviceType: Class[_], wait: Long): ServiceReference = { + val option = Option(context.getServiceReference(serviceType.getName)) + option match { + case Some(reference) ⇒ reference; + case None if (wait > MAX_WAIT_TIME) ⇒ fail("Gave up waiting for service of type %s".format(serviceType)) + case None ⇒ { + Thread.sleep(wait); + awaitReference(serviceType, wait * 2); + } + } + } +} + +object PojoSRTestSupport { + + /** + * Convenience method to define additional test bundles + */ + def bundle(name: String) = new BundleDescriptorBuilder(name) + +} + +/** + * Helper class to make it easier to define test bundles + */ +class BundleDescriptorBuilder(name: String) { + + import org.ops4j.pax.tinybundles.core.TinyBundles + + val tinybundle = TinyBundles.bundle.set(Constants.BUNDLE_SYMBOLICNAME, name) + + def withBlueprintFile(name: String, contents: URL) = + returnBuilder(tinybundle.add("OSGI-INF/blueprint/%s".format(name), contents)) + + def withBlueprintFile(contents: URL): BundleDescriptorBuilder = withBlueprintFile(filename(contents), contents) + + def withActivator(activator: Class[_ <: BundleActivator]) = + returnBuilder(tinybundle.set(Constants.BUNDLE_ACTIVATOR, activator.getName)) + + def returnBuilder(block: ⇒ Unit) = { + block + this + } + + def build = { + val file: File = tinybundleToJarFile(name) + + new BundleDescriptor( + getClass().getClassLoader(), + new URL("jar:" + file.toURI().toString() + "!/"), + extractHeaders(file)); + } + + def extractHeaders(file: File): HashMap[String, String] = { + val headers = new 
HashMap[String, String](); + + val jis = new JarInputStream(new FileInputStream(file)); + try { + for (entry ← jis.getManifest().getMainAttributes().entrySet()) { + headers.put(entry.getKey().toString(), entry.getValue().toString()); + } + } finally { + jis.close() + } + + headers + } + + def tinybundleToJarFile(name: String): File = { + val file = new File("target/%s-%tQ.jar".format(name, new Date())); + val fos = new FileOutputStream(file); + try { + copy(tinybundle.build(), fos); + } finally { + fos.close(); + } + file + } + + private[this] def filename(url: URL) = url.getFile.split("/").last +} + diff --git a/akka-osgi/src/test/scala/akka/osgi/blueprint/aries/NamespaceHandlerTest.scala b/akka-osgi/src/test/scala/akka/osgi/blueprint/aries/NamespaceHandlerTest.scala new file mode 100644 index 0000000000..bbe26b5e92 --- /dev/null +++ b/akka-osgi/src/test/scala/akka/osgi/blueprint/aries/NamespaceHandlerTest.scala @@ -0,0 +1,94 @@ +package akka.osgi.blueprint.aries + +import org.scalatest.FlatSpec +import akka.actor.ActorSystem +import de.kalpatec.pojosr.framework.launch.BundleDescriptor +import akka.osgi.PojoSRTestSupport +import akka.osgi.PojoSRTestSupport.bundle +import akka.osgi.test.ActorSystemAwareBean + +/** + * Test cases for {@link ActorSystemActivator} + */ +object NamespaceHandlerTest { + + /* + * Bundle-SymbolicName to easily find our test bundle + */ + val TEST_BUNDLE_NAME = "akka.osgi.test.aries.namespace" + + /* + * Bundle descriptor representing the akka-osgi bundle itself + */ + val AKKA_OSGI_BLUEPRINT = + bundle("akka-osgi").withBlueprintFile(getClass.getResource("/OSGI-INF/blueprint/akka-namespacehandler.xml")) + +} + +class SimpleNamespaceHandlerTest extends FlatSpec with PojoSRTestSupport { + + import NamespaceHandlerTest._ + + val testBundles: Seq[BundleDescriptor] = Seq( + AKKA_OSGI_BLUEPRINT, + bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("simple.xml"))) + + "simple.xml" should "set up ActorSystem when bundle starts" in { + 
val system = serviceForType[ActorSystem] + assert(system != null) + } + + it should "stop the ActorSystem when bundle stops" in { + val system = serviceForType[ActorSystem] + assert(!system.isTerminated) + + bundleForName(TEST_BUNDLE_NAME).stop() + + system.awaitTermination() + assert(system.isTerminated) + } + +} + +class ConfigNamespaceHandlerTest extends FlatSpec with PojoSRTestSupport { + + import NamespaceHandlerTest._ + + val testBundles: Seq[BundleDescriptor] = Seq( + AKKA_OSGI_BLUEPRINT, + bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("config.xml"))) + + "config.xml" should "set up ActorSystem when bundle starts" in { + val system = serviceForType[ActorSystem] + assert(system != null) + + assert(system.settings.config.getString("some.config.key") == "value") + } + + it should "stop the ActorSystem when bundle stops" in { + val system = serviceForType[ActorSystem] + assert(!system.isTerminated) + + bundleForName(TEST_BUNDLE_NAME).stop() + + system.awaitTermination() + assert(system.isTerminated) + } + +} + +class DependencyInjectionNamespaceHandlerTest extends FlatSpec with PojoSRTestSupport { + + import NamespaceHandlerTest._ + + val testBundles: Seq[BundleDescriptor] = Seq( + AKKA_OSGI_BLUEPRINT, + bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("injection.xml"))) + + "injection.xml" should "set up bean containing ActorSystem" in { + val bean = serviceForType[ActorSystemAwareBean] + assert(bean != null) + assert(bean.system != null) + } + +} diff --git a/akka-osgi/src/test/scala/akka/osgi/test/ActorSystemAwareBean.scala b/akka-osgi/src/test/scala/akka/osgi/test/ActorSystemAwareBean.scala new file mode 100644 index 0000000000..ca0df7cc04 --- /dev/null +++ b/akka-osgi/src/test/scala/akka/osgi/test/ActorSystemAwareBean.scala @@ -0,0 +1,11 @@ +package akka.osgi.test + +import akka.actor.ActorSystem + +/** + * Just a simple POJO that can contain an actor system. 
+ * Used for testing dependency injection with Blueprint + */ +class ActorSystemAwareBean(val system: ActorSystem) { + +} diff --git a/akka-osgi/src/test/scala/akka/osgi/test/PingPong.scala b/akka-osgi/src/test/scala/akka/osgi/test/PingPong.scala new file mode 100644 index 0000000000..6a7409c667 --- /dev/null +++ b/akka-osgi/src/test/scala/akka/osgi/test/PingPong.scala @@ -0,0 +1,22 @@ +package akka.osgi.test + +import akka.actor.Actor + +/** + * Simple ping-pong actor, used for testing + */ +object PingPong { + + abstract class TestMessage + + case object Ping extends TestMessage + case object Pong extends TestMessage + + class PongActor extends Actor { + def receive = { + case Ping ⇒ + sender ! Pong + } + } + +} diff --git a/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala b/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala new file mode 100644 index 0000000000..2a44e91e4a --- /dev/null +++ b/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala @@ -0,0 +1,16 @@ +package akka.osgi.test + +import akka.osgi.ActorSystemActivator +import akka.actor.{ Props, ActorSystem } +import PingPong._ + +/** + * Sample ActorSystemActivator implementation used for testing purposes + */ +class TestActorSystemActivator extends ActorSystemActivator { + + def configure(system: ActorSystem) { + system.actorOf(Props(new PongActor), name = "pong") + } + +} diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 79a2aeb694..7b3f5bf65c 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -480,7 +480,7 @@ object Dependencies { val camel = Seq(camelCore, Test.scalatest, Test.junit, Test.mockito) - val osgi = Seq(osgiCore, ariesBlueprint, Runtime.logback, Test.pojosr, Test.tinybundles, Test.scalatest, Test.junit) + val osgi = Seq(osgiCore, ariesBlueprint, Runtime.logback, Test.ariesProxy, Test.commonsIo, Test.pojosr, Test.tinybundles, Test.scalatest, Test.junit) val tutorials = Seq(Test.scalatest, 
Test.junit) @@ -505,7 +505,7 @@ object Dependency { } // Compile - val ariesBlueprint = "org.apache.aries.blueprint" % "org.apache.aries.blueprint" % "0.3.1" // ApacheV2 + val ariesBlueprint = "org.apache.aries.blueprint" % "org.apache.aries.blueprint" % "0.3.2" // ApacheV2 val config = "com.typesafe" % "config" % "0.4.1" // ApacheV2 val camelCore = "org.apache.camel" % "camel-core" % V.Camel // ApacheV2 val netty = "io.netty" % "netty" % V.Netty // ApacheV2 @@ -518,12 +518,13 @@ object Dependency { // Test object Test { + val ariesProxy = "org.apache.aries.proxy" % "org.apache.aries.proxy.impl" % "0.3" % "test" // ApacheV2 val commonsMath = "org.apache.commons" % "commons-math" % "2.1" % "test" // ApacheV2 - val commonsIo = "commons-io" % "commons-io" % "2.0.1" % "test"// ApacheV2 + val commonsIo = "commons-io" % "commons-io" % "2.0.1" % "test"// ApacheV2 val junit = "junit" % "junit" % "4.5" % "test" // Common Public License 1.0 val logback = "ch.qos.logback" % "logback-classic" % V.Logback % "test" // EPL 1.0 / LGPL 2.1 val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" // MIT - val pojosr = "com.googlecode.pojosr" % "de.kalpatec.pojosr.framework" % "0.1.8" % "test" // ApacheV2 + val pojosr = "com.googlecode.pojosr" % "de.kalpatec.pojosr.framework" % "0.1.4" % "test" // ApacheV2 val scalatest = "org.scalatest" % "scalatest_2.9.1" % V.Scalatest % "test" // ApacheV2 val scalacheck = "org.scala-tools.testing" % "scalacheck_2.9.1" % "1.9" % "test" // New BSD val specs2 = "org.specs2" % "specs2_2.9.1" % "1.9" % "test" // Modified BSD / ApacheV2 @@ -548,7 +549,10 @@ object OSGi { val mailboxesCommon = exports(Seq("akka.actor.mailbox.*")) - val osgi = exports(Seq("akka.osgi.*")) + val osgi = exports(Seq("akka.osgi.*")) ++ Seq( + OsgiKeys.importPackage := Seq("org.apache.aries.blueprint.*;resolution:=optional", + "org.osgi.service.blueprint.*;resolution:=optional") ++ defaultImports + ) val remote = exports(Seq("akka.remote.*", "akka.routing.*", 
"akka.serialization.*")) @@ -559,10 +563,11 @@ object OSGi { val zeroMQ = exports(Seq("akka.zeromq.*")) def exports(packages: Seq[String]) = osgiSettings ++ Seq( - OsgiKeys.importPackage := Seq("!sun.misc", akkaImport(), configImport(), scalaImport(), "*"), + OsgiKeys.importPackage := defaultImports, OsgiKeys.exportPackage := packages ) + def defaultImports = Seq("!sun.misc", akkaImport(), configImport(), scalaImport(), "*") def akkaImport(packageName: String = "akka.*") = "%s;version=\"[2.1,2.2)\"".format(packageName) def configImport(packageName: String = "com.typesafe.config.*") = "%s;version=\"[0.4.1,0.5)\"".format(packageName) def scalaImport(packageName: String = "scala.*") = "%s;version=\"[2.9.2,2.10)\"".format(packageName) From cfbf13ec2a042760cb35c21dc8972550c90eff2b Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Tue, 12 Jun 2012 16:22:21 +0200 Subject: [PATCH 363/538] Using Test.logback instead of Runtime.logback --- project/AkkaBuild.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 7b3f5bf65c..7b2b7846e9 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -480,7 +480,7 @@ object Dependencies { val camel = Seq(camelCore, Test.scalatest, Test.junit, Test.mockito) - val osgi = Seq(osgiCore, ariesBlueprint, Runtime.logback, Test.ariesProxy, Test.commonsIo, Test.pojosr, Test.tinybundles, Test.scalatest, Test.junit) + val osgi = Seq(osgiCore, ariesBlueprint, Test.logback, Test.ariesProxy, Test.commonsIo, Test.pojosr, Test.tinybundles, Test.scalatest, Test.junit) val tutorials = Seq(Test.scalatest, Test.junit) From f33c45090dfd03717148f15f03edaf72a7a3e2bd Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Tue, 12 Jun 2012 16:57:25 +0200 Subject: [PATCH 364/538] Update docs and use Option[String] in the activator instead of null --- akka-docs/additional/code/osgi/blueprint.xml | 12 ++++++++---- .../main/scala/akka/osgi/ActorSystemActivator.scala | 6 +++--- 2 
files changed, 11 insertions(+), 7 deletions(-) diff --git a/akka-docs/additional/code/osgi/blueprint.xml b/akka-docs/additional/code/osgi/blueprint.xml index f817da85b0..8fcedb990c 100644 --- a/akka-docs/additional/code/osgi/blueprint.xml +++ b/akka-docs/additional/code/osgi/blueprint.xml @@ -2,9 +2,13 @@ - - - - + + + + some.config { + key=value + } + + diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index ef04607976..7f60aebccc 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -9,10 +9,10 @@ import java.util.Properties * Abstract {@link BundleActivator} implementation to bootstrap and configure an {@link ActorSystem} in an * OSGi environment. */ -abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ String) extends BundleActivator { +abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) extends BundleActivator { - def this() = this({ context: BundleContext ⇒ null }) - def this(name: String) = this({ context: BundleContext ⇒ name }) + def this() = this({ context: BundleContext ⇒ None }) + def this(name: String) = this({ context: BundleContext ⇒ Some(name) }) var system: ActorSystem = null From 0c21bb9170807cfdbf53966512865da874affd77 Mon Sep 17 00:00:00 2001 From: Szabolcs Berecz Date: Tue, 12 Jun 2012 21:49:55 +0200 Subject: [PATCH 365/538] #2156 - sbt plugin: project dependencies should be included automatically --- akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala b/akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala index 08826fa5dd..835a596a4a 100644 --- a/akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala +++ b/akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala @@ -75,7 +75,9 @@ object AkkaKernelPlugin 
extends Plugin { copyFiles(libFiles(cp, conf.libFilter), distLibPath) copyFiles(conf.additionalLibs, distLibPath) - for (subTarget ← subProjectDependencies.map(_.target)) { + for (subProjectDependency ← subProjectDependencies) { + val subTarget = subProjectDependency.target + EvaluateTask(buildStruct, packageBin in Compile, st, subProjectDependency.projectRef) copyJars(subTarget, distLibPath) } log.info("Distribution created.") @@ -220,10 +222,10 @@ object AkkaKernelPlugin extends Plugin { }.toList val target = setting(Keys.crossTarget, "Missing crossTarget directory") - SubProjectInfo(project.id, target, subProjects) + SubProjectInfo(projectRef, target, subProjects) } - private case class SubProjectInfo(id: String, target: File, subProjects: Seq[SubProjectInfo]) { + private case class SubProjectInfo(projectRef: ProjectRef, target: File, subProjects: Seq[SubProjectInfo]) { def recursiveSubProjects: Set[SubProjectInfo] = { val flatSubProjects = for { From 42c5281d5a927882106e478b73dfb1aaea63cdf0 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 13 Jun 2012 09:37:10 +0200 Subject: [PATCH 366/538] Correct? implementation of merge and other actions, see #2077 * Merge unreachable using highestPriorityOf * Avoid merge result in node existing in both members and unreachable * Fix joining only allowed when !alreadyMember && !isUnreachable (non Down) * Fix filter bug of unreachable in downing and leaderActions * Minor cleanups --- .../src/main/scala/akka/cluster/Cluster.scala | 143 ++++++++---------- .../test/scala/akka/cluster/GossipSpec.scala | 53 +++++-- 2 files changed, 104 insertions(+), 92 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 571a8eaf68..9f241b684d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -220,25 +220,30 @@ case class Gossip( // 1. 
merge vector clocks val mergedVClock = this.version merge that.version - // 2. group all members by Address => Seq[Member] - val membersGroupedByAddress = (this.members.toSeq ++ that.members.toSeq).groupBy(_.address) - - // 3. merge members by selecting the single Member with highest MemberStatus out of the Member groups - val mergedMembers = - Gossip.emptyMembers ++ - membersGroupedByAddress.values.foldLeft(Vector.empty[Member]) { (acc, members) ⇒ - acc :+ members.reduceLeft(Member.highestPriorityOf(_, _)) - } - - // 4. merge meta-data + // 2. merge meta-data val mergedMeta = this.meta ++ that.meta - // 5. merge gossip overview - val mergedOverview = GossipOverview( - this.overview.seen ++ that.overview.seen, - this.overview.unreachable ++ that.overview.unreachable) + def reduceHighestPriority(a: Seq[Member], b: Seq[Member]): Set[Member] = { + // group all members by Address => Seq[Member] + val groupedByAddress = (a ++ b).groupBy(_.address) + // pick highest MemberStatus + (groupedByAddress.values.foldLeft(Vector.empty[Member]) { (acc, members) ⇒ + acc :+ members.reduceLeft(Member.highestPriorityOf(_, _)) + }).toSet + } - Gossip(mergedOverview, mergedMembers, mergedMeta, mergedVClock) + // 3. merge unreachable by selecting the single Member with highest MemberStatus out of the Member groups + val mergedUnreachable = reduceHighestPriority(this.overview.unreachable.toSeq, that.overview.unreachable.toSeq) + + // 4. merge members by selecting the single Member with highest MemberStatus out of the Member groups, + // and exclude unreachable + val mergedMembers = Gossip.emptyMembers ++ reduceHighestPriority(this.members.toSeq, that.members.toSeq). + filterNot(m ⇒ mergedUnreachable.contains(m)) + + // 5. merge seen (FIXME is this correct?) 
+ val mergedSeen = this.overview.seen ++ that.overview.seen + + Gossip(GossipOverview(mergedSeen, mergedUnreachable), mergedMembers, mergedMeta, mergedVClock) } override def toString = @@ -648,11 +653,17 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members + val localUnreachable = localGossip.overview.unreachable - if (!localMembers.exists(_.address == node)) { + val alreadyMember = localMembers.exists(_.address == node) + val isUnreachable = localUnreachable.exists { m ⇒ + m.address == node && m.status != MemberStatus.Down && m.status != MemberStatus.Removed + } + + if (!alreadyMember && !isUnreachable) { // remove the node from the 'unreachable' set in case it is a DOWN node that is rejoining cluster - val newUnreachableMembers = localGossip.overview.unreachable filterNot { _.address == node } + val newUnreachableMembers = localUnreachable filterNot { _.address == node } val newOverview = localGossip.overview copy (unreachable = newUnreachableMembers) val newMembers = localMembers + Member(node, MemberStatus.Joining) // add joining node as Joining @@ -719,8 +730,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * The node to DOWN is removed from the 'members' set and put in the 'unreachable' set (if not alread there) - * and its status is set to DOWN. The node is alo removed from the 'seen' table. + * The node to DOWN is removed from the 'members' set and put in the 'unreachable' set (if not already there) + * and its status is set to DOWN. The node is also removed from the 'seen' table. * * The node will reside as DOWN in the 'unreachable' set until an explicit command JOIN command is sent directly * to this node and it will then go through the normal JOINING procedure. 
@@ -735,42 +746,34 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localUnreachableMembers = localOverview.unreachable // 1. check if the node to DOWN is in the 'members' set - var downedMember: Option[Member] = None - val newMembers = - localMembers - .map { member ⇒ - if (member.address == address) { - log.info("Cluster Node [{}] - Marking node [{}] as DOWN", selfAddress, member.address) - val newMember = member copy (status = MemberStatus.Down) - downedMember = Some(newMember) - newMember - } else member - } - .filter(_.status != MemberStatus.Down) + val downedMember: Option[Member] = localMembers.find(_.address == address).map(m ⇒ m.copy(status = MemberStatus.Down)) + val newMembers = downedMember match { + case Some(m) ⇒ + log.info("Cluster Node [{}] - Marking node [{}] as DOWN", selfAddress, m.address) + localMembers - m + case None ⇒ localMembers + } // 2. check if the node to DOWN is in the 'unreachable' set val newUnreachableMembers = - localUnreachableMembers - .filter(_.status != MemberStatus.Down) // no need to DOWN members already DOWN - .map { member ⇒ - if (member.address == address) { - log.info("Cluster Node [{}] - Marking unreachable node [{}] as DOWN", selfAddress, member.address) - member copy (status = MemberStatus.Down) - } else member - } + localUnreachableMembers.map { member ⇒ + // no need to DOWN members already DOWN + if (member.address == address && member.status != MemberStatus.Down) { + log.info("Cluster Node [{}] - Marking unreachable node [{}] as DOWN", selfAddress, member.address) + member copy (status = MemberStatus.Down) + } else member + } // 3. add the newly DOWNED members from the 'members' (in step 1.) to the 'newUnreachableMembers' set. - val newUnreachablePlusNewlyDownedMembers = downedMember match { - case Some(member) ⇒ newUnreachableMembers + member - case None ⇒ newUnreachableMembers - } + val newUnreachablePlusNewlyDownedMembers = newUnreachableMembers ++ downedMember // 4. 
remove nodes marked as DOWN from the 'seen' table - val newSeen = newUnreachablePlusNewlyDownedMembers.foldLeft(localSeen) { (currentSeen, member) ⇒ - currentSeen - member.address + val newSeen = localSeen -- newUnreachablePlusNewlyDownedMembers.collect { + case m if m.status == MemberStatus.Down ⇒ m.address } - val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachablePlusNewlyDownedMembers) // update gossip overview + // update gossip overview + val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachablePlusNewlyDownedMembers) val newGossip = localGossip copy (overview = newOverview, members = newMembers) // update gossip val versionedGossip = newGossip + vclockNode val newState = localState copy (latestGossip = versionedGossip seen selfAddress) @@ -831,36 +834,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) */ private def autoJoin(): Unit = nodeToJoin foreach join - /** - * Switches the member status. 
- * - * @param newStatus the new member status - * @param oldState the state to change the member status in - * @return the updated new state with the new member status - */ - private def switchMemberStatusTo(newStatus: MemberStatus, state: State): State = { - log.info("Cluster Node [{}] - Switching membership status to [{}]", selfAddress, newStatus) - - val localSelf = self - - val localGossip = state.latestGossip - val localMembers = localGossip.members - - // change my state into a "new" self - val newSelf = localSelf copy (status = newStatus) - - // change my state in 'gossip.members' - val newMembers = localMembers map { member ⇒ if (member.address == selfAddress) newSelf else member } - - val newGossip = localGossip copy (members = newMembers) - - // version my changes - val versionedGossip = newGossip + vclockNode - val seenVersionedGossip = versionedGossip seen selfAddress - - state copy (latestGossip = seenVersionedGossip) - } - /** * INTERNAL API * @@ -985,8 +958,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (newlyDetectedUnreachableMembers.nonEmpty) { // we have newly detected members marked as unavailable - val newMembers = localMembers diff newlyDetectedUnreachableMembers - val newUnreachableMembers: Set[Member] = localUnreachableMembers ++ newlyDetectedUnreachableMembers + val newMembers = localMembers -- newlyDetectedUnreachableMembers + val newUnreachableMembers = localUnreachableMembers ++ newlyDetectedUnreachableMembers val newOverview = localOverview copy (unreachable = newUnreachableMembers) val newGossip = localGossip copy (overview = newOverview, members = newMembers) @@ -1090,16 +1063,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // 4. 
Move UNREACHABLE => DOWN (auto-downing by leader) // ---------------------- val newUnreachableMembers = - localUnreachableMembers - .filter(_.status != MemberStatus.Down) // no need to DOWN members already DOWN - .map { member ⇒ + localUnreachableMembers.map { member ⇒ + // no need to DOWN members already DOWN + if (member.status == MemberStatus.Down) member + else { log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address) hasChangedState = true member copy (status = MemberStatus.Down) } + } // removing nodes marked as DOWN from the 'seen' table - val newSeen = localUnreachableMembers.foldLeft(localSeen)((currentSeen, member) ⇒ currentSeen - member.address) + val newSeen = localSeen -- newUnreachableMembers.collect { + case m if m.status == MemberStatus.Down ⇒ m.address + } val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview localGossip copy (overview = newOverview) // update gossip diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 77cd0c52ba..985b6d5a89 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -12,18 +12,20 @@ import scala.collection.immutable.SortedSet @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class GossipSpec extends WordSpec with MustMatchers { + import MemberStatus._ + + val a1 = Member(Address("akka", "sys", "a", 2552), Up) + val a2 = Member(Address("akka", "sys", "a", 2552), Joining) + val b1 = Member(Address("akka", "sys", "b", 2552), Up) + val b2 = Member(Address("akka", "sys", "b", 2552), Removed) + val c1 = Member(Address("akka", "sys", "c", 2552), Leaving) + val c2 = Member(Address("akka", "sys", "c", 2552), Up) + val d1 = Member(Address("akka", "sys", "d", 2552), Leaving) + val d2 = Member(Address("akka", "sys", "d", 2552), 
Removed) + "A Gossip" must { "merge members by status priority" in { - import MemberStatus._ - val a1 = Member(Address("akka", "sys", "a", 2552), Up) - val a2 = Member(Address("akka", "sys", "a", 2552), Joining) - val b1 = Member(Address("akka", "sys", "b", 2552), Up) - val b2 = Member(Address("akka", "sys", "b", 2552), Removed) - val c1 = Member(Address("akka", "sys", "c", 2552), Leaving) - val c2 = Member(Address("akka", "sys", "c", 2552), Up) - val d1 = Member(Address("akka", "sys", "d", 2552), Leaving) - val d2 = Member(Address("akka", "sys", "d", 2552), Removed) val g1 = Gossip(members = SortedSet(a1, b1, c1, d1)) val g2 = Gossip(members = SortedSet(a2, b2, c2, d2)) @@ -38,5 +40,38 @@ class GossipSpec extends WordSpec with MustMatchers { } + "merge unreachable by status priority" in { + + val g1 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = SortedSet(a1, b1, c1, d1))) + val g2 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = SortedSet(a2, b2, c2, d2))) + + val merged1 = g1 merge g2 + merged1.overview.unreachable must be(Set(a1, b2, c1, d2)) + merged1.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + + val merged2 = g2 merge g1 + merged2.overview.unreachable must be(Set(a1, b2, c1, d2)) + merged2.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + + } + + "merge by excluding unreachable from members" in { + val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = SortedSet(c1, d1))) + val g2 = Gossip(members = SortedSet(a2, c2), overview = GossipOverview(unreachable = SortedSet(b2, d2))) + + val merged1 = g1 merge g2 + merged1.members must be(SortedSet(a1)) + merged1.members.toSeq.map(_.status) must be(Seq(Up)) + merged1.overview.unreachable must be(Set(b2, c1, d2)) + merged1.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Removed, Leaving, Removed)) + + val merged2 = g2 
merge g1 + merged2.members must be(SortedSet(a1)) + merged2.members.toSeq.map(_.status) must be(Seq(Up)) + merged2.overview.unreachable must be(Set(b2, c1, d2)) + merged2.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Removed, Leaving, Removed)) + + } + } } From 9e265f5c5482a5d98ce24a341f5318be662ab14f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 10:48:54 +0200 Subject: [PATCH 367/538] Proposal to make it possible to fully discard the receive and replace it with become, unbecome then reverts to receive if no behavior left --- .../src/main/scala/akka/actor/ActorCell.scala | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 9dbe610195..893c81ac91 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -184,7 +184,7 @@ private[akka] object ActorCell { final val emptyReceiveTimeoutData: (Long, Cancellable) = (-1, emptyCancellable) - final val behaviorStackPlaceHolder: Stack[Actor.Receive] = Stack.empty.push(Actor.emptyBehavior) + final val behaviorStackPlaceHolder: Stack[Actor.Receive] = Stack.empty final val emptyActorRefSet: Set[ActorRef] = TreeSet.empty @@ -521,10 +521,9 @@ private[akka] class ActorCell( if (instance eq null) throw new ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") - behaviorStack = behaviorStack match { - case `behaviorStackPlaceHolder` ⇒ Stack.empty.push(instance.receive) - case newBehaviors ⇒ Stack.empty.push(instance.receive).pushAll(newBehaviors.reverse.drop(1)) - } + // If no becomes were issued, the actors behavior is its receive method + if (behaviorStack eq behaviorStackPlaceHolder) + behaviorStack = Stack.empty.push(instance.receive) instance } finally { val stackAfter = contextStack.get @@ -683,10 +682,8 @@ private[akka] class ActorCell( } } - def 
become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = { - if (discardOld) unbecome() - behaviorStack = behaviorStack.push(behavior) - } + def become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = + behaviorStack = (if (discardOld && behaviorStack.nonEmpty) behaviorStack.pop else behaviorStack).push(behavior) /** * UntypedActorContext impl @@ -701,8 +698,8 @@ private[akka] class ActorCell( def unbecome(): Unit = { val original = behaviorStack - val popped = original.pop - behaviorStack = if (popped.isEmpty) original else popped + behaviorStack = if (original.isEmpty || original.pop.isEmpty) Stack.empty[Actor.Receive].push(actor.receive) + else original.pop } def autoReceiveMessage(msg: Envelope): Unit = { From b1fe6c709c783c0351c6c7c6195d7381bbc8901e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 10:55:47 +0200 Subject: [PATCH 368/538] Formatting --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 893c81ac91..285c31b49d 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -698,8 +698,9 @@ private[akka] class ActorCell( def unbecome(): Unit = { val original = behaviorStack - behaviorStack = if (original.isEmpty || original.pop.isEmpty) Stack.empty[Actor.Receive].push(actor.receive) - else original.pop + behaviorStack = + if (original.isEmpty || original.pop.isEmpty) Stack.empty.push(actor.receive) + else original.pop } def autoReceiveMessage(msg: Envelope): Unit = { From ff5c99a80d4e595dfa7ab2ae585f624e4e4c7f0d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 13 Jun 2012 11:04:27 +0200 Subject: [PATCH 369/538] Minor cleanup, based on review comments, see #2077 --- .../src/main/scala/akka/cluster/Cluster.scala | 12 +++++++----- 1 file changed, 7 
insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 9f241b684d..17842453bb 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -227,9 +227,9 @@ case class Gossip( // group all members by Address => Seq[Member] val groupedByAddress = (a ++ b).groupBy(_.address) // pick highest MemberStatus - (groupedByAddress.values.foldLeft(Vector.empty[Member]) { (acc, members) ⇒ - acc :+ members.reduceLeft(Member.highestPriorityOf(_, _)) - }).toSet + (Set.empty[Member] /: groupedByAddress) { + case (acc, (_, members)) ⇒ acc + members.reduceLeft(Member.highestPriorityOf) + } } // 3. merge unreachable by selecting the single Member with highest MemberStatus out of the Member groups @@ -238,7 +238,7 @@ case class Gossip( // 4. merge members by selecting the single Member with highest MemberStatus out of the Member groups, // and exclude unreachable val mergedMembers = Gossip.emptyMembers ++ reduceHighestPriority(this.members.toSeq, that.members.toSeq). - filterNot(m ⇒ mergedUnreachable.contains(m)) + filterNot(mergedUnreachable.contains) // 5. merge seen (FIXME is this correct?) val mergedSeen = this.overview.seen ++ that.overview.seen @@ -746,7 +746,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localUnreachableMembers = localOverview.unreachable // 1. 
check if the node to DOWN is in the 'members' set - val downedMember: Option[Member] = localMembers.find(_.address == address).map(m ⇒ m.copy(status = MemberStatus.Down)) + val downedMember: Option[Member] = localMembers.collectFirst { + case m if m.address == address ⇒ m.copy(status = MemberStatus.Down) + } val newMembers = downedMember match { case Some(m) ⇒ log.info("Cluster Node [{}] - Marking node [{}] as DOWN", selfAddress, m.address) From f3d9f9c4e80e5f521baf4fa279b7de21ccd34b0c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 13 Jun 2012 11:19:06 +0200 Subject: [PATCH 370/538] Merge seen table by starting with empty seen after merge, see #2077 --- .../src/main/scala/akka/cluster/Cluster.scala | 4 ++-- .../src/test/scala/akka/cluster/GossipSpec.scala | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 17842453bb..07712d8ed9 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -240,8 +240,8 @@ case class Gossip( val mergedMembers = Gossip.emptyMembers ++ reduceHighestPriority(this.members.toSeq, that.members.toSeq). filterNot(mergedUnreachable.contains) - // 5. merge seen (FIXME is this correct?) - val mergedSeen = this.overview.seen ++ that.overview.seen + // 5. 
fresh seen table + val mergedSeen = Map.empty[Address, VectorClock] Gossip(GossipOverview(mergedSeen, mergedUnreachable), mergedMembers, mergedMeta, mergedVClock) } diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 985b6d5a89..8c790cf159 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -73,5 +73,17 @@ class GossipSpec extends WordSpec with MustMatchers { } + "start with fresh seen table after merge" in { + val g1 = Gossip(members = SortedSet(a1, b1, c1, d1)).seen(a1.address).seen(b1.address) + val g2 = Gossip(members = SortedSet(a2, b2, c2, d2)).seen(b2.address).seen(c2.address) + + val merged1 = g1 merge g2 + merged1.overview.seen.isEmpty must be(true) + + val merged2 = g2 merge g1 + merged2.overview.seen.isEmpty must be(true) + + } + } } From 6199556ced894b8cad7ed791cec435e6fc4716cb Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 11:39:04 +0200 Subject: [PATCH 371/538] Caching emptyBehaviorStack and remove all other uses of Stack.empty --- .../src/main/scala/akka/actor/ActorCell.scala | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 285c31b49d..d750b4964b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -184,7 +184,7 @@ private[akka] object ActorCell { final val emptyReceiveTimeoutData: (Long, Cancellable) = (-1, emptyCancellable) - final val behaviorStackPlaceHolder: Stack[Actor.Receive] = Stack.empty + final val emptyBehaviorStack: Stack[Actor.Receive] = Stack.empty final val emptyActorRefSet: Set[ActorRef] = TreeSet.empty @@ -408,7 +408,7 @@ private[akka] class ActorCell( var currentMessage: Envelope = _ var actor: Actor = _ - private var 
behaviorStack: Stack[Actor.Receive] = Stack.empty + private var behaviorStack: Stack[Actor.Receive] = emptyBehaviorStack @volatile var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status var nextNameSequence: Long = 0 var watching: Set[ActorRef] = emptyActorRefSet @@ -513,17 +513,16 @@ private[akka] class ActorCell( protected def newActor(): Actor = { contextStack.set(contextStack.get.push(this)) try { - import ActorCell.behaviorStackPlaceHolder + import ActorCell.emptyBehaviorStack - behaviorStack = behaviorStackPlaceHolder + behaviorStack = emptyBehaviorStack val instance = props.creator.apply() if (instance eq null) throw new ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") // If no becomes were issued, the actors behavior is its receive method - if (behaviorStack eq behaviorStackPlaceHolder) - behaviorStack = Stack.empty.push(instance.receive) + behaviorStack = if (behaviorStack.isEmpty) behaviorStack.push(instance.receive) else behaviorStack instance } finally { val stackAfter = contextStack.get @@ -699,7 +698,7 @@ private[akka] class ActorCell( def unbecome(): Unit = { val original = behaviorStack behaviorStack = - if (original.isEmpty || original.pop.isEmpty) Stack.empty.push(actor.receive) + if (original.isEmpty || original.pop.isEmpty) emptyBehaviorStack.push(actor.receive) else original.pop } @@ -759,7 +758,7 @@ private[akka] class ActorCell( if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(a), "stopped")) } finally { - behaviorStack = behaviorStackPlaceHolder + behaviorStack = emptyBehaviorStack clearActorFields(a) actor = null } From d3e2f529f3bdcd9098223e3fe5b92e35b8da8773 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 11:53:27 +0200 Subject: [PATCH 372/538] Removing a pointless import and the only Scala return statement in our codebase --- .../src/main/scala/akka/actor/ActorCell.scala | 2 
-- .../src/main/scala/akka/testkit/TestKit.scala | 28 ++++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index d750b4964b..39158b239d 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -513,8 +513,6 @@ private[akka] class ActorCell( protected def newActor(): Actor = { contextStack.set(contextStack.get.push(this)) try { - import ActorCell.emptyBehaviorStack - behaviorStack = emptyBehaviorStack val instance = props.creator.apply() diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index c0fb6e5267..4a5a880bb0 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -486,19 +486,21 @@ trait TestKitBase { @tailrec def doit(acc: List[T], count: Int): List[T] = { - if (count >= messages) return acc.reverse - receiveOne((stop - now) min idle) - lastMessage match { - case NullMessage ⇒ - lastMessage = msg - acc.reverse - case RealMessage(o, _) if (f isDefinedAt o) ⇒ - msg = lastMessage - doit(f(o) :: acc, count + 1) - case RealMessage(o, _) ⇒ - queue.offerFirst(lastMessage) - lastMessage = msg - acc.reverse + if (count >= messages) acc.reverse + else { + receiveOne((stop - now) min idle) + lastMessage match { + case NullMessage ⇒ + lastMessage = msg + acc.reverse + case RealMessage(o, _) if (f isDefinedAt o) ⇒ + msg = lastMessage + doit(f(o) :: acc, count + 1) + case RealMessage(o, _) ⇒ + queue.offerFirst(lastMessage) + lastMessage = msg + acc.reverse + } } } From 8ce6ac3e3e5e0c066e2db25d07a97bb21c26afd0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 12:23:02 +0200 Subject: [PATCH 373/538] #2217 - setting accessible = true before newInstance --- .../test/java/akka/actor/NonPublicClass.java | 
22 +++++++++++++++++++ .../test/scala/akka/actor/ActorRefSpec.scala | 7 ++++++ .../src/main/scala/akka/actor/Props.scala | 6 ++++- 3 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java diff --git a/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java b/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java new file mode 100644 index 0000000000..55f3910db7 --- /dev/null +++ b/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java @@ -0,0 +1,22 @@ +package akka.actor; + +import com.sun.xml.internal.ws.api.PropertySet; + +/** + * Created by IntelliJ IDEA. + * User: viktorklang + * Date: 6/13/12 + * Time: 12:12 PM + * To change this template use File | Settings | File Templates. + */ +public class NonPublicClass { + public static Props createProps() { + return new Props(MyNonPublicActorClass.class); + } +} + +class MyNonPublicActorClass extends UntypedActor { + @Override public void onReceive(Object msg) { + getSender().tell(msg); + } +} \ No newline at end of file diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala index bec066d97a..3056dc9e95 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala @@ -358,6 +358,13 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { system.stop(serverRef) } + "support actorOfs where the class of the actor isn't public" in { + val a = system.actorOf(NonPublicClass.createProps()) + a.tell("pigdog", testActor) + expectMsg("pigdog") + system stop a + } + "stop when sent a poison pill" in { val timeout = Timeout(20000) val ref = system.actorOf(Props(new Actor { diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index fc01a5ba36..91c4ced285 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala 
+++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -186,5 +186,9 @@ case class Props( * able to optimize serialization. */ private[akka] case class FromClassCreator(clazz: Class[_ <: Actor]) extends Function0[Actor] { - def apply(): Actor = clazz.newInstance + def apply(): Actor = { + val ctor = clazz.getDeclaredConstructor() + ctor.setAccessible(true) + ctor.newInstance() + } } From 463e62926ef9e087240ff2d27c02a68f3e5c9dbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Wed, 13 Jun 2012 13:52:58 +0200 Subject: [PATCH 374/538] Make test conductor barriers fail for all on timeouts and care about within() blocks. See #2218 --- .../testconductor/TestConductorProtocol.java | 154 +++++++++++++++--- .../main/protocol/TestConductorProtocol.proto | 4 +- .../akka/remote/testconductor/Conductor.scala | 40 +++-- .../akka/remote/testconductor/DataTypes.scala | 15 +- .../akka/remote/testconductor/Player.scala | 36 +++- .../remote/testconductor/BarrierSpec.scala | 132 ++++++++------- .../akka/remote/testkit/MultiNodeSpec.scala | 17 +- .../src/main/scala/akka/testkit/TestKit.scala | 14 +- 8 files changed, 303 insertions(+), 109 deletions(-) diff --git a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index 99c33e6728..ec84e42331 100644 --- a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -492,7 +492,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1397,7 +1397,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private 
Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1702,6 +1702,14 @@ public final class TestConductorProtocol { // optional bool status = 2; boolean hasStatus(); boolean getStatus(); + + // optional int64 timeout = 3; + boolean hasTimeout(); + long getTimeout(); + + // optional bool failed = 4; + boolean hasFailed(); + boolean getFailed(); } public static final class EnterBarrier extends com.google.protobuf.GeneratedMessage @@ -1774,9 +1782,31 @@ public final class TestConductorProtocol { return status_; } + // optional int64 timeout = 3; + public static final int TIMEOUT_FIELD_NUMBER = 3; + private long timeout_; + public boolean hasTimeout() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getTimeout() { + return timeout_; + } + + // optional bool failed = 4; + public static final int FAILED_FIELD_NUMBER = 4; + private boolean failed_; + public boolean hasFailed() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public boolean getFailed() { + return failed_; + } + private void initFields() { name_ = ""; status_ = false; + timeout_ = 0L; + failed_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -1800,6 +1830,12 @@ public final class TestConductorProtocol { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, status_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, timeout_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(4, failed_); + } getUnknownFields().writeTo(output); } @@ -1817,6 +1853,14 @@ public final class TestConductorProtocol { size += com.google.protobuf.CodedOutputStream .computeBoolSize(2, status_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, timeout_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { 
+ size += com.google.protobuf.CodedOutputStream + .computeBoolSize(4, failed_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -1927,7 +1971,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1945,6 +1989,10 @@ public final class TestConductorProtocol { bitField0_ = (bitField0_ & ~0x00000001); status_ = false; bitField0_ = (bitField0_ & ~0x00000002); + timeout_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + failed_ = false; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -1991,6 +2039,14 @@ public final class TestConductorProtocol { to_bitField0_ |= 0x00000002; } result.status_ = status_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.timeout_ = timeout_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.failed_ = failed_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -2013,6 +2069,12 @@ public final class TestConductorProtocol { if (other.hasStatus()) { setStatus(other.getStatus()); } + if (other.hasTimeout()) { + setTimeout(other.getTimeout()); + } + if (other.hasFailed()) { + setFailed(other.getFailed()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -2058,6 +2120,16 @@ public final class TestConductorProtocol { status_ = input.readBool(); break; } + case 24: { + bitField0_ |= 0x00000004; + timeout_ = input.readInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + failed_ = input.readBool(); + break; + } } } } @@ -2121,6 +2193,48 @@ public final class TestConductorProtocol { return this; } + // optional int64 timeout = 3; + private long timeout_ ; + public boolean hasTimeout() { + return ((bitField0_ & 0x00000004) == 0x00000004); + 
} + public long getTimeout() { + return timeout_; + } + public Builder setTimeout(long value) { + bitField0_ |= 0x00000004; + timeout_ = value; + onChanged(); + return this; + } + public Builder clearTimeout() { + bitField0_ = (bitField0_ & ~0x00000004); + timeout_ = 0L; + onChanged(); + return this; + } + + // optional bool failed = 4; + private boolean failed_ ; + public boolean hasFailed() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public boolean getFailed() { + return failed_; + } + public Builder setFailed(boolean value) { + bitField0_ |= 0x00000008; + failed_ = value; + onChanged(); + return this; + } + public Builder clearFailed() { + bitField0_ = (bitField0_ & ~0x00000008); + failed_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:EnterBarrier) } @@ -2377,7 +2491,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3005,7 +3119,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3611,7 +3725,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -4056,19 +4170,19 @@ public final class TestConductorProtocol { "\0132\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Inje" + "ctFailure\022\014\n\004done\030\004 \001(\t\022\035\n\004addr\030\005 \001(\0132\017." 
+ "AddressRequest\"0\n\005Hello\022\014\n\004name\030\001 \002(\t\022\031\n" + - "\007address\030\002 \002(\0132\010.Address\",\n\014EnterBarrier" + - "\022\014\n\004name\030\001 \002(\t\022\016\n\006status\030\002 \001(\010\"6\n\016Addres" + - "sRequest\022\014\n\004node\030\001 \002(\t\022\026\n\004addr\030\002 \001(\0132\010.A" + - "ddress\"G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006s" + - "ystem\030\002 \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"", - "\212\001\n\rInjectFailure\022\032\n\007failure\030\001 \002(\0162\t.Fai" + - "lType\022\035\n\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007" + - "address\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(" + - "\002\022\021\n\texitValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Thro" + - "ttle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shu" + - "tdown\020\004*,\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receiv" + - "e\020\002\022\010\n\004Both\020\003B\035\n\031akka.remote.testconduct" + - "orH\001" + "\007address\030\002 \002(\0132\010.Address\"M\n\014EnterBarrier" + + "\022\014\n\004name\030\001 \002(\t\022\016\n\006status\030\002 \001(\010\022\017\n\007timeou" + + "t\030\003 \001(\003\022\016\n\006failed\030\004 \001(\010\"6\n\016AddressReques" + + "t\022\014\n\004node\030\001 \002(\t\022\026\n\004addr\030\002 \001(\0132\010.Address\"" + + "G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006system\030\002", + " \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInj" + + "ectFailure\022\032\n\007failure\030\001 \002(\0162\t.FailType\022\035" + + "\n\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007address" + + "\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(\002\022\021\n\tex" + + "itValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Throttle\020\001\022" + + 
"\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shutdown\020\004" + + "*,\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receive\020\002\022\010\n\004" + + "Both\020\003B\035\n\031akka.remote.testconductorH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -4096,7 +4210,7 @@ public final class TestConductorProtocol { internal_static_EnterBarrier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnterBarrier_descriptor, - new java.lang.String[] { "Name", "Status", }, + new java.lang.String[] { "Name", "Status", "Timeout", "Failed", }, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class); internal_static_AddressRequest_descriptor = diff --git a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto index 648234614e..b35bbd23d8 100644 --- a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto @@ -7,7 +7,7 @@ option optimize_for = SPEED; /****************************************** Compile with: - cd ./akka-remote/src/main/protocol + cd ./akka-remote-tests/src/main/protocol protoc TestConductorProtocol.proto --java_out ../java *******************************************/ @@ -27,6 +27,8 @@ message Hello { message EnterBarrier { required string name = 1; optional bool status = 2; + optional int64 timeout = 3; + optional bool failed = 4; } message AddressRequest { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 17a2bfcd5f..7264948b0f 100644 --- 
a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -376,7 +376,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP * BarrierTimeouts in the players). */ override def supervisorStrategy = OneForOneStrategy() { - case BarrierTimeout(data) ⇒ SupervisorStrategy.Resume + case BarrierTimeout(data) ⇒ SupervisorStrategy.Restart case BarrierEmpty(data, msg) ⇒ SupervisorStrategy.Resume case WrongBarrier(name, client, data) ⇒ client ! ToClient(BarrierResult(name, false)); failBarrier(data) case ClientLost(data, node) ⇒ failBarrier(data) @@ -426,6 +426,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP case op: ServerOp ⇒ op match { case _: EnterBarrier ⇒ barrier forward op + case _: FailBarrier ⇒ barrier forward op case GetAddress(node) ⇒ if (nodes contains node) sender ! ToClient(AddressReply(node, nodes(node).addr)) else addrInterest += node -> ((addrInterest get node getOrElse Set()) + sender) @@ -497,9 +498,13 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor import BarrierCoordinator._ import akka.actor.FSM._ import Controller._ + import akka.util.{ Timeout ⇒ auTimeout } - // this shall be set to false if all subsequent barriers shall fail + // this shall be set to true if all subsequent barriers shall fail var failed = false + + var barrierTimeout: Option[auTimeout] = None + override def preRestart(reason: Throwable, message: Option[Any]) {} override def postRestart(reason: Throwable) { failed = true } @@ -520,27 +525,29 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor } when(Idle) { - case Event(EnterBarrier(name), d @ Data(clients, _, _)) ⇒ + case Event(EnterBarrier(name, timeout), d @ Data(clients, _, _)) ⇒ if (failed) stay replying ToClient(BarrierResult(name, false)) else if (clients.map(_.fsm) == Set(sender)) stay 
replying ToClient(BarrierResult(name, true)) else if (clients.find(_.fsm == sender).isEmpty) stay replying ToClient(BarrierResult(name, false)) - else + else { + barrierTimeout = timeout goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil) + } case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒ if (clients.isEmpty) throw BarrierEmpty(d, "cannot remove " + name + ": no client to remove") stay using d.copy(clients = clients filterNot (_.name == name)) } onTransition { - case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, TestConductor().Settings.BarrierTimeout.duration, false) + case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, barrierTimeout.getOrElse[auTimeout](TestConductor().Settings.BarrierTimeout).duration, false) case Waiting -> Idle ⇒ cancelTimer("Timeout") } when(Waiting) { - case Event(EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ + case Event(EnterBarrier(name, timeout), d @ Data(clients, barrier, arrived)) ⇒ if (name != barrier) throw WrongBarrier(name, sender, d) val together = if (clients.exists(_.fsm == sender)) sender :: arrived else arrived handleBarrier(d.copy(arrived = together)) @@ -550,18 +557,27 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor case Some(client) ⇒ handleBarrier(d.copy(clients = clients - client, arrived = arrived filterNot (_ == client.fsm))) } - case Event(StateTimeout, data) ⇒ - throw BarrierTimeout(data) + case Event(FailBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ + if (name != barrier) throw WrongBarrier(name, sender, d) + failed = true + handleBarrier(d, false) + + case Event(StateTimeout, d @ Data(clients, barrier, arrived)) ⇒ + handleBarrier(d, false) + throw BarrierTimeout(d) } initialize - def handleBarrier(data: Data): State = { - log.debug("handleBarrier({})", data) - if (data.arrived.isEmpty) { + def handleBarrier(data: Data, status: Boolean = true): State = { + log.debug("handleBarrier({}, {})", data, status) + if (!status) { + 
data.arrived foreach (_ ! ToClient(BarrierResult(data.barrier, status))) + goto(Idle) using data.copy(barrier = "", arrived = Nil) + } else if (data.arrived.isEmpty) { goto(Idle) using data.copy(barrier = "") } else if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) { - data.arrived foreach (_ ! ToClient(BarrierResult(data.barrier, true))) + data.arrived foreach (_ ! ToClient(BarrierResult(data.barrier, status))) goto(Idle) using data.copy(barrier = "", arrived = Nil) } else { stay using data diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index 022ae2d89b..4730bbd508 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -10,6 +10,7 @@ import akka.remote.testconductor.{ TestConductorProtocol ⇒ TCP } import com.google.protobuf.Message import akka.actor.Address import org.jboss.netty.handler.codec.oneone.OneToOneDecoder +import akka.util.Timeout case class RoleName(name: String) @@ -28,7 +29,8 @@ private[akka] sealed trait ConfirmedClientOp extends ClientOp */ private[akka] case class Hello(name: String, addr: Address) extends NetworkOp -private[akka] case class EnterBarrier(name: String) extends ServerOp with NetworkOp +private[akka] case class EnterBarrier(name: String, timeout: Option[Timeout] = None) extends ServerOp with NetworkOp +private[akka] case class FailBarrier(name: String) extends ServerOp with NetworkOp private[akka] case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp private[akka] case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) extends CommandOp @@ -72,10 +74,14 @@ private[akka] class MsgEncoder extends OneToOneEncoder { x match { case Hello(name, addr) ⇒ w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr)) - case 
EnterBarrier(name) ⇒ - w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name)) + case EnterBarrier(name, timeout) ⇒ + val barrier = TCP.EnterBarrier.newBuilder.setName(name) + timeout foreach (t ⇒ barrier.setTimeout(t.duration.toMillis)) + w.setBarrier(barrier) case BarrierResult(name, success) ⇒ w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setStatus(success)) + case FailBarrier(name) ⇒ + w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setFailed(true)) case ThrottleMsg(target, dir, rate) ⇒ w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate)) @@ -115,7 +121,8 @@ private[akka] class MsgDecoder extends OneToOneDecoder { } else if (w.hasBarrier) { val barrier = w.getBarrier if (barrier.hasStatus) BarrierResult(barrier.getName, barrier.getStatus) - else EnterBarrier(w.getBarrier.getName) + else if (barrier.hasFailed) FailBarrier(barrier.getName) + else EnterBarrier(w.getBarrier.getName, if (barrier.hasTimeout) Option(Timeout.longToTimeout(barrier.getTimeout)) else None) } else if (w.hasFailure) { val f = w.getFailure import TCP.{ FailType ⇒ FT } diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index 53c03d5d40..bed14725b4 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -11,7 +11,7 @@ import com.typesafe.config.ConfigFactory import akka.util.Timeout import akka.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.pattern.{ ask, pipe } +import akka.pattern.{ ask, pipe, AskTimeoutException } import akka.dispatch.Await import scala.util.control.NoStackTrace import akka.actor.Status @@ -76,10 +76,34 @@ trait Player { this: TestConductorExt ⇒ * throw an exception in case of timeouts or other errors. 
*/ def enter(name: String*) { + enter(Settings.BarrierTimeout, name) + } + + case class OutOfTimeException(barrier: String) extends RuntimeException("Ran out of time while waiting for barrier '" + barrier + "'") with NoStackTrace + + /** + * Enter the named barriers, one after the other, in the order given. Will + * throw an exception in case of timeouts or other errors. + */ + def enter(timeout: Timeout, name: Seq[String]) { + def now: Duration = System.nanoTime.nanos + system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) + val stop = now + timeout.duration name foreach { b ⇒ - import Settings.BarrierTimeout - Await.result(client ? ToServer(EnterBarrier(b)), Duration.Inf) + val barrierTimeout = stop - now + if (barrierTimeout < Duration.Zero) { + client ! ToServer(FailBarrier(b)) + throw OutOfTimeException(b) + } + try { + implicit val timeout = Timeout(barrierTimeout + Settings.QueryTimeout.duration) + Await.result(client ? ToServer(EnterBarrier(b, Option(barrierTimeout))), Duration.Inf) + } catch { + case e: AskTimeoutException ⇒ + client ! ToServer(FailBarrier(b)) + throw e + } system.log.debug("passed barrier {}", b) } } @@ -88,7 +112,7 @@ trait Player { this: TestConductorExt ⇒ * Query remote transport address of named node. */ def getAddressFor(name: RoleName): Future[Address] = { - import Settings.BarrierTimeout + import Settings.QueryTimeout client ? 
ToServer(GetAddress(name)) mapTo } } @@ -168,8 +192,8 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) case Event(ToServer(msg), d @ Data(Some(channel), None)) ⇒ channel.write(msg) val token = msg match { - case EnterBarrier(barrier) ⇒ barrier - case GetAddress(node) ⇒ node.name + case EnterBarrier(barrier, timeout) ⇒ barrier + case GetAddress(node) ⇒ node.name } stay using d.copy(runningOp = Some(token, sender)) case Event(ToServer(op), Data(channel, Some((token, _)))) ⇒ diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index 37ebd0a193..79dfda7559 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -19,6 +19,7 @@ import org.scalatest.BeforeAndAfterEach import java.net.InetSocketAddress import java.net.InetAddress import akka.testkit.TimingTest +import akka.util.{ Timeout, Duration } object BarrierSpec { case class Failed(ref: ActorRef, thr: Throwable) @@ -74,8 +75,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with "fail entering barrier when nobody registered" taggedAs TimingTest in { val b = getBarrier() - b ! EnterBarrier("b") - expectMsg(ToClient(BarrierResult("b", false))) + b ! EnterBarrier("bar1") + expectMsg(ToClient(BarrierResult("bar1", false))) } "enter barrier" taggedAs TimingTest in { @@ -83,12 +84,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! 
NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar2")) noMsg(a, b) within(2 second) { - b.send(barrier, EnterBarrier("bar")) - a.expectMsg(ToClient(BarrierResult("bar", true))) - b.expectMsg(ToClient(BarrierResult("bar", true))) + b.send(barrier, EnterBarrier("bar2")) + a.expectMsg(ToClient(BarrierResult("bar2", true))) + b.expectMsg(ToClient(BarrierResult("bar2", true))) } } @@ -97,15 +98,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b, c = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar3")) barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) - b.send(barrier, EnterBarrier("bar")) + b.send(barrier, EnterBarrier("bar3")) noMsg(a, b, c) within(2 second) { - c.send(barrier, EnterBarrier("bar")) - a.expectMsg(ToClient(BarrierResult("bar", true))) - b.expectMsg(ToClient(BarrierResult("bar", true))) - c.expectMsg(ToClient(BarrierResult("bar", true))) + c.send(barrier, EnterBarrier("bar3")) + a.expectMsg(ToClient(BarrierResult("bar3", true))) + b.expectMsg(ToClient(BarrierResult("bar3", true))) + c.expectMsg(ToClient(BarrierResult("bar3", true))) } } @@ -115,14 +116,14 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) - a.send(barrier, EnterBarrier("bar")) - b.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar4")) + b.send(barrier, EnterBarrier("bar4")) barrier ! RemoveClient(A) barrier ! ClientDisconnected(A) noMsg(a, b, c) b.within(2 second) { barrier ! 
RemoveClient(C) - b.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar4", true))) } barrier ! ClientDisconnected(C) expectNoMsg(1 second) @@ -133,7 +134,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val a, b = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar5")) barrier ! RemoveClient(A) b.send(barrier, EnterBarrier("foo")) b.expectMsg(ToClient(BarrierResult("foo", true))) @@ -145,11 +146,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar6")) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected(B) } - expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), B))) + expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil), B))) } "fail barrier with disconnecing node who already arrived" taggedAs TimingTest in { @@ -160,12 +161,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeA barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeC - a.send(barrier, EnterBarrier("bar")) - b.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar7")) + b.send(barrier, EnterBarrier("bar7")) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! 
ClientDisconnected(B) } - expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), B))) + expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil), B))) } "fail when entering wrong barrier" taggedAs TimingTest in { @@ -175,11 +176,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeA val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeB - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar8")) EventFilter[WrongBarrier](occurrences = 1) intercept { b.send(barrier, EnterBarrier("foo")) } - expectMsg(Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar", a.ref :: Nil)))) + expectMsg(Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar8", a.ref :: Nil)))) } "fail barrier after first failure" taggedAs TimingTest in { @@ -190,8 +191,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with } expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "cannot remove RoleName(a): no client to remove"))) barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) - a.send(barrier, EnterBarrier("right")) - a.expectMsg(ToClient(BarrierResult("right", false))) + a.send(barrier, EnterBarrier("bar9")) + a.expectMsg(ToClient(BarrierResult("bar9", false))) } "fail after barrier timeout" taggedAs TimingTest in { @@ -201,9 +202,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA barrier ! 
nodeB - a.send(barrier, EnterBarrier("right")) + a.send(barrier, EnterBarrier("bar10")) EventFilter[BarrierTimeout](occurrences = 1) intercept { - expectMsg(7 seconds, Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "right", a.ref :: Nil)))) + expectMsg(7 seconds, Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil)))) } } @@ -264,12 +265,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar11")) noMsg(a, b) within(2 second) { - b.send(barrier, EnterBarrier("bar")) - a.expectMsg(ToClient(BarrierResult("bar", true))) - b.expectMsg(ToClient(BarrierResult("bar", true))) + b.send(barrier, EnterBarrier("bar11")) + a.expectMsg(ToClient(BarrierResult("bar11", true))) + b.expectMsg(ToClient(BarrierResult("bar11", true))) } } @@ -280,16 +281,16 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar12")) barrier ! 
NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) c.expectMsg(ToClient(Done)) - b.send(barrier, EnterBarrier("bar")) + b.send(barrier, EnterBarrier("bar12")) noMsg(a, b, c) within(2 second) { - c.send(barrier, EnterBarrier("bar")) - a.expectMsg(ToClient(BarrierResult("bar", true))) - b.expectMsg(ToClient(BarrierResult("bar", true))) - c.expectMsg(ToClient(BarrierResult("bar", true))) + c.send(barrier, EnterBarrier("bar12")) + a.expectMsg(ToClient(BarrierResult("bar12", true))) + b.expectMsg(ToClient(BarrierResult("bar12", true))) + c.expectMsg(ToClient(BarrierResult("bar12", true))) } } @@ -302,14 +303,14 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) c.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar")) - b.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar13")) + b.send(barrier, EnterBarrier("bar13")) barrier ! Remove(A) barrier ! ClientDisconnected(A) noMsg(a, b, c) b.within(2 second) { barrier ! Remove(C) - b.expectMsg(ToClient(BarrierResult("bar", true))) + b.expectMsg(ToClient(BarrierResult("bar13", true))) } barrier ! ClientDisconnected(C) expectNoMsg(1 second) @@ -322,7 +323,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar14")) barrier ! Remove(A) b.send(barrier, EnterBarrier("foo")) b.expectMsg(ToClient(BarrierResult("foo", true))) @@ -336,13 +337,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar15")) barrier ! 
ClientDisconnected(RoleName("unknown")) noMsg(a) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected(B) } - a.expectMsg(ToClient(BarrierResult("bar", false))) + a.expectMsg(ToClient(BarrierResult("bar15", false))) } "fail barrier with disconnecing node who already arrived" taggedAs TimingTest in { @@ -356,12 +357,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) c.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar")) - b.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar16")) + b.send(barrier, EnterBarrier("bar16")) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected(B) } - a.expectMsg(ToClient(BarrierResult("bar", false))) + a.expectMsg(ToClient(BarrierResult("bar16", false))) } "fail when entering wrong barrier" taggedAs TimingTest in { @@ -373,15 +374,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeB a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar")) + a.send(barrier, EnterBarrier("bar17")) EventFilter[WrongBarrier](occurrences = 1) intercept { b.send(barrier, EnterBarrier("foo")) } - a.expectMsg(ToClient(BarrierResult("bar", false))) + a.expectMsg(ToClient(BarrierResult("bar17", false))) b.expectMsg(ToClient(BarrierResult("foo", false))) } - "not really fail after barrier timeout" taggedAs TimingTest in { + "fail after barrier timeout" taggedAs TimingTest in { val barrier = getController(2) val a, b = TestProbe() val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) @@ -390,13 +391,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! 
nodeB a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("right")) + a.send(barrier, EnterBarrier("bar18", Option(Timeout.durationToTimeout(2 seconds)))) EventFilter[BarrierTimeout](occurrences = 1) intercept { - Thread.sleep(5000) + Thread.sleep(4000) } - b.send(barrier, EnterBarrier("right")) - a.expectMsg(ToClient(BarrierResult("right", true))) - b.expectMsg(ToClient(BarrierResult("right", true))) + b.send(barrier, EnterBarrier("bar18")) + a.expectMsg(ToClient(BarrierResult("bar18", false))) + b.expectMsg(ToClient(BarrierResult("bar18", false))) } "fail if a node registers twice" taggedAs TimingTest in { @@ -423,8 +424,27 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with controller ! nodeB b.expectMsg(ToClient(BarrierResult("initial startup", false))) } - a.send(controller, EnterBarrier("x")) - a.expectMsg(ToClient(BarrierResult("x", false))) + a.send(controller, EnterBarrier("bar19")) + a.expectMsg(ToClient(BarrierResult("bar19", false))) + } + + "fail subsequent barriers after foreced failure" taggedAs TimingTest in { + val barrier = getController(2) + val a, b = TestProbe() + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) + barrier ! nodeA + barrier ! 
nodeB + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) + a.send(barrier, EnterBarrier("bar20", Option(Timeout.durationToTimeout(2 seconds)))) + b.send(barrier, FailBarrier("bar20")) + a.expectMsg(ToClient(BarrierResult("bar20", false))) + b.expectNoMsg(1 second) + a.send(barrier, EnterBarrier("bar21")) + b.send(barrier, EnterBarrier("bar21")) + a.expectMsg(ToClient(BarrierResult("bar21", false))) + b.expectMsg(ToClient(BarrierResult("bar21", false))) } "finally have no failure messages left" taggedAs TimingTest in { diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index faaab5cdc4..62539e981d 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -12,7 +12,7 @@ import akka.dispatch.Await import akka.dispatch.Await.Awaitable import akka.remote.testconductor.{ TestConductorExt, TestConductor, RoleName } import akka.testkit.AkkaSpec -import akka.util.{ NonFatal, Duration } +import akka.util.{ Timeout, NonFatal, Duration } /** * Configure the role names and participants of the test, including configuration settings. @@ -182,6 +182,14 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: if (nodes exists (_ == myself)) yes else no } + /** + * Enter the named barriers in the order given. 
Use the remaining duration from + * the innermost enclosing `within` block or the default `BarrierTimeout` + */ + def enter(name: String*) { + testConductor.enter(Timeout.durationToTimeout(remainingOr(testConductor.Settings.BarrierTimeout.duration)), name) + } + /** * Query the controller for the transport address of the given node (by role name) and * return that as an ActorPath for easy composition: @@ -193,11 +201,14 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: def node(role: RoleName): ActorPath = RootActorPath(testConductor.getAddressFor(role).await) /** - * Enrich `.await()` onto all Awaitables, using BarrierTimeout. + * Enrich `.await()` onto all Awaitables, using remaining duration from the innermost + * enclosing `within` block or BarrierTimeout. + * + * FIXME Is it really BarrierTimeout we want here? That seems like an awfully long time. */ implicit def awaitHelper[T](w: Awaitable[T]) = new AwaitHelper(w) class AwaitHelper[T](w: Awaitable[T]) { - def await: T = Await.result(w, testConductor.Settings.BarrierTimeout.duration) + def await: T = Await.result(w, remainingOr(testConductor.Settings.BarrierTimeout.duration)) } /* diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index c0fb6e5267..0f19e4e6c7 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -158,7 +158,13 @@ trait TestKitBase { * block or missing that it returns the properly dilated default for this * case from settings (key "akka.test.single-expect-default"). 
*/ - def remaining: Duration = if (end == Duration.Undefined) testKitSettings.SingleExpectDefaultTimeout.dilated else end - now + def remaining: Duration = remainingOr(testKitSettings.SingleExpectDefaultTimeout.dilated) + + /** + * Obtain time remaining for execution of the innermost enclosing `within` + * block or missing that it returns the given duration. + */ + def remainingOr(duration: Duration): Duration = if (end == Duration.Undefined) duration else end - now /** * Query queue status. @@ -605,12 +611,6 @@ object TestKit { /** * Await until the given condition evaluates to `true` or the timeout * expires, whichever comes first. - * - * If no timeout is given, take it from the innermost enclosing `within` - * block. - * - * Note that the timeout is scaled using Duration.dilated, which uses the - * configuration entry "akka.test.timefactor" */ def awaitCond(p: ⇒ Boolean, max: Duration, interval: Duration = 100.millis, noThrow: Boolean = false): Boolean = { val stop = now + max From 2d4067e21e410e6d70b408169be842be1440de34 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 13:56:54 +0200 Subject: [PATCH 375/538] Skipping immutable.Stack due to questionable implementation, going for immutable.List instead --- .../src/main/scala/akka/actor/Actor.scala | 8 ++----- .../src/main/scala/akka/actor/ActorCell.scala | 22 +++++++++---------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 2721ccffa0..cf35d68c8c 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -279,18 +279,14 @@ trait Actor { */ protected[akka] implicit val context: ActorContext = { val contextStack = ActorCell.contextStack.get - - def noContextError = + if ((contextStack.isEmpty) || (contextStack.head eq null)) throw new ActorInitializationException( "\n\tYou cannot create an instance of [" + 
getClass.getName + "] explicitly using the constructor (new)." + "\n\tYou have to use one of the factory methods to create a new actor. Either use:" + "\n\t\t'val actor = context.actorOf(Props[MyActor])' (to create a supervised child actor from within an actor), or" + "\n\t\t'val actor = system.actorOf(Props(new MyActor(..)))' (to create a top level actor from the ActorSystem)") - - if (contextStack.isEmpty) noContextError val c = contextStack.head - if (c eq null) noContextError - ActorCell.contextStack.set(contextStack.push(null)) + ActorCell.contextStack.set(null :: contextStack) c } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 39158b239d..72793513e2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -13,7 +13,7 @@ import akka.japi.Procedure import java.io.{ NotSerializableException, ObjectOutputStream } import akka.serialization.SerializationExtension import akka.event.Logging.LogEventException -import collection.immutable.{ TreeSet, Stack, TreeMap } +import collection.immutable.{ TreeSet, TreeMap } import akka.util.{ Unsafe, Duration, Helpers, NonFatal } //TODO: everything here for current compatibility - could be limited more @@ -173,8 +173,8 @@ trait UntypedActorContext extends ActorContext { * for! 
(waves hand) */ private[akka] object ActorCell { - val contextStack = new ThreadLocal[Stack[ActorContext]] { - override def initialValue = Stack[ActorContext]() + val contextStack = new ThreadLocal[List[ActorContext]] { + override def initialValue: List[ActorContext] = Nil } final val emptyCancellable: Cancellable = new Cancellable { @@ -184,7 +184,7 @@ private[akka] object ActorCell { final val emptyReceiveTimeoutData: (Long, Cancellable) = (-1, emptyCancellable) - final val emptyBehaviorStack: Stack[Actor.Receive] = Stack.empty + final val emptyBehaviorStack: List[Actor.Receive] = Nil final val emptyActorRefSet: Set[ActorRef] = TreeSet.empty @@ -408,7 +408,7 @@ private[akka] class ActorCell( var currentMessage: Envelope = _ var actor: Actor = _ - private var behaviorStack: Stack[Actor.Receive] = emptyBehaviorStack + private var behaviorStack: List[Actor.Receive] = emptyBehaviorStack @volatile var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status var nextNameSequence: Long = 0 var watching: Set[ActorRef] = emptyActorRefSet @@ -511,7 +511,7 @@ private[akka] class ActorCell( //This method is in charge of setting up the contextStack and create a new instance of the Actor protected def newActor(): Actor = { - contextStack.set(contextStack.get.push(this)) + contextStack.set(this :: contextStack.get) try { behaviorStack = emptyBehaviorStack val instance = props.creator.apply() @@ -520,12 +520,12 @@ private[akka] class ActorCell( throw new ActorInitializationException(self, "Actor instance passed to actorOf can't be 'null'") // If no becomes were issued, the actors behavior is its receive method - behaviorStack = if (behaviorStack.isEmpty) behaviorStack.push(instance.receive) else behaviorStack + behaviorStack = if (behaviorStack.isEmpty) instance.receive :: behaviorStack else behaviorStack instance } finally { val stackAfter = contextStack.get if (stackAfter.nonEmpty) - contextStack.set(if (stackAfter.head eq 
null) stackAfter.pop.pop else stackAfter.pop) // pop null marker plus our context + contextStack.set(if (stackAfter.head eq null) stackAfter.tail.tail else stackAfter.tail) // pop null marker plus our context } } @@ -680,7 +680,7 @@ private[akka] class ActorCell( } def become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = - behaviorStack = (if (discardOld && behaviorStack.nonEmpty) behaviorStack.pop else behaviorStack).push(behavior) + behaviorStack = behavior :: (if (discardOld && behaviorStack.nonEmpty) behaviorStack.tail else behaviorStack) /** * UntypedActorContext impl @@ -696,8 +696,8 @@ private[akka] class ActorCell( def unbecome(): Unit = { val original = behaviorStack behaviorStack = - if (original.isEmpty || original.pop.isEmpty) emptyBehaviorStack.push(actor.receive) - else original.pop + if (original.isEmpty || original.tail.isEmpty) actor.receive :: emptyBehaviorStack + else original.tail } def autoReceiveMessage(msg: Envelope): Unit = { From d6e3642d9d79c6b80377c68189ba23daaeb63048 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 14:08:47 +0200 Subject: [PATCH 376/538] Removing all uses of immutable.Stack in Akka --- .../src/main/scala/akka/actor/Actor.scala | 1 - .../src/main/scala/akka/actor/ActorSystem.scala | 17 ++++++++--------- .../src/main/scala/akka/actor/Props.scala | 1 - .../main/scala/akka/testkit/TestActorRef.scala | 2 -- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index cf35d68c8c..8fc7df93e5 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -7,7 +7,6 @@ package akka.actor import akka.AkkaException import scala.reflect.BeanProperty import scala.util.control.NoStackTrace -import scala.collection.immutable.Stack import java.util.regex.Pattern /** diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala 
b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 721375adda..c874d75afc 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -13,7 +13,6 @@ import java.io.Closeable import akka.dispatch.Await.{ Awaitable, CanAwait } import akka.util._ import akka.util.internal.{ HashedWheelTimer, ConcurrentIdentityHashMap } -import collection.immutable.Stack import java.util.concurrent.{ ThreadFactory, CountDownLatch, TimeoutException, RejectedExecutionException } import java.util.concurrent.TimeUnit.MILLISECONDS @@ -685,8 +684,8 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, final class TerminationCallbacks extends Runnable with Awaitable[Unit] { private val lock = new ReentrantGuard - private var callbacks: Stack[Runnable] = _ //non-volatile since guarded by the lock - lock withGuard { callbacks = Stack.empty[Runnable] } + private var callbacks: List[Runnable] = _ //non-volatile since guarded by the lock + lock withGuard { callbacks = Nil } private val latch = new CountDownLatch(1) @@ -695,17 +694,17 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, case 0 ⇒ throw new RejectedExecutionException("Must be called prior to system shutdown.") case _ ⇒ lock withGuard { if (latch.getCount == 0) throw new RejectedExecutionException("Must be called prior to system shutdown.") - else callbacks = callbacks.push(callback) + else callbacks ::= callback } } } final def run(): Unit = lock withGuard { - @tailrec def runNext(c: Stack[Runnable]): Stack[Runnable] = c.headOption match { - case None ⇒ Stack.empty[Runnable] - case Some(callback) ⇒ - try callback.run() catch { case e ⇒ log.error(e, "Failed to run termination callback, due to [{}]", e.getMessage) } - runNext(c.pop) + @tailrec def runNext(c: List[Runnable]): List[Runnable] = c match { + case Nil ⇒ Nil + case callback :: _ ⇒ + try callback.run() catch { case NonFatal(e) ⇒ 
log.error(e, "Failed to run termination callback, due to [{}]", e.getMessage) } + runNext(c.tail) } try { callbacks = runNext(callbacks) } finally latch.countDown() } diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index fc01a5ba36..82d97f5465 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -6,7 +6,6 @@ package akka.actor import akka.dispatch._ import akka.japi.Creator -import collection.immutable.Stack import akka.routing._ /** diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index ed151b6b12..f8efe4e2e5 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -5,9 +5,7 @@ package akka.testkit import akka.actor._ -import akka.util.Duration import java.util.concurrent.atomic.AtomicLong -import scala.collection.immutable.Stack import akka.dispatch._ import akka.pattern.ask From 5714d8327f3427997b5ee484efbe42fa6d469b6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Wed, 13 Jun 2012 14:55:33 +0200 Subject: [PATCH 377/538] Make multi node tests use the within() aware barrier --- ...lientDowningNodeThatIsUnreachableSpec.scala | 8 ++++---- .../ClientDowningNodeThatIsUpSpec.scala | 8 ++++---- .../scala/akka/cluster/ConvergenceSpec.scala | 8 ++++---- .../GossipingAccrualFailureDetectorSpec.scala | 4 ++-- ...eaderDowningNodeThatIsUnreachableSpec.scala | 18 +++++++++--------- .../akka/cluster/LeaderElectionSpec.scala | 16 ++++++++-------- .../MembershipChangeListenerExitingSpec.scala | 8 ++++---- .../MembershipChangeListenerJoinSpec.scala | 6 +++--- .../MembershipChangeListenerLeavingSpec.scala | 8 ++++---- .../MembershipChangeListenerUpSpec.scala | 10 +++++----- .../akka/cluster/MultiNodeClusterSpec.scala | 4 ++-- 
.../scala/akka/cluster/NodeJoinSpec.scala | 2 +- ...eLeavingAndExitingAndBeingRemovedSpec.scala | 4 ++-- .../cluster/NodeLeavingAndExitingSpec.scala | 4 ++-- .../scala/akka/cluster/NodeLeavingSpec.scala | 4 ++-- .../akka/cluster/NodeMembershipSpec.scala | 6 +++--- .../scala/akka/cluster/NodeUpSpec.scala | 8 ++++---- .../akka/cluster/SingletonClusterSpec.scala | 4 ++-- .../scala/akka/cluster/SunnyWeatherSpec.scala | 4 ++-- .../akka/remote/LookupRemoteActorSpec.scala | 2 +- .../scala/akka/remote/NewRemoteActorSpec.scala | 4 ++-- .../router/RandomRoutedRemoteActorSpec.scala | 10 +++++----- .../RoundRobinRoutedRemoteActorSpec.scala | 10 +++++----- .../ScatterGatherRoutedRemoteActorSpec.scala | 10 +++++----- .../testconductor/TestConductorSpec.scala | 10 +++++----- 25 files changed, 90 insertions(+), 90 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 343f0c7c17..e86b026bfd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -47,23 +47,23 @@ abstract class ClientDowningNodeThatIsUnreachableSpec // mark 'third' node as DOWN cluster.down(thirdAddress) - testConductor.enter("down-third-node") + enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) } runOn(third) { - testConductor.enter("down-third-node") + enter("down-third-node") } runOn(second, fourth) { - testConductor.enter("down-third-node") + enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) } - testConductor.enter("await-completion") + enter("await-completion") } } } diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 95eeefd982..228f5b6d98 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -43,7 +43,7 @@ abstract class ClientDowningNodeThatIsUpSpec runOn(first) { // mark 'third' node as DOWN cluster.down(thirdAddress) - testConductor.enter("down-third-node") + enter("down-third-node") markNodeAsUnavailable(thirdAddress) @@ -52,16 +52,16 @@ abstract class ClientDowningNodeThatIsUpSpec } runOn(third) { - testConductor.enter("down-third-node") + enter("down-third-node") } runOn(second, fourth) { - testConductor.enter("down-third-node") + enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) } - testConductor.enter("await-completion") + enter("await-completion") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index bdc0a1ae8b..2e496c9b2c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -46,12 +46,12 @@ abstract class ConvergenceSpec // doesn't join immediately } - testConductor.enter("after-1") + enter("after-1") } "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest ignore { val thirdAddress = node(third).address - testConductor.enter("before-shutdown") + enter("before-shutdown") runOn(first) { // kill 'third' node @@ -78,7 +78,7 @@ abstract class ConvergenceSpec } } - testConductor.enter("after-2") + enter("after-2") } "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest ignore { @@ -116,7 +116,7 @@ abstract class ConvergenceSpec } } - 
testConductor.enter("after-3") + enter("after-3") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index b14c0d927c..d66fb95692 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -43,7 +43,7 @@ abstract class GossipingAccrualFailureDetectorSpec cluster.failureDetector.isAvailable(secondAddress) must be(true) cluster.failureDetector.isAvailable(thirdAddress) must be(true) - testConductor.enter("after-1") + enter("after-1") } "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { @@ -59,7 +59,7 @@ abstract class GossipingAccrualFailureDetectorSpec cluster.failureDetector.isAvailable(secondAddress) must be(true) } - testConductor.enter("after-2") + enter("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 5e2545394d..9953a4c61f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -46,7 +46,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec runOn(first) { // kill 'fourth' node testConductor.shutdown(fourth, 0) - testConductor.enter("down-fourth-node") + enter("down-fourth-node") // mark the node as unreachable in the failure detector markNodeAsUnavailable(fourthAddress) @@ -57,26 +57,26 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec } runOn(fourth) { - testConductor.enter("down-fourth-node") + enter("down-fourth-node") } runOn(second, third) { - 
testConductor.enter("down-fourth-node") + enter("down-fourth-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) } - testConductor.enter("await-completion-1") + enter("await-completion-1") } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { val secondAddress = node(second).address - testConductor.enter("before-down-second-node") + enter("before-down-second-node") runOn(first) { // kill 'second' node testConductor.shutdown(second, 0) - testConductor.enter("down-second-node") + enter("down-second-node") // mark the node as unreachable in the failure detector markNodeAsUnavailable(secondAddress) @@ -87,16 +87,16 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec } runOn(second) { - testConductor.enter("down-second-node") + enter("down-second-node") } runOn(third) { - testConductor.enter("down-second-node") + enter("down-second-node") awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30 seconds) } - testConductor.enter("await-completion-2") + enter("await-completion-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index e161206ba0..28a684eb7b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -50,7 +50,7 @@ abstract class LeaderElectionSpec assertLeaderIn(sortedRoles) } - testConductor.enter("after") + enter("after") } def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { @@ -64,33 +64,33 @@ abstract class LeaderElectionSpec case `controller` ⇒ val leaderAddress = node(leader).address - testConductor.enter("before-shutdown") + enter("before-shutdown") testConductor.shutdown(leader, 0) - testConductor.enter("after-shutdown", "after-down", "completed") + enter("after-shutdown", "after-down", 
"completed") markNodeAsUnavailable(leaderAddress) case `leader` ⇒ - testConductor.enter("before-shutdown", "after-shutdown") + enter("before-shutdown", "after-shutdown") // this node will be shutdown by the controller and doesn't participate in more barriers case `aUser` ⇒ val leaderAddress = node(leader).address - testConductor.enter("before-shutdown", "after-shutdown") + enter("before-shutdown", "after-shutdown") // user marks the shutdown leader as DOWN cluster.down(leaderAddress) - testConductor.enter("after-down", "completed") + enter("after-down", "completed") markNodeAsUnavailable(leaderAddress) case _ if remainingRoles.contains(myself) ⇒ // remaining cluster nodes, not shutdown - testConductor.enter("before-shutdown", "after-shutdown", "after-down") + enter("before-shutdown", "after-shutdown", "after-down") awaitUpConvergence(currentRoles.size - 1) val nextExpectedLeader = remainingRoles.head cluster.isLeader must be(myself == nextExpectedLeader) assertLeaderIn(remainingRoles) - testConductor.enter("completed") + enter("completed") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index d9b2c7b876..f8ad009bc2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -47,12 +47,12 @@ abstract class MembershipChangeListenerExitingSpec awaitClusterUp(first, second, third) runOn(first) { - testConductor.enter("registered-listener") + enter("registered-listener") cluster.leave(secondAddress) } runOn(second) { - testConductor.enter("registered-listener") + enter("registered-listener") } runOn(third) { @@ -63,11 +63,11 @@ abstract class MembershipChangeListenerExitingSpec exitingLatch.countDown() } }) - testConductor.enter("registered-listener") + enter("registered-listener") 
exitingLatch.await } - testConductor.enter("finished") + enter("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 2809ae820b..a163097959 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -44,18 +44,18 @@ abstract class MembershipChangeListenerJoinSpec joinLatch.countDown() } }) - testConductor.enter("registered-listener") + enter("registered-listener") joinLatch.await cluster.convergence.isDefined must be(true) } runOn(second) { - testConductor.enter("registered-listener") + enter("registered-listener") cluster.join(firstAddress) } - testConductor.enter("after") + enter("after") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 57cec4f389..20465b5cf0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -44,12 +44,12 @@ abstract class MembershipChangeListenerLeavingSpec awaitClusterUp(first, second, third) runOn(first) { - testConductor.enter("registered-listener") + enter("registered-listener") cluster.leave(secondAddress) } runOn(second) { - testConductor.enter("registered-listener") + enter("registered-listener") } runOn(third) { @@ -60,11 +60,11 @@ abstract class MembershipChangeListenerLeavingSpec latch.countDown() } }) - testConductor.enter("registered-listener") + enter("registered-listener") latch.await } - testConductor.enter("finished") + enter("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index c89bbe1f0a..9ccf1ba960 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -44,16 +44,16 @@ abstract class MembershipChangeListenerUpSpec latch.countDown() } }) - testConductor.enter("listener-1-registered") + enter("listener-1-registered") cluster.join(firstAddress) latch.await } runOn(third) { - testConductor.enter("listener-1-registered") + enter("listener-1-registered") } - testConductor.enter("after-1") + enter("after-1") } "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { @@ -65,7 +65,7 @@ abstract class MembershipChangeListenerUpSpec latch.countDown() } }) - testConductor.enter("listener-2-registered") + enter("listener-2-registered") runOn(third) { cluster.join(firstAddress) @@ -73,7 +73,7 @@ abstract class MembershipChangeListenerUpSpec latch.await - testConductor.enter("after-2") + enter("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index b4532f7efc..d67dedc34f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -71,14 +71,14 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec // make sure that the node-to-join is started before other join startClusterNode() } - testConductor.enter(roles.head.name + "-started") + enter(roles.head.name + "-started") if (roles.tail.contains(myself)) { cluster.join(node(roles.head).address) } if (upConvergence && roles.contains(myself)) { awaitUpConvergence(numberOfMembers = roles.length) } - 
testConductor.enter(roles.map(_.name).mkString("-") + "-joined") + enter(roles.map(_.name).mkString("-") + "-joined") } /** diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 6cf5fc220d..4a93655fef 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -45,7 +45,7 @@ abstract class NodeJoinSpec awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Joining }) - testConductor.enter("after") + enter("after") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 01e5f8aa74..d7cf74af75 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -44,7 +44,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec runOn(first) { cluster.leave(secondAddress) } - testConductor.enter("second-left") + enter("second-left") runOn(first, third) { // verify that the 'second' node is no longer part of the 'members' set @@ -59,7 +59,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec isRemoved.get.address must be(secondAddress) } - testConductor.enter("finished") + enter("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 6378a74040..be28235c33 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -50,7 +50,7 @@ abstract class 
NodeLeavingAndExitingSpec runOn(first) { cluster.leave(secondAddress) } - testConductor.enter("second-left") + enter("second-left") runOn(first, third) { @@ -69,7 +69,7 @@ abstract class NodeLeavingAndExitingSpec hasExited.get.address must be(secondAddress) } - testConductor.enter("finished") + enter("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 8ea21e9380..5c5ffb16e0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -44,7 +44,7 @@ abstract class NodeLeavingSpec runOn(first) { cluster.leave(secondAddress) } - testConductor.enter("second-left") + enter("second-left") runOn(first, third) { awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Leaving)) @@ -54,7 +54,7 @@ abstract class NodeLeavingSpec hasLeft.get.address must be(secondAddress) } - testConductor.enter("finished") + enter("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index fb0573f77f..350e43a54b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -38,7 +38,7 @@ abstract class NodeMembershipSpec runOn(first) { startClusterNode() } - testConductor.enter("first-started") + enter("first-started") runOn(first, second) { cluster.join(firstAddress) @@ -50,7 +50,7 @@ abstract class NodeMembershipSpec awaitCond(cluster.convergence.isDefined) } - testConductor.enter("after-1") + enter("after-1") } "(when three nodes) start gossiping to each other so that all nodes gets the same gossip info" taggedAs LongRunningTest in { @@ -66,7 +66,7 @@ abstract class NodeMembershipSpec } awaitCond(cluster.convergence.isDefined) - 
testConductor.enter("after-2") + enter("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 0fdc3c89b8..2e4691b1a4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -33,7 +33,7 @@ abstract class NodeUpSpec awaitClusterUp(first, second) - testConductor.enter("after-1") + enter("after-1") } "be unaffected when joining again" taggedAs LongRunningTest in { @@ -45,12 +45,12 @@ abstract class NodeUpSpec unexpected.set(members) } }) - testConductor.enter("listener-registered") + enter("listener-registered") runOn(second) { cluster.join(node(first).address) } - testConductor.enter("joined-again") + enter("joined-again") // let it run for a while to make sure that nothing bad happens for (n ← 1 to 20) { @@ -59,7 +59,7 @@ abstract class NodeUpSpec cluster.latestGossip.members.forall(_.status == MemberStatus.Up) must be(true) } - testConductor.enter("after-2") + enter("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index cada29e210..9137abbb1a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -43,7 +43,7 @@ abstract class SingletonClusterSpec cluster.isSingletonCluster must be(false) assertLeader(first, second) - testConductor.enter("after-1") + enter("after-1") } "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { @@ -58,7 +58,7 @@ abstract class SingletonClusterSpec assertLeader(first) } - testConductor.enter("after-2") + enter("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala 
index b8486841c6..5a4699c91f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -62,7 +62,7 @@ abstract class SunnyWeatherSpec }) for (n ← 1 to 30) { - testConductor.enter("period-" + n) + enter("period-" + n) unexpected.get must be(null) awaitUpConvergence(roles.size) assertLeaderIn(roles) @@ -70,7 +70,7 @@ abstract class SunnyWeatherSpec 1.seconds.sleep } - testConductor.enter("after") + enter("after") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala index cfbbae67dc..999e152b0f 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala @@ -47,7 +47,7 @@ class LookupRemoteActorSpec extends MultiNodeSpec(LookupRemoteActorMultiJvmSpec) val masterAddress = testConductor.getAddressFor(master).await (hello ? 
"identify").await.asInstanceOf[ActorRef].path.address must equal(masterAddress) } - testConductor.enter("done") + enter("done") } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala index 5aa79eb775..5b4e19df98 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala @@ -56,7 +56,7 @@ class NewRemoteActorSpec extends MultiNodeSpec(NewRemoteActorMultiJvmSpec) system.stop(actor) } - testConductor.enter("done") + enter("done") } "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef (with deployOnAll)" taggedAs LongRunningTest in { @@ -74,7 +74,7 @@ class NewRemoteActorSpec extends MultiNodeSpec(NewRemoteActorMultiJvmSpec) system.stop(actor) } - testConductor.enter("done") + enter("done") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala index 58f230e487..eeb09d6174 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala @@ -55,11 +55,11 @@ class RandomRoutedRemoteActorSpec extends MultiNodeSpec(RandomRoutedRemoteActorM "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest in { runOn(first, second, third) { - testConductor.enter("start", "broadcast-end", "end", "done") + enter("start", "broadcast-end", "end", "done") } runOn(fourth) { - testConductor.enter("start") + enter("start") val actor = system.actorOf(Props[SomeActor].withRouter(RandomRouter()), "service-hello") actor.isInstanceOf[RoutedActorRef] must be(true) @@ -76,17 +76,17 @@ class 
RandomRoutedRemoteActorSpec extends MultiNodeSpec(RandomRoutedRemoteActorM case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } - testConductor.enter("broadcast-end") + enter("broadcast-end") actor ! Broadcast(PoisonPill) - testConductor.enter("end") + enter("end") replies.values foreach { _ must be > (0) } replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node system.stop(actor) - testConductor.enter("done") + enter("done") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala index c72644899e..f69989f41f 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala @@ -55,11 +55,11 @@ class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemo "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest in { runOn(first, second, third) { - testConductor.enter("start", "broadcast-end", "end", "done") + enter("start", "broadcast-end", "end", "done") } runOn(fourth) { - testConductor.enter("start") + enter("start") val actor = system.actorOf(Props[SomeActor].withRouter(RoundRobinRouter()), "service-hello") actor.isInstanceOf[RoutedActorRef] must be(true) @@ -76,17 +76,17 @@ class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemo case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } - testConductor.enter("broadcast-end") + enter("broadcast-end") actor ! 
Broadcast(PoisonPill) - testConductor.enter("end") + enter("end") replies.values foreach { _ must be(iterationCount) } replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node system.stop(actor) - testConductor.enter("done") + enter("done") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala index 10a007e772..3c18518503 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala @@ -55,11 +55,11 @@ class ScatterGatherRoutedRemoteActorSpec extends MultiNodeSpec(ScatterGatherRout "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest in { runOn(first, second, third) { - testConductor.enter("start", "broadcast-end", "end", "done") + enter("start", "broadcast-end", "end", "done") } runOn(fourth) { - testConductor.enter("start") + enter("start") val actor = system.actorOf(Props[SomeActor].withRouter(ScatterGatherFirstCompletedRouter(within = 10 seconds)), "service-hello") actor.isInstanceOf[RoutedActorRef] must be(true) @@ -76,17 +76,17 @@ class ScatterGatherRoutedRemoteActorSpec extends MultiNodeSpec(ScatterGatherRout case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } - testConductor.enter("broadcast-end") + enter("broadcast-end") actor ! 
Broadcast(PoisonPill) - testConductor.enter("end") + enter("end") replies.values.sum must be === connectionCount * iterationCount replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node system.stop(actor) - testConductor.enter("done") + enter("done") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 624347be69..5898fd458c 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -46,7 +46,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im }), "echo") } - testConductor.enter("name") + enter("name") } "support throttling of network connections" taggedAs LongRunningTest in { @@ -62,7 +62,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im testConductor.throttle(slave, master, Direction.Send, rateMBit = 0.01).await } - testConductor.enter("throttled_send") + enter("throttled_send") runOn(slave) { for (i ← 0 to 9) echo ! i @@ -73,14 +73,14 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im receiveN(9) must be(1 to 9) } - testConductor.enter("throttled_send2") + enter("throttled_send2") runOn(master) { testConductor.throttle(slave, master, Direction.Send, -1).await testConductor.throttle(slave, master, Direction.Receive, rateMBit = 0.01).await } - testConductor.enter("throttled_recv") + enter("throttled_recv") runOn(slave) { for (i ← 10 to 19) echo ! 
i @@ -98,7 +98,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im receiveN(9) must be(11 to 19) } - testConductor.enter("throttled_recv2") + enter("throttled_recv2") runOn(master) { testConductor.throttle(slave, master, Direction.Receive, -1).await From 1eb6681b3c9c23d60dce31c17acaf58cafd09643 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 15:11:19 +0200 Subject: [PATCH 378/538] Removing horrid IDEA autogenerated section and adding Typesafe copyright to NonPublicClass.java --- .../src/test/java/akka/actor/NonPublicClass.java | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java b/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java index 55f3910db7..850d82cd62 100644 --- a/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java +++ b/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java @@ -1,14 +1,9 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + package akka.actor; -import com.sun.xml.internal.ws.api.PropertySet; - -/** - * Created by IntelliJ IDEA. - * User: viktorklang - * Date: 6/13/12 - * Time: 12:12 PM - * To change this template use File | Settings | File Templates. 
- */ public class NonPublicClass { public static Props createProps() { return new Props(MyNonPublicActorClass.class); From 6d114fb3e2a7db4067c8cdfed79cb27d7074e938 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 15:14:51 +0200 Subject: [PATCH 379/538] Review fixes --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index c874d75afc..0c5be77889 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -702,9 +702,9 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, final def run(): Unit = lock withGuard { @tailrec def runNext(c: List[Runnable]): List[Runnable] = c match { case Nil ⇒ Nil - case callback :: _ ⇒ + case callback :: rest ⇒ try callback.run() catch { case NonFatal(e) ⇒ log.error(e, "Failed to run termination callback, due to [{}]", e.getMessage) } - runNext(c.tail) + runNext(rest) } try { callbacks = runNext(callbacks) } finally latch.countDown() } From 6bb1bf679562c60538c84fd14a828c4c2c5140e2 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 13 Jun 2012 15:19:58 +0200 Subject: [PATCH 380/538] Switching to getDeclaredConstructor.newInstance as a fallback --- akka-actor/src/main/scala/akka/actor/Props.scala | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 91c4ced285..e1d8e3b04c 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -186,9 +186,10 @@ case class Props( * able to optimize serialization. 
*/ private[akka] case class FromClassCreator(clazz: Class[_ <: Actor]) extends Function0[Actor] { - def apply(): Actor = { - val ctor = clazz.getDeclaredConstructor() - ctor.setAccessible(true) - ctor.newInstance() + def apply(): Actor = try clazz.newInstance catch { + case iae: IllegalAccessException ⇒ + val ctor = clazz.getDeclaredConstructor() + ctor.setAccessible(true) + ctor.newInstance() } } From 5b89d25c37fc7836e4082f581c87feedb6f89410 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 13 Jun 2012 15:23:45 +0200 Subject: [PATCH 381/538] Add invariant assertions to Gossip, see #2077 * Add doc about how members are "moved" --- .../src/main/scala/akka/cluster/Cluster.scala | 71 +++++++++++++++---- .../test/scala/akka/cluster/GossipSpec.scala | 39 ++++++---- 2 files changed, 85 insertions(+), 25 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 07712d8ed9..3fecd7524b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -147,7 +147,13 @@ case class GossipEnvelope(from: Address, gossip: Gossip) extends ClusterMessage * * Can be one of: Joining, Up, Leaving, Exiting and Down. */ -sealed trait MemberStatus extends ClusterMessage +sealed trait MemberStatus extends ClusterMessage { + /** + * Using the same notion for 'unavailable' as 'non-convergence': DOWN and REMOVED. + */ + def isUnavailable: Boolean = this == MemberStatus.Down || this == MemberStatus.Removed +} + object MemberStatus { case object Joining extends MemberStatus case object Up extends MemberStatus @@ -155,11 +161,6 @@ object MemberStatus { case object Exiting extends MemberStatus case object Down extends MemberStatus case object Removed extends MemberStatus - - /** - * Using the same notion for 'unavailable' as 'non-convergence': DOWN and REMOVED. 
- */ - def isUnavailable(status: MemberStatus): Boolean = status == MemberStatus.Down || status == MemberStatus.Removed } /** @@ -169,8 +170,6 @@ case class GossipOverview( seen: Map[Address, VectorClock] = Map.empty[Address, VectorClock], unreachable: Set[Member] = Set.empty[Member]) { - // FIXME document when nodes are put in 'unreachable' set and removed from 'members' - override def toString = "GossipOverview(seen = [" + seen.mkString(", ") + "], unreachable = [" + unreachable.mkString(", ") + @@ -182,7 +181,31 @@ object Gossip { } /** - * Represents the state of the cluster; cluster ring membership, ring convergence, meta data - all versioned by a vector clock. + * Represents the state of the cluster; cluster ring membership, ring convergence, meta data - + * all versioned by a vector clock. + * + * When a node is joining the Member, with status Joining, is added to `members`. + * If the joining node was downed it is moved from `overview.unreachable` (status Down) + * to `members` (status Joining). It cannot rejoin if not first downed. + * + * When convergence is reached the leader change status of `members` from Joining + * to Up. + * + * When failure detector consider a node as unavailble it will be moved from + * `members` to `overview.unreachable`. + * + * When a node is downed, either manually or automatically, it is moved from `members` + * to `overview.unreachable` (status Down). It is also removed from `overview.seen` + * table. The node will reside as Down in the `overview.unreachable` set until joining + * again and it will then go through the normal joining procedure. + * + * When a Gossip is received the version (vector clock) is used to determine if the + * received Gossip is newer or older than the current local Gossip. The received Gossip + * and local Gossip is merged in case of concurrent vector clocks, i.e. not same history. + * When merged the seen table is cleared. 
+ * + * TODO document leaving, exiting and removed when that is implemented + * */ case class Gossip( overview: GossipOverview = GossipOverview(), @@ -192,6 +215,28 @@ case class Gossip( extends ClusterMessage // is a serializable cluster message with Versioned[Gossip] { + // FIXME can be disabled as optimization + assertInvariants + private def assertInvariants: Unit = { + val unreachableAndLive = members.intersect(overview.unreachable) + if (unreachableAndLive.nonEmpty) + throw new IllegalArgumentException("Same nodes in both members and unreachable is not allowed, got [%s]" + format unreachableAndLive.mkString(", ")) + + val allowedLiveMemberStatuses: Set[MemberStatus] = Set(MemberStatus.Joining, MemberStatus.Up, MemberStatus.Leaving, MemberStatus.Exiting) + def hasNotAllowedLiveMemberStatus(m: Member) = !allowedLiveMemberStatuses.contains(m.status) + if (members exists hasNotAllowedLiveMemberStatus) + throw new IllegalArgumentException("Live members must have status [%s], got [%s]" + format (allowedLiveMemberStatuses.mkString(", "), + (members filter hasNotAllowedLiveMemberStatus).mkString(", "))) + + val seenButNotMember = overview.seen.keySet -- members.map(_.address) -- overview.unreachable.map(_.address) + if (seenButNotMember.nonEmpty) + throw new IllegalArgumentException("Nodes not part of cluster have marked the Gossip as seen, got [%s]" + format seenButNotMember.mkString(", ")) + + } + /** * Increments the version for this 'Node'. */ @@ -223,7 +268,7 @@ case class Gossip( // 2. merge meta-data val mergedMeta = this.meta ++ that.meta - def reduceHighestPriority(a: Seq[Member], b: Seq[Member]): Set[Member] = { + def pickHighestPriority(a: Seq[Member], b: Seq[Member]): Set[Member] = { // group all members by Address => Seq[Member] val groupedByAddress = (a ++ b).groupBy(_.address) // pick highest MemberStatus @@ -233,11 +278,11 @@ case class Gossip( } // 3. 
merge unreachable by selecting the single Member with highest MemberStatus out of the Member groups - val mergedUnreachable = reduceHighestPriority(this.overview.unreachable.toSeq, that.overview.unreachable.toSeq) + val mergedUnreachable = pickHighestPriority(this.overview.unreachable.toSeq, that.overview.unreachable.toSeq) // 4. merge members by selecting the single Member with highest MemberStatus out of the Member groups, // and exclude unreachable - val mergedMembers = Gossip.emptyMembers ++ reduceHighestPriority(this.members.toSeq, that.members.toSeq). + val mergedMembers = Gossip.emptyMembers ++ pickHighestPriority(this.members.toSeq, that.members.toSeq). filterNot(mergedUnreachable.contains) // 5. fresh seen table @@ -1145,7 +1190,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localMembers = localGossip.members val localUnreachableMembers = localOverview.unreachable val isUnreachable = localUnreachableMembers exists { _.address == selfAddress } - val hasUnavailableMemberStatus = localMembers exists { m ⇒ (m == self) && MemberStatus.isUnavailable(m.status) } + val hasUnavailableMemberStatus = localMembers exists { m ⇒ (m == self) && m.status.isUnavailable } isUnreachable || hasUnavailableMemberStatus } diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 8c790cf159..449ebf7bff 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -22,28 +22,30 @@ class GossipSpec extends WordSpec with MustMatchers { val c2 = Member(Address("akka", "sys", "c", 2552), Up) val d1 = Member(Address("akka", "sys", "d", 2552), Leaving) val d2 = Member(Address("akka", "sys", "d", 2552), Removed) + val e1 = Member(Address("akka", "sys", "e", 2552), Joining) + val e2 = Member(Address("akka", "sys", "e", 2552), Up) "A Gossip" must { "merge members by status priority" in { - val 
g1 = Gossip(members = SortedSet(a1, b1, c1, d1)) - val g2 = Gossip(members = SortedSet(a2, b2, c2, d2)) + val g1 = Gossip(members = SortedSet(a1, c1, e1)) + val g2 = Gossip(members = SortedSet(a2, c2, e2)) val merged1 = g1 merge g2 - merged1.members must be(SortedSet(a1, b2, c1, d2)) - merged1.members.toSeq.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + merged1.members must be(SortedSet(a1, c1, e2)) + merged1.members.toSeq.map(_.status) must be(Seq(Up, Leaving, Up)) val merged2 = g2 merge g1 - merged2.members must be(SortedSet(a1, b2, c1, d2)) - merged2.members.toSeq.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + merged2.members must be(SortedSet(a1, c1, e2)) + merged2.members.toSeq.map(_.status) must be(Seq(Up, Leaving, Up)) } "merge unreachable by status priority" in { - val g1 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = SortedSet(a1, b1, c1, d1))) - val g2 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = SortedSet(a2, b2, c2, d2))) + val g1 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = Set(a1, b1, c1, d1))) + val g2 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = Set(a2, b2, c2, d2))) val merged1 = g1 merge g2 merged1.overview.unreachable must be(Set(a1, b2, c1, d2)) @@ -56,8 +58,8 @@ class GossipSpec extends WordSpec with MustMatchers { } "merge by excluding unreachable from members" in { - val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = SortedSet(c1, d1))) - val g2 = Gossip(members = SortedSet(a2, c2), overview = GossipOverview(unreachable = SortedSet(b2, d2))) + val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = Set(c1, d1))) + val g2 = Gossip(members = SortedSet(a2, c2), overview = GossipOverview(unreachable = Set(b2, d2))) val merged1 = g1 merge g2 merged1.members must be(SortedSet(a1)) @@ -74,8 +76,8 @@ class GossipSpec extends 
WordSpec with MustMatchers { } "start with fresh seen table after merge" in { - val g1 = Gossip(members = SortedSet(a1, b1, c1, d1)).seen(a1.address).seen(b1.address) - val g2 = Gossip(members = SortedSet(a2, b2, c2, d2)).seen(b2.address).seen(c2.address) + val g1 = Gossip(members = SortedSet(a1, e1)).seen(a1.address).seen(a1.address) + val g2 = Gossip(members = SortedSet(a2, e2)).seen(e2.address).seen(e2.address) val merged1 = g1 merge g2 merged1.overview.seen.isEmpty must be(true) @@ -85,5 +87,18 @@ class GossipSpec extends WordSpec with MustMatchers { } + "not have node in both members and unreachable" in intercept[IllegalArgumentException] { + Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = Set(b2))) + } + + "not have live members with wrong status" in intercept[IllegalArgumentException] { + // b2 is Removed + Gossip(members = SortedSet(a2, b2)) + } + + "not have non cluster members in seen table" in intercept[IllegalArgumentException] { + Gossip(members = SortedSet(a1, e1)).seen(a1.address).seen(e1.address).seen(b1.address) + } + } } From afbeb3e5f91512c8b05d88ba8f1cb5a871ee1537 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 13 Jun 2012 15:33:38 +0200 Subject: [PATCH 382/538] import MemberStatus._ --- .../src/main/scala/akka/cluster/Cluster.scala | 43 +++++++++---------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 3fecd7524b..48035a0e4e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -26,6 +26,7 @@ import scala.annotation.tailrec import com.google.protobuf.ByteString import akka.util.internal.HashedWheelTimer import akka.dispatch.MonitorableThreadFactory +import MemberStatus._ /** * Interface for membership change listener. 
@@ -96,7 +97,6 @@ class Member(val address: Address, val status: MemberStatus) extends ClusterMess * Factory and Utility module for Member instances. */ object Member { - import MemberStatus._ /** * Sort Address by host and port @@ -151,7 +151,7 @@ sealed trait MemberStatus extends ClusterMessage { /** * Using the same notion for 'unavailable' as 'non-convergence': DOWN and REMOVED. */ - def isUnavailable: Boolean = this == MemberStatus.Down || this == MemberStatus.Removed + def isUnavailable: Boolean = this == Down || this == Removed } object MemberStatus { @@ -223,7 +223,7 @@ case class Gossip( throw new IllegalArgumentException("Same nodes in both members and unreachable is not allowed, got [%s]" format unreachableAndLive.mkString(", ")) - val allowedLiveMemberStatuses: Set[MemberStatus] = Set(MemberStatus.Joining, MemberStatus.Up, MemberStatus.Leaving, MemberStatus.Exiting) + val allowedLiveMemberStatuses: Set[MemberStatus] = Set(Joining, Up, Leaving, Exiting) def hasNotAllowedLiveMemberStatus(m: Member) = !allowedLiveMemberStatuses.contains(m.status) if (members exists hasNotAllowedLiveMemberStatus) throw new IllegalArgumentException("Live members must have status [%s], got [%s]" @@ -473,7 +473,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } private val state = { - val member = Member(selfAddress, MemberStatus.Joining) + val member = Member(selfAddress, Joining) val versionedGossip = Gossip(members = Gossip.emptyMembers + member) + vclockNode // add me as member and update my vector clock val seenVersionedGossip = versionedGossip seen selfAddress new AtomicReference[State](State(seenVersionedGossip)) @@ -702,7 +702,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val alreadyMember = localMembers.exists(_.address == node) val isUnreachable = localUnreachable.exists { m ⇒ - m.address == node && m.status != MemberStatus.Down && m.status != MemberStatus.Removed + m.address == node && 
m.status != Down && m.status != Removed } if (!alreadyMember && !isUnreachable) { @@ -711,7 +711,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newUnreachableMembers = localUnreachable filterNot { _.address == node } val newOverview = localGossip.overview copy (unreachable = newUnreachableMembers) - val newMembers = localMembers + Member(node, MemberStatus.Joining) // add joining node as Joining + val newMembers = localMembers + Member(node, Joining) // add joining node as Joining val newGossip = localGossip copy (overview = newOverview, members = newMembers) val versionedGossip = newGossip + vclockNode @@ -739,7 +739,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localGossip = localState.latestGossip val localMembers = localGossip.members - val newMembers = localMembers + Member(address, MemberStatus.Leaving) // mark node as LEAVING + val newMembers = localMembers + Member(address, Leaving) // mark node as LEAVING val newGossip = localGossip copy (members = newMembers) val versionedGossip = newGossip + vclockNode @@ -792,7 +792,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // 1. 
check if the node to DOWN is in the 'members' set val downedMember: Option[Member] = localMembers.collectFirst { - case m if m.address == address ⇒ m.copy(status = MemberStatus.Down) + case m if m.address == address ⇒ m.copy(status = Down) } val newMembers = downedMember match { case Some(m) ⇒ @@ -805,9 +805,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newUnreachableMembers = localUnreachableMembers.map { member ⇒ // no need to DOWN members already DOWN - if (member.address == address && member.status != MemberStatus.Down) { + if (member.address == address && member.status != Down) { log.info("Cluster Node [{}] - Marking unreachable node [{}] as DOWN", selfAddress, member.address) - member copy (status = MemberStatus.Down) + member copy (status = Down) } else member } @@ -816,7 +816,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // 4. remove nodes marked as DOWN from the 'seen' table val newSeen = localSeen -- newUnreachablePlusNewlyDownedMembers.collect { - case m if m.status == MemberStatus.Down ⇒ m.address + case m if m.status == Down ⇒ m.address } // update gossip overview @@ -1073,30 +1073,30 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // ---------------------- // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) // ---------------------- - if (member.status == MemberStatus.Joining) { + if (member.status == Joining) { log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) hasChangedState = true - member copy (status = MemberStatus.Up) + member copy (status = Up) } else member } map { member ⇒ // ---------------------- // 2. Move EXITING => REMOVED (once all nodes have seen that this node is EXITING e.g. 
we have a convergence) // ---------------------- - if (member.status == MemberStatus.Exiting) { + if (member.status == Exiting) { log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", selfAddress, member.address) hasChangedState = true - member copy (status = MemberStatus.Removed) + member copy (status = Removed) } else member } map { member ⇒ // ---------------------- // 3. Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) // ---------------------- - if (member.status == MemberStatus.Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { + if (member.status == Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address) hasChangedState = true - member copy (status = MemberStatus.Exiting) + member copy (status = Exiting) } else member } @@ -1112,17 +1112,17 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newUnreachableMembers = localUnreachableMembers.map { member ⇒ // no need to DOWN members already DOWN - if (member.status == MemberStatus.Down) member + if (member.status == Down) member else { log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address) hasChangedState = true - member copy (status = MemberStatus.Down) + member copy (status = Down) } } // removing nodes marked as DOWN from the 'seen' table val newSeen = localSeen -- newUnreachableMembers.collect { - case m if m.status == MemberStatus.Down ⇒ m.address + case m if m.status == Down ⇒ m.address } val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview @@ -1169,8 +1169,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // Else we can't continue to check for convergence // When that is done we check that all 
the entries in the 'seen' table have the same vector clock version if (unreachable.isEmpty || !unreachable.exists { m ⇒ - m.status != MemberStatus.Down && - m.status != MemberStatus.Removed + m.status != Down && m.status != Removed }) { val seen = gossip.overview.seen val views = Set.empty[VectorClock] ++ seen.values From 82645ca3c9b7ad97189b007148be1a788e41c0af Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 13 Jun 2012 16:06:34 +0200 Subject: [PATCH 383/538] Additional check of expectedAddresses in listener tests --- .../akka/cluster/MembershipChangeListenerJoinSpec.scala | 3 ++- .../akka/cluster/MembershipChangeListenerLeavingSpec.scala | 4 +++- .../akka/cluster/MembershipChangeListenerUpSpec.scala | 7 +++++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 1b296c58f1..536fb3b58d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -38,9 +38,10 @@ abstract class MembershipChangeListenerJoinSpec runOn(first) { val joinLatch = TestLatch() + val expectedAddresses = Set(firstAddress, secondAddress) cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.exists(_.status == MemberStatus.Joining)) + if (members.map(_.address) == expectedAddresses && members.exists(_.status == MemberStatus.Joining)) joinLatch.countDown() } }) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 57cec4f389..eda29ea0f0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -54,9 +54,11 @@ abstract class MembershipChangeListenerLeavingSpec runOn(third) { val latch = TestLatch() + val expectedAddresses = Set(firstAddress, secondAddress, thirdAddress) cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Leaving)) + if (members.map(_.address) == expectedAddresses && + members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Leaving)) latch.countDown() } }) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index c89bbe1f0a..f48f9c8d9b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -29,6 +29,7 @@ abstract class MembershipChangeListenerUpSpec lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address "A set of connected cluster systems" must { @@ -38,9 +39,10 @@ abstract class MembershipChangeListenerUpSpec runOn(first, second) { val latch = TestLatch() + val expectedAddresses = Set(firstAddress, secondAddress) cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) + if (members.map(_.address) == expectedAddresses && members.forall(_.status == MemberStatus.Up)) latch.countDown() } }) @@ -59,9 +61,10 @@ abstract class MembershipChangeListenerUpSpec "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { val latch = TestLatch() + val 
expectedAddresses = Set(firstAddress, secondAddress, thirdAddress) cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) + if (members.map(_.address) == expectedAddresses && members.forall(_.status == MemberStatus.Up)) latch.countDown() } }) From bd7bdff2697fab82ef58a310c04b98f176ac4115 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 13 Jun 2012 16:13:49 +0200 Subject: [PATCH 384/538] Improve debug log message of no convergence, see #2222 --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 5bc968920a..a904075d5e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -1152,7 +1152,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val allMembersInSeen = gossip.members.forall(m ⇒ seen.contains(m.address)) if (hasUnreachable) { - log.debug("Cluster Node [{}] - No cluster convergence, due to unreachable [{}].", selfAddress, unreachable) + log.debug("Cluster Node [{}] - No cluster convergence, due to unreachable nodes [{}].", selfAddress, unreachable) None } else if (!allMembersInSeen) { log.debug("Cluster Node [{}] - No cluster convergence, due to members not in seen table [{}].", selfAddress, @@ -1160,13 +1160,14 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) None } else { - val views = (Set.empty[VectorClock] ++ seen.values).size + val views = seen.values.toSet.size if (views == 1) { log.debug("Cluster Node [{}] - Cluster convergence reached: [{}]", selfAddress, gossip.members.mkString(", ")) Some(gossip) } else { - log.debug("Cluster Node [{}] - No cluster convergence, due to [{}] different views.", 
selfAddress, views) + log.debug("Cluster Node [{}] - No cluster convergence, since not all nodes have seen the same state yet. [{} of {}]", + selfAddress, views, seen.values.size) None } } From 391e63332908a7a4b06970592094fbb54072b23c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 13 Jun 2012 16:54:21 +0200 Subject: [PATCH 385/538] Improve docs based on feedback, see #2077 --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 48035a0e4e..c3e16de5e5 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -194,15 +194,15 @@ object Gossip { * When failure detector consider a node as unavailble it will be moved from * `members` to `overview.unreachable`. * - * When a node is downed, either manually or automatically, it is moved from `members` - * to `overview.unreachable` (status Down). It is also removed from `overview.seen` - * table. The node will reside as Down in the `overview.unreachable` set until joining + * When a node is downed, either manually or automatically, its status is changed to Down. + * It is also removed from `overview.seen` table. + * The node will reside as Down in the `overview.unreachable` set until joining * again and it will then go through the normal joining procedure. * * When a Gossip is received the version (vector clock) is used to determine if the * received Gossip is newer or older than the current local Gossip. The received Gossip - * and local Gossip is merged in case of concurrent vector clocks, i.e. not same history. - * When merged the seen table is cleared. + * and local Gossip is merged in case of conflicting version, i.e. vector clocks without + * same history. When merged the seen table is cleared. 
* * TODO document leaving, exiting and removed when that is implemented * From b60210362e089a4df97d18ded0bfc693bf800a7c Mon Sep 17 00:00:00 2001 From: Roland Date: Wed, 13 Jun 2012 17:57:56 +0200 Subject: [PATCH 386/538] make system.actorOf() non-blocking (and working), see #2031 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - introducing RepointableActorRef, which starts out with an UnstartedActorCell which can cheaply be created; the Supervise() message will trigger child.activate() in the supervisor, which means that the actual creation (now with normal ActorCell) happens exactly in the right place and with the right semantics. Messages which were enqueued to the dummy cell are transferred atomically into the ActorCell (using normal .tell()), so message sends keep working exactly as they used to - this enables getting rid of the brittle synchronization around RoutedActorRef by replacing that one with a RepointableActorRef subclass which creates RoutedActorCells upon activate(), with the nice benefit that there is no hurry then to get it right because the new cell is constructed “on the side” misc fixes: - InvalidMessageException is now actually enforced when trying to send “null” - Mailboxes may be created without having an ActorCell, which can come in handy later, because the cell is only needed when this mailbox is going to be scheduled on some executor - remove occurrences of Props(), which is equivalent to Props[Nothing], which is equivalent to «bug» - add test case which verifies that context.actorOf is still synchronous - plus all the stuff I have forgotten. 
--- .../test/scala/akka/actor/ActorRefSpec.scala | 14 +- .../scala/akka/actor/ActorSystemSpec.scala | 12 +- .../test/scala/akka/actor/FSMTimingSpec.scala | 8 +- .../akka/actor/dispatch/ActorModelSpec.scala | 42 ++-- .../dispatch/BalancingDispatcherSpec.scala | 22 +- .../akka/dispatch/MailboxConfigSpec.scala | 39 ++-- .../dispatch/PriorityDispatcherSpec.scala | 16 +- .../routing/ConfiguredLocalRoutingSpec.scala | 21 +- .../test/scala/akka/routing/RoutingSpec.scala | 19 +- .../java/akka/actor/AbstractActorCell.java | 2 + .../java/akka/actor/AbstractActorRef.java | 19 ++ .../src/main/scala/akka/actor/Actor.scala | 3 +- .../src/main/scala/akka/actor/ActorCell.scala | 158 ++++++++++++-- .../src/main/scala/akka/actor/ActorRef.scala | 32 +-- .../scala/akka/actor/ActorRefProvider.scala | 18 +- .../main/scala/akka/actor/ActorSystem.scala | 40 +++- .../akka/actor/RepointableActorRef.scala | 201 ++++++++++++++++++ .../main/scala/akka/actor/TypedActor.scala | 2 +- .../akka/dispatch/AbstractDispatcher.scala | 13 +- .../akka/dispatch/BalancingDispatcher.scala | 12 +- .../main/scala/akka/dispatch/Dispatcher.scala | 3 +- .../main/scala/akka/dispatch/Mailbox.scala | 60 ++++-- .../src/main/scala/akka/routing/Routing.scala | 177 ++++++++------- .../src/main/scala/akka/agent/Agent.scala | 2 +- .../actor/mailbox/DurableMailboxDocSpec.scala | 14 +- .../docs/dispatcher/DispatcherDocSpec.scala | 4 +- .../akka/actor/mailbox/FileBasedMailbox.scala | 14 +- .../akka/actor/mailbox/DurableMailbox.scala | 9 +- .../actor/mailbox/DurableMailboxSpec.scala | 50 +++-- .../akka/remote/RemoteActorRefProvider.scala | 8 +- .../main/scala/akka/remote/RemoteDaemon.scala | 12 +- .../scala/akka/remote/RemoteTransport.scala | 6 +- .../akka/routing/RemoteRouterConfig.scala | 3 +- .../akka/remote/RemoteCommunicationSpec.scala | 2 +- .../testkit/CallingThreadDispatcher.scala | 16 +- .../scala/akka/testkit/TestActorRef.scala | 2 +- .../src/main/scala/akka/testkit/TestKit.scala | 7 +- 37 files changed, 772 
insertions(+), 310 deletions(-) create mode 100644 akka-actor/src/main/java/akka/actor/AbstractActorRef.java create mode 100644 akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala index bec066d97a..ec2d915821 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala @@ -227,7 +227,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { contextStackMustBeEmpty } - filterException[java.lang.IllegalStateException] { + EventFilter[ActorInitializationException](occurrences = 1) intercept { (intercept[java.lang.IllegalStateException] { wrap(result ⇒ actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept({ throw new IllegalStateException("Ur state be b0rked"); new InnerActor })(result))))))) @@ -257,14 +257,14 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { val in = new ObjectInputStream(new ByteArrayInputStream(bytes)) val readA = in.readObject - a.isInstanceOf[LocalActorRef] must be === true - readA.isInstanceOf[LocalActorRef] must be === true + a.isInstanceOf[ActorRefWithCell] must be === true + readA.isInstanceOf[ActorRefWithCell] must be === true (readA eq a) must be === true } val ser = new JavaSerializer(esys) val readA = ser.fromBinary(bytes, None) - readA.isInstanceOf[LocalActorRef] must be === true + readA.isInstanceOf[ActorRefWithCell] must be === true (readA eq a) must be === true } @@ -362,13 +362,13 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { val timeout = Timeout(20000) val ref = system.actorOf(Props(new Actor { def receive = { - case 5 ⇒ sender.tell("five") - case null ⇒ sender.tell("null") + case 5 ⇒ sender.tell("five") + case 0 ⇒ sender.tell("null") } })) val ffive = (ref.ask(5)(timeout)).mapTo[String] - val fnull = (ref.ask(null)(timeout)).mapTo[String] + val fnull = 
(ref.ask(0)(timeout)).mapTo[String] ref ! PoisonPill Await.result(ffive, timeout.duration) must be("five") diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala index b9540fbf33..1a2d64bb41 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala @@ -165,17 +165,21 @@ class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExt system.scheduler.scheduleOnce(200 millis) { system.shutdown() } var failing = false var created = Vector.empty[ActorRef] - while (!system.isTerminated) { + while (!system.isTerminated && system.uptime < 5) { try { val t = system.actorOf(Props[ActorSystemSpec.Terminater]) failing must not be true // because once failing => always failing (it’s due to shutdown) created :+= t } catch { - case e: Exception ⇒ failing = true + case _: IllegalStateException ⇒ failing = true } } - println(created.last) - created filter (!_.isTerminated) must be(Seq()) + if (system.uptime >= 5) { + println(created.last) + println(system.asInstanceOf[ExtendedActorSystem].printTree) + system.uptime must be < 5L + } + created filter (ref ⇒ !ref.isTerminated && !ref.asInstanceOf[ActorRefWithCell].underlying.isInstanceOf[UnstartedCell]) must be(Seq()) } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala index df47c801bb..76d8df1e92 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala @@ -140,13 +140,13 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender { object FSMTimingSpec { def suspend(actorRef: ActorRef): Unit = actorRef match { - case l: LocalActorRef ⇒ l.suspend() - case _ ⇒ + case l: ActorRefWithCell ⇒ l.suspend() + case _ ⇒ } def resume(actorRef: ActorRef): Unit = actorRef 
match { - case l: LocalActorRef ⇒ l.resume() - case _ ⇒ + case l: ActorRefWithCell ⇒ l.resume() + case _ ⇒ } trait State diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 4d83c85b82..00ac2b98a8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -3,24 +3,23 @@ */ package akka.actor.dispatch -import org.scalatest.Assertions._ -import akka.testkit._ -import akka.dispatch._ -import akka.util.Timeout -import java.util.concurrent.atomic.AtomicLong -import java.util.concurrent.atomic.AtomicInteger -import java.util.concurrent.{ ConcurrentHashMap, CountDownLatch, TimeUnit } -import akka.util.Switch import java.rmi.RemoteException -import org.junit.{ After, Test } -import akka.actor._ -import util.control.NoStackTrace -import akka.actor.ActorSystem -import akka.util.duration._ -import akka.event.Logging.Error +import java.util.concurrent.{ TimeUnit, CountDownLatch, ConcurrentHashMap } +import java.util.concurrent.atomic.{ AtomicLong, AtomicInteger } + +import org.junit.runner.RunWith +import org.scalatest.Assertions.{ fail, assert } +import org.scalatest.junit.JUnitRunner + import com.typesafe.config.Config -import akka.util.Duration + +import akka.actor._ +import akka.dispatch._ +import akka.event.Logging.Error import akka.pattern.ask +import akka.testkit._ +import akka.util.{ Timeout, Switch, Duration } +import akka.util.duration._ object ActorModelSpec { @@ -201,7 +200,7 @@ object ActorModelSpec { msgsReceived: Long = statsFor(actorRef, dispatcher).msgsReceived.get(), msgsProcessed: Long = statsFor(actorRef, dispatcher).msgsProcessed.get(), restarts: Long = statsFor(actorRef, dispatcher).restarts.get())(implicit system: ActorSystem) { - val stats = statsFor(actorRef, 
Option(dispatcher).getOrElse(actorRef.asInstanceOf[LocalActorRef].underlying.dispatcher)) + val stats = statsFor(actorRef, Option(dispatcher).getOrElse(actorRef.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].dispatcher)) val deadline = System.currentTimeMillis + 1000 try { await(deadline)(stats.suspensions.get() == suspensions) @@ -241,6 +240,13 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa def newTestActor(dispatcher: String) = system.actorOf(Props[DispatcherActor].withDispatcher(dispatcher)) + def awaitStarted(ref: ActorRef): Unit = { + awaitCond(ref match { + case r: RepointableRef ⇒ r.isStarted + case _ ⇒ true + }, 1 second, 10 millis) + } + protected def interceptedDispatcher(): MessageDispatcherInterceptor protected def dispatcherType: String @@ -328,7 +334,8 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa "not process messages for a suspended actor" in { implicit val dispatcher = interceptedDispatcher() - val a = newTestActor(dispatcher.id).asInstanceOf[LocalActorRef] + val a = newTestActor(dispatcher.id).asInstanceOf[InternalActorRef] + awaitStarted(a) val done = new CountDownLatch(1) a.suspend a ! CountDown(done) @@ -436,6 +443,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa "not double-deregister" in { implicit val dispatcher = interceptedDispatcher() + for (i ← 1 to 1000) system.actorOf(Props.empty) val a = newTestActor(dispatcher.id) a ! 
DoubleStop awaitCond(statsFor(a, dispatcher).registers.get == 1) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/BalancingDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/BalancingDispatcherSpec.scala index 4060587b73..1a5c7e8661 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/BalancingDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/BalancingDispatcherSpec.scala @@ -1,8 +1,12 @@ package akka.actor.dispatch import java.util.concurrent.{ TimeUnit, CountDownLatch } -import akka.dispatch.{ Mailbox, Dispatchers } -import akka.actor.{ LocalActorRef, IllegalActorStateException, Actor, Props } + +import org.junit.runner.RunWith +import org.scalatest.junit.JUnitRunner + +import akka.actor.{ Props, ActorRefWithCell, ActorCell, Actor } +import akka.dispatch.Mailbox import akka.testkit.AkkaSpec object BalancingDispatcherSpec { @@ -51,8 +55,8 @@ class BalancingDispatcherSpec extends AkkaSpec(BalancingDispatcherSpec.config) { "have fast actor stealing work from slow actor" in { val finishedCounter = new CountDownLatch(110) - val slow = system.actorOf(Props(new DelayableActor(50, finishedCounter)).withDispatcher(delayableActorDispatcher)).asInstanceOf[LocalActorRef] - val fast = system.actorOf(Props(new DelayableActor(10, finishedCounter)).withDispatcher(delayableActorDispatcher)).asInstanceOf[LocalActorRef] + val slow = system.actorOf(Props(new DelayableActor(50, finishedCounter)).withDispatcher(delayableActorDispatcher)).asInstanceOf[ActorRefWithCell] + val fast = system.actorOf(Props(new DelayableActor(10, finishedCounter)).withDispatcher(delayableActorDispatcher)).asInstanceOf[ActorRefWithCell] var sentToFast = 0 @@ -76,11 +80,11 @@ class BalancingDispatcherSpec extends AkkaSpec(BalancingDispatcherSpec.config) { } finishedCounter.await(5, TimeUnit.SECONDS) - fast.underlying.mailbox.asInstanceOf[Mailbox].hasMessages must be(false) - 
slow.underlying.mailbox.asInstanceOf[Mailbox].hasMessages must be(false) - fast.underlying.actor.asInstanceOf[DelayableActor].invocationCount must be > sentToFast - fast.underlying.actor.asInstanceOf[DelayableActor].invocationCount must be > - (slow.underlying.actor.asInstanceOf[DelayableActor].invocationCount) + fast.underlying.asInstanceOf[ActorCell].mailbox.asInstanceOf[Mailbox].hasMessages must be(false) + slow.underlying.asInstanceOf[ActorCell].mailbox.asInstanceOf[Mailbox].hasMessages must be(false) + fast.underlying.asInstanceOf[ActorCell].actor.asInstanceOf[DelayableActor].invocationCount must be > sentToFast + fast.underlying.asInstanceOf[ActorCell].actor.asInstanceOf[DelayableActor].invocationCount must be > + (slow.underlying.asInstanceOf[ActorCell].actor.asInstanceOf[DelayableActor].invocationCount) system.stop(slow) system.stop(fast) } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index 8759f1aad9..a36f71c192 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -1,14 +1,17 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ package akka.dispatch -import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } -import java.util.concurrent.{ TimeUnit, BlockingQueue } -import java.util.concurrent.ConcurrentLinkedQueue -import akka.util._ -import akka.util.duration._ -import akka.testkit.AkkaSpec -import akka.actor.{ ActorRef, ActorContext, Props, LocalActorRef } +import java.util.concurrent.{ ConcurrentLinkedQueue, BlockingQueue } + +import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll } + import com.typesafe.config.Config -import akka.actor.ActorSystem + +import akka.actor.{ RepointableRef, Props, ActorSystem, ActorRefWithCell, ActorRef, ActorCell } +import akka.testkit.AkkaSpec +import akka.util.duration._ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAndAfterEach { @@ -75,7 +78,7 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn result } - def createMessageInvocation(msg: Any): Envelope = Envelope(msg, system.deadLetters)(system) + def createMessageInvocation(msg: Any): Envelope = Envelope(msg, system.deadLetters, system) def ensureInitialMailboxState(config: MailboxType, q: MessageQueue) { q must not be null @@ -136,8 +139,8 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn class DefaultMailboxSpec extends MailboxSpec { lazy val name = "The default mailbox implementation" def factory = { - case u: UnboundedMailbox ⇒ u.create(None) - case b: BoundedMailbox ⇒ b.create(None) + case u: UnboundedMailbox ⇒ u.create(None, None) + case b: BoundedMailbox ⇒ b.create(None, None) } } @@ -145,8 +148,8 @@ class PriorityMailboxSpec extends MailboxSpec { val comparator = PriorityGenerator(_.##) lazy val name = "The priority mailbox implementation" def factory = { - case UnboundedMailbox() ⇒ new UnboundedPriorityMailbox(comparator).create(None) - case BoundedMailbox(capacity, pushTimeOut) ⇒ new 
BoundedPriorityMailbox(comparator, capacity, pushTimeOut).create(None) + case UnboundedMailbox() ⇒ new UnboundedPriorityMailbox(comparator).create(None, None) + case BoundedMailbox(capacity, pushTimeOut) ⇒ new BoundedPriorityMailbox(comparator, capacity, pushTimeOut).create(None, None) } } @@ -158,13 +161,13 @@ object CustomMailboxSpec { """ class MyMailboxType(settings: ActorSystem.Settings, config: Config) extends MailboxType { - override def create(owner: Option[ActorContext]) = owner match { + override def create(owner: Option[ActorRef], system: Option[ActorSystem]) = owner match { case Some(o) ⇒ new MyMailbox(o) case None ⇒ throw new Exception("no mailbox owner given") } } - class MyMailbox(owner: ActorContext) extends QueueBasedMessageQueue with UnboundedMessageQueueSemantics { + class MyMailbox(owner: ActorRef) extends QueueBasedMessageQueue with UnboundedMessageQueueSemantics { final val queue = new ConcurrentLinkedQueue[Envelope]() } } @@ -174,7 +177,11 @@ class CustomMailboxSpec extends AkkaSpec(CustomMailboxSpec.config) { "Dispatcher configuration" must { "support custom mailboxType" in { val actor = system.actorOf(Props.empty.withDispatcher("my-dispatcher")) - val queue = actor.asInstanceOf[LocalActorRef].underlying.mailbox.messageQueue + awaitCond(actor match { + case r: RepointableRef ⇒ r.isStarted + case _ ⇒ true + }, 1 second, 10 millis) + val queue = actor.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].mailbox.messageQueue queue.getClass must be(classOf[CustomMailboxSpec.MyMailbox]) } } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala index a9855fef7d..11f8760320 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala @@ -1,12 +1,14 @@ package akka.dispatch -import akka.actor.{ Props, LocalActorRef, Actor 
} -import akka.testkit.AkkaSpec -import akka.pattern.ask -import akka.util.duration._ -import akka.testkit.DefaultTimeout +import org.junit.runner.RunWith +import org.scalatest.junit.JUnitRunner + import com.typesafe.config.Config -import akka.actor.ActorSystem + +import akka.actor.{ Props, InternalActorRef, ActorSystem, Actor } +import akka.pattern.ask +import akka.testkit.{ DefaultTimeout, AkkaSpec } +import akka.util.duration.intToDurationInt object PriorityDispatcherSpec { val config = """ @@ -54,7 +56,7 @@ class PriorityDispatcherSpec extends AkkaSpec(PriorityDispatcherSpec.config) wit case i: Int ⇒ acc = i :: acc case 'Result ⇒ sender.tell(acc) } - }).withDispatcher(dispatcherKey)).asInstanceOf[LocalActorRef] + }).withDispatcher(dispatcherKey)).asInstanceOf[InternalActorRef] actor.suspend //Make sure the actor isn't treating any messages, let it buffer the incoming messages diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index 5bedc8fc33..77ac5daf49 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -4,15 +4,14 @@ package akka.routing import java.util.concurrent.atomic.AtomicInteger - import org.junit.runner.RunWith - -import akka.actor.{ Props, LocalActorRef, Deploy, Actor, ActorRef } +import akka.actor.{ Props, Deploy, Actor, ActorRef } import akka.ConfigurationException import akka.dispatch.Await import akka.pattern.{ ask, gracefulStop } import akka.testkit.{ TestLatch, ImplicitSender, DefaultTimeout, AkkaSpec } import akka.util.duration.intToDurationInt +import akka.actor.UnstartedCell object ConfiguredLocalRoutingSpec { val config = """ @@ -47,6 +46,14 @@ object ConfiguredLocalRoutingSpec { @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ConfiguredLocalRoutingSpec extends 
AkkaSpec(ConfiguredLocalRoutingSpec.config) with DefaultTimeout with ImplicitSender { + def routerConfig(ref: ActorRef): RouterConfig = ref match { + case r: RoutedActorRef ⇒ + r.underlying match { + case c: RoutedActorCell ⇒ c.routerConfig + case _: UnstartedCell ⇒ awaitCond(r.isStarted, 1 second, 10 millis); routerConfig(ref) + } + } + "RouterConfig" must { "be picked up from Props" in { @@ -55,7 +62,7 @@ class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.con case "get" ⇒ sender ! context.props } }).withRouter(RoundRobinRouter(12)), "someOther") - actor.asInstanceOf[LocalActorRef].underlying.props.routerConfig must be === RoundRobinRouter(12) + routerConfig(actor) must be === RoundRobinRouter(12) Await.result(gracefulStop(actor, 3 seconds), 3 seconds) } @@ -65,7 +72,7 @@ class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.con case "get" ⇒ sender ! context.props } }).withRouter(RoundRobinRouter(12)), "config") - actor.asInstanceOf[LocalActorRef].underlying.props.routerConfig must be === RandomRouter(4) + routerConfig(actor) must be === RandomRouter(4) Await.result(gracefulStop(actor, 3 seconds), 3 seconds) } @@ -75,7 +82,7 @@ class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.con case "get" ⇒ sender ! context.props } }).withRouter(FromConfig).withDeploy(Deploy(routerConfig = RoundRobinRouter(12))), "someOther") - actor.asInstanceOf[LocalActorRef].underlying.props.routerConfig must be === RoundRobinRouter(12) + routerConfig(actor) must be === RoundRobinRouter(12) Await.result(gracefulStop(actor, 3 seconds), 3 seconds) } @@ -85,7 +92,7 @@ class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.con case "get" ⇒ sender ! 
context.props } }).withRouter(FromConfig).withDeploy(Deploy(routerConfig = RoundRobinRouter(12))), "config") - actor.asInstanceOf[LocalActorRef].underlying.props.routerConfig must be === RandomRouter(4) + routerConfig(actor) must be === RandomRouter(4) Await.result(gracefulStop(actor, 3 seconds), 3 seconds) } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 35631924cf..a202778fe5 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -12,10 +12,11 @@ import akka.dispatch.Await import akka.util.Duration import akka.ConfigurationException import com.typesafe.config.ConfigFactory -import akka.pattern.ask +import akka.pattern.{ ask, pipe } import java.util.concurrent.ConcurrentHashMap import com.typesafe.config.Config import akka.dispatch.Dispatchers +import akka.util.Timeout object RoutingSpec { @@ -171,6 +172,18 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with expectMsg("restarted") } + "must start in-line for context.actorOf()" in { + system.actorOf(Props(new Actor { + def receive = { + case "start" ⇒ + context.actorOf(Props(new Actor { + def receive = { case x ⇒ sender ! x } + }).withRouter(RoundRobinRouter(2))) ? "hello" pipeTo sender + } + })) ! 
"start" + expectMsg("hello") + } + } "no router" must { @@ -528,7 +541,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } } "support custom router" in { - val myrouter = system.actorOf(Props().withRouter(FromConfig), "myrouter") + val myrouter = system.actorOf(Props.empty.withRouter(FromConfig), "myrouter") myrouter.isTerminated must be(false) } } @@ -540,7 +553,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } "count votes as intended - not as in Florida" in { - val routedActor = system.actorOf(Props().withRouter(VoteCountRouter())) + val routedActor = system.actorOf(Props.empty.withRouter(VoteCountRouter())) routedActor ! DemocratVote routedActor ! DemocratVote routedActor ! RepublicanVote diff --git a/akka-actor/src/main/java/akka/actor/AbstractActorCell.java b/akka-actor/src/main/java/akka/actor/AbstractActorCell.java index 8384e67af0..95fb7368bc 100644 --- a/akka-actor/src/main/java/akka/actor/AbstractActorCell.java +++ b/akka-actor/src/main/java/akka/actor/AbstractActorCell.java @@ -9,11 +9,13 @@ import akka.util.Unsafe; final class AbstractActorCell { final static long mailboxOffset; final static long childrenOffset; + final static long nextNameOffset; static { try { mailboxOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_mailboxDoNotCallMeDirectly")); childrenOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_childrenRefsDoNotCallMeDirectly")); + nextNameOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_nextNameDoNotCallMeDirectly")); } catch(Throwable t){ throw new ExceptionInInitializerError(t); } diff --git a/akka-actor/src/main/java/akka/actor/AbstractActorRef.java b/akka-actor/src/main/java/akka/actor/AbstractActorRef.java new file mode 100644 index 0000000000..97ef09c501 --- /dev/null +++ b/akka-actor/src/main/java/akka/actor/AbstractActorRef.java @@ -0,0 +1,19 @@ +/** + * Copyright (C) 2009-2012 
Typesafe Inc. + */ + +package akka.actor; + +import akka.util.Unsafe; + +final class AbstractActorRef { + final static long cellOffset; + + static { + try { + cellOffset = Unsafe.instance.objectFieldOffset(RepointableActorRef.class.getDeclaredField("_cellDoNotCallMeDirectly")); + } catch(Throwable t){ + throw new ExceptionInInitializerError(t); + } + } +} diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 2721ccffa0..6c9b91d6cf 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -135,8 +135,7 @@ class ActorInitializationException private[akka] (actor: ActorRef, message: Stri * there might be more of them in the future, or not. */ class InvalidMessageException private[akka] (message: String, cause: Throwable = null) - extends AkkaException(message, cause) - with NoStackTrace { + extends AkkaException(message, cause) { def this(msg: String) = this(msg, null) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index ab8571100f..5a1269b5fe 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -168,6 +168,78 @@ trait UntypedActorContext extends ActorContext { } +/** + * INTERNAL API + */ +private[akka] trait Cell { + /** + * The “self” reference which this Cell is attached to. + */ + def self: ActorRef + /** + * The system within which this Cell lives. + */ + def system: ActorSystem + /** + * The system internals where this Cell lives. + */ + def systemImpl: ActorSystemImpl + /** + * Recursively suspend this actor and all its children. + */ + def suspend(): Unit + /** + * Recursively resume this actor and all its children. + */ + def resume(): Unit + /** + * Restart this actor (will recursively restart or stop all children). 
+ */ + def restart(cause: Throwable): Unit + /** + * Recursively terminate this actor and all its children. + */ + def stop(): Unit + /** + * Returns “true” if the actor is locally known to be terminated, “false” if + * alive or uncertain. + */ + def isTerminated: Boolean + /** + * The supervisor of this actor. + */ + def parent: InternalActorRef + /** + * All children of this actor, including only reserved-names. + */ + def childrenRefs: ActorCell.ChildrenContainer + /** + * Enqueue a message to be sent to the actor; may or may not actually + * schedule the actor to run, depending on which type of cell it is. + */ + def tell(message: Any, sender: ActorRef): Unit + /** + * Enqueue a message to be sent to the actor; may or may not actually + * schedule the actor to run, depending on which type of cell it is. + */ + def sendSystemMessage(msg: SystemMessage): Unit + /** + * Returns true if the actor is local, i.e. if it is actually scheduled + * on a Thread in the current JVM when run. + */ + def isLocal: Boolean + /** + * If the actor isLocal, returns whether messages are currently queued, + * “false” otherwise. + */ + def hasMessages: Boolean + /** + * If the actor isLocal, returns the number of messages currently queued, + * which may be a costly operation, 0 otherwise. + */ + def numberOfMessages: Int +} + /** * Everything in here is completely Akka PRIVATE. You will not find any * supported APIs in this place. 
This is not the API you were looking @@ -359,11 +431,13 @@ private[akka] class ActorCell( val system: ActorSystemImpl, val self: InternalActorRef, val props: Props, - @volatile var parent: InternalActorRef) extends UntypedActorContext { + @volatile var parent: InternalActorRef) extends UntypedActorContext with Cell { - import AbstractActorCell.{ mailboxOffset, childrenOffset } + import AbstractActorCell.{ mailboxOffset, childrenOffset, nextNameOffset } import ActorCell._ + final def isLocal = true + final def systemImpl = system protected final def guardian = self @@ -448,7 +522,7 @@ private[akka] class ActorCell( case _ ⇒ true } - private def _actorOf(props: Props, name: String): ActorRef = { + private def _actorOf(props: Props, name: String, async: Boolean): ActorRef = { if (system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { val ser = SerializationExtension(system) ser.serialize(props.creator) match { @@ -459,14 +533,19 @@ private[akka] class ActorCell( } } } - // in case we are currently terminating, swallow creation requests and return EmptyLocalActorRef - if (isTerminating) provider.actorFor(self, Seq(name)) + /* + * in case we are currently terminating, fail external attachChild requests + * (internal calls cannot happen anyway because we are suspended) + */ + if (isTerminating) throw new IllegalStateException("cannot create children while terminating or terminated") else { reserveChild(name) // this name will either be unreserved or overwritten with a real child below val actor = - try provider.actorOf(systemImpl, props, self, self.path / name, false, None, true) - catch { + try { + provider.actorOf(systemImpl, props, self, self.path / name, + systemService = false, deploy = None, lookupDeploy = true, async = async) + } catch { case NonFatal(e) ⇒ unreserveChild(name) throw e @@ -476,36 +555,53 @@ private[akka] class ActorCell( } } - def actorOf(props: Props): ActorRef = _actorOf(props, randomName()) + 
def actorOf(props: Props): ActorRef = _actorOf(props, randomName(), async = false) - def actorOf(props: Props, name: String): ActorRef = { + def actorOf(props: Props, name: String): ActorRef = _actorOf(props, checkName(name), async = false) + + private def checkName(name: String): String = { import ActorPath.ElementRegex name match { case null ⇒ throw new InvalidActorNameException("actor name must not be null") case "" ⇒ throw new InvalidActorNameException("actor name must not be empty") - case ElementRegex() ⇒ // this is fine + case ElementRegex() ⇒ name case _ ⇒ throw new InvalidActorNameException("illegal actor name '" + name + "', must conform to " + ElementRegex) } - _actorOf(props, name) } + private[akka] def attachChild(props: Props, name: String): ActorRef = + _actorOf(props, checkName(name), async = true) + + private[akka] def attachChild(props: Props): ActorRef = + _actorOf(props, randomName(), async = true) + final def stop(actor: ActorRef): Unit = { - if (childrenRefs.getByRef(actor).isDefined) shallDie(actor) + val started = actor match { + case r: RepointableRef ⇒ r.isStarted + case _ ⇒ true + } + if (childrenRefs.getByRef(actor).isDefined && started) shallDie(actor) actor.asInstanceOf[InternalActorRef].stop() } var currentMessage: Envelope = _ var actor: Actor = _ private var behaviorStack: Stack[Actor.Receive] = Stack.empty - @volatile var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status + var watching: Set[ActorRef] = emptyActorRefSet var watchedBy: Set[ActorRef] = emptyActorRefSet - val nextNameSequence = new AtomicLong - final protected def randomName(): String = Helpers.base64(nextNameSequence.getAndIncrement()) + @volatile private var _nextNameDoNotCallMeDirectly = 0L + final protected def randomName(): String = { + @tailrec def inc(): Long = { + val current = Unsafe.instance.getLongVolatile(this, nextNameOffset) + if (Unsafe.instance.compareAndSwapLong(this, nextNameOffset, 
current, current + 1)) current + else inc() + } + Helpers.base64(inc()) + } - @inline - final val dispatcher: MessageDispatcher = system.dispatchers.lookup(props.dispatcher) + @volatile private var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status /** * INTERNAL API @@ -525,6 +621,12 @@ private[akka] class ActorCell( else oldMailbox } + final def hasMessages: Boolean = mailbox.hasMessages + + final def numberOfMessages: Int = mailbox.numberOfMessages + + val dispatcher: MessageDispatcher = system.dispatchers.lookup(props.dispatcher) + /** * UntypedActorContext impl */ @@ -532,20 +634,22 @@ private[akka] class ActorCell( final def isTerminated: Boolean = mailbox.isClosed - final def start(): Unit = { + final def start(): this.type = { + /* * Create the mailbox and enqueue the Create() message to ensure that * this is processed before anything else. */ swapMailbox(dispatcher.createMailbox(this)) + mailbox.setActor(this) + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ mailbox.systemEnqueue(self, Create()) - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - parent.sendSystemMessage(akka.dispatch.Supervise(self)) - // This call is expected to start off the actor by scheduling its mailbox. 
dispatcher.attach(this) + + this } // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ @@ -583,8 +687,10 @@ private[akka] class ActorCell( final def getChildren(): java.lang.Iterable[ActorRef] = scala.collection.JavaConverters.asJavaIterableConverter(children).asJava - final def tell(message: Any, sender: ActorRef): Unit = - dispatcher.dispatch(this, Envelope(message, if (sender eq null) system.deadLetters else sender)(system)) + def tell(message: Any, sender: ActorRef): Unit = + dispatcher.dispatch(this, Envelope(message, if (sender eq null) system.deadLetters else sender, system)) + + override def sendSystemMessage(message: SystemMessage): Unit = dispatcher.systemDispatch(this, message) final def sender: ActorRef = currentMessage match { case null ⇒ system.deadLetters @@ -719,6 +825,7 @@ private[akka] class ActorCell( def supervise(child: ActorRef): Unit = if (!isTerminating) { if (childrenRefs.getByRef(child).isEmpty) addChild(child) + handleSupervise(child) if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) } @@ -904,6 +1011,11 @@ private[akka] class ActorCell( } } + protected def handleSupervise(child: ActorRef): Unit = child match { + case r: RepointableActorRef ⇒ r.activate() + case _ ⇒ + } + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ final def restart(cause: Throwable): Unit = dispatcher.systemDispatch(this, Recreate(cause)) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 0620a73a28..bde2a2194c 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -167,6 +167,10 @@ private[akka] trait LocalRef extends ActorRefScope { final def isLocal = true } +private[akka] trait RepointableRef extends ActorRefScope { + def isStarted: Boolean +} + /** * Internal trait for assembling all the functionality 
needed internally on * ActorRefs. NOTE THAT THIS IS NOT A STABLE EXTERNAL INTERFACE! @@ -210,6 +214,10 @@ private[akka] abstract class InternalActorRef extends ActorRef with ScalaActorRe def isLocal: Boolean } +private[akka] abstract class ActorRefWithCell extends InternalActorRef { this: ActorRefScope ⇒ + def underlying: Cell +} + /** * This is an internal look-up failure token, not useful for anything else. */ @@ -228,21 +236,21 @@ private[akka] class LocalActorRef private[akka] ( _props: Props, _supervisor: InternalActorRef, override val path: ActorPath) - extends InternalActorRef with LocalRef { + extends ActorRefWithCell with LocalRef { /* - * actorCell.start() publishes actorCell & this to the dispatcher, which - * means that messages may be processed theoretically before the constructor - * ends. The JMM guarantees visibility for final fields only after the end - * of the constructor, so publish the actorCell safely by making it a - * @volatile var which is NOT TO BE WRITTEN TO. The alternative would be to - * move start() outside of the constructor, which would basically require - * us to use purely factory methods for creating LocalActorRefs. + * Safe publication of this class’s fields is guaranteed by mailbox.setActor() + * which is called indirectly from actorCell.start() (if you’re wondering why + * this is at all important, remember that under the JMM final fields are only + * frozen at the _end_ of the constructor, but we are publishing “this” before + * that is reached). 
*/ - @volatile - private var actorCell = newActorCell(_system, this, _props, _supervisor) + private val actorCell: ActorCell = newActorCell(_system, this, _props, _supervisor) actorCell.start() + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + _supervisor.sendSystemMessage(akka.dispatch.Supervise(this)) + protected def newActorCell(system: ActorSystemImpl, ref: InternalActorRef, props: Props, supervisor: InternalActorRef): ActorCell = new ActorCell(system, ref, props, supervisor) @@ -313,9 +321,9 @@ private[akka] class LocalActorRef private[akka] ( // ========= AKKA PROTECTED FUNCTIONS ========= - protected[akka] def underlying: ActorCell = actorCell + def underlying: ActorCell = actorCell - override def sendSystemMessage(message: SystemMessage): Unit = underlying.dispatcher.systemDispatch(underlying, message) + override def sendSystemMessage(message: SystemMessage): Unit = actorCell.sendSystemMessage(message) override def !(message: Any)(implicit sender: ActorRef = null): Unit = actorCell.tell(message, sender) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 64ffe6d39d..8195aea64c 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -104,7 +104,8 @@ trait ActorRefProvider { path: ActorPath, systemService: Boolean, deploy: Option[Deploy], - lookupDeploy: Boolean): InternalActorRef + lookupDeploy: Boolean, + async: Boolean): InternalActorRef /** * Create actor reference for a specified local or remote path. 
If no such @@ -482,10 +483,12 @@ class LocalActorRefProvider( } lazy val guardian: InternalActorRef = - actorOf(system, guardianProps, rootGuardian, rootPath / "user", systemService = true, None, false) + actorOf(system, guardianProps, rootGuardian, rootPath / "user", + systemService = true, deploy = None, lookupDeploy = false, async = false) lazy val systemGuardian: InternalActorRef = - actorOf(system, guardianProps.withCreator(new SystemGuardian), rootGuardian, rootPath / "system", systemService = true, None, false) + actorOf(system, guardianProps.withCreator(new SystemGuardian), rootGuardian, rootPath / "system", + systemService = true, deploy = None, lookupDeploy = false, async = false) lazy val tempContainer = new VirtualPathContainer(system.provider, tempNode, rootGuardian, log) @@ -539,14 +542,17 @@ class LocalActorRefProvider( } def actorOf(system: ActorSystemImpl, props: Props, supervisor: InternalActorRef, path: ActorPath, - systemService: Boolean, deploy: Option[Deploy], lookupDeploy: Boolean): InternalActorRef = { + systemService: Boolean, deploy: Option[Deploy], lookupDeploy: Boolean, async: Boolean): InternalActorRef = { props.routerConfig match { - case NoRouter ⇒ new LocalActorRef(system, props, supervisor, path) // create a local actor + case NoRouter ⇒ + if (async) new RepointableActorRef(system, props, supervisor, path).initialize() + else new LocalActorRef(system, props, supervisor, path) case router ⇒ val lookup = if (lookupDeploy) deployer.lookup(path) else None val fromProps = Iterator(props.deploy.copy(routerConfig = props.deploy.routerConfig withFallback router)) val d = fromProps ++ deploy.iterator ++ lookup.iterator reduce ((a, b) ⇒ b withFallback a) - new RoutedActorRef(system, props.withRouter(d.routerConfig), supervisor, path) + val ref = new RoutedActorRef(system, props.withRouter(d.routerConfig), supervisor, path).initialize() + if (async) ref else ref.activate() } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala 
b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index da8a6c4734..030fa4a8b5 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -423,6 +423,11 @@ abstract class ExtendedActorSystem extends ActorSystem { * creation. */ def dynamicAccess: DynamicAccess + + /** + * For debugging: traverse actor hierarchy and make string representation. + */ + def printTree: String } private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, classLoader: ClassLoader) extends ExtendedActorSystem { @@ -482,21 +487,21 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, private[akka] def systemActorOf(props: Props, name: String): ActorRef = { systemGuardian match { - case g: LocalActorRef ⇒ g.underlying.actorOf(props, name) + case g: LocalActorRef ⇒ g.underlying.attachChild(props, name) case s ⇒ throw new UnsupportedOperationException("unknown systemGuardian type " + s.getClass) } } def actorOf(props: Props, name: String): ActorRef = { guardian match { - case g: LocalActorRef ⇒ g.underlying.actorOf(props, name) + case g: LocalActorRef ⇒ g.underlying.attachChild(props, name) case s ⇒ throw new UnsupportedOperationException("unknown guardian type " + s.getClass) } } def actorOf(props: Props): ActorRef = { guardian match { - case g: LocalActorRef ⇒ g.underlying.actorOf(props) + case g: LocalActorRef ⇒ g.underlying.attachChild(props) case s ⇒ throw new UnsupportedOperationException("unknown guardian type " + s.getClass) } } @@ -546,10 +551,10 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, def dequeue() = null def hasMessages = false def numberOfMessages = 0 - def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () + def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = () } //FIXME Why do we need this at all? 
- val deadLetterMailbox: Mailbox = new Mailbox(null, deadLetterQueue) { + val deadLetterMailbox: Mailbox = new Mailbox(deadLetterQueue) { becomeClosed() def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = deadLetters ! DeadLetter(handle, receiver, receiver) @@ -689,6 +694,31 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, override def toString: String = lookupRoot.path.root.address.toString + override def printTree: String = { + def printNode(node: ActorRef, indent: String): String = { + node match { + case wc: ActorRefWithCell ⇒ + val cell = wc.underlying + indent + "-> " + node.path.name + " " + Logging.simpleName(node) + " " + + (cell match { + case real: ActorCell ⇒ if (real.actor ne null) real.actor.getClass else "null" + case _ ⇒ Logging.simpleName(cell) + }) + + " " + (cell.childrenRefs match { + case ActorCell.TerminatingChildrenContainer(_, toDie, reason) ⇒ + "Terminating(" + reason + ")" + + (toDie.toSeq.sorted mkString ("\n" + indent + " toDie: ", "\n" + indent + " ", "")) + case x ⇒ Logging.simpleName(x) + }) + + (if (cell.childrenRefs.children.isEmpty) "" else "\n") + + (cell.childrenRefs.children.toSeq.sorted map (printNode(_, indent + " |")) mkString ("\n")) + case _ ⇒ + indent + node.path.name + " " + Logging.simpleName(node) + } + } + printNode(actorFor("/"), "") + } + final class TerminationCallbacks extends Runnable with Awaitable[Unit] { private val lock = new ReentrantGuard private var callbacks: Stack[Runnable] = _ //non-volatile since guarded by the lock diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala new file mode 100644 index 0000000000..1344735993 --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala @@ -0,0 +1,201 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.actor + +import akka.util.Unsafe +import scala.annotation.tailrec +import akka.dispatch.SystemMessage +import akka.dispatch.Mailbox +import akka.dispatch.Terminate +import akka.dispatch.Envelope +import akka.dispatch.Supervise +import akka.dispatch.Create +import akka.dispatch.MessageDispatcher +import java.util.concurrent.locks.ReentrantLock +import akka.event.Logging.Warning +import scala.collection.mutable.Queue + +/** + * This actor ref starts out with some dummy cell (by default just enqueuing + * messages into vectors protected by ReentrantLock), it must be initialize()’d + * before it can be sent to, and it will be activate()’d by its supervisor in + * response to the Supervise() message, which will replace the contained Cell + * with a fully functional one, transfer all messages from dummy to real queue + * and swap out the cell ref. + */ +private[akka] class RepointableActorRef( + val system: ActorSystemImpl, + val props: Props, + val supervisor: InternalActorRef, + val path: ActorPath) + extends ActorRefWithCell with RepointableRef { + + import AbstractActorRef.cellOffset + + @volatile private var _cellDoNotCallMeDirectly: Cell = _ + + def underlying: Cell = Unsafe.instance.getObjectVolatile(this, cellOffset).asInstanceOf[Cell] + + @tailrec final def swapCell(next: Cell): Cell = { + val old = underlying + if (Unsafe.instance.compareAndSwapObject(this, cellOffset, old, next)) old else swapCell(next) + } + + /* + * Initialize: make a dummy cell which holds just a mailbox, then tell our + * supervisor that we exist so that he can create the real Cell in + * handleSupervise(). + * + * This is protected so that others can have different initialization. 
+ */ + def initialize(): this.type = { + swapCell(new UnstartedCell(system, this, props, supervisor)) + supervisor.sendSystemMessage(Supervise(this)) + this + } + + /** + * This method is supposedly called by the supervisor in handleSupervise() + * to replace the UnstartedCell with the real one. It assumes no concurrent + * modification of the underlying Cell. + */ + def activate(): this.type = { + underlying match { + case u: UnstartedCell ⇒ u.replaceWith(newCell()) + case _ ⇒ // this happens routinely for things which were created async=false + } + this + } + + def newCell(): Cell = new ActorCell(system, this, props, supervisor).start() + + def suspend(): Unit = underlying.suspend() + + def resume(): Unit = underlying.resume() + + def stop(): Unit = underlying.stop() + + def restart(cause: Throwable): Unit = underlying.restart(cause) + + def isStarted: Boolean = !underlying.isInstanceOf[UnstartedCell] + + def isTerminated: Boolean = underlying.isTerminated + + def provider: ActorRefProvider = system.provider + + def isLocal: Boolean = underlying.isLocal + + def getParent: InternalActorRef = underlying.parent + + def getChild(name: Iterator[String]): InternalActorRef = + if (name.hasNext) { + name.next match { + case ".." 
⇒ getParent.getChild(name) + case "" ⇒ getChild(name) + case other ⇒ + underlying.childrenRefs.getByName(other) match { + case Some(crs) ⇒ crs.child.asInstanceOf[InternalActorRef].getChild(name) + case None ⇒ Nobody + } + } + } else this + + def !(message: Any)(implicit sender: ActorRef = null) = underlying.tell(message, sender) + + def sendSystemMessage(message: SystemMessage) = underlying.sendSystemMessage(message) + + @throws(classOf[java.io.ObjectStreamException]) + protected def writeReplace(): AnyRef = SerializedActorRef(path) +} + +private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, val self: RepointableActorRef, val props: Props, val supervisor: InternalActorRef) + extends Cell { + + /* + * This lock protects all accesses to this cell’s queues. It also ensures + * safe switching to the started ActorCell. + */ + val lock = new ReentrantLock + + // use Envelope to keep on-send checks in the same place + val queue: Queue[Envelope] = Queue() + val systemQueue: Queue[SystemMessage] = Queue() + + def replaceWith(cell: Cell): Unit = { + lock.lock() + try { + /* + * The CallingThreadDispatcher nicely dives under the ReentrantLock and + * breaks things by enqueueing into stale queues from within the message + * processing which happens in-line for sendSystemMessage() and tell(). + * Since this is the only possible way to f*ck things up within this + * lock, double-tap (well, N-tap, really); concurrent modification is + * still not possible because we’re the only thread accessing the queues. 
+ */ + var interrupted = false + while (systemQueue.nonEmpty || queue.nonEmpty) { + while (systemQueue.nonEmpty) { + val msg = systemQueue.dequeue() + try cell sendSystemMessage msg catch { case _: InterruptedException ⇒ interrupted = true } + } + if (queue.nonEmpty) { + val envelope = queue.dequeue() + try cell tell (envelope.message, envelope.sender) catch { case _: InterruptedException ⇒ interrupted = true } + } + } + if (interrupted) throw new InterruptedException + } finally try + self.swapCell(cell) + finally + lock.unlock() + } + + def system: ActorSystem = systemImpl + def suspend(): Unit = {} + def resume(): Unit = {} + def restart(cause: Throwable): Unit = {} + def stop(): Unit = sendSystemMessage(Terminate()) + def isTerminated: Boolean = false + def parent: InternalActorRef = supervisor + def childrenRefs: ActorCell.ChildrenContainer = ActorCell.EmptyChildrenContainer + def tell(message: Any, sender: ActorRef): Unit = { + lock.lock() + try { + if (self.underlying eq this) queue enqueue Envelope(message, sender, system) + else self.underlying.tell(message, sender) + } finally { + lock.unlock() + } + } + def sendSystemMessage(msg: SystemMessage): Unit = { + lock.lock() + try { + if (self.underlying eq this) systemQueue enqueue msg + else self.underlying.sendSystemMessage(msg) + } finally { + lock.unlock() + } + } + def isLocal = true + def hasMessages: Boolean = { + lock.lock() + try { + if (self.underlying eq this) !queue.isEmpty + else self.underlying.hasMessages + } finally { + lock.unlock() + } + } + def numberOfMessages: Int = { + lock.lock() + try { + if (self.underlying eq this) queue.size + else self.underlying.numberOfMessages + } finally { + lock.unlock() + } + } + +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 9bb560417b..1933015e88 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ 
b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -592,7 +592,7 @@ case class TypedProps[T <: AnyRef] protected[TypedProps] ( /** * Returns the akka.actor.Props representation of this TypedProps */ - def actorProps(): Props = if (dispatcher == Props().dispatcher) Props() else Props(dispatcher = dispatcher) + def actorProps(): Props = if (dispatcher == Props.default.dispatcher) Props.default else Props(dispatcher = dispatcher) } /** diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 12eea14ffc..546373c33f 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -16,8 +16,10 @@ import akka.event.Logging.LogEventException import akka.jsr166y.{ ForkJoinTask, ForkJoinPool } import akka.util.{ Unsafe, Duration, NonFatal, Index } -final case class Envelope(val message: Any, val sender: ActorRef)(system: ActorSystem) { - if (message.isInstanceOf[AnyRef]) { +final case class Envelope private (val message: Any, val sender: ActorRef) + +object Envelope { + def apply(message: Any, sender: ActorRef, system: ActorSystem): Envelope = { val msg = message.asInstanceOf[AnyRef] if (msg eq null) throw new InvalidMessageException("Message is null") if (system.settings.SerializeAllMessages && !msg.isInstanceOf[NoSerializationVerificationNeeded]) { @@ -30,6 +32,7 @@ final case class Envelope(val message: Any, val sender: ActorRef)(system: ActorS } } } + new Envelope(message, sender) } } @@ -228,8 +231,8 @@ private[akka] object MessageDispatcher { } { val status = if (a.isTerminated) " (terminated)" else " (alive)" val messages = a match { - case l: LocalActorRef ⇒ " " + l.underlying.mailbox.numberOfMessages + " messages" - case _ ⇒ " " + a.getClass + case r: ActorRefWithCell ⇒ " " + r.underlying.numberOfMessages + " messages" + case _ ⇒ " " + a.getClass } val parent = a match { case i: 
InternalActorRef ⇒ ", parent: " + i.getParent @@ -265,7 +268,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext /** * Creates and returns a mailbox for the given actor. */ - protected[akka] def createMailbox(actor: ActorCell): Mailbox //FIXME should this really be private[akka]? + protected[akka] def createMailbox(actor: Cell): Mailbox //FIXME should this really be private[akka]? /** * Identifier of this dispatcher, corresponds to the full key diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 6beee3c9da..5b8c5209b0 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -9,6 +9,7 @@ import annotation.tailrec import akka.util.{ Duration, Helpers } import java.util.{ Comparator, Iterator } import java.util.concurrent.{ Executor, LinkedBlockingQueue, ConcurrentLinkedQueue, ConcurrentSkipListSet } +import akka.actor.ActorSystemImpl /** * An executor based event driven dispatcher which will try to redistribute work from busy actors to idle actors. 
It is assumed @@ -46,24 +47,25 @@ class BalancingDispatcher( /** * INTERNAL USE ONLY */ - private[akka] val messageQueue: MessageQueue = mailboxType.create(None) + private[akka] val messageQueue: MessageQueue = mailboxType.create(None, None) - private class SharingMailbox(_actor: ActorCell, _messageQueue: MessageQueue) extends Mailbox(_actor, _messageQueue) with DefaultSystemMessageQueue { + private class SharingMailbox(val system: ActorSystemImpl, _messageQueue: MessageQueue) + extends Mailbox(_messageQueue) with DefaultSystemMessageQueue { override def cleanUp(): Unit = { - val dlq = actor.systemImpl.deadLetterMailbox + val dlq = system.deadLetterMailbox //Don't call the original implementation of this since it scraps all messages, and we don't want to do that var message = systemDrain(NoMessage) while (message ne null) { // message must be “virgin” before being able to systemEnqueue again val next = message.next message.next = null - dlq.systemEnqueue(actor.self, message) + dlq.systemEnqueue(system.deadLetters, message) message = next } } } - protected[akka] override def createMailbox(actor: ActorCell): Mailbox = new SharingMailbox(actor, messageQueue) + protected[akka] override def createMailbox(actor: akka.actor.Cell): Mailbox = new SharingMailbox(actor.systemImpl, messageQueue) protected[akka] override def register(actor: ActorCell): Unit = { super.register(actor) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 3c17ab8db4..d382cc5ecc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -82,7 +82,8 @@ class Dispatcher( /** * INTERNAL USE ONLY */ - protected[akka] def createMailbox(actor: ActorCell): Mailbox = new Mailbox(actor, mailboxType.create(Some(actor))) with DefaultSystemMessageQueue + protected[akka] def createMailbox(actor: akka.actor.Cell): Mailbox = + new 
Mailbox(mailboxType.create(Some(actor.self), Some(actor.system))) with DefaultSystemMessageQueue /** * INTERNAL USE ONLY diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index b6af478ac7..fd2da63a8b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -6,7 +6,7 @@ package akka.dispatch import akka.AkkaException import java.util.{ Comparator, PriorityQueue, Queue, Deque } import akka.util._ -import akka.actor.{ ActorCell, ActorRef } +import akka.actor.{ ActorCell, ActorRef, Cell } import java.util.concurrent._ import annotation.tailrec import akka.event.Logging.Error @@ -48,11 +48,32 @@ private[akka] object Mailbox { * * INTERNAL API */ -private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: MessageQueue) +private[akka] abstract class Mailbox(val messageQueue: MessageQueue) extends SystemMessageQueue with Runnable { import Mailbox._ + /* + * This is needed for actually executing the mailbox, i.e. invoking the + * ActorCell. There are situations (e.g. RepointableActorRef) where a Mailbox + * is constructed but we know that we will not execute it, in which case this + * will be null. It must be a var to support switching into an “active” + * mailbox, should the owning ActorRef turn local. + * + * ANOTHER THING, IMPORTANT: + * + * actorCell.start() publishes actorCell & self to the dispatcher, which + * means that messages may be processed theoretically before self’s constructor + * ends. The JMM guarantees visibility for final fields only after the end + * of the constructor, so safe publication requires that THIS WRITE BELOW + * stay as it is. + */ + @volatile + var actor: ActorCell = _ + def setActor(cell: ActorCell): Unit = actor = cell + + def dispatcher: MessageDispatcher = actor.dispatcher + /** * Try to enqueue the message to this queue, or throw an exception. 
*/ @@ -237,11 +258,12 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes * if we closed the mailbox, we must dump the remaining system messages * to deadLetters (this is essential for DeathWatch) */ + val dlm = actor.systemImpl.deadLetterMailbox while (nextMessage ne null) { val msg = nextMessage nextMessage = nextMessage.next msg.next = null - try actor.systemImpl.deadLetterMailbox.systemEnqueue(actor.self, msg) + try dlm.systemEnqueue(actor.self, msg) catch { case NonFatal(e) ⇒ actor.system.eventStream.publish( Error(e, actor.self.path.toString, this.getClass, "error while enqueuing " + msg + " to deadLetters: " + e.getMessage)) @@ -251,9 +273,6 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes if (failure ne null) actor.handleInvokeFailure(failure, failure.getMessage) } - @inline - final def dispatcher: MessageDispatcher = actor.dispatcher - /** * Overridable callback to clean up the mailbox, * called when an actor is unregistered. @@ -272,7 +291,7 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } if (messageQueue ne null) // needed for CallingThreadDispatcher, which never calls Mailbox.run() - messageQueue.cleanUp(actor, actor.systemImpl.deadLetterQueue) + messageQueue.cleanUp(actor.self, actor.systemImpl.deadLetterQueue) } } @@ -310,7 +329,7 @@ trait MessageQueue { * which is passed in. The owner of this MessageQueue is passed in if * available (e.g. for creating DeadLetters()), “/deadletters” otherwise. 
*/ - def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit + def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit } /** @@ -338,10 +357,11 @@ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ @tailrec final def systemEnqueue(receiver: ActorRef, message: SystemMessage): Unit = { assert(message.next eq null) - if (Mailbox.debug) println(actor.self + " having enqueued " + message) + if (Mailbox.debug) println(receiver + " having enqueued " + message) val head = systemQueueGet - if (head == NoMessage) actor.system.deadLetterMailbox.systemEnqueue(receiver, message) - else { + if (head == NoMessage) { + if (actor ne null) actor.systemImpl.deadLetterMailbox.systemEnqueue(receiver, message) + } else { /* * this write is safely published by the compareAndSet contained within * systemQueuePut; “Intra-Thread Semantics” on page 12 of the JSR133 spec @@ -373,11 +393,11 @@ trait QueueBasedMessageQueue extends MessageQueue { def queue: Queue[Envelope] def numberOfMessages = queue.size def hasMessages = !queue.isEmpty - def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = { + def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = { if (hasMessages) { var envelope = dequeue while (envelope ne null) { - deadLetters.enqueue(owner.self, envelope) + deadLetters.enqueue(owner, envelope) envelope = dequeue } } @@ -459,7 +479,7 @@ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { * MailboxType is a factory to create MessageQueues for an optionally provided ActorContext */ trait MailboxType { - def create(owner: Option[ActorContext]): MessageQueue + def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue } /** @@ -469,7 +489,7 @@ case class UnboundedMailbox() extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this() - final override def create(owner: Option[ActorContext]): MessageQueue = + final override def create(owner: Option[ActorRef], system: 
Option[ActorSystem]): MessageQueue = new ConcurrentLinkedQueue[Envelope]() with QueueBasedMessageQueue with UnboundedMessageQueueSemantics { final def queue: Queue[Envelope] = this } @@ -486,7 +506,7 @@ case class BoundedMailbox( final val capacity: Int, final val pushTimeOut: Durat if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative") if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null") - final override def create(owner: Option[ActorContext]): MessageQueue = + final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new LinkedBlockingQueue[Envelope](capacity) with QueueBasedMessageQueue with BoundedMessageQueueSemantics { final def queue: BlockingQueue[Envelope] = this final val pushTimeOut = BoundedMailbox.this.pushTimeOut @@ -499,7 +519,7 @@ case class BoundedMailbox( final val capacity: Int, final val pushTimeOut: Durat */ class UnboundedPriorityMailbox( final val cmp: Comparator[Envelope], final val initialCapacity: Int) extends MailboxType { def this(cmp: Comparator[Envelope]) = this(cmp, 11) - final override def create(owner: Option[ActorContext]): MessageQueue = + final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new PriorityBlockingQueue[Envelope](initialCapacity, cmp) with QueueBasedMessageQueue with UnboundedMessageQueueSemantics { final def queue: Queue[Envelope] = this } @@ -514,7 +534,7 @@ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val cap if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative") if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null") - final override def create(owner: Option[ActorContext]): MessageQueue = + final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): 
MessageQueue = new BoundedBlockingQueue[Envelope](capacity, new PriorityQueue[Envelope](11, cmp)) with QueueBasedMessageQueue with BoundedMessageQueueSemantics { final def queue: BlockingQueue[Envelope] = this final val pushTimeOut = BoundedPriorityMailbox.this.pushTimeOut @@ -528,7 +548,7 @@ case class UnboundedDequeBasedMailbox() extends MailboxType { def this(settings: ActorSystem.Settings, config: Config) = this() - final override def create(owner: Option[ActorContext]): MessageQueue = + final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new LinkedBlockingDeque[Envelope]() with DequeBasedMessageQueue with UnboundedDequeBasedMessageQueueSemantics { final val queue = this } @@ -545,7 +565,7 @@ case class BoundedDequeBasedMailbox( final val capacity: Int, final val pushTime if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedDequeBasedMailbox can not be negative") if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedDequeBasedMailbox can not be null") - final override def create(owner: Option[ActorContext]): MessageQueue = + final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new LinkedBlockingDeque[Envelope](capacity) with DequeBasedMessageQueue with BoundedDequeBasedMessageQueueSemantics { final val queue = this final val pushTimeOut = BoundedDequeBasedMailbox.this.pushTimeOut diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index bcd92794da..5800e14ed2 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -23,42 +23,28 @@ import scala.runtime.ScalaRunTime * send a message to on (or more) of these actors. 
*/ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _supervisor: InternalActorRef, _path: ActorPath) - extends LocalActorRef( - _system, - _props.copy(creator = () ⇒ _props.routerConfig.createActor(), dispatcher = _props.routerConfig.routerDispatcher), - _supervisor, - _path) { + extends RepointableActorRef(_system, _props, _supervisor, _path) { - /* - * CAUTION: RoutedActorRef is PROBLEMATIC - * ====================================== - * - * We are constructing/assembling the children outside of the scope of the - * Router actor, inserting them in its childrenRef list, which is not at all - * synchronized. This is done exactly once at start-up, all other accesses - * are done from the Router actor. This means that the only thing which is - * really hairy is making sure that the Router does not touch its childrenRefs - * before we are done with them: lock the monitor of the actor cell (hence the - * override of newActorCell) and use that to block the Router constructor for - * as long as it takes to setup the RoutedActorRef itself. - * - * ===> I M P O R T A N T N O T I C E <=== - * - * DO NOT THROW ANY EXCEPTIONS BEFORE THE FOLLOWING TRY-BLOCK WITHOUT - * EXITING THE MONITOR OF THE actorCell! - * - * This is important, just don’t do it! No kidding. 
- */ - override def newActorCell( - system: ActorSystemImpl, - ref: InternalActorRef, - props: Props, - supervisor: InternalActorRef): ActorCell = { - val cell = super.newActorCell(system, ref, props, supervisor) - Unsafe.instance.monitorEnter(cell) - cell + // verify that a BalancingDispatcher is not used with a Router + if (_props.routerConfig != NoRouter && _system.dispatchers.isBalancingDispatcher(_props.routerConfig.routerDispatcher)) { + throw new ConfigurationException( + "Configuration for " + this + + " is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") } + _props.routerConfig.verifyConfig() + + override def newCell(): Cell = new RoutedActorCell(system, this, props, supervisor) + +} + +private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActorRef, _props: Props, _supervisor: InternalActorRef) + extends ActorCell( + _system, + _ref, + _props.copy(creator = () ⇒ _props.routerConfig.createActor(), dispatcher = _props.routerConfig.routerDispatcher), + _supervisor) { + private[akka] val routerConfig = _props.routerConfig private[akka] val routeeProps = _props.copy(routerConfig = NoRouter) private[akka] val resizeInProgress = new AtomicBoolean @@ -72,39 +58,29 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup private var _routeeProvider: RouteeProvider = _ def routeeProvider = _routeeProvider - val route = - try { - // verify that a BalancingDispatcher is not used with a Router - if (_props.routerConfig != NoRouter && _system.dispatchers.isBalancingDispatcher(_props.routerConfig.routerDispatcher)) { - actorContext.stop(actorContext.self) - throw new ConfigurationException( - "Configuration for actor [" + _path.toString + - "] is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") - } - - _routeeProvider = routerConfig.createRouteeProvider(actorContext) - val r = 
routerConfig.createRoute(routeeProps, routeeProvider) - // initial resize, before message send - routerConfig.resizer foreach { r ⇒ - if (r.isTimeForResize(resizeCounter.getAndIncrement())) - r.resize(routeeProps, routeeProvider) - } - r - } finally { - assert(Thread.holdsLock(actorContext)) - Unsafe.instance.monitorExit(actorContext) // unblock Router’s constructor + val route = { + _routeeProvider = routerConfig.createRouteeProvider(this) + val r = routerConfig.createRoute(routeeProps, routeeProvider) + // initial resize, before message send + routerConfig.resizer foreach { r ⇒ + if (r.isTimeForResize(resizeCounter.getAndIncrement())) + r.resize(routeeProps, routeeProvider) } + r + } if (routerConfig.resizer.isEmpty && _routees.isEmpty) throw new ActorInitializationException("router " + routerConfig + " did not register routees!") + start() + /* * end of construction */ def applyRoute(sender: ActorRef, message: Any): Iterable[Destination] = message match { - case _: AutoReceivedMessage ⇒ Destination(this, this) :: Nil - case Terminated(_) ⇒ Destination(this, this) :: Nil + case _: AutoReceivedMessage ⇒ Destination(self, self) :: Nil + case Terminated(_) ⇒ Destination(self, self) :: Nil case CurrentRoutees ⇒ sender ! 
RouterRoutees(_routees) Nil @@ -122,7 +98,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup private[akka] def addRoutees(newRoutees: IndexedSeq[ActorRef]): Unit = { _routees = _routees ++ newRoutees // subscribe to Terminated messages for all route destinations, to be handled by Router actor - newRoutees foreach underlying.watch + newRoutees foreach watch } /** @@ -133,13 +109,13 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup */ private[akka] def removeRoutees(abandonedRoutees: IndexedSeq[ActorRef]): Unit = { _routees = _routees diff abandonedRoutees - abandonedRoutees foreach underlying.unwatch + abandonedRoutees foreach unwatch } - override def !(message: Any)(implicit sender: ActorRef = null): Unit = { + override def tell(message: Any, sender: ActorRef): Unit = { resize() - val s = if (sender eq null) underlying.system.deadLetters else sender + val s = if (sender eq null) system.deadLetters else sender val msg = message match { case Broadcast(m) ⇒ m @@ -147,15 +123,18 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup } applyRoute(s, message) match { - case Destination(_, x) :: Nil if x eq this ⇒ super.!(message)(s) - case refs ⇒ refs foreach (p ⇒ p.recipient.!(msg)(p.sender)) + case Destination(_, x) :: Nil if x == self ⇒ super.tell(message, s) + case refs ⇒ + refs foreach (p ⇒ + if (p.recipient == self) super.tell(msg, p.sender) + else p.recipient.!(msg)(p.sender)) } } def resize(): Unit = { for (r ← routerConfig.resizer) { if (r.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) - super.!(Router.Resize) + super.tell(Router.Resize, self) } } } @@ -212,6 +191,11 @@ trait RouterConfig { */ def resizer: Option[Resizer] = None + /** + * Check that everything is there which is needed. Called in constructor of RoutedActorRef to fail early. 
+ */ + def verifyConfig(): Unit = {} + } /** @@ -227,7 +211,7 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { * Not thread safe, but intended to be called from protected points, such as * `RouterConfig.createRoute` and `Resizer.resize`. */ - def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = routedRef.addRoutees(routees) + def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = routedCell.addRoutees(routees) /** * Adds the routees to the router. @@ -247,7 +231,7 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { * Not thread safe, but intended to be called from protected points, such as * `Resizer.resize`. */ - def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = routedRef.removeRoutees(routees) + def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = routedCell.removeRoutees(routees) def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { @@ -264,9 +248,9 @@ class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { /** * All routees of the router */ - def routees: IndexedSeq[ActorRef] = routedRef.routees + def routees: IndexedSeq[ActorRef] = routedCell.routees - private def routedRef = context.self.asInstanceOf[RoutedActorRef] + private def routedCell = context.asInstanceOf[RoutedActorCell] } /** @@ -298,12 +282,9 @@ trait CustomRoute { */ trait Router extends Actor { - // make sure that we synchronize properly to get the childrenRefs into our CPU cache - val ref = context.synchronized { - self match { - case x: RoutedActorRef ⇒ x - case _ ⇒ throw new ActorInitializationException("Router actor can only be used in RoutedActorRef") - } + val ref = context match { + case x: RoutedActorCell ⇒ x + case _ ⇒ throw new ActorInitializationException("Router actor can only be used in RoutedActorRef, not in " + context.getClass) } final def receive = ({ @@ -417,8 +398,10 @@ class 
FromConfig(val routerDispatcher: String = Dispatchers.DefaultDispatcherId) def this() = this(Dispatchers.DefaultDispatcherId) - def createRoute(props: Props, routeeProvider: RouteeProvider): Route = - throw new ConfigurationException("router " + routeeProvider.context.self + " needs external configuration from file (e.g. application.conf)") + override def verifyConfig(): Unit = + throw new ConfigurationException("router needs external configuration from file (e.g. application.conf)") + + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = null def supervisorStrategy: SupervisorStrategy = Router.defaultSupervisorStrategy } @@ -774,9 +757,11 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ * routers based on mailbox and actor internal state. */ protected def isProcessingMessage(a: ActorRef): Boolean = a match { - case x: LocalActorRef ⇒ - val cell = x.underlying - cell.mailbox.isScheduled && cell.currentMessage != null + case x: ActorRefWithCell ⇒ + x.underlying match { + case cell: ActorCell ⇒ cell.mailbox.isScheduled && cell.currentMessage != null + case _ ⇒ false + } case _ ⇒ false } @@ -788,8 +773,8 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ * routers based on mailbox and actor internal state. */ protected def hasMessages(a: ActorRef): Boolean = a match { - case x: LocalActorRef ⇒ x.underlying.mailbox.hasMessages - case _ ⇒ false + case x: ActorRefWithCell ⇒ x.underlying.hasMessages + case _ ⇒ false } /** @@ -799,8 +784,12 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ * routers based on mailbox and actor internal state. */ protected def isSuspended(a: ActorRef): Boolean = a match { - case x: LocalActorRef ⇒ x.underlying.mailbox.isSuspended - case _ ⇒ false + case x: ActorRefWithCell ⇒ + x.underlying match { + case cell: ActorCell ⇒ cell.mailbox.isSuspended + case _ ⇒ true + } + case _ ⇒ false } /** @@ -810,8 +799,8 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ * routers based on mailbox and actor internal state. 
*/ protected def numberOfMessages(a: ActorRef): Int = a match { - case x: LocalActorRef ⇒ x.underlying.mailbox.numberOfMessages - case _ ⇒ 0 + case x: ActorRefWithCell ⇒ x.underlying.numberOfMessages + case _ ⇒ 0 } def createRoute(props: Props, routeeProvider: RouteeProvider): Route = { @@ -1283,12 +1272,20 @@ case class DefaultResizer( */ def pressure(routees: IndexedSeq[ActorRef]): Int = { routees count { - case a: LocalActorRef ⇒ - val cell = a.underlying - pressureThreshold match { - case 1 ⇒ cell.mailbox.isScheduled && cell.mailbox.hasMessages - case i if i < 1 ⇒ cell.mailbox.isScheduled && cell.currentMessage != null - case threshold ⇒ cell.mailbox.numberOfMessages >= threshold + case a: ActorRefWithCell ⇒ + a.underlying match { + case cell: ActorCell ⇒ + pressureThreshold match { + case 1 ⇒ cell.mailbox.isScheduled && cell.mailbox.hasMessages + case i if i < 1 ⇒ cell.mailbox.isScheduled && cell.currentMessage != null + case threshold ⇒ cell.mailbox.numberOfMessages >= threshold + } + case cell ⇒ + pressureThreshold match { + case 1 ⇒ cell.hasMessages + case i if i < 1 ⇒ true // unstarted cells are always busy, for example + case threshold ⇒ cell.numberOfMessages >= threshold + } } case x ⇒ false diff --git a/akka-agent/src/main/scala/akka/agent/Agent.scala b/akka-agent/src/main/scala/akka/agent/Agent.scala index 64834178a8..ea3d8719cd 100644 --- a/akka-agent/src/main/scala/akka/agent/Agent.scala +++ b/akka-agent/src/main/scala/akka/agent/Agent.scala @@ -97,7 +97,7 @@ object Agent { */ class Agent[T](initialValue: T, system: ActorSystem) { private val ref = Ref(initialValue) - private val updater = system.actorOf(Props(new AgentUpdater(this, ref))).asInstanceOf[LocalActorRef] //TODO can we avoid this somehow? + private val updater = system.actorOf(Props(new AgentUpdater(this, ref))).asInstanceOf[InternalActorRef] //TODO can we avoid this somehow? /** * Read the internal state of the agent. 
diff --git a/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala index b51c7bb170..fc62cd940d 100644 --- a/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -11,7 +11,7 @@ import akka.actor.Props import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.MustMatchers import akka.testkit.AkkaSpec -import akka.actor.Actor +import akka.actor.{ Actor, ExtendedActorSystem } class MyActor extends Actor { def receive = { @@ -56,20 +56,20 @@ import akka.util.duration._ class MyMailboxType(systemSettings: ActorSystem.Settings, config: Config) extends MailboxType { - override def create(owner: Option[ActorContext]): MessageQueue = owner match { - case Some(o) ⇒ new MyMessageQueue(o) + override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = owner zip system headOption match { + case Some((o, s: ExtendedActorSystem)) ⇒ new MyMessageQueue(o, s) case None ⇒ throw new IllegalArgumentException( "requires an owner (i.e. 
does not work with BalancingDispatcher)") } } -class MyMessageQueue(_owner: ActorContext) - extends DurableMessageQueue(_owner) with DurableMessageSerialization { +class MyMessageQueue(_owner: ActorRef, _system: ExtendedActorSystem) + extends DurableMessageQueue(_owner, _system) with DurableMessageSerialization { val storage = new QueueStorage // A real-world implmentation would use configuration to set the last // three parameters below - val breaker = CircuitBreaker(_owner.system.scheduler, 5, 30.seconds, 1.minute) + val breaker = CircuitBreaker(system.scheduler, 5, 30.seconds, 1.minute) def enqueue(receiver: ActorRef, envelope: Envelope): Unit = breaker.withSyncCircuitBreaker { val data: Array[Byte] = serialize(envelope) @@ -91,7 +91,7 @@ class MyMessageQueue(_owner: ActorContext) * but the purpose of a durable mailbox is to continue * with the same message queue when the actor is started again. */ - def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () + def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = () } //#custom-mailbox diff --git a/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala index 3ff8d9c1ea..09a2f810bf 100644 --- a/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala @@ -135,7 +135,7 @@ object DispatcherDocSpec { //#mailbox-implementation-example case class MyUnboundedMailbox() extends akka.dispatch.MailboxType { - import akka.actor.ActorContext + import akka.actor.{ ActorRef, ActorSystem } import com.typesafe.config.Config import java.util.concurrent.ConcurrentLinkedQueue import akka.dispatch.{ @@ -149,7 +149,7 @@ object DispatcherDocSpec { def this(settings: ActorSystem.Settings, config: Config) = this() // The create method is called to create the MessageQueue - final override def create(owner: Option[ActorContext]): MessageQueue = + final override def create(owner: 
Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new QueueBasedMessageQueue with UnboundedMessageQueueSemantics { final val queue = new ConcurrentLinkedQueue[Envelope]() } diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala index fccb6b5aea..8d2ce5b897 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala @@ -13,20 +13,22 @@ import akka.actor.ActorSystem import akka.dispatch._ import akka.util.{ Duration, NonFatal } import akka.pattern.{ CircuitBreakerOpenException, CircuitBreaker } +import akka.actor.ExtendedActorSystem class FileBasedMailboxType(systemSettings: ActorSystem.Settings, config: Config) extends MailboxType { private val settings = new FileBasedMailboxSettings(systemSettings, config) - override def create(owner: Option[ActorContext]): MessageQueue = owner match { - case Some(o) ⇒ new FileBasedMessageQueue(o, settings) - case None ⇒ throw new ConfigurationException("creating a durable mailbox requires an owner (i.e. does not work with BalancingDispatcher)") + override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = owner zip system headOption match { + case Some((o, s: ExtendedActorSystem)) ⇒ new FileBasedMessageQueue(o, s, settings) + case None ⇒ throw new ConfigurationException("creating a durable mailbox requires an owner (i.e. 
does not work with BalancingDispatcher)") } } -class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailboxSettings) extends DurableMessageQueue(_owner) with DurableMessageSerialization { +class FileBasedMessageQueue(_owner: ActorRef, _system: ExtendedActorSystem, val settings: FileBasedMailboxSettings) + extends DurableMessageQueue(_owner, _system) with DurableMessageSerialization { // TODO Is it reasonable for all FileBasedMailboxes to have their own logger? private val log = Logging(system, "FileBasedMessageQueue") - val breaker = CircuitBreaker(_owner.system.scheduler, settings.CircuitBreakerMaxFailures, settings.CircuitBreakerCallTimeout, settings.CircuitBreakerResetTimeout) + val breaker = CircuitBreaker(system.scheduler, settings.CircuitBreakerMaxFailures, settings.CircuitBreakerCallTimeout, settings.CircuitBreakerResetTimeout) private val queue = try { (new java.io.File(settings.QueuePath)) match { @@ -79,5 +81,5 @@ class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailbox case NonFatal(_) ⇒ false } - def cleanUp(owner: ActorContext, deadLetters: MessageQueue): Unit = () + def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = () } diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala index b21878d00e..e3bb5858f7 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala @@ -13,11 +13,10 @@ private[akka] object DurableExecutableMailboxConfig { val Name = "[\\.\\/\\$\\s]".r } -abstract class DurableMessageQueue(val owner: ActorContext) extends MessageQueue { +abstract class DurableMessageQueue(val owner: ActorRef, val system: ExtendedActorSystem) extends MessageQueue { import 
DurableExecutableMailboxConfig._ - def system: ExtendedActorSystem = owner.system.asInstanceOf[ExtendedActorSystem] - def ownerPath: ActorPath = owner.self.path + def ownerPath: ActorPath = owner.path val ownerPathString: String = ownerPath.elements.mkString("/") val name: String = "mailbox_" + Name.replaceAllIn(ownerPathString, "_") @@ -42,7 +41,7 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ val message = MessageSerializer.serialize(system, durableMessage.message.asInstanceOf[AnyRef]) val builder = RemoteMessageProtocol.newBuilder .setMessage(message) - .setRecipient(serializeActorRef(owner.self)) + .setRecipient(serializeActorRef(owner)) .setSender(serializeActorRef(durableMessage.sender)) builder.build.toByteArray @@ -60,7 +59,7 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ val message = MessageSerializer.deserialize(system, durableMessage.getMessage) val sender = deserializeActorRef(durableMessage.getSender) - Envelope(message, sender)(system) + Envelope(message, sender, system) } } diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala index 9081a5fcb0..8264bd0348 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala @@ -3,25 +3,21 @@ */ package akka.actor.mailbox -import DurableMailboxSpecActorFactory.AccumulatorActor -import DurableMailboxSpecActorFactory.MailboxTestActor -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.ActorSystem -import akka.actor.LocalActorRef -import akka.actor.Props -import akka.actor.actorRef2Scala +import java.io.InputStream +import java.util.concurrent.TimeoutException + +import scala.annotation.tailrec + +import org.scalatest.{ 
WordSpec, BeforeAndAfterAll } +import org.scalatest.matchers.MustMatchers + +import com.typesafe.config.{ ConfigFactory, Config } + +import DurableMailboxSpecActorFactory.{ MailboxTestActor, AccumulatorActor } +import akka.actor.{ RepointableRef, Props, ActorSystem, ActorRefWithCell, ActorRef, ActorCell, Actor } import akka.dispatch.Mailbox import akka.testkit.TestKit import akka.util.duration.intToDurationInt -import com.typesafe.config.Config -import com.typesafe.config.ConfigFactory -import java.io.InputStream -import java.util.concurrent.TimeoutException -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import scala.annotation.tailrec object DurableMailboxSpecActorFactory { @@ -115,9 +111,15 @@ abstract class DurableMailboxSpec(system: ActorSystem, val backendName: String) if (!result.contains(words)) throw new Exception("stream did not contain '" + words + "':\n" + result) } - def createMailboxTestActor(props: Props = Props[MailboxTestActor], id: String = ""): ActorRef = id match { - case null | "" ⇒ system.actorOf(props.withDispatcher(backendName + "-dispatcher")) - case some ⇒ system.actorOf(props.withDispatcher(backendName + "-dispatcher"), some) + def createMailboxTestActor(props: Props = Props[MailboxTestActor], id: String = ""): ActorRef = { + val ref = id match { + case null | "" ⇒ system.actorOf(props.withDispatcher(backendName + "-dispatcher")) + case some ⇒ system.actorOf(props.withDispatcher(backendName + "-dispatcher"), some) + } + awaitCond(ref match { + case r: RepointableRef ⇒ r.isStarted + }, 1 second, 10 millis) + ref } private def isDurableMailbox(m: Mailbox): Boolean = @@ -127,9 +129,11 @@ abstract class DurableMailboxSpec(system: ActorSystem, val backendName: String) "get a new, unique, durable mailbox" in { val a1, a2 = createMailboxTestActor() - isDurableMailbox(a1.asInstanceOf[LocalActorRef].underlying.mailbox) must be(true) - 
isDurableMailbox(a2.asInstanceOf[LocalActorRef].underlying.mailbox) must be(true) - (a1.asInstanceOf[LocalActorRef].underlying.mailbox ne a2.asInstanceOf[LocalActorRef].underlying.mailbox) must be(true) + val mb1 = a1.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].mailbox + val mb2 = a2.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].mailbox + isDurableMailbox(mb1) must be(true) + isDurableMailbox(mb2) must be(true) + (mb1 ne mb2) must be(true) } "deliver messages at most once" in { @@ -148,7 +152,7 @@ abstract class DurableMailboxSpec(system: ActorSystem, val backendName: String) "support having multiple actors at the same time" in { val actors = Vector.fill(3)(createMailboxTestActor(Props[AccumulatorActor])) - actors foreach { a ⇒ isDurableMailbox(a.asInstanceOf[LocalActorRef].underlying.mailbox) must be(true) } + actors foreach { a ⇒ isDurableMailbox(a.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].mailbox) must be(true) } val msgs = 1 to 3 diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index eaecf67792..dfdf25759b 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -96,8 +96,8 @@ private[akka] class RemoteActorRefProvider( } def actorOf(system: ActorSystemImpl, props: Props, supervisor: InternalActorRef, path: ActorPath, - systemService: Boolean, deploy: Option[Deploy], lookupDeploy: Boolean): InternalActorRef = { - if (systemService) local.actorOf(system, props, supervisor, path, systemService, deploy, lookupDeploy) + systemService: Boolean, deploy: Option[Deploy], lookupDeploy: Boolean, async: Boolean): InternalActorRef = { + if (systemService) local.actorOf(system, props, supervisor, path, systemService, deploy, lookupDeploy, async) else { /* @@ -155,14 +155,14 @@ private[akka] class 
RemoteActorRefProvider( Iterator(props.deploy) ++ deployment.iterator reduce ((a, b) ⇒ b withFallback a) match { case d @ Deploy(_, _, _, RemoteScope(addr)) ⇒ if (addr == rootPath.address || addr == transport.address) { - local.actorOf(system, props, supervisor, path, false, deployment.headOption, false) + local.actorOf(system, props, supervisor, path, false, deployment.headOption, false, async) } else { val rpath = RootActorPath(addr) / "remote" / transport.address.hostPort / path.elements useActorOnNode(rpath, props, d, supervisor) new RemoteActorRef(this, transport, rpath, supervisor) } - case _ ⇒ local.actorOf(system, props, supervisor, path, systemService, deployment.headOption, false) + case _ ⇒ local.actorOf(system, props, supervisor, path, systemService, deployment.headOption, false, async) } } } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index ddab54b2ad..53023687c0 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -5,10 +5,11 @@ package akka.remote import scala.annotation.tailrec - import akka.actor.{ VirtualPathContainer, Terminated, Deploy, Props, Nobody, LocalActorRef, InternalActorRef, Address, ActorSystemImpl, ActorRef, ActorPathExtractor, ActorPath, Actor } import akka.event.LoggingAdapter import akka.dispatch.Watch +import akka.actor.ActorRefWithCell +import akka.actor.ActorRefScope private[akka] sealed trait DaemonMsg private[akka] case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) extends DaemonMsg @@ -60,7 +61,7 @@ private[akka] class RemoteSystemDaemon(system: ActorSystemImpl, _path: ActorPath val subpath = elems.drop(1) val path = this.path / subpath val actor = system.provider.actorOf(system, props, supervisor.asInstanceOf[InternalActorRef], - path, false, Some(deploy), true) + path, systemService = false, Some(deploy), 
lookupDeploy = true, async = false) addChild(subpath.mkString("/"), actor) this.sendSystemMessage(Watch(actor, this)) case _ ⇒ @@ -68,11 +69,12 @@ private[akka] class RemoteSystemDaemon(system: ActorSystemImpl, _path: ActorPath } } - case Terminated(child: LocalActorRef) ⇒ removeChild(child.path.elements.drop(1).mkString("/")) + case Terminated(child: ActorRefWithCell) if child.asInstanceOf[ActorRefScope].isLocal ⇒ + removeChild(child.path.elements.drop(1).mkString("/")) - case t: Terminated ⇒ + case t: Terminated ⇒ - case unknown ⇒ log.warning("Unknown message {} received by {}", unknown, this) + case unknown ⇒ log.warning("Unknown message {} received by {}", unknown, this) } } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index aefd34ec74..f06d671700 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -269,14 +269,14 @@ abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: Re } case x ⇒ log.warning("remoteDaemon received illegal message {} from {}", x, remoteMessage.sender) } - case l: LocalRef ⇒ + case l @ (_: LocalRef | _: RepointableRef) if l.isLocal ⇒ if (provider.remoteSettings.LogReceive) log.debug("received local message {}", remoteMessage) remoteMessage.payload match { case msg: PossiblyHarmful if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound PossiblyHarmful message of type {}", msg.getClass) case msg: SystemMessage ⇒ l.sendSystemMessage(msg) case msg ⇒ l.!(msg)(remoteMessage.sender) } - case r: RemoteRef ⇒ + case r @ (_: RemoteRef | _: RepointableRef) if !r.isLocal ⇒ if (provider.remoteSettings.LogReceive) log.debug("received remote-destined message {}", remoteMessage) remoteMessage.originalReceiver match { case AddressFromURIString(address) if address == provider.transport.address ⇒ @@ -284,7 +284,7 @@ abstract class 
RemoteTransport(val system: ExtendedActorSystem, val provider: Re r.!(remoteMessage.payload)(remoteMessage.sender) case r ⇒ log.error("dropping message {} for non-local recipient {} arriving at {} inbound address is {}", remoteMessage.payload, r, address, provider.transport.address) } - case r ⇒ log.error("dropping message {} for non-local recipient {} arriving at {} inbound address is {}", remoteMessage.payload, r, address, provider.transport.address) + case r ⇒ log.error("dropping message {} for unknown recipient {} arriving at {} inbound address is {}", remoteMessage.payload, r, address, provider.transport.address) } } } diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala index 9a71f309fc..a0b7ae4a49 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala @@ -71,7 +71,8 @@ class RemoteRouteeProvider(nodes: Iterable[Address], _context: ActorContext, _re IndexedSeq.empty[ActorRef] ++ (for (i ← 1 to nrOfInstances) yield { val name = "c" + i val deploy = Deploy("", ConfigFactory.empty(), props.routerConfig, RemoteScope(nodeAddressIter.next)) - impl.provider.actorOf(impl, props, context.self.asInstanceOf[InternalActorRef], context.self.path / name, false, Some(deploy), false) + impl.provider.actorOf(impl, props, context.self.asInstanceOf[InternalActorRef], context.self.path / name, + systemService = false, Some(deploy), lookupDeploy = false, async = false) }) case (_, xs, _) ⇒ throw new ConfigurationException("Remote target.nodes can not be combined with routees for [%s]" diff --git a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala index ac4127fe17..7f92e3089b 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala +++ 
b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala @@ -118,7 +118,7 @@ akka { val r = expectMsgType[ActorRef] r ! (Props[Echo], "grandchild") val remref = expectMsgType[ActorRef] - remref.isInstanceOf[LocalActorRef] must be(true) + remref.asInstanceOf[ActorRefScope].isLocal must be(true) val myref = system.actorFor(system / "looker" / "child" / "grandchild") myref.isInstanceOf[RemoteActorRef] must be(true) myref ! 43 diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index 2fe664d7b6..1732d5faf3 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -128,7 +128,7 @@ class CallingThreadDispatcher( override def id: String = Id - protected[akka] override def createMailbox(actor: ActorCell) = new CallingThreadMailbox(actor, mailboxType) + protected[akka] override def createMailbox(actor: akka.actor.Cell) = new CallingThreadMailbox(actor, mailboxType) protected[akka] override def shutdown() {} @@ -281,17 +281,21 @@ class NestingQueue(val q: MessageQueue) { def isActive = active } -class CallingThreadMailbox(_receiver: ActorCell, val mailboxType: MailboxType) extends Mailbox(_receiver, null) with DefaultSystemMessageQueue { +class CallingThreadMailbox(_receiver: akka.actor.Cell, val mailboxType: MailboxType) + extends Mailbox(null) with DefaultSystemMessageQueue { + + val system = _receiver.system + val self = _receiver.self private val q = new ThreadLocal[NestingQueue]() { override def initialValue = { - val queue = new NestingQueue(mailboxType.create(Some(actor))) - CallingThreadDispatcherQueues(actor.system).registerQueue(CallingThreadMailbox.this, queue) + val queue = new NestingQueue(mailboxType.create(Some(self), Some(system))) + CallingThreadDispatcherQueues(system).registerQueue(CallingThreadMailbox.this, queue) queue } } - override 
def enqueue(receiver: ActorRef, msg: Envelope): Unit = throw new UnsupportedOperationException("CallingThreadMailbox cannot enqueue normally") + override def enqueue(receiver: ActorRef, msg: Envelope): Unit = q.get.q.enqueue(receiver, msg) override def dequeue(): Envelope = throw new UnsupportedOperationException("CallingThreadMailbox cannot dequeue normally") override def hasMessages: Boolean = q.get.q.hasMessages override def numberOfMessages: Int = 0 @@ -311,7 +315,7 @@ class CallingThreadMailbox(_receiver: ActorCell, val mailboxType: MailboxType) e val q = queue CallingThreadDispatcherQueues(actor.system).gatherFromAllOtherQueues(this, q) super.cleanUp() - q.q.cleanUp(actor, actor.systemImpl.deadLetterQueue) + q.q.cleanUp(actor.self, actor.systemImpl.deadLetterQueue) } } } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index ed151b6b12..1b6945ff51 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -58,7 +58,7 @@ class TestActorRef[T <: Actor]( * become/unbecome. 
*/ def receive(o: Any, sender: ActorRef): Unit = try { - underlying.currentMessage = Envelope(o, if (sender eq null) underlying.system.deadLetters else sender)(underlying.system) + underlying.currentMessage = Envelope(o, if (sender eq null) underlying.system.deadLetters else sender, underlying.system) underlying.receiveMessage(o) } finally underlying.currentMessage = null diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index c0fb6e5267..d176971d33 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -97,9 +97,14 @@ trait TestKitBase { */ lazy val testActor: ActorRef = { val impl = system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 - impl.systemActorOf(Props(new TestActor(queue)) + val ref = impl.systemActorOf(Props(new TestActor(queue)) .withDispatcher(CallingThreadDispatcher.Id), "testActor" + TestKit.testActorId.incrementAndGet) + awaitCond(ref match { + case r: RepointableRef ⇒ r.isStarted + case _ ⇒ true + }, 1 second, 10 millis) + ref } private var end: Duration = Duration.Undefined From 2bf2cec282d1606d10692aaca770a53b5bc52917 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Wed, 13 Jun 2012 22:37:41 +0200 Subject: [PATCH 387/538] Service registration is now optional, service references get cleaned up properly --- .../additional/code/osgi/Activator.scala | 5 ++- .../akka/osgi/ActorSystemActivator.scala | 35 +++++++++++++------ .../akka/osgi/OsgiActorSystemFactory.scala | 16 ++------- .../akka/osgi/blueprint/aries/config.xml | 4 ++- .../akka/osgi/blueprint/aries/simple.xml | 4 ++- .../osgi/test/TestActorSystemActivator.scala | 4 ++- 6 files changed, 40 insertions(+), 28 deletions(-) diff --git a/akka-docs/additional/code/osgi/Activator.scala b/akka-docs/additional/code/osgi/Activator.scala index 06a538d242..34e83fcf77 100644 --- a/akka-docs/additional/code/osgi/Activator.scala +++ 
b/akka-docs/additional/code/osgi/Activator.scala @@ -6,7 +6,10 @@ import org.apache.servicemix.examples.akka.Master //#Activator class Activator extends ActorSystemActivator("PiSystem") { - def configure(system: ActorSystem) { + def configure(context: BundleContext, system: ActorSystem) { + // optionally register the ActorSystem in the OSGi Service Registry + registerService(context, system) + val listener = system.actorOf(Props[Listener], name = "listener") val master = system.actorOf(Props(new Master(4, 10000, 10000, listener)), name = "master") master ! Calculate diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index 7f60aebccc..546ff8c2c4 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -2,8 +2,8 @@ package akka.osgi import com.typesafe.config.{ Config, ConfigFactory } import akka.actor.ActorSystem -import org.osgi.framework.{ BundleContext, BundleActivator } -import java.util.Properties +import java.util.{ Dictionary, Properties } +import org.osgi.framework.{ ServiceRegistration, BundleContext, BundleActivator } /** * Abstract {@link BundleActivator} implementation to bootstrap and configure an {@link ActorSystem} in an @@ -14,14 +14,16 @@ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) def this() = this({ context: BundleContext ⇒ None }) def this(name: String) = this({ context: BundleContext ⇒ Some(name) }) - var system: ActorSystem = null + var system: Option[ActorSystem] = None + var registration: Option[ServiceRegistration] = None /** * Implement this method to add your own actors to the ActorSystem * + * @param context the bundle context * @param system the ActorSystem that was created by the activator */ - def configure(system: ActorSystem) + def configure(context: BundleContext, system: ActorSystem) /** * Sets up a new ActorSystem and 
registers it in the OSGi Service Registry @@ -29,20 +31,31 @@ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) * @param context the BundleContext */ def start(context: BundleContext) { - system = OsgiActorSystemFactory(context).createActorSystem(nameFor(context)) - configure(system) + system = Some(OsgiActorSystemFactory(context).createActorSystem(nameFor(context))) + system.foreach(configure(context, _)) } /** - * Shuts down the ActorSystem when the bundle is stopped. + * Shuts down the ActorSystem when the bundle is stopped and, if necessary, unregisters a service registration * * @param context the BundleContext */ def stop(context: BundleContext) { - if (system != null) { - system.shutdown() - system = null - } + registration.foreach(_.unregister()) + system.foreach(_.shutdown()) + } + + /** + * Register the actor system in the OSGi service registry + * + * @param context the bundle context + * @param system the actor system + */ + def registerService(context: BundleContext, system: ActorSystem) { + val properties = new Properties() + properties.put("name", system.name) + registration = Some(context.registerService(classOf[ActorSystem].getName, system, + properties.asInstanceOf[Dictionary[String, Any]])) } } diff --git a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala index cddf797d07..2c5a6eca14 100644 --- a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala +++ b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala @@ -1,10 +1,9 @@ package akka.osgi import impl.BundleDelegatingClassLoader -import org.osgi.framework.BundleContext import akka.actor.ActorSystem import com.typesafe.config.{ ConfigFactory, Config } -import java.util.{ Dictionary, Properties } +import org.osgi.framework.BundleContext /** * Factory class to create ActorSystem implementations in an OSGi environment. 
This mainly involves dealing with @@ -22,17 +21,8 @@ class OsgiActorSystemFactory(val context: BundleContext) { */ def createActorSystem(name: String): ActorSystem = createActorSystem(Option(name)) - def createActorSystem(name: Option[String]): ActorSystem = { - val system = ActorSystem(actorSystemName(name), actorSystemConfig(context), classloader) - registerService(system) - system - } - - def registerService(system: ActorSystem) { - val properties = new Properties() - properties.put("name", system.name) - context.registerService(classOf[ActorSystem].getName, system, properties.asInstanceOf[Dictionary[String, Any]]) - } + def createActorSystem(name: Option[String]): ActorSystem = + ActorSystem(actorSystemName(name), actorSystemConfig(context), classloader) /** * Strategy method to create the Config for the ActorSystem, ensuring that the default/reference configuration is diff --git a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml index 6bd3d49c9d..ce9f48c551 100644 --- a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml +++ b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml @@ -2,7 +2,9 @@ - + + + some.config { key=value diff --git a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml index a46834f74b..2ac6552f80 100644 --- a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml +++ b/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml @@ -2,6 +2,8 @@ - + + + diff --git a/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala b/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala index 2a44e91e4a..90305bc663 100644 --- a/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala +++ b/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala @@ -3,14 +3,16 @@ package akka.osgi.test 
import akka.osgi.ActorSystemActivator import akka.actor.{ Props, ActorSystem } import PingPong._ +import org.osgi.framework.BundleContext /** * Sample ActorSystemActivator implementation used for testing purposes */ class TestActorSystemActivator extends ActorSystemActivator { - def configure(system: ActorSystem) { + def configure(context: BundleContext, system: ActorSystem) { system.actorOf(Props(new PongActor), name = "pong") + registerService(context, system) } } From b52da8d5881692ac23e443b547aa820f29ba6ff1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 14 Jun 2012 09:18:02 +0200 Subject: [PATCH 388/538] Fixed protoc generated code with script and updated instructions --- .../remote/testconductor/TestConductorProtocol.java | 12 ++++++------ .../src/main/protocol/TestConductorProtocol.proto | 2 ++ akka-remote/src/main/protocol/RemoteProtocol.proto | 2 ++ akka-remote/src/test/protocol/ProtobufProtocol.proto | 6 ++++-- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index ec84e42331..14fe236950 100644 --- a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -492,7 +492,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1397,7 +1397,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1971,7 +1971,7 @@ public final 
class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2491,7 +2491,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3119,7 +3119,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3725,7 +3725,7 @@ public final class TestConductorProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } diff --git a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto index b35bbd23d8..bef4aad5c1 100644 --- a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto @@ -9,6 +9,8 @@ option optimize_for = SPEED; Compile with: cd ./akka-remote-tests/src/main/protocol protoc TestConductorProtocol.proto --java_out ../java + cd ../../../.. 
+ ./scripts/fix-protobuf.sh *******************************************/ message Wrapper { diff --git a/akka-remote/src/main/protocol/RemoteProtocol.proto b/akka-remote/src/main/protocol/RemoteProtocol.proto index 7d86d8a82b..ddcfe26d1d 100644 --- a/akka-remote/src/main/protocol/RemoteProtocol.proto +++ b/akka-remote/src/main/protocol/RemoteProtocol.proto @@ -9,6 +9,8 @@ option optimize_for = SPEED; Compile with: cd ./akka-remote/src/main/protocol protoc RemoteProtocol.proto --java_out ../java + cd ../../../.. + ./scripts/fix-protobuf.sh *******************************************/ message AkkaRemoteProtocol { diff --git a/akka-remote/src/test/protocol/ProtobufProtocol.proto b/akka-remote/src/test/protocol/ProtobufProtocol.proto index ccb92aa1e3..0ff2663821 100644 --- a/akka-remote/src/test/protocol/ProtobufProtocol.proto +++ b/akka-remote/src/test/protocol/ProtobufProtocol.proto @@ -4,11 +4,13 @@ package akka.actor; -/* +/****************************************** Compile with: cd ./akka-remote/src/test/protocol protoc ProtobufProtocol.proto --java_out ../java -*/ + cd ../../../.. 
+ ./scripts/fix-protobuf.sh +*******************************************/ message MyMessage { required uint64 id = 1; From f7a01505baedf47be473874097bc8f995ba9311b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 13:24:06 +0200 Subject: [PATCH 389/538] Correction of gossip merge when joining, see #2204 The problem: * Node that is Up joins a cluster and becomes Joining in that cluster * The joining node receives gossip, which results in conflict, merge results in Up * It became Up in the new cluster without passing the ordinary leader action to move it to Up The solution: * Change priority order of Up and Joining so that Joining is used when merging --- .../src/main/scala/akka/cluster/Cluster.scala | 26 +++++++++---------- .../scala/akka/cluster/ConvergenceSpec.scala | 6 ++--- .../test/scala/akka/cluster/GossipSpec.scala | 24 ++++++++--------- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index a86bc0148c..67ea0c4cd0 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -118,6 +118,15 @@ object Member { case _ ⇒ None } + def pickHighestPriority(a: Set[Member], b: Set[Member]): Set[Member] = { + // group all members by Address => Seq[Member] + val groupedByAddress = (a.toSeq ++ b.toSeq).groupBy(_.address) + // pick highest MemberStatus + (Set.empty[Member] /: groupedByAddress) { + case (acc, (_, members)) ⇒ acc + members.reduceLeft(highestPriorityOf) + } + } + /** * Picks the Member with the highest "priority" MemberStatus. */ @@ -130,8 +139,8 @@ object Member { case (_, Exiting) ⇒ m2 case (Leaving, _) ⇒ m1 case (_, Leaving) ⇒ m2 - case (Up, Joining) ⇒ m1 - case (Joining, Up) ⇒ m2 + case (Up, Joining) ⇒ m2 + case (Joining, Up) ⇒ m1 case (Joining, Joining) ⇒ m1 case (Up, Up) ⇒ m1 } @@ -268,21 +277,12 @@ case class Gossip( // 2. 
merge meta-data val mergedMeta = this.meta ++ that.meta - def pickHighestPriority(a: Seq[Member], b: Seq[Member]): Set[Member] = { - // group all members by Address => Seq[Member] - val groupedByAddress = (a ++ b).groupBy(_.address) - // pick highest MemberStatus - (Set.empty[Member] /: groupedByAddress) { - case (acc, (_, members)) ⇒ acc + members.reduceLeft(Member.highestPriorityOf) - } - } - // 3. merge unreachable by selecting the single Member with highest MemberStatus out of the Member groups - val mergedUnreachable = pickHighestPriority(this.overview.unreachable.toSeq, that.overview.unreachable.toSeq) + val mergedUnreachable = Member.pickHighestPriority(this.overview.unreachable, that.overview.unreachable) // 4. merge members by selecting the single Member with highest MemberStatus out of the Member groups, // and exclude unreachable - val mergedMembers = Gossip.emptyMembers ++ pickHighestPriority(this.members.toSeq, that.members.toSeq). + val mergedMembers = Gossip.emptyMembers ++ Member.pickHighestPriority(this.members, that.members). filterNot(mergedUnreachable.contains) // 5. 
fresh seen table diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index bdc0a1ae8b..52206f1b8c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -39,7 +39,7 @@ abstract class ConvergenceSpec "A cluster of 3 members" must { - "reach initial convergence" taggedAs LongRunningTest ignore { + "reach initial convergence" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) runOn(fourth) { @@ -49,7 +49,7 @@ abstract class ConvergenceSpec testConductor.enter("after-1") } - "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest ignore { + "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest in { val thirdAddress = node(third).address testConductor.enter("before-shutdown") @@ -81,7 +81,7 @@ abstract class ConvergenceSpec testConductor.enter("after-2") } - "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest ignore { + "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest in { runOn(fourth) { // try to join cluster.join(node(first).address) diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 449ebf7bff..8020010655 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -33,12 +33,12 @@ class GossipSpec extends WordSpec with MustMatchers { val g2 = Gossip(members = SortedSet(a2, c2, e2)) val merged1 = g1 merge g2 - merged1.members must be(SortedSet(a1, c1, e2)) - merged1.members.toSeq.map(_.status) must be(Seq(Up, Leaving, Up)) + merged1.members must be(SortedSet(a2, c1, e1)) + merged1.members.toSeq.map(_.status) must be(Seq(Joining, Leaving, Joining)) val merged2 = 
g2 merge g1 - merged2.members must be(SortedSet(a1, c1, e2)) - merged2.members.toSeq.map(_.status) must be(Seq(Up, Leaving, Up)) + merged2.members must be(SortedSet(a2, c1, e1)) + merged2.members.toSeq.map(_.status) must be(Seq(Joining, Leaving, Joining)) } @@ -48,12 +48,12 @@ class GossipSpec extends WordSpec with MustMatchers { val g2 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = Set(a2, b2, c2, d2))) val merged1 = g1 merge g2 - merged1.overview.unreachable must be(Set(a1, b2, c1, d2)) - merged1.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + merged1.overview.unreachable must be(Set(a2, b2, c1, d2)) + merged1.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Joining, Removed, Leaving, Removed)) val merged2 = g2 merge g1 - merged2.overview.unreachable must be(Set(a1, b2, c1, d2)) - merged2.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + merged2.overview.unreachable must be(Set(a2, b2, c1, d2)) + merged2.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Joining, Removed, Leaving, Removed)) } @@ -62,14 +62,14 @@ class GossipSpec extends WordSpec with MustMatchers { val g2 = Gossip(members = SortedSet(a2, c2), overview = GossipOverview(unreachable = Set(b2, d2))) val merged1 = g1 merge g2 - merged1.members must be(SortedSet(a1)) - merged1.members.toSeq.map(_.status) must be(Seq(Up)) + merged1.members must be(SortedSet(a2)) + merged1.members.toSeq.map(_.status) must be(Seq(Joining)) merged1.overview.unreachable must be(Set(b2, c1, d2)) merged1.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Removed, Leaving, Removed)) val merged2 = g2 merge g1 - merged2.members must be(SortedSet(a1)) - merged2.members.toSeq.map(_.status) must be(Seq(Up)) + merged2.members must be(SortedSet(a2)) + merged2.members.toSeq.map(_.status) must be(Seq(Joining)) merged2.overview.unreachable must be(Set(b2, c1, d2)) 
merged2.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Removed, Leaving, Removed)) From 08c47591c0ada2401bc0269b9cf5b80a6dbfacd1 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 13:31:34 +0200 Subject: [PATCH 390/538] Use max of periodic-tasks-initial-delay and the interval --- .../src/main/scala/akka/cluster/Cluster.scala | 28 +++++++++++-------- .../MembershipChangeListenerExitingSpec.scala | 2 +- .../MembershipChangeListenerLeavingSpec.scala | 2 +- .../cluster/NodeLeavingAndExitingSpec.scala | 2 +- 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 67ea0c4cd0..c495e470ce 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -522,24 +522,28 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } // start periodic gossip to random nodes in cluster - private val gossipTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, GossipInterval) { - gossip() - } + private val gossipTask = + FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay.max(GossipInterval), GossipInterval) { + gossip() + } // start periodic heartbeat to all nodes in cluster - private val heartbeatTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, HeartbeatInterval) { - heartbeat() - } + private val heartbeatTask = + FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay.max(HeartbeatInterval), HeartbeatInterval) { + heartbeat() + } // start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list) - private val failureDetectorReaperTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, UnreachableNodesReaperInterval) { - reapUnreachableMembers() - } + private val failureDetectorReaperTask = + FixedRateTask(clusterScheduler, 
PeriodicTasksInitialDelay.max(UnreachableNodesReaperInterval), UnreachableNodesReaperInterval) { + reapUnreachableMembers() + } // start periodic leader action management (only applies for the current leader) - private val leaderActionsTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, LeaderActionsInterval) { - leaderActions() - } + private val leaderActionsTask = + FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay.max(LeaderActionsInterval), LeaderActionsInterval) { + leaderActions() + } createMBean() diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index d9b2c7b876..88cee08191 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -21,7 +21,7 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-interval = 5 s # increase the leader action task interval - unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to unreachable node set + unreachable-nodes-reaper-interval = 300 s # turn "off" reaping to unreachable node set } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index eda29ea0f0..0640e58175 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -19,7 +19,7 @@ object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" 
akka.cluster.leader-actions-interval = 5 s - akka.cluster.unreachable-nodes-reaper-interval = 30 s + akka.cluster.unreachable-nodes-reaper-interval = 300 s # turn "off" """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 6378a74040..fc62c17c1d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -20,7 +20,7 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-interval = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state - unreachable-nodes-reaper-interval = 30 s + unreachable-nodes-reaper-interval = 300 s # turn "off" } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) From 11c85b84b96761dfd1d2a250d3c839b399725129 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 13:32:55 +0200 Subject: [PATCH 391/538] Fail fast in cluster tests if previous step failed --- .../akka/cluster/MultiNodeClusterSpec.scala | 27 ++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index b4532f7efc..b5afaf404c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -5,12 +5,15 @@ package akka.cluster import com.typesafe.config.Config import com.typesafe.config.ConfigFactory -import akka.actor.{Address, ExtendedActorSystem} +import akka.actor.{ Address, ExtendedActorSystem } import akka.remote.testconductor.RoleName import
akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.duration._ import akka.util.Duration +import org.scalatest.Suite +import org.scalatest.TestFailedException +import scala.util.control.NoStackTrace object MultiNodeClusterSpec { def clusterConfig: Config = ConfigFactory.parseString(""" @@ -29,10 +32,28 @@ object MultiNodeClusterSpec { """) } -trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec ⇒ +trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: MultiNodeSpec ⇒ override def initialParticipants = roles.size + // Cluster tests are written so that if previous step (test method) failed + // it will most likely not be possible to run next step. This ensures + // fail fast of steps after the first failure. + private var failed = false + override protected def withFixture(test: NoArgTest): Unit = try { + if (failed) { + val e = new TestFailedException("Previous step failed", 0) + // short stack trace + e.setStackTrace(e.getStackTrace.take(1)) + throw e + } + super.withFixture(test) + } catch { + case t ⇒ + failed = true + throw t + } + /** * The cluster node instance. Needs to be lazily created. 
*/ @@ -151,6 +172,6 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec } def roleName(address: Address): Option[RoleName] = { - testConductor.getNodes.await.find(node(_).address == address) + roles.find(node(_).address == address) } } From 309b460367a5a5079411bd67e91c544b09edafad Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 13:33:58 +0200 Subject: [PATCH 392/538] Test state transitions and actions step-by-step, see #2223 --- .../scala/akka/cluster/TransitionSpec.scala | 438 ++++++++++++++++++ 1 file changed, 438 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala new file mode 100644 index 0000000000..87af47a439 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -0,0 +1,438 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.actor.Address +import akka.remote.testconductor.RoleName +import MemberStatus._ + +object TransitionMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + val fifth = role("fifth") + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster { + periodic-tasks-initial-delay = 300 s # turn "off" all periodic tasks + } + """)). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class TransitionMultiJvmNode1 extends TransitionSpec with FailureDetectorPuppetStrategy +class TransitionMultiJvmNode2 extends TransitionSpec with FailureDetectorPuppetStrategy +class TransitionMultiJvmNode3 extends TransitionSpec with FailureDetectorPuppetStrategy +class TransitionMultiJvmNode4 extends TransitionSpec with FailureDetectorPuppetStrategy +class TransitionMultiJvmNode5 extends TransitionSpec with FailureDetectorPuppetStrategy + +abstract class TransitionSpec + extends MultiNodeSpec(TransitionMultiJvmSpec) + with MultiNodeClusterSpec { + + import TransitionMultiJvmSpec._ + + // sorted in the order used by the cluster + def leader(roles: RoleName*) = roles.sorted.head + def nonLeader(roles: RoleName*) = roles.toSeq.sorted.tail + + def memberStatus(address: Address): MemberStatus = { + val statusOption = (cluster.latestGossip.members ++ cluster.latestGossip.overview.unreachable).collectFirst { + case m if m.address == address ⇒ m.status + } + statusOption must not be (None) + statusOption.get + } + + def memberAddresses: Set[Address] = cluster.latestGossip.members.map(_.address) + + def members: Set[RoleName] = memberAddresses.flatMap(roleName(_)) + + def seenLatestGossip: Set[RoleName] = { + val gossip = cluster.latestGossip + gossip.overview.seen.collect { + case (address, v) if v == gossip.version ⇒ roleName(address) + }.flatten.toSet + } + + def awaitSeen(addresses: Address*): Unit = awaitCond { + seenLatestGossip.map(node(_).address) == addresses.toSet + } + + def awaitMembers(addresses: Address*): Unit = awaitCond { + memberAddresses == addresses.toSet + } + + def awaitMemberStatus(address: Address, status: MemberStatus): Unit = awaitCond { + memberStatus(address) == Up + } + + // implicit conversion from RoleName to Address + implicit def role2Address(role: RoleName): Address = node(role).address + + // DSL sugar for `role1 gossipTo role2` + implicit def roleExtras(role: RoleName): 
RoleWrapper = new RoleWrapper(role) + var gossipBarrierCounter = 0 + class RoleWrapper(fromRole: RoleName) { + def gossipTo(toRole: RoleName): Unit = { + gossipBarrierCounter += 1 + runOn(toRole) { + val g = cluster.latestGossip + testConductor.enter("before-gossip-" + gossipBarrierCounter) + awaitCond(cluster.latestGossip != g) // received gossip + testConductor.enter("after-gossip-" + gossipBarrierCounter) + } + runOn(fromRole) { + testConductor.enter("before-gossip-" + gossipBarrierCounter) + cluster.gossipTo(node(toRole).address) // send gossip + testConductor.enter("after-gossip-" + gossipBarrierCounter) + } + runOn(roles.filterNot(r ⇒ r == fromRole || r == toRole): _*) { + testConductor.enter("before-gossip-" + gossipBarrierCounter) + testConductor.enter("after-gossip-" + gossipBarrierCounter) + } + } + } + + "A Cluster" must { + + "start nodes as singleton clusters" taggedAs LongRunningTest in { + + startClusterNode() + cluster.isSingletonCluster must be(true) + cluster.self.status must be(Joining) + cluster.convergence.isDefined must be(true) + cluster.leaderActions() + cluster.self.status must be(Up) + + testConductor.enter("after-1") + } + + "perform correct transitions when second joining first" taggedAs LongRunningTest in { + + runOn(second) { + cluster.join(first) + } + runOn(first) { + awaitMembers(first, second) + memberStatus(first) must be(Up) + memberStatus(second) must be(Joining) + cluster.convergence.isDefined must be(false) + } + testConductor.enter("second-joined") + + first gossipTo second + runOn(second) { + members must be(Set(first, second)) + memberStatus(first) must be(Up) + memberStatus(second) must be(Joining) + // we got a conflicting version in second, and therefore not convergence in second + seenLatestGossip must be(Set(second)) + cluster.convergence.isDefined must be(false) + } + + second gossipTo first + runOn(first) { + seenLatestGossip must be(Set(first, second)) + } + + first gossipTo second + runOn(second) { + 
seenLatestGossip must be(Set(first, second)) + } + + runOn(first, second) { + memberStatus(first) must be(Up) + memberStatus(second) must be(Joining) + cluster.convergence.isDefined must be(true) + } + testConductor.enter("convergence-joining-2") + + runOn(leader(first, second)) { + cluster.leaderActions() + memberStatus(first) must be(Up) + memberStatus(second) must be(Up) + } + testConductor.enter("leader-actions-2") + + leader(first, second) gossipTo nonLeader(first, second).head + runOn(nonLeader(first, second).head) { + memberStatus(first) must be(Up) + memberStatus(second) must be(Up) + seenLatestGossip must be(Set(first, second)) + cluster.convergence.isDefined must be(true) + } + + nonLeader(first, second).head gossipTo leader(first, second) + runOn(first, second) { + memberStatus(first) must be(Up) + memberStatus(second) must be(Up) + seenLatestGossip must be(Set(first, second)) + cluster.convergence.isDefined must be(true) + } + + testConductor.enter("after-2") + } + + "perform correct transitions when third joins second" taggedAs LongRunningTest in { + + runOn(third) { + cluster.join(second) + } + runOn(second) { + awaitMembers(first, second, third) + cluster.convergence.isDefined must be(false) + memberStatus(third) must be(Joining) + seenLatestGossip must be(Set(second)) + } + testConductor.enter("third-joined-second") + + second gossipTo first + runOn(first) { + members must be(Set(first, second, third)) + cluster.convergence.isDefined must be(false) + memberStatus(third) must be(Joining) + } + + first gossipTo third + runOn(third) { + members must be(Set(first, second, third)) + cluster.convergence.isDefined must be(false) + memberStatus(third) must be(Joining) + // conflicting version + seenLatestGossip must be(Set(third)) + } + + third gossipTo first + third gossipTo second + runOn(first, second) { + seenLatestGossip must be(Set(myself, third)) + } + + first gossipTo second + runOn(second) { + seenLatestGossip must be(Set(first, second, third)) + 
cluster.convergence.isDefined must be(true) + } + + runOn(first, third) { + cluster.convergence.isDefined must be(false) + } + + second gossipTo first + second gossipTo third + runOn(first, second, third) { + seenLatestGossip must be(Set(first, second, third)) + memberStatus(first) must be(Up) + memberStatus(second) must be(Up) + memberStatus(third) must be(Joining) + cluster.convergence.isDefined must be(true) + } + + testConductor.enter("convergence-joining-3") + + runOn(leader(first, second, third)) { + cluster.leaderActions() + memberStatus(first) must be(Up) + memberStatus(second) must be(Up) + memberStatus(third) must be(Up) + } + testConductor.enter("leader-actions-3") + + // leader gossipTo first non-leader + leader(first, second, third) gossipTo nonLeader(first, second, third).head + runOn(nonLeader(first, second, third).head) { + memberStatus(third) must be(Up) + seenLatestGossip must be(Set(leader(first, second, third), myself)) + cluster.convergence.isDefined must be(false) + } + + // first non-leader gossipTo the other non-leader + nonLeader(first, second, third).head gossipTo nonLeader(first, second, third).tail.head + runOn(nonLeader(first, second, third).head) { + cluster.gossipTo(node(nonLeader(first, second, third).tail.head).address) + } + runOn(nonLeader(first, second, third).tail.head) { + memberStatus(third) must be(Up) + seenLatestGossip must be(Set(first, second, third)) + cluster.convergence.isDefined must be(true) + } + + // and back again + nonLeader(first, second, third).tail.head gossipTo nonLeader(first, second, third).head + runOn(nonLeader(first, second, third).head) { + memberStatus(third) must be(Up) + seenLatestGossip must be(Set(first, second, third)) + cluster.convergence.isDefined must be(true) + } + + // first non-leader gossipTo the leader + nonLeader(first, second, third).head gossipTo leader(first, second, third) + runOn(first, second, third) { + memberStatus(first) must be(Up) + memberStatus(second) must be(Up) + 
memberStatus(third) must be(Up) + seenLatestGossip must be(Set(first, second, third)) + cluster.convergence.isDefined must be(true) + } + + testConductor.enter("after-3") + } + + "startup a second separated cluster consisting of nodes fourth and fifth" taggedAs LongRunningTest in { + runOn(fourth) { + cluster.join(fifth) + awaitMembers(fourth, fifth) + cluster.gossipTo(fifth) + awaitSeen(fourth, fifth) + cluster.convergence.isDefined must be(true) + } + runOn(fifth) { + awaitMembers(fourth, fifth) + cluster.gossipTo(fourth) + awaitSeen(fourth, fifth) + cluster.gossipTo(fourth) + cluster.convergence.isDefined must be(true) + } + testConductor.enter("fourth-joined-fifth") + + testConductor.enter("after-4") + } + + "perform correct transitions when second cluster (node fourth) joins first cluster (node third)" taggedAs LongRunningTest in { + + runOn(fourth) { + cluster.join(third) + } + runOn(third) { + awaitMembers(first, second, third, fourth) + seenLatestGossip must be(Set(third)) + } + testConductor.enter("fourth-joined-third") + + third gossipTo second + runOn(second) { + seenLatestGossip must be(Set(second, third)) + } + + second gossipTo fourth + runOn(fourth) { + members must be(roles.toSet) + // merge conflict + seenLatestGossip must be(Set(fourth)) + } + + fourth gossipTo first + fourth gossipTo second + fourth gossipTo third + fourth gossipTo fifth + runOn(first, second, third, fifth) { + members must be(roles.toSet) + seenLatestGossip must be(Set(fourth, myself)) + } + + first gossipTo fifth + runOn(fifth) { + seenLatestGossip must be(Set(first, fourth, fifth)) + } + + fifth gossipTo third + runOn(third) { + seenLatestGossip must be(Set(first, third, fourth, fifth)) + } + + third gossipTo second + runOn(second) { + seenLatestGossip must be(roles.toSet) + cluster.convergence.isDefined must be(true) + } + + second gossipTo first + second gossipTo third + second gossipTo fourth + third gossipTo fifth + + seenLatestGossip must be(roles.toSet) + 
memberStatus(first) must be(Up) + memberStatus(second) must be(Up) + memberStatus(third) must be(Up) + memberStatus(fourth) must be(Joining) + memberStatus(fifth) must be(Up) + cluster.convergence.isDefined must be(true) + + testConductor.enter("convergence-joining-3") + + runOn(leader(roles: _*)) { + cluster.leaderActions() + memberStatus(fourth) must be(Up) + seenLatestGossip must be(Set(myself)) + cluster.convergence.isDefined must be(false) + } + // spread the word + for (x :: y :: Nil ← (roles.sorted ++ roles.sorted.dropRight(1)).toList.sliding(2)) { + x gossipTo y + } + + testConductor.enter("spread-5") + + seenLatestGossip must be(roles.toSet) + memberStatus(first) must be(Up) + memberStatus(second) must be(Up) + memberStatus(third) must be(Up) + memberStatus(fourth) must be(Up) + memberStatus(fifth) must be(Up) + cluster.convergence.isDefined must be(true) + + testConductor.enter("after-5") + } + + "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest in { + runOn(fifth) { + markNodeAsUnavailable(second) + cluster.reapUnreachableMembers() + cluster.latestGossip.overview.unreachable must contain(Member(second, Up)) + seenLatestGossip must be(Set(fifth)) + } + + // spread the word + val gossipRound = List(fifth, fourth, third, first, third, fourth, fifth) + for (x :: y :: Nil ← gossipRound.sliding(2)) { + x gossipTo y + } + + runOn((roles.filterNot(_ == second)): _*) { + cluster.latestGossip.overview.unreachable must contain(Member(second, Up)) + cluster.convergence.isDefined must be(false) + } + + runOn(third) { + cluster.down(second) + awaitMemberStatus(second, Down) + } + + // spread the word + val gossipRound2 = List(third, fourth, fifth, first, third, fourth, fifth) + for (x :: y :: Nil ← gossipRound2.sliding(2)) { + x gossipTo y + } + + runOn((roles.filterNot(_ == second)): _*) { + cluster.latestGossip.overview.unreachable must contain(Member(second, Down)) + memberStatus(second) must be(Down) + seenLatestGossip must 
be(Set(first, third, fourth, fifth)) + cluster.convergence.isDefined must be(true) + } + + testConductor.enter("after-6") + } + + } +} From 51a38f318a86379a37e7de6efa0b1b32e2cd09d2 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 13:44:37 +0200 Subject: [PATCH 393/538] Real SunnyWeather --- .../scala/akka/cluster/JoinTwoClustersSpec.scala | 2 +- .../scala/akka/cluster/SunnyWeatherSpec.scala | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index f4ea161b2a..4b64bb6e58 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -17,7 +17,7 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { val c1 = role("c1") val c2 = role("c2") - commonConfig(debugConfig(on = true).withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index b8486841c6..6f3ddfc866 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -21,18 +21,17 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" akka.cluster { - gossip-interval = 400 ms nr-of-deputy-nodes = 0 } akka.loglevel = INFO """)) } -class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy -class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy -class 
SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy -class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy -class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy abstract class SunnyWeatherSpec extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) From 46c06fa66918ad91e3a88c95218219ea41e5b3cf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 15 Jun 2012 14:15:09 +0200 Subject: [PATCH 394/538] Minor reformatting --- .../akka/remote/netty/NettySSLSupport.scala | 89 ++++++++----------- .../provider/AES128CounterRNGFast.scala | 11 +-- .../provider/AES128CounterRNGSecure.scala | 11 +-- .../provider/AES256CounterRNGSecure.scala | 13 +-- .../akka/security/provider/AkkaProvider.scala | 10 +-- 5 files changed, 50 insertions(+), 84 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 99f56bf301..4c68069278 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -16,14 +16,12 @@ import akka.security.provider.AkkaProvider * Used for adding SSL support to Netty pipeline * Internal use only */ -private object NettySSLSupport { +private[netty] object NettySSLSupport { /** * Construct a SSLHandler which can be inserted into a Netty server/client pipeline */ - def apply(settings: NettySettings, log: 
LoggingAdapter, isClient: Boolean): SslHandler = { - if (isClient) initialiseClientSSL(settings, log) - else initialiseServerSSL(settings, log) - } + def apply(settings: NettySettings, log: LoggingAdapter, isClient: Boolean): SslHandler = + if (isClient) initialiseClientSSL(settings, log) else initialiseServerSSL(settings, log) private def initialiseCustomSecureRandom(settings: NettySettings, log: LoggingAdapter): SecureRandom = { /** @@ -51,36 +49,33 @@ private object NettySSLSupport { log.debug("SSLRandomNumberGenerator not specified, falling back to SecureRandom") new SecureRandom } - // prevent stall on first access - rng.nextInt() + rng.nextInt() // prevent stall on first access rng } private def initialiseClientSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { log.debug("Client SSL is enabled, initialising ...") - val sslContext: Option[SSLContext] = { - (settings.SSLTrustStore, settings.SSLTrustStorePassword, settings.SSLProtocol) match { - case (Some(trustStore), Some(password), Some(protocol)) ⇒ constructClientContext(settings, log, trustStore, password, protocol) - case (trustStore, password, protocol) ⇒ - val msg = "SSL trust store settings went missing. 
[trust-store: %s] [trust-store-password: %s] [protocol: %s]" - .format(trustStore, password, protocol) - throw new GeneralSecurityException(msg) - } - } - sslContext match { - case Some(context) ⇒ { + ((settings.SSLTrustStore, settings.SSLTrustStorePassword, settings.SSLProtocol) match { + case (Some(trustStore), Some(password), Some(protocol)) ⇒ constructClientContext(settings, log, trustStore, password, protocol) + case (trustStore, password, protocol) ⇒ throw new GeneralSecurityException( + "One or several SSL trust store settings are missing: [trust-store: %s] [trust-store-password: %s] [protocol: %s]".format( + trustStore, + password, + protocol)) + }) match { + case Some(context) ⇒ log.debug("Using client SSL context to create SSLEngine ...") val sslEngine = context.createSSLEngine sslEngine.setUseClientMode(true) sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) new SslHandler(sslEngine) - } - case None ⇒ { - val msg = "Failed to initialise client SSL because SSL context could not be found. " + - "Make sure your settings are correct: [trust-store: %s] [trust-store-password: %s] [protocol: %s]" - .format(settings.SSLTrustStore, settings.SSLTrustStorePassword, settings.SSLProtocol) - throw new GeneralSecurityException(msg) - } + case None ⇒ + throw new GeneralSecurityException( + """Failed to initialise client SSL because SSL context could not be found." 
+ + "Make sure your settings are correct: [trust-store: %s] [trust-store-password: %s] [protocol: %s]""".format( + settings.SSLTrustStore, + settings.SSLTrustStorePassword, + settings.SSLProtocol)) } } @@ -88,13 +83,10 @@ private object NettySSLSupport { try { val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) - val stream = new FileInputStream(trustStorePath) - trustStore.load(stream, trustStorePassword.toCharArray) + trustStore.load(new FileInputStream(trustStorePath), trustStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? trustManagerFactory.init(trustStore) val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers - val sslContext = SSLContext.getInstance(protocol) - sslContext.init(null, trustManagers, initialiseCustomSecureRandom(settings, log)) - Some(sslContext) + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initialiseCustomSecureRandom(settings, log)); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) case e: IOException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) @@ -104,28 +96,24 @@ private object NettySSLSupport { private def initialiseServerSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { log.debug("Server SSL is enabled, initialising ...") - val sslContext: Option[SSLContext] = { - (settings.SSLKeyStore, settings.SSLKeyStorePassword, settings.SSLProtocol) match { - case (Some(keyStore), Some(password), Some(protocol)) ⇒ constructServerContext(settings, log, keyStore, password, protocol) - case (keyStore, password, protocol) ⇒ - val msg = "SSL key store settings went missing. 
[key-store: %s] [key-store-password: %s] [protocol: %s]".format(keyStore, password, protocol) - throw new GeneralSecurityException(msg) - } - } - sslContext match { - case Some(context) ⇒ { + + ((settings.SSLKeyStore, settings.SSLKeyStorePassword, settings.SSLProtocol) match { + case (Some(keyStore), Some(password), Some(protocol)) ⇒ constructServerContext(settings, log, keyStore, password, protocol) + case (keyStore, password, protocol) ⇒ throw new GeneralSecurityException( + "SSL key store settings went missing. [key-store: %s] [key-store-password: %s] [protocol: %s]".format(keyStore, password, protocol)) + }) match { + case Some(context) ⇒ log.debug("Using server SSL context to create SSLEngine ...") val sslEngine = context.createSSLEngine sslEngine.setUseClientMode(false) sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) new SslHandler(sslEngine) - } - case None ⇒ { - val msg = "Failed to initialise server SSL because SSL context could not be found. " + - "Make sure your settings are correct: [key-store: %s] [key-store-password: %s] [protocol: %s]" - .format(settings.SSLKeyStore, settings.SSLKeyStorePassword, settings.SSLProtocol) - throw new GeneralSecurityException(msg) - } + case None ⇒ throw new GeneralSecurityException( + """Failed to initialise server SSL because SSL context could not be found. + Make sure your settings are correct: [key-store: %s] [key-store-password: %s] [protocol: %s]""".format( + settings.SSLKeyStore, + settings.SSLKeyStorePassword, + settings.SSLProtocol)) } } @@ -133,12 +121,9 @@ private object NettySSLSupport { try { val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) - val stream = new FileInputStream(keyStorePath) - keyStore.load(stream, keyStorePassword.toCharArray) + keyStore.load(new FileInputStream(keyStorePath), keyStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? 
factory.init(keyStore, keyStorePassword.toCharArray) - val sslContext = SSLContext.getInstance(protocol) - sslContext.init(factory.getKeyManagers, null, initialiseCustomSecureRandom(settings, log)) - Some(sslContext) + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initialiseCustomSecureRandom(settings, log)); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) case e: IOException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala index 12f0d2a83e..c355f5a548 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala @@ -15,17 +15,14 @@ class AES128CounterRNGFast extends java.security.SecureRandomSpi { /** * This is managed internally only */ - protected def engineSetSeed(seed: Array[Byte]) { - } + override protected def engineSetSeed(seed: Array[Byte]): Unit = () /** * Generates a user-specified number of random bytes. * * @param bytes the array to be filled in with random bytes. */ - protected def engineNextBytes(bytes: Array[Byte]) { - rng.nextBytes(bytes) - } + override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** * Returns the given number of seed bytes. This call may be used to @@ -34,8 +31,6 @@ class AES128CounterRNGFast extends java.security.SecureRandomSpi { * @param numBytes the number of seed bytes to generate. * @return the seed bytes. 
*/ - protected def engineGenerateSeed(numBytes: Int): Array[Byte] = { - (new SecureRandom).generateSeed(numBytes) - } + override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = (new SecureRandom).generateSeed(numBytes) } diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala index 4859a8ea4b..846476cc2d 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala @@ -14,17 +14,14 @@ class AES128CounterRNGSecure extends java.security.SecureRandomSpi { /** * This is managed internally only */ - protected def engineSetSeed(seed: Array[Byte]) { - } + override protected def engineSetSeed(seed: Array[Byte]): Unit = () /** * Generates a user-specified number of random bytes. * * @param bytes the array to be filled in with random bytes. */ - protected def engineNextBytes(bytes: Array[Byte]) { - rng.nextBytes(bytes) - } + override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** * Returns the given number of seed bytes. This call may be used to @@ -33,8 +30,6 @@ class AES128CounterRNGSecure extends java.security.SecureRandomSpi { * @param numBytes the number of seed bytes to generate. * @return the seed bytes. 
*/ - protected def engineGenerateSeed(numBytes: Int): Array[Byte] = { - DefaultSeedGenerator.getInstance.generateSeed(numBytes) - } + override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = DefaultSeedGenerator.getInstance.generateSeed(numBytes) } diff --git a/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala index 3aeda2b1a1..d942938411 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala @@ -9,22 +9,19 @@ import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } * Internal API */ class AES256CounterRNGSecure extends java.security.SecureRandomSpi { - private val rng = new AESCounterRNG(32) + private val rng = new AESCounterRNG(32) // Magic number is magic /** * This is managed internally only */ - protected def engineSetSeed(seed: Array[Byte]) { - } + override protected def engineSetSeed(seed: Array[Byte]): Unit = () /** * Generates a user-specified number of random bytes. * * @param bytes the array to be filled in with random bytes. */ - protected def engineNextBytes(bytes: Array[Byte]) { - rng.nextBytes(bytes) - } + override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** * Returns the given number of seed bytes. This call may be used to @@ -33,8 +30,6 @@ class AES256CounterRNGSecure extends java.security.SecureRandomSpi { * @param numBytes the number of seed bytes to generate. * @return the seed bytes. 
*/ - protected def engineGenerateSeed(numBytes: Int): Array[Byte] = { - DefaultSeedGenerator.getInstance.generateSeed(numBytes) - } + override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = DefaultSeedGenerator.getInstance.generateSeed(numBytes) } diff --git a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala index 705afa37ba..f44aeae584 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala @@ -11,20 +11,16 @@ import java.security.{ PrivilegedAction, AccessController, Provider } final class AkkaProvider extends Provider("Akka", 1.0, "Akka provider 1.0 that implements a secure AES random number generator") { AccessController.doPrivileged(new PrivilegedAction[AkkaProvider] { def run = { - /** - * SecureRandom - */ + //SecureRandom put("SecureRandom.AES128CounterRNGFast", "akka.security.provider.AES128CounterRNGFast") put("SecureRandom.AES128CounterRNGSecure", "akka.security.provider.AES128CounterRNGSecure") put("SecureRandom.AES256CounterRNGSecure", "akka.security.provider.AES256CounterRNGSecure") - /** - * Implementation type: software or hardware - */ + //Implementation type: software or hardware put("SecureRandom.AES128CounterRNGFast ImplementedIn", "Software") put("SecureRandom.AES128CounterRNGSecure ImplementedIn", "Software") put("SecureRandom.AES256CounterRNGSecure ImplementedIn", "Software") - null + null //Magic null is magic } }) } From fd42c3d49a6dcac26802c0d7d3599ad123c6e7d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Fri, 15 Jun 2012 14:39:47 +0200 Subject: [PATCH 395/538] Allow barrier timeouts to be shortened and other review fixes --- ...ientDowningNodeThatIsUnreachableSpec.scala | 8 +- .../ClientDowningNodeThatIsUpSpec.scala | 8 +- .../scala/akka/cluster/ConvergenceSpec.scala | 8 +- 
.../GossipingAccrualFailureDetectorSpec.scala | 4 +- ...aderDowningNodeThatIsUnreachableSpec.scala | 18 +- .../akka/cluster/LeaderElectionSpec.scala | 16 +- .../MembershipChangeListenerExitingSpec.scala | 8 +- .../MembershipChangeListenerJoinSpec.scala | 6 +- .../MembershipChangeListenerLeavingSpec.scala | 8 +- .../MembershipChangeListenerUpSpec.scala | 10 +- .../akka/cluster/MultiNodeClusterSpec.scala | 4 +- .../scala/akka/cluster/NodeJoinSpec.scala | 2 +- ...LeavingAndExitingAndBeingRemovedSpec.scala | 4 +- .../cluster/NodeLeavingAndExitingSpec.scala | 4 +- .../scala/akka/cluster/NodeLeavingSpec.scala | 4 +- .../akka/cluster/NodeMembershipSpec.scala | 6 +- .../scala/akka/cluster/NodeUpSpec.scala | 8 +- .../akka/cluster/SingletonClusterSpec.scala | 4 +- .../scala/akka/cluster/SunnyWeatherSpec.scala | 4 +- .../testconductor/TestConductorProtocol.java | 237 ++++++++++-------- .../main/protocol/TestConductorProtocol.proto | 12 +- .../akka/remote/testconductor/Conductor.scala | 65 ++--- .../akka/remote/testconductor/DataTypes.scala | 23 +- .../akka/remote/testconductor/Player.scala | 8 +- .../akka/remote/LookupRemoteActorSpec.scala | 2 +- .../akka/remote/NewRemoteActorSpec.scala | 4 +- .../router/RandomRoutedRemoteActorSpec.scala | 10 +- .../RoundRobinRoutedRemoteActorSpec.scala | 10 +- .../ScatterGatherRoutedRemoteActorSpec.scala | 10 +- .../testconductor/TestConductorSpec.scala | 10 +- .../remote/testconductor/BarrierSpec.scala | 123 +++++++-- .../akka/remote/testkit/MultiNodeSpec.scala | 14 +- 32 files changed, 395 insertions(+), 267 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index e86b026bfd..ad90cc5287 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala 
@@ -47,23 +47,23 @@ abstract class ClientDowningNodeThatIsUnreachableSpec // mark 'third' node as DOWN cluster.down(thirdAddress) - enter("down-third-node") + enterBarrier("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) } runOn(third) { - enter("down-third-node") + enterBarrier("down-third-node") } runOn(second, fourth) { - enter("down-third-node") + enterBarrier("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) } - enter("await-completion") + enterBarrier("await-completion") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 228f5b6d98..dabd92abb0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -43,7 +43,7 @@ abstract class ClientDowningNodeThatIsUpSpec runOn(first) { // mark 'third' node as DOWN cluster.down(thirdAddress) - enter("down-third-node") + enterBarrier("down-third-node") markNodeAsUnavailable(thirdAddress) @@ -52,16 +52,16 @@ abstract class ClientDowningNodeThatIsUpSpec } runOn(third) { - enter("down-third-node") + enterBarrier("down-third-node") } runOn(second, fourth) { - enter("down-third-node") + enterBarrier("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) } - enter("await-completion") + enterBarrier("await-completion") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 2e496c9b2c..a8e985412f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -46,12 +46,12 @@ abstract class ConvergenceSpec // doesn't join immediately } - enter("after-1") + enterBarrier("after-1") } "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest ignore { val thirdAddress = node(third).address - enter("before-shutdown") + enterBarrier("before-shutdown") runOn(first) { // kill 'third' node @@ -78,7 +78,7 @@ abstract class ConvergenceSpec } } - enter("after-2") + enterBarrier("after-2") } "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest ignore { @@ -116,7 +116,7 @@ abstract class ConvergenceSpec } } - enter("after-3") + enterBarrier("after-3") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index d66fb95692..d0e255fb81 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -43,7 +43,7 @@ abstract class GossipingAccrualFailureDetectorSpec cluster.failureDetector.isAvailable(secondAddress) must be(true) cluster.failureDetector.isAvailable(thirdAddress) must be(true) - enter("after-1") + enterBarrier("after-1") } "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { @@ -59,7 +59,7 @@ abstract class GossipingAccrualFailureDetectorSpec cluster.failureDetector.isAvailable(secondAddress) must be(true) } - enter("after-2") + enterBarrier("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 9953a4c61f..1ec4f47fcf 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -46,7 +46,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec runOn(first) { // kill 'fourth' node testConductor.shutdown(fourth, 0) - enter("down-fourth-node") + enterBarrier("down-fourth-node") // mark the node as unreachable in the failure detector markNodeAsUnavailable(fourthAddress) @@ -57,26 +57,26 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec } runOn(fourth) { - enter("down-fourth-node") + enterBarrier("down-fourth-node") } runOn(second, third) { - enter("down-fourth-node") + enterBarrier("down-fourth-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) } - enter("await-completion-1") + enterBarrier("await-completion-1") } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { val secondAddress = node(second).address - enter("before-down-second-node") + enterBarrier("before-down-second-node") runOn(first) { // kill 'second' node testConductor.shutdown(second, 0) - enter("down-second-node") + enterBarrier("down-second-node") // mark the node as unreachable in the failure detector markNodeAsUnavailable(secondAddress) @@ -87,16 +87,16 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec } runOn(second) { - enter("down-second-node") + enterBarrier("down-second-node") } runOn(third) { - enter("down-second-node") + enterBarrier("down-second-node") awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30 seconds) } - enter("await-completion-2") + enterBarrier("await-completion-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 28a684eb7b..965efc1555 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -50,7 +50,7 @@ abstract class LeaderElectionSpec assertLeaderIn(sortedRoles) } - enter("after") + enterBarrier("after") } def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { @@ -64,33 +64,33 @@ abstract class LeaderElectionSpec case `controller` ⇒ val leaderAddress = node(leader).address - enter("before-shutdown") + enterBarrier("before-shutdown") testConductor.shutdown(leader, 0) - enter("after-shutdown", "after-down", "completed") + enterBarrier("after-shutdown", "after-down", "completed") markNodeAsUnavailable(leaderAddress) case `leader` ⇒ - enter("before-shutdown", "after-shutdown") + enterBarrier("before-shutdown", "after-shutdown") // this node will be shutdown by the controller and doesn't participate in more barriers case `aUser` ⇒ val leaderAddress = node(leader).address - enter("before-shutdown", "after-shutdown") + enterBarrier("before-shutdown", "after-shutdown") // user marks the shutdown leader as DOWN cluster.down(leaderAddress) - enter("after-down", "completed") + enterBarrier("after-down", "completed") markNodeAsUnavailable(leaderAddress) case _ if remainingRoles.contains(myself) ⇒ // remaining cluster nodes, not shutdown - enter("before-shutdown", "after-shutdown", "after-down") + enterBarrier("before-shutdown", "after-shutdown", "after-down") awaitUpConvergence(currentRoles.size - 1) val nextExpectedLeader = remainingRoles.head cluster.isLeader must be(myself == nextExpectedLeader) assertLeaderIn(remainingRoles) - enter("completed") + enterBarrier("completed") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index f8ad009bc2..5b396cd2b9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala 
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -47,12 +47,12 @@ abstract class MembershipChangeListenerExitingSpec awaitClusterUp(first, second, third) runOn(first) { - enter("registered-listener") + enterBarrier("registered-listener") cluster.leave(secondAddress) } runOn(second) { - enter("registered-listener") + enterBarrier("registered-listener") } runOn(third) { @@ -63,11 +63,11 @@ abstract class MembershipChangeListenerExitingSpec exitingLatch.countDown() } }) - enter("registered-listener") + enterBarrier("registered-listener") exitingLatch.await } - enter("finished") + enterBarrier("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index af3fe0aeee..56320f8c20 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -45,19 +45,19 @@ abstract class MembershipChangeListenerJoinSpec joinLatch.countDown() } }) - enter("registered-listener") + enterBarrier("registered-listener") joinLatch.await } runOn(second) { - enter("registered-listener") + enterBarrier("registered-listener") cluster.join(firstAddress) } awaitUpConvergence(2) - enter("after") + enterBarrier("after") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 81556a44e8..2d93f4a5dd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -44,12 +44,12 @@ abstract class MembershipChangeListenerLeavingSpec awaitClusterUp(first, second, third) runOn(first) { - enter("registered-listener") 
+ enterBarrier("registered-listener") cluster.leave(secondAddress) } runOn(second) { - enter("registered-listener") + enterBarrier("registered-listener") } runOn(third) { @@ -62,11 +62,11 @@ abstract class MembershipChangeListenerLeavingSpec latch.countDown() } }) - enter("registered-listener") + enterBarrier("registered-listener") latch.await } - enter("finished") + enterBarrier("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 7aea16ed99..f8e7475501 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -46,16 +46,16 @@ abstract class MembershipChangeListenerUpSpec latch.countDown() } }) - enter("listener-1-registered") + enterBarrier("listener-1-registered") cluster.join(firstAddress) latch.await } runOn(third) { - enter("listener-1-registered") + enterBarrier("listener-1-registered") } - enter("after-1") + enterBarrier("after-1") } "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { @@ -68,7 +68,7 @@ abstract class MembershipChangeListenerUpSpec latch.countDown() } }) - enter("listener-2-registered") + enterBarrier("listener-2-registered") runOn(third) { cluster.join(firstAddress) @@ -76,7 +76,7 @@ abstract class MembershipChangeListenerUpSpec latch.await - enter("after-2") + enterBarrier("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index d67dedc34f..0b1703642f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -71,14 +71,14 
@@ trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec // make sure that the node-to-join is started before other join startClusterNode() } - enter(roles.head.name + "-started") + enterBarrier(roles.head.name + "-started") if (roles.tail.contains(myself)) { cluster.join(node(roles.head).address) } if (upConvergence && roles.contains(myself)) { awaitUpConvergence(numberOfMembers = roles.length) } - enter(roles.map(_.name).mkString("-") + "-joined") + enterBarrier(roles.map(_.name).mkString("-") + "-joined") } /** diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 4a93655fef..00563d8775 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -45,7 +45,7 @@ abstract class NodeJoinSpec awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Joining }) - enter("after") + enterBarrier("after") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index d7cf74af75..426f26ef6a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -44,7 +44,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec runOn(first) { cluster.leave(secondAddress) } - enter("second-left") + enterBarrier("second-left") runOn(first, third) { // verify that the 'second' node is no longer part of the 'members' set @@ -59,7 +59,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec isRemoved.get.address must be(secondAddress) } - enter("finished") + enterBarrier("finished") } } } diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index be28235c33..c101c0d9a1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -50,7 +50,7 @@ abstract class NodeLeavingAndExitingSpec runOn(first) { cluster.leave(secondAddress) } - enter("second-left") + enterBarrier("second-left") runOn(first, third) { @@ -69,7 +69,7 @@ abstract class NodeLeavingAndExitingSpec hasExited.get.address must be(secondAddress) } - enter("finished") + enterBarrier("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 5c5ffb16e0..11d943d02f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -44,7 +44,7 @@ abstract class NodeLeavingSpec runOn(first) { cluster.leave(secondAddress) } - enter("second-left") + enterBarrier("second-left") runOn(first, third) { awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Leaving)) @@ -54,7 +54,7 @@ abstract class NodeLeavingSpec hasLeft.get.address must be(secondAddress) } - enter("finished") + enterBarrier("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 350e43a54b..067fcc4063 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -38,7 +38,7 @@ abstract class NodeMembershipSpec runOn(first) { startClusterNode() } - enter("first-started") + enterBarrier("first-started") runOn(first, second) { cluster.join(firstAddress) @@ -50,7 +50,7 @@ 
abstract class NodeMembershipSpec awaitCond(cluster.convergence.isDefined) } - enter("after-1") + enterBarrier("after-1") } "(when three nodes) start gossiping to each other so that all nodes gets the same gossip info" taggedAs LongRunningTest in { @@ -66,7 +66,7 @@ abstract class NodeMembershipSpec } awaitCond(cluster.convergence.isDefined) - enter("after-2") + enterBarrier("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 2e4691b1a4..c6ab7c0860 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -33,7 +33,7 @@ abstract class NodeUpSpec awaitClusterUp(first, second) - enter("after-1") + enterBarrier("after-1") } "be unaffected when joining again" taggedAs LongRunningTest in { @@ -45,12 +45,12 @@ abstract class NodeUpSpec unexpected.set(members) } }) - enter("listener-registered") + enterBarrier("listener-registered") runOn(second) { cluster.join(node(first).address) } - enter("joined-again") + enterBarrier("joined-again") // let it run for a while to make sure that nothing bad happens for (n ← 1 to 20) { @@ -59,7 +59,7 @@ abstract class NodeUpSpec cluster.latestGossip.members.forall(_.status == MemberStatus.Up) must be(true) } - enter("after-2") + enterBarrier("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index 9137abbb1a..a747f93615 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -43,7 +43,7 @@ abstract class SingletonClusterSpec cluster.isSingletonCluster must be(false) assertLeader(first, second) - enter("after-1") + enterBarrier("after-1") } "become singleton cluster when one node is shutdown" taggedAs 
LongRunningTest in { @@ -58,7 +58,7 @@ abstract class SingletonClusterSpec assertLeader(first) } - enter("after-2") + enterBarrier("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 5a4699c91f..33b7a9ccc0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -62,7 +62,7 @@ abstract class SunnyWeatherSpec }) for (n ← 1 to 30) { - enter("period-" + n) + enterBarrier("period-" + n) unexpected.get must be(null) awaitUpConvergence(roles.size) assertLeaderIn(roles) @@ -70,7 +70,7 @@ abstract class SunnyWeatherSpec 1.seconds.sleep } - enter("after") + enterBarrier("after") } } } diff --git a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java index 14fe236950..bd8de8a052 100644 --- a/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java +++ b/akka-remote-tests/src/main/java/akka/remote/testconductor/TestConductorProtocol.java @@ -8,6 +8,81 @@ public final class TestConductorProtocol { public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } + public enum BarrierOp + implements com.google.protobuf.ProtocolMessageEnum { + Enter(0, 1), + Fail(1, 2), + Succeeded(2, 3), + Failed(3, 4), + ; + + public static final int Enter_VALUE = 1; + public static final int Fail_VALUE = 2; + public static final int Succeeded_VALUE = 3; + public static final int Failed_VALUE = 4; + + + public final int getNumber() { return value; } + + public static BarrierOp valueOf(int value) { + switch (value) { + case 1: return Enter; + case 2: return Fail; + case 3: return Succeeded; + case 4: return Failed; + default: return null; + } + } + + public static 
com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BarrierOp findValueByNumber(int number) { + return BarrierOp.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(0); + } + + private static final BarrierOp[] VALUES = { + Enter, Fail, Succeeded, Failed, + }; + + public static BarrierOp valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private BarrierOp(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:BarrierOp) + } + public enum FailType implements com.google.protobuf.ProtocolMessageEnum { Throttle(0, 1), @@ -56,7 +131,7 @@ public final class TestConductorProtocol { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(0); + return akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(1); } private static final FailType[] VALUES = { @@ -128,7 +203,7 @@ public final class TestConductorProtocol { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return 
akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(1); + return akka.remote.testconductor.TestConductorProtocol.getDescriptor().getEnumTypes().get(2); } private static final Direction[] VALUES = { @@ -1699,17 +1774,13 @@ public final class TestConductorProtocol { boolean hasName(); String getName(); - // optional bool status = 2; - boolean hasStatus(); - boolean getStatus(); + // required .BarrierOp op = 2; + boolean hasOp(); + akka.remote.testconductor.TestConductorProtocol.BarrierOp getOp(); // optional int64 timeout = 3; boolean hasTimeout(); long getTimeout(); - - // optional bool failed = 4; - boolean hasFailed(); - boolean getFailed(); } public static final class EnterBarrier extends com.google.protobuf.GeneratedMessage @@ -1772,14 +1843,14 @@ public final class TestConductorProtocol { } } - // optional bool status = 2; - public static final int STATUS_FIELD_NUMBER = 2; - private boolean status_; - public boolean hasStatus() { + // required .BarrierOp op = 2; + public static final int OP_FIELD_NUMBER = 2; + private akka.remote.testconductor.TestConductorProtocol.BarrierOp op_; + public boolean hasOp() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public boolean getStatus() { - return status_; + public akka.remote.testconductor.TestConductorProtocol.BarrierOp getOp() { + return op_; } // optional int64 timeout = 3; @@ -1792,21 +1863,10 @@ public final class TestConductorProtocol { return timeout_; } - // optional bool failed = 4; - public static final int FAILED_FIELD_NUMBER = 4; - private boolean failed_; - public boolean hasFailed() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getFailed() { - return failed_; - } - private void initFields() { name_ = ""; - status_ = false; + op_ = akka.remote.testconductor.TestConductorProtocol.BarrierOp.Enter; timeout_ = 0L; - failed_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -1817,6 +1877,10 @@ public 
final class TestConductorProtocol { memoizedIsInitialized = 0; return false; } + if (!hasOp()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -1828,14 +1892,11 @@ public final class TestConductorProtocol { output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, status_); + output.writeEnum(2, op_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt64(3, timeout_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(4, failed_); - } getUnknownFields().writeTo(output); } @@ -1851,16 +1912,12 @@ public final class TestConductorProtocol { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, status_); + .computeEnumSize(2, op_.getNumber()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(3, timeout_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, failed_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -1987,12 +2044,10 @@ public final class TestConductorProtocol { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - status_ = false; + op_ = akka.remote.testconductor.TestConductorProtocol.BarrierOp.Enter; bitField0_ = (bitField0_ & ~0x00000002); timeout_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); - failed_ = false; - bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -2038,15 +2093,11 @@ public final class TestConductorProtocol { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.status_ = status_; + result.op_ = op_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.timeout_ = timeout_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - 
to_bitField0_ |= 0x00000008; - } - result.failed_ = failed_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -2066,15 +2117,12 @@ public final class TestConductorProtocol { if (other.hasName()) { setName(other.getName()); } - if (other.hasStatus()) { - setStatus(other.getStatus()); + if (other.hasOp()) { + setOp(other.getOp()); } if (other.hasTimeout()) { setTimeout(other.getTimeout()); } - if (other.hasFailed()) { - setFailed(other.getFailed()); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -2084,6 +2132,10 @@ public final class TestConductorProtocol { return false; } + if (!hasOp()) { + + return false; + } return true; } @@ -2116,8 +2168,14 @@ public final class TestConductorProtocol { break; } case 16: { - bitField0_ |= 0x00000002; - status_ = input.readBool(); + int rawValue = input.readEnum(); + akka.remote.testconductor.TestConductorProtocol.BarrierOp value = akka.remote.testconductor.TestConductorProtocol.BarrierOp.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + op_ = value; + } break; } case 24: { @@ -2125,11 +2183,6 @@ public final class TestConductorProtocol { timeout_ = input.readInt64(); break; } - case 32: { - bitField0_ |= 0x00000008; - failed_ = input.readBool(); - break; - } } } } @@ -2172,23 +2225,26 @@ public final class TestConductorProtocol { onChanged(); } - // optional bool status = 2; - private boolean status_ ; - public boolean hasStatus() { + // required .BarrierOp op = 2; + private akka.remote.testconductor.TestConductorProtocol.BarrierOp op_ = akka.remote.testconductor.TestConductorProtocol.BarrierOp.Enter; + public boolean hasOp() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public boolean getStatus() { - return status_; + public akka.remote.testconductor.TestConductorProtocol.BarrierOp getOp() { + return op_; } - public Builder setStatus(boolean value) { + public Builder 
setOp(akka.remote.testconductor.TestConductorProtocol.BarrierOp value) { + if (value == null) { + throw new NullPointerException(); + } bitField0_ |= 0x00000002; - status_ = value; + op_ = value; onChanged(); return this; } - public Builder clearStatus() { + public Builder clearOp() { bitField0_ = (bitField0_ & ~0x00000002); - status_ = false; + op_ = akka.remote.testconductor.TestConductorProtocol.BarrierOp.Enter; onChanged(); return this; } @@ -2214,27 +2270,6 @@ public final class TestConductorProtocol { return this; } - // optional bool failed = 4; - private boolean failed_ ; - public boolean hasFailed() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getFailed() { - return failed_; - } - public Builder setFailed(boolean value) { - bitField0_ |= 0x00000008; - failed_ = value; - onChanged(); - return this; - } - public Builder clearFailed() { - bitField0_ = (bitField0_ & ~0x00000008); - failed_ = false; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:EnterBarrier) } @@ -4170,19 +4205,21 @@ public final class TestConductorProtocol { "\0132\r.EnterBarrier\022\037\n\007failure\030\003 \001(\0132\016.Inje" + "ctFailure\022\014\n\004done\030\004 \001(\t\022\035\n\004addr\030\005 \001(\0132\017." 
+ "AddressRequest\"0\n\005Hello\022\014\n\004name\030\001 \002(\t\022\031\n" + - "\007address\030\002 \002(\0132\010.Address\"M\n\014EnterBarrier" + - "\022\014\n\004name\030\001 \002(\t\022\016\n\006status\030\002 \001(\010\022\017\n\007timeou" + - "t\030\003 \001(\003\022\016\n\006failed\030\004 \001(\010\"6\n\016AddressReques" + - "t\022\014\n\004node\030\001 \002(\t\022\026\n\004addr\030\002 \001(\0132\010.Address\"" + - "G\n\007Address\022\020\n\010protocol\030\001 \002(\t\022\016\n\006system\030\002", - " \002(\t\022\014\n\004host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInj" + - "ectFailure\022\032\n\007failure\030\001 \002(\0162\t.FailType\022\035" + - "\n\tdirection\030\002 \001(\0162\n.Direction\022\031\n\007address" + - "\030\003 \001(\0132\010.Address\022\020\n\010rateMBit\030\006 \001(\002\022\021\n\tex" + - "itValue\030\007 \001(\005*A\n\010FailType\022\014\n\010Throttle\020\001\022" + - "\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022\014\n\010Shutdown\020\004" + - "*,\n\tDirection\022\010\n\004Send\020\001\022\013\n\007Receive\020\002\022\010\n\004" + - "Both\020\003B\035\n\031akka.remote.testconductorH\001" + "\007address\030\002 \002(\0132\010.Address\"E\n\014EnterBarrier" + + "\022\014\n\004name\030\001 \002(\t\022\026\n\002op\030\002 \002(\0162\n.BarrierOp\022\017" + + "\n\007timeout\030\003 \001(\003\"6\n\016AddressRequest\022\014\n\004nod" + + "e\030\001 \002(\t\022\026\n\004addr\030\002 \001(\0132\010.Address\"G\n\007Addre" + + "ss\022\020\n\010protocol\030\001 \002(\t\022\016\n\006system\030\002 \002(\t\022\014\n\004", + "host\030\003 \002(\t\022\014\n\004port\030\004 \002(\005\"\212\001\n\rInjectFailu" + + "re\022\032\n\007failure\030\001 \002(\0162\t.FailType\022\035\n\tdirect" + + "ion\030\002 \001(\0162\n.Direction\022\031\n\007address\030\003 \001(\0132\010" + + ".Address\022\020\n\010rateMBit\030\006 \001(\002\022\021\n\texitValue\030" + + "\007 
\001(\005*;\n\tBarrierOp\022\t\n\005Enter\020\001\022\010\n\004Fail\020\002\022" + + "\r\n\tSucceeded\020\003\022\n\n\006Failed\020\004*A\n\010FailType\022\014" + + "\n\010Throttle\020\001\022\016\n\nDisconnect\020\002\022\t\n\005Abort\020\003\022" + + "\014\n\010Shutdown\020\004*,\n\tDirection\022\010\n\004Send\020\001\022\013\n\007" + + "Receive\020\002\022\010\n\004Both\020\003B\035\n\031akka.remote.testc" + + "onductorH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -4210,7 +4247,7 @@ public final class TestConductorProtocol { internal_static_EnterBarrier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnterBarrier_descriptor, - new java.lang.String[] { "Name", "Status", "Timeout", "Failed", }, + new java.lang.String[] { "Name", "Op", "Timeout", }, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.class, akka.remote.testconductor.TestConductorProtocol.EnterBarrier.Builder.class); internal_static_AddressRequest_descriptor = diff --git a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto index bef4aad5c1..1ff8a83c24 100644 --- a/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto +++ b/akka-remote-tests/src/main/protocol/TestConductorProtocol.proto @@ -26,11 +26,17 @@ message Hello { required Address address = 2; } +enum BarrierOp { + Enter = 1; + Fail = 2; + Succeeded = 3; + Failed = 4; +} + message EnterBarrier { required string name = 1; - optional bool status = 2; + required BarrierOp op = 2; optional int64 timeout = 3; - optional bool failed = 4; } message AddressRequest { @@ -51,11 +57,13 @@ enum FailType { Abort = 3; Shutdown = 4; } + enum Direction { Send = 1; Receive = 2; Both = 3; } + message InjectFailure { required FailType failure = 1; optional Direction direction = 2; 
diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 7264948b0f..9256ec1abc 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -8,8 +8,6 @@ import RemoteConnection.getAddrString import TestConductorProtocol._ import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } import com.typesafe.config.ConfigFactory -import akka.util.Timeout -import akka.util.Duration import akka.util.duration._ import akka.pattern.ask import java.util.concurrent.TimeUnit.MILLISECONDS @@ -26,6 +24,7 @@ import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy import java.util.concurrent.ConcurrentHashMap import akka.actor.Status +import akka.util.{ Deadline, Timeout, Duration } sealed trait Direction { def includes(other: Direction): Boolean @@ -376,7 +375,8 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP * BarrierTimeouts in the players). */ override def supervisorStrategy = OneForOneStrategy() { - case BarrierTimeout(data) ⇒ SupervisorStrategy.Restart + case BarrierTimeout(data) ⇒ failBarrier(data) + case FailedBarrier(data) ⇒ failBarrier(data) case BarrierEmpty(data, msg) ⇒ SupervisorStrategy.Resume case WrongBarrier(name, client, data) ⇒ client ! 
ToClient(BarrierResult(name, false)); failBarrier(data) case ClientLost(data, node) ⇒ failBarrier(data) @@ -464,7 +464,7 @@ private[akka] object BarrierCoordinator { case class RemoveClient(name: RoleName) - case class Data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef]) + case class Data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef], deadline: Deadline) trait Printer { this: Product with Throwable with NoStackTrace ⇒ override def toString = productPrefix + productIterator.mkString("(", ", ", ")") @@ -472,6 +472,8 @@ private[akka] object BarrierCoordinator { case class BarrierTimeout(data: Data) extends RuntimeException("timeout while waiting for barrier '" + data.barrier + "'") with NoStackTrace with Printer + case class FailedBarrier(data: Data) + extends RuntimeException("failing barrier '" + data.barrier + "'") with NoStackTrace with Printer case class DuplicateNode(data: Data, node: Controller.NodeInfo) extends RuntimeException(node.toString) with NoStackTrace with Printer case class WrongBarrier(barrier: String, client: ActorRef, data: Data) @@ -503,20 +505,18 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor // this shall be set to true if all subsequent barriers shall fail var failed = false - var barrierTimeout: Option[auTimeout] = None - override def preRestart(reason: Throwable, message: Option[Any]) {} override def postRestart(reason: Throwable) { failed = true } // TODO what happens with the other waiting players in case of a test failure? 
- startWith(Idle, Data(Set(), "", Nil)) + startWith(Idle, Data(Set(), "", Nil, null)) whenUnhandled { - case Event(n: NodeInfo, d @ Data(clients, _, _)) ⇒ + case Event(n: NodeInfo, d @ Data(clients, _, _, _)) ⇒ if (clients.find(_.name == n.name).isDefined) throw new DuplicateNode(d, n) stay using d.copy(clients = clients + n) - case Event(ClientDisconnected(name), d @ Data(clients, _, arrived)) ⇒ + case Event(ClientDisconnected(name), d @ Data(clients, _, arrived, _)) ⇒ if (clients.isEmpty) throw BarrierEmpty(d, "cannot disconnect " + name + ": no client to disconnect") (clients find (_.name == name)) match { case None ⇒ stay @@ -525,7 +525,7 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor } when(Idle) { - case Event(EnterBarrier(name, timeout), d @ Data(clients, _, _)) ⇒ + case Event(EnterBarrier(name, timeout), d @ Data(clients, _, _, _)) ⇒ if (failed) stay replying ToClient(BarrierResult(name, false)) else if (clients.map(_.fsm) == Set(sender)) @@ -533,56 +533,61 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor else if (clients.find(_.fsm == sender).isEmpty) stay replying ToClient(BarrierResult(name, false)) else { - barrierTimeout = timeout - goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil) + goto(Waiting) using d.copy(barrier = name, arrived = sender :: Nil, + deadline = getDeadline(timeout)) } - case Event(RemoveClient(name), d @ Data(clients, _, _)) ⇒ + case Event(RemoveClient(name), d @ Data(clients, _, _, _)) ⇒ if (clients.isEmpty) throw BarrierEmpty(d, "cannot remove " + name + ": no client to remove") stay using d.copy(clients = clients filterNot (_.name == name)) } onTransition { - case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, barrierTimeout.getOrElse[auTimeout](TestConductor().Settings.BarrierTimeout).duration, false) + case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, nextStateData.deadline - Deadline.now, false) case Waiting -> Idle ⇒ 
cancelTimer("Timeout") } when(Waiting) { - case Event(EnterBarrier(name, timeout), d @ Data(clients, barrier, arrived)) ⇒ + case Event(EnterBarrier(name, timeout), d @ Data(clients, barrier, arrived, deadline)) ⇒ if (name != barrier) throw WrongBarrier(name, sender, d) val together = if (clients.exists(_.fsm == sender)) sender :: arrived else arrived - handleBarrier(d.copy(arrived = together)) - case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒ + val enterDeadline = getDeadline(timeout) + // we only allow the deadlines to get shorter + val newDeadline = if ((enterDeadline - deadline) < Duration.Zero) enterDeadline else deadline + if (newDeadline != deadline) { + cancelTimer("Timeout") + setTimer("Timeout", StateTimeout, newDeadline - Deadline.now, false) + } + handleBarrier(d.copy(arrived = together, deadline = newDeadline)) + case Event(RemoveClient(name), d @ Data(clients, barrier, arrived, _)) ⇒ clients find (_.name == name) match { case None ⇒ stay case Some(client) ⇒ handleBarrier(d.copy(clients = clients - client, arrived = arrived filterNot (_ == client.fsm))) } - case Event(FailBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ + case Event(FailBarrier(name), d @ Data(_, barrier, _, _)) ⇒ if (name != barrier) throw WrongBarrier(name, sender, d) - failed = true - handleBarrier(d, false) - - case Event(StateTimeout, d @ Data(clients, barrier, arrived)) ⇒ - handleBarrier(d, false) + throw FailedBarrier(d) + case Event(StateTimeout, d) ⇒ throw BarrierTimeout(d) } initialize - def handleBarrier(data: Data, status: Boolean = true): State = { - log.debug("handleBarrier({}, {})", data, status) - if (!status) { - data.arrived foreach (_ ! 
ToClient(BarrierResult(data.barrier, status))) - goto(Idle) using data.copy(barrier = "", arrived = Nil) - } else if (data.arrived.isEmpty) { + def handleBarrier(data: Data): State = { + log.debug("handleBarrier({})", data) + if (data.arrived.isEmpty) { goto(Idle) using data.copy(barrier = "") } else if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) { - data.arrived foreach (_ ! ToClient(BarrierResult(data.barrier, status))) + data.arrived foreach (_ ! ToClient(BarrierResult(data.barrier, true))) goto(Idle) using data.copy(barrier = "", arrived = Nil) } else { stay using data } } + def getDeadline(timeout: Option[Duration]): Deadline = { + Deadline.now + timeout.getOrElse(TestConductor().Settings.BarrierTimeout.duration) + } + } diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index 4730bbd508..5adc07bef2 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -10,7 +10,8 @@ import akka.remote.testconductor.{ TestConductorProtocol ⇒ TCP } import com.google.protobuf.Message import akka.actor.Address import org.jboss.netty.handler.codec.oneone.OneToOneDecoder -import akka.util.Timeout +import akka.util.Duration +import akka.remote.testconductor.TestConductorProtocol.BarrierOp case class RoleName(name: String) @@ -29,7 +30,7 @@ private[akka] sealed trait ConfirmedClientOp extends ClientOp */ private[akka] case class Hello(name: String, addr: Address) extends NetworkOp -private[akka] case class EnterBarrier(name: String, timeout: Option[Timeout] = None) extends ServerOp with NetworkOp +private[akka] case class EnterBarrier(name: String, timeout: Option[Duration] = None) extends ServerOp with NetworkOp private[akka] case class FailBarrier(name: String) extends ServerOp with NetworkOp private[akka] case class BarrierResult(name: String, 
success: Boolean) extends UnconfirmedClientOp with NetworkOp @@ -76,12 +77,14 @@ private[akka] class MsgEncoder extends OneToOneEncoder { w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr)) case EnterBarrier(name, timeout) ⇒ val barrier = TCP.EnterBarrier.newBuilder.setName(name) - timeout foreach (t ⇒ barrier.setTimeout(t.duration.toMillis)) + timeout foreach (t ⇒ barrier.setTimeout(t.toNanos)) + barrier.setOp(BarrierOp.Enter) w.setBarrier(barrier) case BarrierResult(name, success) ⇒ - w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setStatus(success)) + val res = if (success) BarrierOp.Succeeded else BarrierOp.Failed + w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setOp(res)) case FailBarrier(name) ⇒ - w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setFailed(true)) + w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setOp(BarrierOp.Fail)) case ThrottleMsg(target, dir, rate) ⇒ w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate)) @@ -120,9 +123,13 @@ private[akka] class MsgDecoder extends OneToOneDecoder { Hello(h.getName, h.getAddress) } else if (w.hasBarrier) { val barrier = w.getBarrier - if (barrier.hasStatus) BarrierResult(barrier.getName, barrier.getStatus) - else if (barrier.hasFailed) FailBarrier(barrier.getName) - else EnterBarrier(w.getBarrier.getName, if (barrier.hasTimeout) Option(Timeout.longToTimeout(barrier.getTimeout)) else None) + barrier.getOp match { + case BarrierOp.Succeeded ⇒ BarrierResult(barrier.getName, true) + case BarrierOp.Failed ⇒ BarrierResult(barrier.getName, false) + case BarrierOp.Fail ⇒ FailBarrier(barrier.getName) + case BarrierOp.Enter ⇒ EnterBarrier(barrier.getName, + if (barrier.hasTimeout) Option(Duration.fromNanos(barrier.getTimeout)) else None) + } } else if (w.hasFailure) { val f = w.getFailure import TCP.{ FailType ⇒ FT } diff --git 
a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index bed14725b4..46b7106a19 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -26,6 +26,7 @@ import org.jboss.netty.channel.WriteCompletionEvent import java.net.ConnectException import akka.util.Deadline import akka.actor.Scheduler +import java.util.concurrent.TimeoutException /** * The Player is the client component of the @@ -79,8 +80,6 @@ trait Player { this: TestConductorExt ⇒ enter(Settings.BarrierTimeout, name) } - case class OutOfTimeException(barrier: String) extends RuntimeException("Ran out of time while waiting for barrier '" + barrier + "'") with NoStackTrace - /** * Enter the named barriers, one after the other, in the order given. Will * throw an exception in case of timeouts or other errors. @@ -94,7 +93,7 @@ trait Player { this: TestConductorExt ⇒ val barrierTimeout = stop - now if (barrierTimeout < Duration.Zero) { client ! ToServer(FailBarrier(b)) - throw OutOfTimeException(b) + throw new TimeoutException("Server timed out while waiting for barrier " + b); } try { implicit val timeout = Timeout(barrierTimeout + Settings.QueryTimeout.duration) @@ -102,7 +101,8 @@ trait Player { this: TestConductorExt ⇒ } catch { case e: AskTimeoutException ⇒ client ! ToServer(FailBarrier(b)) - throw e + // Why don't TimeoutException have a constructor that takes a cause? 
+ throw new TimeoutException("Client timed out while waiting for barrier " + b); } system.log.debug("passed barrier {}", b) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala index 999e152b0f..f49dc53e2b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala @@ -47,7 +47,7 @@ class LookupRemoteActorSpec extends MultiNodeSpec(LookupRemoteActorMultiJvmSpec) val masterAddress = testConductor.getAddressFor(master).await (hello ? "identify").await.asInstanceOf[ActorRef].path.address must equal(masterAddress) } - enter("done") + enterBarrier("done") } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala index 5b4e19df98..eca91495d6 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala @@ -56,7 +56,7 @@ class NewRemoteActorSpec extends MultiNodeSpec(NewRemoteActorMultiJvmSpec) system.stop(actor) } - enter("done") + enterBarrier("done") } "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef (with deployOnAll)" taggedAs LongRunningTest in { @@ -74,7 +74,7 @@ class NewRemoteActorSpec extends MultiNodeSpec(NewRemoteActorMultiJvmSpec) system.stop(actor) } - enter("done") + enterBarrier("done") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala index eeb09d6174..44c7ae5047 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala +++ 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala @@ -55,11 +55,11 @@ class RandomRoutedRemoteActorSpec extends MultiNodeSpec(RandomRoutedRemoteActorM "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest in { runOn(first, second, third) { - enter("start", "broadcast-end", "end", "done") + enterBarrier("start", "broadcast-end", "end", "done") } runOn(fourth) { - enter("start") + enterBarrier("start") val actor = system.actorOf(Props[SomeActor].withRouter(RandomRouter()), "service-hello") actor.isInstanceOf[RoutedActorRef] must be(true) @@ -76,17 +76,17 @@ class RandomRoutedRemoteActorSpec extends MultiNodeSpec(RandomRoutedRemoteActorM case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } - enter("broadcast-end") + enterBarrier("broadcast-end") actor ! Broadcast(PoisonPill) - enter("end") + enterBarrier("end") replies.values foreach { _ must be > (0) } replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node system.stop(actor) - enter("done") + enterBarrier("done") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala index f69989f41f..76a7e41ad1 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala @@ -55,11 +55,11 @@ class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemo "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest in { runOn(first, second, third) { - enter("start", "broadcast-end", "end", "done") + 
enterBarrier("start", "broadcast-end", "end", "done") } runOn(fourth) { - enter("start") + enterBarrier("start") val actor = system.actorOf(Props[SomeActor].withRouter(RoundRobinRouter()), "service-hello") actor.isInstanceOf[RoutedActorRef] must be(true) @@ -76,17 +76,17 @@ class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemo case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } - enter("broadcast-end") + enterBarrier("broadcast-end") actor ! Broadcast(PoisonPill) - enter("end") + enterBarrier("end") replies.values foreach { _ must be(iterationCount) } replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node system.stop(actor) - enter("done") + enterBarrier("done") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala index 3c18518503..b77b0c196e 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala @@ -55,11 +55,11 @@ class ScatterGatherRoutedRemoteActorSpec extends MultiNodeSpec(ScatterGatherRout "be locally instantiated on a remote node and be able to communicate through its RemoteActorRef" taggedAs LongRunningTest in { runOn(first, second, third) { - enter("start", "broadcast-end", "end", "done") + enterBarrier("start", "broadcast-end", "end", "done") } runOn(fourth) { - enter("start") + enterBarrier("start") val actor = system.actorOf(Props[SomeActor].withRouter(ScatterGatherFirstCompletedRouter(within = 10 seconds)), "service-hello") actor.isInstanceOf[RoutedActorRef] must be(true) @@ -76,17 +76,17 @@ class ScatterGatherRoutedRemoteActorSpec extends 
MultiNodeSpec(ScatterGatherRout case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) } - enter("broadcast-end") + enterBarrier("broadcast-end") actor ! Broadcast(PoisonPill) - enter("end") + enterBarrier("end") replies.values.sum must be === connectionCount * iterationCount replies.get(node(fourth).address) must be(None) // shut down the actor before we let the other node(s) shut down so we don't try to send // "Terminate" to a shut down node system.stop(actor) - enter("done") + enterBarrier("done") } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 5898fd458c..86fabc489d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -46,7 +46,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im }), "echo") } - enter("name") + enterBarrier("name") } "support throttling of network connections" taggedAs LongRunningTest in { @@ -62,7 +62,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im testConductor.throttle(slave, master, Direction.Send, rateMBit = 0.01).await } - enter("throttled_send") + enterBarrier("throttled_send") runOn(slave) { for (i ← 0 to 9) echo ! i @@ -73,14 +73,14 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im receiveN(9) must be(1 to 9) } - enter("throttled_send2") + enterBarrier("throttled_send2") runOn(master) { testConductor.throttle(slave, master, Direction.Send, -1).await testConductor.throttle(slave, master, Direction.Receive, rateMBit = 0.01).await } - enter("throttled_recv") + enterBarrier("throttled_recv") runOn(slave) { for (i ← 10 to 19) echo ! 
i @@ -98,7 +98,7 @@ class TestConductorSpec extends MultiNodeSpec(TestConductorMultiJvmSpec) with Im receiveN(9) must be(11 to 19) } - enter("throttled_recv2") + enterBarrier("throttled_recv2") runOn(master) { testConductor.throttle(slave, master, Direction.Receive, -1).await diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index 79dfda7559..779c02b670 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -29,10 +29,10 @@ object BarrierSpec { akka.remote.netty.port = 0 akka.actor.debug.fsm = on akka.actor.debug.lifecycle = on - """ + """ } -class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with BeforeAndAfterEach { +class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { import BarrierSpec._ import Controller._ @@ -42,10 +42,6 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with val B = RoleName("b") val C = RoleName("c") - override def afterEach { - system.eventStream.setLogLevel(Logging.WarningLevel) - } - "A BarrierCoordinator" must { "register clients and remove them" taggedAs TimingTest in { @@ -56,7 +52,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[BarrierEmpty](occurrences = 1) intercept { b ! RemoveClient(A) } - expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "cannot remove RoleName(a): no client to remove"))) + expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil, null), "cannot remove RoleName(a): no client to remove"))) } "register clients and disconnect them" taggedAs TimingTest in { @@ -66,11 +62,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[ClientLost](occurrences = 1) intercept { b ! 
ClientDisconnected(A) } - expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil), A))) + expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil, null), A))) EventFilter[BarrierEmpty](occurrences = 1) intercept { b ! ClientDisconnected(A) } - expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "cannot disconnect RoleName(a): no client to disconnect"))) + expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil, null), "cannot disconnect RoleName(a): no client to disconnect"))) } "fail entering barrier when nobody registered" taggedAs TimingTest in { @@ -86,7 +82,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar2")) noMsg(a, b) - within(2 second) { + within(2 seconds) { b.send(barrier, EnterBarrier("bar2")) a.expectMsg(ToClient(BarrierResult("bar2", true))) b.expectMsg(ToClient(BarrierResult("bar2", true))) @@ -102,7 +98,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) b.send(barrier, EnterBarrier("bar3")) noMsg(a, b, c) - within(2 second) { + within(2 seconds) { c.send(barrier, EnterBarrier("bar3")) a.expectMsg(ToClient(BarrierResult("bar3", true))) b.expectMsg(ToClient(BarrierResult("bar3", true))) @@ -121,7 +117,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! RemoveClient(A) barrier ! ClientDisconnected(A) noMsg(a, b, c) - b.within(2 second) { + b.within(2 seconds) { barrier ! RemoveClient(C) b.expectMsg(ToClient(BarrierResult("bar4", true))) } @@ -150,7 +146,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[ClientLost](occurrences = 1) intercept { barrier ! 
ClientDisconnected(B) } - expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil), B))) + val msg = expectMsgType[Failed] + msg match { + case Failed(barrier, thr: ClientLost) if (thr == ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, thr.data.deadline), B)) ⇒ + case x ⇒ fail("Expected " + Failed(barrier, ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, null), B)) + " but got " + x) + } } "fail barrier with disconnecing node who already arrived" taggedAs TimingTest in { @@ -166,7 +166,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected(B) } - expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil), B))) + val msg = expectMsgType[Failed] + msg match { + case Failed(barrier, thr: ClientLost) if (thr == ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, thr.data.deadline), B)) ⇒ + case x ⇒ fail("Expected " + Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, null), B)) + " but got " + x) + } } "fail when entering wrong barrier" taggedAs TimingTest in { @@ -180,7 +184,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[WrongBarrier](occurrences = 1) intercept { b.send(barrier, EnterBarrier("foo")) } - expectMsg(Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar8", a.ref :: Nil)))) + val msg = expectMsgType[Failed] + msg match { + case Failed(barrier, thr: WrongBarrier) if (thr == WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar8", a.ref :: Nil, thr.data.deadline))) ⇒ + case x ⇒ fail("Expected " + Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar8", a.ref :: Nil, null))) + " but got " + x) + } } "fail barrier after first failure" taggedAs TimingTest in { @@ -189,7 +197,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with 
EventFilter[BarrierEmpty](occurrences = 1) intercept { barrier ! RemoveClient(A) } - expectMsg(Failed(barrier, BarrierEmpty(Data(Set(), "", Nil), "cannot remove RoleName(a): no client to remove"))) + val msg = expectMsgType[Failed] + msg match { + case Failed(barrier, thr: BarrierEmpty) if (thr == BarrierEmpty(Data(Set(), "", Nil, thr.data.deadline), "cannot remove RoleName(a): no client to remove")) ⇒ + case x ⇒ fail("Expected " + Failed(barrier, BarrierEmpty(Data(Set(), "", Nil, null), "cannot remove RoleName(a): no client to remove")) + " but got " + x) + } barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("bar9")) a.expectMsg(ToClient(BarrierResult("bar9", false))) @@ -204,7 +216,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeB a.send(barrier, EnterBarrier("bar10")) EventFilter[BarrierTimeout](occurrences = 1) intercept { - expectMsg(7 seconds, Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil)))) + val msg = expectMsgType[Failed](7 seconds) + msg match { + case Failed(barrier, thr: BarrierTimeout) if (thr == BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, thr.data.deadline))) ⇒ + case x ⇒ fail("Expected " + Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, null))) + " but got " + x) + } } } @@ -217,7 +233,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with EventFilter[DuplicateNode](occurrences = 1) intercept { barrier ! 
nodeB } - expectMsg(Failed(barrier, DuplicateNode(Data(Set(nodeA), "", Nil), nodeB))) + val msg = expectMsgType[Failed] + msg match { + case Failed(barrier, thr: DuplicateNode) if (thr == DuplicateNode(Data(Set(nodeA), "", Nil, thr.data.deadline), nodeB)) ⇒ + case x ⇒ fail("Expected " + Failed(barrier, DuplicateNode(Data(Set(nodeA), "", Nil, null), nodeB)) + " but got " + x) + } } "finally have no failure messages left" taggedAs TimingTest in { @@ -267,7 +287,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar11")) noMsg(a, b) - within(2 second) { + within(2 seconds) { b.send(barrier, EnterBarrier("bar11")) a.expectMsg(ToClient(BarrierResult("bar11", true))) b.expectMsg(ToClient(BarrierResult("bar11", true))) @@ -286,7 +306,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with c.expectMsg(ToClient(Done)) b.send(barrier, EnterBarrier("bar12")) noMsg(a, b, c) - within(2 second) { + within(2 seconds) { c.send(barrier, EnterBarrier("bar12")) a.expectMsg(ToClient(BarrierResult("bar12", true))) b.expectMsg(ToClient(BarrierResult("bar12", true))) @@ -308,7 +328,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! Remove(A) barrier ! ClientDisconnected(A) noMsg(a, b, c) - b.within(2 second) { + b.within(2 seconds) { barrier ! Remove(C) b.expectMsg(ToClient(BarrierResult("bar13", true))) } @@ -391,7 +411,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! nodeB a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar18", Option(Timeout.durationToTimeout(2 seconds)))) + a.send(barrier, EnterBarrier("bar18", Option(2 seconds))) EventFilter[BarrierTimeout](occurrences = 1) intercept { Thread.sleep(4000) } @@ -437,16 +457,64 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with barrier ! 
nodeB a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar20", Option(Timeout.durationToTimeout(2 seconds)))) - b.send(barrier, FailBarrier("bar20")) - a.expectMsg(ToClient(BarrierResult("bar20", false))) - b.expectNoMsg(1 second) + a.send(barrier, EnterBarrier("bar20", Option(2 seconds))) + EventFilter[FailedBarrier](occurrences = 1) intercept { + b.send(barrier, FailBarrier("bar20")) + a.expectMsg(ToClient(BarrierResult("bar20", false))) + b.expectNoMsg(1 second) + } a.send(barrier, EnterBarrier("bar21")) b.send(barrier, EnterBarrier("bar21")) a.expectMsg(ToClient(BarrierResult("bar21", false))) b.expectMsg(ToClient(BarrierResult("bar21", false))) } + "timeout within the shortest timeout if the new timeout is shorter" taggedAs TimingTest in { + val barrier = getController(3) + val a, b, c = TestProbe() + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) + val nodeC = NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) + barrier ! nodeA + barrier ! nodeB + barrier ! nodeC + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) + c.expectMsg(ToClient(Done)) + a.send(barrier, EnterBarrier("bar22", Option(10 seconds))) + b.send(barrier, EnterBarrier("bar22", Option(2 seconds))) + EventFilter[BarrierTimeout](occurrences = 1) intercept { + Thread.sleep(4000) + } + c.send(barrier, EnterBarrier("bar22")) + a.expectMsg(ToClient(BarrierResult("bar22", false))) + b.expectMsg(ToClient(BarrierResult("bar22", false))) + c.expectMsg(ToClient(BarrierResult("bar22", false))) + } + + "timeout within the shortest timeout if the new timeout is longer" taggedAs TimingTest in { + val barrier = getController(3) + val a, b, c = TestProbe() + val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) + val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) + val nodeC = NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) + barrier ! 
nodeA + barrier ! nodeB + barrier ! nodeC + a.expectMsg(ToClient(Done)) + b.expectMsg(ToClient(Done)) + c.expectMsg(ToClient(Done)) + a.send(barrier, EnterBarrier("bar23", Option(2 seconds))) + b.send(barrier, EnterBarrier("bar23", Option(10 seconds))) + EventFilter[BarrierTimeout](occurrences = 1) intercept { + Thread.sleep(4000) + } + c.send(barrier, EnterBarrier("bar23")) + a.expectMsg(ToClient(BarrierResult("bar23", false))) + b.expectMsg(ToClient(BarrierResult("bar23", false))) + c.expectMsg(ToClient(BarrierResult("bar23", false))) + } + "finally have no failure messages left" taggedAs TimingTest in { expectNoMsg(1 second) } @@ -489,4 +557,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with probes foreach (_.msgAvailable must be(false)) } + private def data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef], previous: Data): Data = { + Data(clients, barrier, arrived, previous.deadline) + } } \ No newline at end of file diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 62539e981d..4d65a2084e 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -186,7 +186,7 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: * Enter the named barriers in the order given. 
Use the remaining duration from * the innermost enclosing `within` block or the default `BarrierTimeout` */ - def enter(name: String*) { + def enterBarrier(name: String*) { testConductor.enter(Timeout.durationToTimeout(remainingOr(testConductor.Settings.BarrierTimeout.duration)), name) } @@ -202,13 +202,11 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: /** * Enrich `.await()` onto all Awaitables, using remaining duration from the innermost - * enclosing `within` block or BarrierTimeout. - * - * FIXME Is it really BarrierTimeout we want here? That seems like an awfully long time. + * enclosing `within` block or QueryTimeout. */ implicit def awaitHelper[T](w: Awaitable[T]) = new AwaitHelper(w) class AwaitHelper[T](w: Awaitable[T]) { - def await: T = Await.result(w, remainingOr(testConductor.Settings.BarrierTimeout.duration)) + def await: T = Await.result(w, remainingOr(testConductor.Settings.QueryTimeout.duration)) } /* @@ -217,9 +215,11 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: private val controllerAddr = new InetSocketAddress(nodeNames(0), 4711) if (selfIndex == 0) { - testConductor.startController(initialParticipants, myself, controllerAddr).await + Await.result(testConductor.startController(initialParticipants, myself, controllerAddr), + testConductor.Settings.BarrierTimeout.duration) } else { - testConductor.startClient(myself, controllerAddr).await + Await.result(testConductor.startClient(myself, controllerAddr), + testConductor.Settings.BarrierTimeout.duration) } // now add deployments, if so desired From 1e0d6670d56d1556bc658c37ed7ee954daf2105d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 14:37:15 +0200 Subject: [PATCH 396/538] Cache node lookup, see #2201 --- .../akka/remote/testkit/MultiNodeSpec.scala | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git 
a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index faaab5cdc4..7debc9d4d2 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -4,15 +4,14 @@ package akka.remote.testkit import java.net.InetSocketAddress - import com.typesafe.config.{ ConfigObject, ConfigFactory, Config } - import akka.actor.{ RootActorPath, Deploy, ActorPath, ActorSystem, ExtendedActorSystem } import akka.dispatch.Await import akka.dispatch.Await.Awaitable import akka.remote.testconductor.{ TestConductorExt, TestConductor, RoleName } import akka.testkit.AkkaSpec import akka.util.{ NonFatal, Duration } +import java.util.concurrent.ConcurrentHashMap /** * Configure the role names and participants of the test, including configuration settings. @@ -190,7 +189,18 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: * val serviceA = system.actorFor(node("master") / "user" / "serviceA") * }}} */ - def node(role: RoleName): ActorPath = RootActorPath(testConductor.getAddressFor(role).await) + def node(role: RoleName): ActorPath = { + cachedRootActorPaths.get(role) match { + case null ⇒ + val root = RootActorPath(testConductor.getAddressFor(role).await) + cachedRootActorPaths.put(role, root) + root + case root ⇒ root + } + + } + + private val cachedRootActorPaths = new ConcurrentHashMap[RoleName, ActorPath] /** * Enrich `.await()` onto all Awaitables, using BarrierTimeout. 
From 404fa4dfa3f3828d1cfaf762a22663415e08a86b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 14:37:51 +0200 Subject: [PATCH 397/538] Implicit conversion from RoleName to Address * Improved readability in tests if role name can be used * Will change other tests also if you like it, otherwise revert --- .../GossipingAccrualFailureDetectorSpec.scala | 16 ++++++---------- .../akka/cluster/MultiNodeClusterSpec.scala | 6 +++++- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index b14c0d927c..8b9489ac11 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -29,19 +29,15 @@ abstract class GossipingAccrualFailureDetectorSpec import GossipingAccrualFailureDetectorMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - "A Gossip-driven Failure Detector" must { "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) 5.seconds.dilated.sleep // let them gossip - cluster.failureDetector.isAvailable(firstAddress) must be(true) - cluster.failureDetector.isAvailable(secondAddress) must be(true) - cluster.failureDetector.isAvailable(thirdAddress) must be(true) + cluster.failureDetector.isAvailable(first) must be(true) + cluster.failureDetector.isAvailable(second) must be(true) + cluster.failureDetector.isAvailable(third) must be(true) testConductor.enter("after-1") } @@ -53,10 +49,10 @@ abstract class GossipingAccrualFailureDetectorSpec runOn(first, second) { // remaning nodes should detect failure... 
- awaitCond(!cluster.failureDetector.isAvailable(thirdAddress), 10.seconds) + awaitCond(!cluster.failureDetector.isAvailable(third), 10.seconds) // other connections still ok - cluster.failureDetector.isAvailable(firstAddress) must be(true) - cluster.failureDetector.isAvailable(secondAddress) must be(true) + cluster.failureDetector.isAvailable(first) must be(true) + cluster.failureDetector.isAvailable(second) must be(true) } testConductor.enter("after-2") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index b4532f7efc..c598995508 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -5,7 +5,7 @@ package akka.cluster import com.typesafe.config.Config import com.typesafe.config.ConfigFactory -import akka.actor.{Address, ExtendedActorSystem} +import akka.actor.{ Address, ExtendedActorSystem } import akka.remote.testconductor.RoleName import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -153,4 +153,8 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec def roleName(address: Address): Option[RoleName] = { testConductor.getNodes.await.find(node(_).address == address) } + + // implicit conversion from RoleName to Address + implicit def role2Address(role: RoleName): Address = node(role).address + } From e9a96afef87720d0769f5de3f73509cb8b30d2a6 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 14:45:15 +0200 Subject: [PATCH 398/538] Use FailureDetectorPuppet in ClusterSpec --- .../test/scala/akka/cluster/ClusterSpec.scala | 20 ++++++------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 03f6460ea1..b2c1a70ec2 100644 --- 
a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -33,7 +33,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { val deterministicRandom = new AtomicInteger - val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem], new FailureDetectorPuppet(system)) { + val failureDetector = new FailureDetectorPuppet(system) + + val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) { override def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = { if (addresses.isEmpty) None @@ -64,16 +66,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { else _gossipToDeputyProbablity } - @volatile - var _unavailable: Set[Address] = Set.empty - - override val failureDetector = new FailureDetectorPuppet(system) { - override def isAvailable(connection: Address): Boolean = { - if (_unavailable.contains(connection)) false - else super.isAvailable(connection) - } - } - } val selfAddress = cluster.self.address @@ -91,7 +83,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { before { cluster._gossipToUnreachableProbablity = 0.0 cluster._gossipToDeputyProbablity = 0.0 - cluster._unavailable = Set.empty + addresses.foreach(failureDetector.remove(_)) deterministicRandom.set(0) } @@ -188,7 +180,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { "gossip to random unreachable node" in { val dead = Set(addresses(1)) - cluster._unavailable = dead + dead.foreach(failureDetector.markNodeAsUnavailable(_)) cluster._gossipToUnreachableProbablity = 1.0 // always cluster.reapUnreachableMembers() @@ -206,7 +198,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { cluster._gossipToDeputyProbablity = -1.0 // real impl // 0 and 2 still alive val dead = Set(addresses(1), addresses(3), addresses(4), addresses(5)) - cluster._unavailable = 
dead + dead.foreach(failureDetector.markNodeAsUnavailable(_)) cluster.reapUnreachableMembers() cluster.latestGossip.overview.unreachable.map(_.address) must be(dead) From 019ec0da712000297850e3435aee50c2e980682a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 15 Jun 2012 15:04:54 +0200 Subject: [PATCH 399/538] Fixing so that the SSL tests are ignored if the respective cipher isn't available on the machine the test runs on, so you'll see a yellow warning that the test wasn't run in that case --- .../akka/remote/netty/NettySSLSupport.scala | 12 +- .../remote/Ticket1978CommunicationSpec.scala | 162 ++++++++++-------- 2 files changed, 92 insertions(+), 82 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 4c68069278..7e006373c2 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -16,22 +16,22 @@ import akka.security.provider.AkkaProvider * Used for adding SSL support to Netty pipeline * Internal use only */ -private[netty] object NettySSLSupport { +private[akka] object NettySSLSupport { /** * Construct a SSLHandler which can be inserted into a Netty server/client pipeline */ def apply(settings: NettySettings, log: LoggingAdapter, isClient: Boolean): SslHandler = if (isClient) initialiseClientSSL(settings, log) else initialiseServerSSL(settings, log) - private def initialiseCustomSecureRandom(settings: NettySettings, log: LoggingAdapter): SecureRandom = { + def initialiseCustomSecureRandom(rngName: Option[String], sourceOfRandomness: Option[String], log: LoggingAdapter): SecureRandom = { /** * According to this bug report: http://bugs.sun.com/view_bug.do?bug_id=6202721 * Using /dev/./urandom is only necessary when using SHA1PRNG on Linux * Use 'new SecureRandom()' instead of 'SecureRandom.getInstance("SHA1PRNG")' to avoid having problems */ - 
settings.SSLRandomSource foreach { path ⇒ System.setProperty("java.security.egd", path) } + sourceOfRandomness foreach { path ⇒ System.setProperty("java.security.egd", path) } - val rng = settings.SSLRandomNumberGenerator match { + val rng = rngName match { case Some(r @ ("AES128CounterRNGFast" | "AES128CounterRNGSecure" | "AES256CounterRNGSecure")) ⇒ log.debug("SSL random number generator set to: {}", r) val akka = new AkkaProvider @@ -86,7 +86,7 @@ private[netty] object NettySSLSupport { trustStore.load(new FileInputStream(trustStorePath), trustStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? trustManagerFactory.init(trustStore) val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers - Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initialiseCustomSecureRandom(settings, log)); ctx } + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initialiseCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) case e: IOException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) @@ -123,7 +123,7 @@ private[netty] object NettySSLSupport { val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) keyStore.load(new FileInputStream(keyStorePath), keyStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? 
factory.init(keyStore, keyStorePassword.toCharArray) - Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initialiseCustomSecureRandom(settings, log)); ctx } + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initialiseCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) case e: IOException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index ff41e369ff..86ebd921e4 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -9,7 +9,9 @@ import com.typesafe.config._ import akka.dispatch.{ Await, Future } import akka.pattern.ask import java.io.File -import java.security.{ PrivilegedAction, AccessController } +import akka.event.LoggingAdapter +import java.security.{ SecureRandom, PrivilegedAction, AccessController } +import netty.NettySSLSupport object Configuration { // set this in your JAVA_OPTS to see all ssl debug info: "-Djavax.net.debug=ssl,keymanager" @@ -38,32 +40,29 @@ object Configuration { } """ - def getConfig(rng: String): String = { - conf.format(trustStore, keyStore, rng) - } + def getConfig(rng: String): String = conf.format(trustStore, keyStore, rng) } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978SHA1PRNG extends Ticket1978CommunicationSpec(Configuration.getConfig("SHA1PRNG")) +class Ticket1978SHA1PRNG extends Ticket1978CommunicationSpec("SHA1PRNG") 
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGFast extends Ticket1978CommunicationSpec(Configuration.getConfig("AES128CounterRNGFast")) +class Ticket1978AES128CounterRNGFast extends Ticket1978CommunicationSpec("AES128CounterRNGFast") /** * Both of the Secure variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGSecure extends Ticket1978CommunicationSpec(Configuration.getConfig("AES128CounterRNGSecure")) +class Ticket1978AES128CounterRNGSecure extends Ticket1978CommunicationSpec("AES128CounterRNGSecure") /** * Both of the Secure variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES256CounterRNGSecure extends Ticket1978CommunicationSpec(Configuration.getConfig("AES256CounterRNGSecure")) +class Ticket1978AES256CounterRNGSecure extends Ticket1978CommunicationSpec("AES256CounterRNGSecure") @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978CommunicationSpec(val configuration: String) - extends AkkaSpec(configuration) with ImplicitSender with DefaultTimeout { +abstract class Ticket1978CommunicationSpec(val cipher: String) extends AkkaSpec(Configuration.getConfig(cipher)) with ImplicitSender with DefaultTimeout { import RemoteCommunicationSpec._ @@ -85,75 +84,86 @@ class Ticket1978CommunicationSpec(val configuration: String) other.shutdown() } + val isSupportedOnPlatform: Boolean = try { + NettySSLSupport.initialiseCustomSecureRandom(Some(cipher), None, log) ne null + } catch { + case iae: IllegalArgumentException if iae.getMessage == "Cannot support %s with currently installed providers".format(cipher) ⇒ false + } + "SSL Remoting" must { - - "support remote look-ups" in { - here ! 
"ping" - expectMsgPF() { - case ("pong", s: AnyRef) if s eq testActor ⇒ true - } - } - - "send error message for wrong address" in { - EventFilter.error(start = "dropping", occurrences = 1).intercept { - system.actorFor("akka://remotesys@localhost:12346/user/echo") ! "ping" - }(other) - } - - "support ask" in { - Await.result(here ? "ping", timeout.duration) match { - case ("pong", s: akka.pattern.PromiseActorRef) ⇒ // good - case m ⇒ fail(m + " was not (pong, AskActorRef)") - } - } - - "send dead letters on remote if actor does not exist" in { - EventFilter.warning(pattern = "dead.*buh", occurrences = 1).intercept { - system.actorFor("akka://remote-sys@localhost:12346/does/not/exist") ! "buh" - }(other) - } - - "create and supervise children on remote node" in { - val r = system.actorOf(Props[Echo], "blub") - r.path.toString must be === "akka://remote-sys@localhost:12346/remote/Ticket1978CommunicationSpec@localhost:12345/user/blub" - r ! 42 - expectMsg(42) - EventFilter[Exception]("crash", occurrences = 1).intercept { - r ! new Exception("crash") - }(other) - expectMsg("preRestart") - r ! 42 - expectMsg(42) - system.stop(r) - expectMsg("postStop") - } - - "look-up actors across node boundaries" in { - val l = system.actorOf(Props(new Actor { - def receive = { - case (p: Props, n: String) ⇒ sender ! context.actorOf(p, n) - case s: String ⇒ sender ! context.actorFor(s) + if (isSupportedOnPlatform) { + "support remote look-ups" in { + here ! "ping" + expectMsgPF() { + case ("pong", s: AnyRef) if s eq testActor ⇒ true } - }), "looker") - l ! (Props[Echo], "child") - val r = expectMsgType[ActorRef] - r ! (Props[Echo], "grandchild") - val remref = expectMsgType[ActorRef] - remref.isInstanceOf[LocalActorRef] must be(true) - val myref = system.actorFor(system / "looker" / "child" / "grandchild") - myref.isInstanceOf[RemoteActorRef] must be(true) - myref ! 
43 - expectMsg(43) - lastSender must be theSameInstanceAs remref - r.asInstanceOf[RemoteActorRef].getParent must be(l) - system.actorFor("/user/looker/child") must be theSameInstanceAs r - Await.result(l ? "child/..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l - Await.result(system.actorFor(system / "looker" / "child") ? "..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l - } + } - "not fail ask across node boundaries" in { - val f = for (_ ← 1 to 1000) yield here ? "ping" mapTo manifest[(String, ActorRef)] - Await.result(Future.sequence(f), remaining).map(_._1).toSet must be(Set("pong")) + "send error message for wrong address" in { + EventFilter.error(start = "dropping", occurrences = 1).intercept { + system.actorFor("akka://remotesys@localhost:12346/user/echo") ! "ping" + }(other) + } + + "support ask" in { + Await.result(here ? "ping", timeout.duration) match { + case ("pong", s: akka.pattern.PromiseActorRef) ⇒ // good + case m ⇒ fail(m + " was not (pong, AskActorRef)") + } + } + + "send dead letters on remote if actor does not exist" in { + EventFilter.warning(pattern = "dead.*buh", occurrences = 1).intercept { + system.actorFor("akka://remote-sys@localhost:12346/does/not/exist") ! "buh" + }(other) + } + + "create and supervise children on remote node" in { + val r = system.actorOf(Props[Echo], "blub") + r.path.toString must be === "akka://remote-sys@localhost:12346/remote/Ticket1978CommunicationSpec@localhost:12345/user/blub" + r ! 42 + expectMsg(42) + EventFilter[Exception]("crash", occurrences = 1).intercept { + r ! new Exception("crash") + }(other) + expectMsg("preRestart") + r ! 42 + expectMsg(42) + system.stop(r) + expectMsg("postStop") + } + + "look-up actors across node boundaries" in { + val l = system.actorOf(Props(new Actor { + def receive = { + case (p: Props, n: String) ⇒ sender ! context.actorOf(p, n) + case s: String ⇒ sender ! context.actorFor(s) + } + }), "looker") + l ! 
(Props[Echo], "child") + val r = expectMsgType[ActorRef] + r ! (Props[Echo], "grandchild") + val remref = expectMsgType[ActorRef] + remref.isInstanceOf[LocalActorRef] must be(true) + val myref = system.actorFor(system / "looker" / "child" / "grandchild") + myref.isInstanceOf[RemoteActorRef] must be(true) + myref ! 43 + expectMsg(43) + lastSender must be theSameInstanceAs remref + r.asInstanceOf[RemoteActorRef].getParent must be(l) + system.actorFor("/user/looker/child") must be theSameInstanceAs r + Await.result(l ? "child/..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l + Await.result(system.actorFor(system / "looker" / "child") ? "..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l + } + + "not fail ask across node boundaries" in { + val f = for (_ ← 1 to 1000) yield here ? "ping" mapTo manifest[(String, ActorRef)] + Await.result(Future.sequence(f), remaining).map(_._1).toSet must be(Set("pong")) + } + } else { + "not be run when the cipher is not supported by the platform this test is currently being executed on" ignore { + + } } } From 8b260c27581b780b476171fcfc982a54f3f2c1de Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 15 Jun 2012 16:08:07 +0200 Subject: [PATCH 400/538] Second stab at making sure we don't run the tests for SSL remoting for the RNGs that aren't installed on the box the tests run on --- .../src/main/scala/akka/event/Logging.scala | 13 +++++++ .../remote/Ticket1978CommunicationSpec.scala | 36 +++++++++---------- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index b91509ac9f..0777d9aef1 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -875,3 +875,16 @@ class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class protected def notifyInfo(message: String): Unit = 
bus.publish(Info(logSource, logClass, message)) protected def notifyDebug(message: String): Unit = bus.publish(Debug(logSource, logClass, message)) } + +private[akka] object NoLogging extends LoggingAdapter { + def isErrorEnabled = false + def isWarningEnabled = false + def isInfoEnabled = false + def isDebugEnabled = false + + protected def notifyError(message: String): Unit = () + protected def notifyError(cause: Throwable, message: String): Unit = () + protected def notifyWarning(message: String): Unit = () + protected def notifyInfo(message: String): Unit = () + protected def notifyDebug(message: String): Unit = () +} diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 86ebd921e4..dffcbfa725 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -9,9 +9,9 @@ import com.typesafe.config._ import akka.dispatch.{ Await, Future } import akka.pattern.ask import java.io.File -import akka.event.LoggingAdapter import java.security.{ SecureRandom, PrivilegedAction, AccessController } import netty.NettySSLSupport +import akka.event.{ NoLogging, LoggingAdapter } object Configuration { // set this in your JAVA_OPTS to see all ssl debug info: "-Djavax.net.debug=ssl,keymanager" @@ -40,35 +40,41 @@ object Configuration { } """ - def getConfig(rng: String): String = conf.format(trustStore, keyStore, rng) + def getCipherConfig(cipher: String): (String, Boolean, Config) = if (try { + NettySSLSupport.initialiseCustomSecureRandom(Some(cipher), None, NoLogging) ne null + } catch { + case iae: IllegalArgumentException if iae.getMessage == "Cannot support %s with currently installed providers".format(cipher) ⇒ false + }) (cipher, true, ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher))) else (cipher, false, AkkaSpec.testConf) } 
-@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978SHA1PRNG extends Ticket1978CommunicationSpec("SHA1PRNG") +import Configuration.getCipherConfig @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGFast extends Ticket1978CommunicationSpec("AES128CounterRNGFast") +class Ticket1978SHA1PRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("SHA1PRNG")) + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978AES128CounterRNGFastSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGFast")) /** * Both of the Secure variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGSecure extends Ticket1978CommunicationSpec("AES128CounterRNGSecure") +class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure")) /** * Both of the Secure variants require access to the Internet to access random.org. 
*/ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES256CounterRNGSecure extends Ticket1978CommunicationSpec("AES256CounterRNGSecure") +class Ticket1978AES256CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterRNGSecure")) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -abstract class Ticket1978CommunicationSpec(val cipher: String) extends AkkaSpec(Configuration.getConfig(cipher)) with ImplicitSender with DefaultTimeout { +class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(("NonExistingRNG", false, AkkaSpec.testConf)) + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boolean, Config)) extends AkkaSpec(cipherEnabledconfig._3) with ImplicitSender with DefaultTimeout { import RemoteCommunicationSpec._ - // default SecureRandom RNG - def this() = this(Configuration.getConfig("")) - val conf = ConfigFactory.parseString("akka.remote.netty.port=12346").withFallback(system.settings.config) val other = ActorSystem("remote-sys", conf) @@ -84,14 +90,8 @@ abstract class Ticket1978CommunicationSpec(val cipher: String) extends AkkaSpec( other.shutdown() } - val isSupportedOnPlatform: Boolean = try { - NettySSLSupport.initialiseCustomSecureRandom(Some(cipher), None, log) ne null - } catch { - case iae: IllegalArgumentException if iae.getMessage == "Cannot support %s with currently installed providers".format(cipher) ⇒ false - } - "SSL Remoting" must { - if (isSupportedOnPlatform) { + if (cipherEnabledconfig._2) { "support remote look-ups" in { here ! 
"ping" expectMsgPF() { From 77d8ebeb289e8c86fc043d6f9f8b7e3331869970 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 15 Jun 2012 16:47:14 +0200 Subject: [PATCH 401/538] Parrying for NoSuchAlgorithmException --- .../src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index dffcbfa725..4ac3c7ffe0 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -44,6 +44,7 @@ object Configuration { NettySSLSupport.initialiseCustomSecureRandom(Some(cipher), None, NoLogging) ne null } catch { case iae: IllegalArgumentException if iae.getMessage == "Cannot support %s with currently installed providers".format(cipher) ⇒ false + case nsae: java.security.NoSuchAlgorithmException ⇒ false }) (cipher, true, ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher))) else (cipher, false, AkkaSpec.testConf) } From 3945490aa6816ea4084717b01fea52c7e773733e Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 17:12:09 +0200 Subject: [PATCH 402/538] Minor cleanup based on feedback, see #2223 --- .../multi-jvm/scala/akka/cluster/TransitionSpec.scala | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 87af47a439..0fb3cb03c4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -20,11 +20,8 @@ object TransitionMultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") commonConfig(debugConfig(on = false). 
- withFallback(ConfigFactory.parseString(""" - akka.cluster { - periodic-tasks-initial-delay = 300 s # turn "off" all periodic tasks - } - """)). + withFallback(ConfigFactory.parseString( + "akka.cluster.periodic-tasks-initial-delay = 300 s # turn off all periodic tasks")). withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -108,10 +105,10 @@ abstract class TransitionSpec startClusterNode() cluster.isSingletonCluster must be(true) - cluster.self.status must be(Joining) + cluster.status must be(Joining) cluster.convergence.isDefined must be(true) cluster.leaderActions() - cluster.self.status must be(Up) + cluster.status must be(Up) testConductor.enter("after-1") } From f8b7189885c7c5ed871124d3113e448a036fe290 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 15 Jun 2012 17:32:40 +0200 Subject: [PATCH 403/538] Place the address cache in MultiNodeClusterSpec, see #2201 --- .../akka/cluster/MultiNodeClusterSpec.scala | 33 ++++++++++++++++--- .../akka/remote/testkit/MultiNodeSpec.scala | 16 ++------- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index c598995508..7b08afc4a9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -11,6 +11,9 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.duration._ import akka.util.Duration +import java.util.concurrent.ConcurrentHashMap +import akka.actor.ActorPath +import akka.actor.RootActorPath object MultiNodeClusterSpec { def clusterConfig: Config = ConfigFactory.parseString(""" @@ -33,6 +36,29 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec override def initialParticipants = roles.size + private val cachedAddresses = new ConcurrentHashMap[RoleName, Address] + + /** + * Lookup 
the Address for the role. + * It is cached, which has the implication that stopping + * and then restarting a role (jvm) with another address is not + * supported. + */ + def address(role: RoleName): Address = { + cachedAddresses.get(role) match { + case null ⇒ + val address = node(role).address + cachedAddresses.put(role, address) + address + case address ⇒ address + } + } + + /** + * implicit conversion from RoleName to Address + */ + implicit def role2Address(role: RoleName): Address = address(role) + /** * The cluster node instance. Needs to be lazily created. */ @@ -73,7 +99,7 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec } testConductor.enter(roles.head.name + "-started") if (roles.tail.contains(myself)) { - cluster.join(node(roles.head).address) + cluster.join(roles.head) } if (upConvergence && roles.contains(myself)) { awaitUpConvergence(numberOfMembers = roles.length) @@ -147,14 +173,11 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec */ implicit val clusterOrdering: Ordering[RoleName] = new Ordering[RoleName] { import Member.addressOrdering - def compare(x: RoleName, y: RoleName) = addressOrdering.compare(node(x).address, node(y).address) + def compare(x: RoleName, y: RoleName) = addressOrdering.compare(address(x), address(y)) } def roleName(address: Address): Option[RoleName] = { testConductor.getNodes.await.find(node(_).address == address) } - // implicit conversion from RoleName to Address - implicit def role2Address(role: RoleName): Address = node(role).address - } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 7debc9d4d2..faaab5cdc4 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -4,14 +4,15 @@ package akka.remote.testkit import 
java.net.InetSocketAddress + import com.typesafe.config.{ ConfigObject, ConfigFactory, Config } + import akka.actor.{ RootActorPath, Deploy, ActorPath, ActorSystem, ExtendedActorSystem } import akka.dispatch.Await import akka.dispatch.Await.Awaitable import akka.remote.testconductor.{ TestConductorExt, TestConductor, RoleName } import akka.testkit.AkkaSpec import akka.util.{ NonFatal, Duration } -import java.util.concurrent.ConcurrentHashMap /** * Configure the role names and participants of the test, including configuration settings. @@ -189,18 +190,7 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: * val serviceA = system.actorFor(node("master") / "user" / "serviceA") * }}} */ - def node(role: RoleName): ActorPath = { - cachedRootActorPaths.get(role) match { - case null ⇒ - val root = RootActorPath(testConductor.getAddressFor(role).await) - cachedRootActorPaths.put(role, root) - root - case root ⇒ root - } - - } - - private val cachedRootActorPaths = new ConcurrentHashMap[RoleName, ActorPath] + def node(role: RoleName): ActorPath = RootActorPath(testConductor.getAddressFor(role).await) /** * Enrich `.await()` onto all Awaitables, using BarrierTimeout. 
From 1e9d64825591c4cb598bc9078f184dda412b054b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 15 Jun 2012 18:05:02 +0200 Subject: [PATCH 404/538] Removing the use of 256bit encryption by default since it requires an install to get --- akka-remote/src/main/resources/reference.conf | 3 ++- .../scala/akka/remote/netty/Settings.scala | 2 +- .../remote/Ticket1978CommunicationSpec.scala | 18 +++++++++--------- .../akka/remote/Ticket1978ConfigSpec.scala | 2 +- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 482e2a0442..94a13865bb 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -190,9 +190,10 @@ akka { # 'TLSv1.1', 'TLSv1.2' protocol = "TLSv1" + # Examples: [ "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" ] # You need to install the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256 # More info here: http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider - supported-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] + supported-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"] # Using /dev/./urandom is only necessary when using SHA1PRNG on Linux to prevent blocking # It is NOT as secure because it reuses the seed diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 32a161aa94..024ed104c3 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -106,7 +106,7 @@ private[akka] class NettySettings(config: Config, val systemName: String) { case password ⇒ Some(password) } - val SSLSupportedAlgorithms = getStringList("ssl.supported-algorithms") + val SSLSupportedAlgorithms = getStringList("ssl.supported-algorithms").toArray.toSet 
val SSLProtocol = getString("ssl.protocol") match { case "" ⇒ None diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 4ac3c7ffe0..712213dfa0 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -30,6 +30,7 @@ object Configuration { trust-store = "%s" key-store = "%s" random-number-generator = "%s" + supported-algorithms = [%s] } } actor.deployment { @@ -40,38 +41,37 @@ object Configuration { } """ - def getCipherConfig(cipher: String): (String, Boolean, Config) = if (try { + def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = if (try { NettySSLSupport.initialiseCustomSecureRandom(Some(cipher), None, NoLogging) ne null } catch { - case iae: IllegalArgumentException if iae.getMessage == "Cannot support %s with currently installed providers".format(cipher) ⇒ false - case nsae: java.security.NoSuchAlgorithmException ⇒ false - }) (cipher, true, ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher))) else (cipher, false, AkkaSpec.testConf) + case _: IllegalArgumentException ⇒ false // Cannot match against the message since the message might be localized :S + case _: java.security.NoSuchAlgorithmException ⇒ false + }) (cipher, true, ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) else (cipher, false, AkkaSpec.testConf) } import Configuration.getCipherConfig @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978SHA1PRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("SHA1PRNG")) +class Ticket1978SHA1PRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("SHA1PRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGFastSpec 
extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGFast")) +class Ticket1978AES128CounterRNGFastSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGFast", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) /** * Both of the Secure variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure")) +class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) /** * Both of the Secure variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES256CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterRNGSecure")) +class Ticket1978AES256CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterRNGSecure", "TLS_RSA_WITH_AES_256_CBC_SHA")) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(("NonExistingRNG", false, AkkaSpec.testConf)) -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boolean, Config)) extends AkkaSpec(cipherEnabledconfig._3) with ImplicitSender with DefaultTimeout { import RemoteCommunicationSpec._ diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala index c6556f0160..4017f1cfcc 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -40,7 +40,7 @@ akka { SSLTrustStore must be(Some("truststore")) 
SSLTrustStorePassword must be(Some("changeme")) SSLProtocol must be(Some("TLSv1")) - SSLSupportedAlgorithms must be(java.util.Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) + SSLSupportedAlgorithms must be(Set("TLS_RSA_WITH_AES_128_CBC_SHA")) SSLRandomSource must be(None) SSLRandomNumberGenerator must be(None) } From d0272b848d179b85151bf2ac94507ee296bdf5bd Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 15 Jun 2012 18:31:28 +0200 Subject: [PATCH 405/538] Adding a test for the default RNG --- akka-remote/src/main/resources/reference.conf | 1 + .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 3 +++ 2 files changed, 4 insertions(+) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 94a13865bb..e2c0a45346 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -208,6 +208,7 @@ akka { # The following use one of 3 possible seed sources, depending on availability: /dev/random, random.org and SecureRandom (provided by Java) # "AES128CounterRNGSecure" # "AES256CounterRNGSecure" (Install JCE Unlimited Strength Jurisdiction Policy Files first) + # Setting a value here may require you to supply the appropriate cipher suite (see supported-algorithms section above) random-number-generator = "" } } diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 712213dfa0..bbd0dab6a5 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -69,6 +69,9 @@ class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(g @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978AES256CounterRNGSecureSpec extends 
Ticket1978CommunicationSpec(getCipherConfig("AES256CounterRNGSecure", "TLS_RSA_WITH_AES_256_CBC_SHA")) +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978DefaultRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("", "TLS_RSA_WITH_AES_128_CBC_SHA")) + @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(("NonExistingRNG", false, AkkaSpec.testConf)) From faff67c7fa9cb8081e23edf3bf2b4dc2183c473a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 15 Jun 2012 18:49:07 +0200 Subject: [PATCH 406/538] Commenting out the SSL tests until I have time to fix them --- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index bbd0dab6a5..592529bed1 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.remote +/*package akka.remote import akka.testkit._ import akka.actor._ @@ -172,4 +172,4 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo } -} +}*/ From 469fcd8305257f0b00525ad60b5b066781fe5920 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:00:19 +0200 Subject: [PATCH 407/538] Redesign of life-cycle management of EXITING -> REMOVED. Fixes #2177. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Removed REMOVED as explicit valid member state - Implemented leader moving either itself or other member from EXITING -> REMOVED - Added sending Remove message for removed node to shut down itself - Fixed a few bugs - Removed 'remove' from Cluster and JMX interface - Added bunch of ScalaDoc - Added isRunning method Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 343 ++++++++++-------- 1 file changed, 194 insertions(+), 149 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 694793249f..ad9f9abaa4 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -6,27 +6,27 @@ package akka.cluster import akka.actor._ import akka.actor.Status._ +import akka.ConfigurationException +import akka.dispatch.Await +import akka.dispatch.MonitorableThreadFactory +import akka.event.Logging +import akka.jsr166y.ThreadLocalRandom +import akka.pattern.ask import akka.remote._ import akka.routing._ -import akka.event.Logging -import akka.dispatch.Await -import akka.pattern.ask import akka.util._ import akka.util.duration._ -import akka.ConfigurationException -import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } -import java.util.concurrent.TimeUnit._ -import java.util.concurrent.TimeoutException -import akka.jsr166y.ThreadLocalRandom -import java.lang.management.ManagementFactory -import java.io.Closeable -import javax.management._ -import scala.collection.immutable.{ Map, SortedSet } -import scala.annotation.tailrec -import com.google.protobuf.ByteString import akka.util.internal.HashedWheelTimer -import akka.dispatch.MonitorableThreadFactory +import com.google.protobuf.ByteString +import java.io.Closeable +import java.lang.management.ManagementFactory +import 
java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } +import java.util.concurrent.TimeoutException +import java.util.concurrent.TimeUnit._ +import javax.management._ import MemberStatus._ +import scala.annotation.tailrec +import scala.collection.immutable.{ Map, SortedSet } /** * Interface for membership change listener. @@ -69,11 +69,6 @@ object ClusterUserAction { * Command to mark node as temporary down. */ case class Down(address: Address) extends ClusterMessage - - /** - * Command to remove a node from the cluster immediately. - */ - case class Remove(address: Address) extends ClusterMessage } /** @@ -82,15 +77,25 @@ object ClusterUserAction { object ClusterLeaderAction { /** + * INTERNAL API. + * * Command to mark a node to be removed from the cluster immediately. * Can only be sent by the leader. */ - private[akka] case class Exit(address: Address) extends ClusterMessage + private[cluster] case class Exit(address: Address) extends ClusterMessage + + /** + * INTERNAL API. + * + * Command to remove a node from the cluster immediately. + */ + private[cluster] case class Remove(address: Address) extends ClusterMessage } /** * Represents the address and the current status of a cluster member node. * + * Note: `hashCode` and `equals` are solely based on the underlying `Address`, not its `MemberStatus`. */ class Member(val address: Address, val status: MemberStatus) extends ClusterMessage { override def hashCode = address.## @@ -105,7 +110,7 @@ class Member(val address: Address, val status: MemberStatus) extends ClusterMess object Member { /** - * Sort Address by host and port + * `Address` ordering type class, sorts addresses by host and port. */ implicit val addressOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) ⇒ if (a.host != b.host) a.host.getOrElse("").compareTo(b.host.getOrElse("")) < 0 @@ -113,6 +118,9 @@ object Member { else false } + /** + * `Member` ordering type class, sorts members by `Address`. 
+ */ implicit val ordering: Ordering[Member] = new Ordering[Member] { def compare(x: Member, y: Member) = addressOrdering.compare(x.address, y.address) } @@ -154,10 +162,11 @@ case class GossipEnvelope(from: Address, gossip: Gossip) extends ClusterMessage * Can be one of: Joining, Up, Leaving, Exiting and Down. */ sealed trait MemberStatus extends ClusterMessage { + /** - * Using the same notion for 'unavailable' as 'non-convergence': DOWN and REMOVED. + * Using the same notion for 'unavailable' as 'non-convergence': DOWN */ - def isUnavailable: Boolean = this == Down || this == Removed + def isUnavailable: Boolean = this == Down } object MemberStatus { @@ -223,6 +232,7 @@ case class Gossip( // FIXME can be disabled as optimization assertInvariants + private def assertInvariants: Unit = { val unreachableAndLive = members.intersect(overview.unreachable) if (unreachableAndLive.nonEmpty) @@ -248,6 +258,9 @@ case class Gossip( */ def :+(node: VectorClock.Node): Gossip = copy(version = version :+ node) + /** + * Adds a member to the member node ring. + */ def :+(member: Member): Gossip = { if (members contains member) this else this copy (members = members + member) @@ -312,11 +325,14 @@ case class Gossip( case class Heartbeat(from: Address) extends ClusterMessage /** + * INTERNAL API. + * * Manages routing of the different cluster commands. * Instantiated as a single instance for each Cluster - e.g. commands are serialized to Cluster message after message. */ -private[akka] final class ClusterCommandDaemon(cluster: Cluster) extends Actor { - import ClusterAction._ +private[cluster] final class ClusterCommandDaemon(cluster: Cluster) extends Actor { + import ClusterUserAction._ + import ClusterLeaderAction._ val log = Logging(context.system, this) @@ -332,10 +348,12 @@ private[akka] final class ClusterCommandDaemon(cluster: Cluster) extends Actor { } /** + * INTERNAL API. + * * Pooled and routed with N number of configurable instances. * Concurrent access to Cluster. 
*/ -private[akka] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { +private[cluster] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { val log = Logging(context.system, this) def receive = { @@ -347,9 +365,11 @@ private[akka] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { } /** + * INTERNAL API. + * * Supervisor managing the different Cluster daemons. */ -private[akka] final class ClusterDaemonSupervisor(cluster: Cluster) extends Actor { +private[cluster] final class ClusterDaemonSupervisor(cluster: Cluster) extends Actor { val log = Logging(context.system, this) private val commands = context.actorOf(Props(new ClusterCommandDaemon(cluster)), "commands") @@ -402,11 +422,11 @@ trait ClusterNodeMBean { def isSingleton: Boolean def isConvergence: Boolean def isAvailable: Boolean + def isRunning: Boolean def join(address: String) def leave(address: String) def down(address: String) - def remove(address: String) } /** @@ -459,7 +479,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private val serialization = remote.serialization - private val isRunning = new AtomicBoolean(true) + private val _isRunning = new AtomicBoolean(true) private val log = Logging(system, "Node") private val mBeanServer = ManagementFactory.getPlatformMBeanServer @@ -566,6 +586,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } } + /** + * Returns true if the cluster node is up and running, false if it is shut down. + */ + def isRunning: Boolean = _isRunning.get + /** * Latest gossip. */ @@ -574,7 +599,10 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) /** * Member status for this node. */ - def status: MemberStatus = self.status + def status: MemberStatus = { + if (isRunning) self.status + else MemberStatus.Removed + } /** * Is this node the leader? 
@@ -606,38 +634,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) */ def isAvailable: Boolean = !isUnavailable(state.get) - /** - * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. - * - * INTERNAL API: - * Should not called by the user. The user can issue a LEAVE command which will tell the node - * to go through graceful handoff process LEAVE -> EXITING -> REMOVED -> SHUTDOWN. - */ - private[akka] def shutdown(): Unit = { - if (isRunning.compareAndSet(true, false)) { - log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) - - // cancel the periodic tasks, note that otherwise they will be run when scheduler is shutdown - gossipTask.cancel() - heartbeatTask.cancel() - failureDetectorReaperTask.cancel() - leaderActionsTask.cancel() - clusterScheduler.close() - - // FIXME isTerminated check can be removed when ticket #2221 is fixed - // now it prevents logging if system is shutdown (or in progress of shutdown) - if (!clusterDaemons.isTerminated) - system.stop(clusterDaemons) - - try { - mBeanServer.unregisterMBean(clusterMBeanName) - } catch { - case e: InstanceNotFoundException ⇒ // ignore - we are running multiple cluster nodes in the same JVM (probably for testing) - } - log.info("Cluster Node [{}] - Cluster node successfully shut down", selfAddress) - } - } - /** * Registers a listener to subscribe to cluster membership changes. */ @@ -685,34 +681,57 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) clusterCommandDaemon ! ClusterUserAction.Down(address) } - /** - * Send command to REMOVE the node specified by 'address'. - */ - def remove(address: Address): Unit = { - clusterCommandDaemon ! 
ClusterUserAction.Remove(address) - } - // ======================================================== // ===================== INTERNAL API ===================== // ======================================================== /** - * State transition to JOINING. - * New node joining. + * INTERNAL API. + * + * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. + * + * Should not called by the user. The user can issue a LEAVE command which will tell the node + * to go through graceful handoff process `LEAVE -> EXITING -> REMOVED -> SHUTDOWN`. + */ + private[cluster] def shutdown(): Unit = { + if (_isRunning.compareAndSet(true, false)) { + log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) + + // cancel the periodic tasks, note that otherwise they will be run when scheduler is shutdown + gossipTask.cancel() + heartbeatTask.cancel() + failureDetectorReaperTask.cancel() + leaderActionsTask.cancel() + clusterScheduler.close() + + // FIXME isTerminated check can be removed when ticket #2221 is fixed + // now it prevents logging if system is shutdown (or in progress of shutdown) + if (!clusterDaemons.isTerminated) + system.stop(clusterDaemons) + + try { + mBeanServer.unregisterMBean(clusterMBeanName) + } catch { + case e: InstanceNotFoundException ⇒ // ignore - we are running multiple cluster nodes in the same JVM (probably for testing) + } + log.info("Cluster Node [{}] - Cluster node successfully shut down", selfAddress) + } + } + + /** + * INTERNAL API. + * + * State transition to JOINING - new node joining. 
*/ @tailrec private[cluster] final def joining(node: Address): Unit = { - log.info("Cluster Node [{}] - Node [{}] is JOINING", selfAddress, node) - val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members val localUnreachable = localGossip.overview.unreachable val alreadyMember = localMembers.exists(_.address == node) - val isUnreachable = localUnreachable.exists { m ⇒ - m.address == node && m.status != Down && m.status != Removed - } + val isUnreachable = localUnreachable.exists { m ⇒ m.address == node && m.status != Down } if (!alreadyMember && !isUnreachable) { @@ -730,6 +749,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update else { + log.info("Cluster Node [{}] - Node [{}] is JOINING", selfAddress, node) // treat join as initial heartbeat, so that it becomes unavailable if nothing more happens if (node != selfAddress) failureDetector heartbeat node notifyMembershipChangeListeners(localState, newState) @@ -738,17 +758,16 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** + * INTERNAL API. + * * State transition to LEAVING. 
*/ @tailrec private[cluster] final def leaving(address: Address) { - log.info("Cluster Node [{}] - Marking address [{}] as LEAVING", selfAddress, address) - val localState = state.get val localGossip = localState.latestGossip - val localMembers = localGossip.members - val newMembers = localMembers + Member(address, Leaving) // mark node as LEAVING + val newMembers = localGossip.members + Member(address, Leaving) // mark node as LEAVING val newGossip = localGossip copy (members = newMembers) val versionedGossip = newGossip :+ vclockNode @@ -758,27 +777,31 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update else { + log.info("Cluster Node [{}] - Marked address [{}] as LEAVING", selfAddress, address) notifyMembershipChangeListeners(localState, newState) } } - private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = { - val oldMembersStatus = oldState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) - val newMembersStatus = newState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) - if (newMembersStatus != oldMembersStatus) - newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } - } - /** + * INTERNAL API. + * * State transition to EXITING. */ private[cluster] final def exiting(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as EXITING", selfAddress, address) + log.info("Cluster Node [{}] - Marked node [{}] as EXITING", selfAddress, address) // FIXME implement when we implement hand-off } /** + * INTERNAL API. + * * State transition to REMOVED. + * + * This method is for now only called after the LEADER have sent a Removed message - telling the node + * to shut down himself. 
+ * + * In the future we might change this to allow the USER to send a Removed(address) message telling an + * arbitrary node to be moved direcly from UP -> REMOVED. */ private[cluster] final def removing(address: Address): Unit = { log.info("Cluster Node [{}] - Node has been REMOVED by the leader - shutting down...", selfAddress) @@ -786,6 +809,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** + * INTERNAL API. + * * The node to DOWN is removed from the 'members' set and put in the 'unreachable' set (if not already there) * and its status is set to DOWN. The node is also removed from the 'seen' table. * @@ -843,6 +868,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** + * INTERNAL API. + * * Receive new gossip. */ @tailrec @@ -856,9 +883,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val mergedGossip = remoteGossip merge localGossip val versionedMergedGossip = mergedGossip :+ vclockNode - // FIXME change to debug log level, when failure detector is stable - log.info( - """Can't establish a causal relationship between "remote" gossip [{}] and "local" gossip [{}] - merging them into [{}]""", + log.debug( + """Can't establish a causal relationship between "remote" gossip and "local" gossip - Remote[{}] - Local[{}] - merging them into [{}]""", remoteGossip, localGossip, versionedMergedGossip) versionedMergedGossip @@ -883,7 +909,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. */ private[cluster] def receiveHeartbeat(from: Address): Unit = failureDetector heartbeat from @@ -893,11 +919,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private def autoJoin(): Unit = nodeToJoin foreach join /** - * INTERNAL API + * INTERNAL API. * * Gossips latest gossip to an address. 
*/ - private[akka] def gossipTo(address: Address): Unit = { + private[cluster] def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection) connection ! GossipEnvelope(selfAddress, latestGossip) @@ -917,18 +943,18 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. */ - private[akka] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = + private[cluster] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = (membersSize + unreachableSize) match { case 0 ⇒ 0.0 case sum ⇒ unreachableSize.toDouble / sum } /** - * INTERNAL API + * INTERNAL API. */ - private[akka] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { + private[cluster] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { if (nrOfDeputyNodes > membersSize) 1.0 else if (nrOfDeputyNodes == 0) 0.0 else (membersSize + unreachableSize) match { @@ -938,11 +964,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. * * Initates a new round of gossip. */ - private[akka] def gossip(): Unit = { + private[cluster] def gossip(): Unit = { val localState = state.get log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) @@ -979,9 +1005,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. */ - private[akka] def heartbeat(): Unit = { + private[cluster] def heartbeat(): Unit = { val localState = state.get if (!isSingletonCluster(localState)) { @@ -996,12 +1022,12 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. 
* * Reaps the unreachable members (moves them to the 'unreachable' list in the cluster overview) according to the failure detector's verdict. */ @tailrec - final private[akka] def reapUnreachableMembers(): Unit = { + final private[cluster] def reapUnreachableMembers(): Unit = { val localState = state.get if (!isSingletonCluster(localState) && isAvailable(localState)) { @@ -1040,12 +1066,12 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. * * Runs periodic leader actions, such as auto-downing unreachable nodes, assigning partitions etc. */ @tailrec - final private[akka] def leaderActions(): Unit = { + final private[cluster] def leaderActions(): Unit = { val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members @@ -1065,13 +1091,17 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localUnreachableMembers = localOverview.unreachable // Leader actions are as follows: - // 1. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) - remove the nodes from the node ring + // 1. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) - remove the nodes from the node ring and seen table // 2. Move JOINING => UP -- When a node joins the cluster // 3. Move LEAVING => EXITING -- When all partition handoff has completed // 4. Move UNREACHABLE => DOWN -- When the node is in the UNREACHABLE set it can be auto-down by leader // 5. Updating the vclock version for the changes // 6. 
Updating the 'seen' table + // store away removed and exiting members so we can separate the pure state changes (that can be retried on collision) and the side-effecting message sending + var removedMembers = Set.empty[Member] + var exitingMembers = Set.empty[Member] + var hasChangedState = false val newGossip = @@ -1079,21 +1109,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // we have convergence - so we can't have unreachable nodes val newMembers = - // ---------------------- - // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring + // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring and seen table // ---------------------- - // localMembers filter { member ⇒ - // if (member.status == MemberStatus.Exiting) { - // log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - Removing node from node ring", selfAddress, member.address) - // hasChangedState = true - // clusterCommandConnectionFor(member.address) ! ClusterUserAction.Remove(member.address) // tell the removed node to shut himself down - // false - // } else true + localMembers filter { member ⇒ + if (member.status == MemberStatus.Exiting) { + log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - and removing node from node ring", selfAddress, member.address) + hasChangedState = true + removedMembers = removedMembers + member + false + } else true - localMembers map { member ⇒ + } map { member ⇒ // ---------------------- - // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) + // 2. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. 
we have a convergence) // ---------------------- if (member.status == Joining) { log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) @@ -1101,16 +1130,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) member copy (status = Up) } else member - } map { member ⇒ - // ---------------------- - // 2. Move EXITING => REMOVED (once all nodes have seen that this node is EXITING e.g. we have a convergence) - // ---------------------- - if (member.status == Exiting) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", selfAddress, member.address) - hasChangedState = true - member copy (status = Removed) - } else member - } map { member ⇒ // ---------------------- // 3. Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) @@ -1118,15 +1137,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (member.status == Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address) hasChangedState = true - -// clusterCommandConnectionFor(member.address) ! ClusterLeaderAction.Exit(member.address) // FIXME should use ? to await completion of handoff? 
+ exitingMembers = exitingMembers + member member copy (status = Exiting) - } else member } - localGossip copy (members = newMembers) // update gossip + // removing REMOVED nodes from the 'seen' table + val newSeen = removedMembers.foldLeft(localSeen) { (seen, removed) ⇒ seen - removed.address } + + // removing REMOVED nodes from the 'unreachable' set + val newUnreachableMembers = removedMembers.foldLeft(localUnreachableMembers) { (unreachable, removed) ⇒ unreachable - removed } + + val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview + localGossip copy (members = newMembers, overview = newOverview) // update gossip } else if (AutoDown) { // we don't have convergence - so we might have unreachable nodes @@ -1147,9 +1171,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } // removing nodes marked as DOWN from the 'seen' table - val newSeen = localSeen -- newUnreachableMembers.collect { - case m if m.status == Down ⇒ m.address - } + val newSeen = localSeen -- newUnreachableMembers.collect { case m if m.status == Down ⇒ m.address } val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview localGossip copy (overview = newOverview) // update gossip @@ -1165,14 +1187,35 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // ---------------------- // 6. Updating the 'seen' table + // Unless the leader (this node) is part of the removed members, i.e. 
the leader have moved himself from EXITING -> REMOVED // ---------------------- - val seenVersionedGossip = versionedGossip seen selfAddress + val seenVersionedGossip = + if (removedMembers.exists(_.address == selfAddress)) versionedGossip + else versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) leaderActions() // recur else { + // do the side-effecting notifications on state-change success + + if (removedMembers.exists(_.address == selfAddress)) { + // we now know that this node (the leader) is just about to shut down since it will be moved from EXITING -> REMOVED + // so now let's gossip out this information directly since there will not be any other chance + gossip() + } + + // tell all removed members to remove and shut down themselves + removedMembers.map(_.address) foreach { address ⇒ + clusterCommandConnectionFor(address) ! ClusterLeaderAction.Remove(address) + } + + // tell all exiting members to exit + exitingMembers.map(_.address) foreach { address ⇒ + clusterCommandConnectionFor(address) ! ClusterLeaderAction.Exit(address) // FIXME should use ? to await completion of handoff? 
+ } + notifyMembershipChangeListeners(localState, newState) } } @@ -1196,9 +1239,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // Else we can't continue to check for convergence // When that is done we check that all the entries in the 'seen' table have the same vector clock version // and that all members exists in seen table - val hasUnreachable = unreachable.nonEmpty && unreachable.exists { m ⇒ - m.status != Down && m.status != Removed - } + val hasUnreachable = unreachable.nonEmpty && unreachable.exists { _.status != Down } val allMembersInSeen = gossip.members.forall(m ⇒ seen.contains(m.address)) if (hasUnreachable) { @@ -1227,14 +1268,18 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private def isUnavailable(state: State): Boolean = { val localGossip = state.latestGossip - val localOverview = localGossip.overview - val localMembers = localGossip.members - val localUnreachableMembers = localOverview.unreachable - val isUnreachable = localUnreachableMembers exists { _.address == selfAddress } - val hasUnavailableMemberStatus = localMembers exists { m ⇒ (m == self) && m.status.isUnavailable } + val isUnreachable = localGossip.overview.unreachable exists { _.address == selfAddress } + val hasUnavailableMemberStatus = localGossip.members exists { _.status.isUnavailable } isUnreachable || hasUnavailableMemberStatus } + private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = { + val oldMembersStatus = oldState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + val newMembersStatus = newState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + if (newMembersStatus != oldMembersStatus) + newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } + } + /** * Looks up and returns the local cluster command connection. 
*/ @@ -1257,9 +1302,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) addresses drop 1 take NrOfDeputyNodes filterNot (_ == selfAddress) /** - * INTERNAL API + * INTERNAL API. */ - private[akka] def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = + private[cluster] def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) @@ -1302,6 +1347,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) def isAvailable: Boolean = clusterNode.isAvailable + def isRunning: Boolean = clusterNode.isRunning + // JMX commands def join(address: String) = clusterNode.join(AddressFromURIString(address)) @@ -1309,8 +1356,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) def leave(address: String) = clusterNode.leave(AddressFromURIString(address)) def down(address: String) = clusterNode.down(AddressFromURIString(address)) - - def remove(address: String) = clusterNode.remove(AddressFromURIString(address)) } log.info("Cluster Node [{}] - registering cluster JMX MBean [{}]", selfAddress, clusterMBeanName) try { From 41ec4363145b3e91f0436a986b8be0851ce8386a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:01:02 +0200 Subject: [PATCH 408/538] Removed 'remove' from, and added 'isRunning' to, 'akka-cluster' admin script. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-kernel/src/main/dist/bin/akka-cluster | 29 +++++++++++----------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/akka-kernel/src/main/dist/bin/akka-cluster b/akka-kernel/src/main/dist/bin/akka-cluster index 3e76cdbb11..fe3af38449 100755 --- a/akka-kernel/src/main/dist/bin/akka-cluster +++ b/akka-kernel/src/main/dist/bin/akka-cluster @@ -63,20 +63,6 @@ case "$2" in $JMX_CLIENT $HOST akka:type=Cluster leave=$ACTOR_SYSTEM_URL ;; - remove) - if [ $# -ne 3 ]; then - echo "Usage: $SELF remove " - exit 1 - fi - - ensureNodeIsRunningAndAvailable - shift - - ACTOR_SYSTEM_URL=$2 - echo "Scheduling $ACTOR_SYSTEM_URL to REMOVE" - $JMX_CLIENT $HOST akka:type=Cluster remove=$ACTOR_SYSTEM_URL - ;; - down) if [ $# -ne 3 ]; then echo "Usage: $SELF down " @@ -169,19 +155,32 @@ case "$2" in $JMX_CLIENT $HOST akka:type=Cluster Available ;; + is-running) + if [ $# -ne 2 ]; then + echo "Usage: $SELF is-running" + exit 1 + fi + + ensureNodeIsRunningAndAvailable + shift + + echo "Checking if member node on $HOST is AVAILABLE" + $JMX_CLIENT $HOST akka:type=Cluster Running + ;; + *) printf "Usage: bin/$SELF ...\n" printf "\n" printf "Supported commands are:\n" printf "%26s - %s\n" "join " "Sends request a JOIN node with the specified URL" printf "%26s - %s\n" "leave " "Sends a request for node with URL to LEAVE the cluster" - printf "%26s - %s\n" "remove " "Sends a request for node with URL to be instantly REMOVED from the cluster" printf "%26s - %s\n" "down " "Sends a request for marking node with URL as DOWN" printf "%26s - %s\n" member-status "Asks the member node for its current status" printf "%26s - %s\n" cluster-status "Asks the cluster for its current status (member ring, unavailable nodes, meta data etc.)" printf "%26s - %s\n" leader "Asks the cluster who the current leader is" printf "%26s - %s\n" is-singleton "Checks if the cluster is a 
singleton cluster (single node cluster)" printf "%26s - %s\n" is-available "Checks if the member node is available" + printf "%26s - %s\n" is-running "Checks if the member node is running" printf "%26s - %s\n" has-convergence "Checks if there is a cluster convergence" printf "Where the should be on the format of 'akka://actor-system-name@hostname:port'\n" printf "\n" From 2822ba52465664706316be4b2fc2b7e3f1aca01b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:01:58 +0200 Subject: [PATCH 409/538] Fixed and enabled tests that test LEAVING -> EXITING -> REMOVED. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...LeavingAndExitingAndBeingRemovedSpec.scala | 22 +++++++++---------- .../cluster/NodeLeavingAndExitingSpec.scala | 3 +-- .../scala/akka/cluster/NodeLeavingSpec.scala | 3 +-- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 01e5f8aa74..7a233f9395 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -18,9 +18,9 @@ object NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec extends MultiNodeConfig commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec with 
AccrualFailureDetectorStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec with FailureDetectorPuppetStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec with FailureDetectorPuppetStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec with FailureDetectorPuppetStrategy abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) @@ -36,8 +36,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest ignore { + "eventually set to REMOVED by the reaper, and removed from membership ring and seen table" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) @@ -50,13 +49,14 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec // verify that the 'second' node is no longer part of the 'members' set awaitCond(cluster.latestGossip.members.forall(_.address != secondAddress), reaperWaitingTime) - // verify that the 'second' node is part of the 'unreachable' set - awaitCond(cluster.latestGossip.overview.unreachable.exists(_.status == MemberStatus.Removed), reaperWaitingTime) + // verify that the 'second' node is not part of the 'unreachable' set + awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != secondAddress), reaperWaitingTime) + } - // verify node that got removed is 'second' node - val isRemoved = cluster.latestGossip.overview.unreachable.find(_.status == MemberStatus.Removed) - isRemoved must be('defined) - isRemoved.get.address must be(secondAddress) + runOn(second) { + // verify that the second node is shut down and has status REMOVED + awaitCond(!cluster.isRunning, reaperWaitingTime) + 
awaitCond(cluster.status == MemberStatus.Removed, reaperWaitingTime) } testConductor.enter("finished") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 6378a74040..ef285b5070 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -42,8 +42,7 @@ abstract class NodeLeavingAndExitingSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be moved to EXITING by the leader" taggedAs LongRunningTest ignore { + "be moved to EXITING by the leader" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 8ea21e9380..8f637d87e5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -36,8 +36,7 @@ abstract class NodeLeavingSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest ignore { + "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) From 616aaacbe87a52893cbeb42e6591ca70e33dc697 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:02:20 +0200 Subject: [PATCH 410/538] Changed logging in FD from INFO to DEBUG. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/AccrualFailureDetector.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 6632111f00..62d5fa4eb9 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -165,8 +165,7 @@ class AccrualFailureDetector( else PhiFactor * timestampDiff / mean } - // FIXME change to debug log level, when failure detector is stable - log.info("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) + log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) phi } From 486853b7bd534571e54866358811c74f6d85537e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:03:13 +0200 Subject: [PATCH 411/538] Removed MembershipChangeListenerRemovedSpec. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since there is no listener being called for the transition to REMOVED. Signed-off-by: Jonas Bonér --- .../MembershipChangeListenerRemovedSpec.scala | 71 ------------------- 1 file changed, 71 deletions(-) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala deleted file mode 100644 index 6b737a22e2..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.cluster - -import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import akka.util.duration._ - -object MembershipChangeListenerRemovedMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - val third = role("third") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) -} - -class MembershipChangeListenerRemovedMultiJvmNode1 extends MembershipChangeListenerRemovedSpec -class MembershipChangeListenerRemovedMultiJvmNode2 extends MembershipChangeListenerRemovedSpec -class MembershipChangeListenerRemovedMultiJvmNode3 extends MembershipChangeListenerRemovedSpec - -abstract class MembershipChangeListenerRemovedSpec extends MultiNodeSpec(MembershipChangeListenerRemovedMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { - import MembershipChangeListenerRemovedMultiJvmSpec._ - - override def initialParticipants = 3 - - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - - val reaperWaitingTime = 30.seconds.dilated - - "A registered MembershipChangeListener" must { - "be notified when new node is REMOVED" taggedAs LongRunningTest in { - - runOn(first) { - cluster.self - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") - - runOn(third) { - val removedLatch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - println("------- MembershipChangeListener " + members.mkString(", ")) - if (members.size == 3 && members.find(_.address == secondAddress).isEmpty) - removedLatch.countDown() - } - }) - 
removedLatch.await - } - - runOn(first) { - cluster.leave(secondAddress) - } - - testConductor.enter("finished") - } - } -} From 86dc1fe69d9b981a387a5c20cca4a8d4244e770a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:03:45 +0200 Subject: [PATCH 412/538] Minor edits to cluster specification. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-docs/cluster/cluster.rst | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index 1368d7835f..0126897dab 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -163,8 +163,8 @@ After gossip convergence a ``leader`` for the cluster can be determined. There i ``leader`` election process, the ``leader`` can always be recognised deterministically by any node whenever there is gossip convergence. The ``leader`` is simply the first node in sorted order that is able to take the leadership role, where the only -allowed member states for a ``leader`` are ``up`` or ``leaving`` (see below for more -information about member states). +allowed member states for a ``leader`` are ``up``, ``leaving`` or ``exiting`` (see +below for more information about member states). The role of the ``leader`` is to shift members in and out of the cluster, changing ``joining`` members to the ``up`` state or ``exiting`` members to the @@ -301,12 +301,6 @@ handoff has completed then the node will change to the ``exiting`` state. Once all nodes have seen the exiting state (convergence) the ``leader`` will remove the node from the cluster, marking it as ``removed``. -A node can also be removed forcefully by moving it directly to the ``removed`` -state using the ``remove`` action. The cluster will rebalance based on the new -cluster membership. 
This will also happen if you are shutting the system down -forcefully (through an external ``SIGKILL`` signal, ``System.exit(status)`` or -similar. - If a node is unreachable then gossip convergence is not possible and therefore any ``leader`` actions are also not possible (for instance, allowing a node to become a part of the cluster, or changing actor distribution). To be able to @@ -315,11 +309,12 @@ unreachable node is experiencing only transient difficulties then it can be explicitly marked as ``down`` using the ``down`` user action. When this node comes back up and begins gossiping it will automatically go through the joining process again. If the unreachable node will be permanently down then it can be -removed from the cluster directly with the ``remove`` user action. The cluster -can also *auto-down* a node using the accrual failure detector. +removed from the cluster directly by shutting the actor system down or killing it +through an external ``SIGKILL`` signal, invocation of ``System.exit(status)`` or +similar. The cluster can, through the leader, also *auto-down* a node. -This means that nodes can join and leave the cluster at any point in time, -e.g. provide cluster elasticity. +This means that nodes can join and leave the cluster at any point in time, i.e. +provide cluster elasticity. 
State Diagram for the Member States @@ -340,12 +335,12 @@ Member States - **leaving** / **exiting** states during graceful removal -- **removed** - tombstone state (no longer a member) - - **down** marked as down/offline/unreachable +- **removed** + tombstone state (no longer a member) + User Actions ^^^^^^^^^^^^ @@ -360,9 +355,6 @@ User Actions - **down** mark a node as temporarily down -- **remove** - remove a node from the cluster immediately - Leader Actions ^^^^^^^^^^^^^^ From 07dadc40cb0eb0fe5953d81975a6c0bf0c851e5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:04:37 +0200 Subject: [PATCH 413/538] Added spec testing telling a LEADER to LEAVE (and transition from UP -> LEAVING -> EXITING -> REMOVED). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/cluster/LeaderLeavingSpec.scala | 93 +++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala new file mode 100644 index 0000000000..04b93e8a8c --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -0,0 +1,93 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object LeaderLeavingMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + leader-actions-interval = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state + unreachable-nodes-reaper-interval = 30 s + } + """) + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class LeaderLeavingMultiJvmNode1 extends LeaderLeavingSpec with FailureDetectorPuppetStrategy +class LeaderLeavingMultiJvmNode2 extends LeaderLeavingSpec with FailureDetectorPuppetStrategy +class LeaderLeavingMultiJvmNode3 extends LeaderLeavingSpec with FailureDetectorPuppetStrategy + +abstract class LeaderLeavingSpec + extends MultiNodeSpec(LeaderLeavingMultiJvmSpec) + with MultiNodeClusterSpec { + + import LeaderLeavingMultiJvmSpec._ + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + + val reaperWaitingTime = 30.seconds.dilated + + def leaderRole = cluster.leader match { + case `firstAddress` => first + case `secondAddress` => second + case `thirdAddress` => third + } + + "A LEADER that is LEAVING" must { + + "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest in { + + awaitClusterUp(first, second, third) + + val oldLeaderAddress = cluster.leader + + if (cluster.isLeader) { + + cluster.leave(oldLeaderAddress) + testConductor.enter("leader-left") + + // verify that the LEADER is shut down + awaitCond(!cluster.isRunning, reaperWaitingTime) + + // 
verify that the LEADER is REMOVED + awaitCond(cluster.status == MemberStatus.Removed, reaperWaitingTime) + + } else { + + testConductor.enter("leader-left") + + // verify that the LEADER is LEAVING + awaitCond(cluster.latestGossip.members.exists(m => m.status == MemberStatus.Leaving && m.address == oldLeaderAddress)) // wait on LEAVING + + // verify that the LEADER is EXITING + awaitCond(cluster.latestGossip.members.exists(m => m.status == MemberStatus.Exiting && m.address == oldLeaderAddress)) // wait on EXITING + + // verify that the LEADER is no longer part of the 'members' set + awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress), reaperWaitingTime) + + // verify that the LEADER is not part of the 'unreachable' set + awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != oldLeaderAddress), reaperWaitingTime) + + // verify that we have a new LEADER + awaitCond(cluster.leader != oldLeaderAddress, reaperWaitingTime) + } + + testConductor.enter("finished") + } + } +} From 0df105f8a1d97954b464db73e32cdfe142d3b1ec Mon Sep 17 00:00:00 2001 From: Roland Kuhn Date: Mon, 18 Jun 2012 10:20:43 +0300 Subject: [PATCH 414/538] =?UTF-8?q?correct=20ActorSystem=E2=80=99s=20error?= =?UTF-8?q?=20message=20for=20invalid=20system=20name,=20see=20#2246?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 721375adda..0483ccf60f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -430,7 +430,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-]*$""")) throw new IllegalArgumentException( "invalid ActorSystem name 
[" + name + - "], must contain only word characters (i.e. [a-zA-Z_0-9] plus non-leading '-')") + "], must contain only word characters (i.e. [a-zA-Z0-9] plus non-leading '-')") import ActorSystem._ From fb04786072281fc9e4ceb9c73c9d20aa38882ba2 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 18 Jun 2012 11:16:30 +0200 Subject: [PATCH 415/538] Cleanup based on review comments, see #2201 --- .../scala/akka/cluster/MultiNodeClusterSpec.scala | 14 +++++--------- .../src/test/scala/akka/cluster/ClusterSpec.scala | 6 +++--- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 7d1457234f..45e81df743 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -42,11 +42,14 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu /** * Lookup the Address for the role. + * + * Implicit conversion from RoleName to Address. + * * It is cached, which has the implication that stopping * and then restarting a role (jvm) with another address is not * supported. */ - def address(role: RoleName): Address = { + implicit def address(role: RoleName): Address = { cachedAddresses.get(role) match { case null ⇒ val address = node(role).address @@ -56,11 +59,6 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu } } - /** - * implicit conversion from RoleName to Address - */ - implicit def role2Address(role: RoleName): Address = address(role) - // Cluster tests are written so that if previous step (test method) failed // it will most likely not be possible to run next step. This ensures // fail fast of steps after the first failure. 
@@ -196,8 +194,6 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu def compare(x: RoleName, y: RoleName) = addressOrdering.compare(address(x), address(y)) } - def roleName(addr: Address): Option[RoleName] = { - roles.find(address(_) == addr) - } + def roleName(addr: Address): Option[RoleName] = roles.find(address(_) == addr) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index b2c1a70ec2..229ec7137d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -83,7 +83,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { before { cluster._gossipToUnreachableProbablity = 0.0 cluster._gossipToDeputyProbablity = 0.0 - addresses.foreach(failureDetector.remove(_)) + addresses foreach failureDetector.remove deterministicRandom.set(0) } @@ -180,7 +180,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { "gossip to random unreachable node" in { val dead = Set(addresses(1)) - dead.foreach(failureDetector.markNodeAsUnavailable(_)) + dead foreach failureDetector.markNodeAsUnavailable cluster._gossipToUnreachableProbablity = 1.0 // always cluster.reapUnreachableMembers() @@ -198,7 +198,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { cluster._gossipToDeputyProbablity = -1.0 // real impl // 0 and 2 still alive val dead = Set(addresses(1), addresses(3), addresses(4), addresses(5)) - dead.foreach(failureDetector.markNodeAsUnavailable(_)) + dead foreach failureDetector.markNodeAsUnavailable cluster.reapUnreachableMembers() cluster.latestGossip.overview.unreachable.map(_.address) must be(dead) From f44bc9dc0ce887ce20706d4350c6ef978714dd90 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 12:09:12 +0200 Subject: [PATCH 416/538] Switching to a more continuation based approach for 
sending that also solves the issue when sending is not possible --- .../akka/zeromq/ConcurrentSocketActor.scala | 78 +++++++++++-------- .../scala/akka/zeromq/ZeroMQExtension.scala | 7 +- 2 files changed, 47 insertions(+), 38 deletions(-) diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index e848809644..fa1da6e4ba 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -9,14 +9,17 @@ import akka.actor._ import akka.dispatch.{ Promise, Future } import akka.event.Logging import annotation.tailrec -import akka.util.Duration import java.util.concurrent.TimeUnit +import collection.mutable.ListBuffer +import akka.util.{ NonFatal, Duration } private[zeromq] object ConcurrentSocketActor { private sealed trait PollMsg private case object Poll extends PollMsg private case object PollCareful extends PollMsg + private case object Flush + private class NoSocketHandleException() extends Exception("Couldn't create a zeromq socket.") private val DefaultContext = Context() @@ -32,19 +35,28 @@ private[zeromq] class ConcurrentSocketActor(params: Seq[SocketOption]) extends A import SocketType.{ ZMQSocketType ⇒ ST } params.collectFirst { case t: ST ⇒ t }.getOrElse(throw new IllegalArgumentException("A socket type is required")) } + private val socket: Socket = zmqContext.socket(socketType) private val poller: Poller = zmqContext.poller private val log = Logging(context.system, this) + private val pendingSends = new ListBuffer[Seq[Frame]] + def receive = { case m: PollMsg ⇒ doPoll(m) - case ZMQMessage(frames) ⇒ sendMessage(frames) + case ZMQMessage(frames) ⇒ handleRequest(Send(frames)) case r: Request ⇒ handleRequest(r) + case Flush ⇒ flush() case Terminated(_) ⇒ context stop self } private def handleRequest(msg: Request): Unit = msg match { - case Send(frames) ⇒ sendMessage(frames) + case 
Send(frames) ⇒ + if (frames.nonEmpty) { + val flushNow = pendingSends.isEmpty + pendingSends.append(frames) + if (flushNow) flush() + } case opt: SocketOption ⇒ handleSocketOption(opt) case q: SocketOptionQuery ⇒ handleSocketOptionQuery(q) } @@ -117,48 +129,48 @@ private[zeromq] class ConcurrentSocketActor(params: Seq[SocketOption]) extends A } } - private def setupConnection() { + private def setupConnection(): Unit = { params filter (_.isInstanceOf[SocketConnectOption]) foreach { self ! _ } params filter (_.isInstanceOf[PubSubOption]) foreach { self ! _ } } - private def deserializerFromParams = { + private def deserializerFromParams: Deserializer = params collectFirst { case d: Deserializer ⇒ d } getOrElse new ZMQMessageDeserializer + + private def setupSocket() = params foreach { + case _: SocketConnectOption | _: PubSubOption | _: SocketMeta ⇒ // ignore, handled differently + case m ⇒ self ! m } - private def setupSocket() = { - params foreach { - case _: SocketConnectOption | _: PubSubOption | _: SocketMeta ⇒ // ignore, handled differently - case m ⇒ self ! m + override def preRestart(reason: Throwable, message: Option[Any]): Unit = context.children foreach context.stop //Do not call postStop + + override def postRestart(reason: Throwable): Unit = { + if (pendingSends.nonEmpty) self ! 
Flush // If we're restarting we might want to resume sending the messages + } + + override def postStop: Unit = try { + if (socket != null) { + poller.unregister(socket) + socket.close } - } + } finally notifyListener(Closed) - override def preRestart(reason: Throwable, message: Option[Any]) { - context.children foreach context.stop //Do not call postStop - } - - override def postRestart(reason: Throwable) {} //Do nothing - - override def postStop { - try { - if (socket != null) { - poller.unregister(socket) - socket.close + @tailrec private def flushMessage(i: Seq[Frame]): Boolean = + if (i.isEmpty) + false + else { + val head = i.head + val tail = i.tail + if (socket.send(head.payload.toArray, if (tail.nonEmpty) JZMQ.SNDMORE else 0)) flushMessage(tail) + else { + pendingSends.prepend(i) // Reenqueue the rest of the message so the next flush takes care of it + self ! Flush + false } - } finally { - notifyListener(Closed) } - } - private def sendMessage(frames: Seq[Frame]) { - def sendBytes(bytes: Seq[Byte], flags: Int) = socket.send(bytes.toArray, flags) - val iter = frames.iterator - while (iter.hasNext) { - val payload = iter.next.payload - val flags = if (iter.hasNext) JZMQ.SNDMORE else 0 - sendBytes(payload, flags) - } - } + @tailrec private def flush(): Unit = + if (pendingSends.nonEmpty && flushMessage(pendingSends.remove(0))) flush() // Flush while things are going well // this is a “PollMsg=>Unit” which either polls or schedules Poll, depending on the sign of the timeout private val doPollTimeout = { diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala b/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala index 85a9ea6642..4bf52a41e3 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala @@ -139,8 +139,7 @@ class ZeroMQExtension(system: ActorSystem) extends Extension { */ def newSocket(socketParameters: SocketOption*): ActorRef = { implicit 
val timeout = NewSocketTimeout - val req = (zeromqGuardian ? newSocketProps(socketParameters: _*)).mapTo[ActorRef] - Await.result(req, timeout.duration) + Await.result((zeromqGuardian ? newSocketProps(socketParameters: _*)).mapTo[ActorRef], timeout.duration) } /** @@ -248,9 +247,7 @@ class ZeroMQExtension(system: ActorSystem) extends Extension { case _ ⇒ false } - def receive = { - case p: Props ⇒ sender ! context.actorOf(p) - } + def receive = { case p: Props ⇒ sender ! context.actorOf(p) } }), "zeromq") } From 12e90a98dc0c575fa5d9289d9f1af099139c3da9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 18 Jun 2012 11:54:44 +0200 Subject: [PATCH 417/538] Remove address vals in tests, fix race in TransitionSpec, see #2201 --- ...ientDowningNodeThatIsUnreachableSpec.scala | 2 +- .../ClientDowningNodeThatIsUpSpec.scala | 2 +- .../scala/akka/cluster/ConvergenceSpec.scala | 20 +++++++------------ .../akka/cluster/JoinTwoClustersSpec.scala | 14 +++++-------- ...aderDowningNodeThatIsUnreachableSpec.scala | 4 ++-- .../akka/cluster/LeaderElectionSpec.scala | 4 ++-- .../MembershipChangeListenerExitingSpec.scala | 8 ++------ .../MembershipChangeListenerJoinSpec.scala | 7 ++----- .../MembershipChangeListenerLeavingSpec.scala | 11 ++++------ .../MembershipChangeListenerUpSpec.scala | 12 ++++------- .../akka/cluster/MultiNodeClusterSpec.scala | 2 +- .../scala/akka/cluster/NodeJoinSpec.scala | 9 +++------ ...LeavingAndExitingAndBeingRemovedSpec.scala | 10 +++------- .../cluster/NodeLeavingAndExitingSpec.scala | 10 +++------- .../scala/akka/cluster/NodeLeavingSpec.scala | 8 ++------ .../akka/cluster/NodeMembershipSpec.scala | 12 ++++------- .../scala/akka/cluster/NodeUpSpec.scala | 2 +- .../akka/cluster/SingletonClusterSpec.scala | 2 +- .../scala/akka/cluster/TransitionSpec.scala | 8 +++++--- 19 files changed, 53 insertions(+), 94 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 343f0c7c17..5ebc6cbf02 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -37,7 +37,7 @@ abstract class ClientDowningNodeThatIsUnreachableSpec "Client of a 4 node cluster" must { "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in { - val thirdAddress = node(third).address + val thirdAddress = address(third) awaitClusterUp(first, second, third, fourth) runOn(first) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 95eeefd982..79cd209a99 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -37,7 +37,7 @@ abstract class ClientDowningNodeThatIsUpSpec "Client of a 4 node cluster" must { "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { - val thirdAddress = node(third).address + val thirdAddress = address(third) awaitClusterUp(first, second, third, fourth) runOn(first) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 52206f1b8c..88b91b9fcf 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -50,7 +50,7 @@ abstract class ConvergenceSpec } "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest in { - val thirdAddress = node(third).address + val thirdAddress = address(third) testConductor.enter("before-shutdown") runOn(first) { @@ -60,15 +60,13 @@ 
abstract class ConvergenceSpec } runOn(first, second) { - val firstAddress = node(first).address - val secondAddress = node(second).address within(28 seconds) { // third becomes unreachable awaitCond(cluster.latestGossip.overview.unreachable.size == 1) awaitCond(cluster.latestGossip.members.size == 2) awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) - awaitSeenSameState(Seq(firstAddress, secondAddress)) + awaitSeenSameState(first, second) // still one unreachable cluster.latestGossip.overview.unreachable.size must be(1) cluster.latestGossip.overview.unreachable.head.address must be(thirdAddress) @@ -84,24 +82,20 @@ abstract class ConvergenceSpec "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest in { runOn(fourth) { // try to join - cluster.join(node(first).address) + cluster.join(first) } - val firstAddress = node(first).address - val secondAddress = node(second).address - val fourthAddress = node(fourth).address - def memberStatus(address: Address): Option[MemberStatus] = cluster.latestGossip.members.collectFirst { case m if m.address == address ⇒ m.status } def assertNotMovedUp: Unit = { within(20 seconds) { awaitCond(cluster.latestGossip.members.size == 3) - awaitSeenSameState(Seq(firstAddress, secondAddress, fourthAddress)) - memberStatus(firstAddress) must be(Some(MemberStatus.Up)) - memberStatus(secondAddress) must be(Some(MemberStatus.Up)) + awaitSeenSameState(first, second, fourth) + memberStatus(first) must be(Some(MemberStatus.Up)) + memberStatus(second) must be(Some(MemberStatus.Up)) // leader is not allowed to move the new node to Up - memberStatus(fourthAddress) must be(Some(MemberStatus.Joining)) + memberStatus(fourth) must be(Some(MemberStatus.Joining)) // still no convergence cluster.convergence.isDefined must be(false) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala 
index 4b64bb6e58..23961579a3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -33,10 +33,6 @@ abstract class JoinTwoClustersSpec import JoinTwoClustersMultiJvmSpec._ - lazy val a1Address = node(a1).address - lazy val b1Address = node(b1).address - lazy val c1Address = node(c1).address - "Three different clusters (A, B and C)" must { "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { @@ -47,13 +43,13 @@ abstract class JoinTwoClustersSpec testConductor.enter("first-started") runOn(a1, a2) { - cluster.join(a1Address) + cluster.join(a1) } runOn(b1, b2) { - cluster.join(b1Address) + cluster.join(b1) } runOn(c1, c2) { - cluster.join(c1Address) + cluster.join(c1) } awaitUpConvergence(numberOfMembers = 2) @@ -65,7 +61,7 @@ abstract class JoinTwoClustersSpec testConductor.enter("two-members") runOn(b2) { - cluster.join(a1Address) + cluster.join(a1) } runOn(a1, a2, b1, b2) { @@ -81,7 +77,7 @@ abstract class JoinTwoClustersSpec "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { runOn(b2) { - cluster.join(c1Address) + cluster.join(c1) } awaitUpConvergence(numberOfMembers = 6) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 5e2545394d..91b3d6f175 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -42,7 +42,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { awaitClusterUp(first, second, third, fourth) - val fourthAddress = node(fourth).address + val fourthAddress = address(fourth) runOn(first) 
{ // kill 'fourth' node testConductor.shutdown(fourth, 0) @@ -70,7 +70,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { - val secondAddress = node(second).address + val secondAddress = address(second) testConductor.enter("before-down-second-node") runOn(first) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index e161206ba0..2ec6d2b18d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -63,7 +63,7 @@ abstract class LeaderElectionSpec myself match { case `controller` ⇒ - val leaderAddress = node(leader).address + val leaderAddress = address(leader) testConductor.enter("before-shutdown") testConductor.shutdown(leader, 0) testConductor.enter("after-shutdown", "after-down", "completed") @@ -74,7 +74,7 @@ abstract class LeaderElectionSpec // this node will be shutdown by the controller and doesn't participate in more barriers case `aUser` ⇒ - val leaderAddress = node(leader).address + val leaderAddress = address(leader) testConductor.enter("before-shutdown", "after-shutdown") // user marks the shutdown leader as DOWN cluster.down(leaderAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index 88cee08191..32cb8a427d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -37,10 +37,6 @@ abstract class MembershipChangeListenerExitingSpec import MembershipChangeListenerExitingMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = 
node(second).address - lazy val thirdAddress = node(third).address - "A registered MembershipChangeListener" must { "be notified when new node is EXITING" taggedAs LongRunningTest in { @@ -48,7 +44,7 @@ abstract class MembershipChangeListenerExitingSpec runOn(first) { testConductor.enter("registered-listener") - cluster.leave(secondAddress) + cluster.leave(second) } runOn(second) { @@ -59,7 +55,7 @@ abstract class MembershipChangeListenerExitingSpec val exitingLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Exiting)) + if (members.size == 3 && members.exists(m ⇒ m.address == address(second) && m.status == MemberStatus.Exiting)) exitingLatch.countDown() } }) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 536fb3b58d..e9ae72a894 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -30,15 +30,12 @@ abstract class MembershipChangeListenerJoinSpec import MembershipChangeListenerJoinMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - "A registered MembershipChangeListener" must { "be notified when new node is JOINING" taggedAs LongRunningTest in { runOn(first) { val joinLatch = TestLatch() - val expectedAddresses = Set(firstAddress, secondAddress) + val expectedAddresses = Set(first, second) map address cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.map(_.address) == expectedAddresses && members.exists(_.status == MemberStatus.Joining)) @@ -52,7 +49,7 @@ abstract class MembershipChangeListenerJoinSpec 
runOn(second) { testConductor.enter("registered-listener") - cluster.join(firstAddress) + cluster.join(first) } awaitUpConvergence(2) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 0640e58175..c644ebb288 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -9,6 +9,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ +import akka.actor.Address object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { val first = role("first") @@ -34,10 +35,6 @@ abstract class MembershipChangeListenerLeavingSpec import MembershipChangeListenerLeavingMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - "A registered MembershipChangeListener" must { "be notified when new node is LEAVING" taggedAs LongRunningTest in { @@ -45,7 +42,7 @@ abstract class MembershipChangeListenerLeavingSpec runOn(first) { testConductor.enter("registered-listener") - cluster.leave(secondAddress) + cluster.leave(second) } runOn(second) { @@ -54,11 +51,11 @@ abstract class MembershipChangeListenerLeavingSpec runOn(third) { val latch = TestLatch() - val expectedAddresses = Set(firstAddress, secondAddress, thirdAddress) + val expectedAddresses = Set(first, second, third) map address cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.map(_.address) == expectedAddresses && - members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Leaving)) + members.exists(m ⇒ m.address == address(second) && m.status == MemberStatus.Leaving)) 
latch.countDown() } }) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index f48f9c8d9b..321a34f4e7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -27,10 +27,6 @@ abstract class MembershipChangeListenerUpSpec import MembershipChangeListenerUpMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - "A set of connected cluster systems" must { "(when two nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { @@ -39,7 +35,7 @@ abstract class MembershipChangeListenerUpSpec runOn(first, second) { val latch = TestLatch() - val expectedAddresses = Set(firstAddress, secondAddress) + val expectedAddresses = Set(first, second) map address cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.map(_.address) == expectedAddresses && members.forall(_.status == MemberStatus.Up)) @@ -47,7 +43,7 @@ abstract class MembershipChangeListenerUpSpec } }) testConductor.enter("listener-1-registered") - cluster.join(firstAddress) + cluster.join(first) latch.await } @@ -61,7 +57,7 @@ abstract class MembershipChangeListenerUpSpec "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { val latch = TestLatch() - val expectedAddresses = Set(firstAddress, secondAddress, thirdAddress) + val expectedAddresses = Set(first, second, third) map address cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.map(_.address) == 
expectedAddresses && members.forall(_.status == MemberStatus.Up)) @@ -71,7 +67,7 @@ abstract class MembershipChangeListenerUpSpec testConductor.enter("listener-2-registered") runOn(third) { - cluster.join(firstAddress) + cluster.join(first) } latch.await diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 45e81df743..f94ba8bc5e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -173,7 +173,7 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu /** * Wait until the specified nodes have seen the same gossip overview. */ - def awaitSeenSameState(addresses: Seq[Address]): Unit = { + def awaitSeenSameState(addresses: Address*): Unit = { awaitCond { val seen = cluster.latestGossip.overview.seen val seenVectorClocks = addresses.flatMap(seen.get(_)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 6cf5fc220d..8a531f322c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -17,7 +17,7 @@ object NodeJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString("akka.cluster.leader-actions-interval = 5 s") // increase the leader action task interval - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class NodeJoinMultiJvmNode1 extends NodeJoinSpec with FailureDetectorPuppetStrategy @@ -29,9 +29,6 @@ abstract class NodeJoinSpec import NodeJoinMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - "A cluster node" must { "join another cluster and get 
status JOINING - when sending a 'Join' command" taggedAs LongRunningTest in { @@ -40,10 +37,10 @@ abstract class NodeJoinSpec } runOn(second) { - cluster.join(firstAddress) + cluster.join(first) } - awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Joining }) + awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == address(second) && member.status == MemberStatus.Joining }) testConductor.enter("after") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 01e5f8aa74..bd72eb0a90 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -28,10 +28,6 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec import NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - val reaperWaitingTime = 30.seconds.dilated "A node that is LEAVING a non-singleton cluster" must { @@ -42,13 +38,13 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec awaitClusterUp(first, second, third) runOn(first) { - cluster.leave(secondAddress) + cluster.leave(second) } testConductor.enter("second-left") runOn(first, third) { // verify that the 'second' node is no longer part of the 'members' set - awaitCond(cluster.latestGossip.members.forall(_.address != secondAddress), reaperWaitingTime) + awaitCond(cluster.latestGossip.members.forall(_.address != address(second)), reaperWaitingTime) // verify that the 'second' node is part of the 'unreachable' set awaitCond(cluster.latestGossip.overview.unreachable.exists(_.status == MemberStatus.Removed), reaperWaitingTime) @@ 
-56,7 +52,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec // verify node that got removed is 'second' node val isRemoved = cluster.latestGossip.overview.unreachable.find(_.status == MemberStatus.Removed) isRemoved must be('defined) - isRemoved.get.address must be(secondAddress) + isRemoved.get.address must be(address(second)) } testConductor.enter("finished") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index fc62c17c1d..209999a7de 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -36,10 +36,6 @@ abstract class NodeLeavingAndExitingSpec import NodeLeavingAndExitingMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - "A node that is LEAVING a non-singleton cluster" must { // FIXME make it work and remove ignore @@ -48,7 +44,7 @@ abstract class NodeLeavingAndExitingSpec awaitClusterUp(first, second, third) runOn(first) { - cluster.leave(secondAddress) + cluster.leave(second) } testConductor.enter("second-left") @@ -60,13 +56,13 @@ abstract class NodeLeavingAndExitingSpec awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Leaving)) // wait on LEAVING val hasLeft = cluster.latestGossip.members.find(_.status == MemberStatus.Leaving) // verify node that left hasLeft must be('defined) - hasLeft.get.address must be(secondAddress) + hasLeft.get.address must be(address(second)) // 2. 
Verify that 'second' node is set to EXITING awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Exiting)) // wait on EXITING val hasExited = cluster.latestGossip.members.find(_.status == MemberStatus.Exiting) // verify node that exited hasExited must be('defined) - hasExited.get.address must be(secondAddress) + hasExited.get.address must be(address(second)) } testConductor.enter("finished") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 8ea21e9380..dfb398fb04 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -30,10 +30,6 @@ abstract class NodeLeavingSpec import NodeLeavingMultiJvmSpec._ - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - "A node that is LEAVING a non-singleton cluster" must { // FIXME make it work and remove ignore @@ -42,7 +38,7 @@ abstract class NodeLeavingSpec awaitClusterUp(first, second, third) runOn(first) { - cluster.leave(secondAddress) + cluster.leave(second) } testConductor.enter("second-left") @@ -51,7 +47,7 @@ abstract class NodeLeavingSpec val hasLeft = cluster.latestGossip.members.find(_.status == MemberStatus.Leaving) hasLeft must be('defined) - hasLeft.get.address must be(secondAddress) + hasLeft.get.address must be(address(second)) } testConductor.enter("finished") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index fb0573f77f..454230b10a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -26,10 +26,6 @@ abstract class NodeMembershipSpec import NodeMembershipMultiJvmSpec._ - lazy val 
firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - "A set of connected cluster systems" must { "(when two nodes) start gossiping to each other so that both nodes gets the same gossip info" taggedAs LongRunningTest in { @@ -41,9 +37,9 @@ abstract class NodeMembershipSpec testConductor.enter("first-started") runOn(first, second) { - cluster.join(firstAddress) + cluster.join(first) awaitCond(cluster.latestGossip.members.size == 2) - assertMembers(cluster.latestGossip.members, firstAddress, secondAddress) + assertMembers(cluster.latestGossip.members, first, second) awaitCond { cluster.latestGossip.members.forall(_.status == MemberStatus.Up) } @@ -56,11 +52,11 @@ abstract class NodeMembershipSpec "(when three nodes) start gossiping to each other so that all nodes gets the same gossip info" taggedAs LongRunningTest in { runOn(third) { - cluster.join(firstAddress) + cluster.join(first) } awaitCond(cluster.latestGossip.members.size == 3) - assertMembers(cluster.latestGossip.members, firstAddress, secondAddress, thirdAddress) + assertMembers(cluster.latestGossip.members, first, second, third) awaitCond { cluster.latestGossip.members.forall(_.status == MemberStatus.Up) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 0fdc3c89b8..76b62bdec9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -48,7 +48,7 @@ abstract class NodeUpSpec testConductor.enter("listener-registered") runOn(second) { - cluster.join(node(first).address) + cluster.join(first) } testConductor.enter("joined-again") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index cada29e210..5a02ce036c 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -48,7 +48,7 @@ abstract class SingletonClusterSpec "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { runOn(first) { - val secondAddress = node(second).address + val secondAddress = address(second) testConductor.shutdown(second, 0) markNodeAsUnavailable(secondAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 16cc4b385f..20dd0c8dda 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -61,7 +61,7 @@ abstract class TransitionSpec } def awaitSeen(addresses: Address*): Unit = awaitCond { - seenLatestGossip.map(node(_).address) == addresses.toSet + (seenLatestGossip map address) == addresses.toSet } def awaitMembers(addresses: Address*): Unit = awaitCond { @@ -86,7 +86,7 @@ abstract class TransitionSpec } runOn(fromRole) { testConductor.enter("before-gossip-" + gossipBarrierCounter) - cluster.gossipTo(node(toRole).address) // send gossip + cluster.gossipTo(toRole) // send gossip testConductor.enter("after-gossip-" + gossipBarrierCounter) } runOn(roles.filterNot(r ⇒ r == fromRole || r == toRole): _*) { @@ -252,7 +252,7 @@ abstract class TransitionSpec // first non-leader gossipTo the other non-leader nonLeader(first, second, third).head gossipTo nonLeader(first, second, third).tail.head runOn(nonLeader(first, second, third).head) { - cluster.gossipTo(node(nonLeader(first, second, third).tail.head).address) + cluster.gossipTo(nonLeader(first, second, third).tail.head) } runOn(nonLeader(first, second, third).tail.head) { memberStatus(third) must be(Up) @@ -412,6 +412,8 @@ abstract class TransitionSpec awaitMemberStatus(second, Down) } + testConductor.enter("after-third-down") 
+ // spread the word val gossipRound2 = List(third, fourth, fifth, first, third, fourth, fifth) for (x :: y :: Nil ← gossipRound2.sliding(2)) { From a5fe6ea607020ebfc15270acd8b1ea5a2a85a2de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 13:51:00 +0200 Subject: [PATCH 418/538] Fixed wrong ScalaDoc. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-actor/src/main/scala/akka/AkkaException.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 04e820419f..8e49c7cb11 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -9,7 +9,6 @@ package akka *

    *
  • a uuid for tracking purposes
  • *
  • toString that includes exception name, message and uuid
  • - *
  • toLongString which also includes the stack trace
  • *
*/ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed From e362c2f48819c1cccd4c96e2f9b13efae34012db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 13:51:54 +0200 Subject: [PATCH 419/538] Cleaned up LeaderLeavingSpec. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …and turned it off until redesign of leader leaving is implement. Signed-off-by: Jonas Bonér --- .../akka/cluster/LeaderLeavingSpec.scala | 20 ++++++------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index 04b93e8a8c..f3274583b5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -40,17 +40,9 @@ abstract class LeaderLeavingSpec lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address - val reaperWaitingTime = 30.seconds.dilated - - def leaderRole = cluster.leader match { - case `firstAddress` => first - case `secondAddress` => second - case `thirdAddress` => third - } - "A LEADER that is LEAVING" must { - "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest in { + "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) @@ -62,10 +54,10 @@ abstract class LeaderLeavingSpec testConductor.enter("leader-left") // verify that the LEADER is shut down - awaitCond(!cluster.isRunning, reaperWaitingTime) + awaitCond(!cluster.isRunning) // verify that the LEADER is REMOVED - awaitCond(cluster.status == MemberStatus.Removed, reaperWaitingTime) + awaitCond(cluster.status == MemberStatus.Removed) } else { @@ -78,13 
+70,13 @@ abstract class LeaderLeavingSpec awaitCond(cluster.latestGossip.members.exists(m => m.status == MemberStatus.Exiting && m.address == oldLeaderAddress)) // wait on EXITING // verify that the LEADER is no longer part of the 'members' set - awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress), reaperWaitingTime) + awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress)) // verify that the LEADER is not part of the 'unreachable' set - awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != oldLeaderAddress), reaperWaitingTime) + awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != oldLeaderAddress)) // verify that we have a new LEADER - awaitCond(cluster.leader != oldLeaderAddress, reaperWaitingTime) + awaitCond(cluster.leader != oldLeaderAddress) } testConductor.enter("finished") From c0dff0050b353bd9683d8f2eb26318d2e309baaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 13:52:06 +0200 Subject: [PATCH 420/538] Minor edit . 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 6f3ddfc866..798dd0058d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -20,9 +20,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") commonConfig(ConfigFactory.parseString(""" - akka.cluster { - nr-of-deputy-nodes = 0 - } + akka.cluster.nr-of-deputy-nodes = 0 akka.loglevel = INFO """)) } From 8b6652a79491dfecacec322cccc51e8ad4307960 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 13:53:49 +0200 Subject: [PATCH 421/538] Fixed all issues from review. In particular fully separated state transformation and preparation for side-effecting processing. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 196 ++++++++++-------- 1 file changed, 115 insertions(+), 81 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index b233b9bfbf..528672363d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -770,19 +770,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private[cluster] final def leaving(address: Address) { val localState = state.get val localGossip = localState.latestGossip + if (localGossip.members.exists(_.address == address)) { // only try to update if the node is available (in the member ring) + val newMembers = localGossip.members map { member ⇒ if (member.address == address) Member(address, Leaving) else member } // mark node as LEAVING + val newGossip = localGossip copy (members = newMembers) - val newMembers = localGossip.members + Member(address, Leaving) // mark node as LEAVING - val newGossip = localGossip copy (members = newMembers) + val versionedGossip = newGossip :+ vclockNode + val seenVersionedGossip = versionedGossip seen selfAddress - val versionedGossip = newGossip :+ vclockNode - val seenVersionedGossip = versionedGossip seen selfAddress + val newState = localState copy (latestGossip = seenVersionedGossip) - val newState = localState copy (latestGossip = seenVersionedGossip) - - if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update - else { - log.info("Cluster Node [{}] - Marked address [{}] as LEAVING", selfAddress, address) - notifyMembershipChangeListeners(localState, newState) + if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update + else { + log.info("Cluster Node [{}] - Marked address [{}] 
as LEAVING", selfAddress, address) + notifyMembershipChangeListeners(localState, newState) + } } } @@ -1082,115 +1083,126 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val isLeader = localMembers.nonEmpty && (selfAddress == localMembers.head.address) - // FIXME implement partion handoff and a check if it is completed - now just returns TRUE - e.g. has completed successfully - def hasPartionHandoffCompletedSuccessfully(gossip: Gossip): Boolean = { - true - } - if (isLeader && isAvailable(localState)) { // only run the leader actions if we are the LEADER and available val localOverview = localGossip.overview val localSeen = localOverview.seen val localUnreachableMembers = localOverview.unreachable + val hasPartionHandoffCompletedSuccessfully: Boolean = { + // FIXME implement partion handoff and a check if it is completed - now just returns TRUE - e.g. has completed successfully + true + } // Leader actions are as follows: // 1. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) - remove the nodes from the node ring and seen table // 2. Move JOINING => UP -- When a node joins the cluster // 3. Move LEAVING => EXITING -- When all partition handoff has completed // 4. Move UNREACHABLE => DOWN -- When the node is in the UNREACHABLE set it can be auto-down by leader - // 5. Updating the vclock version for the changes - // 6. Updating the 'seen' table + // 5. Store away all stuff needed for the side-effecting processing in 10. + // 6. Updating the vclock version for the changes + // 7. Updating the 'seen' table + // 8. Try to update the state with the new gossip + // 9. If failure - retry + // 10. 
If success - run all the side-effecting processing - // store away removed and exiting members so we can separate the pure state changes (that can be retried on collision) and the side-effecting message sending - var removedMembers = Set.empty[Member] - var exitingMembers = Set.empty[Member] - - var hasChangedState = false - val newGossip = + val ( + newGossip: Gossip, + hasChangedState: Boolean, + upMembers: Set[Member], + exitingMembers: Set[Member], + removedMembers: Set[Member], + unreachableButNotDownedMembers: Set[Member]) = if (convergence(localGossip).isDefined) { // we have convergence - so we can't have unreachable nodes + // transform the node member ring - filterNot/map/map val newMembers = - // ---------------------- - // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring and seen table - // ---------------------- - localMembers filter { member ⇒ - if (member.status == MemberStatus.Exiting) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - and removing node from node ring", selfAddress, member.address) - hasChangedState = true - removedMembers = removedMembers + member - false - } else true + localMembers filterNot { member ⇒ + // ---------------------- + // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring and seen table + // ---------------------- + member.status == MemberStatus.Exiting } map { member ⇒ // ---------------------- // 2. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) // ---------------------- - if (member.status == Joining) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) - hasChangedState = true - member copy (status = Up) - } else member + if (member.status == Joining) member copy (status = Up) + else member } map { member ⇒ // ---------------------- // 3. 
Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) // ---------------------- - if (member.status == Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address) - hasChangedState = true - exitingMembers = exitingMembers + member - member copy (status = Exiting) - } else member - + if (member.status == Leaving && hasPartionHandoffCompletedSuccessfully) member copy (status = Exiting) + else member } + // ---------------------- + // 5. Store away all stuff needed for the side-effecting processing in 10. + // ---------------------- + + // Check for the need to do side-effecting on successful state change + // Repeat the checking for transitions between JOINING -> UP, LEAVING -> EXITING, EXITING -> REMOVED + // to check for state-changes and to store away removed and exiting members for later notification + // 1. check for state-changes to update + // 2. 
store away removed and exiting members so we can separate the pure state changes (that can be retried on collision) and the side-effecting message sending + val (removedMembers, newMembers1) = localMembers partition (_.status == Exiting) + + val (upMembers, newMembers2) = newMembers1 partition (_.status == Joining) + + val (exitingMembers, newMembers3) = newMembers2 partition (_.status == Leaving && hasPartionHandoffCompletedSuccessfully) + + val hasChangedState = removedMembers.nonEmpty || upMembers.nonEmpty || exitingMembers.nonEmpty + // removing REMOVED nodes from the 'seen' table - val newSeen = removedMembers.foldLeft(localSeen) { (seen, removed) ⇒ seen - removed.address } + //val newSeen = removedMembers.foldLeft(localSeen) { (seen, removed) ⇒ seen - removed.address } + val newSeen = localSeen -- removedMembers.map(_.address) // removing REMOVED nodes from the 'unreachable' set - val newUnreachableMembers = removedMembers.foldLeft(localUnreachableMembers) { (unreachable, removed) ⇒ unreachable - removed } + //val newUnreachableMembers = removedMembers.foldLeft(localUnreachableMembers) { (unreachable, removed) ⇒ unreachable - removed } + val newUnreachableMembers = localUnreachableMembers -- removedMembers val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview - localGossip copy (members = newMembers, overview = newOverview) // update gossip + val newGossip = localGossip copy (members = newMembers, overview = newOverview) // update gossip + + (newGossip, hasChangedState, upMembers, exitingMembers, removedMembers, Set.empty[Member]) } else if (AutoDown) { // we don't have convergence - so we might have unreachable nodes - // if 'auto-down' is turned on, then try to auto-down any unreachable nodes - // ---------------------- - // 4. 
Move UNREACHABLE => DOWN (auto-downing by leader) - // ---------------------- - val newUnreachableMembers = - localUnreachableMembers.map { member ⇒ - // no need to DOWN members already DOWN - if (member.status == Down) member - else { - log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address) - hasChangedState = true - member copy (status = Down) - } - } + // if 'auto-down' is turned on, then try to auto-down any unreachable nodes + val newUnreachableMembers = localUnreachableMembers.map { member ⇒ + // ---------------------- + // 5. Move UNREACHABLE => DOWN (auto-downing by leader) + // ---------------------- + if (member.status == Down) member // no need to DOWN members already DOWN + else member copy (status = Down) + } + + // Check for the need to do side-effecting on successful state change + val (unreachableButNotDownedMembers, _) = localUnreachableMembers partition (_.status != Down) // removing nodes marked as DOWN from the 'seen' table val newSeen = localSeen -- newUnreachableMembers.collect { case m if m.status == Down ⇒ m.address } val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview - localGossip copy (overview = newOverview) // update gossip + val newGossip = localGossip copy (overview = newOverview) // update gossip - } else localGossip + (newGossip, unreachableButNotDownedMembers.nonEmpty, Set.empty[Member], Set.empty[Member], Set.empty[Member], unreachableButNotDownedMembers) + + } else (localGossip, false, Set.empty[Member], Set.empty[Member], Set.empty[Member], Set.empty[Member]) if (hasChangedState) { // we have a change of state - version it and try to update - // ---------------------- - // 5. Updating the vclock version for the changes + // 6. Updating the vclock version for the changes // ---------------------- val versionedGossip = newGossip :+ vclockNode // ---------------------- - // 6. Updating the 'seen' table + // 7. 
Updating the 'seen' table // Unless the leader (this node) is part of the removed members, i.e. the leader have moved himself from EXITING -> REMOVED // ---------------------- val seenVersionedGossip = @@ -1199,27 +1211,49 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newState = localState copy (latestGossip = seenVersionedGossip) - // if we won the race then update else try again - if (!state.compareAndSet(localState, newState)) leaderActions() // recur - else { - // do the side-effecting notifications on state-change success + // ---------------------- + // 8. Try to update the state with the new gossip + // ---------------------- + if (!state.compareAndSet(localState, newState)) { - if (removedMembers.exists(_.address == selfAddress)) { - // we now know that this node (the leader) is just about to shut down since it will be moved from EXITING -> REMOVED - // so now let's gossip out this information directly since there will not be any other chance - gossip() - } + // ---------------------- + // 9. Failure - retry + // ---------------------- + leaderActions() // recur + + } else { + // ---------------------- + // 10. 
Success - run all the side-effecting processing + // ---------------------- + + // if (removedMembers.exists(_.address == selfAddress)) { + // // we now know that this node (the leader) is just about to shut down since it will be moved from EXITING -> REMOVED + // // so now let's gossip out this information directly since there will not be any other chance + // gossip() + // } + + // log the move of members from joining to up + upMembers foreach { member ⇒ log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) } // tell all removed members to remove and shut down themselves - removedMembers.map(_.address) foreach { address ⇒ + removedMembers foreach { member ⇒ + val address = member.address + log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - and removing node from node ring", selfAddress, address) clusterCommandConnectionFor(address) ! ClusterLeaderAction.Remove(address) } // tell all exiting members to exit - exitingMembers.map(_.address) foreach { address ⇒ + exitingMembers foreach { member ⇒ + val address = member.address + log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, address) clusterCommandConnectionFor(address) ! ClusterLeaderAction.Exit(address) // FIXME should use ? to await completion of handoff? 
} + // log the auto-downing of the unreachable nodes + unreachableButNotDownedMembers foreach { member ⇒ + log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address) + } + notifyMembershipChangeListeners(localState, newState) } } @@ -1273,13 +1307,13 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private def isUnavailable(state: State): Boolean = { val localGossip = state.latestGossip val isUnreachable = localGossip.overview.unreachable exists { _.address == selfAddress } - val hasUnavailableMemberStatus = localGossip.members exists { _.status.isUnavailable } + val hasUnavailableMemberStatus = localGossip.members exists { m ⇒ (m == self) && m.status.isUnavailable } isUnreachable || hasUnavailableMemberStatus } private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = { - val oldMembersStatus = oldState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) - val newMembersStatus = newState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + val oldMembersStatus = oldState.latestGossip.members.map(m ⇒ (m.address, m.status)) + val newMembersStatus = newState.latestGossip.members.map(m ⇒ (m.address, m.status)) if (newMembersStatus != oldMembersStatus) newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } } From 5a3a02a5161af87358f6cc44c4773eaf46656991 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 13:57:42 +0200 Subject: [PATCH 422/538] Unrandom to unb0rk Linux, because Linux has some issues with dev/random entropy --- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 592529bed1..29860d1ec9 100644 --- 
a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -/*package akka.remote +package akka.remote import akka.testkit._ import akka.actor._ @@ -31,6 +31,7 @@ object Configuration { key-store = "%s" random-number-generator = "%s" supported-algorithms = [%s] + sha1prng-random-source = "/dev/./urandom" } } actor.deployment { @@ -172,4 +173,4 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo } -}*/ +} From da5862ab6f180077ca18c2691d7a7a1e3cbf30a5 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 14:02:08 +0200 Subject: [PATCH 423/538] InitialiZe --- .../akka/remote/netty/NettySSLSupport.scala | 16 ++++++++-------- .../remote/Ticket1978CommunicationSpec.scala | 17 ++++++----------- .../UntypedCoordinatedIncrementTest.java | 2 +- .../akka/transactor/UntypedTransactorTest.java | 2 +- 4 files changed, 16 insertions(+), 21 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 7e006373c2..4574c65082 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -21,9 +21,9 @@ private[akka] object NettySSLSupport { * Construct a SSLHandler which can be inserted into a Netty server/client pipeline */ def apply(settings: NettySettings, log: LoggingAdapter, isClient: Boolean): SslHandler = - if (isClient) initialiseClientSSL(settings, log) else initialiseServerSSL(settings, log) + if (isClient) initializeClientSSL(settings, log) else initializeServerSSL(settings, log) - def initialiseCustomSecureRandom(rngName: Option[String], sourceOfRandomness: Option[String], log: LoggingAdapter): SecureRandom = { + def initializeCustomSecureRandom(rngName: Option[String], 
sourceOfRandomness: Option[String], log: LoggingAdapter): SecureRandom = { /** * According to this bug report: http://bugs.sun.com/view_bug.do?bug_id=6202721 * Using /dev/./urandom is only necessary when using SHA1PRNG on Linux @@ -53,7 +53,7 @@ private[akka] object NettySSLSupport { rng } - private def initialiseClientSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { + private def initializeClientSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { log.debug("Client SSL is enabled, initialising ...") ((settings.SSLTrustStore, settings.SSLTrustStorePassword, settings.SSLProtocol) match { case (Some(trustStore), Some(password), Some(protocol)) ⇒ constructClientContext(settings, log, trustStore, password, protocol) @@ -71,7 +71,7 @@ private[akka] object NettySSLSupport { new SslHandler(sslEngine) case None ⇒ throw new GeneralSecurityException( - """Failed to initialise client SSL because SSL context could not be found." + + """Failed to initialize client SSL because SSL context could not be found." + "Make sure your settings are correct: [trust-store: %s] [trust-store-password: %s] [protocol: %s]""".format( settings.SSLTrustStore, settings.SSLTrustStorePassword, @@ -86,7 +86,7 @@ private[akka] object NettySSLSupport { trustStore.load(new FileInputStream(trustStorePath), trustStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? 
trustManagerFactory.init(trustStore) val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers - Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initialiseCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) case e: IOException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) @@ -94,7 +94,7 @@ private[akka] object NettySSLSupport { } } - private def initialiseServerSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { + private def initializeServerSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { log.debug("Server SSL is enabled, initialising ...") ((settings.SSLKeyStore, settings.SSLKeyStorePassword, settings.SSLProtocol) match { @@ -109,7 +109,7 @@ private[akka] object NettySSLSupport { sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) new SslHandler(sslEngine) case None ⇒ throw new GeneralSecurityException( - """Failed to initialise server SSL because SSL context could not be found. + """Failed to initialize server SSL because SSL context could not be found. Make sure your settings are correct: [key-store: %s] [key-store-password: %s] [protocol: %s]""".format( settings.SSLKeyStore, settings.SSLKeyStorePassword, @@ -123,7 +123,7 @@ private[akka] object NettySSLSupport { val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) keyStore.load(new FileInputStream(keyStorePath), keyStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? 
factory.init(keyStore, keyStorePassword.toCharArray) - Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initialiseCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) case e: IOException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 29860d1ec9..8bc6adb7ac 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -43,7 +43,7 @@ object Configuration { """ def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = if (try { - NettySSLSupport.initialiseCustomSecureRandom(Some(cipher), None, NoLogging) ne null + NettySSLSupport.initializeCustomSecureRandom(Some(cipher), None, NoLogging) ne null } catch { case _: IllegalArgumentException ⇒ false // Cannot match against the message since the message might be localized :S case _: java.security.NoSuchAlgorithmException ⇒ false @@ -80,16 +80,7 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo import RemoteCommunicationSpec._ - val conf = ConfigFactory.parseString("akka.remote.netty.port=12346").withFallback(system.settings.config) - val other = ActorSystem("remote-sys", conf) - - val remote = other.actorOf(Props(new Actor { - def receive = { - case "ping" ⇒ sender ! 
(("pong", sender)) - } - }), "echo") - - val here = system.actorFor("akka://remote-sys@localhost:12346/user/echo") + val other = ActorSystem("remote-sys", ConfigFactory.parseString("akka.remote.netty.port=12346").withFallback(system.settings.config)) override def atTermination() { other.shutdown() @@ -97,6 +88,10 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo "SSL Remoting" must { if (cipherEnabledconfig._2) { + val remote = other.actorOf(Props(new Actor { def receive = { case "ping" ⇒ sender ! (("pong", sender)) } }), "echo") + + val here = system.actorFor("akka://remote-sys@localhost:12346/user/echo") + "support remote look-ups" in { here ! "ping" expectMsgPF() { diff --git a/akka-transactor/src/test/java/akka/transactor/UntypedCoordinatedIncrementTest.java b/akka-transactor/src/test/java/akka/transactor/UntypedCoordinatedIncrementTest.java index 60a887f554..36c063feaa 100644 --- a/akka-transactor/src/test/java/akka/transactor/UntypedCoordinatedIncrementTest.java +++ b/akka-transactor/src/test/java/akka/transactor/UntypedCoordinatedIncrementTest.java @@ -57,7 +57,7 @@ public class UntypedCoordinatedIncrementTest { Timeout timeout = new Timeout(timeoutSeconds, TimeUnit.SECONDS); @Before - public void initialise() { + public void initialize() { counters = new ArrayList(); for (int i = 1; i <= numCounters; i++) { final String name = "counter" + i; diff --git a/akka-transactor/src/test/java/akka/transactor/UntypedTransactorTest.java b/akka-transactor/src/test/java/akka/transactor/UntypedTransactorTest.java index cadc4828b1..b7dc99389a 100644 --- a/akka-transactor/src/test/java/akka/transactor/UntypedTransactorTest.java +++ b/akka-transactor/src/test/java/akka/transactor/UntypedTransactorTest.java @@ -58,7 +58,7 @@ public class UntypedTransactorTest { Timeout timeout = new Timeout(timeoutSeconds, TimeUnit.SECONDS); @Before - public void initialise() { + public void initialize() { counters = new ArrayList(); for (int i = 1; i <= 
numCounters; i++) { final String name = "counter" + i; From 67a52ea5e117bd7d60c81365dc181f0d8f4831da Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 18 Jun 2012 14:10:57 +0200 Subject: [PATCH 424/538] Shorter gossip interval, until we optimize join, see #2239 --- .../src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 6f3ddfc866..ef420ab302 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -22,6 +22,8 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" akka.cluster { nr-of-deputy-nodes = 0 + # FIXME remove this (use default) when ticket #2239 has been fixed + gossip-interval = 400 ms } akka.loglevel = INFO """)) From c59f058640a9681028d593623d49acec7f23c194 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 14:29:50 +0200 Subject: [PATCH 425/538] Removing dead code as detected by The Doctor --- .../src/main/scala/akka/zeromq/ConcurrentSocketActor.scala | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index fa1da6e4ba..2e23d1da79 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -144,9 +144,7 @@ private[zeromq] class ConcurrentSocketActor(params: Seq[SocketOption]) extends A override def preRestart(reason: Throwable, message: Option[Any]): Unit = context.children foreach context.stop //Do not call postStop - override def postRestart(reason: Throwable): Unit = { - if (pendingSends.nonEmpty) self ! 
Flush // If we're restarting we might want to resume sending the messages - } + override def postRestart(reason: Throwable): Unit = () // Do nothing override def postStop: Unit = try { if (socket != null) { From 914a83b291da6b3018cf1085c4f9a3f60ec511fa Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 14:31:57 +0200 Subject: [PATCH 426/538] Fixing bug in flushMessage --- .../src/main/scala/akka/zeromq/ConcurrentSocketActor.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index 2e23d1da79..71b7b185f0 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -155,7 +155,7 @@ private[zeromq] class ConcurrentSocketActor(params: Seq[SocketOption]) extends A @tailrec private def flushMessage(i: Seq[Frame]): Boolean = if (i.isEmpty) - false + true else { val head = i.head val tail = i.tail From ceb7d1515abf39699c8f703f037d780ae777005c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 14:55:49 +0200 Subject: [PATCH 427/538] Switching strategy for SSL detection to try to initialize a Client SslHandler --- .../akka/remote/netty/NettySSLSupport.scala | 61 +++++++++---------- .../remote/Ticket1978CommunicationSpec.scala | 17 +++--- 2 files changed, 40 insertions(+), 38 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 4574c65082..9440c09c95 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -53,8 +53,23 @@ private[akka] object NettySSLSupport { rng } - private def initializeClientSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { + def 
initializeClientSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { log.debug("Client SSL is enabled, initialising ...") + + def constructClientContext(settings: NettySettings, log: LoggingAdapter, trustStorePath: String, trustStorePassword: String, protocol: String): Option[SSLContext] = + try { + val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) + val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) + trustStore.load(new FileInputStream(trustStorePath), trustStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? + trustManagerFactory.init(trustStore) + val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } + } catch { + case e: FileNotFoundException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) + case e: IOException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) + case e: GeneralSecurityException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because SSL context could not be constructed", e) + } + ((settings.SSLTrustStore, settings.SSLTrustStorePassword, settings.SSLProtocol) match { case (Some(trustStore), Some(password), Some(protocol)) ⇒ constructClientContext(settings, log, trustStore, password, protocol) case (trustStore, password, protocol) ⇒ throw new GeneralSecurityException( @@ -79,24 +94,22 @@ private[akka] object NettySSLSupport { } } - private def constructClientContext(settings: NettySettings, log: LoggingAdapter, trustStorePath: String, trustStorePassword: String, protocol: String): Option[SSLContext] = { - try { - val trustManagerFactory = 
TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) - val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) - trustStore.load(new FileInputStream(trustStorePath), trustStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? - trustManagerFactory.init(trustStore) - val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers - Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } - } catch { - case e: FileNotFoundException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) - case e: IOException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) - case e: GeneralSecurityException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because SSL context could not be constructed", e) - } - } - - private def initializeServerSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { + def initializeServerSSL(settings: NettySettings, log: LoggingAdapter): SslHandler = { log.debug("Server SSL is enabled, initialising ...") + def constructServerContext(settings: NettySettings, log: LoggingAdapter, keyStorePath: String, keyStorePassword: String, protocol: String): Option[SSLContext] = + try { + val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) + val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) + keyStore.load(new FileInputStream(keyStorePath), keyStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? 
+ factory.init(keyStore, keyStorePassword.toCharArray) + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } + } catch { + case e: FileNotFoundException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) + case e: IOException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) + case e: GeneralSecurityException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because SSL context could not be constructed", e) + } + ((settings.SSLKeyStore, settings.SSLKeyStorePassword, settings.SSLProtocol) match { case (Some(keyStore), Some(password), Some(protocol)) ⇒ constructServerContext(settings, log, keyStore, password, protocol) case (keyStore, password, protocol) ⇒ throw new GeneralSecurityException( @@ -116,18 +129,4 @@ private[akka] object NettySSLSupport { settings.SSLProtocol)) } } - - private def constructServerContext(settings: NettySettings, log: LoggingAdapter, keyStorePath: String, keyStorePassword: String, protocol: String): Option[SSLContext] = { - try { - val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) - val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) - keyStore.load(new FileInputStream(keyStorePath), keyStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? 
- factory.init(keyStore, keyStorePassword.toCharArray) - Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } - } catch { - case e: FileNotFoundException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) - case e: IOException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) - case e: GeneralSecurityException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because SSL context could not be constructed", e) - } - } } diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 8bc6adb7ac..82d9412120 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -9,9 +9,9 @@ import com.typesafe.config._ import akka.dispatch.{ Await, Future } import akka.pattern.ask import java.io.File -import java.security.{ SecureRandom, PrivilegedAction, AccessController } -import netty.NettySSLSupport import akka.event.{ NoLogging, LoggingAdapter } +import java.security.{ NoSuchAlgorithmException, SecureRandom, PrivilegedAction, AccessController } +import netty.{ NettySettings, NettySSLSupport } object Configuration { // set this in your JAVA_OPTS to see all ssl debug info: "-Djavax.net.debug=ssl,keymanager" @@ -42,12 +42,15 @@ object Configuration { } """ - def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = if (try { - NettySSLSupport.initializeCustomSecureRandom(Some(cipher), None, NoLogging) ne null + def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { + 
//NettySSLSupport.initializeCustomSecureRandom(Some(cipher), None, NoLogging) ne null + val config = ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", "))) + val settings = new NettySettings(config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty"), "pigdog") + (NettySSLSupport.initializeClientSSL(settings, NoLogging) ne null) || (throw new NoSuchAlgorithmException(cipher)) + (cipher, true, config) } catch { - case _: IllegalArgumentException ⇒ false // Cannot match against the message since the message might be localized :S - case _: java.security.NoSuchAlgorithmException ⇒ false - }) (cipher, true, ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) else (cipher, false, AkkaSpec.testConf) + case (_: IllegalArgumentException) | (_: NoSuchAlgorithmException) ⇒ (cipher, false, AkkaSpec.testConf) // Cannot match against the message since the message might be localized :S + } } import Configuration.getCipherConfig From f63409e0e7e63e3758aed3df041e57cd965a048b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 15:05:24 +0200 Subject: [PATCH 428/538] Removing commented out code in the Ticket1978CommunicationSpec --- .../src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 82d9412120..75b437afce 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -43,7 +43,6 @@ object Configuration { """ def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { - //NettySSLSupport.initializeCustomSecureRandom(Some(cipher), None, NoLogging) ne null val config = 
ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", "))) val settings = new NettySettings(config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty"), "pigdog") (NettySSLSupport.initializeClientSSL(settings, NoLogging) ne null) || (throw new NoSuchAlgorithmException(cipher)) From 49586bd01dbcae3e6f0b7bba9f8fba841194177b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 15:25:17 +0200 Subject: [PATCH 429/538] Change Member ordering so it sorts members by host and port with the exception that it puts all members that are in MemberStatus.EXITING last. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To fix LEADER leaving and allow handoff to new leader before moving old leader from EXITING -> REMOVED. Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 528672363d..cc91680b4a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -105,7 +105,7 @@ class Member(val address: Address, val status: MemberStatus) extends ClusterMess } /** - * Factory and Utility module for Member instances. + * Module with factory and ordering methods for Member instances. */ object Member { @@ -119,10 +119,13 @@ object Member { } /** - * `Member` ordering type class, sorts members by `Address`. + * `Member` ordering type class, sorts members by host and port with the exception that + * it puts all members that are in MemberStatus.EXITING last. 
*/ - implicit val ordering: Ordering[Member] = new Ordering[Member] { - def compare(x: Member, y: Member) = addressOrdering.compare(x.address, y.address) + implicit val ordering: Ordering[Member] = Ordering.fromLessThan[Member] { (a, b) ⇒ + if (a.status == MemberStatus.Exiting && b.status != MemberStatus.Exiting) false + else if (a.status != MemberStatus.Exiting && b.status == MemberStatus.Exiting) true + else addressOrdering.compare(a.address, b.address) < 0 } def apply(address: Address, status: MemberStatus): Member = new Member(address, status) @@ -301,8 +304,7 @@ case class Gossip( // 4. merge members by selecting the single Member with highest MemberStatus out of the Member groups, // and exclude unreachable - val mergedMembers = Gossip.emptyMembers ++ Member.pickHighestPriority(this.members, that.members). - filterNot(mergedUnreachable.contains) + val mergedMembers = Gossip.emptyMembers ++ Member.pickHighestPriority(this.members, that.members).filterNot(mergedUnreachable.contains) // 5. fresh seen table val mergedSeen = Map.empty[Address, VectorClock] @@ -1109,10 +1111,10 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val ( newGossip: Gossip, hasChangedState: Boolean, - upMembers: Set[Member], - exitingMembers: Set[Member], - removedMembers: Set[Member], - unreachableButNotDownedMembers: Set[Member]) = + upMembers, + exitingMembers, + removedMembers, + unreachableButNotDownedMembers) = if (convergence(localGossip).isDefined) { // we have convergence - so we can't have unreachable nodes From 6b02c48be9904e15537ed2a02fc77eef98bd070e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 15:25:46 +0200 Subject: [PATCH 430/538] Added spec testing the Ordering[Address] and Ordering[Member]. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/cluster/MemberOrderingSpec.scala | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala new file mode 100644 index 0000000000..7528750a22 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala @@ -0,0 +1,108 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.actor.{ Address, AddressFromURIString } +import akka.testkit.AkkaSpec +import java.net.InetSocketAddress +import scala.collection.immutable.SortedSet + +class MemberOrderingSpec extends AkkaSpec { + import Member.ordering + import Member.addressOrdering + import MemberStatus._ + + "An Ordering[Member]" must { + + "order non-exiting members by host:port" in { + val members = SortedSet.empty[Member] + + Member(AddressFromURIString("akka://sys@darkstar:1112"), Up) + + Member(AddressFromURIString("akka://sys@darkstar:1113"), Joining) + + Member(AddressFromURIString("akka://sys@darkstar:1111"), Up) + + val seq = members.toSeq + seq.size must equal(3) + seq(0) must equal(Member(AddressFromURIString("akka://sys@darkstar:1111"), Up)) + seq(1) must equal(Member(AddressFromURIString("akka://sys@darkstar:1112"), Up)) + seq(2) must equal(Member(AddressFromURIString("akka://sys@darkstar:1113"), Joining)) + } + + "order exiting members by last" in { + val members = SortedSet.empty[Member] + + Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting) + + Member(AddressFromURIString("akka://sys@darkstar:1113"), Up) + + Member(AddressFromURIString("akka://sys@darkstar:1111"), Joining) + + val seq = members.toSeq + seq.size must equal(3) + seq(0) must 
equal(Member(AddressFromURIString("akka://sys@darkstar:1111"), Joining)) + seq(1) must equal(Member(AddressFromURIString("akka://sys@darkstar:1113"), Up)) + seq(2) must equal(Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting)) + } + + "order multiple exiting members by last but internally by host:port" in { + val members = SortedSet.empty[Member] + + Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting) + + Member(AddressFromURIString("akka://sys@darkstar:1113"), Leaving) + + Member(AddressFromURIString("akka://sys@darkstar:1111"), Up) + + Member(AddressFromURIString("akka://sys@darkstar:1110"), Exiting) + + val seq = members.toSeq + seq.size must equal(4) + seq(0) must equal(Member(AddressFromURIString("akka://sys@darkstar:1111"), Up)) + seq(1) must equal(Member(AddressFromURIString("akka://sys@darkstar:1113"), Leaving)) + seq(2) must equal(Member(AddressFromURIString("akka://sys@darkstar:1110"), Exiting)) + seq(3) must equal(Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting)) + } + } + + "An Ordering[Address]" must { + + "order addresses by port" in { + val addresses = SortedSet.empty[Address] + + AddressFromURIString("akka://sys@darkstar:1112") + + AddressFromURIString("akka://sys@darkstar:1113") + + AddressFromURIString("akka://sys@darkstar:1110") + + AddressFromURIString("akka://sys@darkstar:1111") + + val seq = addresses.toSeq + seq.size must equal(4) + seq(0) must equal(AddressFromURIString("akka://sys@darkstar:1110")) + seq(1) must equal(AddressFromURIString("akka://sys@darkstar:1111")) + seq(2) must equal(AddressFromURIString("akka://sys@darkstar:1112")) + seq(3) must equal(AddressFromURIString("akka://sys@darkstar:1113")) + } + + "order addresses by hostname" in { + val addresses = SortedSet.empty[Address] + + AddressFromURIString("akka://sys@darkstar2:1110") + + AddressFromURIString("akka://sys@darkstar1:1110") + + AddressFromURIString("akka://sys@darkstar3:1110") + + 
AddressFromURIString("akka://sys@darkstar0:1110") + + val seq = addresses.toSeq + seq.size must equal(4) + seq(0) must equal(AddressFromURIString("akka://sys@darkstar0:1110")) + seq(1) must equal(AddressFromURIString("akka://sys@darkstar1:1110")) + seq(2) must equal(AddressFromURIString("akka://sys@darkstar2:1110")) + seq(3) must equal(AddressFromURIString("akka://sys@darkstar3:1110")) + } + + "order addresses by hostname and port" in { + val addresses = SortedSet.empty[Address] + + AddressFromURIString("akka://sys@darkstar2:1110") + + AddressFromURIString("akka://sys@darkstar0:1111") + + AddressFromURIString("akka://sys@darkstar2:1111") + + AddressFromURIString("akka://sys@darkstar0:1110") + + val seq = addresses.toSeq + seq.size must equal(4) + seq(0) must equal(AddressFromURIString("akka://sys@darkstar0:1110")) + seq(1) must equal(AddressFromURIString("akka://sys@darkstar0:1111")) + seq(2) must equal(AddressFromURIString("akka://sys@darkstar2:1110")) + seq(3) must equal(AddressFromURIString("akka://sys@darkstar2:1111")) + } + } +} From 6e60d51263070fcf2897e79943289db47a1b14c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 15:26:23 +0200 Subject: [PATCH 431/538] Reenabled LeaderLeavingSpec and added successful leader-handoff assertion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../scala/akka/cluster/LeaderLeavingSpec.scala | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index f3274583b5..37312a7351 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -21,9 +21,8 @@ object LeaderLeavingMultiJvmSpec extends MultiNodeConfig { akka.cluster { leader-actions-interval = 5 s # 
increase the leader action task frequency to make sure we get a chance to test the LEAVING state unreachable-nodes-reaper-interval = 30 s - } - """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + }""") + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class LeaderLeavingMultiJvmNode1 extends LeaderLeavingSpec with FailureDetectorPuppetStrategy @@ -42,7 +41,7 @@ abstract class LeaderLeavingSpec "A LEADER that is LEAVING" must { - "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest ignore { + "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) @@ -53,8 +52,11 @@ abstract class LeaderLeavingSpec cluster.leave(oldLeaderAddress) testConductor.enter("leader-left") + // verify that a NEW LEADER have taken over + awaitCond(!cluster.isLeader) + // verify that the LEADER is shut down - awaitCond(!cluster.isRunning) + awaitCond(!cluster.isRunning, 30.seconds.dilated) // verify that the LEADER is REMOVED awaitCond(cluster.status == MemberStatus.Removed) From bf32ca0c37ad82518707db1cf46cabdc289d3451 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 17:18:17 +0200 Subject: [PATCH 432/538] Minor code restructuring for the SSL tests --- .../scala/akka/remote/Ticket1978CommunicationSpec.scala | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 75b437afce..0524aed6aa 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -24,7 +24,6 @@ object Configuration { actor.provider = "akka.remote.RemoteActorRefProvider" remote.netty { hostname = localhost - port 
= 12345 ssl { enable = on trust-store = "%s" @@ -43,9 +42,12 @@ object Configuration { """ def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { - val config = ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", "))) + + val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) val settings = new NettySettings(config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty"), "pigdog") - (NettySSLSupport.initializeClientSSL(settings, NoLogging) ne null) || (throw new NoSuchAlgorithmException(cipher)) + val rng = NettySSLSupport.initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, NoLogging) + rng.nextInt() // Take it for a spin + rng.getAlgorithm == cipher || (throw new NoSuchAlgorithmException(cipher)) (cipher, true, config) } catch { case (_: IllegalArgumentException) | (_: NoSuchAlgorithmException) ⇒ (cipher, false, AkkaSpec.testConf) // Cannot match against the message since the message might be localized :S From a2c15f83217b94df2a0ee46e7aaf18dbcb877be3 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 17:57:09 +0200 Subject: [PATCH 433/538] Trying to embed smarted detection for the SSL tests, is this a keeper? 
--- .../remote/Ticket1978CommunicationSpec.scala | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 0524aed6aa..b634737767 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -12,6 +12,9 @@ import java.io.File import akka.event.{ NoLogging, LoggingAdapter } import java.security.{ NoSuchAlgorithmException, SecureRandom, PrivilegedAction, AccessController } import netty.{ NettySettings, NettySSLSupport } +import javax.net.ssl.SSLException +import akka.util.{ Timeout, Duration } +import akka.util.duration._ object Configuration { // set this in your JAVA_OPTS to see all ssl debug info: "-Djavax.net.debug=ssl,keymanager" @@ -44,13 +47,24 @@ object Configuration { def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) - val settings = new NettySettings(config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty"), "pigdog") + val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") + val settings = new NettySettings(fullConfig, "placeholder") + val rng = NettySSLSupport.initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, NoLogging) - rng.nextInt() // Take it for a spin - rng.getAlgorithm == cipher || (throw new NoSuchAlgorithmException(cipher)) + + rng.nextInt() // Has to work + settings.SSLRandomNumberGenerator foreach { sRng ⇒ rng.getAlgorithm == sRng || (throw new NoSuchAlgorithmException(sRng)) } + + val engine = 
NettySSLSupport.initializeServerSSL(settings, NoLogging).getEngine + val gotAllSupported = enabled.toSet -- engine.getSupportedCipherSuites.toSet + val gotAllEnabled = enabled.toSet -- engine.getEnabledCipherSuites.toSet + gotAllSupported.isEmpty || (throw new IllegalArgumentException("Cipher Suite not supported: " + gotAllSupported)) + gotAllEnabled.isEmpty || (throw new IllegalArgumentException("Cipher Suite not enabled: " + gotAllEnabled)) + engine.getSupportedProtocols.contains(settings.SSLProtocol.get) || (throw new IllegalArgumentException(settings.SSLProtocol.get)) + (cipher, true, config) } catch { - case (_: IllegalArgumentException) | (_: NoSuchAlgorithmException) ⇒ (cipher, false, AkkaSpec.testConf) // Cannot match against the message since the message might be localized :S + case (_: IllegalArgumentException) | (_: NoSuchAlgorithmException) | (_: SSLException) ⇒ (cipher, false, AkkaSpec.testConf) // Cannot match against the message since the message might be localized :S } } @@ -80,7 +94,9 @@ class Ticket1978DefaultRNGSecureSpec extends Ticket1978CommunicationSpec(getCiph @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(("NonExistingRNG", false, AkkaSpec.testConf)) -abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boolean, Config)) extends AkkaSpec(cipherEnabledconfig._3) with ImplicitSender with DefaultTimeout { +abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boolean, Config)) extends AkkaSpec(cipherEnabledconfig._3) with ImplicitSender { + + implicit val timeout: Timeout = Timeout(30 seconds) import RemoteCommunicationSpec._ From db53285272a39e25dd997e348b7c04169e15091f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 18:21:12 +0200 Subject: [PATCH 434/538] #2245 - Adding nullchecks in EventStream --- .../src/test/scala/akka/event/EventStreamSpec.scala | 11 +++++++++++ 
.../src/main/scala/akka/event/EventStream.scala | 3 +++ 2 files changed, 14 insertions(+) diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index a8cd32f5d3..d1846860f3 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -74,6 +74,17 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { } } + "not allow null as subscriber" in { + val bus = new EventStream(true) + intercept[IllegalArgumentException] { bus.subscribe(null, classOf[M]) }.getMessage must be("subscriber is null") + } + + "not allow null as unsubscriber" in { + val bus = new EventStream(true) + intercept[IllegalArgumentException] { bus.unsubscribe(null, classOf[M]) }.getMessage must be("subscriber is null") + intercept[IllegalArgumentException] { bus.unsubscribe(null) }.getMessage must be("subscriber is null") + } + "be able to log unhandled messages" in { val sys = ActorSystem("EventStreamSpecUnhandled", configUnhandled) try { diff --git a/akka-actor/src/main/scala/akka/event/EventStream.scala b/akka-actor/src/main/scala/akka/event/EventStream.scala index 172cf052ca..2cc9bf8c2b 100644 --- a/akka-actor/src/main/scala/akka/event/EventStream.scala +++ b/akka-actor/src/main/scala/akka/event/EventStream.scala @@ -39,17 +39,20 @@ class EventStream(private val debug: Boolean = false) extends LoggingBus with Su } override def subscribe(subscriber: ActorRef, channel: Class[_]): Boolean = { + if (subscriber eq null) throw new IllegalArgumentException("subscriber is null") if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "subscribing " + subscriber + " to channel " + channel)) super.subscribe(subscriber, channel) } override def unsubscribe(subscriber: ActorRef, channel: Class[_]): Boolean = { + if (subscriber eq null) throw new IllegalArgumentException("subscriber is null") val ret = 
super.unsubscribe(subscriber, channel) if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from channel " + channel)) ret } override def unsubscribe(subscriber: ActorRef) { + if (subscriber eq null) throw new IllegalArgumentException("subscriber is null") super.unsubscribe(subscriber) if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from all channels")) } From 8eca3692c859de91022e6dec7f891ff5525f16c7 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 18:47:35 +0200 Subject: [PATCH 435/538] Switching to 30s timeout for the SSL tests + loading from classloader rather than file path --- .../remote/Ticket1978CommunicationSpec.scala | 93 ++++++++++--------- 1 file changed, 50 insertions(+), 43 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index b634737767..505ce180cf 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -19,9 +19,8 @@ import akka.util.duration._ object Configuration { // set this in your JAVA_OPTS to see all ssl debug info: "-Djavax.net.debug=ssl,keymanager" // The certificate will expire in 2109 - private val trustStore = getPath("truststore") - private val keyStore = getPath("keystore") - private def getPath(name: String): String = (new File("akka-remote/src/test/resources/" + name)).getAbsolutePath.replace("\\", "\\\\") + private val trustStore = getClass.getClassLoader.getResource("truststore").getPath + private val keyStore = getClass.getClassLoader.getResource("keystore").getPath private val conf = """ akka { actor.provider = "akka.remote.RemoteActorRefProvider" @@ -114,15 +113,17 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo "support remote look-ups" in { 
here ! "ping" - expectMsgPF() { + expectMsgPF(timeout.duration) { case ("pong", s: AnyRef) if s eq testActor ⇒ true } } "send error message for wrong address" in { - EventFilter.error(start = "dropping", occurrences = 1).intercept { - system.actorFor("akka://remotesys@localhost:12346/user/echo") ! "ping" - }(other) + within(timeout.duration) { + EventFilter.error(start = "dropping", occurrences = 1).intercept { + system.actorFor("akka://remotesys@localhost:12346/user/echo") ! "ping" + }(other) + } } "support ask" in { @@ -133,52 +134,58 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo } "send dead letters on remote if actor does not exist" in { - EventFilter.warning(pattern = "dead.*buh", occurrences = 1).intercept { - system.actorFor("akka://remote-sys@localhost:12346/does/not/exist") ! "buh" - }(other) + within(timeout.duration) { + EventFilter.warning(pattern = "dead.*buh", occurrences = 1).intercept { + system.actorFor("akka://remote-sys@localhost:12346/does/not/exist") ! "buh" + }(other) + } } "create and supervise children on remote node" in { - val r = system.actorOf(Props[Echo], "blub") - r.path.toString must be === "akka://remote-sys@localhost:12346/remote/Ticket1978CommunicationSpec@localhost:12345/user/blub" - r ! 42 - expectMsg(42) - EventFilter[Exception]("crash", occurrences = 1).intercept { - r ! new Exception("crash") - }(other) - expectMsg("preRestart") - r ! 42 - expectMsg(42) - system.stop(r) - expectMsg("postStop") + within(timeout.duration) { + val r = system.actorOf(Props[Echo], "blub") + r.path.toString must be === "akka://remote-sys@localhost:12346/remote/Ticket1978CommunicationSpec@localhost:12345/user/blub" + r ! 42 + expectMsg(42) + EventFilter[Exception]("crash", occurrences = 1).intercept { + r ! new Exception("crash") + }(other) + expectMsg("preRestart") + r ! 
42 + expectMsg(42) + system.stop(r) + expectMsg("postStop") + } } "look-up actors across node boundaries" in { - val l = system.actorOf(Props(new Actor { - def receive = { - case (p: Props, n: String) ⇒ sender ! context.actorOf(p, n) - case s: String ⇒ sender ! context.actorFor(s) - } - }), "looker") - l ! (Props[Echo], "child") - val r = expectMsgType[ActorRef] - r ! (Props[Echo], "grandchild") - val remref = expectMsgType[ActorRef] - remref.isInstanceOf[LocalActorRef] must be(true) - val myref = system.actorFor(system / "looker" / "child" / "grandchild") - myref.isInstanceOf[RemoteActorRef] must be(true) - myref ! 43 - expectMsg(43) - lastSender must be theSameInstanceAs remref - r.asInstanceOf[RemoteActorRef].getParent must be(l) - system.actorFor("/user/looker/child") must be theSameInstanceAs r - Await.result(l ? "child/..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l - Await.result(system.actorFor(system / "looker" / "child") ? "..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l + within(timeout.duration) { + val l = system.actorOf(Props(new Actor { + def receive = { + case (p: Props, n: String) ⇒ sender ! context.actorOf(p, n) + case s: String ⇒ sender ! context.actorFor(s) + } + }), "looker") + l ! (Props[Echo], "child") + val r = expectMsgType[ActorRef] + r ! (Props[Echo], "grandchild") + val remref = expectMsgType[ActorRef] + remref.isInstanceOf[LocalActorRef] must be(true) + val myref = system.actorFor(system / "looker" / "child" / "grandchild") + myref.isInstanceOf[RemoteActorRef] must be(true) + myref ! 43 + expectMsg(43) + lastSender must be theSameInstanceAs remref + r.asInstanceOf[RemoteActorRef].getParent must be(l) + system.actorFor("/user/looker/child") must be theSameInstanceAs r + Await.result(l ? "child/..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l + Await.result(system.actorFor(system / "looker" / "child") ? 
"..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l + } } "not fail ask across node boundaries" in { val f = for (_ ← 1 to 1000) yield here ? "ping" mapTo manifest[(String, ActorRef)] - Await.result(Future.sequence(f), remaining).map(_._1).toSet must be(Set("pong")) + Await.result(Future.sequence(f), timeout.duration).map(_._1).toSet must be(Set("pong")) } } else { "not be run when the cipher is not supported by the platform this test is currently being executed on" ignore { From 4f0fe8723bd2d8c0b72a88cbc7dffa2f0f954dc7 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Mon, 18 Jun 2012 19:00:36 +0200 Subject: [PATCH 436/538] Using setRuntimeClass() avoids the need for the user bundle to import akka.osgi.blueprint --- .../main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala b/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala index 245ea538b6..b20e959f23 100644 --- a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala +++ b/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala @@ -80,7 +80,7 @@ class NamespaceHandler extends org.apache.aries.blueprint.NamespaceHandler { factory.setId(findAvailableId(context)) factory.setScope(BeanMetadata.SCOPE_SINGLETON) factory.setProcessor(true) - factory.setClassName(classOf[BlueprintActorSystemFactory].getName) + factory.setRuntimeClass(classOf[BlueprintActorSystemFactory]) factory.setDestroyMethod(DESTROY_METHOD_NAME) From a9f258b23e54eb6b78056c81bdf0402e2f5a7fd6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 19:31:36 +0200 Subject: [PATCH 437/538] Trying to work around the fact that intercept doesn't respect remaining but invents its own default timeout --- .../scala/akka/remote/Ticket1978CommunicationSpec.scala | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 505ce180cf..eb7c249b9c 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -24,6 +24,12 @@ object Configuration { private val conf = """ akka { actor.provider = "akka.remote.RemoteActorRefProvider" + test { + single-expect-default = 30s + filter-leeway = 30s + default-timeout = 30s + } + remote.netty { hostname = localhost ssl { From 16530fd88a8ead2b77b3c03e25ffb10bd281732d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 19:58:23 +0200 Subject: [PATCH 438/538] Deactivating the SSL tests for now --- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index eb7c249b9c..e51b3afa01 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -51,6 +51,8 @@ object Configuration { def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { + if (true) throw new IllegalArgumentException("This is not working properly yet") + val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") val settings = new NettySettings(fullConfig, "placeholder") From e730432cb07f060a9e7b11d0ce827faae5fdb8da Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 18 Jun 2012 20:07:58 +0200 Subject: [PATCH 439/538] Switching to 256bit for 
the Ticket1978AES128CounterRNGSecureSpec --- .../akka/remote/Ticket1978CommunicationSpec.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index e51b3afa01..4ebf37e66c 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -25,9 +25,9 @@ object Configuration { akka { actor.provider = "akka.remote.RemoteActorRefProvider" test { - single-expect-default = 30s - filter-leeway = 30s - default-timeout = 30s + single-expect-default = 10s + filter-leeway = 10s + default-timeout = 10s } remote.netty { @@ -87,7 +87,7 @@ class Ticket1978AES128CounterRNGFastSpec extends Ticket1978CommunicationSpec(get * Both of the Secure variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) +class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_256_CBC_SHA")) /** * Both of the Secure variants require access to the Internet to access random.org. 
@@ -103,7 +103,7 @@ class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(("N abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boolean, Config)) extends AkkaSpec(cipherEnabledconfig._3) with ImplicitSender { - implicit val timeout: Timeout = Timeout(30 seconds) + implicit val timeout: Timeout = Timeout(5 seconds) import RemoteCommunicationSpec._ From 4f3f4aadba4d6f173b3774fe149e6ea838a4902c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 10:36:09 +0200 Subject: [PATCH 440/538] I think I nailed it. --- akka-remote/src/main/resources/reference.conf | 4 ++-- .../scala/akka/remote/netty/NettySSLSupport.scala | 10 ++++++---- .../main/scala/akka/remote/netty/Settings.scala | 2 +- .../akka/remote/Ticket1978CommunicationSpec.scala | 15 +++++++-------- .../scala/akka/remote/Ticket1978ConfigSpec.scala | 2 +- 5 files changed, 17 insertions(+), 16 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index e2c0a45346..9cd7b767be 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -193,7 +193,7 @@ akka { # Examples: [ "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" ] # You need to install the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256 # More info here: http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider - supported-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"] + enabled-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"] # Using /dev/./urandom is only necessary when using SHA1PRNG on Linux to prevent blocking # It is NOT as secure because it reuses the seed @@ -208,7 +208,7 @@ akka { # The following use one of 3 possible seed sources, depending on availability: /dev/random, random.org and SecureRandom (provided by Java) # "AES128CounterRNGSecure" # "AES256CounterRNGSecure" (Install JCE Unlimited Strength 
Jurisdiction Policy Files first) - # Setting a value here may require you to supply the appropriate cipher suite (see supported-algorithms section above) + # Setting a value here may require you to supply the appropriate cipher suite (see enabled-algorithms section above) random-number-generator = "" } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 9440c09c95..cca8662b2f 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -17,6 +17,10 @@ import akka.security.provider.AkkaProvider * Internal use only */ private[akka] object NettySSLSupport { + + val akka = new AkkaProvider + Security.addProvider(akka) + /** * Construct a SSLHandler which can be inserted into a Netty server/client pipeline */ @@ -34,8 +38,6 @@ private[akka] object NettySSLSupport { val rng = rngName match { case Some(r @ ("AES128CounterRNGFast" | "AES128CounterRNGSecure" | "AES256CounterRNGSecure")) ⇒ log.debug("SSL random number generator set to: {}", r) - val akka = new AkkaProvider - Security.addProvider(akka) SecureRandom.getInstance(r, akka) case Some("SHA1PRNG") ⇒ log.debug("SSL random number generator set to: SHA1PRNG") @@ -82,7 +84,7 @@ private[akka] object NettySSLSupport { log.debug("Using client SSL context to create SSLEngine ...") val sslEngine = context.createSSLEngine sslEngine.setUseClientMode(true) - sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) + sslEngine.setEnabledCipherSuites(settings.SSLEnabledAlgorithms.toArray.map(_.toString)) new SslHandler(sslEngine) case None ⇒ throw new GeneralSecurityException( @@ -119,7 +121,7 @@ private[akka] object NettySSLSupport { log.debug("Using server SSL context to create SSLEngine ...") val sslEngine = context.createSSLEngine sslEngine.setUseClientMode(false) - 
sslEngine.setEnabledCipherSuites(settings.SSLSupportedAlgorithms.toArray.map(_.toString)) + sslEngine.setEnabledCipherSuites(settings.SSLEnabledAlgorithms.toArray.map(_.toString)) new SslHandler(sslEngine) case None ⇒ throw new GeneralSecurityException( """Failed to initialize server SSL because SSL context could not be found. diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 024ed104c3..d168c67eca 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -106,7 +106,7 @@ private[akka] class NettySettings(config: Config, val systemName: String) { case password ⇒ Some(password) } - val SSLSupportedAlgorithms = getStringList("ssl.supported-algorithms").toArray.toSet + val SSLEnabledAlgorithms = getStringList("ssl.enabled-algorithms").toArray.toSet val SSLProtocol = getString("ssl.protocol") match { case "" ⇒ None diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 4ebf37e66c..c247f6ce19 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -37,7 +37,7 @@ object Configuration { trust-store = "%s" key-store = "%s" random-number-generator = "%s" - supported-algorithms = [%s] + enabled-algorithms = [%s] sha1prng-random-source = "/dev/./urandom" } } @@ -51,7 +51,7 @@ object Configuration { def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { - if (true) throw new IllegalArgumentException("This is not working properly yet") + if (false) throw new IllegalArgumentException("This is not working properly yet") val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, 
keyStore, cipher, enabled.mkString(", ")))) val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") @@ -62,16 +62,16 @@ object Configuration { rng.nextInt() // Has to work settings.SSLRandomNumberGenerator foreach { sRng ⇒ rng.getAlgorithm == sRng || (throw new NoSuchAlgorithmException(sRng)) } - val engine = NettySSLSupport.initializeServerSSL(settings, NoLogging).getEngine + val engine = NettySSLSupport.initializeClientSSL(settings, NoLogging).getEngine val gotAllSupported = enabled.toSet -- engine.getSupportedCipherSuites.toSet val gotAllEnabled = enabled.toSet -- engine.getEnabledCipherSuites.toSet gotAllSupported.isEmpty || (throw new IllegalArgumentException("Cipher Suite not supported: " + gotAllSupported)) gotAllEnabled.isEmpty || (throw new IllegalArgumentException("Cipher Suite not enabled: " + gotAllEnabled)) - engine.getSupportedProtocols.contains(settings.SSLProtocol.get) || (throw new IllegalArgumentException(settings.SSLProtocol.get)) + engine.getSupportedProtocols.contains(settings.SSLProtocol.get) || (throw new IllegalArgumentException("Protocol not supported: " + settings.SSLProtocol.get)) (cipher, true, config) } catch { - case (_: IllegalArgumentException) | (_: NoSuchAlgorithmException) | (_: SSLException) ⇒ (cipher, false, AkkaSpec.testConf) // Cannot match against the message since the message might be localized :S + case (_: IllegalArgumentException) | (_: NoSuchAlgorithmException) ⇒ (cipher, false, AkkaSpec.testConf) // Cannot match against the message since the message might be localized :S } } @@ -87,7 +87,7 @@ class Ticket1978AES128CounterRNGFastSpec extends Ticket1978CommunicationSpec(get * Both of the Secure variants require access to the Internet to access random.org. 
*/ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_256_CBC_SHA")) +class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) /** * Both of the Secure variants require access to the Internet to access random.org. @@ -111,6 +111,7 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo override def atTermination() { other.shutdown() + other.awaitTermination() } "SSL Remoting" must { @@ -161,8 +162,6 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo expectMsg("preRestart") r ! 42 expectMsg(42) - system.stop(r) - expectMsg("postStop") } } diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala index 4017f1cfcc..4c39b94087 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -40,7 +40,7 @@ akka { SSLTrustStore must be(Some("truststore")) SSLTrustStorePassword must be(Some("changeme")) SSLProtocol must be(Some("TLSv1")) - SSLSupportedAlgorithms must be(Set("TLS_RSA_WITH_AES_128_CBC_SHA")) + SSLEnabledAlgorithms must be(Set("TLS_RSA_WITH_AES_128_CBC_SHA")) SSLRandomSource must be(None) SSLRandomNumberGenerator must be(None) } From 32562652af8e0a4cf062daa7ac0ed56b2febcaee Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 11:09:54 +0200 Subject: [PATCH 441/538] remove debug statement --- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 3 --- 1 file changed, 3 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 
c247f6ce19..9536b983a7 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -50,9 +50,6 @@ object Configuration { """ def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { - - if (false) throw new IllegalArgumentException("This is not working properly yet") - val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") val settings = new NettySettings(fullConfig, "placeholder") From 09df0889dbcd535e603128bd08a175ee4baa287a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 11:39:05 +0200 Subject: [PATCH 442/538] Adding IAEs for null subscriber/classifier in ActorClassification --- .../src/main/scala/akka/event/EventBus.scala | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/event/EventBus.scala b/akka-actor/src/main/scala/akka/event/EventBus.scala index 6a5cc67cc4..cad7351bbb 100644 --- a/akka-actor/src/main/scala/akka/event/EventBus.scala +++ b/akka-actor/src/main/scala/akka/event/EventBus.scala @@ -324,7 +324,17 @@ trait ActorClassification { this: ActorEventBus with ActorClassifier ⇒ case some ⇒ some foreach { _ ! 
event } } - def subscribe(subscriber: Subscriber, to: Classifier): Boolean = associate(to, subscriber) - def unsubscribe(subscriber: Subscriber, from: Classifier): Boolean = dissociate(from, subscriber) - def unsubscribe(subscriber: Subscriber): Unit = dissociate(subscriber) + def subscribe(subscriber: Subscriber, to: Classifier): Boolean = + if (subscriber eq null) throw new IllegalArgumentException("Subscriber is null") + else if (to eq null) throw new IllegalArgumentException("Classifier is null") + else associate(to, subscriber) + + def unsubscribe(subscriber: Subscriber, from: Classifier): Boolean = + if (subscriber eq null) throw new IllegalArgumentException("Subscriber is null") + else if (from eq null) throw new IllegalArgumentException("Classifier is null") + else dissociate(from, subscriber) + + def unsubscribe(subscriber: Subscriber): Unit = + if (subscriber eq null) throw new IllegalArgumentException("Subscriber is null") + else dissociate(subscriber) } From 9862afab84eb76c55c72b6bb939118f90278a80c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 12:16:10 +0200 Subject: [PATCH 443/538] Sigh, the tests are flakier than a flaky flake from flakeville --- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 3 +++ 1 file changed, 3 insertions(+) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 9536b983a7..2ff63b20a4 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -50,6 +50,9 @@ object Configuration { """ def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { + + if (true) throw new IllegalArgumentException("Test not enabled") + val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, 
keyStore, cipher, enabled.mkString(", ")))) val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") val settings = new NettySettings(fullConfig, "placeholder") From 5b8ce4cc6b1adeba9d3a6d6ae52e6d70fe14b96d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 12:17:41 +0200 Subject: [PATCH 444/538] Turning on the ssl test again --- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 2ff63b20a4..94142b8f66 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -51,7 +51,7 @@ object Configuration { def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { - if (true) throw new IllegalArgumentException("Test not enabled") + if (false) throw new IllegalArgumentException("Test not enabled") val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") From 410fd6ca58d8bd444d18b6f047f06962566a9ee7 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 18 Jun 2012 11:10:59 +0200 Subject: [PATCH 445/538] Improve phi in AccrualFailureDetector, see #2066 * Implementation of phi according to the paper * Config properties and documentation, min-std-deviation, * acceptable-lost-heartbeats * Restructure code, HeartbeatHistory is responsible for stats from historical heartbeats * Correct and efficient calculation of mean and standard deviation * More tests --- .../src/main/resources/reference.conf | 16 
+- .../akka/cluster/AccrualFailureDetector.scala | 285 ++++++++++++------ .../scala/akka/cluster/ClusterSettings.scala | 6 +- .../cluster/AccrualFailureDetectorSpec.scala | 161 +++++++--- .../akka/cluster/ClusterConfigSpec.scala | 4 +- 5 files changed, 335 insertions(+), 137 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index b9104fe6cf..90d02d4fd1 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -43,7 +43,21 @@ akka { # a quick detection in the event of a real crash. Conversely, a high # threshold generates fewer mistakes but needs more time to detect # actual crashes - threshold = 8 + threshold = 8.0 + + # Minimum standard deviation to use for the normal distribution in + # AccrualFailureDetector. Too low standard deviation might result in + # too much sensitivity for sudden, but normal, deviations in heartbeat + # inter arrival times. + min-std-deviation = 100 ms + + # Number of potentially lost/delayed heartbeats that will be + # accepted before considering it to be an anomaly. + # It is a factor of heartbeat-interval. + # This margin is important to be able to survive sudden, occasional, + # pauses in heartbeat arrivals, due to for example garbage collect or + # network drop. 
+ acceptable-lost-heartbeats = 3.0 implementation-class = "" diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 6632111f00..1dfac252fe 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -7,50 +7,89 @@ package akka.cluster import akka.actor.{ ActorSystem, Address, ExtendedActorSystem } import akka.remote.RemoteActorRefProvider import akka.event.Logging - import scala.collection.immutable.Map import scala.annotation.tailrec - import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.TimeUnit.NANOSECONDS +import akka.util.Duration +import akka.util.duration._ +object AccrualFailureDetector { + private def realTimeMachine: () ⇒ Long = () ⇒ NANOSECONDS.toMillis(System.nanoTime) +} /** * Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their paper: * [http://ddg.jaist.ac.jp/pub/HDY+04.pdf] - *

- * A low threshold is prone to generate many wrong suspicions but ensures a quick detection in the event - * of a real crash. Conversely, a high threshold generates fewer mistakes but needs more time to detect - * actual crashes - *

- * Default threshold is 8, but can be configured in the Akka config. + * + * @param system Belongs to the [[akka.actor.ActorSystem]]. Used for logging. + * + * @param threshold A low threshold is prone to generate many wrong suspicions but ensures a quick detection in the event + * of a real crash. Conversely, a high threshold generates fewer mistakes but needs more time to detect + * actual crashes + * + * @param maxSampleSize Number of samples to use for calculation of mean and standard deviation of + * inter-arrival times. + * + * @param minStdDeviation Minimum standard deviation to use for the normal distribution used when calculating phi. + * Too low standard deviation might result in too much sensitivity for sudden, but normal, deviations + * in heartbeat inter arrival times. + * + * @param acceptableLostDuration Duration corresponding to number of potentially lost/delayed + * heartbeats that will be accepted before considering it to be an anomaly. + * This margin is important to be able to survive sudden, occasional, pauses in heartbeat + * arrivals, due to for example garbage collect or network drop. + * + * @param firstHeartbeatEstimate Bootstrap the stats with heartbeats that corresponds to + * to this duration, with a with rather high standard deviation (since environment is unknown + * in the beginning) + * + * @timeMachine The clock, returning time in milliseconds, but can be faked for testing + * purposes. It is only used for measuring intervals (duration). 
+ * */ class AccrualFailureDetector( val system: ActorSystem, - val threshold: Int = 8, - val maxSampleSize: Int = 1000, - val timeMachine: () ⇒ Long = System.currentTimeMillis) extends FailureDetector { + val threshold: Double, + val maxSampleSize: Int, + val minStdDeviation: Duration, + val acceptableLostDuration: Duration, + val firstHeartbeatEstimate: Duration, + val timeMachine: () ⇒ Long) extends FailureDetector { + import AccrualFailureDetector._ + + /** + * Constructor that picks configuration from the settings. + */ def this( system: ActorSystem, settings: ClusterSettings, - timeMachine: () ⇒ Long = System.currentTimeMillis) = + timeMachine: () ⇒ Long = AccrualFailureDetector.realTimeMachine) = this( system, settings.FailureDetectorThreshold, settings.FailureDetectorMaxSampleSize, + settings.HeartbeatInterval * settings.FailureDetectorAcceptableLostHeartbeats, + settings.FailureDetectorMinStdDeviation, + // we use a conservative estimate for the first heartbeat because + // gossip needs to spread back to the joining node before the + // first real heartbeat is sent. Initial heartbeat is added when joining. + // FIXME this can be changed to HeartbeatInterval when ticket #2249 is fixed + settings.GossipInterval * 3 + settings.HeartbeatInterval, timeMachine) - private final val PhiFactor = 1.0 / math.log(10.0) - private val log = Logging(system, "FailureDetector") - /** - * Holds the failure statistics for a specific node Address. 
- */ - private case class FailureStats(mean: Double = 0.0, variance: Double = 0.0, deviation: Double = 0.0) - // guess statistics for first heartbeat, - // important so that connections with only one heartbeat becomes unavailble - private val failureStatsFirstHeartbeat = FailureStats(mean = 1000.0) + // important so that connections with only one heartbeat becomes unavailable + private val firstHeartbeat: HeartbeatHistory = { + // bootstrap with 2 entries with rather high standard deviation + val mean = firstHeartbeatEstimate.toMillis + val stdDeviation = mean / 4 + HeartbeatHistory(maxSampleSize) :+ (mean - stdDeviation) :+ (mean + stdDeviation) + } + + private val acceptableLostMillis = acceptableLostDuration.toMillis /** * Implement using optimistic lockless concurrency, all state is represented @@ -58,8 +97,7 @@ class AccrualFailureDetector( */ private case class State( version: Long = 0L, - failureStats: Map[Address, FailureStats] = Map.empty[Address, FailureStats], - intervalHistory: Map[Address, IndexedSeq[Long]] = Map.empty[Address, IndexedSeq[Long]], + history: Map[Address, HeartbeatHistory] = Map.empty, timestamps: Map[Address, Long] = Map.empty[Address, Long], explicitRemovals: Set[Address] = Set.empty[Address]) @@ -78,96 +116,88 @@ class AccrualFailureDetector( final def heartbeat(connection: Address) { log.debug("Heartbeat from connection [{}] ", connection) + val timestamp = timeMachine() val oldState = state.get - val latestTimestamp = oldState.timestamps.get(connection) - if (latestTimestamp.isEmpty) { - // this is heartbeat from a new connection - // add starter records for this new connection - val newState = oldState copy ( - version = oldState.version + 1, - failureStats = oldState.failureStats + (connection -> failureStatsFirstHeartbeat), - intervalHistory = oldState.intervalHistory + (connection -> IndexedSeq.empty[Long]), - timestamps = oldState.timestamps + (connection -> timeMachine()), - explicitRemovals = oldState.explicitRemovals - 
connection) - - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) heartbeat(connection) // recur - - } else { - // this is a known connection - val timestamp = timeMachine() - val interval = timestamp - latestTimestamp.get - - val newIntervalsForConnection = (oldState.intervalHistory.get(connection) match { - case Some(history) if history.size >= maxSampleSize ⇒ - // reached max history, drop first interval - history drop 1 - case Some(history) ⇒ history - case _ ⇒ IndexedSeq.empty[Long] - }) :+ interval - - val newFailureStats = { - val newMean: Double = newIntervalsForConnection.sum.toDouble / newIntervalsForConnection.size - - val oldConnectionFailureStats = oldState.failureStats.get(connection).getOrElse { - throw new IllegalStateException("Can't calculate new failure statistics due to missing heartbeat history") - } - - val deviationSum = (0.0d /: newIntervalsForConnection) { (mean, interval) ⇒ - mean + interval.toDouble - newMean - } - - val newVariance: Double = deviationSum / newIntervalsForConnection.size - val newDeviation: Double = math.sqrt(newVariance) - - val newFailureStats = oldConnectionFailureStats copy (mean = newMean, deviation = newDeviation, variance = newVariance) - oldState.failureStats + (connection -> newFailureStats) - } - - val newState = oldState copy (version = oldState.version + 1, - failureStats = newFailureStats, - intervalHistory = oldState.intervalHistory + (connection -> newIntervalsForConnection), - timestamps = oldState.timestamps + (connection -> timestamp), // record new timestamp, - explicitRemovals = oldState.explicitRemovals - connection) - - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) heartbeat(connection) // recur + val newHistory = oldState.timestamps.get(connection) match { + case None ⇒ + // this is heartbeat from a new connection + // add starter records for this new connection + firstHeartbeat + case 
(Some(latestTimestamp)) ⇒ + // this is a known connection + val interval = timestamp - latestTimestamp + oldState.history(connection) :+ interval } + + val newState = oldState copy (version = oldState.version + 1, + history = oldState.history + (connection -> newHistory), + timestamps = oldState.timestamps + (connection -> timestamp), // record new timestamp, + explicitRemovals = oldState.explicitRemovals - connection) + + // if we won the race then update else try again + if (!state.compareAndSet(oldState, newState)) heartbeat(connection) // recur } /** - * Calculates how likely it is that the connection has failed. - *

+ * The suspicion level of accrual failure detector is given by a value called φ (phi). + * The basic idea of the φ failure detector is to express the value of φ on a scale that + * is dynamically adjusted to reflect current network conditions. + * + * The value of φ is calculated as: + * + * {{{ + * φ = -log10(1 - F(timeSinceLastHeartbeat) + * }}} + * where F is the cumulative distribution function of a normal distribution with mean + * and standard deviation estimated from historical heartbeat inter-arrival times. + * * If a connection does not have any records in failure detector then it is * considered healthy. - *

- * Implementations of 'Cumulative Distribution Function' for Exponential Distribution. - * For a discussion on the math read [https://issues.apache.org/jira/browse/CASSANDRA-2597]. + * */ def phi(connection: Address): Double = { val oldState = state.get val oldTimestamp = oldState.timestamps.get(connection) - val phi = - // if connection has been removed explicitly - if (oldState.explicitRemovals.contains(connection)) Double.MaxValue - else if (oldTimestamp.isEmpty) 0.0 // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections - else { - val timestampDiff = timeMachine() - oldTimestamp.get + // if connection has been removed explicitly + if (oldState.explicitRemovals.contains(connection)) Double.MaxValue + else if (oldTimestamp.isEmpty) 0.0 // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections + else { + val timeDiff = timeMachine() - oldTimestamp.get - val mean = oldState.failureStats.get(connection) match { - case Some(FailureStats(mean, _, _)) ⇒ mean - case _ ⇒ throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") - } + val history = oldState.history(connection) + val mean = history.mean + val stdDeviation = ensureValidStdDeviation(history.stdDeviation) - if (mean == 0.0) 0.0 - else PhiFactor * timestampDiff / mean - } + val φ = phi(timeDiff, mean + acceptableLostMillis, stdDeviation) - // FIXME change to debug log level, when failure detector is stable - log.info("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) - phi + // FIXME change to debug log level, when failure detector is stable + if (φ > 1.0) + log.info("Phi value [{}] for connection [{}], after [{} ms], based on [{}]", + φ, connection, timeDiff, "N(" + mean + ", " + stdDeviation + ")") + + φ + } + } + + private[cluster] def phi(timeDiff: Long, mean: Double, stdDeviation: Double): Double = { + val cdf = cumulativeDistributionFunction(timeDiff, mean, 
stdDeviation) + -math.log10(1.0 - cdf) + } + + private val minStdDeviationMillis = minStdDeviation.toMillis + + private def ensureValidStdDeviation(stdDeviation: Double): Double = math.max(stdDeviation, minStdDeviationMillis) + + /** + * Cumulative distribution function for N(mean, stdDeviation) normal distribution. + * This is an approximation defined in β Mathematics Handbook. + */ + private[cluster] def cumulativeDistributionFunction(x: Double, mean: Double, stdDeviation: Double): Double = { + val y = (x - mean) / stdDeviation + // Cumulative distribution function for N(0, 1) + 1.0 / (1.0 + math.exp(-y * (1.5976 + 0.070566 * y * y))) } /** @@ -178,10 +208,9 @@ class AccrualFailureDetector( log.debug("Remove connection [{}] ", connection) val oldState = state.get - if (oldState.failureStats.contains(connection)) { + if (oldState.history.contains(connection)) { val newState = oldState copy (version = oldState.version + 1, - failureStats = oldState.failureStats - connection, - intervalHistory = oldState.intervalHistory - connection, + history = oldState.history - connection, timestamps = oldState.timestamps - connection, explicitRemovals = oldState.explicitRemovals + connection) @@ -190,3 +219,59 @@ class AccrualFailureDetector( } } } + +private[cluster] object HeartbeatHistory { + + /** + * Create an empty HeartbeatHistory, without any history. + * Can only be used as starting point for appending intervals. + * The stats (mean, variance, stdDeviation) are not defined for + * for empty HeartbeatHistory, i.e. throws AritmeticException. + */ + def apply(maxSampleSize: Int): HeartbeatHistory = HeartbeatHistory( + maxSampleSize = maxSampleSize, + intervals = IndexedSeq.empty, + intervalSum = 0L, + interval2Sum = 0L) + +} + +/** + * Holds the heartbeat statistics for a specific node Address. + * It is capped by the number of samples specified in `maxSampleSize`. + * + * The stats (mean, variance, stdDeviation) are not defined for + * for empty HeartbeatHistory, i.e. 
throws AritmeticException. + */ +private[cluster] case class HeartbeatHistory private ( + maxSampleSize: Int, + intervals: IndexedSeq[Long], + intervalSum: Long, + interval2Sum: Long) { + + def mean: Double = intervalSum.toDouble / intervals.size + + def variance: Double = (interval2Sum.toDouble / intervals.size) - (mean * mean) + + def stdDeviation: Double = math.sqrt(variance) + + @tailrec + final def :+(interval: Long): HeartbeatHistory = { + if (intervals.size < maxSampleSize) + HeartbeatHistory( + maxSampleSize, + intervals = intervals :+ interval, + intervalSum = intervalSum + interval, + interval2Sum = interval2Sum + pow2(interval)) + else + dropOldest :+ interval // recur + } + + private def dropOldest: HeartbeatHistory = HeartbeatHistory( + maxSampleSize, + intervals = intervals drop 1, + intervalSum = intervalSum - intervals.head, + interval2Sum = interval2Sum - pow2(intervals.head)) + + private def pow2(x: Long) = x * x +} \ No newline at end of file diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index ee4f6a03d2..f1e0c2d31b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -13,12 +13,16 @@ import akka.actor.AddressFromURIString class ClusterSettings(val config: Config, val systemName: String) { import config._ - final val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") + + final val FailureDetectorThreshold = getDouble("akka.cluster.failure-detector.threshold") final val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") final val FailureDetectorImplementationClass: Option[String] = getString("akka.cluster.failure-detector.implementation-class") match { case "" ⇒ None case fqcn ⇒ Some(fqcn) } + final val FailureDetectorMinStdDeviation: Duration = 
Duration(getMilliseconds("akka.cluster.failure-detector.min-std-deviation"), MILLISECONDS) + final val FailureDetectorAcceptableLostHeartbeats: Double = getDouble("akka.cluster.failure-detector.acceptable-lost-heartbeats") + final val NodeToJoin: Option[Address] = getString("akka.cluster.node-to-join") match { case "" ⇒ None case AddressFromURIString(addr) ⇒ Some(addr) diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index bd4d5d2c52..081fc9f0fd 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -6,6 +6,9 @@ package akka.cluster import akka.actor.Address import akka.testkit.{ LongRunningTest, AkkaSpec } +import scala.collection.immutable.TreeMap +import akka.util.duration._ +import akka.util.Duration @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class AccrualFailureDetectorSpec extends AkkaSpec(""" @@ -27,33 +30,72 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" timeGenerator } + val defaultFakeTimeIntervals = Vector.fill(20)(1000L) + def createFailureDetector( + threshold: Double = 8.0, + maxSampleSize: Int = 1000, + minStdDeviation: Duration = 10.millis, + acceptableLostDuration: Duration = Duration.Zero, + firstHeartbeatEstimate: Duration = 1.second, + timeMachine: () ⇒ Long = fakeTimeGenerator(defaultFakeTimeIntervals)): AccrualFailureDetector = + new AccrualFailureDetector(system, + threshold, + maxSampleSize, + minStdDeviation, + acceptableLostDuration, + firstHeartbeatEstimate = firstHeartbeatEstimate, + timeMachine = timeMachine) + + "use good enough cumulative distribution function" in { + val fd = createFailureDetector() + fd.cumulativeDistributionFunction(0.0, 0, 1) must be(0.5 plusOrMinus (0.001)) + fd.cumulativeDistributionFunction(0.6, 0, 1) must be(0.7257 plusOrMinus (0.001)) + 
fd.cumulativeDistributionFunction(1.5, 0, 1) must be(0.9332 plusOrMinus (0.001)) + fd.cumulativeDistributionFunction(2.0, 0, 1) must be(0.97725 plusOrMinus (0.01)) + fd.cumulativeDistributionFunction(2.5, 0, 1) must be(0.9379 plusOrMinus (0.1)) + fd.cumulativeDistributionFunction(3.5, 0, 1) must be(0.99977 plusOrMinus (0.1)) + fd.cumulativeDistributionFunction(4.0, 0, 1) must be(0.99997 plusOrMinus (0.1)) + + for (x :: y :: Nil ← (0.0 to 4.0 by 0.1).toList.sliding(2)) { + fd.cumulativeDistributionFunction(x, 0, 1) must be < ( + fd.cumulativeDistributionFunction(y, 0, 1)) + } + + fd.cumulativeDistributionFunction(2.2, 2.0, 0.3) must be(0.7475 plusOrMinus (0.001)) + } + + "return realistic phi values" in { + val fd = createFailureDetector() + val test = TreeMap(0 -> 0.0, 500 -> 0.1, 1000 -> 0.3, 1200 -> 1.6, 1400 -> 4.7, 1600 -> 10.8, 1700 -> 15.3) + for ((timeDiff, expectedPhi) ← test) { + fd.phi(timeDiff = timeDiff, mean = 1000.0, stdDeviation = 100.0) must be(expectedPhi plusOrMinus (0.1)) + } + + // larger stdDeviation results => lower phi + fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 500.0) must be < ( + fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 100.0)) + } + "return phi value of 0.0 on startup for each address, when no heartbeats" in { - val fd = new AccrualFailureDetector(system) + val fd = createFailureDetector() fd.phi(conn) must be(0.0) fd.phi(conn2) must be(0.0) } "return phi based on guess when only one heartbeat" in { - // 1 second ticks - val timeInterval = Vector.fill(30)(1000L) - val fd = new AccrualFailureDetector(system, + val timeInterval = List[Long](0, 1000, 1000, 1000, 1000) + val fd = createFailureDetector(firstHeartbeatEstimate = 1.seconds, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) - fd.phi(conn) must be > (0.0) - // let time go - for (n ← 2 to 8) - fd.phi(conn) must be < (4.0) - for (n ← 9 to 18) - fd.phi(conn) must be < (8.0) - - fd.phi(conn) must be > (8.0) + fd.phi(conn) must be(0.3 plusOrMinus 
0.2) + fd.phi(conn) must be(4.5 plusOrMinus 0.3) + fd.phi(conn) must be > (15.0) } "return phi value using first interval after second heartbeat" in { val timeInterval = List[Long](0, 100, 100, 100) - val fd = new AccrualFailureDetector(system, - timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.phi(conn) must be > (0.0) @@ -63,8 +105,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after a series of successful heartbeats" in { val timeInterval = List[Long](0, 1000, 100, 100) - val fd = new AccrualFailureDetector(system, - timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.heartbeat(conn) @@ -75,8 +116,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead after explicit removal of connection" in { val timeInterval = List[Long](0, 1000, 100, 100, 100) - val fd = new AccrualFailureDetector(system, - timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.heartbeat(conn) @@ -89,8 +129,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after explicit removal of connection and receiving heartbeat again" in { val timeInterval = List[Long](0, 1000, 100, 1100, 1100, 1100, 1100, 1100, 100) - val fd = new AccrualFailureDetector(system, - timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 @@ -112,40 +151,65 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" } "mark node as dead if heartbeat are missed" in { - val timeInterval = List[Long](0, 1000, 100, 100, 5000) + val timeInterval = List[Long](0, 1000, 100, 100, 7000) val ft = fakeTimeGenerator(timeInterval) - val fd = new 
AccrualFailureDetector(system, threshold = 3, - timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(threshold = 3, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 fd.heartbeat(conn) //1000 fd.heartbeat(conn) //1100 fd.isAvailable(conn) must be(true) //1200 - fd.isAvailable(conn) must be(false) //6200 + fd.isAvailable(conn) must be(false) //8200 } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { - val timeInterval = List[Long](0, 1000, 100, 1100, 5000, 100, 1000, 100, 100) - val fd = new AccrualFailureDetector(system, threshold = 3, - timeMachine = fakeTimeGenerator(timeInterval)) + val timeInterval = List[Long](0, 1000, 100, 1100, 7000, 100, 1000, 100, 100) + val fd = createFailureDetector(threshold = 3, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 fd.heartbeat(conn) //1000 fd.heartbeat(conn) //1100 fd.isAvailable(conn) must be(true) //1200 - fd.isAvailable(conn) must be(false) //6200 - fd.heartbeat(conn) //6300 - fd.heartbeat(conn) //7300 - fd.heartbeat(conn) //7400 + fd.isAvailable(conn) must be(false) //8200 + fd.heartbeat(conn) //8300 + fd.heartbeat(conn) //9300 + fd.heartbeat(conn) //9400 - fd.isAvailable(conn) must be(true) //7500 + fd.isAvailable(conn) must be(true) //9500 + } + + "accept some configured missing heartbeats" in { + val timeInterval = List[Long](0, 1000, 1000, 1000, 4000, 1000, 1000) + val fd = createFailureDetector(acceptableLostDuration = 3.seconds, timeMachine = fakeTimeGenerator(timeInterval)) + + fd.heartbeat(conn) + fd.heartbeat(conn) + fd.heartbeat(conn) + fd.heartbeat(conn) + fd.isAvailable(conn) must be(true) + fd.heartbeat(conn) + fd.isAvailable(conn) must be(true) + } + + "fail after configured acceptable missing heartbeats" in { + val timeInterval = List[Long](0, 1000, 1000, 1000, 1000, 1000, 500, 500, 5000) + val fd = createFailureDetector(acceptableLostDuration = 3.seconds, timeMachine = 
fakeTimeGenerator(timeInterval)) + + fd.heartbeat(conn) + fd.heartbeat(conn) + fd.heartbeat(conn) + fd.heartbeat(conn) + fd.heartbeat(conn) + fd.heartbeat(conn) + fd.isAvailable(conn) must be(true) + fd.heartbeat(conn) + fd.isAvailable(conn) must be(false) } "use maxSampleSize heartbeats" in { val timeInterval = List[Long](0, 100, 100, 100, 100, 600, 1000, 1000, 1000, 1000, 1000) - val fd = new AccrualFailureDetector(system, maxSampleSize = 3, - timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(maxSampleSize = 3, timeMachine = fakeTimeGenerator(timeInterval)) // 100 ms interval fd.heartbeat(conn) //0 @@ -163,4 +227,33 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" } } + + "Statistics for heartbeats" must { + + "calculate correct mean and variance" in { + val samples = Seq(100, 200, 125, 340, 130) + val stats = (HeartbeatHistory(maxSampleSize = 20) /: samples) { (stats, value) ⇒ stats :+ value } + stats.mean must be(179.0 plusOrMinus 0.00001) + stats.variance must be(7584.0 plusOrMinus 0.00001) + } + + "have 0.0 variance for one sample" in { + (HeartbeatHistory(600) :+ 1000L).variance must be(0.0 plusOrMinus 0.00001) + } + + "be capped by the specified maxSampleSize" in { + val history3 = HeartbeatHistory(maxSampleSize = 3) :+ 100 :+ 110 :+ 90 + history3.mean must be(100.0 plusOrMinus 0.00001) + history3.variance must be(66.6666667 plusOrMinus 0.00001) + + val history4 = history3 :+ 140 + history4.mean must be(113.333333 plusOrMinus 0.00001) + history4.variance must be(422.222222 plusOrMinus 0.00001) + + val history5 = history4 :+ 80 + history5.mean must be(103.333333 plusOrMinus 0.00001) + history5.variance must be(688.88888889 plusOrMinus 0.00001) + + } + } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 481d9f7e5a..ab8ffcf157 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ 
b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -16,9 +16,11 @@ class ClusterConfigSpec extends AkkaSpec { "be able to parse generic cluster config elements" in { val settings = new ClusterSettings(system.settings.config, system.name) import settings._ - FailureDetectorThreshold must be(8) + FailureDetectorThreshold must be(8.0 plusOrMinus 0.0001) FailureDetectorMaxSampleSize must be(1000) FailureDetectorImplementationClass must be(None) + FailureDetectorMinStdDeviation must be(100 millis) + FailureDetectorAcceptableLostHeartbeats must be(3.0 plusOrMinus 0.0001) NodeToJoin must be(None) PeriodicTasksInitialDelay must be(1 seconds) GossipInterval must be(1 second) From 4f8522dc6381cb236d6ff833ada4abf34b6bc9ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 19 Jun 2012 14:21:11 +0200 Subject: [PATCH 446/538] Merged in the old MemberSpec with the new MemberOrderingSpec. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/cluster/MemberOrderingSpec.scala | 34 +++++++++++++- .../test/scala/akka/cluster/MemberSpec.scala | 45 ------------------- 2 files changed, 32 insertions(+), 47 deletions(-) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala index 7528750a22..d8687312da 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala @@ -5,11 +5,14 @@ package akka.cluster import akka.actor.{ Address, AddressFromURIString } -import akka.testkit.AkkaSpec import java.net.InetSocketAddress +import org.scalatest.matchers.MustMatchers +import org.scalatest.WordSpec import scala.collection.immutable.SortedSet +import scala.util.Random -class MemberOrderingSpec extends AkkaSpec { 
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class MemberOrderingSpec extends WordSpec with MustMatchers { import Member.ordering import Member.addressOrdering import MemberStatus._ @@ -56,6 +59,33 @@ class MemberOrderingSpec extends AkkaSpec { seq(2) must equal(Member(AddressFromURIString("akka://sys@darkstar:1110"), Exiting)) seq(3) must equal(Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting)) } + + "be sorted by address correctly" in { + import Member.ordering + // sorting should be done on host and port, only + val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) + val m2 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) + val m3 = Member(Address("cluster", "sys2", "host2", 8000), MemberStatus.Up) + val m4 = Member(Address("cluster", "sys2", "host2", 9000), MemberStatus.Up) + val m5 = Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) + + val expected = IndexedSeq(m1, m2, m3, m4, m5) + val shuffled = Random.shuffle(expected) + shuffled.sorted must be(expected) + (SortedSet.empty[Member] ++ shuffled).toIndexedSeq must be(expected) + } + + "have stable equals and hashCode" in { + val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Joining) + val m2 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) + val m3 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) + + m1 must be(m2) + m1.hashCode must be(m2.hashCode) + + m3 must not be (m2) + m3 must not be (m1) + } } "An Ordering[Address]" must { diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala deleted file mode 100644 index bc1f70ae86..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import akka.actor.Address -import scala.util.Random -import scala.collection.immutable.SortedSet - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class MemberSpec extends WordSpec with MustMatchers { - - "Member" must { - - "be sorted by address correctly" in { - import Member.ordering - // sorting should be done on host and port, only - val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) - val m2 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) - val m3 = Member(Address("cluster", "sys2", "host2", 8000), MemberStatus.Up) - val m4 = Member(Address("cluster", "sys2", "host2", 9000), MemberStatus.Up) - val m5 = Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) - - val expected = IndexedSeq(m1, m2, m3, m4, m5) - val shuffled = Random.shuffle(expected) - shuffled.sorted must be(expected) - (SortedSet.empty[Member] ++ shuffled).toIndexedSeq must be(expected) - } - - "have stable equals and hashCode" in { - val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Joining) - val m2 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) - val m3 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) - - m1 must be(m2) - m1.hashCode must be(m2.hashCode) - - m3 must not be (m2) - m3 must not be (m1) - } - } -} From fd54a93135271b823008ecc3ed04436694c3548e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 19 Jun 2012 14:21:56 +0200 Subject: [PATCH 447/538] Added ScalaDoc on 'def status: MemberStatus' describing the MemberStatus.Removed semantics. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index cc91680b4a..ce3daa2dbd 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -603,7 +603,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) def latestGossip: Gossip = state.get.latestGossip /** - * Member status for this node. + * Member status for this node (`MemberStatus`). + * + * NOTE: If the node has been removed from the cluster (and shut down) then it's status is set to the 'REMOVED' tombstone state + * and is no longer present in the node ring or any other part of the gossiping state. However in order to maintain the + * model and the semantics the user would expect, this method will in this situation return `MemberStatus.Removed`. 
*/ def status: MemberStatus = { if (isRunning) self.status @@ -1160,11 +1164,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val hasChangedState = removedMembers.nonEmpty || upMembers.nonEmpty || exitingMembers.nonEmpty // removing REMOVED nodes from the 'seen' table - //val newSeen = removedMembers.foldLeft(localSeen) { (seen, removed) ⇒ seen - removed.address } val newSeen = localSeen -- removedMembers.map(_.address) // removing REMOVED nodes from the 'unreachable' set - //val newUnreachableMembers = removedMembers.foldLeft(localUnreachableMembers) { (unreachable, removed) ⇒ unreachable - removed } val newUnreachableMembers = localUnreachableMembers -- removedMembers val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview From 9011c310e1b0bb37a5d0f73a71a9833a3a164c15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 19 Jun 2012 14:27:12 +0200 Subject: [PATCH 448/538] Minor cleanup. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index ce3daa2dbd..411c9d4b18 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -123,8 +123,8 @@ object Member { * it puts all members that are in MemberStatus.EXITING last. 
*/ implicit val ordering: Ordering[Member] = Ordering.fromLessThan[Member] { (a, b) ⇒ - if (a.status == MemberStatus.Exiting && b.status != MemberStatus.Exiting) false - else if (a.status != MemberStatus.Exiting && b.status == MemberStatus.Exiting) true + if (a.status == Exiting && b.status != Exiting) false + else if (a.status != Exiting && b.status == Exiting) true else addressOrdering.compare(a.address, b.address) < 0 } From 422cf386c8ba5fefd2c17f144386a8fbd964083f Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 19 Jun 2012 14:52:02 +0200 Subject: [PATCH 449/538] incorporate review comments, add docs, see #2031 also add Java sample for creating custom MailboxType --- .../src/main/scala/akka/actor/ActorCell.scala | 12 +++--- .../src/main/scala/akka/actor/ActorRef.scala | 16 ++++++++ .../scala/akka/actor/ActorRefProvider.scala | 13 +++--- .../main/scala/akka/actor/ActorSystem.scala | 29 ++++--------- .../akka/actor/RepointableActorRef.scala | 27 ++++++++---- .../dispatcher/DispatcherDocTestBase.java | 37 +++++++++++++++++ akka-docs/java/dispatchers.rst | 41 +++++++++++++++++++ akka-docs/java/untyped-actors.rst | 7 ---- akka-docs/scala/actors.rst | 7 ---- .../docs/dispatcher/DispatcherDocSpec.scala | 4 +- akka-docs/scala/dispatchers.rst | 20 +++++++++ .../akka/remote/RemoteActorRefProvider.scala | 4 +- 12 files changed, 157 insertions(+), 60 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 5a1269b5fe..c0d15d07bb 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -328,12 +328,12 @@ private[akka] object ActorCell { def remove(child: ActorRef): ChildrenContainer = NormalChildrenContainer(c - child.path.name) - def getByName(name: String): Option[ChildRestartStats] = c get name match { + def getByName(name: String): Option[ChildRestartStats] = c.get(name) match { case s @ Some(_: ChildRestartStats) ⇒ 
s.asInstanceOf[Option[ChildRestartStats]] case _ ⇒ None } - def getByRef(actor: ActorRef): Option[ChildRestartStats] = c get actor.path.name match { + def getByRef(actor: ActorRef): Option[ChildRestartStats] = c.get(actor.path.name) match { case c @ Some(crs: ChildRestartStats) if (crs.child == actor) ⇒ c.asInstanceOf[Option[ChildRestartStats]] case _ ⇒ None } @@ -349,7 +349,7 @@ private[akka] object ActorCell { throw new InvalidActorNameException("actor name " + name + " is not unique!") else new NormalChildrenContainer(c.updated(name, ChildNameReserved)) - def unreserve(name: String): ChildrenContainer = c get name match { + def unreserve(name: String): ChildrenContainer = c.get(name) match { case Some(ChildNameReserved) ⇒ NormalChildrenContainer(c - name) case _ ⇒ this } @@ -389,12 +389,12 @@ private[akka] object ActorCell { else copy(c - child.path.name, t) } - def getByName(name: String): Option[ChildRestartStats] = c get name match { + def getByName(name: String): Option[ChildRestartStats] = c.get(name) match { case s @ Some(_: ChildRestartStats) ⇒ s.asInstanceOf[Option[ChildRestartStats]] case _ ⇒ None } - def getByRef(actor: ActorRef): Option[ChildRestartStats] = c get actor.path.name match { + def getByRef(actor: ActorRef): Option[ChildRestartStats] = c.get(actor.path.name) match { case c @ Some(crs: ChildRestartStats) if (crs.child == actor) ⇒ c.asInstanceOf[Option[ChildRestartStats]] case _ ⇒ None } @@ -413,7 +413,7 @@ private[akka] object ActorCell { else copy(c = c.updated(name, ChildNameReserved)) } - def unreserve(name: String): ChildrenContainer = c get name match { + def unreserve(name: String): ChildrenContainer = c.get(name) match { case Some(ChildNameReserved) ⇒ copy(c = c - name) case _ ⇒ this } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index bde2a2194c..8d42714b00 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ 
b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -163,10 +163,20 @@ private[akka] trait ActorRefScope { def isLocal: Boolean } +/** + * Refs which are statically known to be local inherit from this Scope + */ private[akka] trait LocalRef extends ActorRefScope { final def isLocal = true } +/** + * RepointableActorRef (and potentially others) may change their locality at + * runtime, meaning that isLocal might not be stable. RepointableActorRef has + * the feature that it starts out “not fully started” (but you can send to it), + * which is why `isStarted` features here; it is not improbable that cluster + * actor refs will have the same behavior. + */ private[akka] trait RepointableRef extends ActorRefScope { def isStarted: Boolean } @@ -214,6 +224,12 @@ private[akka] abstract class InternalActorRef extends ActorRef with ScalaActorRe def isLocal: Boolean } +/** + * Common trait of all actor refs which actually have a Cell, most notably + * LocalActorRef and RepointableActorRef. The former specializes the return + * type of `underlying` so that follow-up calls can use invokevirtual instead + * of invokeinterface. + */ private[akka] abstract class ActorRefWithCell extends InternalActorRef { this: ActorRefScope ⇒ def underlying: Cell } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 8195aea64c..bbb84144c5 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -26,12 +26,12 @@ trait ActorRefProvider { /** * Reference to the supervisor used for all top-level user actors. */ - def guardian: InternalActorRef + def guardian: LocalActorRef /** * Reference to the supervisor used for all top-level system actors. */ - def systemGuardian: InternalActorRef + def systemGuardian: LocalActorRef /** * Dead letter destination for this provider. 
@@ -482,13 +482,10 @@ class LocalActorRefProvider( } } - lazy val guardian: InternalActorRef = - actorOf(system, guardianProps, rootGuardian, rootPath / "user", - systemService = true, deploy = None, lookupDeploy = false, async = false) + lazy val guardian: LocalActorRef = new LocalActorRef(system, guardianProps, rootGuardian, rootPath / "user") - lazy val systemGuardian: InternalActorRef = - actorOf(system, guardianProps.withCreator(new SystemGuardian), rootGuardian, rootPath / "system", - systemService = true, deploy = None, lookupDeploy = false, async = false) + lazy val systemGuardian: LocalActorRef = + new LocalActorRef(system, guardianProps.withCreator(new SystemGuardian), rootGuardian, rootPath / "system") lazy val tempContainer = new VirtualPathContainer(system.provider, tempNode, rootGuardian, log) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 030fa4a8b5..e45363253a 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -426,8 +426,10 @@ abstract class ExtendedActorSystem extends ActorSystem { /** * For debugging: traverse actor hierarchy and make string representation. + * Careful, this may OOM on large actor systems, and it is only meant for + * helping debugging in case something already went terminally wrong. 
*/ - def printTree: String + private[akka] def printTree: String } private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, classLoader: ClassLoader) extends ExtendedActorSystem { @@ -485,26 +487,11 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, protected def systemImpl: ActorSystemImpl = this - private[akka] def systemActorOf(props: Props, name: String): ActorRef = { - systemGuardian match { - case g: LocalActorRef ⇒ g.underlying.attachChild(props, name) - case s ⇒ throw new UnsupportedOperationException("unknown systemGuardian type " + s.getClass) - } - } + private[akka] def systemActorOf(props: Props, name: String): ActorRef = systemGuardian.underlying.attachChild(props, name) - def actorOf(props: Props, name: String): ActorRef = { - guardian match { - case g: LocalActorRef ⇒ g.underlying.attachChild(props, name) - case s ⇒ throw new UnsupportedOperationException("unknown guardian type " + s.getClass) - } - } + def actorOf(props: Props, name: String): ActorRef = guardian.underlying.attachChild(props, name) - def actorOf(props: Props): ActorRef = { - guardian match { - case g: LocalActorRef ⇒ g.underlying.attachChild(props) - case s ⇒ throw new UnsupportedOperationException("unknown guardian type " + s.getClass) - } - } + def actorOf(props: Props): ActorRef = guardian.underlying.attachChild(props) def stop(actor: ActorRef): Unit = { implicit val timeout = settings.CreationTimeout @@ -569,8 +556,8 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, def terminationFuture: Future[Unit] = provider.terminationFuture def lookupRoot: InternalActorRef = provider.rootGuardian - def guardian: InternalActorRef = provider.guardian - def systemGuardian: InternalActorRef = provider.systemGuardian + def guardian: LocalActorRef = provider.guardian + def systemGuardian: LocalActorRef = provider.systemGuardian def /(actorName: String): ActorPath = guardian.path / actorName def /(path: 
Iterable[String]): ActorPath = guardian.path / path diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala index 1344735993..ad9a7cb0c4 100644 --- a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala @@ -43,11 +43,13 @@ private[akka] class RepointableActorRef( if (Unsafe.instance.compareAndSwapObject(this, cellOffset, old, next)) old else swapCell(next) } - /* + /** * Initialize: make a dummy cell which holds just a mailbox, then tell our - * supervisor that we exist so that he can create the real Cell in + * supervisor that we exist so that he can create the real Cell in * handleSupervise(). - * + * + * Call twice on your own peril! + * * This is protected so that others can have different initialization. */ def initialize(): this.type = { @@ -57,9 +59,10 @@ private[akka] class RepointableActorRef( } /** - * This method is supposedly called by the supervisor in handleSupervise() + * This method is supposed to be called by the supervisor in handleSupervise() * to replace the UnstartedCell with the real one. It assumes no concurrent - * modification of the underlying Cell. + * modification of the `underlying` field, though it is safe to send messages + * at any time. */ def activate(): this.type = { underlying match { @@ -69,6 +72,10 @@ private[akka] class RepointableActorRef( this } + /** + * This is called by activate() to obtain the cell which is to replace the + * unstarted cell. The cell must be fully functional. 
+ */ def newCell(): Cell = new ActorCell(system, this, props, supervisor).start() def suspend(): Unit = underlying.suspend() @@ -138,11 +145,17 @@ private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, val self: Rep while (systemQueue.nonEmpty || queue.nonEmpty) { while (systemQueue.nonEmpty) { val msg = systemQueue.dequeue() - try cell sendSystemMessage msg catch { case _: InterruptedException ⇒ interrupted = true } + try cell.sendSystemMessage(msg) + catch { + case _: InterruptedException ⇒ interrupted = true + } } if (queue.nonEmpty) { val envelope = queue.dequeue() - try cell tell (envelope.message, envelope.sender) catch { case _: InterruptedException ⇒ interrupted = true } + try cell.tell(envelope.message, envelope.sender) + catch { + case _: InterruptedException ⇒ interrupted = true + } } } if (interrupted) throw new InterruptedException diff --git a/akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java b/akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java index 94e4b38121..ca5569657e 100644 --- a/akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java +++ b/akka-docs/java/code/docs/dispatcher/DispatcherDocTestBase.java @@ -24,6 +24,15 @@ import com.typesafe.config.Config; //#imports-prio-mailbox +//#imports-custom +import akka.dispatch.Envelope; +import akka.dispatch.MessageQueue; +import akka.dispatch.MailboxType; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; + +//#imports-custom + import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -136,4 +145,32 @@ public class DispatcherDocTestBase { } } //#prio-mailbox + + //#mailbox-implementation-example + class MyUnboundedMailbox implements MailboxType { + + // This constructor signature must exist, it will be called by Akka + public MyUnboundedMailbox(ActorSystem.Settings settings, Config config) { + // put your initialization code here + } + + // The create method is called to create the MessageQueue + public MessageQueue 
create(Option owner, Option system) { + return new MessageQueue() { + private final Queue queue = new ConcurrentLinkedQueue(); + + // these must be implemented; queue used as example + public void enqueue(ActorRef receiver, Envelope handle) { queue.offer(handle); } + public Envelope dequeue() { return queue.poll(); } + public int numberOfMessages() { return queue.size(); } + public boolean hasMessages() { return !queue.isEmpty(); } + public void cleanUp(ActorRef owner, MessageQueue deadLetters) { + for (Envelope handle: queue) { + deadLetters.enqueue(owner, handle); + } + } + }; + } + } + //#mailbox-implementation-example } diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index 2723883e9c..27716275c0 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -183,3 +183,44 @@ And then an example on how you would use it: the configuration which describes the dispatcher using this mailbox type; the mailbox type will be instantiated once for each dispatcher using it. +Creating your own Mailbox type +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An example is worth a thousand quacks: + +.. includecode:: code/docs/dispatcher/DispatcherDocTestBase.java#imports-custom + +.. includecode:: code/docs/dispatcher/DispatcherDocTestBase.java#mailbox-implementation-example + +And then you just specify the FQCN of your MailboxType as the value of the "mailbox-type" in the dispatcher configuration. + +.. note:: + + Make sure to include a constructor which takes + ``akka.actor.ActorSystem.Settings`` and ``com.typesafe.config.Config`` + arguments, as this constructor is invoked reflectively to construct your + mailbox type. The config passed in as second argument is that section from + the configuration which describes the dispatcher using this mailbox type; the + mailbox type will be instantiated once for each dispatcher using it. 
+ + +Special Semantics of ``system.actorOf`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In order to make ``system.actorOf`` both synchronous and non-blocking while +keeping the return type :class:`ActorRef` (and the semantics that the returned +ref is fully functional), special handling takes place for this case. Behind +the scenes, a hollow kind of actor reference is constructed, which is sent to +the system’s guardian actor who actually creates the actor and its context and +puts those inside the reference. Until that has happened, messages sent to the +:class:`ActorRef` will be queued locally, and only upon swapping the real +filling in will they be transferred into the real mailbox. Thus, + +.. code-block:: scala + + system.actorOf(...).tell("bang"); + assert(bangIsInMyCustomMailbx); + +will probably fail; you will have to allow for some time to pass and retry the +check à la :meth:`TestKit.awaitCond`. + diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index ac911fd216..57dbaa5604 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -82,13 +82,6 @@ that is used in log messages and for identifying actors. The name must not be em or start with ``$``. If the given name is already in use by another child to the same parent actor an `InvalidActorNameException` is thrown. -.. warning:: - - Creating top-level actors with ``system.actorOf`` is a blocking operation, - hence it may dead-lock due to starvation if the default dispatcher is - overloaded. To avoid problems, do not call this method from within actors or - futures which run on the default dispatcher. - Actors are automatically started asynchronously when created. When you create the ``UntypedActor`` then it will automatically call the ``preStart`` callback method on the ``UntypedActor`` class. 
This is an excellent place to diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 9b2cb9a7e5..47a2318e53 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -76,13 +76,6 @@ that is used in log messages and for identifying actors. The name must not be em or start with ``$``. If the given name is already in use by another child to the same parent actor an `InvalidActorNameException` is thrown. -.. warning:: - - Creating top-level actors with ``system.actorOf`` is a blocking operation, - hence it may dead-lock due to starvation if the default dispatcher is - overloaded. To avoid problems, do not call this method from within actors or - futures which run on the default dispatcher. - Actors are automatically started asynchronously when created. When you create the ``Actor`` then it will automatically call the ``preStart`` callback method on the ``Actor`` trait. This is an excellent place to diff --git a/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala index 09a2f810bf..7fdd0cd9bf 100644 --- a/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala @@ -134,7 +134,7 @@ object DispatcherDocSpec { } //#mailbox-implementation-example - case class MyUnboundedMailbox() extends akka.dispatch.MailboxType { + class MyUnboundedMailbox extends akka.dispatch.MailboxType { import akka.actor.{ ActorRef, ActorSystem } import com.typesafe.config.Config import java.util.concurrent.ConcurrentLinkedQueue @@ -153,8 +153,8 @@ object DispatcherDocSpec { new QueueBasedMessageQueue with UnboundedMessageQueueSemantics { final val queue = new ConcurrentLinkedQueue[Envelope]() } - //#mailbox-implementation-example } + //#mailbox-implementation-example } class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index 
cea9ee6e0a..5be19ad799 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -198,3 +198,23 @@ And then you just specify the FQCN of your MailboxType as the value of the "mail the configuration which describes the dispatcher using this mailbox type; the mailbox type will be instantiated once for each dispatcher using it. +Special Semantics of ``system.actorOf`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In order to make ``system.actorOf`` both synchronous and non-blocking while +keeping the return type :class:`ActorRef` (and the semantics that the returned +ref is fully functional), special handling takes place for this case. Behind +the scenes, a hollow kind of actor reference is constructed, which is sent to +the system’s guardian actor who actually creates the actor and its context and +puts those inside the reference. Until that has happened, messages sent to the +:class:`ActorRef` will be queued locally, and only upon swapping the real +filling in will they be transferred into the real mailbox. Thus, + +.. code-block:: scala + + system.actorOf(...) ! "bang" + assert(bangIsInMyCustomMailbx) + +will probably fail; you will have to allow for some time to pass and retry the +check à la :meth:`TestKit.awaitCond`. 
+ diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index dfdf25759b..cdf9ad9d70 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -36,8 +36,8 @@ private[akka] class RemoteActorRefProvider( // these are only available after init() override def rootGuardian: InternalActorRef = local.rootGuardian - override def guardian: InternalActorRef = local.guardian - override def systemGuardian: InternalActorRef = local.systemGuardian + override def guardian: LocalActorRef = local.guardian + override def systemGuardian: LocalActorRef = local.systemGuardian override def terminationFuture: Promise[Unit] = local.terminationFuture override def dispatcher: MessageDispatcher = local.dispatcher override def registerTempActor(actorRef: InternalActorRef, path: ActorPath): Unit = local.registerTempActor(actorRef, path) From 53d8ef601eaa28b180c3ebcbf7fa6da3296d78f6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 15:24:00 +0200 Subject: [PATCH 450/538] Revert "Sigh, the tests are flakier than a flaky flake from flakeville" This reverts commit 9862afab84eb76c55c72b6bb939118f90278a80c. 
--- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 3 --- 1 file changed, 3 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 2ff63b20a4..9536b983a7 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -50,9 +50,6 @@ object Configuration { """ def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { - - if (true) throw new IllegalArgumentException("Test not enabled") - val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") val settings = new NettySettings(fullConfig, "placeholder") From 23a87afca6c5b5bc23a322de4980400f66720aab Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 16:10:46 +0200 Subject: [PATCH 451/538] Commenting out one of the SSL tests since I can't see how it is needed --- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 9536b983a7..09253c1387 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -124,7 +124,7 @@ abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boo } } - "send error message for wrong address" in { + "send error message for wrong address" ignore { within(timeout.duration) { EventFilter.error(start = "dropping", occurrences = 
1).intercept { system.actorFor("akka://remotesys@localhost:12346/user/echo") ! "ping" From 3db9f7eb41df58135410b4edcec0b43d27ffad7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 19 Jun 2012 16:32:51 +0200 Subject: [PATCH 452/538] Changes based on review --- .../src/main/scala/akka/util/Duration.scala | 8 +- .../akka/remote/testconductor/Conductor.scala | 13 ++-- .../akka/remote/testconductor/DataTypes.scala | 2 +- .../akka/remote/testconductor/Player.scala | 6 +- .../remote/testconductor/BarrierSpec.scala | 76 +++++++++---------- 5 files changed, 54 insertions(+), 51 deletions(-) diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index b37cf24c3b..3a1c2e80c8 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -9,16 +9,22 @@ import TimeUnit._ import java.lang.{ Double ⇒ JDouble } //TODO add @SerialVersionUID(1L) when SI-4804 is fixed -case class Deadline private (time: Duration) { +case class Deadline private (time: Duration) extends Ordered[Deadline] { def +(other: Duration): Deadline = copy(time = time + other) def -(other: Duration): Deadline = copy(time = time - other) def -(other: Deadline): Duration = time - other.time def timeLeft: Duration = this - Deadline.now def hasTimeLeft(): Boolean = !isOverdue() //Code reuse FTW def isOverdue(): Boolean = (time.toNanos - System.nanoTime()) < 0 + def compare(that: Deadline) = this.time compare that.time } + object Deadline { def now: Deadline = Deadline(Duration(System.nanoTime, NANOSECONDS)) + + implicit object DeadlineIsOrdered extends Ordering[Deadline] { + def compare(a: Deadline, b: Deadline) = a compare b + } } object Duration { diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 9256ec1abc..b6265125b1 100644 --- 
a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -542,7 +542,7 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor } onTransition { - case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, nextStateData.deadline - Deadline.now, false) + case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, nextStateData.deadline.timeLeft, false) case Waiting -> Idle ⇒ cancelTimer("Timeout") } @@ -552,12 +552,11 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor val together = if (clients.exists(_.fsm == sender)) sender :: arrived else arrived val enterDeadline = getDeadline(timeout) // we only allow the deadlines to get shorter - val newDeadline = if ((enterDeadline - deadline) < Duration.Zero) enterDeadline else deadline - if (newDeadline != deadline) { - cancelTimer("Timeout") - setTimer("Timeout", StateTimeout, newDeadline - Deadline.now, false) - } - handleBarrier(d.copy(arrived = together, deadline = newDeadline)) + if (enterDeadline < deadline) { + setTimer("Timeout", StateTimeout, enterDeadline.timeLeft, false) + handleBarrier(d.copy(arrived = together, deadline = enterDeadline)) + } else + handleBarrier(d.copy(arrived = together)) case Event(RemoveClient(name), d @ Data(clients, barrier, arrived, _)) ⇒ clients find (_.name == name) match { case None ⇒ stay diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index 5adc07bef2..830b32e485 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -30,7 +30,7 @@ private[akka] sealed trait ConfirmedClientOp extends ClientOp */ private[akka] case class Hello(name: String, addr: Address) extends NetworkOp -private[akka] case class 
EnterBarrier(name: String, timeout: Option[Duration] = None) extends ServerOp with NetworkOp +private[akka] case class EnterBarrier(name: String, timeout: Option[Duration]) extends ServerOp with NetworkOp private[akka] case class FailBarrier(name: String) extends ServerOp with NetworkOp private[akka] case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index 46b7106a19..c7f69091cf 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -85,12 +85,10 @@ trait Player { this: TestConductorExt ⇒ * throw an exception in case of timeouts or other errors. */ def enter(timeout: Timeout, name: Seq[String]) { - def now: Duration = System.nanoTime.nanos - system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) - val stop = now + timeout.duration + val stop = Deadline.now + timeout.duration name foreach { b ⇒ - val barrierTimeout = stop - now + val barrierTimeout = stop.timeLeft if (barrierTimeout < Duration.Zero) { client ! ToServer(FailBarrier(b)) throw new TimeoutException("Server timed out while waiting for barrier " + b); diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index 779c02b670..f418f4a717 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -71,7 +71,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { "fail entering barrier when nobody registered" taggedAs TimingTest in { val b = getBarrier() - b ! EnterBarrier("bar1") + b ! 
EnterBarrier("bar1", None) expectMsg(ToClient(BarrierResult("bar1", false))) } @@ -80,10 +80,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val a, b = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) - a.send(barrier, EnterBarrier("bar2")) + a.send(barrier, EnterBarrier("bar2", None)) noMsg(a, b) within(2 seconds) { - b.send(barrier, EnterBarrier("bar2")) + b.send(barrier, EnterBarrier("bar2", None)) a.expectMsg(ToClient(BarrierResult("bar2", true))) b.expectMsg(ToClient(BarrierResult("bar2", true))) } @@ -94,12 +94,12 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val a, b, c = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) - a.send(barrier, EnterBarrier("bar3")) + a.send(barrier, EnterBarrier("bar3", None)) barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) - b.send(barrier, EnterBarrier("bar3")) + b.send(barrier, EnterBarrier("bar3", None)) noMsg(a, b, c) within(2 seconds) { - c.send(barrier, EnterBarrier("bar3")) + c.send(barrier, EnterBarrier("bar3", None)) a.expectMsg(ToClient(BarrierResult("bar3", true))) b.expectMsg(ToClient(BarrierResult("bar3", true))) c.expectMsg(ToClient(BarrierResult("bar3", true))) @@ -112,8 +112,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) - a.send(barrier, EnterBarrier("bar4")) - b.send(barrier, EnterBarrier("bar4")) + a.send(barrier, EnterBarrier("bar4", None)) + b.send(barrier, EnterBarrier("bar4", None)) barrier ! RemoveClient(A) barrier ! 
ClientDisconnected(A) noMsg(a, b, c) @@ -130,9 +130,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val a, b = TestProbe() barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) - a.send(barrier, EnterBarrier("bar5")) + a.send(barrier, EnterBarrier("bar5", None)) barrier ! RemoveClient(A) - b.send(barrier, EnterBarrier("foo")) + b.send(barrier, EnterBarrier("foo", None)) b.expectMsg(ToClient(BarrierResult("foo", true))) } @@ -142,7 +142,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) barrier ! nodeA barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) - a.send(barrier, EnterBarrier("bar6")) + a.send(barrier, EnterBarrier("bar6", None)) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected(B) } @@ -161,8 +161,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! nodeA barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeC - a.send(barrier, EnterBarrier("bar7")) - b.send(barrier, EnterBarrier("bar7")) + a.send(barrier, EnterBarrier("bar7", None)) + b.send(barrier, EnterBarrier("bar7", None)) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected(B) } @@ -180,9 +180,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! nodeA val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! 
nodeB - a.send(barrier, EnterBarrier("bar8")) + a.send(barrier, EnterBarrier("bar8", None)) EventFilter[WrongBarrier](occurrences = 1) intercept { - b.send(barrier, EnterBarrier("foo")) + b.send(barrier, EnterBarrier("foo", None)) } val msg = expectMsgType[Failed] msg match { @@ -203,7 +203,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { case x ⇒ fail("Expected " + Failed(barrier, BarrierEmpty(Data(Set(), "", Nil, null), "cannot remove RoleName(a): no client to remove")) + " but got " + x) } barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) - a.send(barrier, EnterBarrier("bar9")) + a.send(barrier, EnterBarrier("bar9", None)) a.expectMsg(ToClient(BarrierResult("bar9", false))) } @@ -214,7 +214,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA barrier ! nodeB - a.send(barrier, EnterBarrier("bar10")) + a.send(barrier, EnterBarrier("bar10", None)) EventFilter[BarrierTimeout](occurrences = 1) intercept { val msg = expectMsgType[Failed](7 seconds) msg match { @@ -274,7 +274,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { "fail entering barrier when nobody registered" taggedAs TimingTest in { val b = getController(0) - b ! EnterBarrier("b") + b ! EnterBarrier("b", None) expectMsg(ToClient(BarrierResult("b", false))) } @@ -285,10 +285,10 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! 
NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar11")) + a.send(barrier, EnterBarrier("bar11", None)) noMsg(a, b) within(2 seconds) { - b.send(barrier, EnterBarrier("bar11")) + b.send(barrier, EnterBarrier("bar11", None)) a.expectMsg(ToClient(BarrierResult("bar11", true))) b.expectMsg(ToClient(BarrierResult("bar11", true))) } @@ -301,13 +301,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar12")) + a.send(barrier, EnterBarrier("bar12", None)) barrier ! NodeInfo(C, AddressFromURIString("akka://sys"), c.ref) c.expectMsg(ToClient(Done)) - b.send(barrier, EnterBarrier("bar12")) + b.send(barrier, EnterBarrier("bar12", None)) noMsg(a, b, c) within(2 seconds) { - c.send(barrier, EnterBarrier("bar12")) + c.send(barrier, EnterBarrier("bar12", None)) a.expectMsg(ToClient(BarrierResult("bar12", true))) b.expectMsg(ToClient(BarrierResult("bar12", true))) c.expectMsg(ToClient(BarrierResult("bar12", true))) @@ -323,8 +323,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) c.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar13")) - b.send(barrier, EnterBarrier("bar13")) + a.send(barrier, EnterBarrier("bar13", None)) + b.send(barrier, EnterBarrier("bar13", None)) barrier ! Remove(A) barrier ! ClientDisconnected(A) noMsg(a, b, c) @@ -343,9 +343,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar14")) + a.send(barrier, EnterBarrier("bar14", None)) barrier ! 
Remove(A) - b.send(barrier, EnterBarrier("foo")) + b.send(barrier, EnterBarrier("foo", None)) b.expectMsg(ToClient(BarrierResult("foo", true))) } @@ -357,7 +357,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar15")) + a.send(barrier, EnterBarrier("bar15", None)) barrier ! ClientDisconnected(RoleName("unknown")) noMsg(a) EventFilter[ClientLost](occurrences = 1) intercept { @@ -377,8 +377,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) c.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar16")) - b.send(barrier, EnterBarrier("bar16")) + a.send(barrier, EnterBarrier("bar16", None)) + b.send(barrier, EnterBarrier("bar16", None)) EventFilter[ClientLost](occurrences = 1) intercept { barrier ! ClientDisconnected(B) } @@ -394,9 +394,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! nodeB a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) - a.send(barrier, EnterBarrier("bar17")) + a.send(barrier, EnterBarrier("bar17", None)) EventFilter[WrongBarrier](occurrences = 1) intercept { - b.send(barrier, EnterBarrier("foo")) + b.send(barrier, EnterBarrier("foo", None)) } a.expectMsg(ToClient(BarrierResult("bar17", false))) b.expectMsg(ToClient(BarrierResult("foo", false))) @@ -415,7 +415,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { EventFilter[BarrierTimeout](occurrences = 1) intercept { Thread.sleep(4000) } - b.send(barrier, EnterBarrier("bar18")) + b.send(barrier, EnterBarrier("bar18", None)) a.expectMsg(ToClient(BarrierResult("bar18", false))) b.expectMsg(ToClient(BarrierResult("bar18", false))) } @@ -444,7 +444,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { controller ! 
nodeB b.expectMsg(ToClient(BarrierResult("initial startup", false))) } - a.send(controller, EnterBarrier("bar19")) + a.send(controller, EnterBarrier("bar19", None)) a.expectMsg(ToClient(BarrierResult("bar19", false))) } @@ -463,8 +463,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { a.expectMsg(ToClient(BarrierResult("bar20", false))) b.expectNoMsg(1 second) } - a.send(barrier, EnterBarrier("bar21")) - b.send(barrier, EnterBarrier("bar21")) + a.send(barrier, EnterBarrier("bar21", None)) + b.send(barrier, EnterBarrier("bar21", None)) a.expectMsg(ToClient(BarrierResult("bar21", false))) b.expectMsg(ToClient(BarrierResult("bar21", false))) } @@ -486,7 +486,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { EventFilter[BarrierTimeout](occurrences = 1) intercept { Thread.sleep(4000) } - c.send(barrier, EnterBarrier("bar22")) + c.send(barrier, EnterBarrier("bar22", None)) a.expectMsg(ToClient(BarrierResult("bar22", false))) b.expectMsg(ToClient(BarrierResult("bar22", false))) c.expectMsg(ToClient(BarrierResult("bar22", false))) @@ -509,7 +509,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { EventFilter[BarrierTimeout](occurrences = 1) intercept { Thread.sleep(4000) } - c.send(barrier, EnterBarrier("bar23")) + c.send(barrier, EnterBarrier("bar23", None)) a.expectMsg(ToClient(BarrierResult("bar23", false))) b.expectMsg(ToClient(BarrierResult("bar23", false))) c.expectMsg(ToClient(BarrierResult("bar23", false))) From 5f066165ba3a41179e502d18260161dbfe39831f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 16:34:02 +0200 Subject: [PATCH 453/538] Commenting out Ticket1978AES128CounterRNGSecureSpec --- .../test/scala/akka/remote/Ticket1978CommunicationSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala 
b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 09253c1387..9ecc74ca71 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -83,8 +83,8 @@ class Ticket1978AES128CounterRNGFastSpec extends Ticket1978CommunicationSpec(get /** * Both of the Secure variants require access to the Internet to access random.org. */ -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) +//@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +//class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) /** * Both of the Secure variants require access to the Internet to access random.org. From 14184e99e96efa9b5bd90da40f987c3193892bc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 19 Jun 2012 17:10:03 +0200 Subject: [PATCH 454/538] Changed tests to use enterBarrier --- .../akka/cluster/JoinTwoClustersSpec.scala | 8 ++-- .../akka/cluster/LeaderLeavingSpec.scala | 6 +-- .../scala/akka/cluster/TransitionSpec.scala | 44 +++++++++---------- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 4b64bb6e58..16be8c0c68 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -44,7 +44,7 @@ abstract class JoinTwoClustersSpec runOn(a1, b1, c1) { startClusterNode() } - testConductor.enter("first-started") + enterBarrier("first-started") runOn(a1, a2) { cluster.join(a1Address) @@ -62,7 
+62,7 @@ abstract class JoinTwoClustersSpec assertLeader(b1, b2) assertLeader(c1, c2) - testConductor.enter("two-members") + enterBarrier("two-members") runOn(b2) { cluster.join(a1Address) @@ -75,7 +75,7 @@ abstract class JoinTwoClustersSpec assertLeader(a1, a2, b1, b2) assertLeader(c1, c2) - testConductor.enter("four-members") + enterBarrier("four-members") } "be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in { @@ -88,7 +88,7 @@ abstract class JoinTwoClustersSpec assertLeader(a1, a2, b1, b2, c1, c2) - testConductor.enter("six-members") + enterBarrier("six-members") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index 37312a7351..ad15fdc21d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -50,7 +50,7 @@ abstract class LeaderLeavingSpec if (cluster.isLeader) { cluster.leave(oldLeaderAddress) - testConductor.enter("leader-left") + enterBarrier("leader-left") // verify that a NEW LEADER have taken over awaitCond(!cluster.isLeader) @@ -63,7 +63,7 @@ abstract class LeaderLeavingSpec } else { - testConductor.enter("leader-left") + enterBarrier("leader-left") // verify that the LEADER is LEAVING awaitCond(cluster.latestGossip.members.exists(m => m.status == MemberStatus.Leaving && m.address == oldLeaderAddress)) // wait on LEAVING @@ -81,7 +81,7 @@ abstract class LeaderLeavingSpec awaitCond(cluster.leader != oldLeaderAddress) } - testConductor.enter("finished") + enterBarrier("finished") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 0fb3cb03c4..0130c5bc1c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -83,18 +83,18 @@ abstract class TransitionSpec gossipBarrierCounter += 1 runOn(toRole) { val g = cluster.latestGossip - testConductor.enter("before-gossip-" + gossipBarrierCounter) + enterBarrier("before-gossip-" + gossipBarrierCounter) awaitCond(cluster.latestGossip != g) // received gossip - testConductor.enter("after-gossip-" + gossipBarrierCounter) + enterBarrier("after-gossip-" + gossipBarrierCounter) } runOn(fromRole) { - testConductor.enter("before-gossip-" + gossipBarrierCounter) + enterBarrier("before-gossip-" + gossipBarrierCounter) cluster.gossipTo(node(toRole).address) // send gossip - testConductor.enter("after-gossip-" + gossipBarrierCounter) + enterBarrier("after-gossip-" + gossipBarrierCounter) } runOn(roles.filterNot(r ⇒ r == fromRole || r == toRole): _*) { - testConductor.enter("before-gossip-" + gossipBarrierCounter) - testConductor.enter("after-gossip-" + gossipBarrierCounter) + enterBarrier("before-gossip-" + gossipBarrierCounter) + enterBarrier("after-gossip-" + gossipBarrierCounter) } } } @@ -110,7 +110,7 @@ abstract class TransitionSpec cluster.leaderActions() cluster.status must be(Up) - testConductor.enter("after-1") + enterBarrier("after-1") } "perform correct transitions when second joining first" taggedAs LongRunningTest in { @@ -124,7 +124,7 @@ abstract class TransitionSpec memberStatus(second) must be(Joining) cluster.convergence.isDefined must be(false) } - testConductor.enter("second-joined") + enterBarrier("second-joined") first gossipTo second runOn(second) { @@ -151,14 +151,14 @@ abstract class TransitionSpec memberStatus(second) must be(Joining) cluster.convergence.isDefined must be(true) } - testConductor.enter("convergence-joining-2") + enterBarrier("convergence-joining-2") runOn(leader(first, second)) { cluster.leaderActions() memberStatus(first) must be(Up) memberStatus(second) must be(Up) } - testConductor.enter("leader-actions-2") + 
enterBarrier("leader-actions-2") leader(first, second) gossipTo nonLeader(first, second).head runOn(nonLeader(first, second).head) { @@ -176,7 +176,7 @@ abstract class TransitionSpec cluster.convergence.isDefined must be(true) } - testConductor.enter("after-2") + enterBarrier("after-2") } "perform correct transitions when third joins second" taggedAs LongRunningTest in { @@ -190,7 +190,7 @@ abstract class TransitionSpec memberStatus(third) must be(Joining) seenLatestGossip must be(Set(second)) } - testConductor.enter("third-joined-second") + enterBarrier("third-joined-second") second gossipTo first runOn(first) { @@ -234,7 +234,7 @@ abstract class TransitionSpec cluster.convergence.isDefined must be(true) } - testConductor.enter("convergence-joining-3") + enterBarrier("convergence-joining-3") runOn(leader(first, second, third)) { cluster.leaderActions() @@ -242,7 +242,7 @@ abstract class TransitionSpec memberStatus(second) must be(Up) memberStatus(third) must be(Up) } - testConductor.enter("leader-actions-3") + enterBarrier("leader-actions-3") // leader gossipTo first non-leader leader(first, second, third) gossipTo nonLeader(first, second, third).head @@ -281,7 +281,7 @@ abstract class TransitionSpec cluster.convergence.isDefined must be(true) } - testConductor.enter("after-3") + enterBarrier("after-3") } "startup a second separated cluster consisting of nodes fourth and fifth" taggedAs LongRunningTest in { @@ -299,9 +299,9 @@ abstract class TransitionSpec cluster.gossipTo(fourth) cluster.convergence.isDefined must be(true) } - testConductor.enter("fourth-joined-fifth") + enterBarrier("fourth-joined-fifth") - testConductor.enter("after-4") + enterBarrier("after-4") } "perform correct transitions when second cluster (node fourth) joins first cluster (node third)" taggedAs LongRunningTest in { @@ -313,7 +313,7 @@ abstract class TransitionSpec awaitMembers(first, second, third, fourth) seenLatestGossip must be(Set(third)) } - 
testConductor.enter("fourth-joined-third") + enterBarrier("fourth-joined-third") third gossipTo second runOn(second) { @@ -365,7 +365,7 @@ abstract class TransitionSpec memberStatus(fifth) must be(Up) cluster.convergence.isDefined must be(true) - testConductor.enter("convergence-joining-3") + enterBarrier("convergence-joining-3") runOn(leader(roles: _*)) { cluster.leaderActions() @@ -378,7 +378,7 @@ abstract class TransitionSpec x gossipTo y } - testConductor.enter("spread-5") + enterBarrier("spread-5") seenLatestGossip must be(roles.toSet) memberStatus(first) must be(Up) @@ -388,7 +388,7 @@ abstract class TransitionSpec memberStatus(fifth) must be(Up) cluster.convergence.isDefined must be(true) - testConductor.enter("after-5") + enterBarrier("after-5") } "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest in { @@ -428,7 +428,7 @@ abstract class TransitionSpec cluster.convergence.isDefined must be(true) } - testConductor.enter("after-6") + enterBarrier("after-6") } } From d38aa2ed9c99a4dade822f017668ce909c601b02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 19 Jun 2012 20:11:54 +0200 Subject: [PATCH 455/538] Added ScalaDoc about the Leaving, Exiting and Removed states --- .../src/main/scala/akka/cluster/Cluster.scala | 38 +++++++++---------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 411c9d4b18..d9496b3235 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -211,28 +211,32 @@ object Gossip { * Represents the state of the cluster; cluster ring membership, ring convergence, meta data - * all versioned by a vector clock. * - * When a node is joining the Member, with status Joining, is added to `members`. 
- * If the joining node was downed it is moved from `overview.unreachable` (status Down) - * to `members` (status Joining). It cannot rejoin if not first downed. + * When a node is joining the `Member`, with status `Joining`, is added to `members`. + * If the joining node was downed it is moved from `overview.unreachable` (status `Down`) + * to `members` (status `Joining`). It cannot rejoin if not first downed. * - * When convergence is reached the leader change status of `members` from Joining - * to Up. + * When convergence is reached the leader change status of `members` from `Joining` + * to `Up`. * * When failure detector consider a node as unavailble it will be moved from * `members` to `overview.unreachable`. * - * When a node is downed, either manually or automatically, its status is changed to Down. - * It is also removed from `overview.seen` table. - * The node will reside as Down in the `overview.unreachable` set until joining - * again and it will then go through the normal joining procedure. + * When a node is downed, either manually or automatically, its status is changed to `Down`. + * It is also removed from `overview.seen` table. The node will reside as `Down` in the + * `overview.unreachable` set until joining again and it will then go through the normal + * joining procedure. * - * When a Gossip is received the version (vector clock) is used to determine if the - * received Gossip is newer or older than the current local Gossip. The received Gossip - * and local Gossip is merged in case of conflicting version, i.e. vector clocks without + * When a `Gossip` is received the version (vector clock) is used to determine if the + * received `Gossip` is newer or older than the current local `Gossip`. The received `Gossip` + * and local `Gossip` is merged in case of conflicting version, i.e. vector clocks without * same history. When merged the seen table is cleared. 
* - * TODO document leaving, exiting and removed when that is implemented - * + * When a node is told by the user to leave the cluster the leader will move it to `Leaving` + * and then rebalance and repartition the cluster and start hand-off by migrating the actors + * from the leaving node to the new partitions. Once this process is complete the leader will + * move the node to the `Exiting` state and once a convergence is complete move the node to + * `Removed` by removing it from the `members` set and sending a `Removed` command to the + * removed node telling it to shut itself down. */ case class Gossip( overview: GossipOverview = GossipOverview(), @@ -1230,12 +1234,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // 10. Success - run all the side-effecting processing // ---------------------- - // if (removedMembers.exists(_.address == selfAddress)) { - // // we now know that this node (the leader) is just about to shut down since it will be moved from EXITING -> REMOVED - // // so now let's gossip out this information directly since there will not be any other chance - // gossip() - // } - // log the move of members from joining to up upMembers foreach { member ⇒ log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) } From f5fb24719dd6f75d65d6ec5fe3d539aa43a9dc3a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 21:19:19 +0200 Subject: [PATCH 456/538] Disabling SSL once again --- .../scala/akka/remote/Ticket1978CommunicationSpec.scala | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 9ecc74ca71..778214a869 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -50,6 
+50,9 @@ object Configuration { """ def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { + + if (true) throw new IllegalArgumentException("Ticket1978*Spec isn't enabled") + val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") val settings = new NettySettings(fullConfig, "placeholder") @@ -83,8 +86,8 @@ class Ticket1978AES128CounterRNGFastSpec extends Ticket1978CommunicationSpec(get /** * Both of the Secure variants require access to the Internet to access random.org. */ -//@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -//class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) /** * Both of the Secure variants require access to the Internet to access random.org. 
From 64566e6912db40711c6818771f7345cae1486d75 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 19 Jun 2012 22:44:01 +0200 Subject: [PATCH 457/538] Rewriting large parts of the SSL tests and adding cleanup to the code --- .../scala/akka/remote/RemoteTransport.scala | 8 +- .../remote/netty/NettyRemoteSupport.scala | 10 +- .../akka/remote/netty/NettySSLSupport.scala | 25 ++- .../remote/Ticket1978CommunicationSpec.scala | 151 ++++++------------ 4 files changed, 76 insertions(+), 118 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index aefd34ec74..f6b85dbc0d 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -106,8 +106,14 @@ case class RemoteServerShutdown( case class RemoteServerError( @BeanProperty val cause: Throwable, @transient @BeanProperty remote: RemoteTransport) extends RemoteServerLifeCycleEvent { + + cause match { + case s: javax.net.ssl.SSLException ⇒ var e: Throwable = s; while (e.getCause ne null) e = e.getCause; println(Logging.stackTraceFor(e)) + case _ ⇒ + } + override def logLevel: Logging.LogLevel = Logging.ErrorLevel - override def toString: String = "RemoteServerError@" + remote + "] Error[" + cause + "]" + override def toString: String = "RemoteServerError@" + remote + "] Error[" + Logging.stackTraceFor(cause) + "]" } /** diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 5f62bb58c8..9fc64b0a68 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -65,17 +65,15 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider * @param withTimeout determines whether an IdleStateHandler shall be included */ def 
apply(endpoint: ⇒ Seq[ChannelHandler], withTimeout: Boolean, isClient: Boolean): ChannelPipelineFactory = - new ChannelPipelineFactory { - def getPipeline = apply(defaultStack(withTimeout, isClient) ++ endpoint) - } + new ChannelPipelineFactory { override def getPipeline = apply(defaultStack(withTimeout, isClient) ++ endpoint) } /** * Construct a default protocol stack, excluding the “head” handler (i.e. the one which * actually dispatches the received messages to the local target actors). */ def defaultStack(withTimeout: Boolean, isClient: Boolean): Seq[ChannelHandler] = - (if (settings.EnableSSL) NettySSLSupport(settings, NettyRemoteTransport.this.log, isClient) :: Nil else Nil) ::: - (if (withTimeout) timeout :: Nil else Nil) ::: + (if (settings.EnableSSL) List(NettySSLSupport(settings, NettyRemoteTransport.this.log, isClient)) else Nil) ::: + (if (withTimeout) List(timeout) else Nil) ::: msgFormat ::: authenticator ::: executionHandler :: @@ -116,7 +114,7 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider * protect the TCP port from unauthorized use (don’t rely on it too much, though, * as this is NOT a cryptographic feature). 
*/ - def authenticator = if (settings.RequireCookie) new RemoteServerAuthenticationHandler(settings.SecureCookie) :: Nil else Nil + def authenticator = if (settings.RequireCookie) List(new RemoteServerAuthenticationHandler(settings.SecureCookie)) else Nil } /** diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index cca8662b2f..8915af559e 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -60,11 +60,16 @@ private[akka] object NettySSLSupport { def constructClientContext(settings: NettySettings, log: LoggingAdapter, trustStorePath: String, trustStorePassword: String, protocol: String): Option[SSLContext] = try { - val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) - val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) - trustStore.load(new FileInputStream(trustStorePath), trustStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? 
- trustManagerFactory.init(trustStore) - val trustManagers: Array[TrustManager] = trustManagerFactory.getTrustManagers + val trustManagers: Array[TrustManager] = { + val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) + trustManagerFactory.init({ + val trustStore = KeyStore.getInstance(KeyStore.getDefaultType) + val fin = new FileInputStream(trustStorePath) + try trustStore.load(fin, trustStorePassword.toCharArray) finally fin.close() + trustStore + }) + trustManagerFactory.getTrustManagers + } Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) @@ -102,9 +107,13 @@ private[akka] object NettySSLSupport { def constructServerContext(settings: NettySettings, log: LoggingAdapter, keyStorePath: String, keyStorePassword: String, protocol: String): Option[SSLContext] = try { val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) - val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) - keyStore.load(new FileInputStream(keyStorePath), keyStorePassword.toCharArray) //FIXME does the FileInputStream need to be closed? 
- factory.init(keyStore, keyStorePassword.toCharArray) + + factory.init({ + val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) + val fin = new FileInputStream(keyStorePath) + try keyStore.load(fin, keyStorePassword.toCharArray) finally fin.close() + keyStore + }, keyStorePassword.toCharArray) Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 778214a869..ee82e448bd 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -9,12 +9,12 @@ import com.typesafe.config._ import akka.dispatch.{ Await, Future } import akka.pattern.ask import java.io.File -import akka.event.{ NoLogging, LoggingAdapter } import java.security.{ NoSuchAlgorithmException, SecureRandom, PrivilegedAction, AccessController } import netty.{ NettySettings, NettySSLSupport } import javax.net.ssl.SSLException import akka.util.{ Timeout, Duration } import akka.util.duration._ +import akka.event.{ Logging, NoLogging, LoggingAdapter } object Configuration { // set this in your JAVA_OPTS to see all ssl debug info: "-Djavax.net.debug=ssl,keymanager" @@ -32,6 +32,7 @@ object Configuration { remote.netty { hostname = localhost + port = %d ssl { enable = on trust-store = "%s" @@ -41,41 +42,41 @@ object Configuration { sha1prng-random-source = "/dev/./urandom" } } - actor.deployment { - /blub.remote = "akka://remote-sys@localhost:12346" - /looker/child.remote = "akka://remote-sys@localhost:12346" - 
/looker/child/grandchild.remote = "akka://Ticket1978CommunicationSpec@localhost:12345" - } } """ - def getCipherConfig(cipher: String, enabled: String*): (String, Boolean, Config) = try { + case class CipherConfig(runTest: Boolean, config: Config, cipher: String, localPort: Int, remotePort: Int) - if (true) throw new IllegalArgumentException("Ticket1978*Spec isn't enabled") + def getCipherConfig(cipher: String, enabled: String*): CipherConfig = { + val localPort, remotePort = { val s = new java.net.ServerSocket(0); try s.getLocalPort finally s.close() } + try { - val config = ConfigFactory.parseString("akka.remote.netty.port=12345").withFallback(ConfigFactory.parseString(conf.format(trustStore, keyStore, cipher, enabled.mkString(", ")))) - val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") - val settings = new NettySettings(fullConfig, "placeholder") + //if (true) throw new IllegalArgumentException("Ticket1978*Spec isn't enabled") - val rng = NettySSLSupport.initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, NoLogging) + val config = ConfigFactory.parseString(conf.format(localPort, trustStore, keyStore, cipher, enabled.mkString(", "))) + val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty") + val settings = new NettySettings(fullConfig, "placeholder") - rng.nextInt() // Has to work - settings.SSLRandomNumberGenerator foreach { sRng ⇒ rng.getAlgorithm == sRng || (throw new NoSuchAlgorithmException(sRng)) } + val rng = NettySSLSupport.initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, NoLogging) - val engine = NettySSLSupport.initializeClientSSL(settings, NoLogging).getEngine - val gotAllSupported = enabled.toSet -- engine.getSupportedCipherSuites.toSet - val gotAllEnabled = enabled.toSet -- engine.getEnabledCipherSuites.toSet - gotAllSupported.isEmpty || 
(throw new IllegalArgumentException("Cipher Suite not supported: " + gotAllSupported)) - gotAllEnabled.isEmpty || (throw new IllegalArgumentException("Cipher Suite not enabled: " + gotAllEnabled)) - engine.getSupportedProtocols.contains(settings.SSLProtocol.get) || (throw new IllegalArgumentException("Protocol not supported: " + settings.SSLProtocol.get)) + rng.nextInt() // Has to work + settings.SSLRandomNumberGenerator foreach { sRng ⇒ rng.getAlgorithm == sRng || (throw new NoSuchAlgorithmException(sRng)) } - (cipher, true, config) - } catch { - case (_: IllegalArgumentException) | (_: NoSuchAlgorithmException) ⇒ (cipher, false, AkkaSpec.testConf) // Cannot match against the message since the message might be localized :S + val engine = NettySSLSupport.initializeClientSSL(settings, NoLogging).getEngine + val gotAllSupported = enabled.toSet -- engine.getSupportedCipherSuites.toSet + val gotAllEnabled = enabled.toSet -- engine.getEnabledCipherSuites.toSet + gotAllSupported.isEmpty || (throw new IllegalArgumentException("Cipher Suite not supported: " + gotAllSupported)) + gotAllEnabled.isEmpty || (throw new IllegalArgumentException("Cipher Suite not enabled: " + gotAllEnabled)) + engine.getSupportedProtocols.contains(settings.SSLProtocol.get) || (throw new IllegalArgumentException("Protocol not supported: " + settings.SSLProtocol.get)) + + CipherConfig(true, config, cipher, localPort, remotePort) + } catch { + case (_: IllegalArgumentException) | (_: NoSuchAlgorithmException) ⇒ CipherConfig(false, AkkaSpec.testConf, cipher, localPort, remotePort) // Cannot match against the message since the message might be localized :S + } } } -import Configuration.getCipherConfig +import Configuration.{ CipherConfig, getCipherConfig } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978SHA1PRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("SHA1PRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) @@ -99,101 +100,45 @@ class 
Ticket1978AES256CounterRNGSecureSpec extends Ticket1978CommunicationSpec(g class Ticket1978DefaultRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("", "TLS_RSA_WITH_AES_128_CBC_SHA")) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(("NonExistingRNG", false, AkkaSpec.testConf)) +class Ticket1978CrappyRSAWithMD5OnlyHereToMakeSureThingsWorkSpec extends Ticket1978CommunicationSpec(getCipherConfig("", "SSL_RSA_WITH_NULL_MD5")) -abstract class Ticket1978CommunicationSpec(val cipherEnabledconfig: (String, Boolean, Config)) extends AkkaSpec(cipherEnabledconfig._3) with ImplicitSender { +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(CipherConfig(false, AkkaSpec.testConf, "NonExistingRNG", 12345, 12346)) + +abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) extends AkkaSpec(cipherConfig.config) with ImplicitSender { implicit val timeout: Timeout = Timeout(5 seconds) import RemoteCommunicationSpec._ - val other = ActorSystem("remote-sys", ConfigFactory.parseString("akka.remote.netty.port=12346").withFallback(system.settings.config)) + lazy val other: ActorSystem = ActorSystem( + "remote-sys", + ConfigFactory.parseString("akka.remote.netty.port=" + cipherConfig.remotePort).withFallback(system.settings.config)) override def atTermination() { - other.shutdown() - other.awaitTermination() + if (cipherConfig.runTest) { + other.shutdown() + other.awaitTermination() + } } - "SSL Remoting" must { - if (cipherEnabledconfig._2) { - val remote = other.actorOf(Props(new Actor { def receive = { case "ping" ⇒ sender ! (("pong", sender)) } }), "echo") + ("- SSL communication") must { + if (cipherConfig.runTest) { + val ignoreMe = other.actorOf(Props(new Actor { def receive = { case ("ping", x) ⇒ sender ! 
((("pong", x), sender)) } }), "echo") + val otherAddress = other.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].transport.address - val here = system.actorFor("akka://remote-sys@localhost:12346/user/echo") - - "support remote look-ups" in { - here ! "ping" - expectMsgPF(timeout.duration) { - case ("pong", s: AnyRef) if s eq testActor ⇒ true - } - } - - "send error message for wrong address" ignore { - within(timeout.duration) { - EventFilter.error(start = "dropping", occurrences = 1).intercept { - system.actorFor("akka://remotesys@localhost:12346/user/echo") ! "ping" - }(other) - } + "support tell" in { + val here = system.actorFor(otherAddress.toString + "/user/echo") + for (i ← 1 to 100) here ! (("ping", i)) + for (i ← 1 to 100) expectMsgPF(timeout.duration) { case (("pong", i), `testActor`) ⇒ true } } "support ask" in { - Await.result(here ? "ping", timeout.duration) match { - case ("pong", s: akka.pattern.PromiseActorRef) ⇒ // good - case m ⇒ fail(m + " was not (pong, AskActorRef)") - } + val here = system.actorFor(otherAddress.toString + "/user/echo") + val f = for (i ← 1 to 1000) yield here ? (("ping", i)) mapTo manifest[((String, Int), ActorRef)] + Await.result(Future.sequence(f), timeout.duration).map(_._1._1).toSet must be(Set("pong")) } - "send dead letters on remote if actor does not exist" in { - within(timeout.duration) { - EventFilter.warning(pattern = "dead.*buh", occurrences = 1).intercept { - system.actorFor("akka://remote-sys@localhost:12346/does/not/exist") ! "buh" - }(other) - } - } - - "create and supervise children on remote node" in { - within(timeout.duration) { - val r = system.actorOf(Props[Echo], "blub") - r.path.toString must be === "akka://remote-sys@localhost:12346/remote/Ticket1978CommunicationSpec@localhost:12345/user/blub" - r ! 42 - expectMsg(42) - EventFilter[Exception]("crash", occurrences = 1).intercept { - r ! new Exception("crash") - }(other) - expectMsg("preRestart") - r ! 
42 - expectMsg(42) - } - } - - "look-up actors across node boundaries" in { - within(timeout.duration) { - val l = system.actorOf(Props(new Actor { - def receive = { - case (p: Props, n: String) ⇒ sender ! context.actorOf(p, n) - case s: String ⇒ sender ! context.actorFor(s) - } - }), "looker") - l ! (Props[Echo], "child") - val r = expectMsgType[ActorRef] - r ! (Props[Echo], "grandchild") - val remref = expectMsgType[ActorRef] - remref.isInstanceOf[LocalActorRef] must be(true) - val myref = system.actorFor(system / "looker" / "child" / "grandchild") - myref.isInstanceOf[RemoteActorRef] must be(true) - myref ! 43 - expectMsg(43) - lastSender must be theSameInstanceAs remref - r.asInstanceOf[RemoteActorRef].getParent must be(l) - system.actorFor("/user/looker/child") must be theSameInstanceAs r - Await.result(l ? "child/..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l - Await.result(system.actorFor(system / "looker" / "child") ? "..", timeout.duration).asInstanceOf[AnyRef] must be theSameInstanceAs l - } - } - - "not fail ask across node boundaries" in { - val f = for (_ ← 1 to 1000) yield here ? 
"ping" mapTo manifest[(String, ActorRef)] - Await.result(Future.sequence(f), timeout.duration).map(_._1).toSet must be(Set("pong")) - } } else { "not be run when the cipher is not supported by the platform this test is currently being executed on" ignore { From 8a7c8a2cedff5f1cbfd404f4b853b3fec18a1229 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 00:10:06 +0200 Subject: [PATCH 458/538] Restructuring some of the SSL code to avoid the risk of races --- .../akka/remote/netty/NettyRemoteSupport.scala | 12 +++--------- .../akka/remote/netty/NettySSLSupport.scala | 17 ++++++++++------- .../akka/security/provider/AkkaProvider.scala | 6 +++--- 3 files changed, 16 insertions(+), 19 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 9fc64b0a68..a596f783d7 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -24,7 +24,7 @@ import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteActorRefPr import akka.util.NonFatal import akka.actor.{ ExtendedActorSystem, Address, ActorRef } -object ChannelAddress extends ChannelLocal[Option[Address]] { +private[akka] object ChannelAddress extends ChannelLocal[Option[Address]] { override def initialValue(ch: Channel): Option[Address] = None } @@ -54,9 +54,7 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider * in implementations of ChannelPipelineFactory. 
*/ def apply(handlers: Seq[ChannelHandler]): DefaultChannelPipeline = - handlers.foldLeft(new DefaultChannelPipeline) { - (pipe, handler) ⇒ pipe.addLast(Logging.simpleName(handler.getClass), handler); pipe - } + (new DefaultChannelPipeline /: handlers) { (p, h) ⇒ p.addLast(Logging.simpleName(h.getClass), h); p } /** * Constructs the NettyRemoteTransport default pipeline with the give “head” handler, which @@ -73,11 +71,7 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider */ def defaultStack(withTimeout: Boolean, isClient: Boolean): Seq[ChannelHandler] = (if (settings.EnableSSL) List(NettySSLSupport(settings, NettyRemoteTransport.this.log, isClient)) else Nil) ::: - (if (withTimeout) List(timeout) else Nil) ::: - msgFormat ::: - authenticator ::: - executionHandler :: - Nil + (if (withTimeout) List(timeout) else Nil) ::: msgFormat ::: authenticator ::: executionHandler :: Nil /** * Construct an IdleStateHandler which uses [[akka.remote.netty.NettyRemoteTransport]].timer. 
diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 8915af559e..0ab188425c 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -60,6 +60,7 @@ private[akka] object NettySSLSupport { def constructClientContext(settings: NettySettings, log: LoggingAdapter, trustStorePath: String, trustStorePassword: String, protocol: String): Option[SSLContext] = try { + val rng = initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log) val trustManagers: Array[TrustManager] = { val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) trustManagerFactory.init({ @@ -70,7 +71,7 @@ private[akka] object NettySSLSupport { }) trustManagerFactory.getTrustManagers } - Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(null, trustManagers, rng); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because trust store could not be loaded", e) case e: IOException ⇒ throw new RemoteTransportException("Client SSL connection could not be established because: " + e.getMessage, e) @@ -87,10 +88,12 @@ private[akka] object NettySSLSupport { }) match { case Some(context) ⇒ log.debug("Using client SSL context to create SSLEngine ...") - val sslEngine = context.createSSLEngine - sslEngine.setUseClientMode(true) - sslEngine.setEnabledCipherSuites(settings.SSLEnabledAlgorithms.toArray.map(_.toString)) - new SslHandler(sslEngine) + new SslHandler({ + val sslEngine = context.createSSLEngine + sslEngine.setUseClientMode(true) + 
sslEngine.setEnabledCipherSuites(settings.SSLEnabledAlgorithms.toArray.map(_.toString)) + sslEngine + }) case None ⇒ throw new GeneralSecurityException( """Failed to initialize client SSL because SSL context could not be found." + @@ -106,15 +109,15 @@ private[akka] object NettySSLSupport { def constructServerContext(settings: NettySettings, log: LoggingAdapter, keyStorePath: String, keyStorePassword: String, protocol: String): Option[SSLContext] = try { + val rng = initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log) val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) - factory.init({ val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) val fin = new FileInputStream(keyStorePath) try keyStore.load(fin, keyStorePassword.toCharArray) finally fin.close() keyStore }, keyStorePassword.toCharArray) - Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, initializeCustomSecureRandom(settings.SSLRandomNumberGenerator, settings.SSLRandomSource, log)); ctx } + Option(SSLContext.getInstance(protocol)) map { ctx ⇒ ctx.init(factory.getKeyManagers, null, rng); ctx } } catch { case e: FileNotFoundException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) case e: IOException ⇒ throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) diff --git a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala index f44aeae584..1ed93557a6 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala @@ -12,9 +12,9 @@ final class AkkaProvider extends Provider("Akka", 1.0, "Akka provider 1.0 that i AccessController.doPrivileged(new PrivilegedAction[AkkaProvider] { 
def run = { //SecureRandom - put("SecureRandom.AES128CounterRNGFast", "akka.security.provider.AES128CounterRNGFast") - put("SecureRandom.AES128CounterRNGSecure", "akka.security.provider.AES128CounterRNGSecure") - put("SecureRandom.AES256CounterRNGSecure", "akka.security.provider.AES256CounterRNGSecure") + put("SecureRandom.AES128CounterRNGFast", classOf[AES128CounterRNGFast].getName) + put("SecureRandom.AES128CounterRNGSecure", classOf[AES128CounterRNGSecure].getName) + put("SecureRandom.AES256CounterRNGSecure", classOf[AES256CounterRNGSecure].getName) //Implementation type: software or hardware put("SecureRandom.AES128CounterRNGFast ImplementedIn", "Software") From dbe72a6bf3e1464049e9f71fed21abe5a70eceb4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 00:47:11 +0200 Subject: [PATCH 459/538] Making AkkaProvider a Scala Object --- .../main/scala/akka/remote/netty/NettySSLSupport.scala | 9 ++++----- .../src/main/scala/akka/remote/netty/Settings.scala | 3 ++- .../main/scala/akka/security/provider/AkkaProvider.scala | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 0ab188425c..9a16a1e5f5 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -18,8 +18,7 @@ import akka.security.provider.AkkaProvider */ private[akka] object NettySSLSupport { - val akka = new AkkaProvider - Security.addProvider(akka) + Security addProvider AkkaProvider /** * Construct a SSLHandler which can be inserted into a Netty server/client pipeline @@ -38,7 +37,7 @@ private[akka] object NettySSLSupport { val rng = rngName match { case Some(r @ ("AES128CounterRNGFast" | "AES128CounterRNGSecure" | "AES256CounterRNGSecure")) ⇒ log.debug("SSL random number generator set to: {}", r) - SecureRandom.getInstance(r, akka) + 
SecureRandom.getInstance(r, AkkaProvider) case Some("SHA1PRNG") ⇒ log.debug("SSL random number generator set to: SHA1PRNG") // This needs /dev/urandom to be the source on Linux to prevent problems with /dev/random blocking @@ -91,7 +90,7 @@ private[akka] object NettySSLSupport { new SslHandler({ val sslEngine = context.createSSLEngine sslEngine.setUseClientMode(true) - sslEngine.setEnabledCipherSuites(settings.SSLEnabledAlgorithms.toArray.map(_.toString)) + sslEngine.setEnabledCipherSuites(settings.SSLEnabledAlgorithms.toArray) sslEngine }) case None ⇒ @@ -133,7 +132,7 @@ private[akka] object NettySSLSupport { log.debug("Using server SSL context to create SSLEngine ...") val sslEngine = context.createSSLEngine sslEngine.setUseClientMode(false) - sslEngine.setEnabledCipherSuites(settings.SSLEnabledAlgorithms.toArray.map(_.toString)) + sslEngine.setEnabledCipherSuites(settings.SSLEnabledAlgorithms.toArray) new SslHandler(sslEngine) case None ⇒ throw new GeneralSecurityException( """Failed to initialize server SSL because SSL context could not be found. 
diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index d168c67eca..ada257f674 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -8,6 +8,7 @@ import akka.util.Duration import java.util.concurrent.TimeUnit._ import java.net.InetAddress import akka.ConfigurationException +import scala.collection.JavaConverters.iterableAsScalaIterableConverter private[akka] class NettySettings(config: Config, val systemName: String) { @@ -106,7 +107,7 @@ private[akka] class NettySettings(config: Config, val systemName: String) { case password ⇒ Some(password) } - val SSLEnabledAlgorithms = getStringList("ssl.enabled-algorithms").toArray.toSet + val SSLEnabledAlgorithms = iterableAsScalaIterableConverter(getStringList("ssl.enabled-algorithms")).asScala.toSet[String] val SSLProtocol = getString("ssl.protocol") match { case "" ⇒ None diff --git a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala index 1ed93557a6..0b85231348 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala @@ -3,13 +3,13 @@ */ package akka.security.provider -import java.security.{ PrivilegedAction, AccessController, Provider } +import java.security.{ PrivilegedAction, AccessController, Provider, Security } /** * A provider that for AES128CounterRNGFast, a cryptographically secure random number generator through SecureRandom */ -final class AkkaProvider extends Provider("Akka", 1.0, "Akka provider 1.0 that implements a secure AES random number generator") { - AccessController.doPrivileged(new PrivilegedAction[AkkaProvider] { +object AkkaProvider extends Provider("Akka", 1.0, "Akka provider 1.0 that implements a secure AES random number generator") { + 
AccessController.doPrivileged(new PrivilegedAction[this.type] { def run = { //SecureRandom put("SecureRandom.AES128CounterRNGFast", classOf[AES128CounterRNGFast].getName) From 95419ba82f76539152882fc25e0187f44d049f10 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 01:56:15 +0200 Subject: [PATCH 460/538] Somehow the code for the fast and the secure was flipped --- .../akka/security/provider/AES128CounterRNGFast.scala | 6 +++--- .../akka/security/provider/AES128CounterRNGSecure.scala | 6 +++--- .../src/test/scala/akka/remote/Ticket1978ConfigSpec.scala | 7 +------ 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala index c355f5a548..1c58c4f1d0 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala @@ -3,14 +3,14 @@ */ package akka.security.provider -import org.uncommons.maths.random.{ AESCounterRNG, SecureRandomSeedGenerator } +import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } import java.security.SecureRandom /** * Internal API */ class AES128CounterRNGFast extends java.security.SecureRandomSpi { - private val rng = new AESCounterRNG(new SecureRandomSeedGenerator()) + private val rng = new AESCounterRNG() /** * This is managed internally only @@ -31,6 +31,6 @@ class AES128CounterRNGFast extends java.security.SecureRandomSpi { * @param numBytes the number of seed bytes to generate. * @return the seed bytes. 
*/ - override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = (new SecureRandom).generateSeed(numBytes) + override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = DefaultSeedGenerator.getInstance.generateSeed(numBytes) } diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala index 846476cc2d..60beecded7 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala @@ -3,13 +3,13 @@ */ package akka.security.provider -import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } +import org.uncommons.maths.random.{ AESCounterRNG, SecureRandomSeedGenerator } /** * Internal API */ class AES128CounterRNGSecure extends java.security.SecureRandomSpi { - private val rng = new AESCounterRNG() + private val rng = new AESCounterRNG(new SecureRandomSeedGenerator()) /** * This is managed internally only @@ -30,6 +30,6 @@ class AES128CounterRNGSecure extends java.security.SecureRandomSpi { * @param numBytes the number of seed bytes to generate. * @return the seed bytes. 
*/ - override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = DefaultSeedGenerator.getInstance.generateSeed(numBytes) + override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = (new SecureRandomSeedGenerator()).generateSeed(numBytes) } diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala index 4c39b94087..0a39d20a9a 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -15,12 +15,7 @@ akka { actor.provider = "akka.remote.RemoteActorRefProvider" remote.netty { hostname = localhost - port = 12345 - } - actor.deployment { - /blub.remote = "akka://remote-sys@localhost:12346" - /looker/child.remote = "akka://remote-sys@localhost:12346" - /looker/child/grandchild.remote = "akka://RemoteCommunicationSpec@localhost:12345" + port = 0 } } """) with ImplicitSender with DefaultTimeout { From 0e0f05309c41773a8d29d3006715c00076e3f2f9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 20 Jun 2012 09:19:09 +0200 Subject: [PATCH 461/538] One more barrier in TransitionSpec --- .../src/multi-jvm/scala/akka/cluster/TransitionSpec.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 20dd0c8dda..5a041fb675 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -396,6 +396,8 @@ abstract class TransitionSpec seenLatestGossip must be(Set(fifth)) } + testConductor.enter("after-second-unavailble") + // spread the word val gossipRound = List(fifth, fourth, third, first, third, fourth, fifth) for (x :: y :: Nil ← gossipRound.sliding(2)) { @@ -412,7 +414,7 @@ abstract class TransitionSpec 
awaitMemberStatus(second, Down) } - testConductor.enter("after-third-down") + testConductor.enter("after-second-down") // spread the word val gossipRound2 = List(third, fourth, fifth, first, third, fourth, fifth) From e02310847129d456dd726528ade3500f0786c68f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 20 Jun 2012 10:18:15 +0200 Subject: [PATCH 462/538] Incorporated feedback from review, see #2066 --- .../src/main/resources/reference.conf | 2 +- .../akka/cluster/AccrualFailureDetector.scala | 75 +++++++++++-------- .../scala/akka/cluster/ClusterSettings.scala | 6 +- .../cluster/AccrualFailureDetectorSpec.scala | 24 +++--- .../akka/cluster/ClusterConfigSpec.scala | 2 +- 5 files changed, 60 insertions(+), 49 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 90d02d4fd1..8bf51b4fa5 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -57,7 +57,7 @@ akka { # This margin is important to be able to survive sudden, occasional, # pauses in heartbeat arrivals, due to for example garbage collect or # network drop. - acceptable-lost-heartbeats = 3.0 + acceptable-heartbeat-pause = 3s implementation-class = "" diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 1dfac252fe..b10962ce11 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -15,12 +15,26 @@ import akka.util.Duration import akka.util.duration._ object AccrualFailureDetector { - private def realTimeMachine: () ⇒ Long = () ⇒ NANOSECONDS.toMillis(System.nanoTime) + private def realClock: () ⇒ Long = () ⇒ NANOSECONDS.toMillis(System.nanoTime) } /** * Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. 
as defined in their paper: * [http://ddg.jaist.ac.jp/pub/HDY+04.pdf] * + * The suspicion level of failure is given by a value called φ (phi). + * The basic idea of the φ failure detector is to express the value of φ on a scale that + * is dynamically adjusted to reflect current network conditions. A configurable + * threshold is used to decide if φ is considered to be a failure. + * + * The value of φ is calculated as: + * + * {{{ + * φ = -log10(1 - F(timeSinceLastHeartbeat) + * }}} + * where F is the cumulative distribution function of a normal distribution with mean + * and standard deviation estimated from historical heartbeat inter-arrival times. + * + * * @param system Belongs to the [[akka.actor.ActorSystem]]. Used for logging. * * @param threshold A low threshold is prone to generate many wrong suspicions but ensures a quick detection in the event @@ -34,7 +48,7 @@ object AccrualFailureDetector { * Too low standard deviation might result in too much sensitivity for sudden, but normal, deviations * in heartbeat inter arrival times. * - * @param acceptableLostDuration Duration corresponding to number of potentially lost/delayed + * @param acceptableHeartbeatPause Duration corresponding to number of potentially lost/delayed * heartbeats that will be accepted before considering it to be an anomaly. * This margin is important to be able to survive sudden, occasional, pauses in heartbeat * arrivals, due to for example garbage collect or network drop. @@ -43,7 +57,7 @@ object AccrualFailureDetector { * to this duration, with a with rather high standard deviation (since environment is unknown * in the beginning) * - * @timeMachine The clock, returning time in milliseconds, but can be faked for testing + * @clock The clock, returning current time in milliseconds, but can be faked for testing * purposes. It is only used for measuring intervals (duration). 
* */ @@ -52,9 +66,9 @@ class AccrualFailureDetector( val threshold: Double, val maxSampleSize: Int, val minStdDeviation: Duration, - val acceptableLostDuration: Duration, + val acceptableHeartbeatPause: Duration, val firstHeartbeatEstimate: Duration, - val timeMachine: () ⇒ Long) extends FailureDetector { + val clock: () ⇒ Long) extends FailureDetector { import AccrualFailureDetector._ @@ -64,19 +78,19 @@ class AccrualFailureDetector( def this( system: ActorSystem, settings: ClusterSettings, - timeMachine: () ⇒ Long = AccrualFailureDetector.realTimeMachine) = + clock: () ⇒ Long = AccrualFailureDetector.realClock) = this( system, settings.FailureDetectorThreshold, settings.FailureDetectorMaxSampleSize, - settings.HeartbeatInterval * settings.FailureDetectorAcceptableLostHeartbeats, + settings.FailureDetectorAcceptableHeartbeatPause, settings.FailureDetectorMinStdDeviation, // we use a conservative estimate for the first heartbeat because // gossip needs to spread back to the joining node before the // first real heartbeat is sent. Initial heartbeat is added when joining. 
// FIXME this can be changed to HeartbeatInterval when ticket #2249 is fixed settings.GossipInterval * 3 + settings.HeartbeatInterval, - timeMachine) + clock) private val log = Logging(system, "FailureDetector") @@ -89,7 +103,7 @@ class AccrualFailureDetector( HeartbeatHistory(maxSampleSize) :+ (mean - stdDeviation) :+ (mean + stdDeviation) } - private val acceptableLostMillis = acceptableLostDuration.toMillis + private val acceptableHeartbeatPauseMillis = acceptableHeartbeatPause.toMillis /** * Implement using optimistic lockless concurrency, all state is represented @@ -116,7 +130,7 @@ class AccrualFailureDetector( final def heartbeat(connection: Address) { log.debug("Heartbeat from connection [{}] ", connection) - val timestamp = timeMachine() + val timestamp = clock() val oldState = state.get val newHistory = oldState.timestamps.get(connection) match { @@ -124,7 +138,7 @@ class AccrualFailureDetector( // this is heartbeat from a new connection // add starter records for this new connection firstHeartbeat - case (Some(latestTimestamp)) ⇒ + case Some(latestTimestamp) ⇒ // this is a known connection val interval = timestamp - latestTimestamp oldState.history(connection) :+ interval @@ -140,21 +154,10 @@ class AccrualFailureDetector( } /** - * The suspicion level of accrual failure detector is given by a value called φ (phi). - * The basic idea of the φ failure detector is to express the value of φ on a scale that - * is dynamically adjusted to reflect current network conditions. - * - * The value of φ is calculated as: - * - * {{{ - * φ = -log10(1 - F(timeSinceLastHeartbeat) - * }}} - * where F is the cumulative distribution function of a normal distribution with mean - * and standard deviation estimated from historical heartbeat inter-arrival times. + * The suspicion level of the accrual failure detector. * * If a connection does not have any records in failure detector then it is * considered healthy. 
- * */ def phi(connection: Address): Double = { val oldState = state.get @@ -164,18 +167,17 @@ class AccrualFailureDetector( if (oldState.explicitRemovals.contains(connection)) Double.MaxValue else if (oldTimestamp.isEmpty) 0.0 // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections else { - val timeDiff = timeMachine() - oldTimestamp.get + val timeDiff = clock() - oldTimestamp.get val history = oldState.history(connection) val mean = history.mean val stdDeviation = ensureValidStdDeviation(history.stdDeviation) - val φ = phi(timeDiff, mean + acceptableLostMillis, stdDeviation) + val φ = phi(timeDiff, mean + acceptableHeartbeatPauseMillis, stdDeviation) // FIXME change to debug log level, when failure detector is stable - if (φ > 1.0) - log.info("Phi value [{}] for connection [{}], after [{} ms], based on [{}]", - φ, connection, timeDiff, "N(" + mean + ", " + stdDeviation + ")") + if (φ > 1.0) log.info("Phi value [{}] for connection [{}], after [{} ms], based on [{}]", + φ, connection, timeDiff, "N(" + mean + ", " + stdDeviation + ")") φ } @@ -232,7 +234,7 @@ private[cluster] object HeartbeatHistory { maxSampleSize = maxSampleSize, intervals = IndexedSeq.empty, intervalSum = 0L, - interval2Sum = 0L) + squaredIntervalSum = 0L) } @@ -247,11 +249,18 @@ private[cluster] case class HeartbeatHistory private ( maxSampleSize: Int, intervals: IndexedSeq[Long], intervalSum: Long, - interval2Sum: Long) { + squaredIntervalSum: Long) { + + if (maxSampleSize < 1) + throw new IllegalArgumentException("maxSampleSize must be >= 1, got [%s]" format maxSampleSize) + if (intervalSum < 0L) + throw new IllegalArgumentException("intervalSum must be >= 0, got [%s]" format intervalSum) + if (squaredIntervalSum < 0L) + throw new IllegalArgumentException("squaredIntervalSum must be >= 0, got [%s]" format squaredIntervalSum) def mean: Double = intervalSum.toDouble / intervals.size - def variance: Double = (interval2Sum.toDouble / intervals.size) - (mean * mean) + def 
variance: Double = (squaredIntervalSum.toDouble / intervals.size) - (mean * mean) def stdDeviation: Double = math.sqrt(variance) @@ -262,7 +271,7 @@ private[cluster] case class HeartbeatHistory private ( maxSampleSize, intervals = intervals :+ interval, intervalSum = intervalSum + interval, - interval2Sum = interval2Sum + pow2(interval)) + squaredIntervalSum = squaredIntervalSum + pow2(interval)) else dropOldest :+ interval // recur } @@ -271,7 +280,7 @@ private[cluster] case class HeartbeatHistory private ( maxSampleSize, intervals = intervals drop 1, intervalSum = intervalSum - intervals.head, - interval2Sum = interval2Sum - pow2(intervals.head)) + squaredIntervalSum = squaredIntervalSum - pow2(intervals.head)) private def pow2(x: Long) = x * x } \ No newline at end of file diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index f1e0c2d31b..e54e74617d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -20,8 +20,10 @@ class ClusterSettings(val config: Config, val systemName: String) { case "" ⇒ None case fqcn ⇒ Some(fqcn) } - final val FailureDetectorMinStdDeviation: Duration = Duration(getMilliseconds("akka.cluster.failure-detector.min-std-deviation"), MILLISECONDS) - final val FailureDetectorAcceptableLostHeartbeats: Double = getDouble("akka.cluster.failure-detector.acceptable-lost-heartbeats") + final val FailureDetectorMinStdDeviation: Duration = + Duration(getMilliseconds("akka.cluster.failure-detector.min-std-deviation"), MILLISECONDS) + final val FailureDetectorAcceptableHeartbeatPause: Duration = + Duration(getMilliseconds("akka.cluster.failure-detector.acceptable-heartbeat-pause"), MILLISECONDS) final val NodeToJoin: Option[Address] = getString("akka.cluster.node-to-join") match { case "" ⇒ None diff --git 
a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index 081fc9f0fd..5c7186502c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -37,14 +37,14 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" minStdDeviation: Duration = 10.millis, acceptableLostDuration: Duration = Duration.Zero, firstHeartbeatEstimate: Duration = 1.second, - timeMachine: () ⇒ Long = fakeTimeGenerator(defaultFakeTimeIntervals)): AccrualFailureDetector = + clock: () ⇒ Long = fakeTimeGenerator(defaultFakeTimeIntervals)): AccrualFailureDetector = new AccrualFailureDetector(system, threshold, maxSampleSize, minStdDeviation, acceptableLostDuration, firstHeartbeatEstimate = firstHeartbeatEstimate, - timeMachine = timeMachine) + clock = clock) "use good enough cumulative distribution function" in { val fd = createFailureDetector() @@ -85,7 +85,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "return phi based on guess when only one heartbeat" in { val timeInterval = List[Long](0, 1000, 1000, 1000, 1000) val fd = createFailureDetector(firstHeartbeatEstimate = 1.seconds, - timeMachine = fakeTimeGenerator(timeInterval)) + clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.phi(conn) must be(0.3 plusOrMinus 0.2) @@ -95,7 +95,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "return phi value using first interval after second heartbeat" in { val timeInterval = List[Long](0, 100, 100, 100) - val fd = createFailureDetector(timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.phi(conn) must be > (0.0) @@ -105,7 +105,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after a series of successful heartbeats" in { val timeInterval = 
List[Long](0, 1000, 100, 100) - val fd = createFailureDetector(timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.heartbeat(conn) @@ -116,7 +116,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead after explicit removal of connection" in { val timeInterval = List[Long](0, 1000, 100, 100, 100) - val fd = createFailureDetector(timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.heartbeat(conn) @@ -129,7 +129,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after explicit removal of connection and receiving heartbeat again" in { val timeInterval = List[Long](0, 1000, 100, 1100, 1100, 1100, 1100, 1100, 100) - val fd = createFailureDetector(timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 @@ -153,7 +153,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead if heartbeat are missed" in { val timeInterval = List[Long](0, 1000, 100, 100, 7000) val ft = fakeTimeGenerator(timeInterval) - val fd = createFailureDetector(threshold = 3, timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(threshold = 3, clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 fd.heartbeat(conn) //1000 @@ -165,7 +165,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { val timeInterval = List[Long](0, 1000, 100, 1100, 7000, 100, 1000, 100, 100) - val fd = createFailureDetector(threshold = 3, timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(threshold = 3, clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 fd.heartbeat(conn) //1000 @@ -181,7 +181,7 @@ 
class AccrualFailureDetectorSpec extends AkkaSpec(""" "accept some configured missing heartbeats" in { val timeInterval = List[Long](0, 1000, 1000, 1000, 4000, 1000, 1000) - val fd = createFailureDetector(acceptableLostDuration = 3.seconds, timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(acceptableLostDuration = 3.seconds, clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.heartbeat(conn) @@ -194,7 +194,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "fail after configured acceptable missing heartbeats" in { val timeInterval = List[Long](0, 1000, 1000, 1000, 1000, 1000, 500, 500, 5000) - val fd = createFailureDetector(acceptableLostDuration = 3.seconds, timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(acceptableLostDuration = 3.seconds, clock = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) fd.heartbeat(conn) @@ -209,7 +209,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "use maxSampleSize heartbeats" in { val timeInterval = List[Long](0, 100, 100, 100, 100, 600, 1000, 1000, 1000, 1000, 1000) - val fd = createFailureDetector(maxSampleSize = 3, timeMachine = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(maxSampleSize = 3, clock = fakeTimeGenerator(timeInterval)) // 100 ms interval fd.heartbeat(conn) //0 diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index ab8ffcf157..91fab3aea3 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -20,7 +20,7 @@ class ClusterConfigSpec extends AkkaSpec { FailureDetectorMaxSampleSize must be(1000) FailureDetectorImplementationClass must be(None) FailureDetectorMinStdDeviation must be(100 millis) - FailureDetectorAcceptableLostHeartbeats must be(3.0 plusOrMinus 0.0001) + FailureDetectorAcceptableHeartbeatPause must be(3 
seconds) NodeToJoin must be(None) PeriodicTasksInitialDelay must be(1 seconds) GossipInterval must be(1 second) From b062539ae3280cb62c8f4ca9d079475b63555d36 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 10:58:30 +0200 Subject: [PATCH 463/538] Adding the setting of securerandom.source and support for NativePRNG --- .../scala/akka/remote/netty/NettySSLSupport.scala | 15 +++++++++------ .../akka/remote/Ticket1978CommunicationSpec.scala | 4 ++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 9a16a1e5f5..9323fb8143 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -9,8 +9,8 @@ import javax.net.ssl.{ KeyManagerFactory, TrustManager, TrustManagerFactory, SSL import akka.remote.RemoteTransportException import akka.event.LoggingAdapter import java.io.{ IOException, FileNotFoundException, FileInputStream } -import java.security.{ SecureRandom, GeneralSecurityException, KeyStore, Security } import akka.security.provider.AkkaProvider +import java.security._ /** * Used for adding SSL support to Netty pipeline @@ -32,17 +32,20 @@ private[akka] object NettySSLSupport { * Using /dev/./urandom is only necessary when using SHA1PRNG on Linux * Use 'new SecureRandom()' instead of 'SecureRandom.getInstance("SHA1PRNG")' to avoid having problems */ - sourceOfRandomness foreach { path ⇒ System.setProperty("java.security.egd", path) } + sourceOfRandomness foreach { path ⇒ + System.setProperty("java.security.egd", path) + System.setProperty("securerandom.source", path) + } val rng = rngName match { case Some(r @ ("AES128CounterRNGFast" | "AES128CounterRNGSecure" | "AES256CounterRNGSecure")) ⇒ log.debug("SSL random number generator set to: {}", r) SecureRandom.getInstance(r, AkkaProvider) - case 
Some("SHA1PRNG") ⇒ - log.debug("SSL random number generator set to: SHA1PRNG") - // This needs /dev/urandom to be the source on Linux to prevent problems with /dev/random blocking + case Some(s @ ("SHA1PRNG" | "NativePRNG")) ⇒ + log.debug("SSL random number generator set to: " + s) + // SHA1PRNG needs /dev/urandom to be the source on Linux to prevent problems with /dev/random blocking // However, this also makes the seed source insecure as the seed is reused to avoid blocking (not a problem on FreeBSD). - SecureRandom.getInstance("SHA1PRNG") + SecureRandom.getInstance(s) case Some(unknown) ⇒ log.debug("Unknown SSLRandomNumberGenerator [{}] falling back to SecureRandom", unknown) new SecureRandom diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 6344d0c435..c31784f313 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -128,8 +128,8 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) exten "support tell" in { val here = system.actorFor(otherAddress.toString + "/user/echo") - for (i ← 1 to 100) here ! (("ping", i)) - for (i ← 1 to 100) expectMsgPF(timeout.duration) { case (("pong", i), `testActor`) ⇒ true } + for (i ← 1 to 1000) here ! (("ping", i)) + for (i ← 1 to 1000) expectMsgPF(timeout.duration) { case (("pong", i), `testActor`) ⇒ true } } "support ask" in { From 803b37d015ef877db9495592cd4544a44dd23eba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Wed, 20 Jun 2012 11:03:03 +0200 Subject: [PATCH 464/538] Added longer waiting time to wait for leader hand-off and convergence on new leader in the LeaderLeavingSpec. 
Fixes #2254 --- .../multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala | 8 +++++--- .../src/multi-jvm/scala/akka/cluster/TransitionSpec.scala | 3 +-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index ad15fdc21d..31b9ec641a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -39,6 +39,8 @@ abstract class LeaderLeavingSpec lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address + val leaderHandoffWaitingTime = 30.seconds.dilated + "A LEADER that is LEAVING" must { "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest in { @@ -72,13 +74,13 @@ abstract class LeaderLeavingSpec awaitCond(cluster.latestGossip.members.exists(m => m.status == MemberStatus.Exiting && m.address == oldLeaderAddress)) // wait on EXITING // verify that the LEADER is no longer part of the 'members' set - awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress)) + awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress), leaderHandoffWaitingTime) // verify that the LEADER is not part of the 'unreachable' set - awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != oldLeaderAddress)) + awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != oldLeaderAddress), leaderHandoffWaitingTime) // verify that we have a new LEADER - awaitCond(cluster.leader != oldLeaderAddress) + awaitCond(cluster.leader != oldLeaderAddress, leaderHandoffWaitingTime) } enterBarrier("finished") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 0130c5bc1c..268944a139 
100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -20,8 +20,7 @@ object TransitionMultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString( - "akka.cluster.periodic-tasks-initial-delay = 300 s # turn off all periodic tasks")). + withFallback(ConfigFactory.parseString("akka.cluster.periodic-tasks-initial-delay = 300 s # turn off all periodic tasks")). withFallback(MultiNodeClusterSpec.clusterConfig)) } From 4b2316a56b843c5ceac9084f7ac58304f56305dc Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 20 Jun 2012 11:06:47 +0200 Subject: [PATCH 465/538] Rename GossipingAccrualFailureDetectorSpec --- ...> ClusterAccrualFailureDetectorSpec.scala} | 24 ++++++++++--------- .../scala/akka/cluster/TransitionSpec.scala | 4 ++-- 2 files changed, 15 insertions(+), 13 deletions(-) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{GossipingAccrualFailureDetectorSpec.scala => ClusterAccrualFailureDetectorSpec.scala} (61%) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala similarity index 61% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala index b52695dcf1..d5d41b52aa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala @@ -9,7 +9,7 @@ import akka.remote.testkit.MultiNodeSpec import akka.util.duration._ import akka.testkit._ -object GossipingAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { +object ClusterAccrualFailureDetectorMultiJvmSpec extends 
MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") @@ -19,22 +19,22 @@ object GossipingAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class GossipingWithAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -class GossipingWithAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -class GossipingWithAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class ClusterAccrualFailureDetectorMultiJvmNode1 extends ClusterAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class ClusterAccrualFailureDetectorMultiJvmNode2 extends ClusterAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class ClusterAccrualFailureDetectorMultiJvmNode3 extends ClusterAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -abstract class GossipingAccrualFailureDetectorSpec - extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) +abstract class ClusterAccrualFailureDetectorSpec + extends MultiNodeSpec(ClusterAccrualFailureDetectorMultiJvmSpec) with MultiNodeClusterSpec { - import GossipingAccrualFailureDetectorMultiJvmSpec._ + import ClusterAccrualFailureDetectorMultiJvmSpec._ - "A Gossip-driven Failure Detector" must { + "A heartbeat driven Failure Detector" must { - "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { + "receive heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) - 5.seconds.dilated.sleep // let them gossip + 5.seconds.dilated.sleep // let them heartbeat cluster.failureDetector.isAvailable(first) must be(true) cluster.failureDetector.isAvailable(second) must be(true) 
cluster.failureDetector.isAvailable(third) must be(true) @@ -47,9 +47,11 @@ abstract class GossipingAccrualFailureDetectorSpec testConductor.shutdown(third, 0) } + enterBarrier("third-shutdown") + runOn(first, second) { // remaning nodes should detect failure... - awaitCond(!cluster.failureDetector.isAvailable(third), 10.seconds) + awaitCond(!cluster.failureDetector.isAvailable(third), 15.seconds) // other connections still ok cluster.failureDetector.isAvailable(first) must be(true) cluster.failureDetector.isAvailable(second) must be(true) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 6330772ef6..ce31d8fe0e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -396,7 +396,7 @@ abstract class TransitionSpec seenLatestGossip must be(Set(fifth)) } - testConductor.enter("after-second-unavailble") + enterBarrier("after-second-unavailble") // spread the word val gossipRound = List(fifth, fourth, third, first, third, fourth, fifth) @@ -414,7 +414,7 @@ abstract class TransitionSpec awaitMemberStatus(second, Down) } - testConductor.enter("after-second-down") + enterBarrier("after-second-down") // spread the word val gossipRound2 = List(third, fourth, fifth, first, third, fourth, fifth) From dccb0ca2d7f806e46dc6f684ee9a821f778aaa95 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 19 Jun 2012 16:00:45 +0200 Subject: [PATCH 466/538] Avoid gossip merge when singleton cluster, see #2250 --- .../src/main/scala/akka/cluster/Cluster.scala | 6 +- .../scala/akka/cluster/SunnyWeatherSpec.scala | 4 +- .../scala/akka/cluster/TransitionSpec.scala | 67 +++++-------------- 3 files changed, 23 insertions(+), 54 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index d9496b3235..78015078f3 
100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -893,7 +893,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localGossip = localState.latestGossip val winningGossip = - if (remoteGossip.version <> localGossip.version) { + if (isSingletonCluster(localState) && localGossip.overview.unreachable.isEmpty && remoteGossip.members.contains(self)) { + // a fresh singleton cluster that is joining, no need to merge, use received gossip + remoteGossip + + } else if (remoteGossip.version <> localGossip.version) { // concurrent val mergedGossip = remoteGossip merge localGossip val versionedMergedGossip = mergedGossip :+ vclockNode diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index c283665b30..3c74bc02e2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -19,11 +19,11 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") val fifth = role("fifth") + // Note that this test uses default configuration, + // not MultiNodeClusterSpec.clusterConfig commonConfig(ConfigFactory.parseString(""" akka.cluster { nr-of-deputy-nodes = 0 - # FIXME remove this (use default) when ticket #2239 has been fixed - gossip-interval = 400 ms } akka.loglevel = INFO """)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 60594d145e..57b973390d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -118,33 +118,18 @@ abstract class TransitionSpec awaitMembers(first, second) memberStatus(first) must be(Up) memberStatus(second) must 
be(Joining) + seenLatestGossip must be(Set(first)) cluster.convergence.isDefined must be(false) } enterBarrier("second-joined") first gossipTo second - runOn(second) { - members must be(Set(first, second)) - memberStatus(first) must be(Up) - memberStatus(second) must be(Joining) - // we got a conflicting version in second, and therefore not convergence in second - seenLatestGossip must be(Set(second)) - cluster.convergence.isDefined must be(false) - } - second gossipTo first - runOn(first) { - seenLatestGossip must be(Set(first, second)) - } - - first gossipTo second - runOn(second) { - seenLatestGossip must be(Set(first, second)) - } runOn(first, second) { memberStatus(first) must be(Up) memberStatus(second) must be(Joining) + seenLatestGossip must be(Set(first, second)) cluster.convergence.isDefined must be(true) } enterBarrier("convergence-joining-2") @@ -191,42 +176,20 @@ abstract class TransitionSpec second gossipTo first runOn(first) { members must be(Set(first, second, third)) - cluster.convergence.isDefined must be(false) memberStatus(third) must be(Joining) + seenLatestGossip must be(Set(first, second)) + cluster.convergence.isDefined must be(false) } first gossipTo third - runOn(third) { - members must be(Set(first, second, third)) - cluster.convergence.isDefined must be(false) - memberStatus(third) must be(Joining) - // conflicting version - seenLatestGossip must be(Set(third)) - } - third gossipTo first third gossipTo second - runOn(first, second) { - seenLatestGossip must be(Set(myself, third)) - } - - first gossipTo second - runOn(second) { - seenLatestGossip must be(Set(first, second, third)) - cluster.convergence.isDefined must be(true) - } - - runOn(first, third) { - cluster.convergence.isDefined must be(false) - } - - second gossipTo first - second gossipTo third runOn(first, second, third) { - seenLatestGossip must be(Set(first, second, third)) + members must be(Set(first, second, third)) memberStatus(first) must be(Up) memberStatus(second) must 
be(Up) memberStatus(third) must be(Joining) + seenLatestGossip must be(Set(first, second, third)) cluster.convergence.isDefined must be(true) } @@ -283,19 +246,21 @@ abstract class TransitionSpec "startup a second separated cluster consisting of nodes fourth and fifth" taggedAs LongRunningTest in { runOn(fourth) { cluster.join(fifth) - awaitMembers(fourth, fifth) - cluster.gossipTo(fifth) - awaitSeen(fourth, fifth) - cluster.convergence.isDefined must be(true) } runOn(fifth) { awaitMembers(fourth, fifth) - cluster.gossipTo(fourth) - awaitSeen(fourth, fifth) - cluster.gossipTo(fourth) + } + testConductor.enter("fourth-joined") + + fifth gossipTo fourth + fourth gossipTo fifth + + runOn(fourth, fifth) { + memberStatus(fourth) must be(Joining) + memberStatus(fifth) must be(Up) + seenLatestGossip must be(Set(fourth, fifth)) cluster.convergence.isDefined must be(true) } - enterBarrier("fourth-joined-fifth") enterBarrier("after-4") } From 0563ae04f113351a82eb73c85256a5b054e82f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Wed, 20 Jun 2012 11:54:30 +0200 Subject: [PATCH 467/538] Increased timeout for LeaderLeavingSpec --- .../src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index e903368498..54154b6973 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -64,10 +64,10 @@ abstract class LeaderLeavingSpec enterBarrier("leader-left") // verify that the LEADER is LEAVING - awaitCond(cluster.latestGossip.members.exists(m ⇒ m.status == MemberStatus.Leaving && m.address == oldLeaderAddress)) // wait on LEAVING + awaitCond(cluster.latestGossip.members.exists(m ⇒ m.status == MemberStatus.Leaving && m.address == oldLeaderAddress), 
leaderHandoffWaitingTime) // wait on LEAVING // verify that the LEADER is EXITING - awaitCond(cluster.latestGossip.members.exists(m ⇒ m.status == MemberStatus.Exiting && m.address == oldLeaderAddress)) // wait on EXITING + awaitCond(cluster.latestGossip.members.exists(m ⇒ m.status == MemberStatus.Exiting && m.address == oldLeaderAddress), leaderHandoffWaitingTime) // wait on EXITING // verify that the LEADER is no longer part of the 'members' set awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress), leaderHandoffWaitingTime) From 529c25f3dc22467c9f1b1c0358c13b0d5845797b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 20 Jun 2012 08:26:41 +0200 Subject: [PATCH 468/538] Start sending heartbeats immediately when joining, see #2249 * Keep track of joins that are in progress in State.joinInProgress, with Deadline * Add test that fails without this feature --- .../src/main/resources/reference.conf | 4 ++ .../akka/cluster/AccrualFailureDetector.scala | 6 +- .../src/main/scala/akka/cluster/Cluster.scala | 66 ++++++++++++++----- .../scala/akka/cluster/ClusterSettings.scala | 21 +++--- .../akka/cluster/JoinInProgressSpec.scala | 65 ++++++++++++++++++ .../akka/cluster/ClusterConfigSpec.scala | 1 + 6 files changed, 131 insertions(+), 32 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 8bf51b4fa5..e5536ec6c1 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -36,6 +36,10 @@ akka { # how often should the node move nodes, marked as unreachable by the failure detector, out of the membership ring? unreachable-nodes-reaper-interval = 1s + # A joining node stops sending heartbeats to the node to join if it hasn't become member + # of the cluster within this deadline. 
+ join-timeout = 60s + failure-detector { # defines the failure detector threshold diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index b10962ce11..db5f21607b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -85,11 +85,7 @@ class AccrualFailureDetector( settings.FailureDetectorMaxSampleSize, settings.FailureDetectorAcceptableHeartbeatPause, settings.FailureDetectorMinStdDeviation, - // we use a conservative estimate for the first heartbeat because - // gossip needs to spread back to the joining node before the - // first real heartbeat is sent. Initial heartbeat is added when joining. - // FIXME this can be changed to HeartbeatInterval when ticket #2249 is fixed - settings.GossipInterval * 3 + settings.HeartbeatInterval, + settings.HeartbeatInterval, clock) private val log = Logging(system, "FailureDetector") diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index d9496b3235..2ea21db230 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -194,8 +194,8 @@ object MemberStatus { * Represents the overview of the cluster, holds the cluster convergence table and set with unreachable nodes. 
*/ case class GossipOverview( - seen: Map[Address, VectorClock] = Map.empty[Address, VectorClock], - unreachable: Set[Member] = Set.empty[Member]) { + seen: Map[Address, VectorClock] = Map.empty, + unreachable: Set[Member] = Set.empty) { override def toString = "GossipOverview(seen = [" + seen.mkString(", ") + @@ -241,7 +241,7 @@ object Gossip { case class Gossip( overview: GossipOverview = GossipOverview(), members: SortedSet[Member], // sorted set of members with their status, sorted by address - meta: Map[String, Array[Byte]] = Map.empty[String, Array[Byte]], + meta: Map[String, Array[Byte]] = Map.empty, version: VectorClock = VectorClock()) // vector clock version extends ClusterMessage // is a serializable cluster message with Versioned[Gossip] { @@ -463,7 +463,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) */ private case class State( latestGossip: Gossip, - memberMembershipChangeListeners: Set[MembershipChangeListener] = Set.empty[MembershipChangeListener]) + joinInProgress: Map[Address, Deadline] = Map.empty, + memberMembershipChangeListeners: Set[MembershipChangeListener] = Set.empty) if (!system.provider.isInstanceOf[RemoteActorRefProvider]) throw new ConfigurationException("ActorSystem[" + system + "] needs to have a 'RemoteActorRefProvider' enabled in the configuration") @@ -674,11 +675,18 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) * Try to join this cluster node with the node specified by 'address'. * A 'Join(thisNodeAddress)' command is sent to the node to join. */ - def join(address: Address): Unit = { - val connection = clusterCommandConnectionFor(address) - val command = ClusterUserAction.Join(selfAddress) - log.info("Cluster Node [{}] - Trying to send JOIN to [{}] through connection [{}]", selfAddress, address, connection) - connection ! 
command + @tailrec + final def join(address: Address): Unit = { + val localState = state.get + val newState = localState copy (joinInProgress = localState.joinInProgress + + (address -> (Deadline.now + JoinTimeout))) + if (!state.compareAndSet(localState, newState)) join(address) // recur + else { + val connection = clusterCommandConnectionFor(address) + val command = ClusterUserAction.Join(selfAddress) + log.info("Cluster Node [{}] - Trying to send JOIN to [{}] through connection [{}]", selfAddress, address, connection) + connection ! command + } } /** @@ -913,7 +921,15 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) remoteGossip } - val newState = localState copy (latestGossip = winningGossip seen selfAddress) + val newJoinInProgress = + if (localState.joinInProgress.isEmpty) localState.joinInProgress + else localState.joinInProgress -- + winningGossip.members.map(_.address) -- + winningGossip.overview.unreachable.map(_.address) + + val newState = localState copy ( + latestGossip = winningGossip seen selfAddress, + joinInProgress = newJoinInProgress) // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) receiveGossip(from, remoteGossip) // recur if we fail the update @@ -1023,16 +1039,15 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) * INTERNAL API. */ private[cluster] def heartbeat(): Unit = { + removeOverdueJoinInProgress() val localState = state.get - if (!isSingletonCluster(localState)) { - val liveMembers = localState.latestGossip.members.toIndexedSeq + val beatTo = localState.latestGossip.members.toSeq.map(_.address) ++ localState.joinInProgress.keys - for (member ← liveMembers; if member.address != selfAddress) { - val connection = clusterGossipConnectionFor(member.address) - log.debug("Cluster Node [{}] - Heartbeat to [{}]", selfAddress, connection) - connection ! 
selfHeartbeat - } + for (address ← beatTo; if address != selfAddress) { + val connection = clusterGossipConnectionFor(address) + log.debug("Cluster Node [{}] - Heartbeat to [{}]", selfAddress, connection) + connection ! selfHeartbeat } } @@ -1080,6 +1095,23 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } } + /** + * INTERNAL API. + * + * Removes overdue joinInProgress from State. + */ + @tailrec + final private[cluster] def removeOverdueJoinInProgress(): Unit = { + val localState = state.get + val overdueJoins = localState.joinInProgress collect { + case (address, deadline) if deadline.isOverdue ⇒ address + } + if (overdueJoins.nonEmpty) { + val newState = localState copy (joinInProgress = localState.joinInProgress -- overdueJoins) + if (!state.compareAndSet(localState, newState)) removeOverdueJoinInProgress() // recur + } + } + /** * INTERNAL API. * diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index e54e74617d..544f1802ac 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -29,14 +29,15 @@ class ClusterSettings(val config: Config, val systemName: String) { case "" ⇒ None case AddressFromURIString(addr) ⇒ Some(addr) } - final val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) - final val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) - final val HeartbeatInterval = Duration(getMilliseconds("akka.cluster.heartbeat-interval"), MILLISECONDS) - final val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS) - final val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS) - final val NrOfGossipDaemons = 
getInt("akka.cluster.nr-of-gossip-daemons") - final val NrOfDeputyNodes = getInt("akka.cluster.nr-of-deputy-nodes") - final val AutoDown = getBoolean("akka.cluster.auto-down") - final val SchedulerTickDuration = Duration(getMilliseconds("akka.cluster.scheduler.tick-duration"), MILLISECONDS) - final val SchedulerTicksPerWheel = getInt("akka.cluster.scheduler.ticks-per-wheel") + final val PeriodicTasksInitialDelay: Duration = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) + final val GossipInterval: Duration = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) + final val HeartbeatInterval: Duration = Duration(getMilliseconds("akka.cluster.heartbeat-interval"), MILLISECONDS) + final val LeaderActionsInterval: Duration = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS) + final val UnreachableNodesReaperInterval: Duration = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS) + final val NrOfGossipDaemons: Int = getInt("akka.cluster.nr-of-gossip-daemons") + final val NrOfDeputyNodes: Int = getInt("akka.cluster.nr-of-deputy-nodes") + final val AutoDown: Boolean = getBoolean("akka.cluster.auto-down") + final val JoinTimeout: Duration = Duration(getMilliseconds("akka.cluster.join-timeout"), MILLISECONDS) + final val SchedulerTickDuration: Duration = Duration(getMilliseconds("akka.cluster.scheduler.tick-duration"), MILLISECONDS) + final val SchedulerTicksPerWheel: Int = getInt("akka.cluster.scheduler.ticks-per-wheel") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala new file mode 100644 index 0000000000..256b7d563d --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala @@ -0,0 +1,65 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import akka.util.Deadline + +object JoinInProgressMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + # simulate delay in gossip by turning it off + gossip-interval = 300 s + failure-detector { + threshold = 4 + acceptable-heartbeat-pause = 1 second + } + }""") // increase the leader action task interval + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class JoinInProgressMultiJvmNode1 extends JoinInProgressSpec with AccrualFailureDetectorStrategy +class JoinInProgressMultiJvmNode2 extends JoinInProgressSpec with AccrualFailureDetectorStrategy + +abstract class JoinInProgressSpec + extends MultiNodeSpec(JoinInProgressMultiJvmSpec) + with MultiNodeClusterSpec { + + import JoinInProgressMultiJvmSpec._ + + "A cluster node" must { + "send heartbeats immediately when joining to avoid false failure detection due to delayed gossip" taggedAs LongRunningTest in { + + runOn(first) { + startClusterNode() + } + + enterBarrier("first-started") + + runOn(second) { + cluster.join(first) + } + + runOn(first) { + val until = Deadline.now + 5.seconds + while (!until.isOverdue) { + 200.millis.sleep + cluster.failureDetector.isAvailable(second) must be(true) + } + } + + enterBarrier("after") + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 91fab3aea3..864cdafc37 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -27,6 +27,7 @@ class ClusterConfigSpec extends AkkaSpec { 
HeartbeatInterval must be(1 second) LeaderActionsInterval must be(1 second) UnreachableNodesReaperInterval must be(1 second) + JoinTimeout must be(60 seconds) NrOfGossipDaemons must be(4) NrOfDeputyNodes must be(3) AutoDown must be(true) From 063c2606157640f28acd5c3fef604a37da9d3afa Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 20 Jun 2012 13:30:30 +0200 Subject: [PATCH 469/538] Add missing after barriers in LeaderElectionSpec, see #2256 --- .../src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 662dfbca56..9ed8f27ad4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -50,7 +50,7 @@ abstract class LeaderElectionSpec assertLeaderIn(sortedRoles) } - enterBarrier("after") + enterBarrier("after-1") } def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { @@ -97,10 +97,12 @@ abstract class LeaderElectionSpec "be able to 're-elect' a single leader after leader has left" taggedAs LongRunningTest in { shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 0) + enterBarrier("after-2") } "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in { shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 1) + enterBarrier("after-3") } } } From a050fe349d9a0d87e05404fb2fa662523752ae53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Wed, 20 Jun 2012 14:01:23 +0200 Subject: [PATCH 470/538] Fixed link to licenses for dependency projects --- akka-docs/project/licenses.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-docs/project/licenses.rst b/akka-docs/project/licenses.rst index b83b6a5f46..7dbcf5ef9f 100644 --- a/akka-docs/project/licenses.rst 
+++ b/akka-docs/project/licenses.rst @@ -196,4 +196,4 @@ Licenses for Dependency Libraries --------------------------------- Each dependency and its license can be seen in the project build file (the comment on the side of each dependency): -``_ +``_ From 9b73d75c1ba4bf80dddfa134f667a8dcbe4a0d67 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 14:14:10 +0200 Subject: [PATCH 471/538] Removing the naught default in code of the failure detector and changed so that the AccrualFailureDetectors constructor matches what the instantiator expects --- akka-cluster/src/main/resources/reference.conf | 2 +- .../src/main/scala/akka/cluster/Cluster.scala | 14 ++++++-------- .../main/scala/akka/cluster/ClusterSettings.scala | 5 +---- .../akka/cluster/FailureDetectorStrategy.scala | 4 ++-- .../scala/akka/cluster/ClusterConfigSpec.scala | 2 +- 5 files changed, 11 insertions(+), 16 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 8bf51b4fa5..33616f5812 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -59,7 +59,7 @@ akka { # network drop. 
acceptable-heartbeat-pause = 3s - implementation-class = "" + implementation-class = "akka.cluster.AccrualFailureDetector" max-sample-size = 1000 } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index d9496b3235..3732019a50 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -403,14 +403,12 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): Cluster = { val clusterSettings = new ClusterSettings(system.settings.config, system.name) - val failureDetector = clusterSettings.FailureDetectorImplementationClass match { - case None ⇒ new AccrualFailureDetector(system, clusterSettings) - case Some(fqcn) ⇒ - system.dynamicAccess.createInstanceFor[FailureDetector]( - fqcn, Seq((classOf[ActorSystem], system), (classOf[ClusterSettings], clusterSettings))) match { - case Right(fd) ⇒ fd - case Left(e) ⇒ throw new ConfigurationException("Could not create custom failure detector [" + fqcn + "] due to:" + e.toString) - } + val failureDetector = { + import clusterSettings.{ FailureDetectorImplementationClass ⇒ fqcn } + system.dynamicAccess.createInstanceFor[FailureDetector]( + fqcn, Seq(classOf[ActorSystem] -> system, classOf[ClusterSettings] -> clusterSettings)).fold( + e ⇒ throw new ConfigurationException("Could not create custom failure detector [" + fqcn + "] due to:" + e.toString), + identity) } new Cluster(system, failureDetector) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index e54e74617d..64ae1c28cb 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -16,10 +16,7 @@ class ClusterSettings(val config: Config, val systemName: String) { final val 
FailureDetectorThreshold = getDouble("akka.cluster.failure-detector.threshold") final val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") - final val FailureDetectorImplementationClass: Option[String] = getString("akka.cluster.failure-detector.implementation-class") match { - case "" ⇒ None - case fqcn ⇒ Some(fqcn) - } + final val FailureDetectorImplementationClass = getString("akka.cluster.failure-detector.implementation-class") final val FailureDetectorMinStdDeviation: Duration = Duration(getMilliseconds("akka.cluster.failure-detector.min-std-deviation"), MILLISECONDS) final val FailureDetectorAcceptableHeartbeatPause: Duration = diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala index dcbb65d0f1..86e03f9457 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala @@ -55,7 +55,7 @@ trait AccrualFailureDetectorStrategy extends FailureDetectorStrategy { self: Mul override val failureDetector: FailureDetector = new AccrualFailureDetector(system, new ClusterSettings(system.settings.config, system.name)) - override def markNodeAsAvailable(address: Address): Unit = { /* no-op */ } + override def markNodeAsAvailable(address: Address): Unit = () - override def markNodeAsUnavailable(address: Address): Unit = { /* no-op */ } + override def markNodeAsUnavailable(address: Address): Unit = () } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 91fab3aea3..58f0683c25 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -18,7 +18,7 @@ class ClusterConfigSpec extends AkkaSpec { import settings._ 
FailureDetectorThreshold must be(8.0 plusOrMinus 0.0001) FailureDetectorMaxSampleSize must be(1000) - FailureDetectorImplementationClass must be(None) + FailureDetectorImplementationClass must be(classOf[AccrualFailureDetector].getName) FailureDetectorMinStdDeviation must be(100 millis) FailureDetectorAcceptableHeartbeatPause must be(3 seconds) NodeToJoin must be(None) From 28ee78bfd5b8529a1f2fc9c752dc4d951c91e9f4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 14:29:41 +0200 Subject: [PATCH 472/538] Adding support for being able to _not_ use ExecutionHandler by setting the poolsize to 0 --- akka-remote/src/main/resources/reference.conf | 3 ++- .../akka/remote/netty/NettyRemoteSupport.scala | 18 ++++++++++-------- .../scala/akka/remote/netty/Settings.scala | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 9cd7b767be..22b0ce3226 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -122,7 +122,8 @@ akka { # (I) Length in akka.time-unit how long core threads will be kept alive if idling execution-pool-keepalive = 60s - # (I) Size of the core pool of the remote execution unit + # (I) Size in number of threads of the core pool of the remote execution unit, + # set to 0 to disable the execution pool execution-pool-size = 4 # (I) Maximum channel size, 0 for off diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index a596f783d7..da67ea4f06 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -71,7 +71,7 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider */ def defaultStack(withTimeout: Boolean, isClient: Boolean): Seq[ChannelHandler] 
= (if (settings.EnableSSL) List(NettySSLSupport(settings, NettyRemoteTransport.this.log, isClient)) else Nil) ::: - (if (withTimeout) List(timeout) else Nil) ::: msgFormat ::: authenticator ::: executionHandler :: Nil + (if (withTimeout) List(timeout) else Nil) ::: msgFormat ::: authenticator ::: executionHandler /** * Construct an IdleStateHandler which uses [[akka.remote.netty.NettyRemoteTransport]].timer. @@ -95,13 +95,15 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider * happen on a netty thread (that could be bad if re-sending over the network for * remote-deployed actors). */ - val executionHandler = new ExecutionHandler(new OrderedMemoryAwareThreadPoolExecutor( - settings.ExecutionPoolSize, - settings.MaxChannelMemorySize, - settings.MaxTotalMemorySize, - settings.ExecutionPoolKeepalive.length, - settings.ExecutionPoolKeepalive.unit, - system.threadFactory)) + val executionHandler = if (settings.ExecutionPoolSize != 0) + List(new ExecutionHandler(new OrderedMemoryAwareThreadPoolExecutor( + settings.ExecutionPoolSize, + settings.MaxChannelMemorySize, + settings.MaxTotalMemorySize, + settings.ExecutionPoolKeepalive.length, + settings.ExecutionPoolKeepalive.unit, + system.threadFactory))) + else Nil /** * Construct and authentication handler which uses the SecureCookie to somewhat diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index ada257f674..9babf6005c 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -73,7 +73,7 @@ private[akka] class NettySettings(config: Config, val systemName: String) { val ExecutionPoolKeepalive: Duration = Duration(getMilliseconds("execution-pool-keepalive"), MILLISECONDS) val ExecutionPoolSize: Int = getInt("execution-pool-size") match { - case sz if sz < 1 ⇒ throw new 
IllegalArgumentException("akka.remote.netty.execution-pool-size is less than 1") + case sz if sz < 0 ⇒ throw new IllegalArgumentException("akka.remote.netty.execution-pool-size is less than 0") case sz ⇒ sz } From 8fcffcab00eff254b0483d7b68e6a6e70d690c42 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 14:43:11 +0200 Subject: [PATCH 473/538] Doing a roundtrip before issuing the wave of messages --- .../akka/remote/Ticket1978CommunicationSpec.scala | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index c31784f313..04ec0b88ec 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -121,20 +121,26 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) exten } } - ("- SSL communication") must { + ("-") must { if (cipherConfig.runTest) { val ignoreMe = other.actorOf(Props(new Actor { def receive = { case ("ping", x) ⇒ sender ! ((("pong", x), sender)) } }), "echo") val otherAddress = other.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].transport.address "support tell" in { val here = system.actorFor(otherAddress.toString + "/user/echo") - for (i ← 1 to 1000) here ! (("ping", i)) - for (i ← 1 to 1000) expectMsgPF(timeout.duration) { case (("pong", i), `testActor`) ⇒ true } + + Await.result(here ? (("ping", -1)) mapTo manifest[((String, Int), ActorRef)], timeout.duration)._1 must be(("pong", -1)) + + for (i ← 1 to 10000) here ! (("ping", i)) + for (i ← 1 to 10000) expectMsgPF(timeout.duration) { case (("pong", i), `testActor`) ⇒ true } } "support ask" in { val here = system.actorFor(otherAddress.toString + "/user/echo") - val f = for (i ← 1 to 1000) yield here ? 
(("ping", i)) mapTo manifest[((String, Int), ActorRef)] + + Await.result(here ? (("ping", -1)) mapTo manifest[((String, Int), ActorRef)], timeout.duration)._1 must be(("pong", -1)) + + val f = for (i ← 1 to 10000) yield here ? (("ping", i)) mapTo manifest[((String, Int), ActorRef)] Await.result(Future.sequence(f), timeout.duration).map(_._1._1).toSet must be(Set("pong")) } From 6be3acacec1470e9835a107d70e3727553a3163a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 15:01:14 +0200 Subject: [PATCH 474/538] I think we have a winner --- .../main/scala/akka/remote/netty/Client.scala | 23 +++++++++++-------- .../remote/Ticket1978CommunicationSpec.scala | 4 ---- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 0917086d4d..96484d164f 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -18,6 +18,7 @@ import akka.AkkaException import akka.event.Logging import akka.actor.{ DeadLetter, Address, ActorRef } import akka.util.{ NonFatal, Switch } +import org.jboss.netty.handler.ssl.SslHandler /** * This is the abstract baseclass for netty remote clients, currently there's only an @@ -115,15 +116,19 @@ private[akka] class ActiveRemoteClient private[akka] ( */ def connect(reconnectIfAlreadyConnected: Boolean = false): Boolean = { - def sendSecureCookie(connection: ChannelFuture) { - val handshake = RemoteControlProtocol.newBuilder.setCommandType(CommandType.CONNECT) - if (settings.SecureCookie.nonEmpty) handshake.setCookie(settings.SecureCookie.get) - handshake.setOrigin(RemoteProtocol.AddressProtocol.newBuilder - .setSystem(localAddress.system) - .setHostname(localAddress.host.get) - .setPort(localAddress.port.get) - .build) - connection.getChannel.write(netty.createControlEnvelope(handshake.build)) + // Returns whether the handshake was 
written to the channel or not + def sendSecureCookie(connection: ChannelFuture): Boolean = { + if (!settings.EnableSSL || connection.getChannel.getPipeline.get[SslHandler](classOf[SslHandler]).handshake().awaitUninterruptibly().isSuccess) { + val handshake = RemoteControlProtocol.newBuilder.setCommandType(CommandType.CONNECT) + if (settings.SecureCookie.nonEmpty) handshake.setCookie(settings.SecureCookie.get) + handshake.setOrigin(RemoteProtocol.AddressProtocol.newBuilder + .setSystem(localAddress.system) + .setHostname(localAddress.host.get) + .setPort(localAddress.port.get) + .build) + connection.getChannel.write(netty.createControlEnvelope(handshake.build)) + true + } else false } def attemptReconnect(): Boolean = { diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 04ec0b88ec..19c8c7432f 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -129,8 +129,6 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) exten "support tell" in { val here = system.actorFor(otherAddress.toString + "/user/echo") - Await.result(here ? (("ping", -1)) mapTo manifest[((String, Int), ActorRef)], timeout.duration)._1 must be(("pong", -1)) - for (i ← 1 to 10000) here ! (("ping", i)) for (i ← 1 to 10000) expectMsgPF(timeout.duration) { case (("pong", i), `testActor`) ⇒ true } } @@ -138,8 +136,6 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) exten "support ask" in { val here = system.actorFor(otherAddress.toString + "/user/echo") - Await.result(here ? (("ping", -1)) mapTo manifest[((String, Int), ActorRef)], timeout.duration)._1 must be(("pong", -1)) - val f = for (i ← 1 to 10000) yield here ? 
(("ping", i)) mapTo manifest[((String, Int), ActorRef)] Await.result(Future.sequence(f), timeout.duration).map(_._1._1).toSet must be(Set("pong")) } From cb4831d52e32da8ad9afa85851b62af87a8a6995 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 15:19:24 +0200 Subject: [PATCH 475/538] Rearchitecting the reconnection semantics since we need to handle the ssl handshake there as well --- .../main/scala/akka/remote/netty/Client.scala | 36 +++++++++---------- .../remote/Ticket1978CommunicationSpec.scala | 8 ++--- 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 96484d164f..35c0674d23 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -118,7 +118,15 @@ private[akka] class ActiveRemoteClient private[akka] ( // Returns whether the handshake was written to the channel or not def sendSecureCookie(connection: ChannelFuture): Boolean = { - if (!settings.EnableSSL || connection.getChannel.getPipeline.get[SslHandler](classOf[SslHandler]).handshake().awaitUninterruptibly().isSuccess) { + val future = + if (!connection.isSuccess || !settings.EnableSSL) connection + else connection.getChannel.getPipeline.get[SslHandler](classOf[SslHandler]).handshake().awaitUninterruptibly() + + if (!future.isSuccess) { + notifyListeners(RemoteClientError(future.getCause, netty, remoteAddress)) + false + } else { + ChannelAddress.set(connection.getChannel, Some(remoteAddress)) val handshake = RemoteControlProtocol.newBuilder.setCommandType(CommandType.CONNECT) if (settings.SecureCookie.nonEmpty) handshake.setCookie(settings.SecureCookie.get) handshake.setOrigin(RemoteProtocol.AddressProtocol.newBuilder @@ -128,7 +136,7 @@ private[akka] class ActiveRemoteClient private[akka] ( .build) connection.getChannel.write(netty.createControlEnvelope(handshake.build)) true 
- } else false + } } def attemptReconnect(): Boolean = { @@ -136,14 +144,7 @@ private[akka] class ActiveRemoteClient private[akka] ( log.debug("Remote client reconnecting to [{}|{}]", remoteAddress, remoteIP) connection = bootstrap.connect(new InetSocketAddress(remoteIP, remoteAddress.port.get)) openChannels.add(connection.awaitUninterruptibly.getChannel) // Wait until the connection attempt succeeds or fails. - - if (!connection.isSuccess) { - notifyListeners(RemoteClientError(connection.getCause, netty, remoteAddress)) - false - } else { - sendSecureCookie(connection) - true - } + sendSecureCookie(connection) } runSwitch switchOn { @@ -168,24 +169,19 @@ private[akka] class ActiveRemoteClient private[akka] ( openChannels.add(connection.awaitUninterruptibly.getChannel) // Wait until the connection attempt succeeds or fails. - if (!connection.isSuccess) { - notifyListeners(RemoteClientError(connection.getCause, netty, remoteAddress)) - false - } else { - ChannelAddress.set(connection.getChannel, Some(remoteAddress)) - sendSecureCookie(connection) + if (sendSecureCookie(connection)) { notifyListeners(RemoteClientStarted(netty, remoteAddress)) true + } else { + connection.getChannel.close() + openChannels.remove(connection.getChannel) + false } } match { case true ⇒ true case false if reconnectIfAlreadyConnected ⇒ - connection.getChannel.close() - openChannels.remove(connection.getChannel) - log.debug("Remote client reconnecting to [{}]", remoteAddress) attemptReconnect() - case false ⇒ false } } diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 19c8c7432f..79196f321f 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -106,7 +106,7 @@ class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(Cip abstract class 
Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) extends AkkaSpec(cipherConfig.config) with ImplicitSender { - implicit val timeout: Timeout = Timeout(5 seconds) + implicit val timeout: Timeout = Timeout(10 seconds) import RemoteCommunicationSpec._ @@ -129,14 +129,14 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) exten "support tell" in { val here = system.actorFor(otherAddress.toString + "/user/echo") - for (i ← 1 to 10000) here ! (("ping", i)) - for (i ← 1 to 10000) expectMsgPF(timeout.duration) { case (("pong", i), `testActor`) ⇒ true } + for (i ← 1 to 1000) here ! (("ping", i)) + for (i ← 1 to 1000) expectMsgPF(timeout.duration) { case (("pong", i), `testActor`) ⇒ true } } "support ask" in { val here = system.actorFor(otherAddress.toString + "/user/echo") - val f = for (i ← 1 to 10000) yield here ? (("ping", i)) mapTo manifest[((String, Int), ActorRef)] + val f = for (i ← 1 to 1000) yield here ? (("ping", i)) mapTo manifest[((String, Int), ActorRef)] Await.result(Future.sequence(f), timeout.duration).map(_._1._1).toSet must be(Set("pong")) } From ebcdb5d01709bf4a75ab4eccbc72d80340959a57 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 15:45:50 +0200 Subject: [PATCH 476/538] Removing SSL debug residue --- .../src/main/scala/akka/remote/RemoteTransport.scala | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index f6b85dbc0d..ecd59c40e0 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -107,13 +107,8 @@ case class RemoteServerError( @BeanProperty val cause: Throwable, @transient @BeanProperty remote: RemoteTransport) extends RemoteServerLifeCycleEvent { - cause match { - case s: javax.net.ssl.SSLException ⇒ var e: Throwable = s; while (e.getCause ne null) e = 
e.getCause; println(Logging.stackTraceFor(e)) - case _ ⇒ - } - override def logLevel: Logging.LogLevel = Logging.ErrorLevel - override def toString: String = "RemoteServerError@" + remote + "] Error[" + Logging.stackTraceFor(cause) + "]" + override def toString: String = "RemoteServerError@" + remote + "] Error[" + cause + "]" } /** From 3f86abaac88c5f6800f76032f53393fcb85de538 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 15:50:22 +0200 Subject: [PATCH 477/538] Review reformatting --- .../main/scala/akka/remote/netty/NettyRemoteSupport.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index da67ea4f06..7dbce1b5af 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -71,7 +71,10 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider */ def defaultStack(withTimeout: Boolean, isClient: Boolean): Seq[ChannelHandler] = (if (settings.EnableSSL) List(NettySSLSupport(settings, NettyRemoteTransport.this.log, isClient)) else Nil) ::: - (if (withTimeout) List(timeout) else Nil) ::: msgFormat ::: authenticator ::: executionHandler + (if (withTimeout) List(timeout) else Nil) ::: + msgFormat ::: + authenticator ::: + executionHandler /** * Construct an IdleStateHandler which uses [[akka.remote.netty.NettyRemoteTransport]].timer. 
From f2a857046d79b68af6b4c0c77475557a4ca7cc99 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 15:59:52 +0200 Subject: [PATCH 478/538] Using softer language and warning about using 0 threads for the execution handler --- akka-remote/src/main/resources/reference.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 22b0ce3226..0ba9bb3b06 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -122,8 +122,8 @@ akka { # (I) Length in akka.time-unit how long core threads will be kept alive if idling execution-pool-keepalive = 60s - # (I) Size in number of threads of the core pool of the remote execution unit, - # set to 0 to disable the execution pool + # (I) Size in number of threads of the core pool of the remote execution unit. + # A value of 0 will turn this off, which is can lead to deadlocks under some configurations! 
execution-pool-size = 4 # (I) Maximum channel size, 0 for off From 3b771c1590f2e1d46ea92f011b9c2dd733ba045a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 16:21:48 +0200 Subject: [PATCH 479/538] Formatting --- .../main/scala/akka/remote/netty/NettyRemoteSupport.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 7dbce1b5af..9c6e4c85f2 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -71,10 +71,10 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider */ def defaultStack(withTimeout: Boolean, isClient: Boolean): Seq[ChannelHandler] = (if (settings.EnableSSL) List(NettySSLSupport(settings, NettyRemoteTransport.this.log, isClient)) else Nil) ::: - (if (withTimeout) List(timeout) else Nil) ::: - msgFormat ::: - authenticator ::: - executionHandler + (if (withTimeout) List(timeout) else Nil) ::: + msgFormat ::: + authenticator ::: + executionHandler /** * Construct an IdleStateHandler which uses [[akka.remote.netty.NettyRemoteTransport]].timer. From 8414063afe3333dbe359912987a4500ddaf1197e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 16:40:29 +0200 Subject: [PATCH 480/538] Revert "Somehow the code for the fast and the secure was flipped" This reverts commit 95419ba82f76539152882fc25e0187f44d049f10. 
--- .../akka/security/provider/AES128CounterRNGFast.scala | 6 +++--- .../akka/security/provider/AES128CounterRNGSecure.scala | 6 +++--- .../src/test/scala/akka/remote/Ticket1978ConfigSpec.scala | 7 ++++++- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala index 1c58c4f1d0..c355f5a548 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala @@ -3,14 +3,14 @@ */ package akka.security.provider -import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } +import org.uncommons.maths.random.{ AESCounterRNG, SecureRandomSeedGenerator } import java.security.SecureRandom /** * Internal API */ class AES128CounterRNGFast extends java.security.SecureRandomSpi { - private val rng = new AESCounterRNG() + private val rng = new AESCounterRNG(new SecureRandomSeedGenerator()) /** * This is managed internally only @@ -31,6 +31,6 @@ class AES128CounterRNGFast extends java.security.SecureRandomSpi { * @param numBytes the number of seed bytes to generate. * @return the seed bytes. 
*/ - override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = DefaultSeedGenerator.getInstance.generateSeed(numBytes) + override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = (new SecureRandom).generateSeed(numBytes) } diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala index 60beecded7..846476cc2d 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala @@ -3,13 +3,13 @@ */ package akka.security.provider -import org.uncommons.maths.random.{ AESCounterRNG, SecureRandomSeedGenerator } +import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } /** * Internal API */ class AES128CounterRNGSecure extends java.security.SecureRandomSpi { - private val rng = new AESCounterRNG(new SecureRandomSeedGenerator()) + private val rng = new AESCounterRNG() /** * This is managed internally only @@ -30,6 +30,6 @@ class AES128CounterRNGSecure extends java.security.SecureRandomSpi { * @param numBytes the number of seed bytes to generate. * @return the seed bytes. 
*/ - override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = (new SecureRandomSeedGenerator()).generateSeed(numBytes) + override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = DefaultSeedGenerator.getInstance.generateSeed(numBytes) } diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala index 0a39d20a9a..4c39b94087 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -15,7 +15,12 @@ akka { actor.provider = "akka.remote.RemoteActorRefProvider" remote.netty { hostname = localhost - port = 0 + port = 12345 + } + actor.deployment { + /blub.remote = "akka://remote-sys@localhost:12346" + /looker/child.remote = "akka://remote-sys@localhost:12346" + /looker/child/grandchild.remote = "akka://RemoteCommunicationSpec@localhost:12345" } } """) with ImplicitSender with DefaultTimeout { From 07293f258453c60e05af992150d06308735ce1b6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 20 Jun 2012 16:41:17 +0200 Subject: [PATCH 481/538] When the Cipher changes were reverted, this also got reverted, so fixing it --- .../src/test/scala/akka/remote/Ticket1978ConfigSpec.scala | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala index 4c39b94087..0a39d20a9a 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -15,12 +15,7 @@ akka { actor.provider = "akka.remote.RemoteActorRefProvider" remote.netty { hostname = localhost - port = 12345 - } - actor.deployment { - /blub.remote = "akka://remote-sys@localhost:12346" - /looker/child.remote = "akka://remote-sys@localhost:12346" - /looker/child/grandchild.remote = 
"akka://RemoteCommunicationSpec@localhost:12345" + port = 0 } } """) with ImplicitSender with DefaultTimeout { From 26d56f1540684ae6fe5c5b65cf5b62440c3560b3 Mon Sep 17 00:00:00 2001 From: Peter Badenhorst Date: Wed, 20 Jun 2012 19:06:12 +0200 Subject: [PATCH 482/538] Renamed AESCounterRNG classes to be a bit more self-explanatory Added some comments --- akka-remote/src/main/resources/reference.conf | 6 +++--- .../akka/remote/netty/NettySSLSupport.scala | 2 +- ...RNGFast.scala => AES128CounterInetRNG.scala} | 10 +++++++--- ...ecure.scala => AES128CounterSecureRNG.scala} | 8 ++++++-- ...GSecure.scala => AES256CounterInetRNG.scala} | 17 ++++++++++++++--- .../akka/security/provider/AkkaProvider.scala | 12 ++++++------ .../remote/Ticket1978CommunicationSpec.scala | 10 +++++----- 7 files changed, 42 insertions(+), 23 deletions(-) rename akka-remote/src/main/scala/akka/security/provider/{AES128CounterRNGFast.scala => AES128CounterInetRNG.scala} (65%) rename akka-remote/src/main/scala/akka/security/provider/{AES128CounterRNGSecure.scala => AES128CounterSecureRNG.scala} (70%) rename akka-remote/src/main/scala/akka/security/provider/{AES256CounterRNGSecure.scala => AES256CounterInetRNG.scala} (52%) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 0ba9bb3b06..a8d2cb2680 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -205,10 +205,10 @@ akka { # There are three options, in increasing order of security: # "" or SecureRandom => (default) # "SHA1PRNG" => Can be slow because of blocking issues on Linux - # "AES128CounterRNGFast" => fastest startup and based on AES encryption algorithm + # "AES128CounterSecureRNG" => fastest startup and based on AES encryption algorithm # The following use one of 3 possible seed sources, depending on availability: /dev/random, random.org and SecureRandom (provided by Java) - # "AES128CounterRNGSecure" - # 
"AES256CounterRNGSecure" (Install JCE Unlimited Strength Jurisdiction Policy Files first) + # "AES128CounterInetRNG" + # "AES256CounterInetRNG" (Install JCE Unlimited Strength Jurisdiction Policy Files first) # Setting a value here may require you to supply the appropriate cipher suite (see enabled-algorithms section above) random-number-generator = "" } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 9323fb8143..690b4522ec 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -38,7 +38,7 @@ private[akka] object NettySSLSupport { } val rng = rngName match { - case Some(r @ ("AES128CounterRNGFast" | "AES128CounterRNGSecure" | "AES256CounterRNGSecure")) ⇒ + case Some(r @ ("AES128CounterSecureRNG" | "AES128CounterInetRNG" | "AES256CounterInetRNG")) ⇒ log.debug("SSL random number generator set to: {}", r) SecureRandom.getInstance(r, AkkaProvider) case Some(s @ ("SHA1PRNG" | "NativePRNG")) ⇒ diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterInetRNG.scala similarity index 65% rename from akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala rename to akka-remote/src/main/scala/akka/security/provider/AES128CounterInetRNG.scala index 1c58c4f1d0..41d12b275f 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterInetRNG.scala @@ -4,16 +4,19 @@ package akka.security.provider import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } -import java.security.SecureRandom /** * Internal API + * This class is a wrapper around the 128-bit AESCounterRNG algorithm provided by http://maths.uncommons.org/ + * It uses the default seed 
generator which uses one of the following 3 random seed sources: + * Depending on availability: /dev/random, random.org and SecureRandom (provided by Java) + * The only method used by netty ssl is engineNextBytes(bytes) */ -class AES128CounterRNGFast extends java.security.SecureRandomSpi { +class AES128CounterInetRNG extends java.security.SecureRandomSpi { private val rng = new AESCounterRNG() /** - * This is managed internally only + * This is managed internally by AESCounterRNG */ override protected def engineSetSeed(seed: Array[Byte]): Unit = () @@ -25,6 +28,7 @@ class AES128CounterRNGFast extends java.security.SecureRandomSpi { override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** + * Unused method * Returns the given number of seed bytes. This call may be used to * seed other random number generators. * diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterSecureRNG.scala similarity index 70% rename from akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala rename to akka-remote/src/main/scala/akka/security/provider/AES128CounterSecureRNG.scala index 60beecded7..cda59ee03b 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterSecureRNG.scala @@ -7,12 +7,15 @@ import org.uncommons.maths.random.{ AESCounterRNG, SecureRandomSeedGenerator } /** * Internal API + * This class is a wrapper around the AESCounterRNG algorithm provided by http://maths.uncommons.org/ * + * The only method used by netty ssl is engineNextBytes(bytes) + * This RNG is good to use to prevent startup delay when you don't have Internet access to random.org */ -class AES128CounterRNGSecure extends java.security.SecureRandomSpi { +class AES128CounterSecureRNG extends java.security.SecureRandomSpi { private val rng = new 
AESCounterRNG(new SecureRandomSeedGenerator()) /** - * This is managed internally only + * This is managed internally by AESCounterRNG */ override protected def engineSetSeed(seed: Array[Byte]): Unit = () @@ -24,6 +27,7 @@ class AES128CounterRNGSecure extends java.security.SecureRandomSpi { override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** + * Unused method * Returns the given number of seed bytes. This call may be used to * seed other random number generators. * diff --git a/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES256CounterInetRNG.scala similarity index 52% rename from akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala rename to akka-remote/src/main/scala/akka/security/provider/AES256CounterInetRNG.scala index d942938411..076d4fcd7f 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES256CounterInetRNG.scala @@ -7,12 +7,22 @@ import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } /** * Internal API + * This class is a wrapper around the 256-bit AESCounterRNG algorithm provided by http://maths.uncommons.org/ + * It uses the default seed generator which uses one of the following 3 random seed sources: + * Depending on availability: /dev/random, random.org and SecureRandom (provided by Java) + * The only method used by netty ssl is engineNextBytes(bytes) */ -class AES256CounterRNGSecure extends java.security.SecureRandomSpi { - private val rng = new AESCounterRNG(32) // Magic number is magic +class AES256CounterInetRNG extends java.security.SecureRandomSpi { + /** + * From AESCounterRNG API docs: + * Valid values are 16 (128 bits), 24 (192 bits) and 32 (256 bits). + * Any other values will result in an exception from the AES implementation. 
+ */ + private val AES_256_BIT = 32 // Magic number is magic + private val rng = new AESCounterRNG(AES_256_BIT) /** - * This is managed internally only + * This is managed internally by AESCounterRNG */ override protected def engineSetSeed(seed: Array[Byte]): Unit = () @@ -24,6 +34,7 @@ class AES256CounterRNGSecure extends java.security.SecureRandomSpi { override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** + * Unused method * Returns the given number of seed bytes. This call may be used to * seed other random number generators. * diff --git a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala index 0b85231348..707ad0c399 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala @@ -12,14 +12,14 @@ object AkkaProvider extends Provider("Akka", 1.0, "Akka provider 1.0 that implem AccessController.doPrivileged(new PrivilegedAction[this.type] { def run = { //SecureRandom - put("SecureRandom.AES128CounterRNGFast", classOf[AES128CounterRNGFast].getName) - put("SecureRandom.AES128CounterRNGSecure", classOf[AES128CounterRNGSecure].getName) - put("SecureRandom.AES256CounterRNGSecure", classOf[AES256CounterRNGSecure].getName) + put("SecureRandom.AES128CounterSecureRNG", classOf[AES128CounterSecureRNG].getName) + put("SecureRandom.AES128CounterInetRNG", classOf[AES128CounterInetRNG].getName) + put("SecureRandom.AES256CounterInetRNG", classOf[AES256CounterInetRNG].getName) //Implementation type: software or hardware - put("SecureRandom.AES128CounterRNGFast ImplementedIn", "Software") - put("SecureRandom.AES128CounterRNGSecure ImplementedIn", "Software") - put("SecureRandom.AES256CounterRNGSecure ImplementedIn", "Software") + put("SecureRandom.AES128CounterSecureRNG ImplementedIn", "Software") + put("SecureRandom.AES128CounterInetRNG ImplementedIn", 
"Software") + put("SecureRandom.AES256CounterInetRNG ImplementedIn", "Software") null //Magic null is magic } }) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 79196f321f..64408f15b1 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -81,19 +81,19 @@ import Configuration.{ CipherConfig, getCipherConfig } class Ticket1978SHA1PRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("SHA1PRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGFastSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGFast", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) +class Ticket1978AES128CounterSecureRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterSecureRNG", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) /** - * Both of the Secure variants require access to the Internet to access random.org. + * Both of the Inet variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) +class Ticket1978AES128CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterInetRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) /** - * Both of the Secure variants require access to the Internet to access random.org. + * Both of the Inet variants require access to the Internet to access random.org. 
*/ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES256CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterRNGSecure", "TLS_RSA_WITH_AES_256_CBC_SHA")) +class Ticket1978AES256CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterInetRNG", "TLS_RSA_WITH_AES_256_CBC_SHA")) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978DefaultRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("", "TLS_RSA_WITH_AES_128_CBC_SHA")) From 5b36334003e151707d1b2ab08b9e360bf88a8159 Mon Sep 17 00:00:00 2001 From: Peter Badenhorst Date: Wed, 20 Jun 2012 19:06:12 +0200 Subject: [PATCH 483/538] Renamed AESCounterRNG classes to be a bit more self-explanatory Added some comments --- akka-remote/src/main/resources/reference.conf | 6 +++--- .../akka/remote/netty/NettySSLSupport.scala | 2 +- ...GSecure.scala => AES128CounterInetRNG.scala} | 9 +++++++-- ...GFast.scala => AES128CounterSecureRNG.scala} | 11 +++++++---- ...GSecure.scala => AES256CounterInetRNG.scala} | 17 ++++++++++++++--- .../akka/security/provider/AkkaProvider.scala | 12 ++++++------ .../remote/Ticket1978CommunicationSpec.scala | 10 +++++----- 7 files changed, 43 insertions(+), 24 deletions(-) rename akka-remote/src/main/scala/akka/security/provider/{AES128CounterRNGSecure.scala => AES128CounterInetRNG.scala} (64%) rename akka-remote/src/main/scala/akka/security/provider/{AES128CounterRNGFast.scala => AES128CounterSecureRNG.scala} (64%) rename akka-remote/src/main/scala/akka/security/provider/{AES256CounterRNGSecure.scala => AES256CounterInetRNG.scala} (52%) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 0ba9bb3b06..a8d2cb2680 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -205,10 +205,10 @@ akka { # There are three options, in increasing order of 
security: # "" or SecureRandom => (default) # "SHA1PRNG" => Can be slow because of blocking issues on Linux - # "AES128CounterRNGFast" => fastest startup and based on AES encryption algorithm + # "AES128CounterSecureRNG" => fastest startup and based on AES encryption algorithm # The following use one of 3 possible seed sources, depending on availability: /dev/random, random.org and SecureRandom (provided by Java) - # "AES128CounterRNGSecure" - # "AES256CounterRNGSecure" (Install JCE Unlimited Strength Jurisdiction Policy Files first) + # "AES128CounterInetRNG" + # "AES256CounterInetRNG" (Install JCE Unlimited Strength Jurisdiction Policy Files first) # Setting a value here may require you to supply the appropriate cipher suite (see enabled-algorithms section above) random-number-generator = "" } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala index 9323fb8143..690b4522ec 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettySSLSupport.scala @@ -38,7 +38,7 @@ private[akka] object NettySSLSupport { } val rng = rngName match { - case Some(r @ ("AES128CounterRNGFast" | "AES128CounterRNGSecure" | "AES256CounterRNGSecure")) ⇒ + case Some(r @ ("AES128CounterSecureRNG" | "AES128CounterInetRNG" | "AES256CounterInetRNG")) ⇒ log.debug("SSL random number generator set to: {}", r) SecureRandom.getInstance(r, AkkaProvider) case Some(s @ ("SHA1PRNG" | "NativePRNG")) ⇒ diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterInetRNG.scala similarity index 64% rename from akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala rename to akka-remote/src/main/scala/akka/security/provider/AES128CounterInetRNG.scala index 846476cc2d..41d12b275f 100644 --- 
a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGSecure.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterInetRNG.scala @@ -7,12 +7,16 @@ import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } /** * Internal API + * This class is a wrapper around the 128-bit AESCounterRNG algorithm provided by http://maths.uncommons.org/ + * It uses the default seed generator which uses one of the following 3 random seed sources: + * Depending on availability: /dev/random, random.org and SecureRandom (provided by Java) + * The only method used by netty ssl is engineNextBytes(bytes) */ -class AES128CounterRNGSecure extends java.security.SecureRandomSpi { +class AES128CounterInetRNG extends java.security.SecureRandomSpi { private val rng = new AESCounterRNG() /** - * This is managed internally only + * This is managed internally by AESCounterRNG */ override protected def engineSetSeed(seed: Array[Byte]): Unit = () @@ -24,6 +28,7 @@ class AES128CounterRNGSecure extends java.security.SecureRandomSpi { override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** + * Unused method * Returns the given number of seed bytes. This call may be used to * seed other random number generators. 
* diff --git a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala b/akka-remote/src/main/scala/akka/security/provider/AES128CounterSecureRNG.scala similarity index 64% rename from akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala rename to akka-remote/src/main/scala/akka/security/provider/AES128CounterSecureRNG.scala index c355f5a548..cda59ee03b 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES128CounterRNGFast.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES128CounterSecureRNG.scala @@ -4,16 +4,18 @@ package akka.security.provider import org.uncommons.maths.random.{ AESCounterRNG, SecureRandomSeedGenerator } -import java.security.SecureRandom /** * Internal API + * This class is a wrapper around the AESCounterRNG algorithm provided by http://maths.uncommons.org/ * + * The only method used by netty ssl is engineNextBytes(bytes) + * This RNG is good to use to prevent startup delay when you don't have Internet access to random.org */ -class AES128CounterRNGFast extends java.security.SecureRandomSpi { +class AES128CounterSecureRNG extends java.security.SecureRandomSpi { private val rng = new AESCounterRNG(new SecureRandomSeedGenerator()) /** - * This is managed internally only + * This is managed internally by AESCounterRNG */ override protected def engineSetSeed(seed: Array[Byte]): Unit = () @@ -25,12 +27,13 @@ class AES128CounterRNGFast extends java.security.SecureRandomSpi { override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** + * Unused method * Returns the given number of seed bytes. This call may be used to * seed other random number generators. * * @param numBytes the number of seed bytes to generate. * @return the seed bytes. 
*/ - override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = (new SecureRandom).generateSeed(numBytes) + override protected def engineGenerateSeed(numBytes: Int): Array[Byte] = (new SecureRandomSeedGenerator()).generateSeed(numBytes) } diff --git a/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala b/akka-remote/src/main/scala/akka/security/provider/AES256CounterInetRNG.scala similarity index 52% rename from akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala rename to akka-remote/src/main/scala/akka/security/provider/AES256CounterInetRNG.scala index d942938411..076d4fcd7f 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AES256CounterRNGSecure.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AES256CounterInetRNG.scala @@ -7,12 +7,22 @@ import org.uncommons.maths.random.{ AESCounterRNG, DefaultSeedGenerator } /** * Internal API + * This class is a wrapper around the 256-bit AESCounterRNG algorithm provided by http://maths.uncommons.org/ + * It uses the default seed generator which uses one of the following 3 random seed sources: + * Depending on availability: /dev/random, random.org and SecureRandom (provided by Java) + * The only method used by netty ssl is engineNextBytes(bytes) */ -class AES256CounterRNGSecure extends java.security.SecureRandomSpi { - private val rng = new AESCounterRNG(32) // Magic number is magic +class AES256CounterInetRNG extends java.security.SecureRandomSpi { + /** + * From AESCounterRNG API docs: + * Valid values are 16 (128 bits), 24 (192 bits) and 32 (256 bits). + * Any other values will result in an exception from the AES implementation. 
+ */ + private val AES_256_BIT = 32 // Magic number is magic + private val rng = new AESCounterRNG(AES_256_BIT) /** - * This is managed internally only + * This is managed internally by AESCounterRNG */ override protected def engineSetSeed(seed: Array[Byte]): Unit = () @@ -24,6 +34,7 @@ class AES256CounterRNGSecure extends java.security.SecureRandomSpi { override protected def engineNextBytes(bytes: Array[Byte]): Unit = rng.nextBytes(bytes) /** + * Unused method * Returns the given number of seed bytes. This call may be used to * seed other random number generators. * diff --git a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala index 0b85231348..707ad0c399 100644 --- a/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala +++ b/akka-remote/src/main/scala/akka/security/provider/AkkaProvider.scala @@ -12,14 +12,14 @@ object AkkaProvider extends Provider("Akka", 1.0, "Akka provider 1.0 that implem AccessController.doPrivileged(new PrivilegedAction[this.type] { def run = { //SecureRandom - put("SecureRandom.AES128CounterRNGFast", classOf[AES128CounterRNGFast].getName) - put("SecureRandom.AES128CounterRNGSecure", classOf[AES128CounterRNGSecure].getName) - put("SecureRandom.AES256CounterRNGSecure", classOf[AES256CounterRNGSecure].getName) + put("SecureRandom.AES128CounterSecureRNG", classOf[AES128CounterSecureRNG].getName) + put("SecureRandom.AES128CounterInetRNG", classOf[AES128CounterInetRNG].getName) + put("SecureRandom.AES256CounterInetRNG", classOf[AES256CounterInetRNG].getName) //Implementation type: software or hardware - put("SecureRandom.AES128CounterRNGFast ImplementedIn", "Software") - put("SecureRandom.AES128CounterRNGSecure ImplementedIn", "Software") - put("SecureRandom.AES256CounterRNGSecure ImplementedIn", "Software") + put("SecureRandom.AES128CounterSecureRNG ImplementedIn", "Software") + put("SecureRandom.AES128CounterInetRNG ImplementedIn", 
"Software") + put("SecureRandom.AES256CounterInetRNG ImplementedIn", "Software") null //Magic null is magic } }) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 79196f321f..64408f15b1 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -81,19 +81,19 @@ import Configuration.{ CipherConfig, getCipherConfig } class Ticket1978SHA1PRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("SHA1PRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGFastSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGFast", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) +class Ticket1978AES128CounterSecureRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterSecureRNG", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA")) /** - * Both of the Secure variants require access to the Internet to access random.org. + * Both of the Inet variants require access to the Internet to access random.org. */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES128CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterRNGSecure", "TLS_RSA_WITH_AES_128_CBC_SHA")) +class Ticket1978AES128CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES128CounterInetRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) /** - * Both of the Secure variants require access to the Internet to access random.org. + * Both of the Inet variants require access to the Internet to access random.org. 
*/ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket1978AES256CounterRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterRNGSecure", "TLS_RSA_WITH_AES_256_CBC_SHA")) +class Ticket1978AES256CounterInetRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("AES256CounterInetRNG", "TLS_RSA_WITH_AES_256_CBC_SHA")) @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket1978DefaultRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("", "TLS_RSA_WITH_AES_128_CBC_SHA")) From a7c8d7da1042d6ca766bc56e1f8cf815d596d7da Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 20 Jun 2012 19:22:25 +0200 Subject: [PATCH 484/538] Remove unnecessary clock param in one of AccrualFailureDetector's constructors --- .../main/scala/akka/cluster/AccrualFailureDetector.scala | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index b10962ce11..6962fc10d6 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -57,7 +57,7 @@ object AccrualFailureDetector { * to this duration, with a with rather high standard deviation (since environment is unknown * in the beginning) * - * @clock The clock, returning current time in milliseconds, but can be faked for testing + * @param clock The clock, returning current time in milliseconds, but can be faked for testing * purposes. It is only used for measuring intervals (duration). 
* */ @@ -68,7 +68,7 @@ class AccrualFailureDetector( val minStdDeviation: Duration, val acceptableHeartbeatPause: Duration, val firstHeartbeatEstimate: Duration, - val clock: () ⇒ Long) extends FailureDetector { + val clock: () ⇒ Long = AccrualFailureDetector.realClock) extends FailureDetector { import AccrualFailureDetector._ @@ -77,8 +77,7 @@ class AccrualFailureDetector( */ def this( system: ActorSystem, - settings: ClusterSettings, - clock: () ⇒ Long = AccrualFailureDetector.realClock) = + settings: ClusterSettings) = this( system, settings.FailureDetectorThreshold, @@ -90,7 +89,7 @@ class AccrualFailureDetector( // first real heartbeat is sent. Initial heartbeat is added when joining. // FIXME this can be changed to HeartbeatInterval when ticket #2249 is fixed settings.GossipInterval * 3 + settings.HeartbeatInterval, - clock) + AccrualFailureDetector.realClock) private val log = Logging(system, "FailureDetector") From 42078e70836ef593f392324c05211dd14db5a88a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 21 Jun 2012 10:58:35 +0200 Subject: [PATCH 485/538] Reintroduce 'seed' nodes, see #2219 * Implement the join to seed nodes process When a new node is started started it sends a message to all seed nodes and then sends join command to the one that answers first. 
* Configuration of seed-nodes and auto-join * New JoinSeedNodeSpec that verifies the auto join to seed nodes * In tests seed nodes are configured by overriding seedNodes function, since addresses are not known before start * Deputy nodes are the live members of the seed nodes (not sure if that will be the final solution, see ticket 2252 * Updated cluster.rst with latest info about deputy and seed nodes --- .../src/main/resources/reference.conf | 12 ++- .../src/main/scala/akka/cluster/Cluster.scala | 75 +++++++++++++++---- .../scala/akka/cluster/ClusterSettings.scala | 9 +-- .../scala/akka/cluster/JoinSeedNodeSpec.scala | 46 ++++++++++++ .../akka/cluster/MultiNodeClusterSpec.scala | 15 +++- .../scala/akka/cluster/NodeJoinSpec.scala | 2 + .../scala/akka/cluster/SunnyWeatherSpec.scala | 1 - .../akka/cluster/ClusterConfigSpec.scala | 4 +- .../test/scala/akka/cluster/ClusterSpec.scala | 31 +++++--- akka-docs/cluster/cluster.rst | 16 ++-- 10 files changed, 166 insertions(+), 45 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 33616f5812..f75c7f9018 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -8,9 +8,15 @@ akka { cluster { - # node to join - the full URI defined by a string on the form of "akka://system@hostname:port" - # leave as empty string if the node should be a singleton cluster - node-to-join = "" + # Initial contact points of the cluster. Nodes to join at startup if auto-join = on. + # The seed nodes also play the role of deputy nodes (the nodes responsible + # for breaking network partitions). + # Comma separated full URIs defined by a string on the form of "akka://system@hostname:port" + # Leave as empty if the node should be a singleton cluster. 
+ seed-nodes = [] + + # automatic join the seed-nodes at startup + auto-join = on # should the 'leader' in the cluster be allowed to automatically mark unreachable nodes as DOWN? auto-down = on diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 3732019a50..a4765fc2cf 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -11,7 +11,7 @@ import akka.dispatch.Await import akka.dispatch.MonitorableThreadFactory import akka.event.Logging import akka.jsr166y.ThreadLocalRandom -import akka.pattern.ask +import akka.pattern._ import akka.remote._ import akka.routing._ import akka.util._ @@ -55,11 +55,29 @@ sealed trait ClusterMessage extends Serializable object ClusterUserAction { /** - * Command to join the cluster. Sent when a node (reprsesented by 'address') + * Command to join the cluster. Sent when a node (represented by 'address') * wants to join another node (the receiver). */ case class Join(address: Address) extends ClusterMessage + /** + * Start message of the process to join one of the seed nodes. + * The node sends `InitJoin` to all seed nodes, which replies + * with `InitJoinAck`. The first reply is used others are discarded. + * The node sends `Join` command to the seed node that replied first. + */ + case object JoinSeedNode extends ClusterMessage + + /** + * @see JoinSeedNode + */ + case object InitJoin extends ClusterMessage + + /** + * @see JoinSeedNode + */ + case class InitJoinAck(address: Address) extends ClusterMessage + /** * Command to leave the cluster. 
*/ @@ -343,11 +361,28 @@ private[cluster] final class ClusterCommandDaemon(cluster: Cluster) extends Acto val log = Logging(context.system, this) def receive = { - case Join(address) ⇒ cluster.joining(address) - case Down(address) ⇒ cluster.downing(address) - case Leave(address) ⇒ cluster.leaving(address) - case Exit(address) ⇒ cluster.exiting(address) - case Remove(address) ⇒ cluster.removing(address) + case JoinSeedNode ⇒ joinSeedNode() + case InitJoin ⇒ sender ! InitJoinAck(cluster.selfAddress) + case InitJoinAck(address) ⇒ cluster.join(address) + case Join(address) ⇒ cluster.joining(address) + case Down(address) ⇒ cluster.downing(address) + case Leave(address) ⇒ cluster.leaving(address) + case Exit(address) ⇒ cluster.exiting(address) + case Remove(address) ⇒ cluster.removing(address) + } + + def joinSeedNode(): Unit = { + val seedRoutees = for (address ← cluster.seedNodes; if address != cluster.selfAddress) + yield self.path.toStringWithAddress(address) + if (seedRoutees.nonEmpty) { + // FIXME config of within (use JoinInProgressTimeout when that is in master) + implicit val within = Timeout(5 seconds) + val seedRouter = context.actorOf( + Props.empty.withRouter(ScatterGatherFirstCompletedRouter( + routees = seedRoutees, within = within.duration))) + seedRouter ? InitJoin pipeTo self + seedRouter ! 
PoisonPill + } } override def unhandled(unknown: Any) = log.error("Illegal command [{}]", unknown) @@ -479,8 +514,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) implicit private val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) - private val nodeToJoin: Option[Address] = NodeToJoin filter (_ != selfAddress) - private val serialization = remote.serialization private val _isRunning = new AtomicBoolean(true) @@ -507,8 +540,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) new AtomicReference[State](State(seenVersionedGossip)) } - // try to join the node defined in the 'akka.cluster.node-to-join' option - autoJoin() + // try to join one of the nodes defined in the 'akka.cluster.seed-nodes' + if (AutoJoin) joinSeedNode() // ======================================================== // ===================== WORK DAEMONS ===================== @@ -927,9 +960,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private[cluster] def receiveHeartbeat(from: Address): Unit = failureDetector heartbeat from /** - * Joins the pre-configured contact point. + * Joins the pre-configured contact points. */ - private def autoJoin(): Unit = nodeToJoin foreach join + private def joinSeedNode(): Unit = clusterCommandDaemon ! ClusterUserAction.JoinSeedNode /** * INTERNAL API. @@ -999,6 +1032,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // 1. gossip to alive members val gossipedToAlive = gossipToRandomNodeOf(localMemberAddresses) + // FIXME does this work as intended? See ticket #2252 // 2. 
gossip to unreachable members if (localUnreachableSize > 0) { val probability = gossipToUnreachableProbablity(localMembersSize, localUnreachableSize) @@ -1006,11 +1040,12 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) gossipToRandomNodeOf(localUnreachableMembers.map(_.address)) } + // FIXME does this work as intended? See ticket #2252 // 3. gossip to a deputy nodes for facilitating partition healing val deputies = deputyNodes(localMemberAddresses) val alreadyGossipedToDeputy = gossipedToAlive.map(deputies.contains(_)).getOrElse(false) - if ((!alreadyGossipedToDeputy || localMembersSize < NrOfDeputyNodes) && deputies.nonEmpty) { - val probability = gossipToDeputyProbablity(localMembersSize, localUnreachableSize, NrOfDeputyNodes) + if ((!alreadyGossipedToDeputy || localMembersSize < seedNodes.size) && deputies.nonEmpty) { + val probability = gossipToDeputyProbablity(localMembersSize, localUnreachableSize, seedNodes.size) if (ThreadLocalRandom.current.nextDouble() < probability) gossipToRandomNodeOf(deputies) } @@ -1337,7 +1372,15 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) * Gets the addresses of a all the 'deputy' nodes - excluding this node if part of the group. */ private def deputyNodes(addresses: IndexedSeq[Address]): IndexedSeq[Address] = - addresses drop 1 take NrOfDeputyNodes filterNot (_ == selfAddress) + addresses filterNot (_ == selfAddress) intersect seedNodes + + /** + * INTERNAL API. + * + * Make it possible to override/configure seedNodes from tests without + * specifying in config. Addresses are unknown before startup time. + */ + private[cluster] def seedNodes: IndexedSeq[Address] = SeedNodes /** * INTERNAL API. 
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 64ae1c28cb..12ed666680 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -22,17 +22,16 @@ class ClusterSettings(val config: Config, val systemName: String) { final val FailureDetectorAcceptableHeartbeatPause: Duration = Duration(getMilliseconds("akka.cluster.failure-detector.acceptable-heartbeat-pause"), MILLISECONDS) - final val NodeToJoin: Option[Address] = getString("akka.cluster.node-to-join") match { - case "" ⇒ None - case AddressFromURIString(addr) ⇒ Some(addr) - } + final val SeedNodes: IndexedSeq[Address] = getStringList("akka.cluster.seed-nodes").asScala.map { + case AddressFromURIString(addr) ⇒ addr + }.toIndexedSeq final val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) final val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) final val HeartbeatInterval = Duration(getMilliseconds("akka.cluster.heartbeat-interval"), MILLISECONDS) final val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS) final val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS) final val NrOfGossipDaemons = getInt("akka.cluster.nr-of-gossip-daemons") - final val NrOfDeputyNodes = getInt("akka.cluster.nr-of-deputy-nodes") + final val AutoJoin = getBoolean("akka.cluster.auto-join") final val AutoDown = getBoolean("akka.cluster.auto-down") final val SchedulerTickDuration = Duration(getMilliseconds("akka.cluster.scheduler.tick-duration"), MILLISECONDS) final val SchedulerTicksPerWheel = getInt("akka.cluster.scheduler.ticks-per-wheel") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala new file mode 100644 index 0000000000..38f03a4e66 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala @@ -0,0 +1,46 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object JoinSeedNodeMultiJvmSpec extends MultiNodeConfig { + val seed1 = role("seed1") + val seed2 = role("seed2") + val ordinary1 = role("ordinary1") + val ordinary2 = role("ordinary2") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class JoinSeedNodeMultiJvmNode1 extends JoinSeedNodeSpec with FailureDetectorPuppetStrategy +class JoinSeedNodeMultiJvmNode2 extends JoinSeedNodeSpec with FailureDetectorPuppetStrategy +class JoinSeedNodeMultiJvmNode3 extends JoinSeedNodeSpec with FailureDetectorPuppetStrategy +class JoinSeedNodeMultiJvmNode4 extends JoinSeedNodeSpec with FailureDetectorPuppetStrategy + +abstract class JoinSeedNodeSpec + extends MultiNodeSpec(JoinSeedNodeMultiJvmSpec) + with MultiNodeClusterSpec { + + import JoinSeedNodeMultiJvmSpec._ + + override def seedNodes = IndexedSeq(seed1, seed2) + + "A cluster with configured seed nodes" must { + "join the seed nodes at startup" taggedAs LongRunningTest in { + + startClusterNode() + enterBarrier("all-started") + + awaitUpConvergence(4) + + enterBarrier("after") + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 9fd8746923..79e3a67e1e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -26,7 +26,6 @@ object 
MultiNodeClusterSpec { leader-actions-interval = 200 ms unreachable-nodes-reaper-interval = 200 ms periodic-tasks-initial-delay = 300 ms - nr-of-deputy-nodes = 2 } akka.test { single-expect-default = 5 s @@ -77,10 +76,22 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu throw t } + /** + * Make it possible to override/configure seedNodes from tests without + * specifying in config. Addresses are unknown before startup time. + */ + protected def seedNodes: IndexedSeq[RoleName] = IndexedSeq.empty + /** * The cluster node instance. Needs to be lazily created. */ - private lazy val clusterNode = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) + private lazy val clusterNode = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) { + override def seedNodes: IndexedSeq[Address] = { + val testSeedNodes = MultiNodeClusterSpec.this.seedNodes + if (testSeedNodes.isEmpty) super.seedNodes + else testSeedNodes map address + } + } /** * Get the cluster node to use. 
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index cee5efc0db..50656a6a9d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -36,6 +36,8 @@ abstract class NodeJoinSpec startClusterNode() } + enterBarrier("first-started") + runOn(second) { cluster.join(first) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index c283665b30..086c2fb00a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -21,7 +21,6 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" akka.cluster { - nr-of-deputy-nodes = 0 # FIXME remove this (use default) when ticket #2239 has been fixed gossip-interval = 400 ms } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 58f0683c25..5e44b0a4bc 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -21,14 +21,14 @@ class ClusterConfigSpec extends AkkaSpec { FailureDetectorImplementationClass must be(classOf[AccrualFailureDetector].getName) FailureDetectorMinStdDeviation must be(100 millis) FailureDetectorAcceptableHeartbeatPause must be(3 seconds) - NodeToJoin must be(None) + SeedNodes must be(Seq.empty[String]) PeriodicTasksInitialDelay must be(1 seconds) GossipInterval must be(1 second) HeartbeatInterval must be(1 second) LeaderActionsInterval must be(1 second) UnreachableNodesReaperInterval must be(1 second) NrOfGossipDaemons must be(4) - NrOfDeputyNodes must be(3) + AutoJoin must be(true) AutoDown must 
be(true) SchedulerTickDuration must be(33 millis) SchedulerTicksPerWheel must be(512) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 229ec7137d..a0bc7f6450 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -11,12 +11,13 @@ import akka.actor.ExtendedActorSystem import akka.actor.Address import java.util.concurrent.atomic.AtomicInteger import org.scalatest.BeforeAndAfter +import akka.remote.RemoteActorRefProvider object ClusterSpec { val config = """ akka.cluster { + auto-join = off auto-down = off - nr-of-deputy-nodes = 3 periodic-tasks-initial-delay = 120 seconds // turn off scheduled tasks } akka.actor.provider = "akka.remote.RemoteActorRefProvider" @@ -31,12 +32,24 @@ object ClusterSpec { class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { import ClusterSpec._ + val selfAddress = system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].transport.address + val addresses = IndexedSeq( + selfAddress, + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 1), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 2), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 3), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 4), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 5)) + val deterministicRandom = new AtomicInteger val failureDetector = new FailureDetectorPuppet(system) val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) { + // 3 deputy nodes (addresses index 1, 2, 3) + override def seedNodes = addresses.slice(1, 4) + override def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = { if (addresses.isEmpty) None else 
Some(addresses.toSeq(deterministicRandom.getAndIncrement % addresses.size)) @@ -68,15 +81,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { } - val selfAddress = cluster.self.address - val addresses = IndexedSeq( - selfAddress, - Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 1), - Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 2), - Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 3), - Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 4), - Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 5)) - def memberStatus(address: Address): Option[MemberStatus] = cluster.latestGossip.members.collectFirst { case m if m.address == address ⇒ m.status } @@ -89,6 +93,11 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { "A Cluster" must { + "use the address of the remote transport" in { + cluster.selfAddress must be(selfAddress) + cluster.self.address must be(selfAddress) + } + "initially be singleton cluster and reach convergence immediately" in { cluster.isSingletonCluster must be(true) cluster.latestGossip.members.map(_.address) must be(Set(selfAddress)) @@ -161,7 +170,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { "gossip to duputy node" in { cluster._gossipToDeputyProbablity = 1.0 // always - // we have configured 2 deputy nodes + // we have configured 3 deputy nodes (seedNodes) cluster.gossip() // 1 is deputy cluster.gossip() // 2 is deputy cluster.gossip() // 3 is deputy diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index 0126897dab..a9190420dc 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -183,14 +183,20 @@ according to the Failure Detector is considered unreachable. This means setting the unreachable node status to ``down`` automatically. 
+Seed Nodes +^^^^^^^^^^ + +The seed nodes are configured contact points for inital join of the cluster. +When a new node is started started it sends a message to all seed nodes and +then sends join command to the one that answers first. + +It is possible to turn off automatic join. + Deputy Nodes ^^^^^^^^^^^^ -After gossip convergence a set of ``deputy`` nodes for the cluster can be -determined. As with the ``leader``, there is no ``deputy`` election process, -the deputies can always be recognised deterministically by any node whenever there -is gossip convergence. The list of ``deputy`` nodes is simply the N - 1 number -of nodes (e.g. starting with the first node after the ``leader``) in sorted order. +The deputy nodes are the live members of the configured seed nodes. +It is preferred to use deputy nodes in different racks/data centers. The nodes defined as ``deputy`` nodes are just regular member nodes whose only "special role" is to help breaking logical partitions as seen in the gossip From 91268365c15729650cd7c7dbcab4b296793ebb91 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 21 Jun 2012 12:30:26 +0200 Subject: [PATCH 486/538] Temporary ignore, see #2259 --- .../src/multi-jvm/scala/akka/cluster/TransitionSpec.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 60594d145e..3043c198df 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -387,7 +387,8 @@ abstract class TransitionSpec enterBarrier("after-5") } - "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest in { + // FIXME ignored due to #2259 + "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest ignore { runOn(fifth) { markNodeAsUnavailable(second) 
cluster.reapUnreachableMembers() From 4e49b2c843ab08ddc08ee0ae98b9626b2a88eb02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 21 Jun 2012 13:16:35 +0200 Subject: [PATCH 487/538] Make MultiNodeSpec shut down the conductor after other nodes. See #2230 --- .../akka/remote/testconductor/Conductor.scala | 24 +++++++------ .../remote/testkit/MultiNodeSpecSpec.scala | 36 +++++++++++++++++++ .../remote/testconductor/BarrierSpec.scala | 20 ++++------- .../akka/remote/testkit/MultiNodeSpec.scala | 18 ++++++++-- .../test/scala/akka/testkit/AkkaSpec.scala | 3 ++ 5 files changed, 75 insertions(+), 26 deletions(-) create mode 100644 akka-remote-tests/src/multi-jvm/scala/akka/remote/testkit/MultiNodeSpecSpec.scala diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index b6265125b1..eba0fffe63 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -282,6 +282,8 @@ private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) ex import akka.actor.FSM._ import Controller._ + var roleName: RoleName = null + startWith(Initial, None) whenUnhandled { @@ -292,12 +294,15 @@ private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) ex } onTermination { - case _ ⇒ controller ! ClientDisconnected + case _ ⇒ + controller ! ClientDisconnected(roleName) + channel.close() } when(Initial, stateTimeout = 10 seconds) { case Event(Hello(name, addr), _) ⇒ - controller ! NodeInfo(RoleName(name), addr, self) + roleName = RoleName(name) + controller ! 
NodeInfo(roleName, addr, self) goto(Ready) case Event(x: NetworkOp, _) ⇒ log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x) @@ -334,10 +339,6 @@ private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) ex } initialize - - onTermination { - case _ ⇒ channel.close() - } } /** @@ -517,10 +518,13 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor if (clients.find(_.name == n.name).isDefined) throw new DuplicateNode(d, n) stay using d.copy(clients = clients + n) case Event(ClientDisconnected(name), d @ Data(clients, _, arrived, _)) ⇒ - if (clients.isEmpty) throw BarrierEmpty(d, "cannot disconnect " + name + ": no client to disconnect") - (clients find (_.name == name)) match { - case None ⇒ stay - case Some(c) ⇒ throw ClientLost(d.copy(clients = clients - c, arrived = arrived filterNot (_ == c.fsm)), name) + if (arrived.isEmpty) + stay using d.copy(clients = clients.filterNot(_.name == name)) + else { + (clients find (_.name == name)) match { + case None ⇒ stay + case Some(c) ⇒ throw ClientLost(d.copy(clients = clients - c, arrived = arrived filterNot (_ == c.fsm)), name) + } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testkit/MultiNodeSpecSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testkit/MultiNodeSpecSpec.scala new file mode 100644 index 0000000000..2a709a99a7 --- /dev/null +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testkit/MultiNodeSpecSpec.scala @@ -0,0 +1,36 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.remote.testkit + +import akka.testkit.LongRunningTest + +object MultiNodeSpecMultiJvmSpec extends MultiNodeConfig { + commonConfig(debugConfig(on = false)) + + val node1 = role("node1") + val node2 = role("node2") + val node3 = role("node3") + val node4 = role("node4") +} + +class MultiNodeSpecSpecMultiJvmNode1 extends MultiNodeSpecSpec +class MultiNodeSpecSpecMultiJvmNode2 extends MultiNodeSpecSpec +class MultiNodeSpecSpecMultiJvmNode3 extends MultiNodeSpecSpec +class MultiNodeSpecSpecMultiJvmNode4 extends MultiNodeSpecSpec + +class MultiNodeSpecSpec extends MultiNodeSpec(MultiNodeSpecMultiJvmSpec) { + + import MultiNodeSpecMultiJvmSpec._ + + def initialParticipants = 4 + + "A MultiNodeSpec" must { + + "wait for all nodes to remove themselves before we shut the conductor down" taggedAs LongRunningTest in { + enterBarrier("startup") + // this test is empty here since it only exercises the shutdown code in the MultiNodeSpec + } + + } +} diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index f418f4a717..8ff95d0831 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -59,14 +59,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val b = getBarrier() b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters) b ! ClientDisconnected(B) - EventFilter[ClientLost](occurrences = 1) intercept { - b ! ClientDisconnected(A) - } - expectMsg(Failed(b, ClientLost(Data(Set(), "", Nil, null), A))) - EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! ClientDisconnected(A) - } - expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil, null), "cannot disconnect RoleName(a): no client to disconnect"))) + expectNoMsg(1 second) + b ! 
ClientDisconnected(A) + expectNoMsg(1 second) } "fail entering barrier when nobody registered" taggedAs TimingTest in { @@ -264,12 +259,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { b ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor) expectMsg(ToClient(Done)) b ! ClientDisconnected(B) - EventFilter[ClientLost](occurrences = 1) intercept { - b ! ClientDisconnected(A) - } - EventFilter[BarrierEmpty](occurrences = 1) intercept { - b ! ClientDisconnected(A) - } + expectNoMsg(1 second) + b ! ClientDisconnected(A) + expectNoMsg(1 second) } "fail entering barrier when nobody registered" taggedAs TimingTest in { diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 4d65a2084e..9f88f9e1c8 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -7,12 +7,13 @@ import java.net.InetSocketAddress import com.typesafe.config.{ ConfigObject, ConfigFactory, Config } -import akka.actor.{ RootActorPath, Deploy, ActorPath, ActorSystem, ExtendedActorSystem } +import akka.actor.{ RootActorPath, ActorPath, ActorSystem, ExtendedActorSystem } import akka.dispatch.Await import akka.dispatch.Await.Awaitable import akka.remote.testconductor.{ TestConductorExt, TestConductor, RoleName } import akka.testkit.AkkaSpec -import akka.util.{ Timeout, NonFatal, Duration } +import akka.util.{ Timeout, NonFatal } +import akka.util.duration._ /** * Configure the role names and participants of the test, including configuration settings. 
@@ -261,4 +262,17 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: // useful to see which jvm is running which role log.info("Role [{}] started", myself.name) + // wait for all nodes to remove themselves before we shut the conductor down + final override def beforeShutdown() = { + if (selfIndex == 0) { + testConductor.removeNode(myself) + within(testConductor.Settings.BarrierTimeout.duration) { + awaitCond { + val nodes = testConductor.getNodes.await + nodes.size < 1 || (nodes.size == 1 && nodes.head == myself) + } + } + } + } + } diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index 424c913662..f9ee989e1c 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -74,6 +74,7 @@ abstract class AkkaSpec(_system: ActorSystem) } final override def afterAll { + beforeShutdown() system.shutdown() try system.awaitTermination(5 seconds) catch { case _: TimeoutException ⇒ system.log.warning("Failed to stop [{}] within 5 seconds", system.name) @@ -83,6 +84,8 @@ abstract class AkkaSpec(_system: ActorSystem) protected def atStartup() {} + protected def beforeShutdown() {} + protected def atTermination() {} def spawn(dispatcherId: String = Dispatchers.DefaultDispatcherId)(body: ⇒ Unit) { From 9710cceacb810428618c0803fc4f4e816f01b495 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 21 Jun 2012 16:10:04 +0200 Subject: [PATCH 488/538] Changes based on review. 
See #2230 --- .../src/test/scala/akka/remote/testkit/MultiNodeSpec.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 9f88f9e1c8..25bb8df7dc 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -268,8 +268,7 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: testConductor.removeNode(myself) within(testConductor.Settings.BarrierTimeout.duration) { awaitCond { - val nodes = testConductor.getNodes.await - nodes.size < 1 || (nodes.size == 1 && nodes.head == myself) + testConductor.getNodes.await.filterNot(_ == myself).isEmpty } } } From 5fe9dcaf4e126487a8ff5a9f4e416a7b6edf4b3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 24 Jun 2012 14:43:46 +0200 Subject: [PATCH 489/538] Cleaned up stuff around file-based mailbox --- .../src/main/resources/reference.conf | 38 +++++++++---------- .../akka/actor/mailbox/FileBasedMailbox.scala | 9 ++--- .../mailbox/FileBasedMailboxSettings.scala | 32 ++++++++-------- .../mailbox/filequeue/PersistentQueue.scala | 26 ++++++------- .../akka/actor/mailbox/DurableMailbox.scala | 14 ++++--- 5 files changed, 61 insertions(+), 58 deletions(-) diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf b/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf index f454716af0..1fb5cceeb1 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf @@ -13,50 +13,50 @@ akka { file-based { # directory below which this queue resides directory-path = "./_mb" - + # attempting to add an item after the queue reaches this size (in items) will fail. 
max-items = 2147483647 - + # attempting to add an item after the queue reaches this size (in bytes) will fail. max-size = 2147483647 bytes - + # attempting to add an item larger than this size (in bytes) will fail. max-item-size = 2147483647 bytes - + # maximum expiration time for this queue (seconds). max-age = 0s - + # maximum journal size before the journal should be rotated. max-journal-size = 16 MiB - + # maximum size of a queue before it drops into read-behind mode. max-memory-size = 128 MiB - + # maximum overflow (multiplier) of a journal file before we re-create it. max-journal-overflow = 10 - + # absolute maximum size of a journal file until we rebuild it, no matter what. max-journal-size-absolute = 9223372036854775807 bytes - + # whether to drop older items (instead of newer) when the queue is full - discard-old-when-full = on - + discard-old-when-full = on + # whether to keep a journal file at all - keep-journal = on - + keep-journal = on + # whether to sync the journal after each transaction sync-journal = off # circuit breaker configuration circuit-breaker { - # maximum number of failures before opening breaker - max-failures = 3 + # maximum number of failures before opening breaker + max-failures = 3 - # duration of time beyond which a call is assumed to be timed out and considered a failure - call-timeout = 3 seconds + # duration of time beyond which a call is assumed to be timed out and considered a failure + call-timeout = 3 seconds - # duration of time to wait until attempting to reset the breaker during which all calls fail-fast - reset-timeout = 30 seconds + # duration of time to wait until attempting to reset the breaker during which all calls fail-fast + reset-timeout = 30 seconds } } } diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala index fccb6b5aea..1416e8f148 100644 --- 
a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala @@ -21,18 +21,17 @@ class FileBasedMailboxType(systemSettings: ActorSystem.Settings, config: Config) case None ⇒ throw new ConfigurationException("creating a durable mailbox requires an owner (i.e. does not work with BalancingDispatcher)") } } - class FileBasedMessageQueue(_owner: ActorContext, val settings: FileBasedMailboxSettings) extends DurableMessageQueue(_owner) with DurableMessageSerialization { - // TODO Is it reasonable for all FileBasedMailboxes to have their own logger? - private val log = Logging(system, "FileBasedMessageQueue") - val breaker = CircuitBreaker(_owner.system.scheduler, settings.CircuitBreakerMaxFailures, settings.CircuitBreakerCallTimeout, settings.CircuitBreakerResetTimeout) + private val breaker = CircuitBreaker(_owner.system.scheduler, settings.CircuitBreakerMaxFailures, settings.CircuitBreakerCallTimeout, settings.CircuitBreakerResetTimeout) + + private val log = Logging(system, "FileBasedMessageQueue") private val queue = try { (new java.io.File(settings.QueuePath)) match { case dir if dir.exists && !dir.isDirectory ⇒ throw new IllegalStateException("Path already occupied by non-directory " + dir) case dir if !dir.exists ⇒ if (!dir.mkdirs() && !dir.isDirectory) throw new IllegalStateException("Creation of directory failed " + dir) - case _ ⇒ //All good + case _ ⇒ // All good } val queue = new filequeue.PersistentQueue(settings.QueuePath, name, settings, log) queue.setup // replays journal diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala index dff4021d96..27088dfc92 100644 --- 
a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala @@ -16,20 +16,20 @@ class FileBasedMailboxSettings(val systemSettings: ActorSystem.Settings, val use val config = initialize import config._ - val QueuePath: String = getString("directory-path") - val MaxItems: Int = getInt("max-items") - val MaxSize: Long = getBytes("max-size") - val MaxItemSize: Long = getBytes("max-item-size") - val MaxAge: Duration = Duration(getMilliseconds("max-age"), MILLISECONDS) - val MaxJournalSize: Long = getBytes("max-journal-size") - val MaxMemorySize: Long = getBytes("max-memory-size") - val MaxJournalOverflow: Int = getInt("max-journal-overflow") - val MaxJournalSizeAbsolute: Long = getBytes("max-journal-size-absolute") - val DiscardOldWhenFull: Boolean = getBoolean("discard-old-when-full") - val KeepJournal: Boolean = getBoolean("keep-journal") - val SyncJournal: Boolean = getBoolean("sync-journal") + final val QueuePath: String = getString("directory-path") + final val MaxItems: Int = getInt("max-items") + final val MaxSize: Long = getBytes("max-size") + final val MaxItemSize: Long = getBytes("max-item-size") + final val MaxAge: Duration = Duration(getMilliseconds("max-age"), MILLISECONDS) + final val MaxJournalSize: Long = getBytes("max-journal-size") + final val MaxMemorySize: Long = getBytes("max-memory-size") + final val MaxJournalOverflow: Int = getInt("max-journal-overflow") + final val MaxJournalSizeAbsolute: Long = getBytes("max-journal-size-absolute") + final val DiscardOldWhenFull: Boolean = getBoolean("discard-old-when-full") + final val KeepJournal: Boolean = getBoolean("keep-journal") + final val SyncJournal: Boolean = getBoolean("sync-journal") - val CircuitBreakerMaxFailures = getInt("circuit-breaker.max-failures") - val CircuitBreakerCallTimeout = 
Duration.fromNanos(getNanoseconds("circuit-breaker.call-timeout")) - val CircuitBreakerResetTimeout = Duration.fromNanos(getNanoseconds("circuit-breaker.reset-timeout")) -} \ No newline at end of file + final val CircuitBreakerMaxFailures = getInt("circuit-breaker.max-failures") + final val CircuitBreakerCallTimeout = Duration.fromNanos(getNanoseconds("circuit-breaker.call-timeout")) + final val CircuitBreakerResetTimeout = Duration.fromNanos(getNanoseconds("circuit-breaker.reset-timeout")) +} diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/PersistentQueue.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/PersistentQueue.scala index 1a5ddf4a8c..152b29406c 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/PersistentQueue.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/PersistentQueue.scala @@ -68,44 +68,44 @@ class PersistentQueue(persistencePath: String, val name: String, val settings: F def overlay[T](base: ⇒ T) = new OverlaySetting(base) // attempting to add an item after the queue reaches this size (in items) will fail. - val maxItems = overlay(PersistentQueue.maxItems) + final val maxItems = overlay(PersistentQueue.maxItems) // attempting to add an item after the queue reaches this size (in bytes) will fail. - val maxSize = overlay(PersistentQueue.maxSize) + final val maxSize = overlay(PersistentQueue.maxSize) // attempting to add an item larger than this size (in bytes) will fail. - val maxItemSize = overlay(PersistentQueue.maxItemSize) + final val maxItemSize = overlay(PersistentQueue.maxItemSize) // maximum expiration time for this queue (seconds). - val maxAge = overlay(PersistentQueue.maxAge) + final val maxAge = overlay(PersistentQueue.maxAge) // maximum journal size before the journal should be rotated. 
- val maxJournalSize = overlay(PersistentQueue.maxJournalSize) + final val maxJournalSize = overlay(PersistentQueue.maxJournalSize) // maximum size of a queue before it drops into read-behind mode. - val maxMemorySize = overlay(PersistentQueue.maxMemorySize) + final val maxMemorySize = overlay(PersistentQueue.maxMemorySize) // maximum overflow (multiplier) of a journal file before we re-create it. - val maxJournalOverflow = overlay(PersistentQueue.maxJournalOverflow) + final val maxJournalOverflow = overlay(PersistentQueue.maxJournalOverflow) // absolute maximum size of a journal file until we rebuild it, no matter what. - val maxJournalSizeAbsolute = overlay(PersistentQueue.maxJournalSizeAbsolute) + final val maxJournalSizeAbsolute = overlay(PersistentQueue.maxJournalSizeAbsolute) // whether to drop older items (instead of newer) when the queue is full - val discardOldWhenFull = overlay(PersistentQueue.discardOldWhenFull) + final val discardOldWhenFull = overlay(PersistentQueue.discardOldWhenFull) // whether to keep a journal file at all - val keepJournal = overlay(PersistentQueue.keepJournal) + final val keepJournal = overlay(PersistentQueue.keepJournal) // whether to sync the journal after each transaction - val syncJournal = overlay(PersistentQueue.syncJournal) + final val syncJournal = overlay(PersistentQueue.syncJournal) // (optional) move expired items over to this queue - val expiredQueue = overlay(PersistentQueue.expiredQueue) + final val expiredQueue = overlay(PersistentQueue.expiredQueue) private var journal = new Journal(new File(persistencePath, name).getCanonicalPath, syncJournal(), log) - // track tentative removals + // track tentative remofinal vals private var xidCounter: Int = 0 private val openTransactions = new mutable.HashMap[Int, QItem] def openTransactionCount = openTransactions.size diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala 
b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala index b21878d00e..ff985b44cc 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala @@ -69,11 +69,15 @@ trait DurableMessageSerialization { this: DurableMessageQueue ⇒ * Conventional organization of durable mailbox settings: * * {{{ - * my-durable-dispatcher { - * mailbox-type = "my.durable.mailbox" - * my-durable-mailbox { - * setting1 = 1 - * setting2 = 2 + * akka { + * actor { + * my-durable-dispatcher { + * mailbox-type = "my.durable.mailbox" + * my-durable-mailbox { + * setting1 = 1 + * setting2 = 2 + * } + * } * } * } * }}} From 526820bdb76bfaa41f225dc5452e3bfa395391f6 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 25 Jun 2012 08:32:55 +0200 Subject: [PATCH 490/538] Fix failing TransitionSpec, failed due to wrong test, see #2259 --- .../src/multi-jvm/scala/akka/cluster/TransitionSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 3043c198df..7b6ee2d79c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -68,7 +68,7 @@ abstract class TransitionSpec } def awaitMemberStatus(address: Address, status: MemberStatus): Unit = awaitCond { - memberStatus(address) == Up + memberStatus(address) == status } // DSL sugar for `role1 gossipTo role2` @@ -388,7 +388,7 @@ abstract class TransitionSpec } // FIXME ignored due to #2259 - "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest ignore { + "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest in { runOn(fifth) { 
markNodeAsUnavailable(second) cluster.reapUnreachableMembers() From e817b10a93d5826a2d19ce1333cb119513bc99ce Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 25 Jun 2012 08:34:30 +0200 Subject: [PATCH 491/538] Remove fixme --- .../src/multi-jvm/scala/akka/cluster/TransitionSpec.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 7b6ee2d79c..422b67de81 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -387,7 +387,6 @@ abstract class TransitionSpec enterBarrier("after-5") } - // FIXME ignored due to #2259 "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest in { runOn(fifth) { markNodeAsUnavailable(second) From a0ddb377454fd564bc93594f5b87fe89989391bd Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 25 Jun 2012 10:32:21 +0200 Subject: [PATCH 492/538] Attempt to deal with race between sending and receving multi-framed messages --- .../src/main/scala/akka/zeromq/ConcurrentSocketActor.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index 71b7b185f0..3f9cb48f01 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -206,7 +206,8 @@ private[zeromq] class ConcurrentSocketActor(params: Seq[SocketOption]) extends A } result match { case null ⇒ - if (currentFrames.isEmpty) currentFrames + if (socket.hasReceiveMore) receiveMessage(mode, currentFrames) + else if (currentFrames.isEmpty) currentFrames else throw new IllegalStateException("no more frames available while socket.hasReceivedMore==true") case bytes ⇒ val frames = 
currentFrames :+ Frame(if (bytes.length == 0) noBytes else bytes) From 1400ea605b6b2e9aa02dd05195d40f69c10fc00f Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 25 Jun 2012 10:49:49 +0200 Subject: [PATCH 493/538] clarify system.actorOf special semantics section, see #2031 --- .../src/main/scala/akka/dispatch/Mailbox.scala | 12 +++++++++++- akka-docs/java/dispatchers.rst | 6 ++++-- akka-docs/scala/dispatchers.rst | 6 ++++-- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index fd2da63a8b..0587e845d5 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -476,7 +476,17 @@ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { } /** - * MailboxType is a factory to create MessageQueues for an optionally provided ActorContext + * MailboxType is a factory to create MessageQueues for an optionally + * provided ActorContext. + * + * Possibly Important Notice + * + * When implementing a custom mailbox type, be aware that there is special + * semantics attached to `system.actorOf()` in that sending to the returned + * ActorRef may—for a short period of time—enqueue the messages first in a + * dummy queue. Top-level actors are created in two steps, and only after the + * guardian actor has performed that second step will all previously sent + * messages be transferred from the dummy queue into the real mailbox. */ trait MailboxType { def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index 27716275c0..577740d78c 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -218,8 +218,10 @@ filling in will they be transferred into the real mailbox. Thus, .. 
code-block:: scala - system.actorOf(...).tell("bang"); - assert(bangIsInMyCustomMailbx); + final Props props = ... + // this actor uses MyCustomMailbox, which is assumed to be a singleton + system.actorOf(props.withDispatcher("myCustomMailbox").tell("bang"); + assert(MyCustomMailbox.getInstance().getLastEnqueued().equals("bang")); will probably fail; you will have to allow for some time to pass and retry the check à la :meth:`TestKit.awaitCond`. diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index 5be19ad799..4253d3a1e4 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -212,8 +212,10 @@ filling in will they be transferred into the real mailbox. Thus, .. code-block:: scala - system.actorOf(...) ! "bang" - assert(bangIsInMyCustomMailbx) + val props: Props = ... + // this actor uses MyCustomMailbox, which is assumed to be a singleton + system.actorOf(props.withDispatcher("myCustomMailbox")) ! "bang" + assert(MyCustomMailbox.instance.getLastEnqueuedMessage == "bang") will probably fail; you will have to allow for some time to pass and retry the check à la :meth:`TestKit.awaitCond`. 
From c63481cda47c6a8b411436c3c07dc61b4ff6c878 Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 25 Jun 2012 11:20:51 +0200 Subject: [PATCH 494/538] fix up one last test, see #2031 --- .../src/test/scala/akka/actor/dispatch/ActorModelSpec.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 00ac2b98a8..da789d9dce 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -286,6 +286,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa implicit val dispatcher = interceptedDispatcher() val start, oneAtATime = new CountDownLatch(1) val a = newTestActor(dispatcher.id) + awaitStarted(a) a ! CountDown(start) assertCountDown(start, 3.seconds.dilated.toMillis, "Should process first message within 3 seconds") From 382a96a189c1d4898befbf590519ea5cd956f6dc Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 25 Jun 2012 11:31:27 +0200 Subject: [PATCH 495/538] Rewriting the polling for 0mq so that we can discern EAGAIN from empty poll --- .../akka/zeromq/ConcurrentSocketActor.scala | 36 ++++++++----------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index 3f9cb48f01..e1b1ba4ddf 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -190,30 +190,24 @@ private[zeromq] class ConcurrentSocketActor(params: Seq[SocketOption]) extends A } @tailrec private def doPoll(mode: PollMsg, togo: Int = 10): Unit = - receiveMessage(mode) match { - case null ⇒ // receiveMessage has already done something special here - case Seq() ⇒ doPollTimeout(mode) 
- case frames ⇒ - notifyListener(deserializer(frames)) - if (togo > 0) doPoll(mode, togo - 1) - else self ! mode + if (togo <= 0) self ! mode + else receiveMessage(mode) match { + case Seq() ⇒ doPollTimeout(mode) + case frames ⇒ notifyListener(deserializer(frames)); doPoll(mode, togo - 1) } - @tailrec private def receiveMessage(mode: PollMsg, currentFrames: Vector[Frame] = Vector.empty): Seq[Frame] = { - val result = mode match { - case Poll ⇒ socket.recv(JZMQ.NOBLOCK) - case PollCareful ⇒ if (poller.poll(0) > 0) socket.recv(0) else null + @tailrec private def receiveMessage(mode: PollMsg, currentFrames: Vector[Frame] = Vector.empty): Seq[Frame] = + if (mode == PollCareful && (poller.poll(0) <= 0)) { + if (currentFrames.isEmpty) currentFrames else throw new IllegalStateException("Received partial transmission!") + } else { + socket.recv(if (mode == Poll) JZMQ.NOBLOCK else 0) match { + case null ⇒ /*EAGAIN*/ + if (currentFrames.isEmpty) currentFrames else receiveMessage(mode, currentFrames) + case bytes ⇒ + val frames = currentFrames :+ Frame(if (bytes.length == 0) noBytes else bytes) + if (socket.hasReceiveMore) receiveMessage(mode, frames) else frames + } } - result match { - case null ⇒ - if (socket.hasReceiveMore) receiveMessage(mode, currentFrames) - else if (currentFrames.isEmpty) currentFrames - else throw new IllegalStateException("no more frames available while socket.hasReceivedMore==true") - case bytes ⇒ - val frames = currentFrames :+ Frame(if (bytes.length == 0) noBytes else bytes) - if (socket.hasReceiveMore) receiveMessage(mode, frames) else frames - } - } private val listenerOpt = params collectFirst { case Listener(l) ⇒ l } private def watchListener(): Unit = listenerOpt foreach context.watch From 785bda66632e0d3449e04692e4daccc818d5f803 Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 25 Jun 2012 11:39:21 +0200 Subject: [PATCH 496/538] add simple script for priming the REPL --- repl | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 
repl diff --git a/repl b/repl new file mode 100644 index 0000000000..701b021b35 --- /dev/null +++ b/repl @@ -0,0 +1,9 @@ +import akka.actor._ +import akka.dispatch.{ Future, Promise } +import com.typesafe.config.ConfigFactory +val config=ConfigFactory.parseString("akka.daemonic=on") +val sys=ActorSystem("repl", config.withFallback(ConfigFactory.load())).asInstanceOf[ExtendedActorSystem] +implicit val ec=sys.dispatcher +import akka.util.duration._ +import akka.util.Timeout +implicit val timeout=Timeout(5 seconds) From cc4864346789ecbe5560591bf7d2d89b6694adcc Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 25 Jun 2012 11:41:40 +0200 Subject: [PATCH 497/538] Upgrading Netty to 3.5.1-Final --- project/AkkaBuild.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 4213f65611..584625cc82 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -485,7 +485,7 @@ object Dependency { object V { val Camel = "2.8.0" val Logback = "1.0.4" - val Netty = "3.5.0.Final" + val Netty = "3.5.1.Final" val Protobuf = "2.4.1" val ScalaStm = "0.5" val Scalatest = "1.6.1" From 29377e0b41755e1401ca6f97eca1efdca9145407 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 25 Jun 2012 12:12:30 +0200 Subject: [PATCH 498/538] #2260 - making sure that Terminated deregisters the watch from the kid and then propagates it to the normal receive --- .../scala/akka/actor/DeathWatchSpec.scala | 20 +++++++++++++++++++ .../src/main/scala/akka/actor/Actor.scala | 2 +- .../src/main/scala/akka/actor/ActorCell.scala | 1 + .../src/main/scala/akka/routing/Routing.scala | 1 - 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index 97eec5be01..8a21f5f070 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ 
b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -143,6 +143,26 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout result must be(Seq(1, 2, 3)) } } + + "be able to watch a child with the same name after the old died" in { + val parent = system.actorOf(Props(new Actor { + def receive = { + case "NKOTB" ⇒ + val currentKid = context.watch(context.actorOf(Props(ctx ⇒ { case "NKOTB" ⇒ ctx stop ctx.self }), "kid")) + currentKid forward "NKOTB" + context become { + case Terminated(`currentKid`) ⇒ + testActor ! "GREEN" + context unbecome + } + } + })) + + parent ! "NKOTB" + expectMsg("GREEN") + parent ! "NKOTB" + expectMsg("GREEN") + } } } diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index d14f33a915..8b9476efe9 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -58,7 +58,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. 
*/ -case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty val existenceConfirmed: Boolean) +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty val existenceConfirmed: Boolean) extends AutoReceivedMessage abstract class ReceiveTimeout extends PossiblyHarmful diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 8c31ffb27b..e739ffc859 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -896,6 +896,7 @@ private[akka] class ActorCell( msg.message match { case Failed(cause) ⇒ handleFailure(sender, cause) + case t: Terminated ⇒ watching -= t.actor; receiveMessage(t) case Kill ⇒ throw new ActorKilledException("Kill") case PoisonPill ⇒ self.stop() case SelectParent(m) ⇒ parent.tell(m, msg.sender) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 5800e14ed2..cb0f5ee09b 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -80,7 +80,6 @@ private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActo def applyRoute(sender: ActorRef, message: Any): Iterable[Destination] = message match { case _: AutoReceivedMessage ⇒ Destination(self, self) :: Nil - case Terminated(_) ⇒ Destination(self, self) :: Nil case CurrentRoutees ⇒ sender ! 
RouterRoutees(_routees) Nil From c402d5e8096f8096c349173420999e46fee5245c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 25 Jun 2012 12:24:35 +0200 Subject: [PATCH 499/538] Switching to our embedded Murmur hash --- akka-actor/src/main/scala/akka/actor/ActorPath.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index aa93dbcc47..1112b90f31 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -192,7 +192,7 @@ final class ChildActorPath(val parent: ActorPath, val name: String) extends Acto // TODO RK investigate Phil’s hash from scala.collection.mutable.HashTable.improve override def hashCode: Int = { - import scala.util.MurmurHash._ + import akka.routing.MurmurHash._ @tailrec def rec(p: ActorPath, h: Int, c: Int, k: Int): Int = p match { From cba64403a70ab8cbc47c32cf868f1ffdd79cd284 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 25 Jun 2012 15:23:15 +0200 Subject: [PATCH 500/538] Don't gossip to unreachable, see #2263 * Also, ignore gossip from unreachable, see #2264 * Update gossip protocol in cluster doc --- .../src/main/scala/akka/cluster/Cluster.scala | 92 +++++++++---------- .../test/scala/akka/cluster/ClusterSpec.scala | 36 -------- akka-docs/cluster/cluster.rst | 9 +- 3 files changed, 44 insertions(+), 93 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 7593245587..55b3311dee 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -197,6 +197,9 @@ case class GossipOverview( seen: Map[Address, VectorClock] = Map.empty, unreachable: Set[Member] = Set.empty) { + def isNonDownUnreachable(address: Address): Boolean = + unreachable.exists { m ⇒ m.address == address && m.status 
!= Down } + override def toString = "GossipOverview(seen = [" + seen.mkString(", ") + "], unreachable = [" + unreachable.mkString(", ") + @@ -751,7 +754,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localUnreachable = localGossip.overview.unreachable val alreadyMember = localMembers.exists(_.address == node) - val isUnreachable = localUnreachable.exists { m ⇒ m.address == node && m.status != Down } + val isUnreachable = localGossip.overview.isNonDownUnreachable(node) if (!alreadyMember && !isUnreachable) { @@ -898,46 +901,49 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localState = state.get val localGossip = localState.latestGossip - val winningGossip = - if (isSingletonCluster(localState) && localGossip.overview.unreachable.isEmpty && remoteGossip.members.contains(self)) { - // a fresh singleton cluster that is joining, no need to merge, use received gossip - remoteGossip + if (!localGossip.overview.isNonDownUnreachable(from)) { - } else if (remoteGossip.version <> localGossip.version) { - // concurrent - val mergedGossip = remoteGossip merge localGossip - val versionedMergedGossip = mergedGossip :+ vclockNode + val winningGossip = + if (isSingletonCluster(localState) && localGossip.overview.unreachable.isEmpty && remoteGossip.members.contains(self)) { + // a fresh singleton cluster that is joining, no need to merge, use received gossip + remoteGossip - log.debug( - """Can't establish a causal relationship between "remote" gossip and "local" gossip - Remote[{}] - Local[{}] - merging them into [{}]""", - remoteGossip, localGossip, versionedMergedGossip) + } else if (remoteGossip.version <> localGossip.version) { + // concurrent + val mergedGossip = remoteGossip merge localGossip + val versionedMergedGossip = mergedGossip :+ vclockNode - versionedMergedGossip + log.debug( + """Can't establish a causal relationship between "remote" gossip and "local" gossip - Remote[{}] - 
Local[{}] - merging them into [{}]""", + remoteGossip, localGossip, versionedMergedGossip) - } else if (remoteGossip.version < localGossip.version) { - // local gossip is newer - localGossip + versionedMergedGossip - } else { - // remote gossip is newer - remoteGossip + } else if (remoteGossip.version < localGossip.version) { + // local gossip is newer + localGossip + + } else { + // remote gossip is newer + remoteGossip + } + + val newJoinInProgress = + if (localState.joinInProgress.isEmpty) localState.joinInProgress + else localState.joinInProgress -- + winningGossip.members.map(_.address) -- + winningGossip.overview.unreachable.map(_.address) + + val newState = localState copy ( + latestGossip = winningGossip seen selfAddress, + joinInProgress = newJoinInProgress) + + // if we won the race then update else try again + if (!state.compareAndSet(localState, newState)) receiveGossip(from, remoteGossip) // recur if we fail the update + else { + log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, from) + notifyMembershipChangeListeners(localState, newState) } - - val newJoinInProgress = - if (localState.joinInProgress.isEmpty) localState.joinInProgress - else localState.joinInProgress -- - winningGossip.members.map(_.address) -- - winningGossip.overview.unreachable.map(_.address) - - val newState = localState copy ( - latestGossip = winningGossip seen selfAddress, - joinInProgress = newJoinInProgress) - - // if we won the race then update else try again - if (!state.compareAndSet(localState, newState)) receiveGossip(from, remoteGossip) // recur if we fail the update - else { - log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, from) - notifyMembershipChangeListeners(localState, newState) } } @@ -975,15 +981,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) peer } - /** - * INTERNAL API. 
- */ - private[cluster] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = - (membersSize + unreachableSize) match { - case 0 ⇒ 0.0 - case sum ⇒ unreachableSize.toDouble / sum - } - /** * INTERNAL API. */ @@ -1019,13 +1016,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // 1. gossip to alive members val gossipedToAlive = gossipToRandomNodeOf(localMemberAddresses) - // 2. gossip to unreachable members - if (localUnreachableSize > 0) { - val probability = gossipToUnreachableProbablity(localMembersSize, localUnreachableSize) - if (ThreadLocalRandom.current.nextDouble() < probability) - gossipToRandomNodeOf(localUnreachableMembers.map(_.address)) - } - // 3. gossip to a deputy nodes for facilitating partition healing val deputies = deputyNodes(localMemberAddresses) val alreadyGossipedToDeputy = gossipedToAlive.map(deputies.contains(_)).getOrElse(false) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 229ec7137d..3abdf2bf9b 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -50,14 +50,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { testActor ! 
GossipTo(address) } - @volatile - var _gossipToUnreachableProbablity = 0.0 - - override def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = { - if (_gossipToUnreachableProbablity < 0.0) super.gossipToUnreachableProbablity(membersSize, unreachableSize) - else _gossipToUnreachableProbablity - } - @volatile var _gossipToDeputyProbablity = 0.0 @@ -81,7 +73,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { cluster.latestGossip.members.collectFirst { case m if m.address == address ⇒ m.status } before { - cluster._gossipToUnreachableProbablity = 0.0 cluster._gossipToDeputyProbablity = 0.0 addresses foreach failureDetector.remove deterministicRandom.set(0) @@ -133,17 +124,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { expectNoMsg(1 second) } - "use certain probability for gossiping to unreachable node depending on the number of unreachable and live nodes" in { - cluster._gossipToUnreachableProbablity = -1.0 // use real impl - cluster.gossipToUnreachableProbablity(10, 1) must be < (cluster.gossipToUnreachableProbablity(9, 1)) - cluster.gossipToUnreachableProbablity(10, 1) must be < (cluster.gossipToUnreachableProbablity(10, 2)) - cluster.gossipToUnreachableProbablity(10, 5) must be < (cluster.gossipToUnreachableProbablity(10, 9)) - cluster.gossipToUnreachableProbablity(0, 10) must be <= (1.0) - cluster.gossipToUnreachableProbablity(1, 10) must be <= (1.0) - cluster.gossipToUnreachableProbablity(10, 0) must be(0.0 plusOrMinus (0.0001)) - cluster.gossipToUnreachableProbablity(0, 0) must be(0.0 plusOrMinus (0.0001)) - } - "use certain probability for gossiping to deputy node depending on the number of unreachable and live nodes" in { cluster._gossipToDeputyProbablity = -1.0 // use real impl cluster.gossipToDeputyProbablity(10, 1, 2) must be < (cluster.gossipToDeputyProbablity(9, 1, 2)) @@ -178,22 +158,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter 
{ } - "gossip to random unreachable node" in { - val dead = Set(addresses(1)) - dead foreach failureDetector.markNodeAsUnavailable - cluster._gossipToUnreachableProbablity = 1.0 // always - - cluster.reapUnreachableMembers() - cluster.latestGossip.overview.unreachable.map(_.address) must be(dead) - - cluster.gossip() - - expectMsg(GossipTo(addresses(2))) // first available - expectMsg(GossipTo(addresses(1))) // the unavailable - - expectNoMsg(1 second) - } - "gossip to random deputy node if number of live nodes is less than number of deputy nodes" in { cluster._gossipToDeputyProbablity = -1.0 // real impl // 0 and 2 still alive diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index 0126897dab..833d56f51c 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -213,7 +213,7 @@ nodes involved in a gossip exchange. Periodically, the default is every 1 second, each node chooses another random node to initiate a round of gossip with. The choice of node is random but can -also include extra gossiping for unreachable nodes, ``deputy`` nodes, and nodes with +also include extra gossiping for ``deputy`` nodes, and nodes with either newer or older state versions. The gossip overview contains the current state version for all nodes and also a @@ -228,14 +228,11 @@ During each round of gossip exchange the following process is used: 1. Gossip to random live node (if any) -2. Gossip to random unreachable node with certain probability depending on the - number of unreachable and live nodes - -3. If the node gossiped to at (1) was not a ``deputy`` node, or the number of live +2. If the node gossiped to at (1) was not a ``deputy`` node, or the number of live nodes is less than number of ``deputy`` nodes, gossip to random ``deputy`` node with certain probability depending on number of unreachable, ``deputy``, and live nodes. -4. Gossip to random node with newer or older state information, based on the +3. 
Gossip to random node with newer or older state information, based on the current gossip overview, with some probability (?) The gossiper only sends the gossip overview to the chosen node. The recipient of From 66bcca8a918839a0f2d7df64d5573f47a4354848 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 25 Jun 2012 15:46:30 +0200 Subject: [PATCH 501/538] Shorter gossip interval still needed --- .../src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 3c74bc02e2..0f9a8a8c73 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -24,6 +24,8 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" akka.cluster { nr-of-deputy-nodes = 0 + # FIXME remove this (use default) when ticket #2239 has been fixed + gossip-interval = 400 ms } akka.loglevel = INFO """)) From 738565883b8602388bcc76a2a363dac50996d5b4 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 25 Jun 2012 20:20:11 +0200 Subject: [PATCH 502/538] Add join-seed-node-timeout config, see #2219 --- akka-cluster/src/main/resources/reference.conf | 3 +++ .../src/main/scala/akka/cluster/Cluster.scala | 17 +++++++---------- .../scala/akka/cluster/ClusterSettings.scala | 1 + .../scala/akka/cluster/ClusterConfigSpec.scala | 1 + 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index f75c7f9018..60b934a864 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -15,6 +15,9 @@ akka { # Leave as empty if the node should be a singleton cluster. 
seed-nodes = [] + # how long to wait for one of the seed nodes to reply to initial join request + join-seed-node-timeout = 5s + # automatic join the seed-nodes at startup auto-join = on diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index a4765fc2cf..0cf79d7102 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -375,8 +375,7 @@ private[cluster] final class ClusterCommandDaemon(cluster: Cluster) extends Acto val seedRoutees = for (address ← cluster.seedNodes; if address != cluster.selfAddress) yield self.path.toStringWithAddress(address) if (seedRoutees.nonEmpty) { - // FIXME config of within (use JoinInProgressTimeout when that is in master) - implicit val within = Timeout(5 seconds) + implicit val within = Timeout(cluster.clusterSettings.JoinSeedNodeTimeout) val seedRouter = context.actorOf( Props.empty.withRouter(ScatterGatherFirstCompletedRouter( routees = seedRoutees, within = within.duration))) @@ -679,6 +678,12 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) */ def isAvailable: Boolean = !isUnavailable(state.get) + /** + * Make it possible to override/configure seedNodes from tests without + * specifying in config. Addresses are unknown before startup time. + */ + def seedNodes: IndexedSeq[Address] = SeedNodes + /** * Registers a listener to subscribe to cluster membership changes. */ @@ -1374,14 +1379,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private def deputyNodes(addresses: IndexedSeq[Address]): IndexedSeq[Address] = addresses filterNot (_ == selfAddress) intersect seedNodes - /** - * INTERNAL API. - * - * Make it possible to override/configure seedNodes from tests without - * specifying in config. Addresses are unknown before startup time. 
- */ - private[cluster] def seedNodes: IndexedSeq[Address] = SeedNodes - /** * INTERNAL API. */ diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 12ed666680..c026b8c1a0 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -25,6 +25,7 @@ class ClusterSettings(val config: Config, val systemName: String) { final val SeedNodes: IndexedSeq[Address] = getStringList("akka.cluster.seed-nodes").asScala.map { case AddressFromURIString(addr) ⇒ addr }.toIndexedSeq + final val JoinSeedNodeTimeout = Duration(getMilliseconds("akka.cluster.join-seed-node-timeout"), MILLISECONDS) final val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) final val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) final val HeartbeatInterval = Duration(getMilliseconds("akka.cluster.heartbeat-interval"), MILLISECONDS) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 5e44b0a4bc..d5a9752e5e 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -22,6 +22,7 @@ class ClusterConfigSpec extends AkkaSpec { FailureDetectorMinStdDeviation must be(100 millis) FailureDetectorAcceptableHeartbeatPause must be(3 seconds) SeedNodes must be(Seq.empty[String]) + JoinSeedNodeTimeout must be(5 seconds) PeriodicTasksInitialDelay must be(1 seconds) GossipInterval must be(1 second) HeartbeatInterval must be(1 second) From 97bf8c4bb527bd917c39c36b6dce4f9adcbd5642 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 25 Jun 2012 20:46:48 +0200 Subject: [PATCH 503/538] Cleanup of comments, see #2263 --- 
.../src/main/scala/akka/cluster/Cluster.scala | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 55b3311dee..8232a762cf 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -439,15 +439,12 @@ trait ClusterNodeMBean { /** * This module is responsible for Gossiping cluster information. The abstraction maintains the list of live * and dead members. Periodically i.e. every 1 second this module chooses a random member and initiates a round - * of Gossip with it. Whenever it gets gossip updates it updates the Failure Detector with the liveness - * information. + * of Gossip with it. *

- * During each of these runs the member initiates gossip exchange according to following rules (as defined in the - * Cassandra documentation [http://wiki.apache.org/cassandra/ArchitectureGossip]: + * During each of these runs the member initiates gossip exchange according to following rules: *

  *   1) Gossip to random live member (if any)
- *   2) Gossip to random unreachable member with certain probability depending on number of unreachable and live members
- *   3) If the member gossiped to at (1) was not deputy, or the number of live members is less than number of deputy list,
+ *   2) If the member gossiped to at (1) was not deputy, or the number of live members is less than number of deputy list,
  *       gossip to random deputy with certain probability depending on number of unreachable, deputy and live members.
  * 
* @@ -1016,7 +1013,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // 1. gossip to alive members val gossipedToAlive = gossipToRandomNodeOf(localMemberAddresses) - // 3. gossip to a deputy nodes for facilitating partition healing + // 2. gossip to a deputy nodes for facilitating partition healing val deputies = deputyNodes(localMemberAddresses) val alreadyGossipedToDeputy = gossipedToAlive.map(deputies.contains(_)).getOrElse(false) if ((!alreadyGossipedToDeputy || localMembersSize < NrOfDeputyNodes) && deputies.nonEmpty) { From 25996bf28458f64ebf485fdb1b3e3dc0c250546f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 25 Jun 2012 21:07:44 +0200 Subject: [PATCH 504/538] Join seed nodes before becoming singleton cluster, see #2267 * self is initially not member (in gossip state) * if the join to seed nodes timeout it joins itself, and becomes singleton cluster * remove the special case handling of singelton cluster in gossip merge, since singleton cluster is not the normal state when joining any more --- .../src/main/resources/reference.conf | 3 +- .../src/main/scala/akka/cluster/Cluster.scala | 40 ++++++++++--------- .../scala/akka/cluster/JoinSeedNodeSpec.scala | 4 +- .../akka/cluster/MultiNodeClusterSpec.scala | 12 ++++-- .../akka/cluster/SingletonClusterSpec.scala | 15 +++++-- .../scala/akka/cluster/SunnyWeatherSpec.scala | 1 + .../test/scala/akka/cluster/ClusterSpec.scala | 8 ++-- 7 files changed, 53 insertions(+), 30 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 4da4dd6620..d226506acc 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -18,7 +18,8 @@ akka { # how long to wait for one of the seed nodes to reply to initial join request seed-node-timeout = 5s - # automatic join the seed-nodes at startup + # Automatic join the seed-nodes at startup. 
+ # If seed-nodes is empty it will join itself and become a single node cluster. auto-join = on # should the 'leader' in the cluster be allowed to automatically mark unreachable nodes as DOWN? diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index caecf3906b..3eddb5bf60 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -364,20 +364,23 @@ private[cluster] final class ClusterCommandDaemon(cluster: Cluster) extends Acto val log = Logging(context.system, this) def receive = { - case JoinSeedNode ⇒ joinSeedNode() - case InitJoin ⇒ sender ! InitJoinAck(cluster.selfAddress) - case InitJoinAck(address) ⇒ cluster.join(address) - case Join(address) ⇒ cluster.joining(address) - case Down(address) ⇒ cluster.downing(address) - case Leave(address) ⇒ cluster.leaving(address) - case Exit(address) ⇒ cluster.exiting(address) - case Remove(address) ⇒ cluster.removing(address) + case JoinSeedNode ⇒ joinSeedNode() + case InitJoin ⇒ sender ! 
InitJoinAck(cluster.selfAddress) + case InitJoinAck(address) ⇒ cluster.join(address) + case Join(address) ⇒ cluster.joining(address) + case Down(address) ⇒ cluster.downing(address) + case Leave(address) ⇒ cluster.leaving(address) + case Exit(address) ⇒ cluster.exiting(address) + case Remove(address) ⇒ cluster.removing(address) + case Failure(e: AskTimeoutException) ⇒ joinSeedNodeTimeout() } def joinSeedNode(): Unit = { val seedRoutees = for (address ← cluster.seedNodes; if address != cluster.selfAddress) yield self.path.toStringWithAddress(address) - if (seedRoutees.nonEmpty) { + if (seedRoutees.isEmpty) { + cluster join cluster.selfAddress + } else { implicit val within = Timeout(cluster.clusterSettings.SeedNodeTimeout) val seedRouter = context.actorOf( Props.empty.withRouter(ScatterGatherFirstCompletedRouter( @@ -387,6 +390,8 @@ private[cluster] final class ClusterCommandDaemon(cluster: Cluster) extends Acto } } + def joinSeedNodeTimeout(): Unit = cluster join cluster.selfAddress + override def unhandled(unknown: Any) = log.error("Illegal command [{}]", unknown) } @@ -534,10 +539,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } private val state = { - val member = Member(selfAddress, Joining) - val versionedGossip = Gossip(members = Gossip.emptyMembers + member) :+ vclockNode // add me as member and update my vector clock - val seenVersionedGossip = versionedGossip seen selfAddress - new AtomicReference[State](State(seenVersionedGossip)) + // note that self is not initially member, + // and the Gossip is not versioned for this 'Node' yet + new AtomicReference[State](State(Gossip(members = Gossip.emptyMembers))) } // try to join one of the nodes defined in the 'akka.cluster.seed-nodes' @@ -797,7 +801,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newUnreachableMembers = localUnreachable filterNot { _.address == node } val newOverview = localGossip.overview copy (unreachable = 
newUnreachableMembers) - val newMembers = localMembers + Member(node, Joining) // add joining node as Joining + // add joining node as Joining + // add self in case someone else joins before self has joined (Set discards duplicates) + val newMembers = localMembers + Member(node, Joining) + Member(selfAddress, Joining) val newGossip = localGossip copy (overview = newOverview, members = newMembers) val versionedGossip = newGossip :+ vclockNode @@ -939,11 +945,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (!localGossip.overview.isNonDownUnreachable(from)) { val winningGossip = - if (isSingletonCluster(localState) && localGossip.overview.unreachable.isEmpty && remoteGossip.members.contains(self)) { - // a fresh singleton cluster that is joining, no need to merge, use received gossip - remoteGossip - - } else if (remoteGossip.version <> localGossip.version) { + if (remoteGossip.version <> localGossip.version) { // concurrent val mergedGossip = remoteGossip merge localGossip val versionedMergedGossip = mergedGossip :+ vclockNode diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala index 38f03a4e66..726a7d8c76 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala @@ -16,7 +16,9 @@ object JoinSeedNodeMultiJvmSpec extends MultiNodeConfig { val ordinary1 = role("ordinary1") val ordinary2 = role("ordinary2") - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString("akka.cluster.auto-join = on")). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) } class JoinSeedNodeMultiJvmNode1 extends JoinSeedNodeSpec with FailureDetectorPuppetStrategy diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 79e3a67e1e..ed95013bf4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -20,6 +20,7 @@ import akka.actor.RootActorPath object MultiNodeClusterSpec { def clusterConfig: Config = ConfigFactory.parseString(""" akka.cluster { + auto-join = off auto-down = off gossip-interval = 200 ms heartbeat-interval = 400 ms @@ -99,10 +100,15 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu def cluster: Cluster = clusterNode /** - * Use this method instead of 'cluster.self' - * for the initial startup of the cluster node. + * Use this method for the initial startup of the cluster node. */ - def startClusterNode(): Unit = cluster.self + def startClusterNode(): Unit = { + if (cluster.latestGossip.members.isEmpty) { + cluster join myself + awaitCond(cluster.latestGossip.members.exists(_.address == address(myself))) + } else + cluster.self + } /** * Initialize the cluster with the specified member diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index 9f79af2f13..3c35e95333 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -16,6 +16,7 @@ object SingletonClusterMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false). 
withFallback(ConfigFactory.parseString(""" akka.cluster { + auto-join = on auto-down = on failure-detector.threshold = 4 } @@ -38,12 +39,20 @@ abstract class SingletonClusterSpec "A cluster of 2 nodes" must { - "not be singleton cluster when joined" taggedAs LongRunningTest in { + "become singleton cluster when started with 'auto-join=on' and 'seed-nodes=[]'" taggedAs LongRunningTest in { + startClusterNode() + awaitUpConvergence(1) + cluster.isSingletonCluster must be(true) + + enterBarrier("after-1") + } + + "not be singleton cluster when joined with other node" taggedAs LongRunningTest in { awaitClusterUp(first, second) cluster.isSingletonCluster must be(false) assertLeader(first, second) - enterBarrier("after-1") + enterBarrier("after-2") } "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { @@ -58,7 +67,7 @@ abstract class SingletonClusterSpec assertLeader(first) } - enterBarrier("after-2") + enterBarrier("after-3") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index d661f0cc51..3be082d2f3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -25,6 +25,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { akka.cluster { # FIXME remove this (use default) when ticket #2239 has been fixed gossip-interval = 400 ms + auto-join = off } akka.loglevel = INFO """)) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 6f70193715..e818847969 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -86,11 +86,13 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { "use the address of the remote transport" in { 
cluster.selfAddress must be(selfAddress) - cluster.self.address must be(selfAddress) } - "initially be singleton cluster and reach convergence immediately" in { - cluster.isSingletonCluster must be(true) + "initially become singleton cluster when joining itself and reach convergence" in { + cluster.isSingletonCluster must be(false) // auto-join = off + cluster.join(selfAddress) + awaitCond(cluster.isSingletonCluster) + cluster.self.address must be(selfAddress) cluster.latestGossip.members.map(_.address) must be(Set(selfAddress)) memberStatus(selfAddress) must be(Some(MemberStatus.Joining)) cluster.convergence.isDefined must be(true) From 86449fd12d9e3503ae09c3b423360a9b20ad84b3 Mon Sep 17 00:00:00 2001 From: Peter Vlugter Date: Tue, 26 Jun 2012 12:09:59 +1200 Subject: [PATCH 505/538] Explicitly name Akka.pdf in sphinx doc generation --- project/Sphinx.scala | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/project/Sphinx.scala b/project/Sphinx.scala index 43b7e60358..4707215875 100644 --- a/project/Sphinx.scala +++ b/project/Sphinx.scala @@ -87,16 +87,15 @@ object Sphinx { def pdfTask = (sphinxLatex, streams) map { (latex, s) => { - val empty = (latex * "*.pdf").get.isEmpty + val pdf = latex / "Akka.pdf" def failed = sys.error("Failed to build Sphinx pdf documentation.") - if (empty) { + if (!pdf.exists) { s.log.info("Building Sphinx pdf documentation...") val logger = newLogger(s) val exitCode = Process(Seq("make", "all-pdf"), latex) ! 
logger if (exitCode != 0) failed + s.log.info("Sphinx pdf documentation created: %s" format pdf) } - val pdf = (latex * "*.pdf").get.headOption.getOrElse(failed) - if (empty) s.log.info("Sphinx pdf documentation created: %s" format pdf) pdf } } From a3ceb407542e43e2312561af5e916f084115a255 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 26 Jun 2012 09:51:04 +0200 Subject: [PATCH 506/538] Start seed nodes sequentially in JoinSeedNodeSpec, see #2271 --- .../scala/akka/cluster/JoinSeedNodeSpec.scala | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala index 38f03a4e66..bd161a435c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala @@ -33,6 +33,23 @@ abstract class JoinSeedNodeSpec override def seedNodes = IndexedSeq(seed1, seed2) "A cluster with configured seed nodes" must { + "start the seed nodes sequentially" taggedAs LongRunningTest in { + runOn(seed1) { + startClusterNode() + } + enterBarrier("seed1-started") + + runOn(seed2) { + startClusterNode() + } + enterBarrier("seed2-started") + + runOn(seed1, seed2) { + awaitUpConvergence(2) + } + enterBarrier("after-1") + } + "join the seed nodes at startup" taggedAs LongRunningTest in { startClusterNode() @@ -40,7 +57,7 @@ abstract class JoinSeedNodeSpec awaitUpConvergence(4) - enterBarrier("after") + enterBarrier("after-2") } } } From aa2de6aacd00be16d1f0fb86338a645b026a21f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 26 Jun 2012 10:34:09 +0200 Subject: [PATCH 507/538] Fixed RoutingSpec test. 
See #2268 --- .../src/test/scala/akka/routing/RoutingSpec.scala | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index a202778fe5..2b946ec1da 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -26,6 +26,10 @@ object RoutingSpec { router = round-robin nr-of-instances = 3 } + /router2 { + router = round-robin + nr-of-instances = 3 + } /myrouter { router = "akka.routing.RoutingSpec$MyRouter" foo = bar @@ -129,7 +133,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } "use configured nr-of-instances when router is specified" in { - val router = system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(nrOfInstances = 2)), "router1") + val router = system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(nrOfInstances = 2)), "router2") Await.result(router ? 
CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3) system.stop(router) } From 344824c22edc861609871d49f64b4d4c3517e6c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 26 Jun 2012 11:58:04 +0200 Subject: [PATCH 508/538] Adding test case duration output to the tests --- project/AkkaBuild.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 584625cc82..24e803aecc 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -388,8 +388,8 @@ object AkkaBuild extends Build { if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-n", tags.mkString(" "))) }, - // show full stack traces - testOptions in Test += Tests.Argument("-oF") + // show full stack traces and test case durations + testOptions in Test += Tests.Argument("-oDF") ) lazy val formatSettings = ScalariformPlugin.scalariformSettings ++ Seq( From aed78f702b64549d996b098b5091c7df7507130f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 26 Jun 2012 18:19:29 +0200 Subject: [PATCH 509/538] Workaround for SI-5986, see #2275 * Add new operators :+ and :++ by implicit conversion * Unfortunately this means that we must remember to use these until SI-5986 is fixed. Is there a better way? 
--- .../src/main/scala/akka/cluster/Cluster.scala | 22 +++++++++++++++--- .../scala/akka/cluster/TransitionSpec.scala | 23 +++++++++++++------ 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 3eddb5bf60..357d610ed5 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -27,6 +27,7 @@ import javax.management._ import MemberStatus._ import scala.annotation.tailrec import scala.collection.immutable.{ Map, SortedSet } +import scala.collection.GenTraversableOnce /** * Interface for membership change listener. @@ -179,6 +180,20 @@ object Member { case (Joining, Joining) ⇒ m1 case (Up, Up) ⇒ m1 } + + // FIXME Workaround for https://issues.scala-lang.org/browse/SI-5986 + // SortedSet + and ++ operators replaces existing element + // Use these :+ and :++ operators for the Gossip members + implicit def sortedSetWorkaround(sortedSet: SortedSet[Member]): SortedSetWorkaround = new SortedSetWorkaround(sortedSet) + class SortedSetWorkaround(sortedSet: SortedSet[Member]) { + implicit def :+(elem: Member): SortedSet[Member] = { + if (sortedSet.contains(elem)) sortedSet + else sortedSet + elem + } + + implicit def :++(elems: GenTraversableOnce[Member]): SortedSet[Member] = + sortedSet ++ (elems.toSet diff sortedSet) + } } /** @@ -226,6 +241,7 @@ case class GossipOverview( object Gossip { val emptyMembers: SortedSet[Member] = SortedSet.empty + } /** @@ -300,7 +316,7 @@ case class Gossip( */ def :+(member: Member): Gossip = { if (members contains member) this - else this copy (members = members + member) + else this copy (members = members :+ member) } /** @@ -329,7 +345,7 @@ case class Gossip( // 4. 
merge members by selecting the single Member with highest MemberStatus out of the Member groups, // and exclude unreachable - val mergedMembers = Gossip.emptyMembers ++ Member.pickHighestPriority(this.members, that.members).filterNot(mergedUnreachable.contains) + val mergedMembers = Gossip.emptyMembers :++ Member.pickHighestPriority(this.members, that.members).filterNot(mergedUnreachable.contains) // 5. fresh seen table val mergedSeen = Map.empty[Address, VectorClock] @@ -803,7 +819,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // add joining node as Joining // add self in case someone else joins before self has joined (Set discards duplicates) - val newMembers = localMembers + Member(node, Joining) + Member(selfAddress, Joining) + val newMembers = localMembers :+ Member(node, Joining) :+ Member(selfAddress, Joining) val newGossip = localGossip copy (overview = newOverview, members = newMembers) val versionedGossip = newGossip :+ vclockNode diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 0376545b41..397d824ef4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -99,12 +99,14 @@ abstract class TransitionSpec "start nodes as singleton clusters" taggedAs LongRunningTest in { - startClusterNode() - cluster.isSingletonCluster must be(true) - cluster.status must be(Joining) - cluster.convergence.isDefined must be(true) - cluster.leaderActions() - cluster.status must be(Up) + runOn(first) { + startClusterNode() + cluster.isSingletonCluster must be(true) + cluster.status must be(Joining) + cluster.convergence.isDefined must be(true) + cluster.leaderActions() + cluster.status must be(Up) + } enterBarrier("after-1") } @@ -244,13 +246,20 @@ abstract class TransitionSpec } "startup a second separated cluster consisting of nodes 
fourth and fifth" taggedAs LongRunningTest in { + runOn(fifth) { + startClusterNode() + cluster.leaderActions() + cluster.status must be(Up) + } + enterBarrier("fifth-started") + runOn(fourth) { cluster.join(fifth) } runOn(fifth) { awaitMembers(fourth, fifth) } - testConductor.enter("fourth-joined") + enterBarrier("fourth-joined") fifth gossipTo fourth fourth gossipTo fifth From 932ea6f98ac7b1393ab6186832125ee1ca5efb73 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 26 Jun 2012 09:33:46 +0200 Subject: [PATCH 510/538] Test split brain scenario, see #2265 --- .../src/main/resources/reference.conf | 3 +- .../akka/cluster/MultiNodeClusterSpec.scala | 2 +- .../scala/akka/cluster/SplitBrainSpec.scala | 111 ++++++++++++++++++ 3 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index d226506acc..a1497ed4b6 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -22,7 +22,8 @@ akka { # If seed-nodes is empty it will join itself and become a single node cluster. auto-join = on - # should the 'leader' in the cluster be allowed to automatically mark unreachable nodes as DOWN? + # Should the 'leader' in the cluster be allowed to automatically mark unreachable nodes as DOWN? + # Using auto-down implies that two separate clusters will be formed in case of network partition. 
auto-down = on # the number of gossip daemon actors diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index ed95013bf4..3264c661b0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -198,7 +198,7 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu } } - def roleOfLeader(nodesInCluster: Seq[RoleName]): RoleName = { + def roleOfLeader(nodesInCluster: Seq[RoleName] = roles): RoleName = { nodesInCluster.length must not be (0) nodesInCluster.sorted.head } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala new file mode 100644 index 0000000000..24e94f715d --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala @@ -0,0 +1,111 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import akka.actor.Address +import akka.remote.testconductor.Direction + +object SplitBrainMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + val fifth = role("fifth") + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster { + auto-down = on + failure-detector.threshold = 4 + }""")). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class SplitBrainWithFailureDetectorPuppetMultiJvmNode1 extends SplitBrainSpec with FailureDetectorPuppetStrategy +class SplitBrainWithFailureDetectorPuppetMultiJvmNode2 extends SplitBrainSpec with FailureDetectorPuppetStrategy +class SplitBrainWithFailureDetectorPuppetMultiJvmNode3 extends SplitBrainSpec with FailureDetectorPuppetStrategy +class SplitBrainWithFailureDetectorPuppetMultiJvmNode4 extends SplitBrainSpec with FailureDetectorPuppetStrategy +class SplitBrainWithFailureDetectorPuppetMultiJvmNode5 extends SplitBrainSpec with FailureDetectorPuppetStrategy + +class SplitBrainWithAccrualFailureDetectorMultiJvmNode1 extends SplitBrainSpec with AccrualFailureDetectorStrategy +class SplitBrainWithAccrualFailureDetectorMultiJvmNode2 extends SplitBrainSpec with AccrualFailureDetectorStrategy +class SplitBrainWithAccrualFailureDetectorMultiJvmNode3 extends SplitBrainSpec with AccrualFailureDetectorStrategy +class SplitBrainWithAccrualFailureDetectorMultiJvmNode4 extends SplitBrainSpec with AccrualFailureDetectorStrategy +class SplitBrainWithAccrualFailureDetectorMultiJvmNode5 extends SplitBrainSpec with AccrualFailureDetectorStrategy + +abstract class SplitBrainSpec + extends MultiNodeSpec(SplitBrainMultiJvmSpec) + with MultiNodeClusterSpec { + + import SplitBrainMultiJvmSpec._ + + val side1 = IndexedSeq(first, second) + val side2 = IndexedSeq(third, fourth, fifth) + + "A cluster of 5 members" must { + + "reach initial convergence" taggedAs LongRunningTest in { + awaitClusterUp(first, second, third, fourth, fifth) + + enterBarrier("after-1") + } + + "detect network partition and mark nodes on other side as unreachable" taggedAs LongRunningTest in { + val thirdAddress = address(third) + enterBarrier("before-split") + + runOn(first) { + // split the cluster in two parts (first, second) / (third, fourth, fifth) + for (role1 ← side1; role2 ← side2) { + testConductor.blackhole(role1, role2, Direction.Both).await + 
} + } + enterBarrier("after-split") + + runOn(side1.last) { + for (role ← side2) markNodeAsUnavailable(role) + } + runOn(side2.last) { + for (role ← side1) markNodeAsUnavailable(role) + } + + runOn(side1: _*) { + awaitCond(cluster.latestGossip.overview.unreachable.map(_.address) == (side2.toSet map address), 20 seconds) + } + runOn(side2: _*) { + awaitCond(cluster.latestGossip.overview.unreachable.map(_.address) == (side1.toSet map address), 20 seconds) + } + + enterBarrier("after-2") + } + + "auto-down the other nodes and form new cluster with potentially new leader" taggedAs LongRunningTest in { + + runOn(side1: _*) { + // auto-down = on + awaitCond(cluster.latestGossip.overview.unreachable.forall(m ⇒ m.status == MemberStatus.Down), 15 seconds) + cluster.latestGossip.overview.unreachable.map(_.address) must be(side2.toSet map address) + awaitUpConvergence(side1.size, side2 map address) + assertLeader(side1: _*) + } + + runOn(side2: _*) { + // auto-down = on + awaitCond(cluster.latestGossip.overview.unreachable.forall(m ⇒ m.status == MemberStatus.Down), 15 seconds) + cluster.latestGossip.overview.unreachable.map(_.address) must be(side1.toSet map address) + awaitUpConvergence(side2.size, side1 map address) + assertLeader(side2: _*) + } + + enterBarrier("after-3") + } + + } + +} From 379c0e16b462bf6354a1b7c83512b9519ce758f1 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Tue, 26 Jun 2012 22:07:57 +0200 Subject: [PATCH 511/538] Move Aries Blueprint namespace handler into a separate akka-osgi-aries module --- .../blueprint/akka-namespacehandler.xml | 2 +- .../akka/osgi/aries/blueprint}/akka.xsd | 0 .../BlueprintActorSystemFactory.scala | 4 +--- .../aries/blueprint}/NamespaceHandler.scala | 7 +++--- .../osgi/aries/blueprint}/ParserHelper.scala | 2 +- .../akka/osgi/aries/blueprint}/config.xml | 0 .../akka/osgi/aries/blueprint}/injection.xml | 4 ++-- .../akka/osgi/aries/blueprint}/simple.xml | 0 .../blueprint}/ActorSystemAwareBean.scala | 2 +- 
.../blueprint}/NamespaceHandlerTest.scala | 3 +-- .../akka/osgi/ActorSystemActivator.scala | 1 - project/AkkaBuild.scala | 24 +++++++++++++++---- 12 files changed, 29 insertions(+), 20 deletions(-) rename {akka-osgi => akka-osgi-aries}/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml (87%) rename {akka-osgi/src/main/resources/akka/osgi/blueprint/aries => akka-osgi-aries/src/main/resources/akka/osgi/aries/blueprint}/akka.xsd (100%) rename {akka-osgi/src/main/scala/akka/osgi => akka-osgi-aries/src/main/scala/akka/osgi/aries}/blueprint/BlueprintActorSystemFactory.scala (91%) rename {akka-osgi/src/main/scala/akka/osgi/blueprint/aries => akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint}/NamespaceHandler.scala (96%) rename {akka-osgi/src/main/scala/akka/osgi/blueprint/aries => akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint}/ParserHelper.scala (92%) rename {akka-osgi/src/test/resources/akka/osgi/blueprint/aries => akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint}/config.xml (100%) rename {akka-osgi/src/test/resources/akka/osgi/blueprint/aries => akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint}/injection.xml (68%) rename {akka-osgi/src/test/resources/akka/osgi/blueprint/aries => akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint}/simple.xml (100%) rename {akka-osgi/src/test/scala/akka/osgi/test => akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint}/ActorSystemAwareBean.scala (86%) rename {akka-osgi/src/test/scala/akka/osgi/blueprint/aries => akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint}/NamespaceHandlerTest.scala (97%) diff --git a/akka-osgi/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml b/akka-osgi-aries/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml similarity index 87% rename from akka-osgi/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml rename to akka-osgi-aries/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml index 
650738b10a..8cc52e85f2 100644 --- a/akka-osgi/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml +++ b/akka-osgi-aries/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml @@ -9,7 +9,7 @@ - + diff --git a/akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd b/akka-osgi-aries/src/main/resources/akka/osgi/aries/blueprint/akka.xsd similarity index 100% rename from akka-osgi/src/main/resources/akka/osgi/blueprint/aries/akka.xsd rename to akka-osgi-aries/src/main/resources/akka/osgi/aries/blueprint/akka.xsd diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala similarity index 91% rename from akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala rename to akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala index 51c3e7291f..876d0f0045 100644 --- a/akka-osgi/src/main/scala/akka/osgi/blueprint/BlueprintActorSystemFactory.scala +++ b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala @@ -1,9 +1,7 @@ -package akka.osgi.blueprint +package akka.osgi.aries.blueprint import org.osgi.framework.BundleContext import akka.osgi.OsgiActorSystemFactory -import collection.mutable.Buffer -import akka.actor.{ Actor, Props, ActorSystem } import com.typesafe.config.ConfigFactory /** diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala similarity index 96% rename from akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala rename to akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala index b20e959f23..13b644010e 100644 --- a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/NamespaceHandler.scala +++ 
b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala @@ -1,4 +1,4 @@ -package akka.osgi.blueprint.aries +package akka.osgi.aries.blueprint import org.apache.aries.blueprint.ParserContext import org.osgi.service.blueprint.container.ComponentDefinitionException @@ -7,10 +7,9 @@ import org.apache.aries.blueprint.mutable.MutableBeanMetadata import collection.JavaConversions.setAsJavaSet import org.osgi.framework.BundleContext import org.apache.aries.blueprint.reflect.{ ValueMetadataImpl, RefMetadataImpl, BeanArgumentImpl } -import org.w3c.dom.{ NodeList, Element, Node } +import org.w3c.dom.{ Element, Node } import org.osgi.service.blueprint.reflect.{ BeanMetadata, ComponentMetadata } -import akka.actor.{ ActorSystem } -import akka.osgi.blueprint.{ BlueprintActorSystemFactory } +import akka.actor.ActorSystem import java.util.concurrent.atomic.AtomicInteger import ParserHelper.childElements diff --git a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/ParserHelper.scala b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/ParserHelper.scala similarity index 92% rename from akka-osgi/src/main/scala/akka/osgi/blueprint/aries/ParserHelper.scala rename to akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/ParserHelper.scala index 82fb7bc113..35bf72931a 100644 --- a/akka-osgi/src/main/scala/akka/osgi/blueprint/aries/ParserHelper.scala +++ b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/ParserHelper.scala @@ -1,4 +1,4 @@ -package akka.osgi.blueprint.aries +package akka.osgi.aries.blueprint import org.w3c.dom.{ Node, Element } diff --git a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml b/akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint/config.xml similarity index 100% rename from akka-osgi/src/test/resources/akka/osgi/blueprint/aries/config.xml rename to akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint/config.xml diff --git 
a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/injection.xml b/akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint/injection.xml similarity index 68% rename from akka-osgi/src/test/resources/akka/osgi/blueprint/aries/injection.xml rename to akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint/injection.xml index 9712ee6d1f..6fd21db5ef 100644 --- a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/injection.xml +++ b/akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint/injection.xml @@ -2,8 +2,8 @@ - - + + diff --git a/akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml b/akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint/simple.xml similarity index 100% rename from akka-osgi/src/test/resources/akka/osgi/blueprint/aries/simple.xml rename to akka-osgi-aries/src/test/resources/akka/osgi/aries/blueprint/simple.xml diff --git a/akka-osgi/src/test/scala/akka/osgi/test/ActorSystemAwareBean.scala b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/ActorSystemAwareBean.scala similarity index 86% rename from akka-osgi/src/test/scala/akka/osgi/test/ActorSystemAwareBean.scala rename to akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/ActorSystemAwareBean.scala index ca0df7cc04..6e4bac39dd 100644 --- a/akka-osgi/src/test/scala/akka/osgi/test/ActorSystemAwareBean.scala +++ b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/ActorSystemAwareBean.scala @@ -1,4 +1,4 @@ -package akka.osgi.test +package akka.osgi.aries.blueprint import akka.actor.ActorSystem diff --git a/akka-osgi/src/test/scala/akka/osgi/blueprint/aries/NamespaceHandlerTest.scala b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala similarity index 97% rename from akka-osgi/src/test/scala/akka/osgi/blueprint/aries/NamespaceHandlerTest.scala rename to akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala index bbe26b5e92..44178978ec 100644 --- 
a/akka-osgi/src/test/scala/akka/osgi/blueprint/aries/NamespaceHandlerTest.scala +++ b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala @@ -1,11 +1,10 @@ -package akka.osgi.blueprint.aries +package akka.osgi.aries.blueprint import org.scalatest.FlatSpec import akka.actor.ActorSystem import de.kalpatec.pojosr.framework.launch.BundleDescriptor import akka.osgi.PojoSRTestSupport import akka.osgi.PojoSRTestSupport.bundle -import akka.osgi.test.ActorSystemAwareBean /** * Test cases for {@link ActorSystemActivator} diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index 546ff8c2c4..28912cf6b0 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -1,6 +1,5 @@ package akka.osgi -import com.typesafe.config.{ Config, ConfigFactory } import akka.actor.ActorSystem import java.util.{ Dictionary, Properties } import org.osgi.framework.{ ServiceRegistration, BundleContext, BundleActivator } diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index d9d5517703..f1d71729c2 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -226,6 +226,15 @@ object AkkaBuild extends Build { ) ) + lazy val osgiAries = Project( + id = "akka-osgi-aries", + base = file("akka-osgi-aries"), + dependencies = Seq(osgi % "compile;test->test"), + settings = defaultSettings ++ OSGi.osgiAries ++ Seq( + libraryDependencies ++= Dependencies.osgiAries + ) + ) + lazy val akkaSbtPlugin = Project( id = "akka-sbt-plugin", base = file("akka-sbt-plugin"), @@ -480,7 +489,9 @@ object Dependencies { val camel = Seq(camelCore, Test.scalatest, Test.junit, Test.mockito) - val osgi = Seq(osgiCore, ariesBlueprint, Test.logback, Test.ariesProxy, Test.commonsIo, Test.pojosr, Test.tinybundles, Test.scalatest, Test.junit) + val osgi = Seq(osgiCore,Test.logback, Test.commonsIo, Test.pojosr, 
Test.tinybundles, Test.scalatest, Test.junit) + + val osgiAries = Seq(osgiCore, ariesBlueprint, Test.ariesProxy) val tutorials = Seq(Test.scalatest, Test.junit) @@ -551,9 +562,12 @@ object OSGi { val mailboxesCommon = exports(Seq("akka.actor.mailbox.*")) - val osgi = exports(Seq("akka.osgi.*")) ++ Seq( - OsgiKeys.importPackage := Seq("org.apache.aries.blueprint.*;resolution:=optional", - "org.osgi.service.blueprint.*;resolution:=optional") ++ defaultImports + val osgi = exports(Seq("akka.osgi")) ++ Seq( + OsgiKeys.privatePackage := Seq("akka.osgi.impl") + ) + + val osgiAries = exports() ++ Seq( + OsgiKeys.privatePackage := Seq("akka.osgi.aries.*") ) val remote = exports(Seq("akka.remote.*", "akka.routing.*", "akka.serialization.*")) @@ -564,7 +578,7 @@ object OSGi { val zeroMQ = exports(Seq("akka.zeromq.*")) - def exports(packages: Seq[String]) = osgiSettings ++ Seq( + def exports(packages: Seq[String] = Seq()) = osgiSettings ++ Seq( OsgiKeys.importPackage := defaultImports, OsgiKeys.exportPackage := packages ) From 133e1561c1c623a72466cb7b84786d8fe6f5dbcb Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 27 Jun 2012 10:58:48 +0200 Subject: [PATCH 512/538] Adjusted comment based on feedback, see #2265 --- akka-cluster/src/main/resources/reference.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index a1497ed4b6..a06e9273cb 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -23,7 +23,8 @@ akka { auto-join = on # Should the 'leader' in the cluster be allowed to automatically mark unreachable nodes as DOWN? - # Using auto-down implies that two separate clusters will be formed in case of network partition. + # Using auto-down implies that two separate clusters will automatically be formed in case of + # network partition. 
auto-down = on # the number of gossip daemon actors From 3011c6ebcf6ea227643b98a7e59c998d5eed7dd9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 27 Jun 2012 11:19:20 +0200 Subject: [PATCH 513/538] Fix wrong order of constructor params in AccrualFailureDetector --- .../scala/akka/cluster/AccrualFailureDetector.scala | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index c397d065e5..7d719f6141 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -80,12 +80,12 @@ class AccrualFailureDetector( settings: ClusterSettings) = this( system, - settings.FailureDetectorThreshold, - settings.FailureDetectorMaxSampleSize, - settings.FailureDetectorAcceptableHeartbeatPause, - settings.FailureDetectorMinStdDeviation, - settings.HeartbeatInterval, - AccrualFailureDetector.realClock) + threshold = settings.FailureDetectorThreshold, + maxSampleSize = settings.FailureDetectorMaxSampleSize, + minStdDeviation = settings.FailureDetectorMinStdDeviation, + acceptableHeartbeatPause = settings.FailureDetectorAcceptableHeartbeatPause, + firstHeartbeatEstimate = settings.HeartbeatInterval, + clock = AccrualFailureDetector.realClock) private val log = Logging(system, "FailureDetector") From 85cf1c0580aec43f112df8d8308119d4585b0c74 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Wed, 27 Jun 2012 14:01:09 +0200 Subject: [PATCH 514/538] Add types to API methods and clarify Scaladocs --- .../blueprint/akka-namespacehandler.xml | 3 ++ .../BlueprintActorSystemFactory.scala | 6 ++- .../aries/blueprint/NamespaceHandler.scala | 21 ++++++++- .../osgi/aries/blueprint/ParserHelper.scala | 5 ++- .../akka/osgi/ActorSystemActivator.scala | 45 +++++++++++++------ .../akka/osgi/OsgiActorSystemFactory.scala | 17 +++++-- 
.../impl/BundleDelegatingClassLoader.scala | 4 +- 7 files changed, 76 insertions(+), 25 deletions(-) diff --git a/akka-osgi-aries/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml b/akka-osgi-aries/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml index 8cc52e85f2..99492bedf2 100644 --- a/akka-osgi-aries/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml +++ b/akka-osgi-aries/src/main/resources/OSGI-INF/blueprint/akka-namespacehandler.xml @@ -1,6 +1,9 @@ + diff --git a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala index 876d0f0045..40c9d7367b 100644 --- a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala +++ b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala @@ -5,7 +5,11 @@ import akka.osgi.OsgiActorSystemFactory import com.typesafe.config.ConfigFactory /** - * A set of helper/factory classes to build a Akka system using Blueprint + * A set of helper/factory classes to build a Akka system using Blueprint. This class is only meant to be used by + * the [[akka.osgi.aries.blueprint.NamespaceHandler]] class, you should not use this class directly. + * + * If you're looking for a way to set up Akka using Blueprint without the namespace handler, you should use + * [[akka.osgi.OsgiActorSystemFactory]] instead. 
*/ class BlueprintActorSystemFactory(context: BundleContext, name: String) extends OsgiActorSystemFactory(context) { diff --git a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala index 13b644010e..2ab1306a0f 100644 --- a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala +++ b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala @@ -15,7 +15,26 @@ import java.util.concurrent.atomic.AtomicInteger import ParserHelper.childElements /** - * Aries Blueprint namespace handler implementation + * Aries Blueprint namespace handler implementation. This namespace handler will allow users of Apache Aries' Blueprint + * implementation to define their Akka [[akka.actor.ActorSystem]] using a syntax like this: + * + * {{{ + * + * + * + * + * + * some.config { + * key=value + * } + * + * + * + * + * }}} + * + * Users of other IoC frameworks in an OSGi environment should use [[akka.osgi.OsgiActorSystemFactory]] instead. 
*/ class NamespaceHandler extends org.apache.aries.blueprint.NamespaceHandler { diff --git a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/ParserHelper.scala b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/ParserHelper.scala index 35bf72931a..585037db09 100644 --- a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/ParserHelper.scala +++ b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/ParserHelper.scala @@ -7,9 +7,10 @@ import org.w3c.dom.{ Node, Element } */ object ParserHelper { - def childElements(element: Element) = children(element).filter(_.getNodeType == Node.ELEMENT_NODE).asInstanceOf[Seq[Element]] + def childElements(element: Element): Seq[Element] = + children(element).filter(_.getNodeType == Node.ELEMENT_NODE).asInstanceOf[Seq[Element]] - private[this] def children(element: Element) = { + private[this] def children(element: Element): Seq[Node] = { val nodelist = element.getChildNodes for (index ← 0 until nodelist.getLength) yield nodelist.item(index) } diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index 28912cf6b0..794eec0317 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -5,52 +5,69 @@ import java.util.{ Dictionary, Properties } import org.osgi.framework.{ ServiceRegistration, BundleContext, BundleActivator } /** - * Abstract {@link BundleActivator} implementation to bootstrap and configure an {@link ActorSystem} in an - * OSGi environment. + * Abstract bundle activator implementation to bootstrap and configure an actor system in an + * OSGi environment. It also provides a convenience method to register the actor system in + * the OSGi Service Registry for sharing it with other OSGi bundles. 
+ * + * This convenience activator is mainly useful for setting up a single [[akka.actor.ActorSystem]] instance and sharing that + * with other bundles in the OSGi Framework. If you want to set up multiple systems in the same bundle context, look at + * the [[akka.osgi.OsgiActorSystemFactory]] instead. + * + * @param nameFor a function that allows you to determine the name of the [[akka.actor.ActorSystem]] at bundle startup time */ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) extends BundleActivator { + /** + * No-args constructor - a default name (`bundle--ActorSystem`) will be assigned to the [[akka.actor.ActorSystem]] + */ def this() = this({ context: BundleContext ⇒ None }) - def this(name: String) = this({ context: BundleContext ⇒ Some(name) }) - - var system: Option[ActorSystem] = None - var registration: Option[ServiceRegistration] = None /** - * Implement this method to add your own actors to the ActorSystem + * Create the activator, specifying the name of the [[akka.actor.ActorSystem]] to be created + */ + def this(name: String) = this({ context: BundleContext ⇒ Some(name) }) + + private var system: Option[ActorSystem] = None + private var registration: Option[ServiceRegistration] = None + + /** + * Implement this method to add your own actors to the ActorSystem. If you want to share the actor + * system with other bundles, call the `registerService(BundleContext, ActorSystem)` method from within + * this method. 
* * @param context the bundle context * @param system the ActorSystem that was created by the activator */ - def configure(context: BundleContext, system: ActorSystem) + def configure(context: BundleContext, system: ActorSystem): Unit /** - * Sets up a new ActorSystem and registers it in the OSGi Service Registry + * Sets up a new ActorSystem * * @param context the BundleContext */ - def start(context: BundleContext) { + def start(context: BundleContext): Unit = { system = Some(OsgiActorSystemFactory(context).createActorSystem(nameFor(context))) system.foreach(configure(context, _)) } /** - * Shuts down the ActorSystem when the bundle is stopped and, if necessary, unregisters a service registration + * Shuts down the ActorSystem when the bundle is stopped and, if necessary, unregisters a service registration. * * @param context the BundleContext */ - def stop(context: BundleContext) { + def stop(context: BundleContext): Unit = { registration.foreach(_.unregister()) system.foreach(_.shutdown()) } /** - * Register the actor system in the OSGi service registry + * Register the actor system in the OSGi service registry. The activator itself will ensure that this service + * is unregistered again when the bundle is being stopped. 
* * @param context the bundle context * @param system the actor system */ - def registerService(context: BundleContext, system: ActorSystem) { + def registerService(context: BundleContext, system: ActorSystem): Unit = { val properties = new Properties() properties.put("name", system.name) registration = Some(context.registerService(classOf[ActorSystem].getName, system, diff --git a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala index 2c5a6eca14..ae36406a60 100644 --- a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala +++ b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala @@ -14,13 +14,18 @@ class OsgiActorSystemFactory(val context: BundleContext) { /* * Classloader that delegates to the bundle for which the factory is creating an ActorSystem */ - val classloader = BundleDelegatingClassLoader.createFor(context) + private val classloader = BundleDelegatingClassLoader.createFor(context) /** - * Creates the ActorSystem and registers it in the OSGi Service Registry + * Creates the [[akka.actor.ActorSystem]], using the name specified */ def createActorSystem(name: String): ActorSystem = createActorSystem(Option(name)) + /** + * Creates the [[akka.actor.ActorSystem]], using the name specified. + * + * A default name (`bundle--ActorSystem`) is assigned when you pass along [[scala.None]] instead. 
+ */ def createActorSystem(name: Option[String]): ActorSystem = ActorSystem(actorSystemName(name), actorSystemConfig(context), classloader) @@ -34,7 +39,8 @@ class OsgiActorSystemFactory(val context: BundleContext) { } /** - * Determine a the ActorSystem name + * Determine the name for the [[akka.actor.ActorSystem]] + * Returns a default value of `bundle--ActorSystem` is no name is being specified */ def actorSystemName(name: Option[String]): String = name.getOrElse("bundle-%s-ActorSystem".format(context.getBundle().getBundleId)) @@ -43,6 +49,9 @@ class OsgiActorSystemFactory(val context: BundleContext) { object OsgiActorSystemFactory { - def apply(context: BundleContext) = new OsgiActorSystemFactory(context) + /* + * Create an [[OsgiActorSystemFactory]] instance to set up Akka in an OSGi environment + */ + def apply(context: BundleContext): OsgiActorSystemFactory = new OsgiActorSystemFactory(context) } diff --git a/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala b/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala index 74592392d9..0231a77714 100644 --- a/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala +++ b/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala @@ -63,11 +63,9 @@ class BundleDelegatingClassLoader(bundle: Bundle, classLoader: Option[ClassLoade case cnfe: ClassNotFoundException ⇒ rethrowClassNotFoundException(name, cnfe) } - def rethrowClassNotFoundException(name: String, cnfe: ClassNotFoundException): Nothing = + private def rethrowClassNotFoundException(name: String, cnfe: ClassNotFoundException): Nothing = throw new ClassNotFoundException(name + " from bundle " + bundle.getBundleId + " (" + bundle.getSymbolicName + ")", cnfe) - def getBundle: Bundle = bundle - override def toString = String.format("BundleDelegatingClassLoader(%s)", bundle) } From 4e603623562d84ba7a884802b412c68e5c5959a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= 
Date: Wed, 27 Jun 2012 14:05:22 +0200 Subject: [PATCH 515/538] Make FailureDetector.remove clean out information about address. See #2277 --- .../akka/cluster/AccrualFailureDetector.scala | 25 ++++++++++++------- .../scala/akka/cluster/FailureDetector.scala | 5 ++++ .../cluster/AccrualFailureDetectorSpec.scala | 6 ++--- .../akka/cluster/FailureDetectorPuppet.scala | 5 ++++ 4 files changed, 29 insertions(+), 12 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 7d719f6141..f1c761dec7 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -107,8 +107,7 @@ class AccrualFailureDetector( private case class State( version: Long = 0L, history: Map[Address, HeartbeatHistory] = Map.empty, - timestamps: Map[Address, Long] = Map.empty[Address, Long], - explicitRemovals: Set[Address] = Set.empty[Address]) + timestamps: Map[Address, Long] = Map.empty[Address, Long]) private val state = new AtomicReference[State](State()) @@ -141,8 +140,7 @@ class AccrualFailureDetector( val newState = oldState copy (version = oldState.version + 1, history = oldState.history + (connection -> newHistory), - timestamps = oldState.timestamps + (connection -> timestamp), // record new timestamp, - explicitRemovals = oldState.explicitRemovals - connection) + timestamps = oldState.timestamps + (connection -> timestamp)) // record new timestamp // if we won the race then update else try again if (!state.compareAndSet(oldState, newState)) heartbeat(connection) // recur @@ -158,9 +156,7 @@ class AccrualFailureDetector( val oldState = state.get val oldTimestamp = oldState.timestamps.get(connection) - // if connection has been removed explicitly - if (oldState.explicitRemovals.contains(connection)) Double.MaxValue - else if (oldTimestamp.isEmpty) 0.0 // treat unmanaged 
connections, e.g. with zero heartbeats, as healthy connections + if (oldTimestamp.isEmpty) 0.0 // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections else { val timeDiff = clock() - oldTimestamp.get @@ -208,13 +204,24 @@ class AccrualFailureDetector( if (oldState.history.contains(connection)) { val newState = oldState copy (version = oldState.version + 1, history = oldState.history - connection, - timestamps = oldState.timestamps - connection, - explicitRemovals = oldState.explicitRemovals + connection) + timestamps = oldState.timestamps - connection) // if we won the race then update else try again if (!state.compareAndSet(oldState, newState)) remove(connection) // recur } } + + def reset(): Unit = { + @tailrec + def doReset(): Unit = { + val oldState = state.get + val newState = oldState.copy(version = oldState.version + 1, history = Map.empty, timestamps = Map.empty) + // if we won the race then update else try again + if (!state.compareAndSet(oldState, newState)) doReset() // recur + } + log.debug("Resetting failure detector") + doReset() + } } private[cluster] object HeartbeatHistory { diff --git a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala index 60af0a1c41..1aa926c5e5 100644 --- a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala @@ -25,4 +25,9 @@ trait FailureDetector { * Removes the heartbeat management for a connection. */ def remove(connection: Address): Unit + + /** + * Removes all connections and starts over. 
+ */ + def reset(): Unit } diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index 5c7186502c..df69a52e19 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -114,7 +114,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" fd.isAvailable(conn) must be(true) } - "mark node as dead after explicit removal of connection" in { + "mark node as available after explicit removal of connection" in { val timeInterval = List[Long](0, 1000, 100, 100, 100) val fd = createFailureDetector(clock = fakeTimeGenerator(timeInterval)) @@ -124,7 +124,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" fd.isAvailable(conn) must be(true) fd.remove(conn) - fd.isAvailable(conn) must be(false) + fd.isAvailable(conn) must be(true) } "mark node as available after explicit removal of connection and receiving heartbeat again" in { @@ -140,7 +140,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" fd.remove(conn) - fd.isAvailable(conn) must be(false) //3300 + fd.isAvailable(conn) must be(true) //3300 // it receives heartbeat from an explicitly removed node fd.heartbeat(conn) //4400 diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala index f35bca381d..9ddc9942b0 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -57,4 +57,9 @@ class FailureDetectorPuppet(system: ActorSystem, settings: ClusterSettings) exte log.debug("Removing cluster node [{}]", connection) connections.remove(connection) } + + def reset(): Unit = { + log.debug("Resetting failure detector") + connections.clear() + } } From 35056d765da18d2bfb38592be1c3aeb1341c109b Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 28 Jun 2012 08:51:09 +0200 Subject: [PATCH 516/538] Adding test case duration output to the multi-jvm tests --- .../src/multi-jvm/scala/akka/remote/QuietReporter.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/QuietReporter.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/QuietReporter.scala index f323b75e23..eea5f079d3 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/QuietReporter.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/QuietReporter.scala @@ -8,8 +8,8 @@ import org.scalatest.tools.StandardOutReporter import org.scalatest.events._ import java.lang.Boolean.getBoolean -class QuietReporter(inColor: Boolean) extends StandardOutReporter(false, inColor, false, true) { - def this() = this(!getBoolean("akka.test.nocolor")) +class QuietReporter(inColor: Boolean, withDurations: Boolean = false) extends StandardOutReporter(withDurations, inColor, false, true) { + def this() = this(!getBoolean("akka.test.nocolor"), !getBoolean("akka.test.nodurations")) override def apply(event: Event): Unit = event match { case _: RunStarting ⇒ () From 54a9fc303511af70b47c4bc720bded44ebd53c0b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 28 Jun 2012 10:44:23 +0200 Subject: [PATCH 517/538] #2279 - making logging of RLCEs configurable --- akka-remote/src/main/resources/reference.conf | 3 +++ .../src/main/scala/akka/remote/RemoteSettings.scala | 1 + .../src/main/scala/akka/remote/RemoteTransport.scala | 7 ++++++- .../main/scala/akka/remote/netty/NettyRemoteSupport.scala | 2 ++ 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index a8d2cb2680..f365d5ce19 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -71,6 +71,9 @@ akka { # If this 
is "on", Akka will log all outbound messages at DEBUG level, if off then they are not logged log-sent-messages = off + # If this is "on", Akka will log all RemoteLifeCycleEvents at the level defined for each, if off then they are not logged + log-remote-lifecycle-events = off + # Each property is annotated with (I) or (O) or (I&O), where I stands for “inbound” and O for “outbound” connections. # The NettyRemoteTransport always starts the server role to allow inbound connections, and it starts # active client connections whenever sending to a destination which is not yet connected; if configured diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 951c007fbc..88a7003309 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -14,4 +14,5 @@ class RemoteSettings(val config: Config, val systemName: String) { val LogSend: Boolean = getBoolean("akka.remote.log-sent-messages") val RemoteSystemDaemonAckTimeout: Duration = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) val UntrustedMode: Boolean = getBoolean("akka.remote.untrusted-mode") + val LogRemoteLifeCycleEvents: Boolean = getBoolean("akka.remote.log-remote-lifecycle-events") } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index c48cc430f2..f91c5b03d0 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -202,7 +202,7 @@ abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: Re */ def notifyListeners(message: RemoteLifeCycleEvent): Unit = { system.eventStream.publish(message) - system.log.log(message.logLevel, "{}", message) + if (logRemoteLifeCycleEvents) log.log(message.logLevel, "{}", message) } /** @@ -220,6 
+220,11 @@ abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: Re */ protected def useUntrustedMode: Boolean + /** + * When this method returns true, RemoteLifeCycleEvents will be logged as well as be put onto the eventStream. + */ + protected def logRemoteLifeCycleEvents: Boolean + /** * Returns a newly created AkkaRemoteProtocol with the given message payload. */ diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 9c6e4c85f2..5e3c989fd5 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -128,6 +128,8 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider override protected def useUntrustedMode = remoteSettings.UntrustedMode + override protected def logRemoteLifeCycleEvents = remoteSettings.LogRemoteLifeCycleEvents + val server: NettyRemoteServer = try createServer() catch { case NonFatal(ex) ⇒ shutdown(); throw ex } /** From 4a334bd4bca2c9057fcee66fd37e4a8e171dc6b6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 28 Jun 2012 11:07:40 +0200 Subject: [PATCH 518/538] Adding test for logging of RLCEs --- akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index 8ac11e2440..11b427fc17 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -27,6 +27,7 @@ class RemoteConfigSpec extends AkkaSpec( RemoteTransport must be("akka.remote.netty.NettyRemoteTransport") UntrustedMode must be(false) RemoteSystemDaemonAckTimeout must be(30 seconds) + LogRemoteLifeCycleEvents must be (false) } "be able to parse Netty config elements" 
in { From ebfda69c839e2a44d2424dd21d9222e728be67fa Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 28 Jun 2012 11:13:30 +0200 Subject: [PATCH 519/538] adding RCLE test --- akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index 11b427fc17..e35cd42cd8 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -27,7 +27,7 @@ class RemoteConfigSpec extends AkkaSpec( RemoteTransport must be("akka.remote.netty.NettyRemoteTransport") UntrustedMode must be(false) RemoteSystemDaemonAckTimeout must be(30 seconds) - LogRemoteLifeCycleEvents must be (false) + LogRemoteLifeCycleEvents must be(false) } "be able to parse Netty config elements" in { From a900052f681dae82aba1c386e44b588372b5b962 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 28 Jun 2012 11:32:11 +0200 Subject: [PATCH 520/538] Propagate akka system properties to multi-node tests, see #2280 * Change build to propagate all system properties starting with 'akka.' to multi-jvm and multi-node tests. * Adjusted AkkaSpec and MultiNodeSpec to use load of the config, which means that default overrides (system properties) are used. 
--- .../test/scala/akka/remote/testkit/MultiNodeSpec.scala | 3 ++- .../src/test/scala/akka/testkit/AkkaSpec.scala | 3 ++- project/AkkaBuild.scala | 10 +++++----- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 25bb8df7dc..0efde92c79 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -138,7 +138,8 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: import MultiNodeSpec._ def this(config: MultiNodeConfig) = - this(config.myself, ActorSystem(AkkaSpec.getCallerName(classOf[MultiNodeSpec]), config.config), config.roles, config.deployments) + this(config.myself, ActorSystem(AkkaSpec.getCallerName(classOf[MultiNodeSpec]), ConfigFactory.load(config.config)), + config.roles, config.deployments) /* * Test Class Interface diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index f9ee989e1c..f381e53013 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -59,7 +59,8 @@ object AkkaSpec { abstract class AkkaSpec(_system: ActorSystem) extends TestKit(_system) with WordSpec with MustMatchers with BeforeAndAfterAll { - def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName(getClass), config.withFallback(AkkaSpec.testConf))) + def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName(getClass), + ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) def this(s: String) = this(ConfigFactory.parseString(s)) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 24e803aecc..340516aa03 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -310,11 +310,11 @@ object 
AkkaBuild extends Build { val defaultExcludedTags = Set("timing", "long-running") lazy val defaultMultiJvmOptions: Seq[String] = { - (System.getProperty("akka.test.timefactor") match { - case null => Nil - case x => List("-Dakka.test.timefactor=" + x) - }) ::: - (if (getBoolean("sbt.log.noformat")) List("-Dakka.test.nocolor=true") else Nil) + import scala.collection.JavaConverters._ + val akkaProperties = System.getProperties.propertyNames.asScala.toList.collect { + case key: String if key.startsWith("akka.") => "-D" + key + "=" + System.getProperty(key) + } + akkaProperties ::: (if (getBoolean("sbt.log.noformat")) List("-Dakka.test.nocolor=true") else Nil) } // for excluding tests by name use system property: -Dakka.test.names.exclude=TimingSpec From aca66de73296ad68b681079dee6c892b9acd8832 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 27 Jun 2012 10:56:00 +0200 Subject: [PATCH 521/538] Test gossip in large cluster, see #2239 --- .../src/main/scala/akka/cluster/Cluster.scala | 9 + .../scala/akka/cluster/LargeClusterSpec.scala | 267 ++++++++++++++++++ 2 files changed, 276 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 357d610ed5..eb1c2c08fb 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -28,6 +28,7 @@ import MemberStatus._ import scala.annotation.tailrec import scala.collection.immutable.{ Map, SortedSet } import scala.collection.GenTraversableOnce +import java.util.concurrent.atomic.AtomicLong /** * Interface for membership change listener. @@ -948,6 +949,13 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } } + // Can be removed when gossip has been optimized + private val _receivedGossipCount = new AtomicLong + /** + * INTERNAL API. 
+ */ + private[cluster] def receivedGossipCount: Long = _receivedGossipCount.get + /** * INTERNAL API. * @@ -995,6 +1003,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (!state.compareAndSet(localState, newState)) receiveGossip(from, remoteGossip) // recur if we fail the update else { log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, from) + _receivedGossipCount.incrementAndGet() notifyMembershipChangeListeners(localState, newState) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala new file mode 100644 index 0000000000..a12fc90ff9 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala @@ -0,0 +1,267 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import akka.actor.ActorSystem +import akka.util.Deadline +import java.util.concurrent.TimeoutException +import scala.collection.immutable.SortedSet +import akka.dispatch.Await +import akka.util.Duration +import java.util.concurrent.TimeUnit +import akka.remote.testconductor.RoleName + +object LargeClusterMultiJvmSpec extends MultiNodeConfig { + // each jvm simulates a datacenter with many nodes + val firstDatacenter = role("first-datacenter") + val secondDatacenter = role("second-datacenter") + val thirdDatacenter = role("third-datacenter") + val fourthDatacenter = role("fourth-datacenter") + val fifthDatacenter = role("fifth-datacenter") + + // Note that this test uses default configuration, + // not MultiNodeClusterSpec.clusterConfig + commonConfig(ConfigFactory.parseString(""" + # Number of ActorSystems in each jvm, can be specified as + # system property when running real tests. 
Many nodes + # will take long time. + akka.test.large-cluster-spec.nodes-per-datacenter = 2 + akka.cluster { + gossip-interval = 500 ms + auto-join = off + failure-detector.threshold = 4 + } + akka.loglevel = INFO + akka.actor.default-dispatcher.fork-join-executor.parallelism-max = 2 + akka.scheduler.tick-duration = 33 ms + akka.remote.netty.execution-pool-size = 1 + """)) +} + +class LargeClusterMultiJvmNode1 extends LargeClusterSpec with AccrualFailureDetectorStrategy +class LargeClusterMultiJvmNode2 extends LargeClusterSpec with AccrualFailureDetectorStrategy +class LargeClusterMultiJvmNode3 extends LargeClusterSpec with AccrualFailureDetectorStrategy +class LargeClusterMultiJvmNode4 extends LargeClusterSpec with AccrualFailureDetectorStrategy +class LargeClusterMultiJvmNode5 extends LargeClusterSpec with AccrualFailureDetectorStrategy + +abstract class LargeClusterSpec + extends MultiNodeSpec(LargeClusterMultiJvmSpec) + with MultiNodeClusterSpec { + + import LargeClusterMultiJvmSpec._ + + var systems: IndexedSeq[ActorSystem] = IndexedSeq(system) + val nodesPerDatacenter = system.settings.config.getInt( + "akka.test.large-cluster-spec.nodes-per-datacenter") + + /** + * Since we start some ActorSystems/Clusters outside of the + * MultiNodeClusterSpec control we can't use use the mechanism + * defined in MultiNodeClusterSpec to inject failure detector etc. + * Use ordinary Cluster extension with default AccrualFailureDetector. 
+ */ + override def cluster: Cluster = Cluster(system) + + override def atTermination(): Unit = { + systems foreach { _.shutdown } + val shutdownTimeout = 20.seconds + val deadline = Deadline.now + shutdownTimeout + systems.foreach { sys ⇒ + if (sys.isTerminated) + () // already done + else if (deadline.isOverdue) + sys.log.warning("Failed to shutdown [{}] within [{}]", sys.name, shutdownTimeout) + else { + try sys.awaitTermination(deadline.timeLeft) catch { + case _: TimeoutException ⇒ sys.log.warning("Failed to shutdown [{}] within [{}]", sys.name, shutdownTimeout) + } + } + } + } + + def startupSystems(): Unit = { + // one system is already started by the multi-node test + for (n ← 2 to nodesPerDatacenter) + systems :+= ActorSystem(myself.name + "-" + n, system.settings.config) + + // Initialize the Cluster extensions, i.e. startup the clusters + systems foreach { Cluster(_) } + } + + def expectedMaxDuration(totalNodes: Int): Duration = + 5.seconds + (2.seconds * totalNodes) + + def joinAll(from: RoleName, to: RoleName, totalNodes: Int, runOnRoles: RoleName*): Unit = { + val joiningClusters = systems.map(Cluster(_)).toSet + join(joiningClusters, from, to, totalNodes, runOnRoles: _*) + } + + def join(joiningClusterNodes: Set[Cluster], from: RoleName, to: RoleName, totalNodes: Int, runOnRoles: RoleName*): Unit = { + runOnRoles must contain(from) + runOnRoles must contain(to) + + runOn(runOnRoles: _*) { + systems.size must be(nodesPerDatacenter) // make sure it is initialized + + val clusterNodes = ifNode(from)(joiningClusterNodes)(systems.map(Cluster(_)).toSet) + val startGossipCounts = Map.empty[Cluster, Long] ++ + clusterNodes.map(c ⇒ (c -> c.receivedGossipCount)) + def gossipCount(c: Cluster): Long = c.receivedGossipCount - startGossipCounts(c) + val startTime = System.nanoTime + def tookMillis: String = TimeUnit.NANOSECONDS.toMillis(System.nanoTime - startTime) + " ms" + + val latch = TestLatch(clusterNodes.size) + clusterNodes foreach { c ⇒ + 
c.registerListener(new MembershipChangeListener { + override def notify(members: SortedSet[Member]): Unit = { + if (!latch.isOpen && members.size == totalNodes && members.forall(_.status == MemberStatus.Up)) { + log.debug("All [{}] nodes Up in [{}], it took [{}], received [{}] gossip messages", + totalNodes, c.selfAddress, tookMillis, gossipCount(c)) + latch.countDown() + } + } + }) + } + + runOn(from) { + clusterNodes foreach { _ join to } + } + + Await.ready(latch, remaining) + + awaitCond(clusterNodes.forall(_.convergence.isDefined)) + val counts = clusterNodes.map(gossipCount(_)) + val formattedStats = "mean=%s min=%s max=%s".format(counts.sum / clusterNodes.size, counts.min, counts.max) + log.info("Convergence of [{}] nodes reached, it took [{}], received [{}] gossip messages per node", + totalNodes, tookMillis, formattedStats) + + } + } + + "A large cluster" must { + + "join all nodes in first-datacenter to first-datacenter" taggedAs LongRunningTest in { + runOn(firstDatacenter) { + startupSystems() + startClusterNode() + } + enterBarrier("first-datacenter-started") + + val totalNodes = nodesPerDatacenter + within(expectedMaxDuration(totalNodes)) { + joinAll(from = firstDatacenter, to = firstDatacenter, totalNodes, runOnRoles = firstDatacenter) + enterBarrier("first-datacenter-joined") + } + } + + "join all nodes in second-datacenter to first-datacenter" taggedAs LongRunningTest in { + runOn(secondDatacenter) { + startupSystems() + } + enterBarrier("second-datacenter-started") + + val totalNodes = nodesPerDatacenter * 2 + within(expectedMaxDuration(totalNodes)) { + joinAll(from = secondDatacenter, to = firstDatacenter, totalNodes, runOnRoles = firstDatacenter, secondDatacenter) + enterBarrier("second-datacenter-joined") + } + } + + "join all nodes in third-datacenter to first-datacenter" taggedAs LongRunningTest in { + runOn(thirdDatacenter) { + startupSystems() + } + enterBarrier("third-datacenter-started") + + val totalNodes = nodesPerDatacenter * 3 + 
within(expectedMaxDuration(totalNodes)) { + joinAll(from = thirdDatacenter, to = firstDatacenter, totalNodes, + runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter) + enterBarrier("third-datacenter-joined") + } + } + + "join all nodes in fourth-datacenter to first-datacenter" taggedAs LongRunningTest in { + runOn(fourthDatacenter) { + startupSystems() + } + enterBarrier("fourth-datacenter-started") + + val totalNodes = nodesPerDatacenter * 4 + within(expectedMaxDuration(totalNodes)) { + joinAll(from = fourthDatacenter, to = firstDatacenter, totalNodes, + runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter, fourthDatacenter) + enterBarrier("fourth-datacenter-joined") + } + } + + "join nodes one by one from fifth-datacenter to first-datacenter" taggedAs LongRunningTest in { + runOn(fifthDatacenter) { + startupSystems() + } + enterBarrier("fifth-datacenter-started") + + for (i ← 0 until nodesPerDatacenter) { + val totalNodes = nodesPerDatacenter * 4 + i + 1 + within(expectedMaxDuration(totalNodes)) { + val joiningClusters = ifNode(fifthDatacenter)(Set(Cluster(systems(i))))(Set.empty) + join(joiningClusters, from = fifthDatacenter, to = firstDatacenter, totalNodes, + runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter, fourthDatacenter, fifthDatacenter) + enterBarrier("fifth-datacenter-joined-" + i) + } + } + } + + // FIXME sometimes this fails, FD marks nodes from other than second-datacenter as unavailable + "detect failure and auto-down crashed nodes in second-datacenter" taggedAs LongRunningTest ignore { + val unreachableNodes = nodesPerDatacenter + val liveNodes = nodesPerDatacenter * 4 + + within(20.seconds + expectedMaxDuration(liveNodes)) { + val startGossipCounts = Map.empty[Cluster, Long] ++ + systems.map(sys ⇒ (Cluster(sys) -> Cluster(sys).receivedGossipCount)) + def gossipCount(c: Cluster): Long = c.receivedGossipCount - startGossipCounts(c) + val startTime = System.nanoTime + def tookMillis: String = 
TimeUnit.NANOSECONDS.toMillis(System.nanoTime - startTime) + " ms" + + val latch = TestLatch(nodesPerDatacenter) + systems foreach { sys ⇒ + Cluster(sys).registerListener(new MembershipChangeListener { + override def notify(members: SortedSet[Member]): Unit = { + if (!latch.isOpen && members.size == liveNodes && Cluster(sys).latestGossip.overview.unreachable.size == unreachableNodes) { + log.info("Detected [{}] unreachable nodes in [{}], it took [{}], received [{}] gossip messages", + unreachableNodes, Cluster(sys).selfAddress, tookMillis, gossipCount(Cluster(sys))) + latch.countDown() + } + } + }) + } + + runOn(firstDatacenter) { + testConductor.shutdown(secondDatacenter, 0) + } + + enterBarrier("second-datacenter-shutdown") + + runOn(firstDatacenter, thirdDatacenter, fourthDatacenter, fifthDatacenter) { + Await.ready(latch, remaining) + awaitCond(systems.forall(Cluster(_).convergence.isDefined)) + val counts = systems.map(sys ⇒ gossipCount(Cluster(sys))) + val formattedStats = "mean=%s min=%s max=%s".format(counts.sum / nodesPerDatacenter, counts.min, counts.max) + log.info("Convergence of [{}] nodes reached after failure, it took [{}], received [{}] gossip messages per node", + liveNodes, tookMillis, formattedStats) + } + + enterBarrier("after-6") + } + + } + + } +} From 2da1a912fe58e629a4acf30727e0a63e0e83cfb3 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 28 Jun 2012 11:36:13 +0200 Subject: [PATCH 522/538] Improve efficiency of gossip, see #2193 and #2253 * Essentially as already described in cluster specification, but now fully implemented and tested with LargeClusterSpec * Gossip to nodes with different view (using seen table) with certain probability * Gossip chat, gossip back to sender * Immediate gossip to joining node * Updated some tests to reflect current implementation --- .../src/main/scala/akka/cluster/Cluster.scala | 40 +++- .../scala/akka/cluster/SunnyWeatherSpec.scala | 2 - .../scala/akka/cluster/TransitionSpec.scala | 204 
+++--------------- .../test/scala/akka/cluster/ClusterSpec.scala | 3 +- akka-docs/cluster/cluster.rst | 9 +- 5 files changed, 67 insertions(+), 191 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index eb1c2c08fb..0d87a4b89c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -832,7 +832,10 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) else { log.info("Cluster Node [{}] - Node [{}] is JOINING", selfAddress, node) // treat join as initial heartbeat, so that it becomes unavailable if nothing more happens - if (node != selfAddress) failureDetector heartbeat node + if (node != selfAddress) { + failureDetector heartbeat node + gossipTo(node) + } notifyMembershipChangeListeners(localState, newState) } } @@ -974,10 +977,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val mergedGossip = remoteGossip merge localGossip val versionedMergedGossip = mergedGossip :+ vclockNode - log.debug( - """Can't establish a causal relationship between "remote" gossip and "local" gossip - Remote[{}] - Local[{}] - merging them into [{}]""", - remoteGossip, localGossip, versionedMergedGossip) - versionedMergedGossip } else if (remoteGossip.version < localGossip.version) { @@ -1003,8 +1002,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (!state.compareAndSet(localState, newState)) receiveGossip(from, remoteGossip) // recur if we fail the update else { log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, from) + + if ((winningGossip ne localGossip) && (winningGossip ne remoteGossip)) + log.debug( + """Couldn't establish a causal relationship between "remote" gossip and "local" gossip - Remote[{}] - Local[{}] - merged them into [{}]""", + remoteGossip, localGossip, winningGossip) + 
_receivedGossipCount.incrementAndGet() notifyMembershipChangeListeners(localState, newState) + + if ((winningGossip ne remoteGossip) || (newState.latestGossip ne remoteGossip)) { + // send back gossip to sender when sender had different view, i.e. merge, or sender had + // older or sender had newer + gossipTo(from) + } } } } @@ -1055,6 +1066,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } } + private def gossipToDifferentViewProbability: Double = 0.8 + /** * INTERNAL API. * @@ -1075,8 +1088,21 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localUnreachableMembers = localGossip.overview.unreachable.toIndexedSeq val localUnreachableSize = localUnreachableMembers.size - // 1. gossip to alive members - val gossipedToAlive = gossipToRandomNodeOf(localMemberAddresses) + // 1. gossip to a random alive member with preference to a member + // with older or newer gossip version + val nodesWithdifferentView = { + val localMemberAddressesSet = localGossip.members map { _.address } + for { + (address, version) ← localGossip.overview.seen + if localMemberAddressesSet contains address + if version != localGossip.version + } yield address + } + val gossipedToAlive = + if (nodesWithdifferentView.nonEmpty && ThreadLocalRandom.current.nextDouble() < gossipToDifferentViewProbability) + gossipToRandomNodeOf(nodesWithdifferentView.toIndexedSeq) + else + gossipToRandomNodeOf(localMemberAddresses) // 2. 
gossip to a deputy nodes for facilitating partition healing val deputies = deputyNodes(localMemberAddresses) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 3be082d2f3..ddacf668e0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -23,8 +23,6 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { // not MultiNodeClusterSpec.clusterConfig commonConfig(ConfigFactory.parseString(""" akka.cluster { - # FIXME remove this (use default) when ticket #2239 has been fixed - gossip-interval = 400 ms auto-join = off } akka.loglevel = INFO diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 397d824ef4..c4e43b9abf 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -16,8 +16,6 @@ object TransitionMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") - val fourth = role("fourth") - val fifth = role("fifth") commonConfig(debugConfig(on = false). withFallback(ConfigFactory.parseString("akka.cluster.periodic-tasks-initial-delay = 300 s # turn off all periodic tasks")). 
@@ -27,8 +25,6 @@ object TransitionMultiJvmSpec extends MultiNodeConfig { class TransitionMultiJvmNode1 extends TransitionSpec with FailureDetectorPuppetStrategy class TransitionMultiJvmNode2 extends TransitionSpec with FailureDetectorPuppetStrategy class TransitionMultiJvmNode3 extends TransitionSpec with FailureDetectorPuppetStrategy -class TransitionMultiJvmNode4 extends TransitionSpec with FailureDetectorPuppetStrategy -class TransitionMultiJvmNode5 extends TransitionSpec with FailureDetectorPuppetStrategy abstract class TransitionSpec extends MultiNodeSpec(TransitionMultiJvmSpec) @@ -81,11 +77,15 @@ abstract class TransitionSpec val g = cluster.latestGossip enterBarrier("before-gossip-" + gossipBarrierCounter) awaitCond(cluster.latestGossip != g) // received gossip + // gossip chat will synchronize the views + awaitCond((Set(fromRole, toRole) -- seenLatestGossip).isEmpty) enterBarrier("after-gossip-" + gossipBarrierCounter) } runOn(fromRole) { enterBarrier("before-gossip-" + gossipBarrierCounter) cluster.gossipTo(toRole) // send gossip + // gossip chat will synchronize the views + awaitCond((Set(fromRole, toRole) -- seenLatestGossip).isEmpty) enterBarrier("after-gossip-" + gossipBarrierCounter) } runOn(roles.filterNot(r ⇒ r == fromRole || r == toRole): _*) { @@ -116,22 +116,12 @@ abstract class TransitionSpec runOn(second) { cluster.join(first) } - runOn(first) { + runOn(first, second) { + // gossip chat from the join will synchronize the views awaitMembers(first, second) memberStatus(first) must be(Up) memberStatus(second) must be(Joining) - seenLatestGossip must be(Set(first)) - cluster.convergence.isDefined must be(false) - } - enterBarrier("second-joined") - - first gossipTo second - second gossipTo first - - runOn(first, second) { - memberStatus(first) must be(Up) - memberStatus(second) must be(Joining) - seenLatestGossip must be(Set(first, second)) + awaitCond(seenLatestGossip == Set(first, second)) cluster.convergence.isDefined must be(true) } 
enterBarrier("convergence-joining-2") @@ -144,18 +134,11 @@ abstract class TransitionSpec enterBarrier("leader-actions-2") leader(first, second) gossipTo nonLeader(first, second).head - runOn(nonLeader(first, second).head) { - memberStatus(first) must be(Up) - memberStatus(second) must be(Up) - seenLatestGossip must be(Set(first, second)) - cluster.convergence.isDefined must be(true) - } - - nonLeader(first, second).head gossipTo leader(first, second) runOn(first, second) { - memberStatus(first) must be(Up) - memberStatus(second) must be(Up) + // gossip chat will synchronize the views + awaitCond(memberStatus(second) == Up) seenLatestGossip must be(Set(first, second)) + memberStatus(first) must be(Up) cluster.convergence.isDefined must be(true) } @@ -167,25 +150,26 @@ abstract class TransitionSpec runOn(third) { cluster.join(second) } - runOn(second) { + runOn(second, third) { + // gossip chat from the join will synchronize the views awaitMembers(first, second, third) - cluster.convergence.isDefined must be(false) memberStatus(third) must be(Joining) - seenLatestGossip must be(Set(second)) + awaitCond(seenLatestGossip == Set(second, third)) + cluster.convergence.isDefined must be(false) } enterBarrier("third-joined-second") second gossipTo first - runOn(first) { - members must be(Set(first, second, third)) + runOn(first, second) { + // gossip chat will synchronize the views + awaitMembers(first, second, third) memberStatus(third) must be(Joining) - seenLatestGossip must be(Set(first, second)) - cluster.convergence.isDefined must be(false) + awaitCond(memberStatus(second) == Up) + seenLatestGossip must be(Set(first, second, third)) + cluster.convergence.isDefined must be(true) } first gossipTo third - third gossipTo first - third gossipTo second runOn(first, second, third) { members must be(Set(first, second, third)) memberStatus(first) must be(Up) @@ -224,14 +208,6 @@ abstract class TransitionSpec cluster.convergence.isDefined must be(true) } - // and back again - 
nonLeader(first, second, third).tail.head gossipTo nonLeader(first, second, third).head - runOn(nonLeader(first, second, third).head) { - memberStatus(third) must be(Up) - seenLatestGossip must be(Set(first, second, third)) - cluster.convergence.isDefined must be(true) - } - // first non-leader gossipTo the leader nonLeader(first, second, third).head gossipTo leader(first, second, third) runOn(first, second, third) { @@ -245,160 +221,36 @@ abstract class TransitionSpec enterBarrier("after-3") } - "startup a second separated cluster consisting of nodes fourth and fifth" taggedAs LongRunningTest in { - runOn(fifth) { - startClusterNode() - cluster.leaderActions() - cluster.status must be(Up) - } - enterBarrier("fifth-started") - - runOn(fourth) { - cluster.join(fifth) - } - runOn(fifth) { - awaitMembers(fourth, fifth) - } - enterBarrier("fourth-joined") - - fifth gossipTo fourth - fourth gossipTo fifth - - runOn(fourth, fifth) { - memberStatus(fourth) must be(Joining) - memberStatus(fifth) must be(Up) - seenLatestGossip must be(Set(fourth, fifth)) - cluster.convergence.isDefined must be(true) - } - - enterBarrier("after-4") - } - - "perform correct transitions when second cluster (node fourth) joins first cluster (node third)" taggedAs LongRunningTest in { - - runOn(fourth) { - cluster.join(third) - } - runOn(third) { - awaitMembers(first, second, third, fourth) - seenLatestGossip must be(Set(third)) - } - enterBarrier("fourth-joined-third") - - third gossipTo second - runOn(second) { - seenLatestGossip must be(Set(second, third)) - } - - second gossipTo fourth - runOn(fourth) { - members must be(roles.toSet) - // merge conflict - seenLatestGossip must be(Set(fourth)) - } - - fourth gossipTo first - fourth gossipTo second - fourth gossipTo third - fourth gossipTo fifth - runOn(first, second, third, fifth) { - members must be(roles.toSet) - seenLatestGossip must be(Set(fourth, myself)) - } - - first gossipTo fifth - runOn(fifth) { - seenLatestGossip must be(Set(first, 
fourth, fifth)) - } - - fifth gossipTo third - runOn(third) { - seenLatestGossip must be(Set(first, third, fourth, fifth)) - } - - third gossipTo second - runOn(second) { - seenLatestGossip must be(roles.toSet) - cluster.convergence.isDefined must be(true) - } - - second gossipTo first - second gossipTo third - second gossipTo fourth - third gossipTo fifth - - seenLatestGossip must be(roles.toSet) - memberStatus(first) must be(Up) - memberStatus(second) must be(Up) - memberStatus(third) must be(Up) - memberStatus(fourth) must be(Joining) - memberStatus(fifth) must be(Up) - cluster.convergence.isDefined must be(true) - - enterBarrier("convergence-joining-3") - - runOn(leader(roles: _*)) { - cluster.leaderActions() - memberStatus(fourth) must be(Up) - seenLatestGossip must be(Set(myself)) - cluster.convergence.isDefined must be(false) - } - // spread the word - for (x :: y :: Nil ← (roles.sorted ++ roles.sorted.dropRight(1)).toList.sliding(2)) { - x gossipTo y - } - - enterBarrier("spread-5") - - seenLatestGossip must be(roles.toSet) - memberStatus(first) must be(Up) - memberStatus(second) must be(Up) - memberStatus(third) must be(Up) - memberStatus(fourth) must be(Up) - memberStatus(fifth) must be(Up) - cluster.convergence.isDefined must be(true) - - enterBarrier("after-5") - } - "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest in { - runOn(fifth) { + runOn(third) { markNodeAsUnavailable(second) cluster.reapUnreachableMembers() cluster.latestGossip.overview.unreachable must contain(Member(second, Up)) - seenLatestGossip must be(Set(fifth)) + seenLatestGossip must be(Set(third)) } enterBarrier("after-second-unavailble") - // spread the word - val gossipRound = List(fifth, fourth, third, first, third, fourth, fifth) - for (x :: y :: Nil ← gossipRound.sliding(2)) { - x gossipTo y - } + third gossipTo first - runOn((roles.filterNot(_ == second)): _*) { + runOn(first, third) { cluster.latestGossip.overview.unreachable must 
contain(Member(second, Up)) cluster.convergence.isDefined must be(false) } - runOn(third) { + runOn(first) { cluster.down(second) awaitMemberStatus(second, Down) } enterBarrier("after-second-down") - // spread the word - val gossipRound2 = List(third, fourth, fifth, first, third, fourth, fifth) - for (x :: y :: Nil ← gossipRound2.sliding(2)) { - x gossipTo y - } + first gossipTo third - runOn((roles.filterNot(_ == second)): _*) { + runOn(first, third) { cluster.latestGossip.overview.unreachable must contain(Member(second, Down)) memberStatus(second) must be(Down) - seenLatestGossip must be(Set(first, third, fourth, fifth)) + seenLatestGossip must be(Set(first, third)) cluster.convergence.isDefined must be(true) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index e818847969..68731b89b2 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -105,12 +105,14 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { cluster.latestGossip.members.map(_.address) must be(Set(selfAddress, addresses(1))) memberStatus(addresses(1)) must be(Some(MemberStatus.Joining)) cluster.convergence.isDefined must be(false) + expectMsg(GossipTo(addresses(1))) } "accept a few more joining nodes" in { for (a ← addresses.drop(2)) { cluster.joining(a) memberStatus(a) must be(Some(MemberStatus.Joining)) + expectMsg(GossipTo(a)) } cluster.latestGossip.members.map(_.address) must be(addresses.toSet) } @@ -121,7 +123,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { } "gossip to random live node" in { - cluster.latestGossip.members cluster.gossip() cluster.gossip() cluster.gossip() diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index 1812c33561..cbad3ef690 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -138,7 
+138,7 @@ implementation of `The Phi Accrual Failure Detector`_ by Hayashibara et al. An accrual failure detector decouple monitoring and interpretation. That makes them applicable to a wider area of scenarios and more adequate to build generic failure detection services. The idea is that it is keeping a history of failure -statistics, calculated from heartbeats received from the gossip protocol, and is +statistics, calculated from heartbeats received from other nodes, and is trying to do educated guesses by taking multiple factors, and how they accumulate over time, into account in order to come up with a better guess if a specific node is up or down. Rather than just answering "yes" or "no" to the @@ -232,15 +232,14 @@ breaking logical partitions as seen in the gossip algorithm defined below. During each round of gossip exchange the following process is used: -1. Gossip to random live node (if any) +1. Gossip to random node with newer or older state information, if any, based on the + current gossip overview, with some probability. Otherwise Gossip to any random + live node. 2. If the node gossiped to at (1) was not a ``deputy`` node, or the number of live nodes is less than number of ``deputy`` nodes, gossip to random ``deputy`` node with certain probability depending on number of unreachable, ``deputy``, and live nodes. -3. Gossip to random node with newer or older state information, based on the - current gossip overview, with some probability (?) - The gossiper only sends the gossip overview to the chosen node. 
The recipient of the gossip can use the gossip overview to determine whether: From 0517fac42eeac134eff74bbd728bf1659a72bf0e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 28 Jun 2012 17:24:14 +0200 Subject: [PATCH 523/538] Making use-dispatcher-for-io also used for the client connector --- .../scala/akka/remote/netty/NettyRemoteSupport.scala | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 5e3c989fd5..5c506abfc8 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -40,10 +40,13 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider // TODO replace by system.scheduler val timer: HashedWheelTimer = new HashedWheelTimer(system.threadFactory) - // TODO make configurable/shareable with server socket factory - val clientChannelFactory = new NioClientSocketChannelFactory( - Executors.newCachedThreadPool(system.threadFactory), - Executors.newCachedThreadPool(system.threadFactory)) + val clientChannelFactory = settings.UseDispatcherForIO match { + case Some(id) ⇒ + val d = system.dispatchers.lookup(id) + new NioClientSocketChannelFactory(d, d) + case None ⇒ + new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool()) + } /** * Backing scaffolding for the default implementation of NettyRemoteSupport.createPipeline. 
From 211732391dc5738e5e4d301c129b379987592f6a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 29 Jun 2012 08:42:52 +0200 Subject: [PATCH 524/538] Minor improvement of LargeClusterSpec, see #2239 --- .../scala/akka/cluster/LargeClusterSpec.scala | 38 ++++++++++++++++--- 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala index a12fc90ff9..e3dc7719c1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala @@ -30,7 +30,8 @@ object LargeClusterMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" # Number of ActorSystems in each jvm, can be specified as # system property when running real tests. Many nodes - # will take long time. + # will take long time and consume many threads. + # 10 => 50 nodes is possible to run on one machine. 
akka.test.large-cluster-spec.nodes-per-datacenter = 2 akka.cluster { gossip-interval = 500 ms @@ -40,7 +41,19 @@ object LargeClusterMultiJvmSpec extends MultiNodeConfig { akka.loglevel = INFO akka.actor.default-dispatcher.fork-join-executor.parallelism-max = 2 akka.scheduler.tick-duration = 33 ms - akka.remote.netty.execution-pool-size = 1 + akka.remote.netty.execution-pool-size = 0 + + # don't use testconductor transport in this test, especially not + # when using use-dispatcher-for-io + akka.remote.transport = "akka.remote.netty.NettyRemoteTransport" + + # Using a separate dispatcher for netty io doesn't reduce number + # of needed threads + # akka.remote.netty.use-dispatcher-for-io=akka.test.io-dispatcher + # akka.test.io-dispatcher.fork-join-executor { + # parallelism-min = 100 + # parallelism-max = 100 + # } """)) } @@ -207,13 +220,26 @@ abstract class LargeClusterSpec } enterBarrier("fifth-datacenter-started") - for (i ← 0 until nodesPerDatacenter) { - val totalNodes = nodesPerDatacenter * 4 + i + 1 + // enough to join a few one-by-one (takes too long time otherwise) + val (bulk, oneByOne) = systems.splitAt(systems.size - 3) + + if (bulk.nonEmpty) { + val totalNodes = nodesPerDatacenter * 4 + bulk.size within(expectedMaxDuration(totalNodes)) { - val joiningClusters = ifNode(fifthDatacenter)(Set(Cluster(systems(i))))(Set.empty) + val joiningClusters = ifNode(fifthDatacenter)(bulk.map(Cluster(_)).toSet)(Set.empty) join(joiningClusters, from = fifthDatacenter, to = firstDatacenter, totalNodes, runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter, fourthDatacenter, fifthDatacenter) - enterBarrier("fifth-datacenter-joined-" + i) + enterBarrier("fifth-datacenter-joined-" + bulk.size) + } + } + + for (i ← 0 until oneByOne.size) { + val totalNodes = nodesPerDatacenter * 4 + bulk.size + i + 1 + within(expectedMaxDuration(totalNodes)) { + val joiningClusters = ifNode(fifthDatacenter)(Set(Cluster(oneByOne(i))))(Set.empty) + join(joiningClusters, from = 
fifthDatacenter, to = firstDatacenter, totalNodes, + runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter, fourthDatacenter, fifthDatacenter) + enterBarrier("fifth-datacenter-joined-" + (bulk.size + i)) } } } From d47ff04c0323b08f05eeaa3ab49de0d10d424bc3 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 29 Jun 2012 08:56:58 +0200 Subject: [PATCH 525/538] Moved GossipDifferentViewProbability to config, see #2253 --- akka-cluster/src/main/resources/reference.conf | 5 +++++ akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 4 +--- .../src/main/scala/akka/cluster/ClusterSettings.scala | 1 + .../src/test/scala/akka/cluster/ClusterConfigSpec.scala | 1 + 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index a06e9273cb..b60b91ec43 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -52,6 +52,11 @@ akka { # of the cluster within this deadline. join-timeout = 60s + # Gossip to random node with newer or older state information, if any with some + # this probability. Otherwise Gossip to any random live node. + # Probability value is between 0.0 and 1.0. 0.0 means never, 1.0 means always. + gossip-different-view-probability = 0.8 + failure-detector { # defines the failure detector threshold diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 0d87a4b89c..40d67d7161 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -1066,8 +1066,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } } - private def gossipToDifferentViewProbability: Double = 0.8 - /** * INTERNAL API. 
* @@ -1099,7 +1097,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } yield address } val gossipedToAlive = - if (nodesWithdifferentView.nonEmpty && ThreadLocalRandom.current.nextDouble() < gossipToDifferentViewProbability) + if (nodesWithdifferentView.nonEmpty && ThreadLocalRandom.current.nextDouble() < GossipDifferentViewProbability) gossipToRandomNodeOf(nodesWithdifferentView.toIndexedSeq) else gossipToRandomNodeOf(localMemberAddresses) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 08a9b5160d..6e4cbc4e60 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -36,6 +36,7 @@ class ClusterSettings(val config: Config, val systemName: String) { final val AutoJoin: Boolean = getBoolean("akka.cluster.auto-join") final val AutoDown: Boolean = getBoolean("akka.cluster.auto-down") final val JoinTimeout: Duration = Duration(getMilliseconds("akka.cluster.join-timeout"), MILLISECONDS) + final val GossipDifferentViewProbability: Double = getDouble("akka.cluster.gossip-different-view-probability") final val SchedulerTickDuration: Duration = Duration(getMilliseconds("akka.cluster.scheduler.tick-duration"), MILLISECONDS) final val SchedulerTicksPerWheel: Int = getInt("akka.cluster.scheduler.ticks-per-wheel") } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 92e219a540..07671c6164 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -32,6 +32,7 @@ class ClusterConfigSpec extends AkkaSpec { NrOfGossipDaemons must be(4) AutoJoin must be(true) AutoDown must be(true) + GossipDifferentViewProbability must be(0.8 plusOrMinus 0.0001) SchedulerTickDuration must 
be(33 millis) SchedulerTicksPerWheel must be(512) } From ab3a26d0d25cdd743887ba3b3f62df87e289b6dd Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 29 Jun 2012 10:57:34 +0200 Subject: [PATCH 526/538] Fix LogRoleReplace, remote lifecyle not logged --- .../akka/remote/testkit/LogRoleReplace.scala | 20 ++++--------------- .../akka/remote/testkit/MultiNodeSpec.scala | 8 ++++---- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala index 1e5a53d82e..5dd41365bf 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala @@ -88,12 +88,10 @@ object LogRoleReplace extends ClipboardOwner { class LogRoleReplace { - private val RoleStarted = """\[([\w\-]+)\].*Role \[([\w]+)\] started""".r - private val RemoteServerStarted = """\[([\w\-]+)\].*RemoteServerStarted@akka://.*@([\w\-\.]+):([0-9]+)""".r + private val RoleStarted = """\[([\w\-]+)\].*Role \[([\w]+)\] started with address \[akka://.*@([\w\-\.]+):([0-9]+)\]""".r private val ColorCode = """\[[0-9]+m""" private var replacements: Map[String, String] = Map.empty - private var jvmToAddress: Map[String, String] = Map.empty def process(in: BufferedReader, out: PrintWriter): Unit = { @@ -121,23 +119,13 @@ class LogRoleReplace { if (line.startsWith("[info] * ")) { // reset when new test begins replacements = Map.empty - jvmToAddress = Map.empty } line match { - case RemoteServerStarted(jvm, host, port) ⇒ - jvmToAddress += (jvm -> (host + ":" + port)) + case RoleStarted(jvm, role, host, port) ⇒ + replacements += (jvm -> role) + replacements += ((host + ":" + port) -> role) false - - case RoleStarted(jvm, role) ⇒ - jvmToAddress.get(jvm) match { - case Some(address) ⇒ - replacements += (jvm -> role) - replacements += (address -> role) - false - case None ⇒ false 
- } - case _ ⇒ true } } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 25bb8df7dc..8abfd887e5 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -4,9 +4,7 @@ package akka.remote.testkit import java.net.InetSocketAddress - import com.typesafe.config.{ ConfigObject, ConfigFactory, Config } - import akka.actor.{ RootActorPath, ActorPath, ActorSystem, ExtendedActorSystem } import akka.dispatch.Await import akka.dispatch.Await.Awaitable @@ -14,6 +12,7 @@ import akka.remote.testconductor.{ TestConductorExt, TestConductor, RoleName } import akka.testkit.AkkaSpec import akka.util.{ Timeout, NonFatal } import akka.util.duration._ +import akka.remote.RemoteActorRefProvider /** * Configure the role names and participants of the test, including configuration settings. @@ -259,8 +258,9 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: } } - // useful to see which jvm is running which role - log.info("Role [{}] started", myself.name) + // useful to see which jvm is running which role, used by LogRoleReplace utility + log.info("Role [{}] started with address [{}]", myself.name, + system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].transport.address) // wait for all nodes to remove themselves before we shut the conductor down final override def beforeShutdown() = { From 2ea0bba9e9efcd76d0571b54595134dff4042923 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 5 Jun 2012 15:46:26 +0200 Subject: [PATCH 527/538] Cluster node that is UNREACHABLE and rejoins. 
see #2160 --- .../akka/cluster/MultiNodeClusterSpec.scala | 4 + .../UnreachableNodeRejoinsClusterSpec.scala | 154 ++++++++++++++++++ .../akka/remote/testconductor/Conductor.scala | 13 ++ 3 files changed, 171 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 3264c661b0..8e0f781ceb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -203,6 +203,10 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu nodesInCluster.sorted.head } + def clusterSortedRoles(nodesInCluster: Seq[RoleName]): Seq[RoleName] = { + nodesInCluster.sorted + } + /** * Sort the roles in the order used by the cluster. */ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala new file mode 100644 index 0000000000..6ce00687bf --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala @@ -0,0 +1,154 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory +import akka.actor.Address +import akka.remote.testconductor.{RoleName, Direction} +import akka.util.duration._ + +object UnreachableNodeRejoinsClusterMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + val allRoles = Seq(first, second, third, fourth) + + def allBut(role: RoleName, roles: Seq[RoleName] = allRoles): Seq[RoleName] = { + roles.filter(_ != role) + } + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster { + failure-detector.threshold = 5 + } """) + ).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class UnreachableNodeRejoinsClusterMultiJvmNode1 extends UnreachableNodeRejoinsClusterSpec +class UnreachableNodeRejoinsClusterMultiJvmNode2 extends UnreachableNodeRejoinsClusterSpec +class UnreachableNodeRejoinsClusterMultiJvmNode3 extends UnreachableNodeRejoinsClusterSpec +class UnreachableNodeRejoinsClusterMultiJvmNode4 extends UnreachableNodeRejoinsClusterSpec + +class UnreachableNodeRejoinsClusterSpec + extends MultiNodeSpec(UnreachableNodeRejoinsClusterMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender with BeforeAndAfter { + import UnreachableNodeRejoinsClusterMultiJvmSpec._ + + override def initialParticipants = allRoles.size + + val sortedRoles = clusterSortedRoles(allRoles) + val master = sortedRoles(0) + val victim = sortedRoles(1) + + var endBarrierNumber = 0 + def endBarrier = { + endBarrierNumber += 1 + testConductor.enter("after_" + endBarrierNumber) + } + + "A cluster of " + allRoles.size + " members" must { + + "reach initial convergence" taggedAs LongRunningTest in { + runOn(master) { + cluster.self + awaitUpConvergence(numberOfMembers = allRoles.size) + } 
+ + runOn(allBut(master):_*) { + cluster.join(node(master).address) + awaitUpConvergence(numberOfMembers = allRoles.size) + } + + endBarrier + } + + "mark a node as UNREACHABLE when we pull the network" taggedAs LongRunningTest in { + runOn(first) { + // pull network for victim node from all nodes + allBut(victim).foreach { roleName => + testConductor.blackhole(victim, roleName, Direction.Both).await + } + testConductor.enter("unplug_victim") + } + + runOn(allBut(first):_*) { + testConductor.enter("unplug_victim") + } + + runOn(victim) { + val otherAddresses = sortedRoles.filter(_ != victim).map(node(_).address) + within(30 seconds) { + awaitCond(cluster.latestGossip.overview.unreachable.size == (allRoles.size - 1)) + awaitCond(cluster.latestGossip.members.size == 1) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + cluster.latestGossip.overview.unreachable.map(_.address) must be(otherAddresses.toSet) + cluster.convergence.isDefined must be(false) + } + } + + val allButVictim = allBut(victim) + runOn(allButVictim: _*) { + val victimAddress = node(victim).address + val otherAddresses = allButVictim.map(node(_).address) + within(30 seconds) { + // victim becomes unreachable + awaitCond(cluster.latestGossip.overview.unreachable.size == 1) + awaitCond(cluster.latestGossip.members.size == (allRoles.size - 1)) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitSeenSameState(otherAddresses) + // still one unreachable + cluster.latestGossip.overview.unreachable.size must be(1) + cluster.latestGossip.overview.unreachable.head.address must be(victimAddress) + // and therefore no convergence + cluster.convergence.isDefined must be(false) + } + } + + endBarrier + } + + "mark the node as DOWN" taggedAs LongRunningTest in { + val victimAddress = node(victim).address + runOn(master) { + cluster.down(victimAddress) + } + + runOn(allBut(victim):_*) { + awaitUpConvergence(allRoles.size - 1, Seq(victimAddress)) + } + + 
endBarrier + } + + "allow node to REJOIN when the network is plugged back in" taggedAs LongRunningTest in { + runOn(first) { + // put the network back in + allBut(victim).foreach { roleName => + testConductor.passThrough(victim, roleName, Direction.Both).await + } + testConductor.enter("plug_in_victim") + } + + runOn(allBut(first):_*) { + testConductor.enter("plug_in_victim") + } + + runOn(victim) { + cluster.join(node(master).address) + } + + awaitUpConvergence(allRoles.size) + + endBarrier + } + } +} diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index eba0fffe63..24377d54a1 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -139,6 +139,19 @@ trait Conductor { this: TestConductorExt ⇒ controller ? Throttle(node, target, direction, 0f) mapTo } + /** + * Switch the Netty pipeline of the remote support into pass through mode for + * sending and/or receiving. + * + * @param node is the symbolic name of the node which is to be affected + * @param target is the symbolic name of the other node to which connectivity shall be impeded + * @param direction can be either `Direction.Send`, `Direction.Receive` or `Direction.Both` + */ + def passThrough(node: RoleName, target: RoleName, direction: Direction): Future[Done] = { + import Settings.QueryTimeout + controller ? Throttle(node, target, direction, -1f) mapTo + } + /** * Tell the remote support to shutdown the connection to the given remote * peer. 
It works regardless of whether the recipient was initiator or From 9691dd0325a015830ac40064dd32c63a6f63ba72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 7 Jun 2012 10:37:04 +0200 Subject: [PATCH 528/538] Changes after review --- .../UnreachableNodeRejoinsClusterSpec.scala | 35 ++++++------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala index 6ce00687bf..da9d62e1d2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala @@ -21,7 +21,7 @@ object UnreachableNodeRejoinsClusterMultiJvmSpec extends MultiNodeConfig { val allRoles = Seq(first, second, third, fourth) def allBut(role: RoleName, roles: Seq[RoleName] = allRoles): Seq[RoleName] = { - roles.filter(_ != role) + roles.filterNot(_ == role) } commonConfig(debugConfig(on = false). 
@@ -45,12 +45,12 @@ class UnreachableNodeRejoinsClusterSpec override def initialParticipants = allRoles.size - val sortedRoles = clusterSortedRoles(allRoles) - val master = sortedRoles(0) - val victim = sortedRoles(1) + lazy val sortedRoles = clusterSortedRoles(allRoles) + lazy val master = sortedRoles(0) + lazy val victim = sortedRoles(1) var endBarrierNumber = 0 - def endBarrier = { + def endBarrier: Unit = { endBarrierNumber += 1 testConductor.enter("after_" + endBarrierNumber) } @@ -58,16 +58,7 @@ class UnreachableNodeRejoinsClusterSpec "A cluster of " + allRoles.size + " members" must { "reach initial convergence" taggedAs LongRunningTest in { - runOn(master) { - cluster.self - awaitUpConvergence(numberOfMembers = allRoles.size) - } - - runOn(allBut(master):_*) { - cluster.join(node(master).address) - awaitUpConvergence(numberOfMembers = allRoles.size) - } - + awaitClusterUp(allRoles:_*) endBarrier } @@ -77,15 +68,12 @@ class UnreachableNodeRejoinsClusterSpec allBut(victim).foreach { roleName => testConductor.blackhole(victim, roleName, Direction.Both).await } - testConductor.enter("unplug_victim") } - runOn(allBut(first):_*) { - testConductor.enter("unplug_victim") - } + testConductor.enter("unplug_victim") runOn(victim) { - val otherAddresses = sortedRoles.filter(_ != victim).map(node(_).address) + val otherAddresses = sortedRoles.collect { case x if x != victim => node(x).address } within(30 seconds) { awaitCond(cluster.latestGossip.overview.unreachable.size == (allRoles.size - 1)) awaitCond(cluster.latestGossip.members.size == 1) @@ -96,7 +84,7 @@ class UnreachableNodeRejoinsClusterSpec } val allButVictim = allBut(victim) - runOn(allButVictim: _*) { + runOn(allButVictim:_*) { val victimAddress = node(victim).address val otherAddresses = allButVictim.map(node(_).address) within(30 seconds) { @@ -135,12 +123,9 @@ class UnreachableNodeRejoinsClusterSpec allBut(victim).foreach { roleName => testConductor.passThrough(victim, roleName, Direction.Both).await } - 
testConductor.enter("plug_in_victim") } - runOn(allBut(first):_*) { - testConductor.enter("plug_in_victim") - } + testConductor.enter("plug_in_victim") runOn(victim) { cluster.join(node(master).address) From dd042e3573e4b334097e8f18c81dc4877a687a59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 7 Jun 2012 11:08:23 +0200 Subject: [PATCH 529/538] Group multiple awaitCond into single one --- .../UnreachableNodeRejoinsClusterSpec.scala | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala index da9d62e1d2..347a2c79bc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala @@ -75,9 +75,11 @@ class UnreachableNodeRejoinsClusterSpec runOn(victim) { val otherAddresses = sortedRoles.collect { case x if x != victim => node(x).address } within(30 seconds) { - awaitCond(cluster.latestGossip.overview.unreachable.size == (allRoles.size - 1)) - awaitCond(cluster.latestGossip.members.size == 1) - awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + // victim becomes all alone + awaitCond({ val gossip = cluster.latestGossip + gossip.overview.unreachable.size == (allRoles.size - 1) && + gossip.members.size == 1 && + gossip.members.forall(_.status == MemberStatus.Up) }) cluster.latestGossip.overview.unreachable.map(_.address) must be(otherAddresses.toSet) cluster.convergence.isDefined must be(false) } @@ -89,9 +91,10 @@ class UnreachableNodeRejoinsClusterSpec val otherAddresses = allButVictim.map(node(_).address) within(30 seconds) { // victim becomes unreachable - awaitCond(cluster.latestGossip.overview.unreachable.size == 1) - awaitCond(cluster.latestGossip.members.size == (allRoles.size - 1)) - 
awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitCond({ val gossip = cluster.latestGossip + gossip.overview.unreachable.size == 1 && + gossip.members.size == (allRoles.size - 1) && + gossip.members.forall(_.status == MemberStatus.Up) }) awaitSeenSameState(otherAddresses) // still one unreachable cluster.latestGossip.overview.unreachable.size must be(1) From db1175e1f3c6a00ebf1e0ccf4bda7d21caf71e9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Wed, 27 Jun 2012 13:54:43 +0200 Subject: [PATCH 530/538] Bringing UnreachableNodeRejoinsClusterSpec up to speed with master --- .../akka/cluster/MultiNodeClusterSpec.scala | 4 --- .../UnreachableNodeRejoinsClusterSpec.scala | 35 ++++++++++--------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 8e0f781ceb..3264c661b0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -203,10 +203,6 @@ trait MultiNodeClusterSpec extends FailureDetectorStrategy with Suite { self: Mu nodesInCluster.sorted.head } - def clusterSortedRoles(nodesInCluster: Seq[RoleName]): Seq[RoleName] = { - nodesInCluster.sorted - } - /** * Sort the roles in the order used by the cluster. 
*/ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala index 347a2c79bc..e943ae6c6c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala @@ -24,20 +24,21 @@ object UnreachableNodeRejoinsClusterMultiJvmSpec extends MultiNodeConfig { roles.filterNot(_ == role) } - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" - akka.cluster { - failure-detector.threshold = 5 - } """) - ).withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class UnreachableNodeRejoinsClusterMultiJvmNode1 extends UnreachableNodeRejoinsClusterSpec -class UnreachableNodeRejoinsClusterMultiJvmNode2 extends UnreachableNodeRejoinsClusterSpec -class UnreachableNodeRejoinsClusterMultiJvmNode3 extends UnreachableNodeRejoinsClusterSpec -class UnreachableNodeRejoinsClusterMultiJvmNode4 extends UnreachableNodeRejoinsClusterSpec +class UnreachableNodeRejoinsClusterWithFailureDetectorPuppetMultiJvmNode1 extends UnreachableNodeRejoinsClusterSpec with FailureDetectorPuppetStrategy +class UnreachableNodeRejoinsClusterWithFailureDetectorPuppetMultiJvmNode2 extends UnreachableNodeRejoinsClusterSpec with FailureDetectorPuppetStrategy +class UnreachableNodeRejoinsClusterWithFailureDetectorPuppetMultiJvmNode3 extends UnreachableNodeRejoinsClusterSpec with FailureDetectorPuppetStrategy +class UnreachableNodeRejoinsClusterWithFailureDetectorPuppetMultiJvmNode4 extends UnreachableNodeRejoinsClusterSpec with FailureDetectorPuppetStrategy -class UnreachableNodeRejoinsClusterSpec + +class UnreachableNodeRejoinsClusterWithAccrualFailureDetectorMultiJvmNode1 extends UnreachableNodeRejoinsClusterSpec with AccrualFailureDetectorStrategy 
+class UnreachableNodeRejoinsClusterWithAccrualFailureDetectorMultiJvmNode2 extends UnreachableNodeRejoinsClusterSpec with AccrualFailureDetectorStrategy +class UnreachableNodeRejoinsClusterWithAccrualFailureDetectorMultiJvmNode3 extends UnreachableNodeRejoinsClusterSpec with AccrualFailureDetectorStrategy +class UnreachableNodeRejoinsClusterWithAccrualFailureDetectorMultiJvmNode4 extends UnreachableNodeRejoinsClusterSpec with AccrualFailureDetectorStrategy + +abstract class UnreachableNodeRejoinsClusterSpec extends MultiNodeSpec(UnreachableNodeRejoinsClusterMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { @@ -45,14 +46,14 @@ class UnreachableNodeRejoinsClusterSpec override def initialParticipants = allRoles.size - lazy val sortedRoles = clusterSortedRoles(allRoles) + lazy val sortedRoles = allRoles.sorted lazy val master = sortedRoles(0) lazy val victim = sortedRoles(1) var endBarrierNumber = 0 def endBarrier: Unit = { endBarrierNumber += 1 - testConductor.enter("after_" + endBarrierNumber) + enterBarrier("after_" + endBarrierNumber) } "A cluster of " + allRoles.size + " members" must { @@ -70,10 +71,11 @@ class UnreachableNodeRejoinsClusterSpec } } - testConductor.enter("unplug_victim") + enterBarrier("unplug_victim") runOn(victim) { val otherAddresses = sortedRoles.collect { case x if x != victim => node(x).address } + otherAddresses.foreach(markNodeAsUnavailable(_)) within(30 seconds) { // victim becomes all alone awaitCond({ val gossip = cluster.latestGossip @@ -89,13 +91,14 @@ class UnreachableNodeRejoinsClusterSpec runOn(allButVictim:_*) { val victimAddress = node(victim).address val otherAddresses = allButVictim.map(node(_).address) + markNodeAsUnavailable(victimAddress) within(30 seconds) { // victim becomes unreachable awaitCond({ val gossip = cluster.latestGossip gossip.overview.unreachable.size == 1 && gossip.members.size == (allRoles.size - 1) && gossip.members.forall(_.status == MemberStatus.Up) }) - 
awaitSeenSameState(otherAddresses) + awaitSeenSameState(otherAddresses:_*) // still one unreachable cluster.latestGossip.overview.unreachable.size must be(1) cluster.latestGossip.overview.unreachable.head.address must be(victimAddress) @@ -128,7 +131,7 @@ class UnreachableNodeRejoinsClusterSpec } } - testConductor.enter("plug_in_victim") + enterBarrier("plug_in_victim") runOn(victim) { cluster.join(node(master).address) From 574ff26bb46baaf8ca46e38900e5ab9abd7e584d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Wed, 27 Jun 2012 15:56:45 +0200 Subject: [PATCH 531/538] Support for re-JOINING a node that have been DOWN. See #1908 --- .../src/main/scala/akka/cluster/Cluster.scala | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 40d67d7161..709f82a5e8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -256,7 +256,7 @@ object Gossip { * When convergence is reached the leader change status of `members` from `Joining` * to `Up`. * - * When failure detector consider a node as unavailble it will be moved from + * When failure detector consider a node as unavailable it will be moved from * `members` to `overview.unreachable`. * * When a node is downed, either manually or automatically, its status is changed to `Down`. 
@@ -555,12 +555,14 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } } - private val state = { + private def createCleanState: State = { // note that self is not initially member, // and the Gossip is not versioned for this 'Node' yet - new AtomicReference[State](State(Gossip(members = Gossip.emptyMembers))) + State(Gossip(members = Gossip.emptyMembers)) } + private val state = new AtomicReference[State](createCleanState) + // try to join one of the nodes defined in the 'akka.cluster.seed-nodes' if (AutoJoin) joinSeedNode() @@ -735,8 +737,10 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) @tailrec final def join(address: Address): Unit = { val localState = state.get - val newState = localState copy (joinInProgress = localState.joinInProgress + - (address -> (Deadline.now + JoinTimeout))) + // wipe our state + val newState = createCleanState copy (joinInProgress = Map.empty + (address -> (Deadline.now + JoinTimeout))) + // wipe the failure detector + failureDetector.reset() if (!state.compareAndSet(localState, newState)) join(address) // recur else { val connection = clusterCommandConnectionFor(address) @@ -818,6 +822,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newUnreachableMembers = localUnreachable filterNot { _.address == node } val newOverview = localGossip.overview copy (unreachable = newUnreachableMembers) + // remove the node from the failure detector if it is a DOWN node that is rejoining cluster + if (localUnreachable.size > newUnreachableMembers.size) failureDetector.remove(node) + // add joining node as Joining // add self in case someone else joins before self has joined (Set discards duplicates) val newMembers = localMembers :+ Member(node, Joining) :+ Member(selfAddress, Joining) @@ -998,6 +1005,13 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) latestGossip = winningGossip seen selfAddress, 
joinInProgress = newJoinInProgress) + // for all new joining nodes we optimistically remove them from the failure detector, since if we wait until + // we have won the CAS, then the node might be picked up by the reapUnreachableMembers task and moved to + // unreachable before we can remove the node from the failure detector + (newState.latestGossip.members -- localState.latestGossip.members).filter(_.status == Joining).foreach { + case node ⇒ failureDetector.remove(node.address) + } + // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) receiveGossip(from, remoteGossip) // recur if we fail the update else { From 6ad96c257909576c5d95bd6f7ace1192a7912d89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Thu, 28 Jun 2012 14:52:12 +0200 Subject: [PATCH 532/538] Review changes --- .../src/main/scala/akka/cluster/Cluster.scala | 8 ++-- .../UnreachableNodeRejoinsClusterSpec.scala | 48 ++++++++----------- 2 files changed, 24 insertions(+), 32 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 709f82a5e8..a15a361aff 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -737,9 +737,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) @tailrec final def join(address: Address): Unit = { val localState = state.get - // wipe our state + // wipe our state since a node that joins a cluster must be empty val newState = createCleanState copy (joinInProgress = Map.empty + (address -> (Deadline.now + JoinTimeout))) - // wipe the failure detector + // wipe the failure detector since we are starting fresh and shouldn't care about the past failureDetector.reset() if (!state.compareAndSet(localState, newState)) join(address) // recur else { @@ -819,11 +819,11 @@ class Cluster(system: ExtendedActorSystem, val 
failureDetector: FailureDetector) if (!alreadyMember && !isUnreachable) { // remove the node from the 'unreachable' set in case it is a DOWN node that is rejoining cluster - val newUnreachableMembers = localUnreachable filterNot { _.address == node } + val (rejoiningMember, newUnreachableMembers) = localUnreachable partition { _.address == node } val newOverview = localGossip.overview copy (unreachable = newUnreachableMembers) // remove the node from the failure detector if it is a DOWN node that is rejoining cluster - if (localUnreachable.size > newUnreachableMembers.size) failureDetector.remove(node) + if (rejoiningMember.nonEmpty) failureDetector.remove(node) // add joining node as Joining // add self in case someone else joins before self has joined (Set discards duplicates) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala index e943ae6c6c..34f8605af1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala @@ -18,12 +18,6 @@ object UnreachableNodeRejoinsClusterMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - val allRoles = Seq(first, second, third, fourth) - - def allBut(role: RoleName, roles: Seq[RoleName] = allRoles): Seq[RoleName] = { - roles.filterNot(_ == role) - } - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -40,13 +34,15 @@ class UnreachableNodeRejoinsClusterWithAccrualFailureDetectorMultiJvmNode4 exten abstract class UnreachableNodeRejoinsClusterSpec extends MultiNodeSpec(UnreachableNodeRejoinsClusterMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import UnreachableNodeRejoinsClusterMultiJvmSpec._ - override def 
initialParticipants = allRoles.size + def allBut(role: RoleName, roles: Seq[RoleName] = roles): Seq[RoleName] = { + roles.filterNot(_ == role) + } - lazy val sortedRoles = allRoles.sorted + + lazy val sortedRoles = roles.sorted lazy val master = sortedRoles(0) lazy val victim = sortedRoles(1) @@ -56,10 +52,10 @@ abstract class UnreachableNodeRejoinsClusterSpec enterBarrier("after_" + endBarrierNumber) } - "A cluster of " + allRoles.size + " members" must { + "A cluster of " + roles.size + " members" must { "reach initial convergence" taggedAs LongRunningTest in { - awaitClusterUp(allRoles:_*) + awaitClusterUp(roles:_*) endBarrier } @@ -73,35 +69,32 @@ abstract class UnreachableNodeRejoinsClusterSpec enterBarrier("unplug_victim") + val allButVictim = allBut(victim, sortedRoles) runOn(victim) { - val otherAddresses = sortedRoles.collect { case x if x != victim => node(x).address } - otherAddresses.foreach(markNodeAsUnavailable(_)) + allButVictim.foreach(markNodeAsUnavailable(_)) within(30 seconds) { // victim becomes all alone awaitCond({ val gossip = cluster.latestGossip - gossip.overview.unreachable.size == (allRoles.size - 1) && + gossip.overview.unreachable.size == (roles.size - 1) && gossip.members.size == 1 && gossip.members.forall(_.status == MemberStatus.Up) }) - cluster.latestGossip.overview.unreachable.map(_.address) must be(otherAddresses.toSet) + cluster.latestGossip.overview.unreachable.map(_.address) must be((allButVictim map address).toSet) cluster.convergence.isDefined must be(false) } } - val allButVictim = allBut(victim) runOn(allButVictim:_*) { - val victimAddress = node(victim).address - val otherAddresses = allButVictim.map(node(_).address) - markNodeAsUnavailable(victimAddress) + markNodeAsUnavailable(victim) within(30 seconds) { // victim becomes unreachable awaitCond({ val gossip = cluster.latestGossip gossip.overview.unreachable.size == 1 && - gossip.members.size == (allRoles.size - 1) && + gossip.members.size == (roles.size - 1) && 
gossip.members.forall(_.status == MemberStatus.Up) }) - awaitSeenSameState(otherAddresses:_*) + awaitSeenSameState(allButVictim map address:_*) // still one unreachable cluster.latestGossip.overview.unreachable.size must be(1) - cluster.latestGossip.overview.unreachable.head.address must be(victimAddress) + cluster.latestGossip.overview.unreachable.head.address must be(node(victim).address) // and therefore no convergence cluster.convergence.isDefined must be(false) } @@ -111,13 +104,12 @@ abstract class UnreachableNodeRejoinsClusterSpec } "mark the node as DOWN" taggedAs LongRunningTest in { - val victimAddress = node(victim).address runOn(master) { - cluster.down(victimAddress) + cluster down victim } runOn(allBut(victim):_*) { - awaitUpConvergence(allRoles.size - 1, Seq(victimAddress)) + awaitUpConvergence(roles.size - 1, Seq(victim)) } endBarrier @@ -134,10 +126,10 @@ abstract class UnreachableNodeRejoinsClusterSpec enterBarrier("plug_in_victim") runOn(victim) { - cluster.join(node(master).address) + cluster join master } - awaitUpConvergence(allRoles.size) + awaitUpConvergence(roles.size) endBarrier } From 675dfd918285bfdbd95a9708c5420a1eb6ffdf9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Fri, 29 Jun 2012 12:32:41 +0200 Subject: [PATCH 533/538] Keep the cluster node membership change listeners when joining. 
--- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index a15a361aff..44c646ebe8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -738,7 +738,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) final def join(address: Address): Unit = { val localState = state.get // wipe our state since a node that joins a cluster must be empty - val newState = createCleanState copy (joinInProgress = Map.empty + (address -> (Deadline.now + JoinTimeout))) + val newState = createCleanState copy (joinInProgress = Map.empty + (address -> (Deadline.now + JoinTimeout)), + memberMembershipChangeListeners = localState.memberMembershipChangeListeners) // wipe the failure detector since we are starting fresh and shouldn't care about the past failureDetector.reset() if (!state.compareAndSet(localState, newState)) join(address) // recur From 88c1f69050952188dc9db9b79500c43525f164b9 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Fri, 29 Jun 2012 14:12:36 +0200 Subject: [PATCH 534/538] A few more code-style fixes --- .../main/scala/akka/osgi/ActorSystemActivator.scala | 10 +++++----- .../akka/osgi/impl/BundleDelegatingClassLoader.scala | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index 794eec0317..e92415e1e1 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -20,12 +20,12 @@ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) /** * No-args constructor - a default name (`bundle--ActorSystem`) will be assigned to the 
[[akka.actor.ActorSystem]] */ - def this() = this({ context: BundleContext ⇒ None }) + def this() = this(context ⇒ None) /** * Create the activator, specifying the name of the [[akka.actor.ActorSystem]] to be created */ - def this(name: String) = this({ context: BundleContext ⇒ Some(name) }) + def this(name: String) = this(context ⇒ Some(name)) private var system: Option[ActorSystem] = None private var registration: Option[ServiceRegistration] = None @@ -47,7 +47,7 @@ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) */ def start(context: BundleContext): Unit = { system = Some(OsgiActorSystemFactory(context).createActorSystem(nameFor(context))) - system.foreach(configure(context, _)) + system foreach (configure(context, _)) } /** @@ -56,8 +56,8 @@ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) * @param context the BundleContext */ def stop(context: BundleContext): Unit = { - registration.foreach(_.unregister()) - system.foreach(_.shutdown()) + registration foreach (_.unregister()) + system foreach (_.shutdown()) } /** diff --git a/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala b/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala index 0231a77714..08dee0344e 100644 --- a/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala +++ b/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala @@ -28,7 +28,7 @@ class BundleDelegatingClassLoader(bundle: Bundle, classLoader: Option[ClassLoade protected override def findClass(name: String): Class[_] = bundle.loadClass(name) protected override def findResource(name: String): URL = { - val resource: URL = bundle.getResource(name) + val resource = bundle.getResource(name) classLoader match { case Some(loader) if resource == null ⇒ loader.getResource(name) case _ ⇒ resource @@ -40,7 +40,7 @@ class BundleDelegatingClassLoader(bundle: Bundle, classLoader: Option[ClassLoade 
bundle.getResources(name).asInstanceOf[Enumeration[URL]] protected override def loadClass(name: String, resolve: Boolean): Class[_] = { - val clazz: Class[_] = try { + val clazz = try { findClass(name) } catch { case cnfe: ClassNotFoundException ⇒ { @@ -66,7 +66,7 @@ class BundleDelegatingClassLoader(bundle: Bundle, classLoader: Option[ClassLoade private def rethrowClassNotFoundException(name: String, cnfe: ClassNotFoundException): Nothing = throw new ClassNotFoundException(name + " from bundle " + bundle.getBundleId + " (" + bundle.getSymbolicName + ")", cnfe) - override def toString = String.format("BundleDelegatingClassLoader(%s)", bundle) + override def toString: String = String.format("BundleDelegatingClassLoader(%s)", bundle) } From a151afbe6813b0b4827ec1668eac45a5ebd45ed2 Mon Sep 17 00:00:00 2001 From: Roland Date: Sat, 30 Jun 2012 00:23:49 +0200 Subject: [PATCH 535/538] link to Config lib docs, see #2296 --- .../src/main/scala/akka/actor/ActorSystem.scala | 12 ++++++++++++ project/AkkaBuild.scala | 4 +--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index eb0f241177..261a6b3c58 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -55,11 +55,15 @@ object ActorSystem { * obtains the current ClassLoader by first inspecting the current threads' getContextClassLoader, * then tries to walk the stack to find the callers class loader, then falls back to the ClassLoader * associated with the ActorSystem class. 
+ * + * @see The Typesafe Config Library API Documentation */ def create(name: String, config: Config): ActorSystem = apply(name, config) /** * Creates a new ActorSystem with the name "default", the specified Config, and specified ClassLoader + * + * @see The Typesafe Config Library API Documentation */ def create(name: String, config: Config, classLoader: ClassLoader): ActorSystem = apply(name, config, classLoader) @@ -89,11 +93,15 @@ object ActorSystem { * obtains the current ClassLoader by first inspecting the current threads' getContextClassLoader, * then tries to walk the stack to find the callers class loader, then falls back to the ClassLoader * associated with the ActorSystem class. + * + * @see The Typesafe Config Library API Documentation */ def apply(name: String, config: Config): ActorSystem = apply(name, config, findClassLoader()) /** * Creates a new ActorSystem with the name "default", the specified Config, and specified ClassLoader + * + * @see The Typesafe Config Library API Documentation */ def apply(name: String, config: Config, classLoader: ClassLoader): ActorSystem = new ActorSystemImpl(name, config, classLoader).start() @@ -101,11 +109,15 @@ object ActorSystem { * Settings are the overall ActorSystem Settings which also provides a convenient access to the Config object. 
* * For more detailed information about the different possible configuration options, look in the Akka Documentation under "Configuration" + * + * @see The Typesafe Config Library API Documentation */ class Settings(classLoader: ClassLoader, cfg: Config, final val name: String) { /** * The backing Config of this ActorSystem's Settings + * + * @see The Typesafe Config Library API Documentation */ final val config: Config = { val config = cfg.withFallback(ConfigFactory.defaultReference(classLoader)) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 340516aa03..3a670062ad 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -440,9 +440,7 @@ object AkkaBuild extends Build { object Dependencies { import Dependency._ - val actor = Seq( - config - ) + val actor = Seq(config) val testkit = Seq(Test.scalatest, Test.junit) From 3204e8a426e7216828d4388d71130675b2861b32 Mon Sep 17 00:00:00 2001 From: Gert Vanthienen Date: Sat, 30 Jun 2012 20:30:36 +0200 Subject: [PATCH 536/538] A few more code-style improvements and an easier API for the activator --- .../aries/blueprint/NamespaceHandler.scala | 10 +-- .../blueprint/NamespaceHandlerTest.scala | 78 ++++++++++--------- .../akka/osgi/ActorSystemActivator.scala | 25 +++--- .../akka/osgi/ActorSystemActivatorTest.scala | 70 ++++++++++++----- .../scala/akka/osgi/PojoSRTestSupport.scala | 56 +++++++------ .../scala/akka/osgi/test/TestActivators.scala | 39 ++++++++++ .../osgi/test/TestActorSystemActivator.scala | 18 ----- 7 files changed, 181 insertions(+), 115 deletions(-) create mode 100644 akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala delete mode 100644 akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala diff --git a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala index 2ab1306a0f..0570a027b6 100644 --- 
a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala +++ b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/NamespaceHandler.scala @@ -52,7 +52,7 @@ class NamespaceHandler extends org.apache.aries.blueprint.NamespaceHandler { } def decorate(node: Node, component: ComponentMetadata, context: ParserContext) = - throw new ComponentDefinitionException("Bad xml syntax: node decoration is not supported"); + throw new ComponentDefinitionException("Bad xml syntax: node decoration is not supported") /* * Parse @@ -114,9 +114,9 @@ class NamespaceHandler extends org.apache.aries.blueprint.NamespaceHandler { */ def getId(context: ParserContext, element: Element) = { if (element.hasAttribute(ID_ATTRIBUTE)) { - element.getAttribute(ID_ATTRIBUTE); + element.getAttribute(ID_ATTRIBUTE) } else { - findAvailableId(context); + findAvailableId(context) } } @@ -134,8 +134,8 @@ class NamespaceHandler extends org.apache.aries.blueprint.NamespaceHandler { object NamespaceHandler { - private val ID_ATTRIBUTE = "id"; - private val NAME_ATTRIBUTE = "name"; + private val ID_ATTRIBUTE = "id" + private val NAME_ATTRIBUTE = "name" private val BUNDLE_CONTEXT_REFID = "blueprintBundleContext" diff --git a/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala index 44178978ec..4f4eb647e0 100644 --- a/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala +++ b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala @@ -1,10 +1,11 @@ package akka.osgi.aries.blueprint -import org.scalatest.FlatSpec +import org.scalatest.WordSpec import akka.actor.ActorSystem import de.kalpatec.pojosr.framework.launch.BundleDescriptor import akka.osgi.PojoSRTestSupport import akka.osgi.PojoSRTestSupport.bundle +import org.scalatest.matchers.MustMatchers /** * Test cases for {@link ActorSystemActivator} @@ -24,70 
+25,77 @@ object NamespaceHandlerTest { } -class SimpleNamespaceHandlerTest extends FlatSpec with PojoSRTestSupport { +class SimpleNamespaceHandlerTest extends WordSpec with MustMatchers with PojoSRTestSupport { import NamespaceHandlerTest._ - val testBundles: Seq[BundleDescriptor] = Seq( + val testBundles: Seq[BundleDescriptor] = buildTestBundles(Seq( AKKA_OSGI_BLUEPRINT, - bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("simple.xml"))) + bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("simple.xml")))) - "simple.xml" should "set up ActorSystem when bundle starts" in { - val system = serviceForType[ActorSystem] - assert(system != null) - } + "simple.xml" must { + "set up ActorSystem when bundle starts" in { + val system = serviceForType[ActorSystem] + assert(system != null) + } - it should "stop the ActorSystem when bundle stops" in { - val system = serviceForType[ActorSystem] - assert(!system.isTerminated) + "stop the ActorSystem when bundle stops" in { + val system = serviceForType[ActorSystem] + assert(!system.isTerminated) - bundleForName(TEST_BUNDLE_NAME).stop() + bundleForName(TEST_BUNDLE_NAME).stop() - system.awaitTermination() - assert(system.isTerminated) + system.awaitTermination() + assert(system.isTerminated) + } } } -class ConfigNamespaceHandlerTest extends FlatSpec with PojoSRTestSupport { +class ConfigNamespaceHandlerTest extends WordSpec with MustMatchers with PojoSRTestSupport { import NamespaceHandlerTest._ - val testBundles: Seq[BundleDescriptor] = Seq( + val testBundles: Seq[BundleDescriptor] = buildTestBundles(Seq( AKKA_OSGI_BLUEPRINT, - bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("config.xml"))) + bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("config.xml")))) - "config.xml" should "set up ActorSystem when bundle starts" in { - val system = serviceForType[ActorSystem] - assert(system != null) + "config.xml" must { + "set up ActorSystem when bundle starts" in { + val system = 
serviceForType[ActorSystem] + assert(system != null) - assert(system.settings.config.getString("some.config.key") == "value") - } + assert(system.settings.config.getString("some.config.key") == "value") + } - it should "stop the ActorSystem when bundle stops" in { - val system = serviceForType[ActorSystem] - assert(!system.isTerminated) + "stop the ActorSystem when bundle stops" in { + val system = serviceForType[ActorSystem] + assert(!system.isTerminated) - bundleForName(TEST_BUNDLE_NAME).stop() + bundleForName(TEST_BUNDLE_NAME).stop() - system.awaitTermination() - assert(system.isTerminated) + system.awaitTermination() + assert(system.isTerminated) + } } } -class DependencyInjectionNamespaceHandlerTest extends FlatSpec with PojoSRTestSupport { +class DependencyInjectionNamespaceHandlerTest extends WordSpec with MustMatchers with PojoSRTestSupport { import NamespaceHandlerTest._ - val testBundles: Seq[BundleDescriptor] = Seq( + val testBundles: Seq[BundleDescriptor] = buildTestBundles(Seq( AKKA_OSGI_BLUEPRINT, - bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("injection.xml"))) + bundle(TEST_BUNDLE_NAME).withBlueprintFile(getClass.getResource("injection.xml")))) - "injection.xml" should "set up bean containing ActorSystem" in { - val bean = serviceForType[ActorSystemAwareBean] - assert(bean != null) - assert(bean.system != null) + "injection.xml" must { + + "set up bean containing ActorSystem" in { + val bean = serviceForType[ActorSystemAwareBean] + assert(bean != null) + assert(bean.system != null) + } } } diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index e92415e1e1..e279247dbc 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -12,20 +12,8 @@ import org.osgi.framework.{ ServiceRegistration, BundleContext, BundleActivator * This convenience activator is mainly 
useful for setting up a single [[akka.actor.ActorSystem]] instance and sharing that * with other bundles in the OSGi Framework. If you want to set up multiple systems in the same bundle context, look at * the [[akka.osgi.OsgiActorSystemFactory]] instead. - * - * @param nameFor a function that allows you to determine the name of the [[akka.actor.ActorSystem]] at bundle startup time */ -abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) extends BundleActivator { - - /** - * No-args constructor - a default name (`bundle--ActorSystem`) will be assigned to the [[akka.actor.ActorSystem]] - */ - def this() = this(context ⇒ None) - - /** - * Create the activator, specifying the name of the [[akka.actor.ActorSystem]] to be created - */ - def this(name: String) = this(context ⇒ Some(name)) +abstract class ActorSystemActivator extends BundleActivator { private var system: Option[ActorSystem] = None private var registration: Option[ServiceRegistration] = None @@ -46,7 +34,7 @@ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) * @param context the BundleContext */ def start(context: BundleContext): Unit = { - system = Some(OsgiActorSystemFactory(context).createActorSystem(nameFor(context))) + system = Some(OsgiActorSystemFactory(context).createActorSystem(Option(getActorSystemName(context)))) system foreach (configure(context, _)) } @@ -74,4 +62,13 @@ abstract class ActorSystemActivator(nameFor: (BundleContext) ⇒ Option[String]) properties.asInstanceOf[Dictionary[String, Any]])) } + /** + * By default, the [[akka.actor.ActorSystem]] name will be set to `bundle--ActorSystem`. Override this + * method to define another name for your [[akka.actor.ActorSystem]] instance. 
+ * + * @param context the bundle context + * @return the actor system name + */ + def getActorSystemName(context: BundleContext): String = null + } diff --git a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala index 0b2fdd19ac..37002975e4 100644 --- a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala +++ b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala @@ -1,44 +1,74 @@ package akka.osgi -import org.scalatest.FlatSpec +import org.scalatest.WordSpec import akka.actor.ActorSystem import akka.pattern.ask import akka.dispatch.Await import akka.util.duration._ import akka.util.Timeout import de.kalpatec.pojosr.framework.launch.BundleDescriptor -import test.TestActorSystemActivator +import test.{ RuntimeNameActorSystemActivator, TestActivators, PingPongActorSystemActivator } import test.PingPong._ import PojoSRTestSupport.bundle +import org.scalatest.matchers.MustMatchers /** - * Test cases for {@link ActorSystemActivator} + * Test cases for [[akka.osgi.ActorSystemActivator]] in 2 different scenarios: + * - no name configured for [[akka.actor.ActorSystem]] + * - runtime name configuration */ -class ActorSystemActivatorTest extends FlatSpec with PojoSRTestSupport { +object ActorSystemActivatorTest { val TEST_BUNDLE_NAME = "akka.osgi.test.activator" - val testBundles: Seq[BundleDescriptor] = Seq( - bundle(TEST_BUNDLE_NAME).withActivator(classOf[TestActorSystemActivator])) +} - "ActorSystemActivator" should "start and register the ActorSystem when bundle starts" in { - val system = serviceForType[ActorSystem] - val actor = system.actorFor("/user/pong") +class PingPongActorSystemActivatorTest extends WordSpec with MustMatchers with PojoSRTestSupport { - implicit val timeout = Timeout(5 seconds) - val future = actor ? 
Ping - val result = Await.result(future, timeout.duration) - assert(result != null) - } + import ActorSystemActivatorTest._ - it should "stop the ActorSystem when bundle stops" in { - val system = serviceForType[ActorSystem] - assert(!system.isTerminated) + val testBundles: Seq[BundleDescriptor] = buildTestBundles(Seq( + bundle(TEST_BUNDLE_NAME).withActivator(classOf[PingPongActorSystemActivator]))) - bundleForName(TEST_BUNDLE_NAME).stop() + "PingPongActorSystemActivator" must { - system.awaitTermination() - assert(system.isTerminated) + "start and register the ActorSystem when bundle starts" in { + val system = serviceForType[ActorSystem] + val actor = system.actorFor("/user/pong") + + implicit val timeout = Timeout(5 seconds) + val future = actor ? Ping + val result = Await.result(future, timeout.duration) + assert(result != null) + } + + "stop the ActorSystem when bundle stops" in { + val system = serviceForType[ActorSystem] + assert(!system.isTerminated) + + bundleForName(TEST_BUNDLE_NAME).stop() + + system.awaitTermination() + assert(system.isTerminated) + } } } + +class RuntimeNameActorSystemActivatorTest extends WordSpec with MustMatchers with PojoSRTestSupport { + + import ActorSystemActivatorTest._ + + val testBundles: Seq[BundleDescriptor] = buildTestBundles(Seq( + bundle(TEST_BUNDLE_NAME).withActivator(classOf[RuntimeNameActorSystemActivator]))) + + "RuntimeNameActorSystemActivator" must { + + "register an ActorSystem and add the bundle id to the system name" in { + val system = serviceForType[ActorSystem] + val bundle = bundleForName(TEST_BUNDLE_NAME) + system.name must equal(TestActivators.ACTOR_SYSTEM_NAME_PATTERN.format(bundle.getBundleId)) + } + } + +} \ No newline at end of file diff --git a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala index cbed282c18..3ba9068907 100644 --- a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala +++ 
b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala @@ -19,10 +19,8 @@ import org.scalatest.{ BeforeAndAfterAll, Suite } */ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { - val MAX_WAIT_TIME = 8000; - val START_WAIT_TIME = 100; - - implicit def buildBundleDescriptor(builder: BundleDescriptorBuilder) = builder.build + val MAX_WAIT_TIME = 12800 + val START_WAIT_TIME = 50 /** * All bundles being found on the test classpath are automatically installed and started in the PojoSR runtime. @@ -31,12 +29,12 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { val testBundles: Seq[BundleDescriptor] lazy val context: BundleContext = { - val config = new HashMap[String, AnyRef](); + val config = new HashMap[String, AnyRef]() System.setProperty("org.osgi.framework.storage", "target/akka-osgi/" + System.currentTimeMillis) val bundles = new ClasspathScanner().scanForBundles() bundles.addAll(testBundles) - config.put(PojoServiceRegistryFactory.BUNDLE_DESCRIPTORS, bundles); + config.put(PojoServiceRegistryFactory.BUNDLE_DESCRIPTORS, bundles) val loader: ServiceLoader[PojoServiceRegistryFactory] = ServiceLoader.load(classOf[PojoServiceRegistryFactory]) @@ -68,15 +66,15 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { def awaitReference(serviceType: Class[_], wait: Long): ServiceReference = { val option = Option(context.getServiceReference(serviceType.getName)) + Thread.sleep(wait) option match { - case Some(reference) ⇒ reference; + case Some(reference) ⇒ reference case None if (wait > MAX_WAIT_TIME) ⇒ fail("Gave up waiting for service of type %s".format(serviceType)) - case None ⇒ { - Thread.sleep(wait); - awaitReference(serviceType, wait * 2); - } + case None ⇒ awaitReference(serviceType, wait * 2) } } + + protected def buildTestBundles(builders: Seq[BundleDescriptorBuilder]): Seq[BundleDescriptor] = builders map (_.build) } object PojoSRTestSupport { @@ -97,35 +95,47 @@ class BundleDescriptorBuilder(name: String) { val 
tinybundle = TinyBundles.bundle.set(Constants.BUNDLE_SYMBOLICNAME, name) - def withBlueprintFile(name: String, contents: URL) = + /** + * Add a Blueprint XML file to our test bundle + */ + def withBlueprintFile(name: String, contents: URL): BundleDescriptorBuilder = returnBuilder(tinybundle.add("OSGI-INF/blueprint/%s".format(name), contents)) + /** + * Add a Blueprint XML file to our test bundle + */ def withBlueprintFile(contents: URL): BundleDescriptorBuilder = withBlueprintFile(filename(contents), contents) - def withActivator(activator: Class[_ <: BundleActivator]) = + /** + * Add a Bundle activator to our test bundle + */ + def withActivator(activator: Class[_ <: BundleActivator]): BundleDescriptorBuilder = returnBuilder(tinybundle.set(Constants.BUNDLE_ACTIVATOR, activator.getName)) - def returnBuilder(block: ⇒ Unit) = { + private def returnBuilder(block: ⇒ Unit) = { block this } - def build = { + /** + * Build the actual PojoSR BundleDescriptor instance + */ + def build: BundleDescriptor = { val file: File = tinybundleToJarFile(name) new BundleDescriptor( getClass().getClassLoader(), new URL("jar:" + file.toURI().toString() + "!/"), - extractHeaders(file)); + extractHeaders(file)) } def extractHeaders(file: File): HashMap[String, String] = { - val headers = new HashMap[String, String](); + val headers = new HashMap[String, String]() - val jis = new JarInputStream(new FileInputStream(file)); + val jis = new JarInputStream(new FileInputStream(file)) try { for (entry ← jis.getManifest().getMainAttributes().entrySet()) { - headers.put(entry.getKey().toString(), entry.getValue().toString()); + headers.put(entry.getKey().toString(), entry.getValue().toString()) } } finally { jis.close() @@ -135,12 +145,12 @@ class BundleDescriptorBuilder(name: String) { } def tinybundleToJarFile(name: String): File = { - val file = new File("target/%s-%tQ.jar".format(name, new Date())); - val fos = new FileOutputStream(file); + val file = new File("target/%s-%tQ.jar".format(name, 
new Date())) + val fos = new FileOutputStream(file) try { - copy(tinybundle.build(), fos); + copy(tinybundle.build(), fos) } finally { - fos.close(); + fos.close() } file } diff --git a/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala b/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala new file mode 100644 index 0000000000..54369d88ca --- /dev/null +++ b/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala @@ -0,0 +1,39 @@ +package akka.osgi.test + +import akka.osgi.ActorSystemActivator +import akka.actor.{ Props, ActorSystem } +import PingPong._ +import org.osgi.framework.BundleContext + +/** + * A set of [[akka.osgi.ActorSystemActivator]]s for testing purposes + */ +object TestActivators { + + val ACTOR_SYSTEM_NAME_PATTERN = "actor-system-for-bundle-%s" + +} + +/** + * Simple ActorSystemActivator that starts the sample ping-pong application + */ +class PingPongActorSystemActivator extends ActorSystemActivator { + + def configure(context: BundleContext, system: ActorSystem) { + system.actorOf(Props(new PongActor), name = "pong") + registerService(context, system) + } + +} + +/** + * [[akka.osgi.ActorSystemActivator]] implementation that determines [[akka.actor.ActorSystem]] name at runtime + */ +class RuntimeNameActorSystemActivator extends ActorSystemActivator { + + def configure(context: BundleContext, system: ActorSystem) = registerService(context, system); + + override def getActorSystemName(context: BundleContext) = + TestActivators.ACTOR_SYSTEM_NAME_PATTERN.format(context.getBundle.getBundleId) + +} \ No newline at end of file diff --git a/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala b/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala deleted file mode 100644 index 90305bc663..0000000000 --- a/akka-osgi/src/test/scala/akka/osgi/test/TestActorSystemActivator.scala +++ /dev/null @@ -1,18 +0,0 @@ -package akka.osgi.test - -import akka.osgi.ActorSystemActivator -import akka.actor.{ 
Props, ActorSystem } -import PingPong._ -import org.osgi.framework.BundleContext - -/** - * Sample ActorSystemActivator implementation used for testing purposes - */ -class TestActorSystemActivator extends ActorSystemActivator { - - def configure(context: BundleContext, system: ActorSystem) { - system.actorOf(Props(new PongActor), name = "pong") - registerService(context, system) - } - -} From b684eb1d2fb36dd0b7387081ab449ba02f478716 Mon Sep 17 00:00:00 2001 From: Roland Date: Sat, 30 Jun 2012 21:04:13 +0200 Subject: [PATCH 537/538] =?UTF-8?q?re-add=20=E2=80=9Crepl=E2=80=9D=20scrip?= =?UTF-8?q?t=20for=20priming=20the=20REPL?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- repl | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 repl diff --git a/repl b/repl new file mode 100644 index 0000000000..29f505e292 --- /dev/null +++ b/repl @@ -0,0 +1,16 @@ +import akka.actor._ +import akka.dispatch.{ Future, Promise } +import com.typesafe.config.ConfigFactory +import akka.testkit._ +val remoteConfig = try { + Class.forName("akka.remote.RemoteActorRefProvider") + "\nakka.actor.provider=akka.remote.RemoteActorRefProvider" + } catch { + case _: ClassNotFoundException => "" + } +val config=ConfigFactory.parseString("akka.daemonic=on" + remoteConfig) +val sys=ActorSystem("repl", config.withFallback(ConfigFactory.load())).asInstanceOf[ExtendedActorSystem] +implicit val ec=sys.dispatcher +import akka.util.duration._ +import akka.util.Timeout +implicit val timeout=Timeout(5 seconds) From 3797b72c45708a6a0c0168d27b791f264af5cbff Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sun, 1 Jul 2012 13:31:11 +0200 Subject: [PATCH 538/538] Making sure that OSGi docs don't break the build and making sure that the osgi artifacts are bundled in the project --- .../additional/code/osgi/Activator.scala | 22 ++++--- .../blueprint/NamespaceHandlerTest.scala | 19 +++--- .../akka/osgi/ActorSystemActivatorTest.scala | 15 ++--- 
.../scala/akka/osgi/PojoSRTestSupport.scala | 61 +++++++------------ project/AkkaBuild.scala | 12 ++-- 5 files changed, 53 insertions(+), 76 deletions(-) diff --git a/akka-docs/additional/code/osgi/Activator.scala b/akka-docs/additional/code/osgi/Activator.scala index 34e83fcf77..4f432452c3 100644 --- a/akka-docs/additional/code/osgi/Activator.scala +++ b/akka-docs/additional/code/osgi/Activator.scala @@ -1,18 +1,24 @@ -import akka.actor.{ Props, ActorSystem } -import akka.osgi.ActorSystemActivator -import org.apache.servicemix.examples.akka.Listener -import org.apache.servicemix.examples.akka.Master +package docs.osgi + +case object SomeMessage + +class SomeActor extends akka.actor.Actor { + def receive = { case SomeMessage ⇒ } +} //#Activator -class Activator extends ActorSystemActivator("PiSystem") { +import akka.actor.{ Props, ActorSystem } +import org.osgi.framework.BundleContext +import akka.osgi.ActorSystemActivator + +class Activator extends ActorSystemActivator { def configure(context: BundleContext, system: ActorSystem) { // optionally register the ActorSystem in the OSGi Service Registry registerService(context, system) - val listener = system.actorOf(Props[Listener], name = "listener") - val master = system.actorOf(Props(new Master(4, 10000, 10000, listener)), name = "master") - master ! Calculate + val someActor = system.actorOf(Props[SomeActor], name = "someName") + someActor ! 
SomeMessage } } diff --git a/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala index 4f4eb647e0..3bc32c6141 100644 --- a/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala +++ b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala @@ -35,18 +35,17 @@ class SimpleNamespaceHandlerTest extends WordSpec with MustMatchers with PojoSRT "simple.xml" must { "set up ActorSystem when bundle starts" in { - val system = serviceForType[ActorSystem] - assert(system != null) + serviceForType[ActorSystem] must not be (null) } "stop the ActorSystem when bundle stops" in { val system = serviceForType[ActorSystem] - assert(!system.isTerminated) + system.isTerminated must be(false) bundleForName(TEST_BUNDLE_NAME).stop() system.awaitTermination() - assert(system.isTerminated) + system.isTerminated must be(true) } } @@ -63,19 +62,19 @@ class ConfigNamespaceHandlerTest extends WordSpec with MustMatchers with PojoSRT "config.xml" must { "set up ActorSystem when bundle starts" in { val system = serviceForType[ActorSystem] - assert(system != null) + system must not be (null) - assert(system.settings.config.getString("some.config.key") == "value") + system.settings.config.getString("some.config.key") must be("value") } "stop the ActorSystem when bundle stops" in { val system = serviceForType[ActorSystem] - assert(!system.isTerminated) + system.isTerminated must be(false) bundleForName(TEST_BUNDLE_NAME).stop() system.awaitTermination() - assert(system.isTerminated) + system.isTerminated must be(true) } } @@ -93,8 +92,8 @@ class DependencyInjectionNamespaceHandlerTest extends WordSpec with MustMatchers "set up bean containing ActorSystem" in { val bean = serviceForType[ActorSystemAwareBean] - assert(bean != null) - assert(bean.system != null) + bean must not be (null) + bean.system must not be (null) } } diff 
--git a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala index 37002975e4..6fa89886dd 100644 --- a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala +++ b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala @@ -37,19 +37,17 @@ class PingPongActorSystemActivatorTest extends WordSpec with MustMatchers with P val actor = system.actorFor("/user/pong") implicit val timeout = Timeout(5 seconds) - val future = actor ? Ping - val result = Await.result(future, timeout.duration) - assert(result != null) + Await.result(actor ? Ping, timeout.duration) must be(Pong) } "stop the ActorSystem when bundle stops" in { val system = serviceForType[ActorSystem] - assert(!system.isTerminated) + system.isTerminated must be(false) bundleForName(TEST_BUNDLE_NAME).stop() system.awaitTermination() - assert(system.isTerminated) + system.isTerminated must be(true) } } @@ -59,15 +57,12 @@ class RuntimeNameActorSystemActivatorTest extends WordSpec with MustMatchers wit import ActorSystemActivatorTest._ - val testBundles: Seq[BundleDescriptor] = buildTestBundles(Seq( - bundle(TEST_BUNDLE_NAME).withActivator(classOf[RuntimeNameActorSystemActivator]))) + val testBundles: Seq[BundleDescriptor] = buildTestBundles(Seq(bundle(TEST_BUNDLE_NAME).withActivator(classOf[RuntimeNameActorSystemActivator]))) "RuntimeNameActorSystemActivator" must { "register an ActorSystem and add the bundle id to the system name" in { - val system = serviceForType[ActorSystem] - val bundle = bundleForName(TEST_BUNDLE_NAME) - system.name must equal(TestActivators.ACTOR_SYSTEM_NAME_PATTERN.format(bundle.getBundleId)) + serviceForType[ActorSystem].name must equal(TestActivators.ACTOR_SYSTEM_NAME_PATTERN.format(bundleForName(TEST_BUNDLE_NAME).getBundleId)) } } diff --git a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala index 
3ba9068907..b19a90bf21 100644 --- a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala +++ b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala @@ -36,10 +36,7 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { bundles.addAll(testBundles) config.put(PojoServiceRegistryFactory.BUNDLE_DESCRIPTORS, bundles) - val loader: ServiceLoader[PojoServiceRegistryFactory] = ServiceLoader.load(classOf[PojoServiceRegistryFactory]) - - val registry = loader.iterator.next.newPojoServiceRegistry(config) - registry.getBundleContext + ServiceLoader.load(classOf[PojoServiceRegistryFactory]).iterator.next.newPojoServiceRegistry(config).getBundleContext } // Ensure bundles get stopped at the end of the test to release resources and stop threads @@ -48,25 +45,21 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { /** * Convenience method to find a bundle by symbolic name */ - def bundleForName(name: String) = context.getBundles.find(_.getSymbolicName == name) match { - case Some(bundle) ⇒ bundle - case None ⇒ fail("Unable to find bundle with symbolic name %s".format(name)) - } + def bundleForName(name: String) = + context.getBundles.find(_.getSymbolicName == name).getOrElse(fail("Unable to find bundle with symbolic name %s".format(name))) /** * Convenience method to find a service by interface. If the service is not already available in the OSGi Service * Registry, this method will wait for a few seconds for the service to appear. 
*/ - def serviceForType[T](implicit manifest: Manifest[T]): T = { - val reference = awaitReference(manifest.erasure) - context.getService(reference).asInstanceOf[T] - } + def serviceForType[T](implicit manifest: Manifest[T]): T = + context.getService(awaitReference(manifest.erasure)).asInstanceOf[T] def awaitReference(serviceType: Class[_]): ServiceReference = awaitReference(serviceType, START_WAIT_TIME) def awaitReference(serviceType: Class[_], wait: Long): ServiceReference = { val option = Option(context.getServiceReference(serviceType.getName)) - Thread.sleep(wait) + Thread.sleep(wait) //FIXME No sleep please option match { case Some(reference) ⇒ reference case None if (wait > MAX_WAIT_TIME) ⇒ fail("Gave up waiting for service of type %s".format(serviceType)) @@ -78,12 +71,10 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { } object PojoSRTestSupport { - /** * Convenience method to define additional test bundles */ def bundle(name: String) = new BundleDescriptorBuilder(name) - } /** @@ -98,22 +89,24 @@ class BundleDescriptorBuilder(name: String) { /** * Add a Blueprint XML file to our test bundle */ - def withBlueprintFile(name: String, contents: URL): BundleDescriptorBuilder = - returnBuilder(tinybundle.add("OSGI-INF/blueprint/%s".format(name), contents)) + def withBlueprintFile(name: String, contents: URL): BundleDescriptorBuilder = { + tinybundle.add("OSGI-INF/blueprint/%s".format(name), contents) + this + } /** * Add a Blueprint XML file to our test bundle */ - def withBlueprintFile(contents: URL): BundleDescriptorBuilder = withBlueprintFile(filename(contents), contents) + def withBlueprintFile(contents: URL): BundleDescriptorBuilder = { + val filename = contents.getFile.split("/").last + withBlueprintFile(filename, contents) + } /** * Add a Bundle activator to our test bundle */ - def withActivator(activator: Class[_ <: BundleActivator]): BundleDescriptorBuilder = - returnBuilder(tinybundle.set(Constants.BUNDLE_ACTIVATOR, 
activator.getName)) - - private def returnBuilder(block: ⇒ Unit) = { - block + def withActivator(activator: Class[_ <: BundleActivator]): BundleDescriptorBuilder = { + tinybundle.set(Constants.BUNDLE_ACTIVATOR, activator.getName) this } @@ -122,11 +115,7 @@ class BundleDescriptorBuilder(name: String) { */ def build: BundleDescriptor = { val file: File = tinybundleToJarFile(name) - - new BundleDescriptor( - getClass().getClassLoader(), - new URL("jar:" + file.toURI().toString() + "!/"), - extractHeaders(file)) + new BundleDescriptor(getClass().getClassLoader(), new URL("jar:" + file.toURI().toString() + "!/"), extractHeaders(file)) } def extractHeaders(file: File): HashMap[String, String] = { @@ -134,12 +123,9 @@ class BundleDescriptorBuilder(name: String) { val jis = new JarInputStream(new FileInputStream(file)) try { - for (entry ← jis.getManifest().getMainAttributes().entrySet()) { + for (entry ← jis.getManifest().getMainAttributes().entrySet()) headers.put(entry.getKey().toString(), entry.getValue().toString()) - } - } finally { - jis.close() - } + } finally jis.close() headers } @@ -147,14 +133,9 @@ class BundleDescriptorBuilder(name: String) { def tinybundleToJarFile(name: String): File = { val file = new File("target/%s-%tQ.jar".format(name, new Date())) val fos = new FileOutputStream(file) - try { - copy(tinybundle.build(), fos) - } finally { - fos.close() - } + try copy(tinybundle.build(), fos) finally fos.close() + file } - - private[this] def filename(url: URL) = url.getFile.split("/").last } diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 30c1db8996..7b83364ed0 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -43,7 +43,7 @@ object AkkaBuild extends Build { sphinxLatex <<= sphinxLatex in LocalProject(docs.id), sphinxPdf <<= sphinxPdf in LocalProject(docs.id) ), - aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, 
samples, tutorials, docs) + aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, samples, tutorials, osgi, osgiAries, docs) ) lazy val actor = Project( @@ -299,7 +299,7 @@ object AkkaBuild extends Build { id = "akka-docs", base = file("akka-docs"), dependencies = Seq(actor, testkit % "test->test", mailboxesCommon % "compile;test->test", - remote, cluster, slf4j, agent, transactor, fileMailbox, zeroMQ, camel), + remote, cluster, slf4j, agent, transactor, fileMailbox, zeroMQ, camel, osgi, osgiAries), settings = defaultSettings ++ Sphinx.settings ++ Seq( unmanagedSourceDirectories in Test <<= baseDirectory { _ ** "code" get }, libraryDependencies ++= Dependencies.docs, @@ -560,13 +560,9 @@ object OSGi { val mailboxesCommon = exports(Seq("akka.actor.mailbox.*")) - val osgi = exports(Seq("akka.osgi")) ++ Seq( - OsgiKeys.privatePackage := Seq("akka.osgi.impl") - ) + val osgi = exports(Seq("akka.osgi")) ++ Seq(OsgiKeys.privatePackage := Seq("akka.osgi.impl")) - val osgiAries = exports() ++ Seq( - OsgiKeys.privatePackage := Seq("akka.osgi.aries.*") - ) + val osgiAries = exports() ++ Seq(OsgiKeys.privatePackage := Seq("akka.osgi.aries.*")) val remote = exports(Seq("akka.remote.*", "akka.routing.*", "akka.serialization.*"))