diff --git a/akka-cluster/src/main/java/akka/cloud/cluster/ClusterProtocol.java b/akka-cluster/src/main/java/akka/cloud/cluster/ClusterProtocol.java new file mode 100644 index 0000000000..7fdc8510b2 --- /dev/null +++ b/akka-cluster/src/main/java/akka/cloud/cluster/ClusterProtocol.java @@ -0,0 +1,1438 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: ClusterProtocol.proto + +package akka.cloud.cluster; + +public final class ClusterProtocol { + private ClusterProtocol() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public enum RemoteDaemonMessageType + implements com.google.protobuf.ProtocolMessageEnum { + START(0, 1), + STOP(1, 2), + USE(2, 3), + RELEASE(3, 4), + MAKE_AVAILABLE(4, 5), + MAKE_UNAVAILABLE(5, 6), + DISCONNECT(6, 7), + RECONNECT(7, 8), + RESIGN(8, 9), + FAIL_OVER_CONNECTIONS(9, 10), + FUNCTION_FUN0_UNIT(10, 11), + FUNCTION_FUN0_ANY(11, 12), + FUNCTION_FUN1_ARG_UNIT(12, 13), + FUNCTION_FUN1_ARG_ANY(13, 14), + ; + + + public final int getNumber() { return value; } + + public static RemoteDaemonMessageType valueOf(int value) { + switch (value) { + case 1: return START; + case 2: return STOP; + case 3: return USE; + case 4: return RELEASE; + case 5: return MAKE_AVAILABLE; + case 6: return MAKE_UNAVAILABLE; + case 7: return DISCONNECT; + case 8: return RECONNECT; + case 9: return RESIGN; + case 10: return FAIL_OVER_CONNECTIONS; + case 11: return FUNCTION_FUN0_UNIT; + case 12: return FUNCTION_FUN0_ANY; + case 13: return FUNCTION_FUN1_ARG_UNIT; + case 14: return FUNCTION_FUN1_ARG_ANY; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RemoteDaemonMessageType findValueByNumber(int number) { + return RemoteDaemonMessageType.valueOf(number) + ; } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return akka.cloud.cluster.ClusterProtocol.getDescriptor().getEnumTypes().get(0); + } + + private static final RemoteDaemonMessageType[] VALUES = { + START, STOP, USE, RELEASE, MAKE_AVAILABLE, MAKE_UNAVAILABLE, DISCONNECT, RECONNECT, RESIGN, FAIL_OVER_CONNECTIONS, FUNCTION_FUN0_UNIT, FUNCTION_FUN0_ANY, FUNCTION_FUN1_ARG_UNIT, FUNCTION_FUN1_ARG_ANY, + }; + public static RemoteDaemonMessageType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + private final int index; + private final int value; + private RemoteDaemonMessageType(int index, int value) { + this.index = index; + this.value = value; + } + + static { + akka.cloud.cluster.ClusterProtocol.getDescriptor(); + } + + // @@protoc_insertion_point(enum_scope:RemoteDaemonMessageType) + } + + public static final class RemoteDaemonMessageProtocol extends + com.google.protobuf.GeneratedMessage { + // Use RemoteDaemonMessageProtocol.newBuilder() to construct. 
+ private RemoteDaemonMessageProtocol() { + initFields(); + } + private RemoteDaemonMessageProtocol(boolean noInit) {} + + private static final RemoteDaemonMessageProtocol defaultInstance; + public static RemoteDaemonMessageProtocol getDefaultInstance() { + return defaultInstance; + } + + public RemoteDaemonMessageProtocol getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.ClusterProtocol.internal_static_RemoteDaemonMessageProtocol_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.ClusterProtocol.internal_static_RemoteDaemonMessageProtocol_fieldAccessorTable; + } + + // required .RemoteDaemonMessageType messageType = 1; + public static final int MESSAGETYPE_FIELD_NUMBER = 1; + private boolean hasMessageType; + private akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageType messageType_; + public boolean hasMessageType() { return hasMessageType; } + public akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageType getMessageType() { return messageType_; } + + // optional .UuidProtocol actorUuid = 2; + public static final int ACTORUUID_FIELD_NUMBER = 2; + private boolean hasActorUuid; + private akka.cloud.cluster.ClusterProtocol.UuidProtocol actorUuid_; + public boolean hasActorUuid() { return hasActorUuid; } + public akka.cloud.cluster.ClusterProtocol.UuidProtocol getActorUuid() { return actorUuid_; } + + // optional string actorId = 3; + public static final int ACTORID_FIELD_NUMBER = 3; + private boolean hasActorId; + private java.lang.String actorId_ = ""; + public boolean hasActorId() { return hasActorId; } + public java.lang.String getActorId() { return actorId_; } + + // optional string actorClassName = 4; + public static final int ACTORCLASSNAME_FIELD_NUMBER = 4; + private boolean hasActorClassName; + private java.lang.String actorClassName_ = ""; + public boolean hasActorClassName() { return hasActorClassName; } + public java.lang.String getActorClassName() { return actorClassName_; } + + // optional bytes payload = 5; + public static final int PAYLOAD_FIELD_NUMBER = 5; + private boolean hasPayload; + private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasPayload() { return hasPayload; } + public com.google.protobuf.ByteString getPayload() { return payload_; } + + private void initFields() { + messageType_ = akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageType.START; + actorUuid_ = akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance(); + } + public final boolean isInitialized() { + if (!hasMessageType) return false; + if (hasActorUuid()) { + if (!getActorUuid().isInitialized()) return false; + } + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasMessageType()) { + output.writeEnum(1, getMessageType().getNumber()); + } + if (hasActorUuid()) { + output.writeMessage(2, getActorUuid()); + } + if (hasActorId()) { + output.writeString(3, getActorId()); + } + if (hasActorClassName()) { + output.writeString(4, getActorClassName()); + } + if (hasPayload()) { + output.writeBytes(5, getPayload()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 
0; + if (hasMessageType()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, getMessageType().getNumber()); + } + if (hasActorUuid()) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getActorUuid()); + } + if (hasActorId()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(3, getActorId()); + } + if (hasActorClassName()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(4, getActorClassName()); + } + if (hasPayload()) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getPayload()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static 
Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol result; + + // Construct using akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol(); + return builder; + } + + protected akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.getDescriptor(); + } + + public akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol getDefaultInstanceForType() { + return akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol) { + return mergeFrom((akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol other) { + if (other == akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.getDefaultInstance()) return this; + if (other.hasMessageType()) { + setMessageType(other.getMessageType()); + } + if (other.hasActorUuid()) { + mergeActorUuid(other.getActorUuid()); + } + if (other.hasActorId()) { + setActorId(other.getActorId()); + } + if (other.hasActorClassName()) { + setActorClassName(other.getActorClassName()); + } + if (other.hasPayload()) { + setPayload(other.getPayload()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageType value = akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + setMessageType(value); + } + break; + } + case 18: { + akka.cloud.cluster.ClusterProtocol.UuidProtocol.Builder subBuilder = akka.cloud.cluster.ClusterProtocol.UuidProtocol.newBuilder(); + if (hasActorUuid()) { + subBuilder.mergeFrom(getActorUuid()); + } + input.readMessage(subBuilder, extensionRegistry); + setActorUuid(subBuilder.buildPartial()); + break; + } + case 26: { + setActorId(input.readString()); + break; + } + case 34: { + setActorClassName(input.readString()); + break; + } + case 42: { + setPayload(input.readBytes()); + break; + } + } + } + } + + + // required .RemoteDaemonMessageType messageType = 1; + public boolean hasMessageType() { + return result.hasMessageType(); + } + public akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageType getMessageType() { + return result.getMessageType(); + } + public Builder setMessageType(akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageType value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasMessageType = true; + result.messageType_ = value; + return this; + } + public Builder clearMessageType() { + result.hasMessageType = false; + result.messageType_ = akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageType.START; + return this; + } + + // optional .UuidProtocol actorUuid = 2; + public boolean hasActorUuid() { + return result.hasActorUuid(); + } + public akka.cloud.cluster.ClusterProtocol.UuidProtocol getActorUuid() { + return result.getActorUuid(); + } + public Builder setActorUuid(akka.cloud.cluster.ClusterProtocol.UuidProtocol value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasActorUuid = true; + result.actorUuid_ = value; + return this; + } + public Builder setActorUuid(akka.cloud.cluster.ClusterProtocol.UuidProtocol.Builder builderForValue) { + result.hasActorUuid = true; + result.actorUuid_ = builderForValue.build(); + return this; + } + public Builder mergeActorUuid(akka.cloud.cluster.ClusterProtocol.UuidProtocol value) { + if (result.hasActorUuid() && + result.actorUuid_ != akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance()) { + result.actorUuid_ = + akka.cloud.cluster.ClusterProtocol.UuidProtocol.newBuilder(result.actorUuid_).mergeFrom(value).buildPartial(); + } else { + result.actorUuid_ = value; + } + result.hasActorUuid = true; + return this; + } + public Builder clearActorUuid() { + result.hasActorUuid = false; + result.actorUuid_ = akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance(); + return this; + } + + // optional string actorId = 3; + public boolean hasActorId() { + return result.hasActorId(); + } + public java.lang.String getActorId() { + return result.getActorId(); + 
} + public Builder setActorId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasActorId = true; + result.actorId_ = value; + return this; + } + public Builder clearActorId() { + result.hasActorId = false; + result.actorId_ = getDefaultInstance().getActorId(); + return this; + } + + // optional string actorClassName = 4; + public boolean hasActorClassName() { + return result.hasActorClassName(); + } + public java.lang.String getActorClassName() { + return result.getActorClassName(); + } + public Builder setActorClassName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasActorClassName = true; + result.actorClassName_ = value; + return this; + } + public Builder clearActorClassName() { + result.hasActorClassName = false; + result.actorClassName_ = getDefaultInstance().getActorClassName(); + return this; + } + + // optional bytes payload = 5; + public boolean hasPayload() { + return result.hasPayload(); + } + public com.google.protobuf.ByteString getPayload() { + return result.getPayload(); + } + public Builder setPayload(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasPayload = true; + result.payload_ = value; + return this; + } + public Builder clearPayload() { + result.hasPayload = false; + result.payload_ = getDefaultInstance().getPayload(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RemoteDaemonMessageProtocol) + } + + static { + defaultInstance = new RemoteDaemonMessageProtocol(true); + akka.cloud.cluster.ClusterProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RemoteDaemonMessageProtocol) + } + + public static final class DurableMailboxMessageProtocol extends + com.google.protobuf.GeneratedMessage { + // Use DurableMailboxMessageProtocol.newBuilder() to construct. 
+ private DurableMailboxMessageProtocol() { + initFields(); + } + private DurableMailboxMessageProtocol(boolean noInit) {} + + private static final DurableMailboxMessageProtocol defaultInstance; + public static DurableMailboxMessageProtocol getDefaultInstance() { + return defaultInstance; + } + + public DurableMailboxMessageProtocol getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.ClusterProtocol.internal_static_DurableMailboxMessageProtocol_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.ClusterProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable; + } + + // required string ownerActorId = 1; + public static final int OWNERACTORID_FIELD_NUMBER = 1; + private boolean hasOwnerActorId; + private java.lang.String ownerActorId_ = ""; + public boolean hasOwnerActorId() { return hasOwnerActorId; } + public java.lang.String getOwnerActorId() { return ownerActorId_; } + + // optional string senderActorId = 2; + public static final int SENDERACTORID_FIELD_NUMBER = 2; + private boolean hasSenderActorId; + private java.lang.String senderActorId_ = ""; + public boolean hasSenderActorId() { return hasSenderActorId; } + public java.lang.String getSenderActorId() { return senderActorId_; } + + // optional .UuidProtocol futureUuid = 3; + public static final int FUTUREUUID_FIELD_NUMBER = 3; + private boolean hasFutureUuid; + private akka.cloud.cluster.ClusterProtocol.UuidProtocol futureUuid_; + public boolean hasFutureUuid() { return hasFutureUuid; } + public akka.cloud.cluster.ClusterProtocol.UuidProtocol getFutureUuid() { return futureUuid_; } + + // required bytes message = 4; + public static final int MESSAGE_FIELD_NUMBER = 4; + private boolean hasMessage; + private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasMessage() { return hasMessage; } + public com.google.protobuf.ByteString getMessage() { return message_; } + + private void initFields() { + futureUuid_ = akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance(); + } + public final boolean isInitialized() { + if (!hasOwnerActorId) return false; + if (!hasMessage) return false; + if (hasFutureUuid()) { + if (!getFutureUuid().isInitialized()) return false; + } + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasOwnerActorId()) { + output.writeString(1, getOwnerActorId()); + } + if (hasSenderActorId()) { + output.writeString(2, getSenderActorId()); + } + if (hasFutureUuid()) { + output.writeMessage(3, getFutureUuid()); + } + if (hasMessage()) { + output.writeBytes(4, getMessage()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasOwnerActorId()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(1, getOwnerActorId()); + } + if (hasSenderActorId()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(2, getSenderActorId()); + } + if (hasFutureUuid()) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getFutureUuid()); + } + if (hasMessage()) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, 
getMessage()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol result; + + // Construct using 
akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol(); + return builder; + } + + protected akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol.getDescriptor(); + } + + public akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol getDefaultInstanceForType() { + return akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol) { + return mergeFrom((akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol other) { + if (other == akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol.getDefaultInstance()) return this; + if (other.hasOwnerActorId()) { + setOwnerActorId(other.getOwnerActorId()); + } + if (other.hasSenderActorId()) { + setSenderActorId(other.getSenderActorId()); + } + if (other.hasFutureUuid()) { + mergeFutureUuid(other.getFutureUuid()); + } + if (other.hasMessage()) { + setMessage(other.getMessage()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 10: { 
+ setOwnerActorId(input.readString()); + break; + } + case 18: { + setSenderActorId(input.readString()); + break; + } + case 26: { + akka.cloud.cluster.ClusterProtocol.UuidProtocol.Builder subBuilder = akka.cloud.cluster.ClusterProtocol.UuidProtocol.newBuilder(); + if (hasFutureUuid()) { + subBuilder.mergeFrom(getFutureUuid()); + } + input.readMessage(subBuilder, extensionRegistry); + setFutureUuid(subBuilder.buildPartial()); + break; + } + case 34: { + setMessage(input.readBytes()); + break; + } + } + } + } + + + // required string ownerActorId = 1; + public boolean hasOwnerActorId() { + return result.hasOwnerActorId(); + } + public java.lang.String getOwnerActorId() { + return result.getOwnerActorId(); + } + public Builder setOwnerActorId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasOwnerActorId = true; + result.ownerActorId_ = value; + return this; + } + public Builder clearOwnerActorId() { + result.hasOwnerActorId = false; + result.ownerActorId_ = getDefaultInstance().getOwnerActorId(); + return this; + } + + // optional string senderActorId = 2; + public boolean hasSenderActorId() { + return result.hasSenderActorId(); + } + public java.lang.String getSenderActorId() { + return result.getSenderActorId(); + } + public Builder setSenderActorId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasSenderActorId = true; + result.senderActorId_ = value; + return this; + } + public Builder clearSenderActorId() { + result.hasSenderActorId = false; + result.senderActorId_ = getDefaultInstance().getSenderActorId(); + return this; + } + + // optional .UuidProtocol futureUuid = 3; + public boolean hasFutureUuid() { + return result.hasFutureUuid(); + } + public akka.cloud.cluster.ClusterProtocol.UuidProtocol getFutureUuid() { + return result.getFutureUuid(); + } + public Builder setFutureUuid(akka.cloud.cluster.ClusterProtocol.UuidProtocol value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasFutureUuid = true; + result.futureUuid_ = value; + return this; + } + public Builder setFutureUuid(akka.cloud.cluster.ClusterProtocol.UuidProtocol.Builder builderForValue) { + result.hasFutureUuid = true; + result.futureUuid_ = builderForValue.build(); + return this; + } + public Builder mergeFutureUuid(akka.cloud.cluster.ClusterProtocol.UuidProtocol value) { + if (result.hasFutureUuid() && + result.futureUuid_ != akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance()) { + result.futureUuid_ = + akka.cloud.cluster.ClusterProtocol.UuidProtocol.newBuilder(result.futureUuid_).mergeFrom(value).buildPartial(); + } else { + result.futureUuid_ = value; + } + result.hasFutureUuid = true; + return this; + } + public Builder clearFutureUuid() { + result.hasFutureUuid = false; + result.futureUuid_ = akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance(); + return this; + } + + // required bytes message = 4; + public boolean hasMessage() { + return result.hasMessage(); + } + public com.google.protobuf.ByteString getMessage() { + return result.getMessage(); + } + public Builder setMessage(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasMessage = true; + result.message_ = value; + return this; + } + public Builder clearMessage() { + result.hasMessage = false; + result.message_ = getDefaultInstance().getMessage(); + return this; + } + + // 
@@protoc_insertion_point(builder_scope:DurableMailboxMessageProtocol) + } + + static { + defaultInstance = new DurableMailboxMessageProtocol(true); + akka.cloud.cluster.ClusterProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:DurableMailboxMessageProtocol) + } + + public static final class UuidProtocol extends + com.google.protobuf.GeneratedMessage { + // Use UuidProtocol.newBuilder() to construct. + private UuidProtocol() { + initFields(); + } + private UuidProtocol(boolean noInit) {} + + private static final UuidProtocol defaultInstance; + public static UuidProtocol getDefaultInstance() { + return defaultInstance; + } + + public UuidProtocol getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.ClusterProtocol.internal_static_UuidProtocol_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.ClusterProtocol.internal_static_UuidProtocol_fieldAccessorTable; + } + + // required uint64 high = 1; + public static final int HIGH_FIELD_NUMBER = 1; + private boolean hasHigh; + private long high_ = 0L; + public boolean hasHigh() { return hasHigh; } + public long getHigh() { return high_; } + + // required uint64 low = 2; + public static final int LOW_FIELD_NUMBER = 2; + private boolean hasLow; + private long low_ = 0L; + public boolean hasLow() { return hasLow; } + public long getLow() { return low_; } + + private void initFields() { + } + public final boolean isInitialized() { + if (!hasHigh) return false; + if (!hasLow) return false; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasHigh()) { + output.writeUInt64(1, getHigh()); + } + if (hasLow()) { + output.writeUInt64(2, getLow()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasHigh()) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, getHigh()); + } + if (hasLow()) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, getLow()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); 
+ } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.ClusterProtocol.UuidProtocol parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.ClusterProtocol.UuidProtocol prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.ClusterProtocol.UuidProtocol result; + + // Construct using akka.cloud.cluster.ClusterProtocol.UuidProtocol.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.ClusterProtocol.UuidProtocol(); + return builder; + } + + protected akka.cloud.cluster.ClusterProtocol.UuidProtocol internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.ClusterProtocol.UuidProtocol(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDescriptor(); + } + + public akka.cloud.cluster.ClusterProtocol.UuidProtocol getDefaultInstanceForType() { + return akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.ClusterProtocol.UuidProtocol build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.ClusterProtocol.UuidProtocol buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + 
throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.ClusterProtocol.UuidProtocol buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + akka.cloud.cluster.ClusterProtocol.UuidProtocol returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.ClusterProtocol.UuidProtocol) { + return mergeFrom((akka.cloud.cluster.ClusterProtocol.UuidProtocol)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cloud.cluster.ClusterProtocol.UuidProtocol other) { + if (other == akka.cloud.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance()) return this; + if (other.hasHigh()) { + setHigh(other.getHigh()); + } + if (other.hasLow()) { + setLow(other.getLow()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 8: { + setHigh(input.readUInt64()); + break; + } + case 16: { + setLow(input.readUInt64()); + break; + } + } + } + } + + + // required uint64 high = 1; + public boolean hasHigh() { + return result.hasHigh(); + } + public long getHigh() { + return result.getHigh(); + } + public Builder setHigh(long value) { + result.hasHigh = true; + result.high_ = value; + return this; + } + public Builder clearHigh() { + result.hasHigh = false; + result.high_ = 0L; + return this; + } + + // required uint64 low = 2; + public boolean hasLow() { + return result.hasLow(); + } + public long getLow() { + return result.getLow(); + } + public Builder setLow(long value) { + result.hasLow = true; + result.low_ = value; + return this; + } + public Builder clearLow() { + result.hasLow = false; + result.low_ = 0L; + return this; + } + + // @@protoc_insertion_point(builder_scope:UuidProtocol) + } + + static { + defaultInstance = new UuidProtocol(true); + akka.cloud.cluster.ClusterProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UuidProtocol) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RemoteDaemonMessageProtocol_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RemoteDaemonMessageProtocol_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_DurableMailboxMessageProtocol_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_DurableMailboxMessageProtocol_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UuidProtocol_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UuidProtocol_fieldAccessorTable; + + public static 
com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\025ClusterProtocol.proto\"\250\001\n\033RemoteDaemon" + + "MessageProtocol\022-\n\013messageType\030\001 \002(\0162\030.R" + + "emoteDaemonMessageType\022 \n\tactorUuid\030\002 \001(" + + "\0132\r.UuidProtocol\022\017\n\007actorId\030\003 \001(\t\022\026\n\016act" + + "orClassName\030\004 \001(\t\022\017\n\007payload\030\005 \001(\014\"\200\001\n\035D" + + "urableMailboxMessageProtocol\022\024\n\014ownerAct" + + "orId\030\001 \002(\t\022\025\n\rsenderActorId\030\002 \001(\t\022!\n\nfut" + + "ureUuid\030\003 \001(\0132\r.UuidProtocol\022\017\n\007message\030" + + "\004 \002(\014\")\n\014UuidProtocol\022\014\n\004high\030\001 \002(\004\022\013\n\003l" + + "ow\030\002 \002(\004*\232\002\n\027RemoteDaemonMessageType\022\t\n\005", + "START\020\001\022\010\n\004STOP\020\002\022\007\n\003USE\020\003\022\013\n\007RELEASE\020\004\022" + + "\022\n\016MAKE_AVAILABLE\020\005\022\024\n\020MAKE_UNAVAILABLE\020" + + "\006\022\016\n\nDISCONNECT\020\007\022\r\n\tRECONNECT\020\010\022\n\n\006RESI" + + "GN\020\t\022\031\n\025FAIL_OVER_CONNECTIONS\020\n\022\026\n\022FUNCT" + + "ION_FUN0_UNIT\020\013\022\025\n\021FUNCTION_FUN0_ANY\020\014\022\032" + + "\n\026FUNCTION_FUN1_ARG_UNIT\020\r\022\031\n\025FUNCTION_F" + + "UN1_ARG_ANY\020\016B\026\n\022akka.cloud.clusterH\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_RemoteDaemonMessageProtocol_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_RemoteDaemonMessageProtocol_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RemoteDaemonMessageProtocol_descriptor, + new java.lang.String[] { "MessageType", "ActorUuid", "ActorId", "ActorClassName", "Payload", }, + akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.class, + akka.cloud.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.Builder.class); + internal_static_DurableMailboxMessageProtocol_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_DurableMailboxMessageProtocol_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_DurableMailboxMessageProtocol_descriptor, + new java.lang.String[] { "OwnerActorId", "SenderActorId", "FutureUuid", "Message", }, + akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol.class, + akka.cloud.cluster.ClusterProtocol.DurableMailboxMessageProtocol.Builder.class); + internal_static_UuidProtocol_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_UuidProtocol_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UuidProtocol_descriptor, + new java.lang.String[] { "High", "Low", }, + akka.cloud.cluster.ClusterProtocol.UuidProtocol.class, + akka.cloud.cluster.ClusterProtocol.UuidProtocol.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + public static void 
internalForceInit() {} + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/akka-cluster/src/main/java/akka/cloud/cluster/EventProtocol.java b/akka-cluster/src/main/java/akka/cloud/cluster/EventProtocol.java new file mode 100644 index 0000000000..278e5b3a75 --- /dev/null +++ b/akka-cluster/src/main/java/akka/cloud/cluster/EventProtocol.java @@ -0,0 +1,2485 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: EventProtocol.proto + +package akka.cloud.cluster; + +public final class EventProtocol { + private EventProtocol() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public static final class GenericLoggingEvent extends + com.google.protobuf.GeneratedMessage { + // Use GenericLoggingEvent.newBuilder() to construct. + private GenericLoggingEvent() { + initFields(); + } + private GenericLoggingEvent(boolean noInit) {} + + private static final GenericLoggingEvent defaultInstance; + public static GenericLoggingEvent getDefaultInstance() { + return defaultInstance; + } + + public GenericLoggingEvent getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.EventProtocol.internal_static_GenericLoggingEvent_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.EventProtocol.internal_static_GenericLoggingEvent_fieldAccessorTable; + } + + // required string context = 1; + public static final int CONTEXT_FIELD_NUMBER = 1; + private boolean hasContext; + private java.lang.String context_ = ""; + public boolean hasContext() { return hasContext; } + public java.lang.String getContext() { return context_; } + + // required string message = 2; + public static final int MESSAGE_FIELD_NUMBER = 2; + private boolean hasMessage; + private java.lang.String message_ = ""; + public boolean hasMessage() { return hasMessage; } + public java.lang.String getMessage() { return message_; } + + // required uint64 time = 3; + public static final int TIME_FIELD_NUMBER = 3; + private boolean hasTime; + private long time_ = 0L; + public boolean hasTime() { return hasTime; } + public long getTime() { return time_; } + + private void initFields() { + } + public final boolean isInitialized() { + if (!hasContext) return false; + if (!hasMessage) return false; + if (!hasTime) return false; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasContext()) { + output.writeString(1, getContext()); + } + if (hasMessage()) { + output.writeString(2, getMessage()); + } + if (hasTime()) { + output.writeUInt64(3, getTime()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasContext()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(1, getContext()); + } + if (hasMessage()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(2, getMessage()); + } + if (hasTime()) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, getTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.GenericLoggingEvent parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.EventProtocol.GenericLoggingEvent prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.EventProtocol.GenericLoggingEvent result; + + // Construct using akka.cloud.cluster.EventProtocol.GenericLoggingEvent.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.EventProtocol.GenericLoggingEvent(); + return builder; + } + + protected akka.cloud.cluster.EventProtocol.GenericLoggingEvent internalGetResult() { + return result; + } + + public 
Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.EventProtocol.GenericLoggingEvent(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.EventProtocol.GenericLoggingEvent.getDescriptor(); + } + + public akka.cloud.cluster.EventProtocol.GenericLoggingEvent getDefaultInstanceForType() { + return akka.cloud.cluster.EventProtocol.GenericLoggingEvent.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.EventProtocol.GenericLoggingEvent build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.EventProtocol.GenericLoggingEvent buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.EventProtocol.GenericLoggingEvent buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + akka.cloud.cluster.EventProtocol.GenericLoggingEvent returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.EventProtocol.GenericLoggingEvent) { + return mergeFrom((akka.cloud.cluster.EventProtocol.GenericLoggingEvent)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cloud.cluster.EventProtocol.GenericLoggingEvent other) { + if (other == akka.cloud.cluster.EventProtocol.GenericLoggingEvent.getDefaultInstance()) return this; + if (other.hasContext()) { + setContext(other.getContext()); + } + if (other.hasMessage()) { + setMessage(other.getMessage()); + } + if (other.hasTime()) { + setTime(other.getTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 10: { + setContext(input.readString()); + break; + } + case 18: { + setMessage(input.readString()); + break; + } + case 24: { + setTime(input.readUInt64()); + break; + } + } + } + } + + + // required string context = 1; + public boolean hasContext() { + return result.hasContext(); + } + public java.lang.String getContext() { + return result.getContext(); + } + public Builder setContext(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasContext = true; + result.context_ = value; + return this; + } + public Builder clearContext() { + result.hasContext = false; + result.context_ = getDefaultInstance().getContext(); + return this; + } 
+ + // required string message = 2; + public boolean hasMessage() { + return result.hasMessage(); + } + public java.lang.String getMessage() { + return result.getMessage(); + } + public Builder setMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasMessage = true; + result.message_ = value; + return this; + } + public Builder clearMessage() { + result.hasMessage = false; + result.message_ = getDefaultInstance().getMessage(); + return this; + } + + // required uint64 time = 3; + public boolean hasTime() { + return result.hasTime(); + } + public long getTime() { + return result.getTime(); + } + public Builder setTime(long value) { + result.hasTime = true; + result.time_ = value; + return this; + } + public Builder clearTime() { + result.hasTime = false; + result.time_ = 0L; + return this; + } + + // @@protoc_insertion_point(builder_scope:GenericLoggingEvent) + } + + static { + defaultInstance = new GenericLoggingEvent(true); + akka.cloud.cluster.EventProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GenericLoggingEvent) + } + + public static final class AuditEvent extends + com.google.protobuf.GeneratedMessage { + // Use AuditEvent.newBuilder() to construct. + private AuditEvent() { + initFields(); + } + private AuditEvent(boolean noInit) {} + + private static final AuditEvent defaultInstance; + public static AuditEvent getDefaultInstance() { + return defaultInstance; + } + + public AuditEvent getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.EventProtocol.internal_static_AuditEvent_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.EventProtocol.internal_static_AuditEvent_fieldAccessorTable; + } + + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private boolean hasName; + private java.lang.String name_ = ""; + public boolean hasName() { return hasName; } + public java.lang.String getName() { return name_; } + + // required string context = 2; + public static final int CONTEXT_FIELD_NUMBER = 2; + private boolean hasContext; + private java.lang.String context_ = ""; + public boolean hasContext() { return hasContext; } + public java.lang.String getContext() { return context_; } + + // required string message = 3; + public static final int MESSAGE_FIELD_NUMBER = 3; + private boolean hasMessage; + private java.lang.String message_ = ""; + public boolean hasMessage() { return hasMessage; } + public java.lang.String getMessage() { return message_; } + + // required uint64 time = 4; + public static final int TIME_FIELD_NUMBER = 4; + private boolean hasTime; + private long time_ = 0L; + public boolean hasTime() { return hasTime; } + public long getTime() { return time_; } + + private void initFields() { + } + public final boolean isInitialized() { + if (!hasName) return false; + if (!hasContext) return false; + if (!hasMessage) return false; + if (!hasTime) return false; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasName()) { + output.writeString(1, getName()); + } + if (hasContext()) { + output.writeString(2, getContext()); + } + if (hasMessage()) { + output.writeString(3, getMessage()); + } + if (hasTime()) { + 
output.writeUInt64(4, getTime()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasName()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(1, getName()); + } + if (hasContext()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(2, getContext()); + } + if (hasMessage()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(3, getMessage()); + } + if (hasTime()) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, getTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.EventProtocol.AuditEvent parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AuditEvent parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + 
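+ // --- Reviewer sketch, not protoc output: constructing and re-parsing an
+ // AuditEvent via the generated builder and parseFrom above. The field
+ // values are hypothetical.
+ //
+ //   AuditEvent audit = AuditEvent.newBuilder()
+ //       .setName("login")
+ //       .setContext("node-1")
+ //       .setMessage("user authenticated")
+ //       .setTime(System.currentTimeMillis())
+ //       .build();
+ //   AuditEvent parsed = AuditEvent.parseFrom(audit.toByteArray());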
public static Builder newBuilder(akka.cloud.cluster.EventProtocol.AuditEvent prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.EventProtocol.AuditEvent result; + + // Construct using akka.cloud.cluster.EventProtocol.AuditEvent.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.EventProtocol.AuditEvent(); + return builder; + } + + protected akka.cloud.cluster.EventProtocol.AuditEvent internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.EventProtocol.AuditEvent(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.EventProtocol.AuditEvent.getDescriptor(); + } + + public akka.cloud.cluster.EventProtocol.AuditEvent getDefaultInstanceForType() { + return akka.cloud.cluster.EventProtocol.AuditEvent.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.EventProtocol.AuditEvent build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.EventProtocol.AuditEvent buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.EventProtocol.AuditEvent buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + akka.cloud.cluster.EventProtocol.AuditEvent returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.EventProtocol.AuditEvent) { + return mergeFrom((akka.cloud.cluster.EventProtocol.AuditEvent)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cloud.cluster.EventProtocol.AuditEvent other) { + if (other == akka.cloud.cluster.EventProtocol.AuditEvent.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + if (other.hasContext()) { + setContext(other.getContext()); + } + if (other.hasMessage()) { + setMessage(other.getMessage()); + } + if (other.hasTime()) { + setTime(other.getTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 10: { + 
setName(input.readString()); + break; + } + case 18: { + setContext(input.readString()); + break; + } + case 26: { + setMessage(input.readString()); + break; + } + case 32: { + setTime(input.readUInt64()); + break; + } + } + } + } + + + // required string name = 1; + public boolean hasName() { + return result.hasName(); + } + public java.lang.String getName() { + return result.getName(); + } + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasName = true; + result.name_ = value; + return this; + } + public Builder clearName() { + result.hasName = false; + result.name_ = getDefaultInstance().getName(); + return this; + } + + // required string context = 2; + public boolean hasContext() { + return result.hasContext(); + } + public java.lang.String getContext() { + return result.getContext(); + } + public Builder setContext(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasContext = true; + result.context_ = value; + return this; + } + public Builder clearContext() { + result.hasContext = false; + result.context_ = getDefaultInstance().getContext(); + return this; + } + + // required string message = 3; + public boolean hasMessage() { + return result.hasMessage(); + } + public java.lang.String getMessage() { + return result.getMessage(); + } + public Builder setMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasMessage = true; + result.message_ = value; + return this; + } + public Builder clearMessage() { + result.hasMessage = false; + result.message_ = getDefaultInstance().getMessage(); + return this; + } + + // required uint64 time = 4; + public boolean hasTime() { + return result.hasTime(); + } + public long getTime() { + return result.getTime(); + } + public Builder setTime(long value) { + result.hasTime = true; + result.time_ = value; + return this; + } + public Builder clearTime() { + result.hasTime = false; + result.time_ = 0L; + return this; + } + + // @@protoc_insertion_point(builder_scope:AuditEvent) + } + + static { + defaultInstance = new AuditEvent(true); + akka.cloud.cluster.EventProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AuditEvent) + } + + public static final class CounterAddEvent extends + com.google.protobuf.GeneratedMessage { + // Use CounterAddEvent.newBuilder() to construct. 
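+ // --- Reviewer sketch, not protoc output: this message carries a named
+ // counter increment. A hypothetical producer/consumer pair using the
+ // delimited-stream helpers generated below ('outputStream'/'inputStream'
+ // are assumed java.io streams):
+ //
+ //   CounterAddEvent.newBuilder()
+ //       .setName("requests").setDelta(1L).build()
+ //       .writeDelimitedTo(outputStream);
+ //   CounterAddEvent next =
+ //       CounterAddEvent.parseDelimitedFrom(inputStream);  // null at EOF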
+ private CounterAddEvent() { + initFields(); + } + private CounterAddEvent(boolean noInit) {} + + private static final CounterAddEvent defaultInstance; + public static CounterAddEvent getDefaultInstance() { + return defaultInstance; + } + + public CounterAddEvent getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.EventProtocol.internal_static_CounterAddEvent_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.EventProtocol.internal_static_CounterAddEvent_fieldAccessorTable; + } + + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private boolean hasName; + private java.lang.String name_ = ""; + public boolean hasName() { return hasName; } + public java.lang.String getName() { return name_; } + + // required uint64 delta = 2; + public static final int DELTA_FIELD_NUMBER = 2; + private boolean hasDelta; + private long delta_ = 0L; + public boolean hasDelta() { return hasDelta; } + public long getDelta() { return delta_; } + + private void initFields() { + } + public final boolean isInitialized() { + if (!hasName) return false; + if (!hasDelta) return false; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasName()) { + output.writeString(1, getName()); + } + if (hasDelta()) { + output.writeUInt64(2, getDelta()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasName()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(1, getName()); + } + if (hasDelta()) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, getDelta()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterAddEvent parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.EventProtocol.CounterAddEvent prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.EventProtocol.CounterAddEvent result; + + // Construct using akka.cloud.cluster.EventProtocol.CounterAddEvent.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.EventProtocol.CounterAddEvent(); + return builder; + } + + protected akka.cloud.cluster.EventProtocol.CounterAddEvent internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.EventProtocol.CounterAddEvent(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.EventProtocol.CounterAddEvent.getDescriptor(); + } + + public akka.cloud.cluster.EventProtocol.CounterAddEvent getDefaultInstanceForType() { + return akka.cloud.cluster.EventProtocol.CounterAddEvent.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.EventProtocol.CounterAddEvent build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.EventProtocol.CounterAddEvent buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.EventProtocol.CounterAddEvent buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + 
akka.cloud.cluster.EventProtocol.CounterAddEvent returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.EventProtocol.CounterAddEvent) { + return mergeFrom((akka.cloud.cluster.EventProtocol.CounterAddEvent)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cloud.cluster.EventProtocol.CounterAddEvent other) { + if (other == akka.cloud.cluster.EventProtocol.CounterAddEvent.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + if (other.hasDelta()) { + setDelta(other.getDelta()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 10: { + setName(input.readString()); + break; + } + case 16: { + setDelta(input.readUInt64()); + break; + } + } + } + } + + + // required string name = 1; + public boolean hasName() { + return result.hasName(); + } + public java.lang.String getName() { + return result.getName(); + } + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasName = true; + result.name_ = value; + return this; + } + public Builder clearName() { + result.hasName = false; + result.name_ = getDefaultInstance().getName(); + return this; + } + + // required uint64 delta = 2; + public boolean hasDelta() { + return result.hasDelta(); + } + public long getDelta() { + return result.getDelta(); + } + public Builder setDelta(long value) { + result.hasDelta = true; + result.delta_ = value; + return this; + } + public Builder clearDelta() { + result.hasDelta = false; + result.delta_ = 0L; + return this; + } + + // @@protoc_insertion_point(builder_scope:CounterAddEvent) + } + + static { + defaultInstance = new CounterAddEvent(true); + akka.cloud.cluster.EventProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CounterAddEvent) + } + + public static final class CounterSetEvent extends + com.google.protobuf.GeneratedMessage { + // Use CounterSetEvent.newBuilder() to construct. 
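+ // --- Reviewer sketch, not protoc output: value here is uint32, surfaced
+ // as a Java int by the generated accessors below. Hypothetical usage:
+ //
+ //   CounterSetEvent set = CounterSetEvent.newBuilder()
+ //       .setName("requests")
+ //       .setValue(0)
+ //       .build();
+ //   int v = CounterSetEvent.parseFrom(set.toByteArray()).getValue();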
+ private CounterSetEvent() { + initFields(); + } + private CounterSetEvent(boolean noInit) {} + + private static final CounterSetEvent defaultInstance; + public static CounterSetEvent getDefaultInstance() { + return defaultInstance; + } + + public CounterSetEvent getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.EventProtocol.internal_static_CounterSetEvent_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.EventProtocol.internal_static_CounterSetEvent_fieldAccessorTable; + } + + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private boolean hasName; + private java.lang.String name_ = ""; + public boolean hasName() { return hasName; } + public java.lang.String getName() { return name_; } + + // required uint32 value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private boolean hasValue; + private int value_ = 0; + public boolean hasValue() { return hasValue; } + public int getValue() { return value_; } + + private void initFields() { + } + public final boolean isInitialized() { + if (!hasName) return false; + if (!hasValue) return false; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasName()) { + output.writeString(1, getName()); + } + if (hasValue()) { + output.writeUInt32(2, getValue()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasName()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(1, getName()); + } + if (hasValue()) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, getValue()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { 
+ return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterSetEvent parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.EventProtocol.CounterSetEvent prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.EventProtocol.CounterSetEvent result; + + // Construct using akka.cloud.cluster.EventProtocol.CounterSetEvent.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.EventProtocol.CounterSetEvent(); + return builder; + } + + protected akka.cloud.cluster.EventProtocol.CounterSetEvent internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.EventProtocol.CounterSetEvent(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.EventProtocol.CounterSetEvent.getDescriptor(); + } + + public akka.cloud.cluster.EventProtocol.CounterSetEvent getDefaultInstanceForType() { + return akka.cloud.cluster.EventProtocol.CounterSetEvent.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.EventProtocol.CounterSetEvent build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.EventProtocol.CounterSetEvent buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.EventProtocol.CounterSetEvent buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + akka.cloud.cluster.EventProtocol.CounterSetEvent returnMe = 
result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.EventProtocol.CounterSetEvent) { + return mergeFrom((akka.cloud.cluster.EventProtocol.CounterSetEvent)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cloud.cluster.EventProtocol.CounterSetEvent other) { + if (other == akka.cloud.cluster.EventProtocol.CounterSetEvent.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 10: { + setName(input.readString()); + break; + } + case 16: { + setValue(input.readUInt32()); + break; + } + } + } + } + + + // required string name = 1; + public boolean hasName() { + return result.hasName(); + } + public java.lang.String getName() { + return result.getName(); + } + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasName = true; + result.name_ = value; + return this; + } + public Builder clearName() { + result.hasName = false; + result.name_ = getDefaultInstance().getName(); + return this; + } + + // required uint32 value = 2; + public boolean hasValue() { + return result.hasValue(); + } + public int getValue() { + return result.getValue(); + } + public Builder setValue(int value) { + result.hasValue = true; + result.value_ = value; + return this; + } + public Builder clearValue() { + result.hasValue = false; + result.value_ = 0; + return this; + } + + // @@protoc_insertion_point(builder_scope:CounterSetEvent) + } + + static { + defaultInstance = new CounterSetEvent(true); + akka.cloud.cluster.EventProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CounterSetEvent) + } + + public static final class CounterResetEvent extends + com.google.protobuf.GeneratedMessage { + // Use CounterResetEvent.newBuilder() to construct. 
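+ // --- Reviewer sketch, not protoc output: the reset message carries only
+ // the counter name, so a single-setter build suffices:
+ //
+ //   CounterResetEvent reset =
+ //       CounterResetEvent.newBuilder().setName("requests").build();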
+ private CounterResetEvent() { + initFields(); + } + private CounterResetEvent(boolean noInit) {} + + private static final CounterResetEvent defaultInstance; + public static CounterResetEvent getDefaultInstance() { + return defaultInstance; + } + + public CounterResetEvent getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.EventProtocol.internal_static_CounterResetEvent_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.EventProtocol.internal_static_CounterResetEvent_fieldAccessorTable; + } + + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private boolean hasName; + private java.lang.String name_ = ""; + public boolean hasName() { return hasName; } + public java.lang.String getName() { return name_; } + + private void initFields() { + } + public final boolean isInitialized() { + if (!hasName) return false; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasName()) { + output.writeString(1, getName()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasName()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(1, getName()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static 
akka.cloud.cluster.EventProtocol.CounterResetEvent parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.CounterResetEvent parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.EventProtocol.CounterResetEvent prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.EventProtocol.CounterResetEvent result; + + // Construct using akka.cloud.cluster.EventProtocol.CounterResetEvent.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.EventProtocol.CounterResetEvent(); + return builder; + } + + protected akka.cloud.cluster.EventProtocol.CounterResetEvent internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.EventProtocol.CounterResetEvent(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.EventProtocol.CounterResetEvent.getDescriptor(); + } + + public akka.cloud.cluster.EventProtocol.CounterResetEvent getDefaultInstanceForType() { + return akka.cloud.cluster.EventProtocol.CounterResetEvent.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.EventProtocol.CounterResetEvent build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.EventProtocol.CounterResetEvent buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.EventProtocol.CounterResetEvent buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + akka.cloud.cluster.EventProtocol.CounterResetEvent returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.EventProtocol.CounterResetEvent) { + return mergeFrom((akka.cloud.cluster.EventProtocol.CounterResetEvent)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(akka.cloud.cluster.EventProtocol.CounterResetEvent other) { + if (other == akka.cloud.cluster.EventProtocol.CounterResetEvent.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 10: { + setName(input.readString()); + break; + } + } + } + } + + + // required string name = 1; + public boolean hasName() { + return result.hasName(); + } + public java.lang.String getName() { + return result.getName(); + } + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasName = true; + result.name_ = value; + return this; + } + public Builder clearName() { + result.hasName = false; + result.name_ = getDefaultInstance().getName(); + return this; + } + + // @@protoc_insertion_point(builder_scope:CounterResetEvent) + } + + static { + defaultInstance = new CounterResetEvent(true); + akka.cloud.cluster.EventProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CounterResetEvent) + } + + public static final class AverageAddEvent extends + com.google.protobuf.GeneratedMessage { + // Use AverageAddEvent.newBuilder() to construct. 
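+ // --- Reviewer sketch, not protoc output: unlike CounterSetEvent's uint32
+ // value, this value is uint64 and maps to a Java long. Hypothetical
+ // metric name:
+ //
+ //   AverageAddEvent sample = AverageAddEvent.newBuilder()
+ //       .setName("latency-ms")
+ //       .setValue(42L)
+ //       .build();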
+ private AverageAddEvent() { + initFields(); + } + private AverageAddEvent(boolean noInit) {} + + private static final AverageAddEvent defaultInstance; + public static AverageAddEvent getDefaultInstance() { + return defaultInstance; + } + + public AverageAddEvent getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.EventProtocol.internal_static_AverageAddEvent_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.EventProtocol.internal_static_AverageAddEvent_fieldAccessorTable; + } + + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private boolean hasName; + private java.lang.String name_ = ""; + public boolean hasName() { return hasName; } + public java.lang.String getName() { return name_; } + + // required uint64 value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private boolean hasValue; + private long value_ = 0L; + public boolean hasValue() { return hasValue; } + public long getValue() { return value_; } + + private void initFields() { + } + public final boolean isInitialized() { + if (!hasName) return false; + if (!hasValue) return false; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasName()) { + output.writeString(1, getName()); + } + if (hasValue()) { + output.writeUInt64(2, getValue()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasName()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(1, getName()); + } + if (hasValue()) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, getValue()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageAddEvent parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.EventProtocol.AverageAddEvent prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.EventProtocol.AverageAddEvent result; + + // Construct using akka.cloud.cluster.EventProtocol.AverageAddEvent.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.EventProtocol.AverageAddEvent(); + return builder; + } + + protected akka.cloud.cluster.EventProtocol.AverageAddEvent internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.EventProtocol.AverageAddEvent(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.EventProtocol.AverageAddEvent.getDescriptor(); + } + + public akka.cloud.cluster.EventProtocol.AverageAddEvent getDefaultInstanceForType() { + return akka.cloud.cluster.EventProtocol.AverageAddEvent.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.EventProtocol.AverageAddEvent build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.EventProtocol.AverageAddEvent buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.EventProtocol.AverageAddEvent buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + 
akka.cloud.cluster.EventProtocol.AverageAddEvent returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.EventProtocol.AverageAddEvent) { + return mergeFrom((akka.cloud.cluster.EventProtocol.AverageAddEvent)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cloud.cluster.EventProtocol.AverageAddEvent other) { + if (other == akka.cloud.cluster.EventProtocol.AverageAddEvent.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 10: { + setName(input.readString()); + break; + } + case 16: { + setValue(input.readUInt64()); + break; + } + } + } + } + + + // required string name = 1; + public boolean hasName() { + return result.hasName(); + } + public java.lang.String getName() { + return result.getName(); + } + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasName = true; + result.name_ = value; + return this; + } + public Builder clearName() { + result.hasName = false; + result.name_ = getDefaultInstance().getName(); + return this; + } + + // required uint64 value = 2; + public boolean hasValue() { + return result.hasValue(); + } + public long getValue() { + return result.getValue(); + } + public Builder setValue(long value) { + result.hasValue = true; + result.value_ = value; + return this; + } + public Builder clearValue() { + result.hasValue = false; + result.value_ = 0L; + return this; + } + + // @@protoc_insertion_point(builder_scope:AverageAddEvent) + } + + static { + defaultInstance = new AverageAddEvent(true); + akka.cloud.cluster.EventProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AverageAddEvent) + } + + public static final class AverageResetEvent extends + com.google.protobuf.GeneratedMessage { + // Use AverageResetEvent.newBuilder() to construct. 
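+ // --- Reviewer sketch, not protoc output: a toBuilder() round-trip using
+ // the generated toBuilder below, e.g. to rename an existing event
+ // ('original' is an assumed AverageResetEvent instance):
+ //
+ //   AverageResetEvent renamed =
+ //       original.toBuilder().setName("latency-ms").build();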
+ private AverageResetEvent() { + initFields(); + } + private AverageResetEvent(boolean noInit) {} + + private static final AverageResetEvent defaultInstance; + public static AverageResetEvent getDefaultInstance() { + return defaultInstance; + } + + public AverageResetEvent getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cloud.cluster.EventProtocol.internal_static_AverageResetEvent_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cloud.cluster.EventProtocol.internal_static_AverageResetEvent_fieldAccessorTable; + } + + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private boolean hasName; + private java.lang.String name_ = ""; + public boolean hasName() { return hasName; } + public java.lang.String getName() { return name_; } + + private void initFields() { + } + public final boolean isInitialized() { + if (!hasName) return false; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (hasName()) { + output.writeString(1, getName()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (hasName()) { + size += com.google.protobuf.CodedOutputStream + .computeStringSize(1, getName()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static 
akka.cloud.cluster.EventProtocol.AverageResetEvent parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static akka.cloud.cluster.EventProtocol.AverageResetEvent parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cloud.cluster.EventProtocol.AverageResetEvent prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder { + private akka.cloud.cluster.EventProtocol.AverageResetEvent result; + + // Construct using akka.cloud.cluster.EventProtocol.AverageResetEvent.newBuilder() + private Builder() {} + + private static Builder create() { + Builder builder = new Builder(); + builder.result = new akka.cloud.cluster.EventProtocol.AverageResetEvent(); + return builder; + } + + protected akka.cloud.cluster.EventProtocol.AverageResetEvent internalGetResult() { + return result; + } + + public Builder clear() { + if (result == null) { + throw new IllegalStateException( + "Cannot call clear() after build()."); + } + result = new akka.cloud.cluster.EventProtocol.AverageResetEvent(); + return this; + } + + public Builder clone() { + return create().mergeFrom(result); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cloud.cluster.EventProtocol.AverageResetEvent.getDescriptor(); + } + + public akka.cloud.cluster.EventProtocol.AverageResetEvent getDefaultInstanceForType() { + return akka.cloud.cluster.EventProtocol.AverageResetEvent.getDefaultInstance(); + } + + public boolean isInitialized() { + return result.isInitialized(); + } + public akka.cloud.cluster.EventProtocol.AverageResetEvent build() { + if (result != null && !isInitialized()) { + throw newUninitializedMessageException(result); + } + return buildPartial(); + } + + private akka.cloud.cluster.EventProtocol.AverageResetEvent buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + if (!isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return buildPartial(); + } + + public akka.cloud.cluster.EventProtocol.AverageResetEvent buildPartial() { + if (result == null) { + throw new IllegalStateException( + "build() has already been called on this Builder."); + } + akka.cloud.cluster.EventProtocol.AverageResetEvent returnMe = result; + result = null; + return returnMe; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cloud.cluster.EventProtocol.AverageResetEvent) { + return mergeFrom((akka.cloud.cluster.EventProtocol.AverageResetEvent)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(akka.cloud.cluster.EventProtocol.AverageResetEvent other) { + if (other == akka.cloud.cluster.EventProtocol.AverageResetEvent.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + return this; + } + break; + } + case 10: { + setName(input.readString()); + break; + } + } + } + } + + + // required string name = 1; + public boolean hasName() { + return result.hasName(); + } + public java.lang.String getName() { + return result.getName(); + } + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + result.hasName = true; + result.name_ = value; + return this; + } + public Builder clearName() { + result.hasName = false; + result.name_ = getDefaultInstance().getName(); + return this; + } + + // @@protoc_insertion_point(builder_scope:AverageResetEvent) + } + + static { + defaultInstance = new AverageResetEvent(true); + akka.cloud.cluster.EventProtocol.internalForceInit(); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AverageResetEvent) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GenericLoggingEvent_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GenericLoggingEvent_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AuditEvent_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AuditEvent_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CounterAddEvent_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CounterAddEvent_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CounterSetEvent_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CounterSetEvent_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CounterResetEvent_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CounterResetEvent_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AverageAddEvent_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AverageAddEvent_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AverageResetEvent_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AverageResetEvent_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static 
com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\023EventProtocol.proto\"E\n\023GenericLoggingE" + + "vent\022\017\n\007context\030\001 \002(\t\022\017\n\007message\030\002 \002(\t\022\014" + + "\n\004time\030\003 \002(\004\"J\n\nAuditEvent\022\014\n\004name\030\001 \002(\t" + + "\022\017\n\007context\030\002 \002(\t\022\017\n\007message\030\003 \002(\t\022\014\n\004ti" + + "me\030\004 \002(\004\".\n\017CounterAddEvent\022\014\n\004name\030\001 \002(" + + "\t\022\r\n\005delta\030\002 \002(\004\".\n\017CounterSetEvent\022\014\n\004n" + + "ame\030\001 \002(\t\022\r\n\005value\030\002 \002(\r\"!\n\021CounterReset" + + "Event\022\014\n\004name\030\001 \002(\t\".\n\017AverageAddEvent\022\014" + + "\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\004\"!\n\021AverageRe" + + "setEvent\022\014\n\004name\030\001 \002(\tB\026\n\022akka.cloud.clu", + "sterH\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_GenericLoggingEvent_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_GenericLoggingEvent_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GenericLoggingEvent_descriptor, + new java.lang.String[] { "Context", "Message", "Time", }, + akka.cloud.cluster.EventProtocol.GenericLoggingEvent.class, + akka.cloud.cluster.EventProtocol.GenericLoggingEvent.Builder.class); + internal_static_AuditEvent_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_AuditEvent_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AuditEvent_descriptor, + new java.lang.String[] { "Name", "Context", "Message", "Time", }, + akka.cloud.cluster.EventProtocol.AuditEvent.class, + akka.cloud.cluster.EventProtocol.AuditEvent.Builder.class); + internal_static_CounterAddEvent_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_CounterAddEvent_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CounterAddEvent_descriptor, + new java.lang.String[] { "Name", "Delta", }, + akka.cloud.cluster.EventProtocol.CounterAddEvent.class, + akka.cloud.cluster.EventProtocol.CounterAddEvent.Builder.class); + internal_static_CounterSetEvent_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_CounterSetEvent_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CounterSetEvent_descriptor, + new java.lang.String[] { "Name", "Value", }, + akka.cloud.cluster.EventProtocol.CounterSetEvent.class, + akka.cloud.cluster.EventProtocol.CounterSetEvent.Builder.class); + internal_static_CounterResetEvent_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_CounterResetEvent_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CounterResetEvent_descriptor, + new java.lang.String[] { "Name", }, + akka.cloud.cluster.EventProtocol.CounterResetEvent.class, + akka.cloud.cluster.EventProtocol.CounterResetEvent.Builder.class); + internal_static_AverageAddEvent_descriptor = + getDescriptor().getMessageTypes().get(5); + 
internal_static_AverageAddEvent_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AverageAddEvent_descriptor, + new java.lang.String[] { "Name", "Value", }, + akka.cloud.cluster.EventProtocol.AverageAddEvent.class, + akka.cloud.cluster.EventProtocol.AverageAddEvent.Builder.class); + internal_static_AverageResetEvent_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_AverageResetEvent_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AverageResetEvent_descriptor, + new java.lang.String[] { "Name", }, + akka.cloud.cluster.EventProtocol.AverageResetEvent.class, + akka.cloud.cluster.EventProtocol.AverageResetEvent.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + public static void internalForceInit() {} + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/akka-cluster/src/main/java/akka/cloud/cluster/LocalBookKeeper.java b/akka-cluster/src/main/java/akka/cloud/cluster/LocalBookKeeper.java new file mode 100644 index 0000000000..dbd6ff26c9 --- /dev/null +++ b/akka-cluster/src/main/java/akka/cloud/cluster/LocalBookKeeper.java @@ -0,0 +1,187 @@ +package akka.cloud.cluster; + +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+
+import org.apache.bookkeeper.proto.BookieServer;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.server.NIOServerCnxnFactory;
+import org.apache.zookeeper.server.ZooKeeperServer;
+
+public class LocalBookKeeper {
+  public static final int CONNECTION_TIMEOUT = 30000;
+
+  int numberOfBookies;
+
+  public LocalBookKeeper() {
+    numberOfBookies = 3;
+  }
+
+  public LocalBookKeeper(int numberOfBookies){
+    this();
+    this.numberOfBookies = numberOfBookies;
+  }
+
+  private final String HOSTPORT = "127.0.0.1:2181";
+  NIOServerCnxnFactory serverFactory;
+  ZooKeeperServer zks;
+  ZooKeeper zkc;
+  int ZooKeeperDefaultPort = 2181;
+  File ZkTmpDir;
+
+  //BookKeeper variables
+  File tmpDirs[];
+  BookieServer bs[];
+  Integer initialPort = 5000;
+
+  public void runZookeeper(int maxCC) throws IOException{
+    // create a ZooKeeper server(dataDir, dataLogDir, port)
+    //ServerStats.registerAsConcrete();
+    //ClientBase.setupTestEnv();
+    ZkTmpDir = File.createTempFile("zookeeper", "test");
+    ZkTmpDir.delete();
+    ZkTmpDir.mkdir();
+
+    try {
+      zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort);
+      serverFactory = new NIOServerCnxnFactory();
+      serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), maxCC);
+      serverFactory.startup(zks);
+    } catch (Exception e) {
+      // best-effort startup for local testing; a failed start is detected by waitForServerUp below
+      e.printStackTrace();
+    }
+
+    if (!waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT)) {
+      throw new IOException("ZooKeeper server did not come up within " + CONNECTION_TIMEOUT + " ms");
+    }
+  }
+
+  public void initializeZookeper(){
+    //initialize the zk client with values
+    try {
+      zkc = new ZooKeeper("127.0.0.1", ZooKeeperDefaultPort, new emptyWatcher());
+      zkc.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+      zkc.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+      // No need to create an entry for each requested bookie anymore as the
+      // BookieServers will register themselves with ZooKeeper on startup.
+    } catch (KeeperException e) {
+      // ignored: the znodes may already exist from a previous run
+    } catch (InterruptedException e) {
+      // ignored: best-effort initialization for local testing
+    } catch (IOException e) {
+      // ignored: best-effort initialization for local testing
+    }
+  }
+
+  public void runBookies() throws IOException{
+    // Create Bookie Servers (B1, B2, B3)
+
+    tmpDirs = new File[numberOfBookies];
+    bs = new BookieServer[numberOfBookies];
+
+    for(int i = 0; i < numberOfBookies; i++){
+      tmpDirs[i] = File.createTempFile("bookie" + Integer.toString(i), "test");
+      tmpDirs[i].delete();
+      tmpDirs[i].mkdir();
+
+      bs[i] = new BookieServer(initialPort + i, InetAddress.getLocalHost().getHostAddress() + ":"
+          + ZooKeeperDefaultPort, tmpDirs[i], new File[]{tmpDirs[i]});
+      bs[i].start();
+    }
+  }
+
+  public static void main(String[] args) throws IOException, InterruptedException {
+    if(args.length < 1){
+      usage();
+      System.exit(-1);
+    }
+    LocalBookKeeper lb = new LocalBookKeeper(Integer.parseInt(args[0]));
+    lb.runZookeeper(1000);
+    lb.initializeZookeper();
+    lb.runBookies();
+    while (true){
+      Thread.sleep(5000);
+    }
+  }
+
+  private static void usage() {
+    System.err.println("Usage: LocalBookKeeper number-of-bookies");
+  }
+
+  /* Used for testing purposes */
+  class emptyWatcher implements Watcher{
+    public void process(WatchedEvent event) {}
+  }
+
+  public static boolean waitForServerUp(String hp, long timeout) {
+    long start = System.currentTimeMillis();
+    String split[] = hp.split(":");
+    String host = split[0];
+    int port = Integer.parseInt(split[1]);
+    while (true) {
+      try {
+        Socket sock = new Socket(host, port);
+        BufferedReader reader = null;
+        try {
+          // the ZooKeeper 'stat' four-letter command answers once the server is serving requests
+          OutputStream outstream = sock.getOutputStream();
+          outstream.write("stat".getBytes());
+          outstream.flush();
+
+          reader =
+            new BufferedReader(
+              new InputStreamReader(sock.getInputStream()));
+          String line = reader.readLine();
+          if (line != null && line.startsWith("Zookeeper version:")) {
+            return true;
+          }
+        } finally {
+          sock.close();
+          if (reader != null) {
+            reader.close();
+          }
+        }
+      } catch (IOException e) {
+        // ignore as this is expected
+      }
+
+      if (System.currentTimeMillis() > start + timeout) {
+        break;
+      }
+      try {
+        Thread.sleep(250);
+      } catch (InterruptedException e) {
+        // ignore
+      }
+    }
+    return false;
+  }
+
+}
diff --git a/akka-cluster/src/main/protocol/ClusterProtocol.proto b/akka-cluster/src/main/protocol/ClusterProtocol.proto
new file mode 100644
index 0000000000..6661564240
--- /dev/null
+++ b/akka-cluster/src/main/protocol/ClusterProtocol.proto
@@ -0,0 +1,61 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+
+option java_package = "akka.cloud.cluster";
+option optimize_for = SPEED;
+
+/******************************************
+  Compile with:
+  cd ./akka-cluster/src/main/protocol
+  protoc ClusterProtocol.proto --java_out ../java
+*******************************************/
+
+/**
+ * Defines the remote daemon message.
+ */
+message RemoteDaemonMessageProtocol {
+  required RemoteDaemonMessageType messageType = 1;
+  optional UuidProtocol actorUuid = 2;
+  optional string actorId = 3;
+  optional string actorClassName = 4;
+  optional bytes payload = 5;
+}
+
+/**
+ * Defines the remote daemon message type.
+ */
+enum RemoteDaemonMessageType {
+  START = 1;
+  STOP = 2;
+  USE = 3;
+  RELEASE = 4;
+  MAKE_AVAILABLE = 5;
+  MAKE_UNAVAILABLE = 6;
+  DISCONNECT = 7;
+  RECONNECT = 8;
+  RESIGN = 9;
+  FAIL_OVER_CONNECTIONS = 10;
+  FUNCTION_FUN0_UNIT = 11;
+  FUNCTION_FUN0_ANY = 12;
+  FUNCTION_FUN1_ARG_UNIT = 13;
+  FUNCTION_FUN1_ARG_ANY = 14;
+}
+
+/**
+ * Defines the durable mailbox message.
+ */
+message DurableMailboxMessageProtocol {
+  required string ownerActorId = 1;
+  optional string senderActorId = 2;
+  optional UuidProtocol futureUuid = 3;
+  required bytes message = 4;
+}
+
+/**
+ * Defines a UUID.
+ */
+message UuidProtocol {
+  required uint64 high = 1;
+  required uint64 low = 2;
+}
diff --git a/akka-cluster/src/main/protocol/EventProtocol.proto b/akka-cluster/src/main/protocol/EventProtocol.proto
new file mode 100644
index 0000000000..92e893265f
--- /dev/null
+++ b/akka-cluster/src/main/protocol/EventProtocol.proto
@@ -0,0 +1,49 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+
+option java_package = "akka.cloud.cluster";
+option optimize_for = SPEED;
+
+/******************************************
+  Compile with:
+  cd ./akka-cluster/src/main/protocol
+  protoc EventProtocol.proto --java_out ../java
+*******************************************/
+
+message GenericLoggingEvent {
+  required string context = 1;
+  required string message = 2;
+  required uint64 time = 3;
+}
+
+message AuditEvent {
+  required string name = 1;
+  required string context = 2;
+  required string message = 3;
+  required uint64 time = 4;
+}
+
+message CounterAddEvent {
+  required string name = 1;
+  required uint64 delta = 2;
+}
+
+message CounterSetEvent {
+  required string name = 1;
+  required uint32 value = 2;
+}
+
+message CounterResetEvent {
+  required string name = 1;
+}
+
+message AverageAddEvent {
+  required string name = 1;
+  required uint64 value = 2;
+}
+
+message AverageResetEvent {
+  required string name = 1;
+}
+
diff --git a/akka-cluster/src/main/scala/akka/cloud/cluster/BookKeeperServer.scala b/akka-cluster/src/main/scala/akka/cloud/cluster/BookKeeperServer.scala
new file mode 100644
index 0000000000..4e9a6c4f46
--- /dev/null
+++ b/akka-cluster/src/main/scala/akka/cloud/cluster/BookKeeperServer.scala
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+package akka.cloud.cluster
+
+import org.apache.bookkeeper.proto.BookieServer
+
+import java.io.File
+
+/*
+A simple use of BookKeeper is to implement a write-ahead transaction log. A server maintains an in-memory data structure (with periodic snapshots, for example) and logs changes to that structure before it applies the change. The application server creates a ledger at startup and stores the ledger id and password in a well-known place (ZooKeeper, for example). When it needs to make a change, the server adds an entry with the change information to a ledger and applies the change once BookKeeper has added the entry successfully. The server can even use asyncAddEntry to queue up many changes for high change throughput. BookKeeper meticulously logs the changes in order and calls the completion functions in order.
+
+When the application server dies, a backup server comes online, gets the last snapshot, opens the ledger of the old server and reads all the entries written since the snapshot was taken (since it doesn't know the last entry number it reads up to MAX_INTEGER). Once all the entries have been processed, it closes the ledger and starts a new one for its own use.
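+
+A rough sketch of that pattern against the plain BookKeeper client API (illustrative only;
+'serializedChange' and 'applyChange' are hypothetical application hooks, and error handling
+is omitted):
+
+  import org.apache.bookkeeper.client.BookKeeper
+
+  val bk     = new BookKeeper("localhost:2181")
+  val ledger = bk.createLedger(BookKeeper.DigestType.CRC32, "secret".getBytes)
+  // log first, apply only after BookKeeper has acknowledged the entry
+  ledger.addEntry(serializedChange)
+  applyChange()
+
+  // recovery on the backup server: reopen the old ledger and replay its entries
+  val old     = bk.openLedger(ledger.getId, BookKeeper.DigestType.CRC32, "secret".getBytes)
+  val entries = old.readEntries(0, old.getLastAddConfirmed)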
+ +*/ + +/** + * @author Jonas Bonér + */ +object BookKeeperServer { + val port = 3181 + val zkServers = "localhost:2181" + val journal = new File("./bk/journal") + val ledgers = Array(new File("./bk/ledger")) + val bookie = new BookieServer(port, zkServers, journal, ledgers) + + def start = { + bookie.start + bookie.join + } +} diff --git a/akka-cluster/src/main/scala/akka/cloud/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cloud/cluster/Cluster.scala new file mode 100644 index 0000000000..729e9ae498 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cloud/cluster/Cluster.scala @@ -0,0 +1,1742 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ +package akka.cloud.cluster + +import org.apache.zookeeper._ +import org.apache.zookeeper.Watcher.Event._ +import org.apache.zookeeper.data.Stat +import org.apache.zookeeper.recipes.lock.{WriteLock, LockListener} + +import org.I0Itec.zkclient._ +import org.I0Itec.zkclient.serialize._ +import org.I0Itec.zkclient.exception._ + +import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference, AtomicInteger} +import java.util.concurrent.{ConcurrentSkipListSet, CopyOnWriteArrayList, Callable, ConcurrentHashMap} +import java.util.{List => JList} +import java.net.InetSocketAddress +import javax.management.StandardMBean + +import scala.collection.immutable.{HashMap, HashSet} +import scala.collection.mutable.ConcurrentMap +import scala.collection.JavaConversions._ + +import ClusterProtocol._ +import RemoteDaemonMessageType._ + +import akka.util._ +import akka.actor._ +import akka.actor.Actor._ +import akka.event.EventHandler +import akka.dispatch.{Dispatchers, Future} +import akka.remoteinterface._ +import akka.config.Config._ +import akka.serialization.{Format, Serializer} +import akka.serialization.Compression.LZF +import akka.AkkaException + +import akka.cloud.common.JMX +import akka.cloud.common.Util._ +import akka.cloud.monitoring.Monitoring +import akka.cloud.zookeeper._ + +import com.eaio.uuid.UUID + +import com.google.protobuf.ByteString + +// FIXME add watch for each node that when the entry for the node is removed then the node shuts itself down +// FIXME Provisioning data in ZK (file names etc) and files in S3 and on disk + +class ClusterException(message: String) extends AkkaException(message) + +/** + * JMX MBean for the cluster service. 
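+ * <p/>
+ * A sketch of reading one of these attributes through the platform MBean server (the object
+ * name shown is an assumption; the real name is built via JMX.nameFor):
+ * <pre>
+ *   val server = java.lang.management.ManagementFactory.getPlatformMBeanServer
+ *   val name   = new javax.management.ObjectName("akka:type=Cluster")
+ *   server.getAttribute(name, "NodeName")
+ * </pre>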
+ * + * @author Jonas Bonér + */ +trait ClusterNodeMBean { + def start: Unit + def stop: Unit + + def disconnect: Unit + def reconnect: Unit + def resign: Unit + + def isConnected: Boolean + + def getRemoteServerHostname: String + def getRemoteServerPort: Int + + def getNodeName: String + def getClusterName: String + def getZooKeeperServerAddresses: String + + def getMemberNodes: Array[String] + def getLeader: String + + def getUuidsForClusteredActors: Array[String] + def getIdsForClusteredActors: Array[String] + def getClassNamesForClusteredActors: Array[String] + + def getUuidsForActorsInUse: Array[String] + def getIdsForActorsInUse: Array[String] + def getClassNamesForActorsInUse: Array[String] + + def getNodesForActorInUseWithUuid(uuid: String): Array[String] + def getNodesForActorInUseWithId(id: String): Array[String] + def getNodesForActorInUseWithClassName(className: String): Array[String] + + def getUuidsForActorsInUseOnNode(nodeName: String): Array[String] + def getIdsForActorsInUseOnNode(nodeName: String): Array[String] + def getClassNamesForActorsInUseOnNode(nodeName: String): Array[String] + + def setConfigElement(key: String, value: String): Unit + def getConfigElement(key: String): AnyRef + def removeConfigElement(key: String): Unit + def getConfigElementKeys: Array[String] +} + +/** + * Node address holds the node name and the cluster name and can be used as a hash lookup key for a Node instance. + * + * @author Jonas Bonér + */ +final case class NodeAddress( + clusterName: String, + nodeName: String, + hostname: String = Cluster.lookupLocalhostName, + port: Int = Cluster.remoteServerPort) { + if ((nodeName eq null) || nodeName == "") throw new NullPointerException("Node name must not be null or empty string") + if ((clusterName eq null) || clusterName == "") throw new NullPointerException("Cluster name must not be null or empty string") + override def toString = "%s:%s:%s:%s".format(clusterName, nodeName, hostname, port) +} + +case class ActorAddress( + actorUuid: UUID = null, + actorId: String = Cluster.EMPTY_STRING, + actorClassName: String = Cluster.EMPTY_STRING) + +object ActorAddress { + def forUuid(actorUuid: UUID) = ActorAddress(actorUuid, Cluster.EMPTY_STRING, Cluster.EMPTY_STRING) + def forId(actorId: String) = ActorAddress(null, actorId, Cluster.EMPTY_STRING) + def forClassName(actorClassName: String) = ActorAddress(null, actorClassName, Cluster.EMPTY_STRING) +} + +/** + * Factory object for ClusterNode. Also holds global state such as configuration data etc. + * + * @author Jonas Bonér + */ +object Cluster { + val EMPTY_STRING = "".intern + val UUID_PREFIX = "uuid:".intern + + // config options + val zooKeeperServers = config.getString("akka.cloud.cluster.zookeeper-server-addresses", "localhost:2181") + val remoteServerPort = config.getInt("akka.cloud.cluster.remote-server-port", 2552) + val sessionTimeout = Duration(config.getInt("akka.cloud.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt + val connectionTimeout = Duration(config.getInt("akka.cloud.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt + val maxTimeToWaitUntilConnected = Duration(config.getInt("akka.cloud.cluster.max-time-to-wait-until-connected", 30), TIME_UNIT).toMillis.toInt + val shouldCompressData = config.getBool("akka.cloud.cluster.use-compression", false) + val enableJMX = config.getBool("akka.enable-jmx", true) + + /** + * Cluster membership change listener. + * For Scala API. 
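+   * <p/>
+   * A sketch of a listener (the body is illustrative), registered on a node before it is started:
+   * <pre>
+   *   node register (new ChangeListener {
+   *     override def nodeConnected(node: String, client: ClusterNode) =
+   *       EventHandler.info(this, "connected: " + node)
+   *   })
+   * </pre>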
+ */ + trait ChangeListener { + def notify(event: ChangeNotification, client: ClusterNode) = event match { + case NodeConnected(name) => nodeConnected(name, client) + case NodeDisconnected(name) => nodeDisconnected(name, client) + case NewLeader(name: String) => newLeader(name, client) + case NewSession => thisNodeNewSession(client) + case ThisNode.Connected => thisNodeConnected(client) + case ThisNode.Disconnected => thisNodeDisconnected(client) + case ThisNode.Expired => thisNodeExpired(client) + } + def nodeConnected(node: String, client: ClusterNode) = {} + def nodeDisconnected(node: String, client: ClusterNode) = {} + def newLeader(name: String, client: ClusterNode) = {} + def thisNodeNewSession(client: ClusterNode) = {} + def thisNodeConnected(client: ClusterNode) = {} + def thisNodeDisconnected(client: ClusterNode) = {} + def thisNodeExpired(client: ClusterNode) = {} + } + + /** + * Cluster membership change listener. + * For Java API. + */ + abstract class ChangeListenerAdapter extends ChangeListener + + sealed trait ChangeNotification + case class NodeConnected(node: String) extends ChangeNotification + case class NodeDisconnected(node: String) extends ChangeNotification + case class NewLeader(name: String) extends ChangeNotification + case object NewSession extends ChangeNotification + object ThisNode { + case object Connected extends ChangeNotification + case object Disconnected extends ChangeNotification + case object Expired extends ChangeNotification + } + + type Nodes = HashMap[NodeAddress, ClusterNode] + + val defaultSerializer = new SerializableSerializer + + private val _zkServer = new AtomicReference[Option[ZkServer]](None) + private val _nodes = new AtomicReference(new Nodes) + private val _clusterNames = new ConcurrentSkipListSet[String] + + private[cluster] def updateNodes(f: Nodes => Nodes) = + while (Some(_nodes.get).map(node => _nodes.compareAndSet(node, f(node)) == false).get) {} + + /** + * Looks up the local hostname. + */ + def lookupLocalhostName = NetworkUtil.getLocalhostName + + /** + * Returns all the nodes created by this Cluster object, e.g. created in this class loader hierarchy in this JVM. + */ + def nodes = _nodes.get + + /** + * Returns an Array with NodeAddress for all the nodes in a specific cluster. + */ + def nodesInCluster(clusterName: String): Array[NodeAddress] = _nodes.get.filter(_._1 == clusterName).map(_._1).toArray + + /** + * Returns the NodeAddress for a random node in a specific cluster. + */ + def randomNodeInCluster(clusterName: String): NodeAddress = { + val nodes = nodesInCluster(clusterName) + val random = new java.util.Random + nodes(random.nextInt(nodes.length)) + } + + /** + * Returns the names of all clusters that this JVM is connected to. + */ + def clusters: Array[String] = _clusterNames.toList.toArray + + /** + * Returns the node for a specific NodeAddress. + */ + def nodeFor(nodeAddress: NodeAddress) = _nodes.get()(nodeAddress) + + /** + * Creates a new cluster node; ClusterNode. + */ + def apply( + nodeAddress: NodeAddress, + zkServerAddresses: String = Cluster.zooKeeperServers, + serializer: ZkSerializer = Cluster.defaultSerializer): ClusterNode = + newNode(nodeAddress, zkServerAddresses, serializer) + + /** + * Creates a new cluster node; ClusterNode. + */ + def newNode(nodeAddress: NodeAddress): ClusterNode = + newNode(nodeAddress, Cluster.zooKeeperServers, Cluster.defaultSerializer) + + /** + * Creates a new cluster node; ClusterNode. 
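+   * <p/>
+   * For example (cluster, node and server addresses are illustrative):
+   * <pre>
+   *   val node = Cluster.newNode(NodeAddress("test-cluster", "node-1"), "localhost:2181")
+   * </pre>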
+ */ + def newNode(nodeAddress: NodeAddress, zkServerAddresses: String): ClusterNode = + newNode(nodeAddress, zkServerAddresses, Cluster.defaultSerializer) + + /** + * Creates a new cluster node; ClusterNode. + */ + def newNode(nodeAddress: NodeAddress, serializer: ZkSerializer): ClusterNode = + newNode(nodeAddress, Cluster.zooKeeperServers, serializer) + + /** + * Creates a new cluster node; ClusterNode. + */ + def newNode( + nodeAddress: NodeAddress, + zkServerAddresses: String, + serializer: ZkSerializer): ClusterNode = { + + if (nodeAddress eq null) throw new IllegalArgumentException("NodeAddress can't be null") + + val node = new ClusterNode( + nodeAddress, + if ((zkServerAddresses eq null) || zkServerAddresses == "") Cluster.zooKeeperServers else zkServerAddresses, + if (serializer eq null) Cluster.defaultSerializer else serializer) + + // FIXME Cluster nodes are never removed? + updateNodes(_ + (nodeAddress -> node)) + _clusterNames add nodeAddress.clusterName + node + } + + /** + * Starts up a local ZooKeeper server. Should only be used for testing purposes. + */ + def startLocalCluster(): ZkServer = + startLocalCluster("_akka_cluster/data", "_akka_cluster/log", 2181, 5000) + + /** + * Starts up a local ZooKeeper server. Should only be used for testing purposes. + */ + def startLocalCluster(port: Int, tickTime: Int): ZkServer = + startLocalCluster("_akka_cluster/data", "_akka_cluster/log", port, tickTime) + + /** + * Starts up a local ZooKeeper server. Should only be used for testing purposes. + */ + def startLocalCluster(tickTime: Int): ZkServer = + startLocalCluster("_akka_cluster/data", "_akka_cluster/log", 2181, tickTime) + + /** + * Starts up a local ZooKeeper server. Should only be used for testing purposes. + */ + def startLocalCluster(dataPath: String, logPath: String): ZkServer = + startLocalCluster(dataPath, logPath, 2181, 500) + + /** + * Starts up a local ZooKeeper server. Should only be used for testing purposes. + */ + def startLocalCluster(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = { + try { + EventHandler.info(this, + "Starting local ZooKeeper server on\n\tport [%s]\n\tdata path [%s]\n\tlog path [%s]\n\ttick time [%s]" + .format(port, dataPath, logPath, tickTime)) + val zkServer = AkkaZooKeeper.startLocalServer(dataPath, logPath, port, tickTime) + _zkServer.set(Some(zkServer)) + zkServer + } catch { + case e: Throwable => + EventHandler.error(e, this, "Could not start local ZooKeeper cluster") + throw e + } + } + + /** + * Resets all clusters managed connected to in this JVM. + *
+   * <p/>
+ * WARNING: Use with care + */ + def reset(): Unit = withPrintStackTraceOnError { + EventHandler.info(this, "Resetting all clusters connected to in this JVM") + if (!clusters.isEmpty) { + nodes foreach { tp => + val (_, node) = tp + node.disconnect + node.remoteService.shutdown + } + implicit val zkClient = newZkClient + clusters foreach (resetNodesInCluster(_)) + ignore[ZkNoNodeException](zkClient.deleteRecursive(ZooKeeperBarrier.BarriersNode)) + zkClient.close + } + } + + /** + * Resets all nodes in a specific cluster. + */ + def resetNodesInCluster(clusterName: String)(implicit zkClient: AkkaZkClient = newZkClient) = withPrintStackTraceOnError { + EventHandler.info(this, "Resetting nodes in cluster [%s]".format(clusterName)) + ignore[ZkNoNodeException](zkClient.deleteRecursive("/" + clusterName)) + } + + /** + * Shut down the local ZooKeeper server. + */ + def shutdownLocalCluster() = withPrintStackTraceOnError { + EventHandler.info(this, "Shuts down local cluster") + reset + _zkServer.get.foreach(_.shutdown) + _zkServer.set(None) + } + + /** + * Creates a new AkkaZkClient. + */ + def newZkClient: AkkaZkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultSerializer) + + def uuidToString(uuid: UUID): String = uuid.toString + + def stringToUuid(uuid: String): UUID = { + if (uuid eq null) throw new ClusterException("UUID is null") + if (uuid == "") throw new ClusterException("UUID is an empty string") + try { new UUID(uuid) } + catch { + case e: StringIndexOutOfBoundsException => + val error = new ClusterException("UUID not valid [" + uuid + "]") + EventHandler.error(error, this, "") + throw error + } + } + + def uuidProtocolToUuid(uuid: UuidProtocol) = new UUID(uuid.getHigh, uuid.getLow) + + def uuidToUuidProtocol(uuid: UUID) = + UuidProtocol.newBuilder + .setHigh(uuid.getTime) + .setLow(uuid.getClockSeqAndNode) + .build + +} + +/** + * @author Jonas Bonér + */ +class ClusterNode private[akka] ( + val nodeAddress: NodeAddress, + val zkServerAddresses: String, + val serializer: ZkSerializer) extends ErrorHandler { self => + + if (nodeAddress eq null) throw new IllegalArgumentException("'nodeAddress' can not be 'null'") + + import Cluster._ + + EventHandler.info(this, + ("\nCreating cluster node with" + + "\n\tnode name = [%s]" + + "\n\tcluster name = [%s]" + + "\n\tzookeeper server addresses = [%s]" + + "\n\tserializer = [%s]") + .format(nodeAddress.nodeName, nodeAddress.clusterName, zkServerAddresses, serializer)) + + val remoteClientLifeCycleListener = actorOf(new Actor { + def receive = { + case RemoteClientError(cause, client, address) => client.shutdownClientModule + case RemoteClientDisconnected(client, address) => client.shutdownClientModule + case _ => //ignore other + } + }).start + + val remoteDaemon = actorOf(new RemoteClusterDaemon(this)).start + + val remoteService: RemoteSupport = { + val remote = new akka.remote.netty.NettyRemoteSupport + remote.start(nodeAddress.hostname, nodeAddress.port) + remote.register(RemoteClusterDaemon.ID, remoteDaemon) + remote.addListener(remoteClientLifeCycleListener) + remote + } + val remoteServerAddress: InetSocketAddress = remoteService.address + + val clusterJmxObjectName = JMX.nameFor(nodeAddress.hostname, "monitoring", "cluster") + + // static nodes + val CLUSTER_NODE = "/" + nodeAddress.clusterName + val MEMBERSHIP_NODE = CLUSTER_NODE + "/members" + val CONFIGURATION_NODE = CLUSTER_NODE + "/config" + val PROVISIONING_NODE = CLUSTER_NODE + "/provisioning" + val ACTOR_REGISTRY_NODE = CLUSTER_NODE + 
"/actor-registry" + val ACTOR_LOCATIONS_NODE = CLUSTER_NODE + "/actor-locations" + val ACTOR_ID_TO_UUIDS_NODE = CLUSTER_NODE + "/actor-id-to-uuids" + val ACTOR_CLASS_TO_UUIDS_NODE = CLUSTER_NODE + "/actor-class-to-uuids" + val ACTORS_AT_ADDRESS_NODE = CLUSTER_NODE + "/actors-at-address" + val baseNodes = List( + CLUSTER_NODE, + MEMBERSHIP_NODE, + ACTOR_REGISTRY_NODE, + ACTOR_LOCATIONS_NODE, + ACTORS_AT_ADDRESS_NODE, + ACTOR_ID_TO_UUIDS_NODE, + ACTOR_CLASS_TO_UUIDS_NODE, + CONFIGURATION_NODE, + PROVISIONING_NODE) + + val LEADER_ELECTION_NODE = CLUSTER_NODE + "/leader" // should NOT be part of 'baseNodes' only used by 'leaderLock' + + val isConnected = new Switch(false) + val isLeader = new AtomicBoolean(false) + val electionNumber = new AtomicInteger(Integer.MAX_VALUE) + + private val membershipNodePath = membershipNodePathFor(nodeAddress.nodeName) + + // local caches of ZK data + private[akka] val locallyCachedMembershipNodes = new ConcurrentSkipListSet[String]() + private[akka] val nodeNameToAddress: ConcurrentMap[String, InetSocketAddress] = new ConcurrentHashMap[String, InetSocketAddress] + private[akka] val locallyCheckedOutActors: ConcurrentMap[UUID, Array[Byte]] = new ConcurrentHashMap[UUID, Array[Byte]] + + def membershipNodes: Array[String] = locallyCachedMembershipNodes.toList.toArray.asInstanceOf[Array[String]] + + private[akka] val replicaConnections: ConcurrentMap[String, Tuple2[InetSocketAddress, ActorRef]] = + new ConcurrentHashMap[String, Tuple2[InetSocketAddress, ActorRef]] + + // zookeeper listeners + private val stateListener = new StateListener(this) + private val membershipListener = new MembershipChildListener(this) + + // cluster node listeners + private val changeListeners = new CopyOnWriteArrayList[ChangeListener]() + + // Address -> ClusterActorRef + private val clusterActorRefs = new Index[InetSocketAddress, ClusterActorRef] + + // resources + private[cluster] val zkClient = new AkkaZkClient(zkServerAddresses, sessionTimeout, connectionTimeout, serializer) + + private[cluster] val leaderElectionCallback = new LockListener { + def lockAcquired { + EventHandler.info(this, "Node [%s] is the new leader".format(self.nodeAddress.nodeName)) + self.isLeader.set(true) + self.publish(Cluster.NewLeader(self.nodeAddress.nodeName)) + } + + def lockReleased { + EventHandler.info(this, + "Node [%s] is *NOT* the leader anymore".format(self.nodeAddress.nodeName)) + self.isLeader.set(false) + // self.publish(Cluster.LeaderChange) + } + } + + private[cluster] val leaderLock = new WriteLock( + zkClient.connection.getZookeeper, LEADER_ELECTION_NODE, null, leaderElectionCallback) { + // ugly hack, but what do you do? 
+    private val ownerIdField = classOf[WriteLock].getDeclaredField("ownerId")
+    ownerIdField.setAccessible(true)
+    def leader: String = ownerIdField.get(this).asInstanceOf[String]
+  }
+
+  if (enableJMX) createMBean
+
+  // =======================================
+  // Node
+  // =======================================
+
+  def isRunning: Boolean = isConnected.isOn
+
+  def start(): ClusterNode = {
+    isConnected switchOn {
+      initializeNode
+    }
+    this
+  }
+
+  def stop(): Unit = isConnected switchOff {
+    ignore[ZkNoNodeException](zkClient.deleteRecursive(membershipNodePath))
+
+    locallyCachedMembershipNodes.clear
+    locallyCheckedOutActors.clear
+
+    replicaConnections.toList.foreach({ case (_, (address, _)) =>
+      remote.shutdownClientConnection(address) // shut down client connections
+    })
+
+    remoteService.shutdown // shutdown server
+
+    remoteClientLifeCycleListener.stop
+    remoteDaemon.stop
+
+    // for monitoring remote listener
+    registry.actors.filter(remoteService.hasListener).foreach(_.stop)
+
+    replicaConnections.clear
+    updateNodes(_ - nodeAddress)
+
+    disconnect()
+    EventHandler.info(this, "Cluster node shut down [%s]".format(nodeAddress))
+  }
+
+  def disconnect(): ClusterNode = {
+    zkClient.unsubscribeAll
+    zkClient.close
+    this
+  }
+
+  def reconnect(): ClusterNode = {
+    zkClient.reconnect
+    this
+  }
+
+  // =======================================
+  // Change notification
+  // =======================================
+
+  /**
+   * Registers a cluster change listener.
+   */
+  def register(listener: ChangeListener): ClusterNode = if (isConnected.isOff) {
+    changeListeners.add(listener)
+    this
+  } else throw new IllegalStateException("Can not register 'ChangeListener' after the cluster node has been started")
+
+  private[cluster] def publish(change: ChangeNotification) = changeListeners.iterator.foreach(_.notify(change, this))
+
+  // =======================================
+  // Leader
+  // =======================================
+
+  /**
+   * Returns the name of the current leader.
+   */
+  def leader: String = leaderLock.leader
+
+  /**
+   * Explicitly resign from being a leader. If this node is not a leader then this operation is a no-op.
+   */
+  def resign = if (isLeader.get) leaderLock.unlock
+
+  // =======================================
+  // Actor
+  // =======================================
+
+  /**
+   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+   * available durable store.
+   */
+  def store[T <: Actor]
+    (actorClass: Class[T])
+    (implicit format: Format[T]): ClusterNode = store(Actor.actorOf(actorClass).start, 0, false)
+
+  /**
+   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+   * available durable store.
+   */
+  def store[T <: Actor]
+    (actorClass: Class[T], replicationFactor: Int)
+    (implicit format: Format[T]): ClusterNode = store(Actor.actorOf(actorClass).start, replicationFactor, false)
+
+  /**
+   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+   * available durable store.
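+   * <p/>
+   * For example (the actor type is illustrative; an implicit Format[MyActor] is assumed in scope):
+   * <pre>
+   *   node.store(classOf[MyActor], true) // 'true' also serializes the mailbox
+   * </pre>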
+ */ + def store[T <: Actor] + (actorClass: Class[T], serializeMailbox: Boolean) + (implicit format: Format[T]): ClusterNode = store(Actor.actorOf(actorClass).start, 0, serializeMailbox) + + /** + * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated + * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly + * available durable store. + */ + def store[T <: Actor] + (actorClass: Class[T], replicationFactor: Int, serializeMailbox: Boolean) + (implicit format: Format[T]): ClusterNode = + store(Actor.actorOf(actorClass).start, replicationFactor, serializeMailbox) + + /** + * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated + * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly + * available durable store. + */ + def store[T <: Actor] + (actorRef: ActorRef) + (implicit format: Format[T]): ClusterNode = store(actorRef, 0, false) + + /** + * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated + * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly + * available durable store. + */ + def store[T <: Actor] + (actorRef: ActorRef, replicationFactor: Int) + (implicit format: Format[T]): ClusterNode = store(actorRef, replicationFactor, false) + + /** + * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated + * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly + * available durable store. + */ + def store[T <: Actor] + (actorRef: ActorRef, serializeMailbox: Boolean) + (implicit format: Format[T]): ClusterNode = store(actorRef, 0, serializeMailbox) + + /** + * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated + * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly + * available durable store. 
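+   * <p/>
+   * For example (the actor reference is illustrative; an implicit Format is assumed in scope):
+   * <pre>
+   *   node.store(myActorRef, 3, false) // ask three replicas to pick the actor up, skip the mailbox
+   * </pre>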
+   */
+  def store[T <: Actor]
+    (actorRef: ActorRef, replicationFactor: Int, serializeMailbox: Boolean)
+    (implicit format: Format[T]): ClusterNode = if (isConnected.isOn) {
+
+    import akka.serialization.ActorSerialization._
+
+    if (!actorRef.isInstanceOf[LocalActorRef]) throw new IllegalArgumentException(
+      "'actorRef' must be an instance of 'LocalActorRef' [" + actorRef.getClass.getName + "]")
+
+    val uuid = actorRef.uuid
+    EventHandler.debug(this,
+      "Clustering actor [%s] with UUID [%s]".format(actorRef.actorClassName, uuid))
+
+    // honor 'serializeMailbox' in both the compressed and the uncompressed branch
+    val actorBytes = if (shouldCompressData) LZF.compress(toBinary(actorRef, serializeMailbox)(format))
+                     else toBinary(actorRef, serializeMailbox)(format)
+    val actorRegistryPath = actorRegistryNodePathFor(uuid)
+
+    // create UUID -> Array[Byte] for actor registry
+    if (zkClient.exists(actorRegistryPath)) zkClient.writeData(actorRegistryPath, actorBytes) // FIXME check for size and warn if too big
+    else {
+      zkClient.retryUntilConnected(new Callable[Either[String, Exception]]() {
+        def call: Either[String, Exception] = {
+          try {
+            Left(zkClient.connection.create(actorRegistryPath, actorBytes, CreateMode.PERSISTENT))
+          } catch { case e: KeeperException.NodeExistsException => Right(e) }
+        }
+      }) match {
+        case Left(path) => path
+        case Right(exception) => actorRegistryPath
+      }
+
+      // create UUID -> Format registry
+      try {
+        zkClient.createPersistent(actorRegistryFormatNodePathFor(uuid), format)
+      } catch {
+        case e: ZkNodeExistsException => zkClient.writeData(actorRegistryFormatNodePathFor(uuid), format)
+      }
+
+      // create UUID -> ID registry
+      try {
+        zkClient.createPersistent(actorRegistryActorIdNodePathFor(uuid), actorRef.id)
+      } catch {
+        case e: ZkNodeExistsException => zkClient.writeData(actorRegistryActorIdNodePathFor(uuid), actorRef.id)
+      }
+
+      // create UUID -> class name registry
+      try {
+        zkClient.createPersistent(actorRegistryActorClassNameNodePathFor(uuid), actorRef.actorClassName)
+      } catch {
+        case e: ZkNodeExistsException => zkClient.writeData(actorRegistryActorClassNameNodePathFor(uuid), actorRef.actorClassName)
+      }
+
+      // create UUID -> Address registry
+      ignore[ZkNodeExistsException]( zkClient.createPersistent(actorRegistryAddressNodePathFor(uuid)) )
+
+      // create UUID -> Node registry
+      ignore[ZkNodeExistsException]( zkClient.createPersistent(actorLocationsNodePathFor(uuid)) )
+
+      // create ID -> UUIDs registry
+      ignore[ZkNodeExistsException]( zkClient.createPersistent(actorIdToUuidsNodePathFor(actorRef.id)) )
+      ignore[ZkNodeExistsException]( zkClient.createPersistent("%s/%s".format(actorIdToUuidsNodePathFor(actorRef.id), uuid)) )
+
+      // create class name -> UUIDs registry
+      ignore[ZkNodeExistsException]( zkClient.createPersistent(actorClassNameToUuidsNodePathFor(actorRef.actorClassName)) )
+      ignore[ZkNodeExistsException]( zkClient.createPersistent("%s/%s".format(actorClassNameToUuidsNodePathFor(actorRef.actorClassName), uuid)) )
+    }
+
+    val command = RemoteDaemonMessageProtocol.newBuilder
+      .setMessageType(USE)
+      .setActorUuid(uuidToUuidProtocol(uuid))
+      .build
+    replicaConnectionsForReplicationFactor(replicationFactor) foreach { connection =>
+      connection ! command
+    }
+
+    this
+  } else throw new ClusterException("Not connected to cluster")
+
+  /**
+   * Removes actor by type from the cluster.
+   *
+   * <pre>
+   *   clusterNode remove classOf[MyActor]
+   * </pre>
+ */ + def remove[T <: Actor](actorClass: Class[T]): ClusterNode = remove(ActorAddress(actorClassName = actorClass.getName)) + + /** + * Removes actor with UUID from the cluster. + */ + def remove(actorAddress: ActorAddress): ClusterNode = { + + def removeByUuid(actorUuid: UUID) = { + releaseActorOnAllNodes(actorUuid) + + locallyCheckedOutActors.remove(actorUuid) + // warning: ordering matters here + ignore[ZkNoNodeException](zkClient.deleteRecursive(actorIdToUuidsNodePathFor(actorIdForUuid(actorUuid)))) // remove ID to UUID mapping + ignore[ZkNoNodeException](zkClient.deleteRecursive(actorClassNameToUuidsNodePathFor(actorClassNameForUuid(actorUuid)))) // remove class name to UUID mapping + ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAtAddressNodePathFor(nodeAddress.nodeName, actorUuid))) + ignore[ZkNoNodeException](zkClient.deleteRecursive(actorRegistryNodePathFor(actorUuid))) + ignore[ZkNoNodeException](zkClient.deleteRecursive(actorLocationsNodePathFor(actorUuid))) + } + + isConnected ifOn { + // remove by UUID + if (actorAddress.actorUuid ne null) { + EventHandler.debug(this, + "Removing actor with UUID [%s] from cluster".format(actorAddress.actorUuid)) + removeByUuid(actorAddress.actorUuid) + + // remove by ID + } else if (actorAddress.actorId != EMPTY_STRING) { + EventHandler.debug(this, + "Removing actor(s) with ID [%s] from cluster".format(actorAddress.actorId)) + uuidsForActorId(actorAddress.actorId) foreach (uuid => removeByUuid(uuid)) + + // remove by class name + } else if (actorAddress.actorClassName != EMPTY_STRING) { + EventHandler.debug(this, + "Removing actor(s) with class name [%s] from cluster".format(actorAddress.actorClassName)) + uuidsForActorClassName(actorAddress.actorClassName) foreach (uuid => removeByUuid(uuid)) + + } else throw new IllegalArgumentException( + "You need to pass in at least one of 'actorUuid' or 'actorId' or 'actorClassName' to 'ClusterNode.remove(..)'") + } + this + } + + /** + * Is the actor with uuid clustered or not? + */ + def isClustered(actorAddress: ActorAddress): Boolean = if (isConnected.isOn) { + actorUuidsForActorAddress(actorAddress) map { uuid => + zkClient.exists(actorRegistryNodePathFor(uuid)) + } exists (_ == true) + } else false + + /** + * Is the actor with uuid in use on 'this' node or not? + */ + def isInUseOnNode(actorAddress: ActorAddress): Boolean = isInUseOnNode(actorAddress, nodeAddress) + + /** + * Is the actor with uuid in use or not? + */ + def isInUseOnNode(actorAddress: ActorAddress, node: NodeAddress): Boolean = if (isConnected.isOn) { + actorUuidsForActorAddress(actorAddress) map { uuid => + zkClient.exists(actorLocationsNodePathFor(uuid, node)) + } exists (_ == true) + } else false + + /** + * Checks out an actor for use on this node, e.g. checked out as a 'LocalActorRef' but it makes it available + * for remote access through lookup by its UUID. 
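+   * <p/>
+   * For example (the actor id is illustrative):
+   * <pre>
+   *   val refs = node use ActorAddress(actorId = "my-service")
+   * </pre>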
+   */
+  def use[T <: Actor](actorAddress: ActorAddress)(
+    implicit format: Format[T] = formatForActor(actorAddress)): Array[LocalActorRef] = if (isConnected.isOn) {
+
+    import akka.serialization.ActorSerialization._
+
+    actorUuidsForActorAddress(actorAddress) map { uuid =>
+      EventHandler.debug(this,
+        "Checking out actor with UUID [%s] to be used on node [%s]".format(uuid, nodeAddress.nodeName))
+
+      ignore[ZkNodeExistsException](zkClient.createPersistent(actorAtAddressNodePathFor(nodeAddress.nodeName, uuid), true))
+      ignore[ZkNodeExistsException](zkClient.createEphemeral(actorLocationsNodePathFor(uuid, nodeAddress)))
+
+      // set home address
+      ignore[ZkNodeExistsException](zkClient.createPersistent(actorRegistryAddressNodePathFor(uuid)))
+      ignore[ZkNodeExistsException](zkClient.createEphemeral(actorRegistryAddressNodePathFor(uuid, remoteServerAddress)))
+
+      val actorPath = actorRegistryNodePathFor(uuid)
+      zkClient.retryUntilConnected(new Callable[Either[Array[Byte], Exception]]() {
+        def call: Either[Array[Byte], Exception] = {
+          try {
+            Left(if (shouldCompressData) LZF.uncompress(zkClient.connection.readData(actorPath, new Stat, false))
+                 else zkClient.connection.readData(actorPath, new Stat, false))
+          } catch { case e: KeeperException.NodeExistsException => Right(e) }
+        }
+      }) match {
+        case Left(bytes) =>
+          locallyCheckedOutActors += (uuid -> bytes)
+          // FIXME switch to ReplicatedActorRef here
+          // val actor = new ReplicatedActorRef(fromBinary[T](bytes, remoteServerAddress)(format))
+          val actor = fromBinary[T](bytes, remoteServerAddress)(format)
+          remoteService.register(UUID_PREFIX + uuid, actor) // clustered refs are always registered and looked up by UUID
+          actor.start
+          actor.asInstanceOf[LocalActorRef]
+        case Right(exception) => throw exception
+      }
+    }
+  } else Array.empty[LocalActorRef]
+
+  /**
+   * Uses (checks out) all actors with a specific UUID on all nodes in the cluster.
+   */
+  def useActorOnAllNodes(uuid: UUID): Unit = isConnected ifOn {
+    EventHandler.debug(this,
+      "Using (checking out) all actors with UUID [%s] on all nodes in cluster".format(uuid))
+    val command = RemoteDaemonMessageProtocol.newBuilder
+      .setMessageType(USE)
+      .setActorUuid(uuidToUuidProtocol(uuid))
+      .build
+    membershipNodes foreach { node =>
+      replicaConnections.get(node) foreach { case (_, connection) =>
+        connection ! command
+      }
+    }
+  }
+
+  /**
+   * Uses (checks out) the actor with a specific UUID on a specific node.
+   */
+  def useActorOnNode(node: String, uuid: UUID): Unit = isConnected ifOn {
+    replicaConnections.get(node) foreach { case (_, connection) =>
+      connection ! RemoteDaemonMessageProtocol.newBuilder
+        .setMessageType(USE)
+        .setActorUuid(uuidToUuidProtocol(uuid))
+        .build
+    }
+  }
+
+  /**
+   * Checks an actor back in after use on this node.
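+   * <p/>
+   * For example (mirroring the 'use' example above):
+   * <pre>
+   *   node release ActorAddress(actorId = "my-service")
+   * </pre>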
+ */ + def release(actorAddress: ActorAddress): Unit = isConnected ifOn { + actorUuidsForActorAddress(actorAddress) foreach { uuid => + EventHandler.debug(this, + "Releasing actor with UUID [%s] after usage".format(uuid)) + locallyCheckedOutActors.remove(uuid) + ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAtAddressNodePathFor(nodeAddress.nodeName, uuid))) + ignore[ZkNoNodeException](zkClient.delete(actorAtAddressNodePathFor(nodeAddress.nodeName, uuid))) + ignore[ZkNoNodeException](zkClient.delete(actorLocationsNodePathFor(uuid, nodeAddress))) + ignore[ZkNoNodeException](zkClient.delete(actorRegistryAddressNodePathFor(uuid, remoteServerAddress))) + } + } + + /** + * Releases (checking in) all actors with a specific UUID on all nodes in the cluster where the actor is in 'use'. + */ + def releaseActorOnAllNodes(uuid: UUID): Unit = isConnected ifOn { + EventHandler.debug(this, + "Releasing (checking in) all actors with UUID [%s] on all nodes in cluster".format(uuid)) + val command = RemoteDaemonMessageProtocol.newBuilder + .setMessageType(RELEASE) + .setActorUuid(uuidToUuidProtocol(uuid)) + .build + nodesForActorsInUseWithUuid(uuid) foreach { node => + replicaConnections.get(node) foreach { case (_, connection) => + connection ! command + } + } + } + + /** + * Creates an ActorRef with a Router to a set of clustered actors. + */ + def ref(actorAddress: ActorAddress, router: Router.RouterType): ActorRef = if (isConnected.isOn) { + + val addresses = addressesForActor(actorAddress) + val actorType = ActorType.ScalaActor // FIXME later we also want to suppot TypedActor, then 'actorType' needs to be configurable + + EventHandler.debug(this, + "Creating cluster actor ref with router [%s] for actors [%s]".format(router, addresses.mkString(", "))) + + def registerClusterActorRefForAddress(actorRef: ClusterActorRef, addresses: Array[(UUID, InetSocketAddress)]) = + addresses foreach { case (_, address) => clusterActorRefs.put(address, actorRef) } + + def refByUuid(actorUuid: UUID): ActorRef = { + val actorClassName = actorClassNameForUuid(actorUuid) + val actor = Router newRouter ( + router, addresses, + uuidToString(actorUuid), actorClassName, + Cluster.lookupLocalhostName, Cluster.remoteServerPort, // set it to local hostname:port + Actor.TIMEOUT, actorType) + registerClusterActorRefForAddress(actor, addresses) + actor + } + + def refById(actorId: String): ActorRef = { + val uuids = uuidsForActorId(actorId) + val actorClassName = uuids.map(uuid => actorClassNameForUuid(uuid)).head + if (actorClassName eq null) throw new IllegalStateException( + "Actor class name for actor with UUID [" + uuids.head + "] could not be retrieved") + val actor = Router newRouter ( + router, addresses, + actorId, actorClassName, + Cluster.lookupLocalhostName, Cluster.remoteServerPort, // set it to local hostname:port + Actor.TIMEOUT, actorType) + registerClusterActorRefForAddress(actor, addresses) + actor + } + + def refByClassName(actorClassName: String): ActorRef = { + val actor = Router newRouter ( + router, addresses, + actorClassName, actorClassName, + Cluster.lookupLocalhostName, Cluster.remoteServerPort, // set it to local hostname:port + Actor.TIMEOUT, actorType) + registerClusterActorRefForAddress(actor, addresses) + actor + } + + val actorUuid = actorAddress.actorUuid + val actorId = actorAddress.actorId + val actorClassName = actorAddress.actorClassName + if ((actorUuid ne null) && actorId == EMPTY_STRING && actorClassName == EMPTY_STRING) refByUuid(actorUuid) + else if (actorId != EMPTY_STRING && 
(actorUuid eq null) && actorClassName == EMPTY_STRING) refById(actorId) + else if (actorClassName != EMPTY_STRING && (actorUuid eq null) && actorId == EMPTY_STRING) refByClassName(actorClassName) + else throw new IllegalArgumentException("You need to pass in either 'actorUuid' or 'actorId' or 'actorClassName' and only one of them") + } else throw new ClusterException("Not connected to cluster") + + /** + * Migrate the actor from 'this' node to node 'to'. + */ + def migrate(to: NodeAddress, actorAddress: ActorAddress): Unit = migrate(nodeAddress, to, actorAddress) + + /** + * Migrate the actor from node 'from' to node 'to'. + */ + def migrate( + from: NodeAddress, to: NodeAddress, actorAddress: ActorAddress): Unit = isConnected ifOn { + if (from eq null) throw new IllegalArgumentException("NodeAddress 'from' can not be 'null'") + if (to eq null) throw new IllegalArgumentException("NodeAddress 'to' can not be 'null'") + if (isInUseOnNode(actorAddress, from)) { + migrateWithoutCheckingThatActorResidesOnItsHomeNode(from, to, actorAddress) + } else { + throw new ClusterException("Can't move actor from node [" + from + "] since it does not exist on this node") + } + } + + /** + * Returns the UUIDs of all actors checked out on this node. + */ + def uuidsForActorsInUse: Array[UUID] = uuidsForActorsInUseOnNode(nodeAddress.nodeName) + + /** + * Returns the IDs of all actors checked out on this node. + */ + def idsForActorsInUse: Array[String] = actorIdsForUuids(uuidsForActorsInUse) + + /** + * Returns the class names of all actors checked out on this node. + */ + def classNamesForActorsInUse: Array[String] = actorClassNamesForUuids(uuidsForActorsInUse) + + /** + * Returns the UUIDs of all actors registered in this cluster. + */ + def uuidsForClusteredActors: Array[UUID] = if (isConnected.isOn) { + zkClient.getChildren(ACTOR_REGISTRY_NODE).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] + } else Array.empty[UUID] + + /** + * Returns the IDs of all actors registered in this cluster. + */ + def idsForClusteredActors: Array[String] = actorIdsForUuids(uuidsForClusteredActors) + + /** + * Returns the class names of all actors registered in this cluster. + */ + def classNamesForClusteredActors: Array[String] = actorClassNamesForUuids(uuidsForClusteredActors) + + /** + * Returns the actor id for the actor with a specific UUID. + */ + def actorIdForUuid(uuid: UUID): String = if (isConnected.isOn) { + try { zkClient.readData(actorRegistryActorIdNodePathFor(uuid)).asInstanceOf[String] } + catch { case e: ZkNoNodeException => "" } + } else "" + + /** + * Returns the actor ids for all the actors with a specific UUID. + */ + def actorIdsForUuids(uuids: Array[UUID]): Array[String] = uuids map (actorIdForUuid(_)) filter (_ != "") + + /** + * Returns the actor class name for the actor with a specific UUID. + */ + def actorClassNameForUuid(uuid: UUID): String = if (isConnected.isOn) { + try { zkClient.readData(actorRegistryActorClassNameNodePathFor(uuid)).asInstanceOf[String] } + catch { case e: ZkNoNodeException => "" } + } else "" + + /** + * Returns the actor class names for all the actors with a specific UUID. + */ + def actorClassNamesForUuids(uuids: Array[UUID]): Array[String] = uuids map (actorClassNameForUuid(_)) filter (_ != "") + + /** + * Returns the actor UUIDs for actor ID. 
+
+ /**
+  * Migrate the actor from 'this' node to node 'to'.
+  */
+ def migrate(to: NodeAddress, actorAddress: ActorAddress): Unit = migrate(nodeAddress, to, actorAddress)
+
+ /**
+  * Migrate the actor from node 'from' to node 'to'.
+  */
+ def migrate(
+   from: NodeAddress, to: NodeAddress, actorAddress: ActorAddress): Unit = isConnected ifOn {
+   if (from eq null) throw new IllegalArgumentException("NodeAddress 'from' can not be 'null'")
+   if (to eq null) throw new IllegalArgumentException("NodeAddress 'to' can not be 'null'")
+   if (isInUseOnNode(actorAddress, from)) {
+     migrateWithoutCheckingThatActorResidesOnItsHomeNode(from, to, actorAddress)
+   } else {
+     throw new ClusterException("Can't move actor from node [" + from + "] since it is not in use on that node")
+   }
+ }
+
+ /**
+  * Returns the UUIDs of all actors checked out on this node.
+  */
+ def uuidsForActorsInUse: Array[UUID] = uuidsForActorsInUseOnNode(nodeAddress.nodeName)
+
+ /**
+  * Returns the IDs of all actors checked out on this node.
+  */
+ def idsForActorsInUse: Array[String] = actorIdsForUuids(uuidsForActorsInUse)
+
+ /**
+  * Returns the class names of all actors checked out on this node.
+  */
+ def classNamesForActorsInUse: Array[String] = actorClassNamesForUuids(uuidsForActorsInUse)
+
+ /**
+  * Returns the UUIDs of all actors registered in this cluster.
+  */
+ def uuidsForClusteredActors: Array[UUID] = if (isConnected.isOn) {
+   zkClient.getChildren(ACTOR_REGISTRY_NODE).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]]
+ } else Array.empty[UUID]
+
+ /**
+  * Returns the IDs of all actors registered in this cluster.
+  */
+ def idsForClusteredActors: Array[String] = actorIdsForUuids(uuidsForClusteredActors)
+
+ /**
+  * Returns the class names of all actors registered in this cluster.
+  */
+ def classNamesForClusteredActors: Array[String] = actorClassNamesForUuids(uuidsForClusteredActors)
+
+ /**
+  * Returns the actor id for the actor with a specific UUID.
+  */
+ def actorIdForUuid(uuid: UUID): String = if (isConnected.isOn) {
+   try { zkClient.readData(actorRegistryActorIdNodePathFor(uuid)).asInstanceOf[String] }
+   catch { case e: ZkNoNodeException => "" }
+ } else ""
+
+ /**
+  * Returns the actor ids for all the actors with a specific UUID.
+  */
+ def actorIdsForUuids(uuids: Array[UUID]): Array[String] = uuids map (actorIdForUuid(_)) filter (_ != "")
+
+ /**
+  * Returns the actor class name for the actor with a specific UUID.
+  */
+ def actorClassNameForUuid(uuid: UUID): String = if (isConnected.isOn) {
+   try { zkClient.readData(actorRegistryActorClassNameNodePathFor(uuid)).asInstanceOf[String] }
+   catch { case e: ZkNoNodeException => "" }
+ } else ""
+
+ /**
+  * Returns the actor class names for all the actors with a specific UUID.
+  */
+ def actorClassNamesForUuids(uuids: Array[UUID]): Array[String] = uuids map (actorClassNameForUuid(_)) filter (_ != "")
+
+ /**
+  * Returns the actor UUIDs for actor ID.
+  */
+ def uuidsForActorId(actorId: String): Array[UUID] = if (isConnected.isOn) {
+   try { zkClient.getChildren(actorIdToUuidsNodePathFor(actorId)).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] }
+   catch { case e: ZkNoNodeException => Array[UUID]() }
+ } else Array.empty[UUID]
+
+ /**
+  * Returns the actor UUIDs for actor class name.
+  */
+ def uuidsForActorClassName(actorClassName: String): Array[UUID] = if (isConnected.isOn) {
+   try { zkClient.getChildren(actorClassNameToUuidsNodePathFor(actorClassName)).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] }
+   catch { case e: ZkNoNodeException => Array[UUID]() }
+ } else Array.empty[UUID]
+
+ /**
+  * Returns the node names of all actors in use with UUID.
+  */
+ def nodesForActorsInUseWithUuid(uuid: UUID): Array[String] = if (isConnected.isOn) {
+   try { zkClient.getChildren(actorLocationsNodePathFor(uuid)).toList.toArray.asInstanceOf[Array[String]] }
+   catch { case e: ZkNoNodeException => Array[String]() }
+ } else Array.empty[String]
+
+ /**
+  * Returns the node names of all actors in use with id.
+  */
+ def nodesForActorsInUseWithId(id: String): Array[String] = if (isConnected.isOn) {
+   flatten {
+     actorUuidsForActorAddress(ActorAddress(null, id, EMPTY_STRING)) map { uuid =>
+       try { zkClient.getChildren(actorLocationsNodePathFor(uuid)).toList.toArray.asInstanceOf[Array[String]] }
+       catch { case e: ZkNoNodeException => Array[String]() }
+     }
+   }
+ } else Array.empty[String]
+
+ /**
+  * Returns the node names of all actors in use with class name.
+  */
+ def nodesForActorsInUseWithClassName(className: String): Array[String] = if (isConnected.isOn) {
+   flatten {
+     actorUuidsForActorAddress(ActorAddress(null, EMPTY_STRING, className)) map { uuid =>
+       try { zkClient.getChildren(actorLocationsNodePathFor(uuid)).toList.toArray.asInstanceOf[Array[String]] }
+       catch { case e: ZkNoNodeException => Array[String]() }
+     }
+   }
+ } else Array.empty[String]
+
+ /**
+  * Returns the UUIDs of all actors in use registered on a specific node.
+  */
+ def uuidsForActorsInUseOnNode(nodeName: String): Array[UUID] = if (isConnected.isOn) {
+   try { zkClient.getChildren(actorsAtAddressNodePathFor(nodeName)).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] }
+   catch { case e: ZkNoNodeException => Array[UUID]() }
+ } else Array.empty[UUID]
+
+ /**
+  * Returns the IDs of all actors in use registered on a specific node.
+  */
+ def idsForActorsInUseOnNode(nodeName: String): Array[String] = if (isConnected.isOn) {
+   val uuids =
+     try { zkClient.getChildren(actorsAtAddressNodePathFor(nodeName)).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] }
+     catch { case e: ZkNoNodeException => Array[UUID]() }
+   actorIdsForUuids(uuids)
+ } else Array.empty[String]
+
+ /**
+  * Returns the class names of all actors in use registered on a specific node.
+  */
+ def classNamesForActorsInUseOnNode(nodeName: String): Array[String] = if (isConnected.isOn) {
+   val uuids =
+     try { zkClient.getChildren(actorsAtAddressNodePathFor(nodeName)).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] }
+     catch { case e: ZkNoNodeException => Array[UUID]() }
+   actorClassNamesForUuids(uuids)
+ } else Array.empty[String]
+
+ /**
+  * Returns the Format for the actor with the given address.
+  */
+ def formatForActor[T <: Actor](actorAddress: ActorAddress): Format[T] = {
+
+   val formats = actorUuidsForActorAddress(actorAddress) map { uuid =>
+     zkClient.readData(actorRegistryFormatNodePathFor(uuid), new Stat).asInstanceOf[Format[T]]
+   }
+
+   if (formats.isEmpty) throw new IllegalStateException("No Format found for [%s]".format(actorAddress))
+   val format = formats.head
+   if (formats exists (_ != format)) throw new IllegalStateException(
+     "Multiple Format classes found for [%s]".format(actorAddress))
+   format
+ }
+
+ /**
+  * Returns the UUIDs and remote socket addresses of all nodes where the actor is in use.
+  */
+ def addressesForActor(actorAddress: ActorAddress): Array[(UUID, InetSocketAddress)] = {
+   try {
+     for {
+       uuid <- actorUuidsForActorAddress(actorAddress)
+       address <- zkClient.getChildren(actorRegistryAddressNodePathFor(uuid)).toList
+     } yield {
+       val tokenizer = new java.util.StringTokenizer(address, ":")
+       val hostname = tokenizer.nextToken // hostname
+       val port = tokenizer.nextToken.toInt // port
+       (uuid, new InetSocketAddress(hostname, port))
+     }
+   } catch {
+     case e: ZkNoNodeException => Array[(UUID, InetSocketAddress)]()
+   }
+ }
+
+ // =======================================
+ // Compute Grid
+ // =======================================
+
+ /**
+  * Send a function 'Function0[Unit]' to be invoked on a random number of nodes (defined by the 'replicationFactor' argument).
+  */
+ def send(f: Function0[Unit], replicationFactor: Int): Unit = {
+   val message = RemoteDaemonMessageProtocol.newBuilder
+     .setMessageType(FUNCTION_FUN0_UNIT)
+     .setPayload(ByteString.copyFrom(Serializer.Java.toBinary(f)))
+     .build
+   replicaConnectionsForReplicationFactor(replicationFactor) foreach (_ ! message)
+ }
+
+ /**
+  * Send a function 'Function0[Any]' to be invoked on a random number of nodes (defined by the 'replicationFactor' argument).
+  * Returns a 'List' with all the 'Future's from the computation.
+  */
+ def send(f: Function0[Any], replicationFactor: Int): List[Future[Any]] = {
+   val message = RemoteDaemonMessageProtocol.newBuilder
+     .setMessageType(FUNCTION_FUN0_ANY)
+     .setPayload(ByteString.copyFrom(Serializer.Java.toBinary(f)))
+     .build
+   val results = replicaConnectionsForReplicationFactor(replicationFactor) map (_ !!! message)
+   results.toList.asInstanceOf[List[Future[Any]]]
+ }
+
+ /**
+  * Send a function 'Function1[Any, Unit]' to be invoked on a random number of nodes (defined by the 'replicationFactor' argument)
+  * with the argument specified.
+  */
+ def send(f: Function1[Any, Unit], arg: Any, replicationFactor: Int): Unit = {
+   val message = RemoteDaemonMessageProtocol.newBuilder
+     .setMessageType(FUNCTION_FUN1_ARG_UNIT)
+     .setPayload(ByteString.copyFrom(Serializer.Java.toBinary((f, arg))))
+     .build
+   replicaConnectionsForReplicationFactor(replicationFactor) foreach (_ ! message)
+ }
+
+ /**
+  * Send a function 'Function1[Any, Any]' to be invoked on a random number of nodes (defined by the 'replicationFactor' argument)
+  * with the argument specified.
+  * Returns a 'List' with all the 'Future's from the computation.
+  */
+ def send(f: Function1[Any, Any], arg: Any, replicationFactor: Int): List[Future[Any]] = {
+   val message = RemoteDaemonMessageProtocol.newBuilder
+     .setMessageType(FUNCTION_FUN1_ARG_ANY)
+     .setPayload(ByteString.copyFrom(Serializer.Java.toBinary((f, arg))))
+     .build
+   val results = replicaConnectionsForReplicationFactor(replicationFactor) map (_ !!! message)
+   results.toList.asInstanceOf[List[Future[Any]]]
+ }
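+
+ // Example (assumes a started and connected ClusterNode 'node'; the functions and
+ // the replication factor are illustrative): broadcast a side-effecting function to
+ // two nodes, then send a value-returning one and collect the futures.
+ //
+ //   node.send(() => println("ping"), replicationFactor = 2)
+ //   val futures = node.send(() => "pong", replicationFactor = 2)
+ //   futures foreach (_.await)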
+
+ // =======================================
+ // Config
+ // =======================================
+
+ def setConfigElement(key: String, bytes: Array[Byte]) {
+   val compressedBytes = if (shouldCompressData) LZF.compress(bytes) else bytes
+   EventHandler.debug(this,
+     "Adding config value under key [%s] in cluster registry".format(key))
+   zkClient.retryUntilConnected(new Callable[Either[Unit, Exception]]() {
+     def call: Either[Unit, Exception] = {
+       try {
+         Left(zkClient.connection.create(configurationNodePathFor(key), compressedBytes, CreateMode.PERSISTENT))
+       } catch { case e: KeeperException.NodeExistsException =>
+         try {
+           Left(zkClient.connection.writeData(configurationNodePathFor(key), compressedBytes))
+         } catch { case e: Exception => Right(e) }
+       }
+     }
+   }) match {
+     case Left(_) => { /* do nothing */ }
+     case Right(exception) => throw exception
+   }
+ }
+
+ /**
+  * Returns the config element for the key or null if no element exists under the key.
+  */
+ def getConfigElement(key: String): Array[Byte] = try {
+   zkClient.connection.readData(configurationNodePathFor(key), new Stat, true)
+ } catch {
+   case e: KeeperException.NoNodeException => null
+ }
+
+ def removeConfigElement(key: String) = ignore[ZkNoNodeException] {
+   EventHandler.debug(this,
+     "Removing config element with key [%s] from cluster registry".format(key))
+   zkClient.deleteRecursive(configurationNodePathFor(key))
+ }
+
+ def getConfigElementKeys: Array[String] = zkClient.getChildren(CONFIGURATION_NODE).toList.toArray.asInstanceOf[Array[String]]
+
+ // =======================================
+ // Queue
+ // =======================================
+
+ def createQueue(rootPath: String, blocking: Boolean = true) = new ZooKeeperQueue(zkClient, rootPath, blocking)
+
+ // =======================================
+ // Barrier
+ // =======================================
+
+ def barrier(name: String, count: Int) =
+   ZooKeeperBarrier(zkClient, nodeAddress.clusterName, name, nodeAddress.nodeName, count)
+
+ def barrier(name: String, count: Int, timeout: Duration) =
+   ZooKeeperBarrier(zkClient, nodeAddress.clusterName, name, nodeAddress.nodeName, count, timeout)
+
+ // =======================================
+ // Private
+ // =======================================
+
+ private[cluster] def membershipNodePathFor(node: String) = "%s/%s".format(MEMBERSHIP_NODE, node)
+
+ private[cluster] def configurationNodePathFor(key: String) = "%s/%s".format(CONFIGURATION_NODE, key)
+
+ private[cluster] def actorIdToUuidsNodePathFor(actorId: String) = "%s/%s".format(ACTOR_ID_TO_UUIDS_NODE, actorId.replace('.', '_'))
+ private[cluster] def actorClassNameToUuidsNodePathFor(actorClassName: String) = "%s/%s".format(ACTOR_CLASS_TO_UUIDS_NODE, actorClassName)
+
+ private[cluster] def actorLocationsNodePathFor(actorUuid: UUID) = "%s/%s".format(ACTOR_LOCATIONS_NODE, actorUuid)
+ private[cluster] def actorLocationsNodePathFor(actorUuid: UUID, node: NodeAddress) =
+   "%s/%s/%s".format(ACTOR_LOCATIONS_NODE, actorUuid, node.nodeName)
+
+ private[cluster] def actorsAtAddressNodePathFor(node: String) = "%s/%s".format(ACTORS_AT_ADDRESS_NODE, node)
+ private[cluster] def actorAtAddressNodePathFor(node: String, uuid: UUID) = "%s/%s/%s".format(ACTORS_AT_ADDRESS_NODE, node, uuid)
+
+ private[cluster] def actorRegistryNodePathFor(actorUuid: UUID) = "%s/%s".format(ACTOR_REGISTRY_NODE, actorUuid)
+ private[cluster] def actorRegistryFormatNodePathFor(actorUuid: UUID) = "%s/%s".format(actorRegistryNodePathFor(actorUuid), "format")
+ private[cluster] def actorRegistryActorIdNodePathFor(actorUuid: UUID) = "%s/%s".format(actorRegistryNodePathFor(actorUuid), "id")
+ private[cluster] def actorRegistryActorClassNameNodePathFor(actorUuid: UUID) = "%s/%s".format(actorRegistryNodePathFor(actorUuid), "class")
+ private[cluster] def actorRegistryAddressNodePathFor(actorUuid: UUID): String = "%s/%s".format(actorRegistryNodePathFor(actorUuid), "address")
+ private[cluster] def actorRegistryAddressNodePathFor(actorUuid: UUID, address: InetSocketAddress): String =
+   "%s/%s:%s".format(actorRegistryAddressNodePathFor(actorUuid), address.getHostName, address.getPort)
+
+ private[cluster] def initializeNode = {
+   EventHandler.info(this, "Initializing cluster node [%s]".format(nodeAddress))
+   createRootClusterNode
+   val isLeader = joinLeaderElection
+   if (isLeader) createNodeStructureIfNeeded
+   registerListeners
+   joinMembershipNode
+   joinActorsAtAddressNode
+   fetchMembershipChildrenNodes
+   EventHandler.info(this, "Cluster node [%s] started successfully".format(nodeAddress))
+ }
+
+ private[cluster] def addressForNode(node: String): InetSocketAddress = {
+   val address = zkClient.readData(membershipNodePathFor(node)).asInstanceOf[String]
+   val tokenizer = new java.util.StringTokenizer(address, ":")
+   tokenizer.nextToken // cluster name
+   tokenizer.nextToken // node name
+   val hostname = tokenizer.nextToken // hostname
+   val port = tokenizer.nextToken.toInt // port
+   new InetSocketAddress(hostname, port)
+ }
+
+ private def actorUuidsForActorAddress(actorAddress: ActorAddress): Array[UUID] = {
+   val actorUuid = actorAddress.actorUuid
+   val actorId = actorAddress.actorId
+   val actorClassName = actorAddress.actorClassName
+   if ((actorUuid ne null) && actorId == EMPTY_STRING && actorClassName == EMPTY_STRING) Array(actorUuid)
+   else if (actorId != EMPTY_STRING && (actorUuid eq null) && actorClassName == EMPTY_STRING) uuidsForActorId(actorId)
+   else if (actorClassName != EMPTY_STRING && (actorUuid eq null) && actorId == EMPTY_STRING) uuidsForActorClassName(actorClassName)
+   else throw new IllegalArgumentException("You need to pass in exactly one of 'actorUuid', 'actorId' or 'actorClassName'")
+ } filter (_ ne null)
+
+ /**
+  * Returns a random set of replica connections of size 'replicationFactor'.
+  * The default replicationFactor is 0, which returns the empty set.
+  */
+ private def replicaConnectionsForReplicationFactor(replicationFactor: Int = 0): Set[ActorRef] = {
+   var replicas = HashSet.empty[ActorRef]
+   if (replicationFactor < 1) return replicas
+
+   connectToAllReplicas
+
+   val numberOfReplicas = replicaConnections.size
+   val replicaConnectionsAsArray = replicaConnections.toList map { case (node, (address, actorRef)) => actorRef } // the ActorRefs
+
+   if (numberOfReplicas < replicationFactor) {
+     throw new IllegalArgumentException(
+       "Replication factor [" + replicationFactor + "] is greater than the number of available nodes [" + numberOfReplicas + "]")
+   } else if (numberOfReplicas == replicationFactor) {
+     replicas = replicas ++ replicaConnectionsAsArray
+   } else {
+     val random = new java.util.Random(System.currentTimeMillis)
+     while (replicas.size < replicationFactor) {
+       val index = random.nextInt(numberOfReplicas)
+       replicas = replicas + replicaConnectionsAsArray(index)
+     }
+   }
+   replicas
+ }
+
+ /**
+  * Connect to all available replicas (unless already connected).
+  */
+ private def connectToAllReplicas = {
+   membershipNodes foreach { node =>
+     if (!replicaConnections.contains(node)) {
+       val address = addressForNode(node)
+       val clusterDaemon = Actor.remote.actorFor(RemoteClusterDaemon.ID, address.getHostName, address.getPort)
+       replicaConnections.put(node, (address, clusterDaemon))
+     }
+   }
+ }
+
+ private[cluster] def joinMembershipNode = {
+   nodeNameToAddress.put(nodeAddress.nodeName, remoteServerAddress)
+   try {
+     EventHandler.info(this,
+       "Joining cluster as membership node [%s] on [%s]".format(nodeAddress, membershipNodePath))
+     zkClient.createEphemeral(membershipNodePath, nodeAddress.toString)
+   } catch {
+     case e: ZkNodeExistsException =>
+       val error = new ClusterException("Can't join the cluster. The node name [" + nodeAddress.nodeName + "] is already in use by another node")
+       EventHandler.error(error, this, "")
+       throw error
+   }
+ }
+
+ private[cluster] def joinActorsAtAddressNode =
+   ignore[ZkNodeExistsException](zkClient.createPersistent(actorsAtAddressNodePathFor(nodeAddress.nodeName)))
+
+ private[cluster] def joinLeaderElection: Boolean = {
+   EventHandler.info(this, "Node [%s] is joining leader election".format(nodeAddress.nodeName))
+   leaderLock.lock
+ }
+
+ private[cluster] def failOverConnections(from: InetSocketAddress, to: InetSocketAddress) {
+   clusterActorRefs.values(from) foreach (_.failOver(from, to))
+ }
+
+ private[cluster] def migrateFromFailedNodes[T <: Actor](currentSetOfClusterNodes: List[String]) = {
+   findFailedNodes(currentSetOfClusterNodes).foreach { failedNodeName =>
+
+     val allNodes = locallyCachedMembershipNodes.toList
+     val myIndex = allNodes.indexWhere(_.endsWith(nodeAddress.nodeName))
+     val failedNodeIndex = allNodes.indexWhere(_ == failedNodeName)
+
+     // Migrate to the successor of the failed node (using a sorted circular list of the node names)
+     if ((failedNodeIndex == 0 && myIndex == locallyCachedMembershipNodes.size - 1) || // No leftmost successor exists, check the tail
+         (failedNodeIndex == myIndex + 1)) { // Am I the leftmost successor?
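+       // Worked example (illustrative): with sorted member nodes List("n1", "n2", "n3"),
+       // if "n2" fails (index 1) then "n1" (index 0) matches failedNodeIndex == myIndex + 1
+       // and takes over "n2"'s actors; if "n1" fails (index 0) the first clause wraps around
+       // and the node at the last index ("n3") takes over, so exactly one node migrates.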
+ + // Yes I am the node to migrate the actor to (can only be one in the cluster) + val actorUuidsForFailedNode = zkClient.getChildren(actorsAtAddressNodePathFor(failedNodeName)) + EventHandler.debug(this, + "Migrating actors from failed node [%s] to node [%s]: Actor UUIDs [%s]" + .format(failedNodeName, nodeAddress.nodeName, actorUuidsForFailedNode)) + + actorUuidsForFailedNode.foreach { actorUuid => + EventHandler.debug(this, + "Cluster node [%s] has failed, migrating actor with UUID [%s] to [%s]" + .format(failedNodeName, actorUuid, nodeAddress.nodeName)) + + val actorAddress = ActorAddress(actorUuid = stringToUuid(actorUuid)) + migrateWithoutCheckingThatActorResidesOnItsHomeNode( // since the ephemeral node is already gone, so can't check + NodeAddress(nodeAddress.clusterName, failedNodeName), nodeAddress, actorAddress) + + implicit val format: Format[T] = formatForActor(actorAddress) + use(actorAddress) foreach { actor => + // FIXME remove ugly reflection when we have 1.0 final which has 'fromBinary(byte, homeAddress)(format)' + //actor.homeAddress = remoteServerAddress + val homeAddress = classOf[LocalActorRef].getDeclaredField("homeAddress") + homeAddress.setAccessible(true) + homeAddress.set(actor, Some(remoteServerAddress)) + + remoteService.register(actorUuid, actor) + } + } + + // notify all available nodes that they should fail-over all connections from 'from' to 'to' + val from = nodeNameToAddress.get(failedNodeName) + val to = remoteServerAddress + val command = RemoteDaemonMessageProtocol.newBuilder + .setMessageType(FAIL_OVER_CONNECTIONS) + .setPayload(ByteString.copyFrom(Serializer.Java.toBinary((from, to)))) + .build + membershipNodes foreach { node => + replicaConnections.get(node) foreach { case (_, connection) => + connection ! command + } + } + } + } + } + + /** + * Used when the ephemeral "home" node is already gone, so we can't check. 
+ */ + private def migrateWithoutCheckingThatActorResidesOnItsHomeNode( + from: NodeAddress, to: NodeAddress, actorAddress: ActorAddress) { + + actorUuidsForActorAddress(actorAddress) map { uuid => + val actorAddress = ActorAddress(actorUuid = uuid) + + if (!isInUseOnNode(actorAddress, to)) { + release(actorAddress) + + val newAddress = new InetSocketAddress(to.hostname, to.port) + ignore[ZkNodeExistsException](zkClient.createPersistent(actorRegistryAddressNodePathFor(uuid))) + ignore[ZkNodeExistsException](zkClient.createEphemeral(actorRegistryAddressNodePathFor(uuid, newAddress))) + ignore[ZkNodeExistsException](zkClient.createEphemeral(actorLocationsNodePathFor(uuid, to))) + ignore[ZkNodeExistsException](zkClient.createPersistent(actorAtAddressNodePathFor(nodeAddress.nodeName, uuid))) + + ignore[ZkNoNodeException](zkClient.delete(actorLocationsNodePathFor(uuid, from))) + ignore[ZkNoNodeException](zkClient.delete(actorAtAddressNodePathFor(from.nodeName, uuid))) + + // 'use' (check out) actor on the remote 'to' node + useActorOnNode(to.nodeName, uuid) + } + } + } + + private[cluster] def findFailedNodes(nodes: List[String]): List[String] = + (locallyCachedMembershipNodes diff Set(nodes: _*)).toList + + private[cluster] def findNewlyConnectedMembershipNodes(nodes: List[String]): List[String] = + (Set(nodes: _*) diff locallyCachedMembershipNodes).toList + + private[cluster] def findNewlyDisconnectedMembershipNodes(nodes: List[String]): List[String] = + (locallyCachedMembershipNodes diff Set(nodes: _*)).toList + + private[cluster] def findNewlyConnectedAvailableNodes(nodes: List[String]): List[String] = + (Set(nodes: _*) diff locallyCachedMembershipNodes).toList + + private[cluster] def findNewlyDisconnectedAvailableNodes(nodes: List[String]): List[String] = + (locallyCachedMembershipNodes diff Set(nodes: _*)).toList + + private def createRootClusterNode: Unit = ignore[ZkNodeExistsException] { + zkClient.create(CLUSTER_NODE, null, CreateMode.PERSISTENT) + EventHandler.info(this, "Created node [%s]".format(CLUSTER_NODE)) + } + + private def createNodeStructureIfNeeded = { + baseNodes.foreach { path => + try { + zkClient.create(path, null, CreateMode.PERSISTENT) + EventHandler.debug(this, "Created node [%s]".format(path)) + } catch { + case e: ZkNodeExistsException => {} // do nothing + case e => + val error = new ClusterException(e.toString) + EventHandler.error(error, this, "") + throw error + } + } + } + + private def registerListeners = { + zkClient.subscribeStateChanges(stateListener) + zkClient.subscribeChildChanges(MEMBERSHIP_NODE, membershipListener) + } + + private def fetchMembershipChildrenNodes = { + val membershipChildren = zkClient.getChildren(MEMBERSHIP_NODE) + locallyCachedMembershipNodes.clear + membershipChildren.iterator.foreach(locallyCachedMembershipNodes.add) + } + + private def createMBean = { + val clusterMBean = new StandardMBean(classOf[ClusterNodeMBean]) with ClusterNodeMBean { + import Cluster._ + + def start = self.start + def stop = self.stop + + def disconnect = self.disconnect + def reconnect = self.reconnect + def resign = self.resign + + def isConnected = self.isConnected.isOn + + def getRemoteServerHostname = self.nodeAddress.hostname + def getRemoteServerPort = self.nodeAddress.port + + def getNodeName = self.nodeAddress.nodeName + def getClusterName = self.nodeAddress.clusterName + def getZooKeeperServerAddresses = self.zkServerAddresses + + def getMemberNodes = self.locallyCachedMembershipNodes.iterator.map(_.toString).toArray + def getLeader = 
self.leader.toString + + def getUuidsForActorsInUse = self.uuidsForActorsInUse.map(_.toString).toArray + def getIdsForActorsInUse = self.idsForActorsInUse.map(_.toString).toArray + def getClassNamesForActorsInUse = self.classNamesForActorsInUse.map(_.toString).toArray + + def getUuidsForClusteredActors = self.uuidsForClusteredActors.map(_.toString).toArray + def getIdsForClusteredActors = self.idsForClusteredActors.map(_.toString).toArray + def getClassNamesForClusteredActors = self.classNamesForClusteredActors.map(_.toString).toArray + + def getNodesForActorInUseWithUuid(uuid: String) = self.nodesForActorsInUseWithUuid(stringToUuid(uuid)) + def getNodesForActorInUseWithId(id: String) = self.nodesForActorsInUseWithId(id) + def getNodesForActorInUseWithClassName(className: String) = self.nodesForActorsInUseWithClassName(className) + + def getUuidsForActorsInUseOnNode(nodeName: String) = self.uuidsForActorsInUseOnNode(nodeName).map(_.toString).toArray + def getIdsForActorsInUseOnNode(nodeName: String) = self.idsForActorsInUseOnNode(nodeName).map(_.toString).toArray + def getClassNamesForActorsInUseOnNode(nodeName: String) = self.classNamesForActorsInUseOnNode(nodeName).map(_.toString).toArray + + def setConfigElement(key: String, value: String) = self.setConfigElement(key, value.getBytes("UTF-8")) + def getConfigElement(key: String) = new String(self.getConfigElement(key), "UTF-8") + def removeConfigElement(key: String) = self.removeConfigElement(key) + def getConfigElementKeys = self.getConfigElementKeys.toArray + } + + JMX.register(clusterJmxObjectName, clusterMBean) + Monitoring.registerLocalMBean(clusterJmxObjectName, clusterMBean) + } +} + +/** + * @author Jonas Bonér + */ +class MembershipChildListener(self: ClusterNode) extends IZkChildListener with ErrorHandler { + def handleChildChange(parentPath: String, currentChilds: JList[String]) = withErrorHandler { + if (currentChilds ne null) { + val childList = currentChilds.toList + if (!childList.isEmpty) EventHandler.debug(this, + "MembershipChildListener at [%s] has children [%s]" + .format(self.nodeAddress.nodeName, childList.mkString(" "))) + self.findNewlyConnectedMembershipNodes(childList) foreach { name => + self.nodeNameToAddress.put(name, self.addressForNode(name)) // update 'nodename-address' map + self.publish(Cluster.NodeConnected(name)) + } + + self.findNewlyDisconnectedMembershipNodes(childList) foreach { name => + self.nodeNameToAddress.remove(name) // update 'nodename-address' map + self.publish(Cluster.NodeDisconnected(name)) + } + + self.locallyCachedMembershipNodes.clear + childList.foreach(self.locallyCachedMembershipNodes.add) + } + } +} + +/** + * @author Jonas Bonér + */ +class StateListener(self: ClusterNode) extends IZkStateListener { + def handleStateChanged(state: KeeperState) = state match { + + case KeeperState.SyncConnected => + EventHandler.debug(this, "Cluster node [%s] - Connected".format(self.nodeAddress)) + self.publish(Cluster.ThisNode.Connected) + + case KeeperState.Disconnected => + EventHandler.debug(this, "Cluster node [%s] - Disconnected".format(self.nodeAddress)) + self.publish(Cluster.ThisNode.Disconnected) + + case KeeperState.Expired => + EventHandler.debug(this, "Cluster node [%s] - Expired".format(self.nodeAddress)) + self.publish(Cluster.ThisNode.Expired) + } + + /** + * Re-initialize after the zookeeper session has expired and a new session has been created. 
+ */ + def handleNewSession = { + EventHandler.debug(this, "Session expired re-initializing node [%s]".format(self.nodeAddress)) + self.initializeNode + self.publish(Cluster.NewSession) + } +} + +/** + * @author Jonas Bonér + */ +trait ErrorHandler { + def withErrorHandler[T](body: => T) = { + try { + body + } catch { + case e: org.I0Itec.zkclient.exception.ZkInterruptedException => { /* ignore */ } + case e: Throwable => + EventHandler.error(e, this, e.toString) + throw e + } + } +} + +/** + * @author Jonas Bonér + */ +object RemoteClusterDaemon { + val ID = "akka:cloud:cluster:daemon" + + // FIXME configure functionServerDispatcher to what? + val functionServerDispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("akka:cloud:cluster:function:server").build +} + +/** + * @author Jonas Bonér + */ +class RemoteClusterDaemon(cluster: ClusterNode) extends Actor { + import RemoteClusterDaemon._ + import Cluster._ + + self.id = ID + self.dispatcher = Dispatchers.newThreadBasedDispatcher(self) + + def receive: Receive = { + case message: RemoteDaemonMessageProtocol => + EventHandler.debug(this, "Received command to RemoteClusterDaemon [%s]".format(message)) + message.getMessageType match { + + case USE => + if (message.hasActorUuid) { + val uuid = uuidProtocolToUuid(message.getActorUuid) + val address = ActorAddress(actorUuid = uuid) + implicit val format: Format[Actor] = cluster formatForActor address + val actors = cluster use address + } else if (message.hasActorId) { + val id = message.getActorId + val address = ActorAddress(actorId = id) + implicit val format: Format[Actor] = cluster formatForActor address + val actors = cluster use address + } else if (message.hasActorClassName) { + val actorClassName = message.getActorClassName + val address = ActorAddress(actorClassName = actorClassName) + implicit val format: Format[Actor] = cluster formatForActor address + val actors = cluster use address + } else EventHandler.warning(this, + "None of 'actorUuid', 'actorId' or 'actorClassName' is specified, ignoring remote cluster daemon command [%s]".format(message)) + + case RELEASE => + if (message.hasActorUuid) { cluster release ActorAddress(actorUuid = uuidProtocolToUuid(message.getActorUuid)) } + else if (message.hasActorId) { cluster release ActorAddress(actorId = message.getActorId) } + else if (message.hasActorClassName) { cluster release ActorAddress(actorClassName = message.getActorClassName) } + else EventHandler.warning(this, + "None of 'actorUuid', 'actorId' or 'actorClassName' is specified, ignoring remote cluster daemon command [%s]".format(message)) + + case START => cluster.start + + case STOP => cluster.stop + + case DISCONNECT => cluster.disconnect + + case RECONNECT => cluster.reconnect + + case RESIGN => cluster.resign + + case FAIL_OVER_CONNECTIONS => + val (from, to) = payloadFor(message, classOf[(InetSocketAddress, InetSocketAddress)]) + cluster.failOverConnections(from, to) + + case FUNCTION_FUN0_UNIT => + actorOf(new Actor() { + self.dispatcher = functionServerDispatcher + def receive = { + case f: Function0[Unit] => try { f() } finally { self.stop } + } + }).start ! 
payloadFor(message, classOf[Function0[Unit]]) + + case FUNCTION_FUN0_ANY => + actorOf(new Actor() { + self.dispatcher = functionServerDispatcher + def receive = { + case f: Function0[Any] => try { self.reply(f()) } finally { self.stop } + } + }).start forward payloadFor(message, classOf[Function0[Any]]) + + case FUNCTION_FUN1_ARG_UNIT => + actorOf(new Actor() { + self.dispatcher = functionServerDispatcher + def receive = { + case t: Tuple2[Function1[Any, Unit], Any] => try { t._1(t._2) } finally { self.stop } + } + }).start ! payloadFor(message, classOf[Tuple2[Function1[Any, Unit], Any]]) + + case FUNCTION_FUN1_ARG_ANY => + actorOf(new Actor() { + self.dispatcher = functionServerDispatcher + def receive = { + case t: Tuple2[Function1[Any, Any], Any] => try { self.reply(t._1(t._2)) } finally { self.stop } + } + }).start forward payloadFor(message, classOf[Tuple2[Function1[Any, Any], Any]]) + } + + case unknown => EventHandler.warning(this, "Unknown message [%s]".format(unknown)) + } + + private def payloadFor[T](message: RemoteDaemonMessageProtocol, clazz: Class[T]): T = { + Serializer.Java.fromBinary(message.getPayload.toByteArray, Some(clazz)).asInstanceOf[T] + } +} diff --git a/akka-cluster/src/main/scala/akka/cloud/cluster/ClusterActorRef.scala b/akka-cluster/src/main/scala/akka/cloud/cluster/ClusterActorRef.scala new file mode 100644 index 0000000000..d1e1f69759 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cloud/cluster/ClusterActorRef.scala @@ -0,0 +1,78 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ +package akka.cloud.cluster + +import Cluster._ + +import akka.actor._ +import akka.actor.Actor._ +import akka.event.EventHandler +import akka.dispatch.CompletableFuture + +import java.net.InetSocketAddress +import java.util.concurrent.atomic.AtomicReference + +import com.eaio.uuid.UUID + +/** + * @author Jonas Bonér + */ +class ClusterActorRef private[akka] ( + actorAddresses: Array[Tuple2[UUID, InetSocketAddress]], + val serviceId: String, + actorClassName: String, + hostname: String, + port: Int, + timeout: Long, + actorType: ActorType, + val replicationStrategy: ReplicationStrategy) + extends RemoteActorRef(serviceId, actorClassName, hostname, port, timeout, None, actorType) { + this: ClusterActorRef with Router.Router => + + EventHandler.debug(this, "Creating a ClusterActorRef [%s] for Actor [%s] on [%s:%s]" + .format(serviceId, actorClassName, hostname, port)) + + private[akka] val addresses = new AtomicReference[Map[InetSocketAddress, ActorRef]]( + createConnections(actorAddresses, actorClassName)) + + def connections: Map[InetSocketAddress, ActorRef] = addresses.get.toMap + + override def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]): Unit = + route(message)(senderOption) + + override def postMessageToMailboxAndCreateFutureResultWithTimeout[T]( + message: Any, + timeout: Long, + senderOption: Option[ActorRef], + senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T] = + route[T](message, timeout)(senderOption).asInstanceOf[CompletableFuture[T]] + + private[akka] def failOver(from: InetSocketAddress, to: InetSocketAddress) { + addresses set ( + addresses.get map { case (address, actorRef) => + if (address == from) { + actorRef.stop + (to, createRemoteActorRef(actorRef.uuid, to)) + } else (address, actorRef) + } + ) + } + + private def createConnections( + addresses: Array[Tuple2[UUID, InetSocketAddress]], + actorClassName: String): Map[InetSocketAddress, ActorRef] = { + var connections = Map.empty[InetSocketAddress, ActorRef] 
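+    // one RemoteActorRef is created per (uuid, address) pair; failOver(..) later
+    // replaces individual entries in this map when a node dies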
+ addresses foreach { case (uuid, address) => + connections = connections + (address -> createRemoteActorRef(uuid, address)) + } + connections + } + + private def createRemoteActorRef(uuid: UUID, address: InetSocketAddress) = { + RemoteActorRef( + UUID_PREFIX + uuidToString(uuid), actorClassName, // clustered refs are always registered and looked up by UUID + address.getHostName, address.getPort, + Actor.TIMEOUT, None, actorType) + } +} diff --git a/akka-cluster/src/main/scala/akka/cloud/cluster/MurmurHash.scala b/akka-cluster/src/main/scala/akka/cloud/cluster/MurmurHash.scala new file mode 100644 index 0000000000..b310cbb665 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cloud/cluster/MurmurHash.scala @@ -0,0 +1,195 @@ +/* __ *\ +** ________ ___ / / ___ Scala API ** +** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL ** +** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ ** +** /____/\___/_/ |_/____/_/ | | ** +** |/ ** +\* */ + +package akka.cloud.cluster + +/** An implementation of Austin Appleby's MurmurHash 3.0 algorithm + * (32 bit version); reference: http://code.google.com/p/smhasher + * + * This is the hash used by collections and case classes (including + * tuples). + * + * @author Rex Kerr + * @version 2.9 + * @since 2.9 + */ + +import java.lang.Integer.{ rotateLeft => rotl } + +/** A class designed to generate well-distributed non-cryptographic + * hashes. It is designed to be passed to a collection's foreach method, + * or can take individual hash values with append. Its own hash code is + * set equal to the hash code of whatever it is hashing. + */ +class MurmurHash[@specialized(Int,Long,Float,Double) T](seed: Int) extends (T => Unit) { + import MurmurHash._ + + private var h = startHash(seed) + private var c = hiddenMagicA + private var k = hiddenMagicB + private var hashed = false + private var hashvalue = h + + /** Begin a new hash using the same seed. */ + def reset() { + h = startHash(seed) + c = hiddenMagicA + k = hiddenMagicB + hashed = false + } + + /** Incorporate the hash value of one item. */ + def apply(t: T) { + h = extendHash(h,t.##,c,k) + c = nextMagicA(c) + k = nextMagicB(k) + hashed = false + } + + /** Incorporate a known hash value. */ + def append(i: Int) { + h = extendHash(h,i,c,k) + c = nextMagicA(c) + k = nextMagicB(k) + hashed = false + } + + /** Retrieve the hash value */ + def hash = { + if (!hashed) { + hashvalue = finalizeHash(h) + hashed = true + } + hashvalue + } + override def hashCode = hash +} + +/** An object designed to generate well-distributed non-cryptographic + * hashes. It is designed to hash a collection of integers; along with + * the integers to hash, it generates two magic streams of integers to + * increase the distribution of repetitive input sequences. Thus, + * three methods need to be called at each step (to start and to + * incorporate a new integer) to update the values. Only one method + * needs to be called to finalize the hash. + */ + +object MurmurHash { + // Magic values used for MurmurHash's 32 bit hash. + // Don't change these without consulting a hashing expert! 
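+
+ // Usage sketch (illustrative, using only the methods defined below): hashing the
+ // values 1, 2, 3 incrementally, with an arbitrary seed.
+ //
+ //   var h = MurmurHash.startHash(42)
+ //   var c = MurmurHash.startMagicA
+ //   var k = MurmurHash.startMagicB
+ //   Seq(1, 2, 3) foreach { i =>
+ //     h = MurmurHash.extendHash(h, i, c, k)
+ //     c = MurmurHash.nextMagicA(c)
+ //     k = MurmurHash.nextMagicB(k)
+ //   }
+ //   val result = MurmurHash.finalizeHash(h)
+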
+ final private val visibleMagic = 0x971e137b + final private val hiddenMagicA = 0x95543787 + final private val hiddenMagicB = 0x2ad7eb25 + final private val visibleMixer = 0x52dce729 + final private val hiddenMixerA = 0x7b7d159c + final private val hiddenMixerB = 0x6bce6396 + final private val finalMixer1 = 0x85ebca6b + final private val finalMixer2 = 0xc2b2ae35 + + // Arbitrary values used for hashing certain classes + final private val seedString = 0xf7ca7fd2 + final private val seedArray = 0x3c074a61 + + /** The first 23 magic integers from the first stream are stored here */ + val storedMagicA = + Iterator.iterate(hiddenMagicA)(nextMagicA).take(23).toArray + + /** The first 23 magic integers from the second stream are stored here */ + val storedMagicB = + Iterator.iterate(hiddenMagicB)(nextMagicB).take(23).toArray + + /** Begin a new hash with a seed value. */ + def startHash(seed: Int) = seed ^ visibleMagic + + /** The initial magic integers in the first stream. */ + def startMagicA = hiddenMagicA + + /** The initial magic integer in the second stream. */ + def startMagicB = hiddenMagicB + + /** Incorporates a new value into an existing hash. + * + * @param hash the prior hash value + * @param value the new value to incorporate + * @param magicA a magic integer from the stream + * @param magicB a magic integer from a different stream + * @return the updated hash value + */ + def extendHash(hash: Int, value: Int, magicA: Int, magicB: Int) = { + (hash ^ rotl(value*magicA,11)*magicB)*3 + visibleMixer + } + + /** Given a magic integer from the first stream, compute the next */ + def nextMagicA(magicA: Int) = magicA*5 + hiddenMixerA + + /** Given a magic integer from the second stream, compute the next */ + def nextMagicB(magicB: Int) = magicB*5 + hiddenMixerB + + /** Once all hashes have been incorporated, this performs a final mixing */ + def finalizeHash(hash: Int) = { + var i = (hash ^ (hash>>>16)) + i *= finalMixer1 + i ^= (i >>> 13) + i *= finalMixer2 + i ^= (i >>> 16) + i + } + + /** Compute a high-quality hash of an array */ + def arrayHash[@specialized T](a: Array[T]) = { + var h = startHash(a.length * seedArray) + var c = hiddenMagicA + var k = hiddenMagicB + var j = 0 + while (j < a.length) { + h = extendHash(h, a(j).##, c, k) + c = nextMagicA(c) + k = nextMagicB(k) + j += 1 + } + finalizeHash(h) + } + + /** Compute a high-quality hash of a string */ + def stringHash(s: String) = { + var h = startHash(s.length * seedString) + var c = hiddenMagicA + var k = hiddenMagicB + var j = 0 + while (j+1 < s.length) { + val i = (s.charAt(j)<<16) + s.charAt(j+1); + h = extendHash(h,i,c,k) + c = nextMagicA(c) + k = nextMagicB(k) + j += 2 + } + if (j < s.length) h = extendHash(h,s.charAt(j),c,k) + finalizeHash(h) + } + + /** Compute a hash that is symmetric in its arguments--that is, + * where the order of appearance of elements does not matter. + * This is useful for hashing sets, for example. 
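+ * E.g. symmetricHash(List(1, 2, 3), seed) == symmetricHash(List(3, 2, 1), seed)
+ * for any seed, since only the multiset of elements contributes to the result.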
+ */
+ def symmetricHash[T](xs: TraversableOnce[T], seed: Int) = {
+   var a,b,n = 0
+   var c = 1
+   xs.foreach(i => {
+     val h = i.##
+     a += h
+     b ^= h
+     if (h != 0) c *= h
+     n += 1
+   })
+   var h = startHash(seed * n)
+   h = extendHash(h, a, storedMagicA(0), storedMagicB(0))
+   h = extendHash(h, b, storedMagicA(1), storedMagicB(1))
+   h = extendHash(h, c, storedMagicA(2), storedMagicB(2))
+   finalizeHash(h)
+ }
+}
diff --git a/akka-cluster/src/main/scala/akka/cloud/cluster/Routing.scala b/akka-cluster/src/main/scala/akka/cloud/cluster/Routing.scala
new file mode 100644
index 0000000000..335d23c6f4
--- /dev/null
+++ b/akka-cluster/src/main/scala/akka/cloud/cluster/Routing.scala
@@ -0,0 +1,132 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+package akka.cloud.cluster
+
+import Cluster._
+
+import akka.actor._
+import akka.actor.Actor._
+import akka.dispatch.Future
+import akka.AkkaException
+
+import java.net.InetSocketAddress
+
+import com.eaio.uuid.UUID
+
+class RoutingException(message: String) extends AkkaException(message)
+
+/**
+ * @author Jonas Bonér
+ */
+object Router {
+  sealed trait RouterType
+  object Direct extends RouterType
+  object Random extends RouterType
+  object RoundRobin extends RouterType
+
+  def newRouter(
+    routerType: RouterType,
+    addresses: Array[Tuple2[UUID, InetSocketAddress]],
+    serviceId: String,
+    actorClassName: String,
+    hostname: String,
+    port: Int,
+    timeout: Long,
+    actorType: ActorType,
+    replicationStrategy: ReplicationStrategy = ReplicationStrategy.WriteThrough): ClusterActorRef = {
+
+    routerType match {
+      case Direct => new ClusterActorRef(
+        addresses, serviceId, actorClassName,
+        hostname, port, timeout,
+        actorType, replicationStrategy) with Direct
+
+      case Random => new ClusterActorRef(
+        addresses, serviceId, actorClassName,
+        hostname, port, timeout,
+        actorType, replicationStrategy) with Random
+
+      case RoundRobin => new ClusterActorRef(
+        addresses, serviceId, actorClassName,
+        hostname, port, timeout,
+        actorType, replicationStrategy) with RoundRobin
+    }
+  }
+
+  /**
+   * @author Jonas Bonér
+   */
+  trait Router {
+    def connections: Map[InetSocketAddress, ActorRef]
+
+    def route(message: Any)(implicit sender: Option[ActorRef]): Unit
+
+    def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T]
+  }
+
+  /**
+   * @author Jonas Bonér
+   */
+  trait Direct extends Router {
+    lazy val connection: Option[ActorRef] = {
+      if (connections.size == 0) throw new IllegalStateException("DirectRouter needs exactly one replica connection, found [0]")
+      connections.toList.map({ case (address, actor) => actor }).headOption
+    }
+
+    def route(message: Any)(implicit sender: Option[ActorRef]): Unit =
+      if (connection.isDefined) connection.get.!(message)(sender)
+      else throw new RoutingException("No node connections for router")
+
+    def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T] =
+      if (connection.isDefined) connection.get.!!!(message, timeout)(sender)
+      else throw new RoutingException("No node connections for router")
+  }
+
+  /**
+   * @author Jonas Bonér
+   */
+  trait Random extends Router {
+    private val random = new java.util.Random(System.currentTimeMillis)
+
+    def route(message: Any)(implicit sender: Option[ActorRef]): Unit =
+      if (next.isDefined) next.get.!(message)(sender)
+      else throw new RoutingException("No node connections for router")
+
+    def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T] =
+      if (next.isDefined) next.get.!!!(message, timeout)(sender)
+ else throw new RoutingException("No node connections for router") + + private def next: Option[ActorRef] = { + val nrOfConnections = connections.size + if (nrOfConnections == 0) None + else Some(connections.toArray.apply(random.nextInt(nrOfConnections))._2) + } + } + + /** + * @author Jonas Bonér + */ + trait RoundRobin extends Router { + private def items: List[ActorRef] = connections.toList.map({ case (address, actor) => actor }) + + @volatile + private var current = items + + def route(message: Any)(implicit sender: Option[ActorRef]): Unit = + if (next.isDefined) next.get.!(message)(sender) + else throw new RoutingException("No node connections for router") + + def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T] = + if (next.isDefined) next.get.!!!(message, timeout)(sender) + else throw new RoutingException("No node connections for router") + + private def hasNext = items != Nil + + private def next: Option[ActorRef] = { + val rest = if (current == Nil) items else current + current = rest.tail + rest.headOption + } + } +} diff --git a/akka-cluster/src/main/scala/akka/cloud/cluster/replication/ReplicatedClusterRef.scala b/akka-cluster/src/main/scala/akka/cloud/cluster/replication/ReplicatedClusterRef.scala new file mode 100644 index 0000000000..528e1c9d84 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cloud/cluster/replication/ReplicatedClusterRef.scala @@ -0,0 +1,93 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ +package akka.cloud.cluster + +import Cluster._ + +import akka.actor._ +import akka.remote.MessageSerializer +import akka.event.EventHandler +import akka.config.Supervision._ +import akka.dispatch._ + +import java.net.InetSocketAddress +import java.util.concurrent.atomic.AtomicReference +import java.util.{ Map => JMap } + +/** + * @author Jonas Bonér + */ +trait Replicable { this: Actor => +} + +/** + * @author Jonas Bonér + */ +sealed trait ReplicationStrategy +object ReplicationStrategy { + case object Transient extends ReplicationStrategy + case object WriteThrough extends ReplicationStrategy + case object WriteBehind extends ReplicationStrategy +} + +/** + * @author Jonas Bonér + */ +class ReplicatedActorRef private[akka] (actorRef: ActorRef) extends ActorRef with ScalaActorRef { + + private lazy val txLog = { + EventHandler.debug(this, "Creating a ReplicatedActorRef for Actor [%s] on [%s]" + .format(actorClassName, homeAddress)) + TransactionLog.newLogFor(uuid.toString) + } + + def invoke(messageHandle: MessageInvocation) { + actorRef.invoke(messageHandle) + txLog.recordEntry(MessageSerializer.serialize(messageHandle.message).toByteArray) + } + + def start(): ActorRef = { + EventHandler.debug(this, "Starting ReplicatedActorRef for Actor [%s] with transaction log [%s]" + .format(actorClassName, txLog.logId)) + actorRef.start + } + + def stop() { + txLog.delete() + actorRef.stop() + } + + override def setFaultHandler(handler: FaultHandlingStrategy) = actorRef.setFaultHandler(handler) + override def getFaultHandler(): FaultHandlingStrategy = actorRef.getFaultHandler() + override def setLifeCycle(lifeCycle: LifeCycle): Unit = actorRef.setLifeCycle(lifeCycle) + override def getLifeCycle(): LifeCycle = actorRef.getLifeCycle + def homeAddress: Option[InetSocketAddress] = actorRef.homeAddress + def actorClass: Class[_ <: Actor] = actorRef.actorClass + def actorClassName: String = actorRef.actorClassName + def dispatcher_=(md: MessageDispatcher): Unit = actorRef.dispatcher_=(md) + def dispatcher: MessageDispatcher = 
actorRef.dispatcher
+  def link(other: ActorRef): Unit = actorRef.link(other)
+  def unlink(other: ActorRef): Unit = actorRef.unlink(other)
+  def startLink(other: ActorRef): Unit = actorRef.startLink(other)
+  def spawn(clazz: Class[_ <: Actor]): ActorRef = actorRef.spawn(clazz)
+  def spawnRemote(clazz: Class[_ <: Actor], hostname: String, port: Int, timeout: Long): ActorRef = actorRef.spawnRemote(clazz, hostname, port, timeout)
+  def spawnLink(clazz: Class[_ <: Actor]): ActorRef = actorRef.spawnLink(clazz)
+  def spawnLinkRemote(clazz: Class[_ <: Actor], hostname: String, port: Int, timeout: Long): ActorRef = actorRef.spawnLinkRemote(clazz, hostname, port, timeout)
+  def supervisor: Option[ActorRef] = actorRef.supervisor
+  def linkedActors: JMap[Uuid, ActorRef] = actorRef.linkedActors
+  protected[akka] def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]): Unit = actorRef.postMessageToMailbox(message, senderOption)
+  protected[akka] def postMessageToMailboxAndCreateFutureResultWithTimeout[T](
+    message: Any,
+    timeout: Long,
+    senderOption: Option[ActorRef],
+    senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T] = actorRef.postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, senderOption, senderFuture)
+  protected[akka] def actorInstance: AtomicReference[Actor] = actorRef.actorInstance
+  protected[akka] def supervisor_=(sup: Option[ActorRef]): Unit = actorRef.supervisor_=(sup)
+  protected[akka] def mailbox: AnyRef = actorRef.mailbox
+  protected[akka] def mailbox_=(value: AnyRef): AnyRef = actorRef.mailbox_=(value)
+  protected[akka] def handleTrapExit(dead: ActorRef, reason: Throwable): Unit = actorRef.handleTrapExit(dead, reason)
+  protected[akka] def restart(reason: Throwable, maxNrOfRetries: Option[Int], withinTimeRange: Option[Int]): Unit = actorRef.restart(reason, maxNrOfRetries, withinTimeRange)
+  protected[akka] def restartLinkedActors(reason: Throwable, maxNrOfRetries: Option[Int], withinTimeRange: Option[Int]): Unit = actorRef.restartLinkedActors(reason, maxNrOfRetries, withinTimeRange)
+  protected[akka] def registerSupervisorAsRemoteActor: Option[Uuid] = actorRef.registerSupervisorAsRemoteActor
+}
diff --git a/akka-cluster/src/main/scala/akka/cloud/cluster/replication/TransactionLog.scala b/akka-cluster/src/main/scala/akka/cloud/cluster/replication/TransactionLog.scala
new file mode 100644
index 0000000000..e48c6f61a3
--- /dev/null
+++ b/akka-cluster/src/main/scala/akka/cloud/cluster/replication/TransactionLog.scala
@@ -0,0 +1,476 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+package akka.cloud.cluster
+
+import org.apache.bookkeeper.client.{BookKeeper, LedgerHandle, LedgerEntry, BKException, AsyncCallback}
+import org.apache.zookeeper.CreateMode
+
+import org.I0Itec.zkclient.exception._
+
+import akka.config._
+import Config._
+import akka.util._
+import akka.event.EventHandler
+import akka.dispatch.{DefaultCompletableFuture, CompletableFuture}
+import akka.AkkaException
+
+import akka.cloud.zookeeper._
+
+import java.util.Enumeration
+
+import scala.collection.JavaConversions._
+
+// FIXME allow user to choose dynamically between 'async' and 'sync' tx logging (asyncAddEntry(byte[] data, AddCallback cb, Object ctx))
+// FIXME clean up old entries in log after doing a snapshot
+// FIXME clean up all meta-data in ZK for a specific UUID when the corresponding actor is shut down
+// FIXME delete tx log after migration of actor has been made and create a new one
+
+/**
+ * @author Jonas Bonér
+ */
+class ReplicationException(message: String) extends AkkaException(message)
+
+/**
+ * @author Jonas Bonér
+ */
+class TransactionLog private (
+  ledger: LedgerHandle, val id: String, val isAsync: Boolean) {
+  import TransactionLog._
+
+  val logId = ledger.getId
+  val txLogPath = transactionLogNode + "/" + id
+  val snapshotPath = txLogPath + "/snapshot"
+
+  private val isOpen = new Switch(true)
+
+  /**
+   * Records an entry in the transaction log, asynchronously if 'isAsync' is set.
+   */
+  def recordEntry(entry: Array[Byte]): Unit = if (isOpen.isOn) {
+    try {
+      if (isAsync) {
+        ledger.asyncAddEntry(
+          entry,
+          new AsyncCallback.AddCallback {
+            def addComplete(
+              returnCode: Int,
+              ledgerHandle: LedgerHandle,
+              entryId: Long,
+              ctx: AnyRef) {
+              handleReturnCode(returnCode)
+              EventHandler.debug(this,
+                "Writing entry [%s] to log [%s]".format(entryId, logId))
+            }
+          },
+          null)
+      } else {
+        handleReturnCode(ledger.addEntry(entry))
+        val entryId = ledger.getLastAddPushed
+        EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId))
+      }
+    } catch {
+      case e => handleError(e)
+    }
+  } else transactionClosedError
+
+  /**
+   * Records a snapshot in the transaction log and stores its entry id in ZooKeeper.
+   */
+  def recordSnapshot(snapshot: Array[Byte]): Unit = if (isOpen.isOn) {
+    try {
+      if (isAsync) {
+        ledger.asyncAddEntry(
+          snapshot,
+          new AsyncCallback.AddCallback {
+            def addComplete(
+              returnCode: Int,
+              ledgerHandle: LedgerHandle,
+              entryId: Long,
+              ctx: AnyRef) {
+              handleReturnCode(returnCode)
+              storeSnapshotMetaDataInZooKeeper(entryId)
+            }
+          },
+          null)
+      } else {
+        handleReturnCode(ledger.addEntry(snapshot))
+        storeSnapshotMetaDataInZooKeeper(ledger.getLastAddPushed)
+      }
+    } catch {
+      case e => handleError(e)
+    }
+  } else transactionClosedError
+
+  /**
+   * Returns all entries recorded in the transaction log.
+   */
+  def entries: Vector[Array[Byte]] = entriesInRange(0, ledger.getLastAddConfirmed)
+
+  /**
+   * Returns the latest snapshot together with all entries recorded after it.
+   */
+  def entriesFromLatestSnapshot: Tuple2[Array[Byte], Vector[Array[Byte]]] = {
+    val snapshotId = latestSnapshotId
+    EventHandler.debug(this,
+      "Reading entries from snapshot id [%s] for log [%s]".format(snapshotId, logId))
+    (entriesInRange(snapshotId, snapshotId).head, entriesInRange(snapshotId + 1, ledger.getLastAddConfirmed))
+  }
+
+  /**
+   * Returns the entries with ids in the range [from, to].
+   */
+  def entriesInRange(from: Long, to: Long): Vector[Array[Byte]] = if (isOpen.isOn) {
+    try {
+      if (from < 0) throw new IllegalArgumentException("'from' can't be negative [" + from + "]")
+      if (to < 0) throw new IllegalArgumentException("'to' can't be negative [" + to + "]")
+      if (to < from) throw new IllegalArgumentException("'to' can't be smaller than 'from' [" + from + "," + to + "]")
+      EventHandler.debug(this,
+        "Reading entries [%s -> %s] for log [%s]".format(from, to, logId))
+      if (isAsync) {
+        val future = new DefaultCompletableFuture[Vector[Array[Byte]]](timeout)
+        ledger.asyncReadEntries(
+          from, to,
+          new AsyncCallback.ReadCallback {
+            def readComplete(
+              returnCode: Int,
+              ledgerHandle: LedgerHandle,
+              enumeration: Enumeration[LedgerEntry],
+              ctx: AnyRef) {
+              val future = ctx.asInstanceOf[CompletableFuture[Vector[Array[Byte]]]]
+              var entries = Vector[Array[Byte]]()
+              while (enumeration.hasMoreElements) {
+                entries = entries :+ enumeration.nextElement.getEntry
+              }
+              if (returnCode == BKException.Code.OK) future.completeWithResult(entries)
+              else future.completeWithException(BKException.create(returnCode))
+            }
+          },
+          future)
+        await(future)
+      } else {
+        val enumeration = ledger.readEntries(from, to)
+        var entries = Vector[Array[Byte]]()
+        while (enumeration.hasMoreElements) {
+          entries = entries :+
enumeration.nextElement.getEntry + } + entries + } + } catch { + case e => handleError(e) + } + } else transactionClosedError + + /** + * TODO document method + */ + def latestEntryId: Long = ledger.getLastAddConfirmed + + /** + * TODO document method + */ + def latestSnapshotId: Long = { + try { + val snapshotId = zkClient.readData(snapshotPath).asInstanceOf[Long] + EventHandler.debug(this, + "Retrieved latest snapshot id [%s] from transaction log [%s]".format(snapshotId, logId)) + snapshotId + } catch { + case e: ZkNoNodeException => + handleError(new ReplicationException( + "Transaction log for UUID [" + id + + "] does not have a snapshot recorded in ZooKeeper")) + case e => handleError(e) + } + } + + /** + * TODO document method + */ + def delete(): Unit = if (isOpen.isOn) { + EventHandler.debug(this, "Deleting transaction log [%s]".format(logId)) + try { + if (isAsync) { + bookieClient.asyncDeleteLedger( + logId, + new AsyncCallback.DeleteCallback { + def deleteComplete(returnCode: Int, ctx: AnyRef) { + handleReturnCode(returnCode) + } + }, + null) + } else { + bookieClient.deleteLedger(logId) + } + } catch { + case e => handleError(e) + } + } + + /** + * TODO document method + */ + def close(): Unit = if (isOpen.switchOff) { + EventHandler.debug(this, "Closing transaction log [%s]".format(logId)) + try { + if (isAsync) { + ledger.asyncClose( + new AsyncCallback.CloseCallback { + def closeComplete( + returnCode: Int, + ledgerHandle: LedgerHandle, + ctx: AnyRef) { + handleReturnCode(returnCode) + } + }, + null) + } else { + ledger.close + } + } catch { + case e => handleError(e) + } + } + + private def storeSnapshotMetaDataInZooKeeper(snapshotId: Long): Unit = if (isOpen.isOn) { + try { + zkClient.create(snapshotPath, null, CreateMode.PERSISTENT) + } catch { + case e: ZkNodeExistsException => {} // do nothing + case e => handleError(e) + } + + try { + zkClient.writeData(snapshotPath, snapshotId) + } catch { + case e => + handleError(new ReplicationException( + "Could not store transaction log snapshot meta-data in ZooKeeper for UUID [" + + id +"]")) + } + EventHandler.debug(this, + "Writing snapshot [%s] to log [%s]".format(snapshotId, logId)) + } else transactionClosedError + + private def handleReturnCode(block: => Long) { + val code = block.toInt + if (code == BKException.Code.OK) {} // all fine + else handleError(BKException.create(code)) + } + + private def transactionClosedError: Nothing = { + handleError(new ReplicationException( + "Transaction log [" + logId + + "] is closed. 
You need to open a new one with 'TransactionLog.logFor(id)'"))
+  }
+}
+
+/**
+ * @author Jonas Bonér
+ */
+object TransactionLog {
+
+  val digestType = config.getString("akka.cloud.cluster.replication.digest-type", "CRC32") match {
+    case "CRC32" => BookKeeper.DigestType.CRC32
+    case "MAC" => BookKeeper.DigestType.MAC
+    case unknown => throw new ConfigurationException(
+      "akka.cloud.cluster.replication.digest-type is invalid [" + unknown + "]")
+  }
+  val password = config.getString("akka.cloud.cluster.replication.password", "secret").getBytes("UTF-8")
+  val ensembleSize = config.getInt("akka.cloud.cluster.replication.ensemble-size", 3)
+  val quorumSize = config.getInt("akka.cloud.cluster.replication.quorum-size", 2)
+  val timeout = 5000 // FIXME make configurable
+
+  private[akka] val transactionLogNode = "/transaction-log-ids"
+
+  private val isConnected = new Switch(false)
+
+  private[akka] lazy val (bookieClient, zkClient) = {
+    val bk = new BookKeeper(Cluster.zooKeeperServers)
+
+    val zk = new AkkaZkClient(
+      Cluster.zooKeeperServers,
+      Cluster.sessionTimeout,
+      Cluster.connectionTimeout,
+      Cluster.defaultSerializer)
+
+    try {
+      zk.create(transactionLogNode, null, CreateMode.PERSISTENT)
+    } catch {
+      case e: ZkNodeExistsException => {} // do nothing
+      case e => handleError(e)
+    }
+
+    EventHandler.info(this,
+      ("Transaction log service started with" +
+       "\n\tdigest type [%s]" +
+       "\n\tensemble size [%s]" +
+       "\n\tquorum size [%s]" +
+       "\n\tlogging time out [%s]").format(
+        digestType,
+        ensembleSize,
+        quorumSize,
+        timeout))
+    isConnected.switchOn
+    (bk, zk)
+  }
+
+  private[akka] def apply(ledger: LedgerHandle, id: String, isAsync: Boolean = false) =
+    new TransactionLog(ledger, id, isAsync)
+
+  /**
+   * Shuts down the transaction log service and closes the ZooKeeper and BookKeeper clients.
+   */
+  def shutdown() {
+    isConnected switchOff {
+      try {
+        zkClient.close
+        bookieClient.halt
+      } catch {
+        case e => handleError(e)
+      }
+    }
+  }
+
+  /**
+   * Creates a new transaction log (a BookKeeper ledger) for the id and stores its meta-data in ZooKeeper.
+   */
+  def newLogFor(id: String, isAsync: Boolean = false): TransactionLog = {
+    val txLogPath = transactionLogNode + "/" + id
+
+    val ledger = try {
+      if (zkClient.exists(txLogPath)) throw new ReplicationException(
+        "Transaction log for UUID [" + id + "] already exists")
+
+      val future = new DefaultCompletableFuture[LedgerHandle](timeout)
+      if (isAsync) {
+        bookieClient.asyncCreateLedger(
+          ensembleSize, quorumSize, digestType, password,
+          new AsyncCallback.CreateCallback {
+            def createComplete(
+              returnCode: Int,
+              ledgerHandle: LedgerHandle,
+              ctx: AnyRef) {
+              val future = ctx.asInstanceOf[CompletableFuture[LedgerHandle]]
+              if (returnCode == BKException.Code.OK) future.completeWithResult(ledgerHandle)
+              else future.completeWithException(BKException.create(returnCode))
+            }
+          },
+          future)
+        await(future)
+      } else {
+        bookieClient.createLedger(ensembleSize, quorumSize, digestType, password)
+      }
+    } catch {
+      case e => handleError(e)
+    }
+
+    val logId = ledger.getId
+    try {
+      zkClient.create(txLogPath, null, CreateMode.PERSISTENT)
+      zkClient.writeData(txLogPath, logId)
+      logId
+    } catch {
+      case e =>
+        bookieClient.deleteLedger(logId) // clean up
+        handleError(new ReplicationException(
+          "Could not store transaction log [" + logId +
+          "] meta-data in ZooKeeper for UUID [" + id + "]"))
+    }
+
+    EventHandler.info(this,
+      "Created new transaction log [%s] for UUID [%s]".format(logId, id))
+    TransactionLog(ledger, id, isAsync)
+  }
+
+  /**
+   * Opens the existing transaction log for the id, looking up its ledger id in ZooKeeper.
+   */
+  def logFor(id: String, isAsync: Boolean = false): TransactionLog = {
+    val txLogPath = transactionLogNode + "/" + id
+
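+    // the path maps the user-supplied id (typically an actor UUID) to the
+    // BookKeeper ledger id that was stored by newLogFor(..)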
val logId = try { + val logId = zkClient.readData(txLogPath).asInstanceOf[Long] + EventHandler.debug(this, + "Retrieved transaction log [%s] for UUID [%s]".format(logId, id)) + logId + } catch { + case e: ZkNoNodeException => + handleError(new ReplicationException( + "Transaction log for UUID [" + id +"] does not exist in ZooKeeper")) + case e => handleError(e) + } + + val ledger = try { + if (isAsync) { + val future = new DefaultCompletableFuture[LedgerHandle](timeout) + bookieClient.asyncOpenLedger( + logId, digestType, password, + new AsyncCallback.OpenCallback { + def openComplete( + returnCode: Int, + ledgerHandle: LedgerHandle, + ctx: AnyRef) { + val future = ctx.asInstanceOf[CompletableFuture[LedgerHandle]] + if (returnCode == BKException.Code.OK) future.completeWithResult(ledgerHandle) + else future.completeWithException(BKException.create(returnCode)) + } + }, + future) + await(future) + } else { + bookieClient.openLedger(logId, digestType, password) + } + } catch { + case e => handleError(e) + } + + TransactionLog(ledger, id, isAsync) + } + + private[akka] def await[T](future: CompletableFuture[T]): T = { + future.await + if (future.result.isDefined) future.result.get + else if (future.exception.isDefined) handleError(future.exception.get) + else handleError(new ReplicationException("No result from async read of entries for transaction log")) + } + + private[akka] def handleError(e: Throwable): Nothing = { + EventHandler.error(e, this, e.toString) + throw e + } +} + +/** + * @author Jonas Bonér + */ +object LocalBookKeeperEnsemble { + private val isRunning = new Switch(false) + private val port = 5555 + + @volatile private var localBookKeeper: LocalBookKeeper = _ + + /** + * TODO document method + */ + def start() { + isRunning switchOn { + localBookKeeper = new LocalBookKeeper(TransactionLog.ensembleSize) + localBookKeeper.runZookeeper(port) + localBookKeeper.initializeZookeper + localBookKeeper.runBookies + } + } + + /** + * TODO document method + */ + def shutdown() { + isRunning switchOff { + localBookKeeper.bs.foreach(_.shutdown) // stop bookies + localBookKeeper.zkc.close // stop zk client + localBookKeeper.zks.shutdown // stop zk server + localBookKeeper.serverFactory.shutdown // stop zk NIOServer + } + } +} diff --git a/akka-cluster/src/test/resources/log4j.properties b/akka-cluster/src/test/resources/log4j.properties new file mode 100644 index 0000000000..9825970594 --- /dev/null +++ b/akka-cluster/src/test/resources/log4j.properties @@ -0,0 +1,58 @@ +# Define some default values that can be overridden by system properties +zookeeper.root.logger=INFO, CONSOLE +zookeeper.console.threshold=INFO +zookeeper.log.dir=. +zookeeper.log.file=zookeeper.log +zookeeper.log.threshold=DEBUG +zookeeper.tracelog.dir=. 
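+# (any of these defaults may be overridden at JVM startup,
+#  e.g. -Dzookeeper.root.logger=DEBUG,CONSOLE)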
+zookeeper.tracelog.file=zookeeper_trace.log
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+
+
+# DEFAULT: console appender only
+log4j.rootLogger=${zookeeper.root.logger}
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+# Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
+log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+# Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
diff --git a/akka-cluster/src/test/resources/logback-test.xml b/akka-cluster/src/test/resources/logback-test.xml
new file mode 100644
index 0000000000..240a412687
--- /dev/null
+++ b/akka-cluster/src/test/resources/logback-test.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+
+  <appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
+    <layout class="ch.qos.logback.classic.PatternLayout">
+      <pattern>
+        [%4p] [%d{ISO8601}] [%t] %c{1}: %m%n
+      </pattern>
+    </layout>
+  </appender>
+
+  <root level="INFO">
+    <appender-ref ref="stdout"/>
+  </root>
+
+</configuration>
diff --git a/akka-cluster/src/test/resources/zoo.cfg b/akka-cluster/src/test/resources/zoo.cfg
new file mode 100644
index 0000000000..b71eadcc33
--- /dev/null
+++ b/akka-cluster/src/test/resources/zoo.cfg
@@ -0,0 +1,12 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
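+# (sample value below; for local test runs point this at a
+#  writable scratch directory, e.g. dataDir=/tmp/zookeeper/data)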
+dataDir=/export/crawlspace/mahadev/zookeeper/server1/data +# the port at which the clients will connect +clientPort=2181 diff --git a/akka-cluster/src/test/scala/akka/cloud/cluster/ClusterMultiJvmSpec.scala b/akka-cluster/src/test/scala/akka/cloud/cluster/ClusterMultiJvmSpec.scala new file mode 100644 index 0000000000..9d59cde803 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cloud/cluster/ClusterMultiJvmSpec.scala @@ -0,0 +1,140 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.cloud.cluster + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} + +import akka.cloud.zookeeper._ +import org.I0Itec.zkclient._ + +object MultiNodeTest { + val NrOfNodes = 2 + val ClusterName = "test-cluster" + val DataPath = "_akka_cluster/data" + val LogPath = "_akka_cluster/log" +} + +trait MultiNodeTest extends WordSpec with MustMatchers with BeforeAndAfterAll with BeforeAndAfterEach { + import MultiNodeTest._ + + val nodeNr = nodeNumber + val port = 9000 + nodeNumber + + var zkServer: ZkServer = _ + var zkClient: ZkClient = _ + + def nodeNumber: Int + + def createNode = Cluster.newNode(nodeAddress = NodeAddress(ClusterName, "node-" + nodeNr, port = port)) + + def barrier(name: String) = ZooKeeperBarrier(zkClient, ClusterName, name, "node-" + nodeNr, NrOfNodes) + + override def beforeAll() = { + if (nodeNr == 1) zkServer = Cluster.startLocalCluster(DataPath, LogPath) + zkClient = Cluster.newZkClient + } + + override def beforeEach() = { + if (nodeNr == 1) Cluster.reset + } + + override def afterAll() = { + zkClient.close + if (nodeNr == 1) Cluster.shutdownLocalCluster + } +} + +class ClusterMultiJvmNode1 extends MultiNodeTest { + def nodeNumber = 1 + + "A cluster" should { + + "be able to start and stop - one node" in { + val node = createNode + + barrier("start-stop") { + node.start() + + Thread.sleep(500) + node.membershipNodes.size must be(1) + + node.stop() + + Thread.sleep(500) + node.membershipNodes.size must be(0) + node.isRunning must be(false) + } + } + + "be able to start and stop - two nodes" in { + val node = createNode + + barrier("start-node1") { + node.start() + Thread.sleep(500) + node.membershipNodes.size must be(1) + } + + barrier("start-node2") { + // let node2 start + } + + node.membershipNodes.size must be(2) + node.leader must be(node.leaderLock.getId) + + barrier("stop-node1") { + node.stop() + Thread.sleep(500) + node.isRunning must be(false) + } + + barrier("stop-node2") { + // let node2 stop + } + } + } +} + +class ClusterMultiJvmNode2 extends MultiNodeTest { + def nodeNumber = 2 + + "A cluster" should { + + "be able to start and stop - one node" in { + barrier("start-stop") { + // let node1 start + } + } + + "be able to start and stop - two nodes" in { + val node = createNode + + barrier("start-node1") { + // let node1 start + } + + barrier("start-node2") { + node.start() + Thread.sleep(500) + node.membershipNodes.size must be(2) + } + + barrier("stop-node1") { + // let node1 stop + } + + node.membershipNodes.size must be(1) + node.leader must be(node.leaderLock.getId) + + barrier("stop-node2") { + node.stop() + Thread.sleep(500) + node.isRunning must be(false) + } + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cloud/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cloud/cluster/ClusterSpec.scala new file mode 100644 index 0000000000..642fc51f01 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cloud/cluster/ClusterSpec.scala @@ -0,0 
+1,1815 @@ +package akka.cloud.cluster + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach, Spec } + +import org.I0Itec.zkclient._ + +import akka.actor._ +import akka.actor.Actor._ +import akka.serialization.{Serializer, SerializerBasedActorFormat} + +import akka.cloud.common.Util._ + +import java.util.concurrent.{ CyclicBarrier, TimeUnit } + +import scala.collection.JavaConversions._ + +// FIXME: Test sending all funs + +class MyJavaSerializableActor extends Actor with Serializable { + var count = 0 + + def receive = { + case "hello" => + count = count + 1 + self.reply("world " + count) + } +} + +object BinaryFormatMyJavaSerializableActor { + implicit object MyJavaSerializableActorFormat extends SerializerBasedActorFormat[MyJavaSerializableActor] with Serializable { + val serializer = Serializer.Java + } +} + +class ClusterSpec extends WordSpec with MustMatchers with BeforeAndAfterAll with BeforeAndAfterEach { + import Cluster._ + + val dataPath = "_akka_cluster/data" + val logPath = "_akka_cluster/log" + + var zkServer: ZkServer = _ + + "A ClusterNode" should { + "be able to start and stop - one node" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "start-stop-1", port = 9001)) + node.start() + + Thread.sleep(500) + node.membershipNodes.size must be(1) + + node.stop() + + Thread.sleep(500) + node.membershipNodes.size must be(0) + node.isRunning must be(false) + } + + "be able to start and stop - two nodes" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "start-stop-2-1", port = 9001)) + node1.start() + + Thread.sleep(500) + node1.membershipNodes.size must be(1) + + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "start-stop-2-2", port = 9002)) + node2.start() + + Thread.sleep(500) + node1.leader must be(node2.leader) + node1.membershipNodes.size must be(2) + + node1.stop() + Thread.sleep(500) + node2.membershipNodes.size must be(1) + + node2.stop() + Thread.sleep(500) + node1.isRunning must be(false) + node2.isRunning must be(false) + } + + "be able to subscribe to new connection of membership node events" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "new-membership-connection-1", port = 9001)) + // register listener + val barrier = new CyclicBarrier(2) + node.register(new ChangeListener { + override def nodeConnected(node: String, client: ClusterNode) = barrier.await + }) + + // start node + node.start() + barrier.await(20, TimeUnit.SECONDS) + } + + "be able to subscribe to new disconnection of membership node events" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "new-membership-disconnection-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "new-membership-disconnection-2", port = 9002)) + // register listener + val barrier = new CyclicBarrier(2) + node1.register(new ChangeListener { + override def nodeDisconnected(node: String, client: ClusterNode) = barrier.await + }) + + // start node + node1.start() + node2.start() + node2.stop() + node2.isRunning must be(false) + barrier.await(20, TimeUnit.SECONDS) + } + + "and another cluster node should be able to agree on a leader election when starting up" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-1-1", port = 9001)) + node1.start() + + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-1-2", port = 9002)) + 
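+      // both nodes compete for the same ZooKeeper-backed leader lock; once
+      // node2 has joined, both must see the lock holder as leader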
node2.start()
+
+      node1.leader must be(node1.leaderLock.getId)
+      node1.leader must be(node2.leader)
+
+      node1.stop()
+      node2.stop()
+      node1.isRunning must be(false)
+      node2.isRunning must be(false)
+    }
+
+    "and two other cluster nodes should be able to agree on a leader election when starting up" in {
+      val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-2-1", port = 9001))
+      val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-2-2", port = 9002))
+      val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-2-3", port = 9003))
+
+      node1.start()
+      node2.start()
+      node3.start()
+
+      node1.leader must be(node1.leaderLock.getId)
+      node1.leader must be(node2.leader)
+      node2.leader must be(node3.leader)
+      node3.leader must be(node1.leader)
+
+      node1.stop()
+      node2.stop()
+      node3.stop()
+      node1.isRunning must be(false)
+      node2.isRunning must be(false)
+      node3.isRunning must be(false)
+    }
+
+    "and two other cluster nodes should be able to agree on a leader election when the first is shut down" in {
+      val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-3-1", port = 9001))
+      val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-3-2", port = 9002))
+      val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-3-3", port = 9003))
+
+      node1.start()
+      node2.start()
+      node3.start()
+
+      node1.leader must be(node1.leaderLock.getId)
+      node1.leader must be(node2.leader)
+      node2.leader must be(node3.leader)
+      node3.leader must be(node1.leader)
+
+      node1.stop()
+      node1.isRunning must be(false)
+      Thread.sleep(500)
+      node2.leader must be(node2.leaderLock.getId)
+      node3.leader must be(node2.leader)
+
+      node2.stop()
+      node2.isRunning must be(false)
+      Thread.sleep(500)
+      node3.leader must be(node3.leaderLock.getId)
+
+      node3.stop()
+      node3.isRunning must be(false)
+    }
+
+    "and two other cluster nodes should be able to agree on a leader election when the second is shut down" in {
+      val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-4-1", port = 9001))
+      val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-4-2", port = 9002))
+      val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-4-3", port = 9003))
+
+      node1.start()
+      node2.start()
+      node3.start()
+
+      node1.leader must be(node1.leaderLock.getId)
+      node1.leader must be(node2.leader)
+      node2.leader must be(node3.leader)
+      node3.leader must be(node1.leader)
+
+      node2.stop()
+      node2.isRunning must be(false)
+      Thread.sleep(500)
+      node1.leader must be(node1.leaderLock.getId)
+      node3.leader must be(node1.leader)
+
+      node3.stop()
+      node3.isRunning must be(false)
+      Thread.sleep(500)
+      node1.leader must be(node1.leaderLock.getId)
+
+      node1.stop()
+      node1.isRunning must be(false)
+    }
+
+    "and two other cluster nodes should be able to agree on a leader election when the third is shut down" in {
+      val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-5-1", port = 9001))
+      val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-5-2", port = 9002))
+      val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "leader-5-3", port = 9003))
+
+      node1.start()
+      node2.start()
+      node3.start()
+
+      node1.leader must be(node1.leaderLock.getId)
+      node1.leader must be(node2.leader)
+      node2.leader must be(node3.leader)
+      node3.leader must be(node1.leader)
+
+      node3.stop()
+      node3.isRunning must be(false)
+      Thread.sleep(500)
+
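+      // with node3 gone the leader lock is re-acquired; the two survivors
+      // must agree on the new leader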
node1.leader must be(node1.leaderLock.getId) + node2.leader must be(node1.leader) + + node2.stop() + Thread.sleep(500) + node2.isRunning must be(false) + node1.leader must be(node1.leaderLock.getId) + + node1.stop() + node1.isRunning must be(false) + } + + "be able to cluster an actor by ActorRef" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "cluster-actor-1", port = 9001)) + node.start + + // register actor + import BinaryFormatMyJavaSerializableActor._ + var serializeMailbox = true + node.store(actorRef, serializeMailbox) + + node.isClustered(ActorAddress(actorUuid = actorRef.uuid)) must be(true) + node.uuidsForClusteredActors.exists(_ == actorRef.uuid) must be(true) + + node.stop + } + + "be able to cluster an actor by class" in { + // create actor + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "cluster-actor-1", port = 9001)) + node.start + + // register actor + import BinaryFormatMyJavaSerializableActor._ + node.store(classOf[MyJavaSerializableActor]) + + node.isClustered(ActorAddress(actorClassName = classOf[MyJavaSerializableActor].getName)) must be(true) + + node.stop + } + + "be able to remove an actor by actor uuid" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-uuid", port = 9001)) + node.start + + // register actor + import BinaryFormatMyJavaSerializableActor._ + var serializeMailbox = true + node.store(actorRef, serializeMailbox) + + node.isClustered(ActorAddress(actorUuid = actorRef.uuid)) must be(true) + node.uuidsForClusteredActors.exists(_ == actorRef.uuid) must be(true) + + // deregister actor + node.remove(ActorAddress(actorUuid = actorRef.uuid)) + node.uuidsForClusteredActors.exists(_ == actorRef.uuid) must be(false) + + node.stop + } + + "be able to remove an actor by actor id" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-id", port = 9001)) + node.start + + // register actor + import BinaryFormatMyJavaSerializableActor._ + var serializeMailbox = true + node.store(actorRef, serializeMailbox) + + node.isClustered(ActorAddress(actorId = actorRef.id)) must be(true) + node.idsForClusteredActors.exists(_ == actorRef.id) must be(true) + + // deregister actor + node.remove(ActorAddress(actorId = actorRef.id)) + node.idsForClusteredActors.exists(_ == actorRef.id) must be(false) + + node.stop + } + + "be able to remove an actor by actor class name" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-classname", port = 9001)) + node.start + + // register actor + import BinaryFormatMyJavaSerializableActor._ + var serializeMailbox = true + node.store(actorRef, serializeMailbox) + + node.isClustered(ActorAddress(actorClassName = actorRef.actorClassName)) must be(true) + node.classNamesForClusteredActors.exists(_ == actorRef.actorClassName) must be(true) + + // deregister actor + node.remove(ActorAddress(actorClassName = actorRef.actorClassName)) + node.classNamesForClusteredActors.exists(_ == actorRef.actorClassName) must be(false) + + node.stop + } + + "be able to use an actor by actor uuid" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "use-actor-uuid", port = 9001)) + 
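+      // store(...) below serializes the actor (with its mailbox, since
+      // serializeMailbox is true) into the cluster; use(...) then checks it
+      // out on this node, which is why the counter continues at "world 3"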
node.start + + // create actor + val actorRef1 = actorOf[MyJavaSerializableActor].start + (actorRef1 !! "hello").getOrElse("_") must equal("world 1") + (actorRef1 !! "hello").getOrElse("_") must equal("world 2") + + // register actor + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node.store(actorRef1, serializeMailbox) + node.isClustered(ActorAddress(actorUuid = actorRef1.uuid)) must be(true) + node.uuidsForClusteredActors.exists(_ == actorRef1.uuid) must be(true) + + // check out actor + val actorRef2 = node.use(ActorAddress(actorUuid = actorRef1.uuid)).head + node.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "use-actor-uuid")) must be(true) + (actorRef2 !! "hello").getOrElse("_") must equal("world 3") + + actorRef1.stop + actorRef2.stop + + node.stop + } + + "be able to use an actor by actor id" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "use-actor-id", port = 9001)) + node.start + + // create actor + val actorRef1 = actorOf[MyJavaSerializableActor].start + (actorRef1 !! "hello").getOrElse("_") must equal("world 1") + (actorRef1 !! "hello").getOrElse("_") must equal("world 2") + + // register actor + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node.store(actorRef1, serializeMailbox) + node.isClustered(ActorAddress(actorId = actorRef1.id)) must be(true) + node.idsForClusteredActors.exists(_ == actorRef1.id) must be(true) + + // check out actor + val actorRef2 = node.use(ActorAddress(actorId = actorRef1.id)).head + node.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "use-actor-id")) must be(true) + (actorRef2 !! "hello").getOrElse("_") must equal("world 3") + + actorRef1.stop + actorRef2.stop + + node.stop + } + + "be able to use an actor by actor class name" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "use-actor-classname", port = 9001)) + node.start + + // create actor + val actorRef1 = actorOf[MyJavaSerializableActor].start + (actorRef1 !! "hello").getOrElse("_") must equal("world 1") + (actorRef1 !! "hello").getOrElse("_") must equal("world 2") + + // register actor + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node.store(actorRef1, serializeMailbox) + node.isClustered(ActorAddress(actorClassName = actorRef1.actorClassName)) must be(true) + node.classNamesForClusteredActors.exists(_ == actorRef1.actorClassName) must be(true) + + // check out actor + val actorRef2 = node.use(ActorAddress(actorClassName = actorRef1.actorClassName)).head + node.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "use-actor-classname")) must be(true) + (actorRef2 !! "hello").getOrElse("_") must equal("world 3") + + actorRef1.stop + actorRef2.stop + + node.stop + } + + "be able to release an actor by uuid" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "release-actor-uuid", port = 9001)) + node.start + + // create actor + val actorRef1 = actorOf[MyJavaSerializableActor].start + (actorRef1 !! "hello").getOrElse("_") must equal("world 1") + (actorRef1 !! 
"hello").getOrElse("_") must equal("world 2") + + // register actor + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node.store(actorRef1, serializeMailbox) + node.isClustered(ActorAddress(actorUuid = actorRef1.uuid)) must be(true) + node.uuidsForClusteredActors.exists(_ == actorRef1.uuid) must be(true) + + // check out actor + val actorRef2 = node.use(ActorAddress(actorUuid = actorRef1.uuid)).head + node.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "release-actor-uuid")) must be(true) + (actorRef2 !! "hello").getOrElse("_") must equal("world 3") + + // check in actor + node.release(ActorAddress(actorUuid = actorRef2.uuid)) + node.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "release-actor-uuid")) must be(false) + + actorRef1.stop + actorRef2.stop + + node.stop + } + + "be able to release an actor by id" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "release-actor-id", port = 9001)) + node.start + + // create actor + val actorRef1 = actorOf[MyJavaSerializableActor].start + (actorRef1 !! "hello").getOrElse("_") must equal("world 1") + (actorRef1 !! "hello").getOrElse("_") must equal("world 2") + + // register actor + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node.store(actorRef1, serializeMailbox) + node.isClustered(ActorAddress(actorId = actorRef1.id)) must be(true) + node.idsForClusteredActors.exists(_ == actorRef1.id) must be(true) + + // check out actor + val actorRef2 = node.use(ActorAddress(actorId = actorRef1.id)).head + node.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "release-actor-id")) must be(true) + (actorRef2 !! "hello").getOrElse("_") must equal("world 3") + + // check in actor + node.release(ActorAddress(actorId = actorRef2.id)) + node.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "release-actor-id")) must be(false) + + actorRef1.stop + actorRef2.stop + + node.stop + } + + "be able to release an actor by class name" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "release-actor-classname", port = 9001)) + node.start + + // create actor + val actorRef1 = actorOf[MyJavaSerializableActor].start + (actorRef1 !! "hello").getOrElse("_") must equal("world 1") + (actorRef1 !! "hello").getOrElse("_") must equal("world 2") + + // register actor + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node.store(actorRef1, serializeMailbox) + node.isClustered(ActorAddress(actorClassName = actorRef1.actorClassName)) must be(true) + node.classNamesForClusteredActors.exists(_ == actorRef1.actorClassName) must be(true) + + // check out actor + val actorRef2 = node.use(ActorAddress(actorClassName = actorRef1.actorClassName)).head + node.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "release-actor-classname")) must be(true) + (actorRef2 !! 
"hello").getOrElse("_") must equal("world 3") + + // check in actor + node.release(ActorAddress(actorClassName = actorRef2.actorClassName)) + node.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "release-actor-classname")) must be(false) + + actorRef1.stop + actorRef2.stop + + node.stop + } + + "be able to release used actor on remove an actor by actor uuid" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-uuid", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-uuid-2", port = 9002)).start + + // register actor + import BinaryFormatMyJavaSerializableActor._ + var serializeMailbox = true + node.store(actorRef, serializeMailbox) + val actorRef2 = node2.use(ActorAddress(actorUuid = actorRef.uuid)).head + + node2.isClustered(ActorAddress(actorUuid = actorRef.uuid)) must be(true) + node.uuidsForClusteredActors.exists(_ == actorRef.uuid) must be(true) + node.nodesForActorsInUseWithUuid(actorRef.uuid) must have length (1) + + // deregister actor + node.remove(ActorAddress(actorUuid = actorRef.uuid)) + node.uuidsForClusteredActors.exists(_ == actorRef.uuid) must be(false) + + node.nodesForActorsInUseWithUuid(actorRef.uuid) must have length (0) + node.stop + } + + "be able to release used actor on remove an actor by actor id" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-id", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-uuid-2", port = 9002)).start + + // register actor + import BinaryFormatMyJavaSerializableActor._ + var serializeMailbox = true + node.store(actorRef, serializeMailbox) + val actorRef2 = node2.use(ActorAddress(actorId = actorRef.id)).head + + node2.isClustered(ActorAddress(actorId = actorRef.id)) must be(true) + node.idsForClusteredActors.exists(_ == actorRef.id) must be(true) + node.nodesForActorsInUseWithId(actorRef.id) must have length (1) + + // deregister actor + node.remove(ActorAddress(actorId = actorRef.id)) + node.idsForClusteredActors.exists(_ == actorRef.id) must be(false) + + node.nodesForActorsInUseWithId(actorRef.id) must have length (0) + node.stop + } + + "be able to release used actor on remove an actor by actor class name" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-classname", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-actor-uuid-2", port = 9002)).start + node.start + + // register actor + import BinaryFormatMyJavaSerializableActor._ + var serializeMailbox = true + node.store(actorRef, serializeMailbox) + val actorRef2 = node2.use(ActorAddress(actorClassName = actorRef.actorClassName)).head + + node2.isClustered(ActorAddress(actorClassName = actorRef.actorClassName)) must be(true) + node.classNamesForClusteredActors.exists(_ == actorRef.actorClassName) must be(true) + node.nodesForActorsInUseWithClassName(actorRef.actorClassName) must have length (1) + + // deregister actor + node.remove(ActorAddress(actorClassName = actorRef.actorClassName)) + node.classNamesForClusteredActors.exists(_ == actorRef.actorClassName) must be(false) + 
node.nodesForActorsInUseWithClassName(actorRef.actorClassName) must have length (0) + + node.stop + } + + "be able to get home address for a clustered actor" in { + val node = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "get-home-address", port = 9001)) + node.start + + // create actor + val actorRef1 = actorOf[MyJavaSerializableActor].start + (actorRef1 !! "hello").getOrElse("_") must equal("world 1") + (actorRef1 !! "hello").getOrElse("_") must equal("world 2") + + // register actor + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node.store(actorRef1, serializeMailbox) + node.isClustered(ActorAddress(actorUuid = actorRef1.uuid)) must be(true) + node.uuidsForClusteredActors.exists(_ == actorRef1.uuid) must be(true) + + // check out actor + val actorRef2 = node.use(ActorAddress(actorUuid = actorRef1.uuid)).head + node.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "get-home-address")) must be(true) + (actorRef2 !! "hello").getOrElse("_") must equal("world 3") + + val addresses = node.addressesForActor(ActorAddress(actorUuid = actorRef1.uuid)) + addresses.length must be > (0) + addresses(0)._2.getPort must equal(9001) + + actorRef1.stop + actorRef2.stop + + node.stop + } + + "be able to migrate an actor between two nodes using uuid" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-uuid-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-uuid-2", port = 9002)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorUuid = actorRef1.uuid)) must be(true) + node1.uuidsForClusteredActors.exists(_ == actorRef1.uuid) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorUuid = actorRef1.uuid)).head + val actorRef2_2 = node1.use(ActorAddress(actorUuid = actorRef2.uuid)).head + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(true) + + node1.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(true) + node1.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(true) + + node2.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(false) + node2.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-uuid-2")) must be(false) + + // migrate to node2 + node1.migrate(node1.nodeAddress, node2.nodeAddress, ActorAddress(actorUuid = actorRef1_2.uuid)) + node1.migrate(node1.nodeAddress, node2.nodeAddress, ActorAddress(actorUuid = actorRef2_2.uuid)) + + val actorRef1_3 = node2.use(ActorAddress(actorUuid = actorRef1.uuid)).head + val actorRef2_3 = node2.use(ActorAddress(actorUuid = actorRef2.uuid)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(false) + node1.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(false) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(false) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(false) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(false) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-uuid-1")) must be(false) + + node2.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(true) + node2.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-uuid-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-uuid-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-uuid-2")) must be(true) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node1.stop + node2.stop + } + + "be able to migrate an actor between two nodes using id" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-id-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-id-2", port = 9002)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorId = actorRef1.id)) must be(true) + node1.idsForClusteredActors.exists(_ == actorRef1.id) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorId = actorRef1.id)).head + val actorRef2_2 = node1.use(ActorAddress(actorId = actorRef2.id)).head + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(true) + + node1.idsForActorsInUse.exists(_ == actorRef1.id) must be(true) + node1.idsForActorsInUse.exists(_ == actorRef2.id) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(true) + + node2.idsForActorsInUse.exists(_ == actorRef1.id) must be(false) + node2.idsForActorsInUse.exists(_ == actorRef2.id) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-id-2")) must be(false) + + // migrate to node2 + node1.migrate(node1.nodeAddress, node2.nodeAddress, ActorAddress(actorId = actorRef1_2.id)) + + val actorRef1_3 = node2.use(ActorAddress(actorId = actorRef1.id)).head + val actorRef2_3 = node2.use(ActorAddress(actorId = actorRef2.id)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.idsForActorsInUse.exists(_ == actorRef1.id) must be(false) + node1.idsForActorsInUse.exists(_ == actorRef2.id) must be(false) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(false) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(false) + node1.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(false) + node1.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-id-1")) must be(false) + + node2.idsForActorsInUse.exists(_ == actorRef1.id) must be(true) + node2.idsForActorsInUse.exists(_ == actorRef2.id) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-id-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-id-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-id-2")) must be(true) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node1.stop + node2.stop + } + + "be able to migrate an actor between two nodes using actor class name" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-class-name-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-class-name-2", port = 9002)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorClassName = actorRef1.actorClassName)) must be(true) + node1.classNamesForClusteredActors.exists(_ == actorRef1.actorClassName) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorClassName = actorRef1.actorClassName)).head + val actorRef2_2 = node1.use(ActorAddress(actorClassName = actorRef2.actorClassName)).head + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-1")) must be(true) + + node1.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(true) + node1.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-1")) must be(true) + + node2.idsForActorsInUse.exists(_ == actorRef1.actorClassName) must be(false) + node2.idsForActorsInUse.exists(_ == actorRef2.actorClassName) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-2")) must be(false) + + // migrate to node2 + node1.migrate(node1.nodeAddress, node2.nodeAddress, ActorAddress(actorClassName = actorRef1_2.actorClassName)) + + val actorRef1_3 = node2.use(ActorAddress(actorClassName = actorRef1.actorClassName)).head + val actorRef2_3 = node2.use(ActorAddress(actorClassName = actorRef2.actorClassName)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! "hello").getOrElse("_") must equal("world 1") + + node1.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(false) + node1.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(false) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-1")) must be(false) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-1")) must be(false) + + node2.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(true) + node2.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(true) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-class-name-2")) must be(true) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node1.stop + node2.stop + } + + "automatically migrate actors of a failed node in a cluster of two nodes using uuid" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-2-uuid-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-2-uuid-2", port = 9002)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = 
actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorUuid = actorRef1.uuid)) must be(true) + node1.uuidsForClusteredActors.exists(_ == actorRef1.uuid) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorUuid = actorRef1.uuid)).head + val actorRef2_2 = node1.use(ActorAddress(actorUuid = actorRef2.uuid)).head + + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! "hello").getOrElse("_") must equal("world 1") + + node1.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(true) + node1.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-1")) must be(true) + + node2.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(false) + node2.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-2")) must be(false) + + // should migrate to node2 + node1.stop + node1.isRunning must be(false) + Thread.sleep(500) + + val actorRef1_3 = node2.use(ActorAddress(actorUuid = actorRef1.uuid)).head + val actorRef2_3 = node2.use(ActorAddress(actorUuid = actorRef2.uuid)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! 
"hello").getOrElse("_") must equal("world 1") + + node2.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(true) + node2.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-2-uuid-2")) must be(true) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node1.stop + } + + "automatically migrate actors of a failed node in a cluster of two nodes using id" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-2-id-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-2-id-2", port = 9002)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorClassName = actorRef1.id)) must be(true) + node1.idsForClusteredActors.exists(_ == actorRef1.id) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorId = actorRef1.id)).head + val actorRef2_2 = node1.use(ActorAddress(actorId = actorRef2.id)).head + + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! "hello").getOrElse("_") must equal("world 1") + + node1.idsForActorsInUse.exists(_ == actorRef1.id) must be(true) + node1.idsForActorsInUse.exists(_ == actorRef2.id) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-2-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-2-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-2-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-2-id-1")) must be(true) + + node2.idsForActorsInUse.exists(_ == actorRef1.id) must be(false) + node2.idsForActorsInUse.exists(_ == actorRef2.id) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-2-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-2-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-2-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-2-id-2")) must be(false) + + // should migrate to node2 + node1.stop + node1.isRunning must be(false) + Thread.sleep(500) + + val actorRef1_3 = node2.use(ActorAddress(actorId = actorRef1.id)).head + val actorRef2_3 = node2.use(ActorAddress(actorId = actorRef2.id)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! 
"hello").getOrElse("_") must equal("world 1") + + node2.idsForActorsInUse.exists(_ == actorRef1.id) must be(true) + node2.idsForActorsInUse.exists(_ == actorRef2.id) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-2-id-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-2-id-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-2-id-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-2-id-2")) must be(true) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node2.stop + } + + "automatically migrate actors of a failed node in a cluster of two nodes using class name" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-2-classname-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-2-classname-2", port = 9002)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorClassName = actorRef1.actorClassName)) must be(true) + node1.classNamesForClusteredActors.exists(_ == actorRef1.actorClassName) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorClassName = actorRef1.actorClassName)).head + val actorRef2_2 = node1.use(ActorAddress(actorClassName = actorRef2.actorClassName)).head + + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(true) + node1.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-1")) must be(true) + + node2.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(false) + node2.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-2")) must be(false) + + // should migrate to node2 + node1.stop + node1.isRunning must be(false) + Thread.sleep(500) + + val actorRef1_3 = node2.use(ActorAddress(actorClassName = actorRef1.actorClassName)).head + val actorRef2_3 = node2.use(ActorAddress(actorClassName = actorRef2.actorClassName)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! 
"hello").getOrElse("_") must equal("world 1") + + node2.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(true) + node2.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(true) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-2")) must be(true) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-2-classname-2")) must be(true) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node2.stop + } + + "automatically migrate actors of a failed node in a cluster of three nodes using uuid" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-uuid-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-uuid-2", port = 9002)) + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-uuid-3", port = 9003)) + node1.start + node2.start + node3.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorUuid = actorRef1.uuid)) must be(true) + node1.uuidsForClusteredActors.exists(_ == actorRef1.uuid) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorUuid = actorRef1.uuid)).head + val actorRef2_2 = node1.use(ActorAddress(actorUuid = actorRef2.uuid)).head + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(true) + node1.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-1")) must be(true) + + node2.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(false) + node2.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-2")) must be(false) + + node3.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(false) + node3.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(false) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-3")) must be(false) + + // should migrate to node2 + node1.stop + node1.isRunning must be(false) + Thread.sleep(500) + + val actorRef1_3 = node3.use(ActorAddress(actorUuid = actorRef1.uuid)).head + val actorRef2_3 = node3.use(ActorAddress(actorUuid = actorRef2.uuid)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! 
"hello").getOrElse("_") must equal("world 1") + + node3.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(true) + node3.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(true) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-3")) must be(true) + + node2.uuidsForActorsInUse.exists(_ == actorRef1.uuid) must be(false) + node2.uuidsForActorsInUse.exists(_ == actorRef2.uuid) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = NodeAddress("test-cluster", "migrate-3-uuid-2")) must be(false) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node2.stop + node3.stop + } + + "automatically migrate actors of a failed node in a cluster of three nodes using id" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-id-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-id-2", port = 9002)) + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-id-3", port = 9003)) + node1.start + node2.start + node3.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorId = actorRef1.id)) must be(true) + node1.idsForClusteredActors.exists(_ == actorRef1.id) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorId = actorRef1.id)).head + val actorRef2_2 = node1.use(ActorAddress(actorId = actorRef2.id)).head + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.idsForActorsInUse.exists(_ == actorRef1.id) must be(true) + node1.idsForActorsInUse.exists(_ == actorRef2.id) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-1")) must be(true) + + node2.idsForActorsInUse.exists(_ == actorRef1.id) must be(false) + node2.idsForActorsInUse.exists(_ == actorRef2.id) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-2")) must be(false) + + node3.idsForActorsInUse.exists(_ == actorRef1.id) must be(false) + node3.idsForActorsInUse.exists(_ == actorRef2.id) must be(false) + node3.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-3")) must be(false) + + // should migrate to node2 + node1.stop + node1.isRunning must be(false) + Thread.sleep(500) + + val actorRef1_3 = node3.use(ActorAddress(actorId = actorRef1.id)).head + val actorRef2_3 = node3.use(ActorAddress(actorId = actorRef2.id)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! 
"hello").getOrElse("_") must equal("world 1") + + node3.idsForActorsInUse.exists(_ == actorRef1.id) must be(true) + node3.idsForActorsInUse.exists(_ == actorRef2.id) must be(true) + node3.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-3")) must be(true) + + node2.idsForActorsInUse.exists(_ == actorRef1.id) must be(false) + node2.idsForActorsInUse.exists(_ == actorRef2.id) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = NodeAddress("test-cluster", "migrate-3-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorId = actorRef2.id), node = NodeAddress("test-cluster", "migrate-3-id-2")) must be(false) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node2.stop + node3.stop + } + + "automatically migrate actors of a failed node in a cluster of three nodes using class name" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-classname-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-classname-2", port = 9002)) + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-3-classname-3", port = 9003)) + node1.start + node2.start + node3.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorClassName = actorRef1.actorClassName)) must be(true) + node1.classNamesForClusteredActors.exists(_ == actorRef1.actorClassName) must be(true) + + // check out actor + val actorRef1_2 = node1.use(ActorAddress(actorClassName = actorRef1.actorClassName)).head + val actorRef2_2 = node1.use(ActorAddress(actorClassName = actorRef2.actorClassName)).head + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! 
"hello").getOrElse("_") must equal("world 1") + + node1.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(true) + node1.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-1")) must be(true) + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-1")) must be(true) + + node2.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(false) + node2.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-2")) must be(false) + + node3.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(false) + node3.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(false) + node3.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-3")) must be(false) + node3.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-3")) must be(false) + + // should migrate to node2 + node1.stop + node1.isRunning must be(false) + Thread.sleep(500) + + val actorRef1_3 = node3.use(ActorAddress(actorClassName = actorRef1.actorClassName)).head + val actorRef2_3 = node3.use(ActorAddress(actorClassName = actorRef2.actorClassName)).head + (actorRef1_3 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_3 !! 
"hello").getOrElse("_") must equal("world 1") + + node3.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(true) + node3.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(true) + node3.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-3")) must be(true) + node3.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-3")) must be(true) + + node2.classNamesForActorsInUse.exists(_ == actorRef1.actorClassName) must be(false) + node2.classNamesForActorsInUse.exists(_ == actorRef2.actorClassName) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-2")) must be(false) + node2.isInUseOnNode(ActorAddress(actorClassName = actorRef2.actorClassName), node = NodeAddress("test-cluster", "migrate-3-classname-2")) must be(false) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node2.stop + node3.stop + } + + "be able to migrate an actor between two nodes using uuid and see that 'ref' to it is redirected and continue to work" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-uuid-and-see-ref-failover-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-uuid-and-see-ref-failover-2", port = 9002)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + Thread.sleep(500) + + // register actors + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1) + node1.store(actorRef2) + + Thread.sleep(500) + + // use on node1 + node1.use(ActorAddress(actorUuid = actorRef1.uuid)) + node1.use(ActorAddress(actorUuid = actorRef2.uuid)) + + Thread.sleep(500) + + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef1.uuid), node = node1.nodeAddress) must be(true) + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef2.uuid), node = node1.nodeAddress) must be(true) + + // check out actor ref on node2 + val actorRef1_2 = node2.ref(ActorAddress(actorUuid = actorRef1.uuid), router = Router.Direct) + val actorRef2_2 = node2.ref(ActorAddress(actorUuid = actorRef2.uuid), router = Router.Direct) + + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + (actorRef2_2 !! "hello").getOrElse("_") must equal("world 1") + + // migrate to node2 + node1.migrate(node1.nodeAddress, node2.nodeAddress, ActorAddress(actorUuid = actorRef1.uuid)) + node1.migrate(node1.nodeAddress, node2.nodeAddress, ActorAddress(actorUuid = actorRef2.uuid)) + + Thread.sleep(500) + + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 2") + (actorRef2_2 !! 
"hello").getOrElse("_") must equal("world 2") + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + node1.stop + node2.stop + } + + "be able to migrate an actor between two nodes using id and see that 'ref' to it is redirected and continue to work" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-id-and-see-ref-failover-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-id-and-see-ref-failover-2", port = 9002)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + + Thread.sleep(500) + + // register actors + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1) + + Thread.sleep(500) + + // use on node1 + node1.use(ActorAddress(actorId = actorRef1.id)) + + node1.isInUseOnNode(ActorAddress(actorId = actorRef1.id), node = node1.nodeAddress) must be(true) + + // check out actor ref on node2 + val actorRef1_2 = node2.ref(ActorAddress(actorId = actorRef1.id), router = Router.Direct) + + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + + // migrate to node2 + node1.migrate(node1.nodeAddress, node2.nodeAddress, ActorAddress(actorId = actorRef1.id)) + + Thread.sleep(500) + + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 2") + + actorRef1.stop + actorRef1_2.stop + + node1.stop + node2.stop + } + + "be able to migrate an actor between two nodes using class name and see that 'ref' to it is redirected and continue to work" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-classname-and-see-ref-failover-1", port = 9011)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "migrate-classname-and-see-ref-failover-2", port = 9012)) + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + + Thread.sleep(500) + + // register actors + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1) + + Thread.sleep(500) + + // use on node1 + node1.use(ActorAddress(actorClassName = actorRef1.actorClassName)) + + node1.isInUseOnNode(ActorAddress(actorClassName = actorRef1.actorClassName), node = node1.nodeAddress) must be(true) + + Thread.sleep(500) + + // check out actor ref on node2 + val actorRef1_2 = node2.ref(ActorAddress(actorClassName = actorRef1.actorClassName), router = Router.Direct) + + (actorRef1_2 !! "hello").getOrElse("_") must equal("world 1") + + // migrate to node2 + node1.migrate(node1.nodeAddress, node2.nodeAddress, ActorAddress(actorClassName = actorRef1.actorClassName)) + + (actorRef1_2 !! 
"hello").getOrElse("_") must equal("world 2") + + actorRef1.stop + actorRef1_2.stop + + node1.stop + node2.stop + } + + "be able to set and get config elements" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "set-get-config-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "set-get-config-2", port = 9002)) + node1.start + node2.start + + node1.setConfigElement("key1", "value1".getBytes) + node2.getConfigElement("key1") must be("value1".getBytes) + + node2.setConfigElement("key2", "value2".getBytes) + node1.getConfigElement("key2") must be("value2".getBytes) + + node1.stop + node2.stop + } + + "be able to remove config elements" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-config-1", port = 9001)) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "remove-config-2", port = 9002)) + node1.start + node2.start + + node1.setConfigElement("key1", "value1".getBytes) + node2.getConfigElement("key1") must be("value1".getBytes) + + node2.removeConfigElement("key1") + node1.getConfigElement("key1") must be(null) + + node1.stop + node2.stop + } + + "be able to replicate an actor" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "replicate-actor-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "replicate-actor-2", port = 9002)).start + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "replicate-actor-3", port = 9003)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 3 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef.uuid), node = NodeAddress("test-cluster", "replicate-actor-1", port = 9001)) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef.uuid), node = NodeAddress("test-cluster", "replicate-actor-2", port = 9002)) must be(true) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef.uuid), node = NodeAddress("test-cluster", "replicate-actor-3", port = 9003)) must be(true) + + node1.stop + node2.stop + node3.stop + } + + "be able to create a reference to a replicated actor by UUID using Router.Direct routing" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-direct-actor-by-uuid-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-direct-actor-by-uuid-2", port = 9002)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 1 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorUuid = actorRef.uuid), router = Router.Direct) + + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! 
"hello").getOrElse("_") must equal("world 2") + + node1.stop + node2.stop + } + + "be able to create a reference to a replicated actor by ID using Router.Direct routing" in { + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-direct-actor-by-id-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-direct-actor-by-id-2", port = 9002)).start + + Thread.sleep(500) + + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 1 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorId = actorRef.id), router = Router.Direct) + + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! "hello").getOrElse("_") must equal("world 2") + + node1.stop + node2.stop + } + + "be able to create a reference to a replicated actor by ClassName using Router.Direct routing" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-direct-actor-by-classname-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-direct-actor-by-classname-2", port = 9002)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 1 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorClassName = actorRef.actorClassName), router = Router.Direct) + + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! "hello").getOrElse("_") must equal("world 2") + + node1.stop + node2.stop + } + + "be able to create a reference to a replicated actor by UUID using Router.Random routing" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-uuid-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-uuid-2", port = 9002)).start + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-uuid-3", port = 9003)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 2 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorUuid = actorRef.uuid), router = Router.Random) + + (ref !! 
"hello").getOrElse("_") must equal("world 1") + + node1.stop + node2.stop + node3.stop + } + + "be able to create a reference to a replicated actor by ID using Router.Random routing" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-id-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-id-2", port = 9002)).start + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-id-3", port = 9003)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 2 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorId = actorRef.id), router = Router.Random) + + (ref !! "hello").getOrElse("_") must equal("world 1") + + node1.stop + node2.stop + node3.stop + } + + "be able to create a reference to a replicated actor by class name using Router.Random routing" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-classname-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-classname-2", port = 9002)).start + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-random-actor-by-classname-3", port = 9003)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 2 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorClassName = actorRef.actorClassName), router = Router.Random) + + (ref !! "hello").getOrElse("_") must equal("world 1") + + node1.stop + node2.stop + node3.stop + } + + "be able to create a reference to a replicated actor by UUID using Router.RoundRobin routing" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-uuid-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-uuid-2", port = 9002)).start + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-uuid-3", port = 9003)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 3 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! 
command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorUuid = actorRef.uuid), router = Router.RoundRobin) + + node1.isInUseOnNode(ActorAddress(actorUuid = actorRef.uuid), node = NodeAddress("test-cluster", "router-round-robin-actor-by-uuid-1", port = 9001)) must be(true) + node2.isInUseOnNode(ActorAddress(actorUuid = actorRef.uuid), node = NodeAddress("test-cluster", "router-round-robin-actor-by-uuid-2", port = 9002)) must be(true) + node3.isInUseOnNode(ActorAddress(actorUuid = actorRef.uuid), node = NodeAddress("test-cluster", "router-round-robin-actor-by-uuid-3", port = 9003)) must be(true) + + val addresses = node1.addressesForActor(ActorAddress(actorUuid = actorRef.uuid)) + addresses.length must equal(3) + + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! "hello").getOrElse("_") must equal("world 1") + + (ref !! "hello").getOrElse("_") must equal("world 2") + (ref !! "hello").getOrElse("_") must equal("world 2") + (ref !! "hello").getOrElse("_") must equal("world 2") + + (ref !! "hello").getOrElse("_") must equal("world 3") + (ref !! "hello").getOrElse("_") must equal("world 3") + (ref !! "hello").getOrElse("_") must equal("world 3") + + node1.stop + node2.stop + node3.stop + } + + "be able to create a reference to a replicated actor by ID using Router.RoundRobin routing" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-id-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-id-2", port = 9002)).start + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-id-3", port = 9003)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 3 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorId = actorRef.id), router = Router.RoundRobin) + + node1.isInUseOnNode(ActorAddress(actorId = actorRef.id), node = NodeAddress("test-cluster", "router-round-robin-actor-by-id-1", port = 9001)) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef.id), node = NodeAddress("test-cluster", "router-round-robin-actor-by-id-2", port = 9002)) must be(true) + node3.isInUseOnNode(ActorAddress(actorId = actorRef.id), node = NodeAddress("test-cluster", "router-round-robin-actor-by-id-3", port = 9003)) must be(true) + + val addresses = node1.addressesForActor(ActorAddress(actorId = actorRef.id)) + addresses.length must equal(3) + + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! "hello").getOrElse("_") must equal("world 1") + + (ref !! "hello").getOrElse("_") must equal("world 2") + (ref !! "hello").getOrElse("_") must equal("world 2") + (ref !! "hello").getOrElse("_") must equal("world 2") + + (ref !! "hello").getOrElse("_") must equal("world 3") + (ref !! "hello").getOrElse("_") must equal("world 3") + (ref !! 
"hello").getOrElse("_") must equal("world 3") + + node1.stop + node2.stop + node3.stop + } + + "be able to create a reference to a replicated actor by class name using Router.RoundRobin routing" in { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-classname-1", port = 9001)).start + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-classname-2", port = 9002)).start + val node3 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-classname-3", port = 9003)).start + + Thread.sleep(500) + + // register actor + import BinaryFormatMyJavaSerializableActor._ + val replicationFactor = 3 + node1.store(actorRef, replicationFactor) + + Thread.sleep(500) // since deployment is async (daemon ! command), we have to wait some before checking + + val ref = node1.ref(ActorAddress(actorId = actorRef.id), router = Router.RoundRobin) + + node1.isInUseOnNode(ActorAddress(actorId = actorRef.id), node = NodeAddress("test-cluster", "router-round-robin-actor-by-classname-1", port = 9001)) must be(true) + node2.isInUseOnNode(ActorAddress(actorId = actorRef.id), node = NodeAddress("test-cluster", "router-round-robin-actor-by-classname-2", port = 9002)) must be(true) + node3.isInUseOnNode(ActorAddress(actorId = actorRef.id), node = NodeAddress("test-cluster", "router-round-robin-actor-by-classname-3", port = 9003)) must be(true) + + val addresses = node1.addressesForActor(ActorAddress(actorId = actorRef.id)) + addresses.length must equal(3) + + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! "hello").getOrElse("_") must equal("world 1") + (ref !! "hello").getOrElse("_") must equal("world 1") + + (ref !! "hello").getOrElse("_") must equal("world 2") + (ref !! "hello").getOrElse("_") must equal("world 2") + (ref !! "hello").getOrElse("_") must equal("world 2") + + (ref !! "hello").getOrElse("_") must equal("world 3") + (ref !! "hello").getOrElse("_") must equal("world 3") + (ref !! 
"hello").getOrElse("_") must equal("world 3") + + node1.stop + node2.stop + node3.stop + } + + "last dummy test" in withPrintStackTraceOnError { + // create actor + val actorRef = actorOf[MyJavaSerializableActor].start + Thread.sleep(1000) + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "router-round-robin-actor-by-classname-1", port = 9001)).start + node1.stop + } + } + + override def beforeAll() = { + zkServer = Cluster.startLocalCluster(dataPath, logPath) + } + + override def beforeEach() = { + Cluster.reset + } + + override def afterAll() = { + Cluster.shutdownLocalCluster + Actor.registry.shutdownAll + } +} + +/* + "be able to subscribe to actor location change events" in { + val node1 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "test-node1", port = 9991) + val node2 = Cluster.newNode(nodeAddress = NodeAddress("test-cluster", "test-node2", port = 9992) + + val barrier = new CyclicBarrier(2) + + node2.register(ActorLocationsChildChange, new ChangeListener() { + def notify(node: ClusterNode) = barrier.await + }) + + try { + node1.start + node2.start + + // create actors + val actorRef1 = actorOf[MyJavaSerializableActor].start + val actorRef2 = actorOf[MyJavaSerializableActor].start + + // register actors + var serializeMailbox = true + import BinaryFormatMyJavaSerializableActor._ + node1.store(actorRef1, serializeMailbox) + node1.store(actorRef2, serializeMailbox) + + node1.isClustered(ActorAddress(actorRef1.uuid)) must be (true) + node1.uuidsForClusteredActors.exists(_ == actorRef1.uuid) must be (true) + + // check out actor + val actorRef1_2 = node1.use(actorRef1.uuid) + val actorRef2_2 = node1.use(actorRef2.uuid) + + // should migrate to node2 + node1.stop + node1.isRunning must be (false) + + barrier.await(20, TimeUnit.SECONDS) + + actorRef1.stop + actorRef2.stop + actorRef1_2.stop + actorRef2_2.stop + + } finally { + node2.stop + node2.isRunning must be (false) + } + } + +*/ diff --git a/akka-cluster/src/test/scala/akka/cloud/cluster/ClusteredFunctions.scala b/akka-cluster/src/test/scala/akka/cloud/cluster/ClusteredFunctions.scala new file mode 100644 index 0000000000..3e19d1e47e --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cloud/cluster/ClusteredFunctions.scala @@ -0,0 +1,90 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package sample.cluster + +import akka.cloud.cluster._ +import akka.dispatch.Futures + +/** + * @author Jonas Bonér + */ +object ClusteredFunctions { +//sample.cluster.ClusteredFunctions.fun2 + + // run all + def run { + fun1 + fun2 + fun3 + fun4 + } + + // Send Function0[Unit] + def fun1 = { + Cluster.startLocalCluster() + val node = Cluster newNode (NodeAddress("test", "local", port = 9991)) start + val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start + + Thread.sleep(100) + val fun = () => println("=============>>> AKKA ROCKS <<<=============") + node send (fun, 2) // send and invoke function on to two cluster nodes + + node.stop + remote1.stop + Cluster.shutdownLocalCluster() + } + + // Send Function0[Any] + def fun2 = { + Cluster.startLocalCluster() + val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start + val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start + + Thread.sleep(100) + val fun = () => "AKKA ROCKS" + val futures = local send (fun, 2) // send and invoke function on to two cluster nodes and get result + + val result = Futures.fold("")(futures)(_ + " - " + _).await.resultOrException + 
println("===================>>> Cluster says [" + result + "]") + + local.stop + remote1.stop + Cluster.shutdownLocalCluster() + } + + // Send Function1[Any, Unit] + def fun3 = { + Cluster.startLocalCluster() + val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start + val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start + + val fun = ((s: String) => println("=============>>> " + s + " <<<=============")).asInstanceOf[Function1[Any, Unit]] + local send (fun, "AKKA ROCKS", 2) // send and invoke function on to two cluster nodes + + local.stop + remote1.stop + Cluster.shutdownLocalCluster() + } + + // Send Function1[Any, Any] + def fun4 = { + Cluster.startLocalCluster() + val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start + val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start + + val fun = ((i: Int) => i * i).asInstanceOf[Function1[Any, Any]] + + val future1 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result + val future2 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result + + // grab the result from the first one that returns + val result = Futures.firstCompletedOf(List(future1, future2)).await.resultOrException + println("===================>>> Cluster says [" + result.get + "]") + + local.stop + remote1.stop + Cluster.shutdownLocalCluster() + } +} diff --git a/akka-cluster/src/test/scala/akka/cloud/cluster/ClusteredPingPongSample.scala b/akka-cluster/src/test/scala/akka/cloud/cluster/ClusteredPingPongSample.scala new file mode 100644 index 0000000000..2e81cc0bcd --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cloud/cluster/ClusteredPingPongSample.scala @@ -0,0 +1,147 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package sample.cluster + +import akka.cloud.cluster._ + +import akka.actor._ +import akka.actor.Actor._ +import akka.serialization.{Serializer, SerializerBasedActorFormat} + +import java.util.concurrent.CountDownLatch + +object PingPong { + + val NrOfPings = 5 + + // ------------------------ + // Messages + // ------------------------ + + sealed trait PingPong extends Serializable + case object Ball extends PingPong + case object Stop extends PingPong + case class Latch(latch: CountDownLatch) extends PingPong + + // ------------------------ + // Actors + // ------------------------ + + class PingActor extends Actor with Serializable { + var count = 0 + var gameOverLatch: CountDownLatch = _ + + def receive = { + case Ball => + if (count < NrOfPings) { + println("---->> PING (%s)" format count) + count += 1 + self reply Ball + } else { + self.sender.foreach(_ !! 
Stop) + gameOverLatch.countDown + self.stop + } + case Latch(latch) => + gameOverLatch = latch + } + } + + class PongActor extends Actor with Serializable { + def receive = { + case Ball => + self reply Ball + case Stop => + self reply Stop + self.stop + } + } + + // ------------------------ + // Serialization + // ------------------------ + + object BinaryFormats { + implicit object PingActorFormat extends SerializerBasedActorFormat[PingActor] with Serializable { + val serializer = Serializer.Java + } + + implicit object PongActorFormat extends SerializerBasedActorFormat[PongActor] with Serializable { + val serializer = Serializer.Java + } + } +} + +object ClusteredPingPongSample { + import PingPong._ + import BinaryFormats._ + + val CLUSTER_NAME = "test-cluster" + val PING_SERVICE = classOf[PingActor].getName + val PONG_SERVICE = classOf[PongActor].getName + + def main(args: Array[String]) = run + + def run = { + + // ------------------------ + // Start cluster of 5 nodes + // ------------------------ + + Cluster.startLocalCluster() + val localNode = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node0", port = 9991)).start + val remoteNodes = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node1", port = 9992)).start :: + Cluster.newNode(NodeAddress(CLUSTER_NAME, "node2", port = 9993)).start :: + Cluster.newNode(NodeAddress(CLUSTER_NAME, "node3", port = 9994)).start :: + Cluster.newNode(NodeAddress(CLUSTER_NAME, "node4", port = 9995)).start :: Nil + + // ------------------------ + // Store the actors in the cluster + // ------------------------ + + // Store the PingActor in the cluster, but do not deploy it anywhere + localNode.store(classOf[PingActor]) + + // Store the PongActor in the cluster and deploy it + // to 5 (replication factor) nodes in the cluster + localNode.store(classOf[PongActor], 5) + + Thread.sleep(1000) // let the deployment finish + + // ------------------------ + // Get the actors from the cluster + // ------------------------ + + // Check out a local PingActor instance (not reference) + val ping = localNode.use[PingActor](ActorAddress(actorId = PING_SERVICE)).head + + // Get a reference to all the pong actors through a round-robin router ActorRef + val pong = localNode.ref(ActorAddress(actorId = PONG_SERVICE), router = Router.RoundRobin) + + // ------------------------ + // Play the game + // ------------------------ + + val latch = new CountDownLatch(1) + ping ! Latch(latch) // register latch for actor to know when to stop + + println("---->> SERVE") + + implicit val replyTo = Some(pong) // set the reply address to the PongActor + ping ! 
Ball // serve + + latch.await // wait for game to finish + + println("---->> GAME OVER") + + // ------------------------ + // Clean up + // ------------------------ + + localNode.stop + remoteNodes.foreach(_.stop) + Cluster.shutdownLocalCluster() + } +} diff --git a/akka-cluster/src/test/scala/akka/cloud/cluster/PingPongMultiJvmExample.scala b/akka-cluster/src/test/scala/akka/cloud/cluster/PingPongMultiJvmExample.scala new file mode 100644 index 0000000000..42bfc130f1 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cloud/cluster/PingPongMultiJvmExample.scala @@ -0,0 +1,239 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package example.cluster + +import akka.cloud.cluster._ +import akka.cloud.monitoring._ + +import akka.actor._ +import akka.serialization.{Serializer, SerializerBasedActorFormat} +import akka.util.duration._ + +object PingPong { + val ClusterName = "ping-pong-cluster" + val NrOfNodes = 5 + val Pause = true + val PauseTimeout = 5 minutes + + // ----------------------------------------------- + // Messages + // ----------------------------------------------- + + sealed trait PingPong extends Serializable + case object Ping extends PingPong + case object Pong extends PingPong + case object Stop extends PingPong + + case class Serve(player: ActorRef) + + // ----------------------------------------------- + // Actors + // ----------------------------------------------- + + class PingActor extends Actor with Serializable { + var pong: ActorRef = _ + var play = true + + def receive = { + case Pong => + if (play) { + println("---->> PING") + pong ! Ping + } else { + println("---->> GAME OVER") + } + case Serve(player) => + pong = player + println("---->> SERVE") + pong ! Ping + case Stop => + play = false + } + } + + class PongActor extends Actor with Serializable { + def receive = { + case Ping => + println("---->> PONG") + self reply Pong + } + } + + // ----------------------------------------------- + // Serialization + // ----------------------------------------------- + + object BinaryFormats { + implicit object PingActorFormat extends SerializerBasedActorFormat[PingActor] with Serializable { + val serializer = Serializer.Java + } + + implicit object PongActorFormat extends SerializerBasedActorFormat[PongActor] with Serializable { + val serializer = Serializer.Java + } + } +} + +object PingPongMultiJvmNode1 { + import PingPong._ + import BinaryFormats._ + + val PingService = classOf[PingActor].getName + val PongService = classOf[PongActor].getName + + def main(args: Array[String]) { run } + + def run = { + // ----------------------------------------------- + // Start monitoring + // ----------------------------------------------- + + MonitoringServer.start + Monitoring.startLocalDaemons + + // ----------------------------------------------- + // Start cluster + // ----------------------------------------------- + + Cluster.startLocalCluster() + + // create node + val node = Cluster.newNode(NodeAddress(ClusterName, "node1", port = 9991)) + + def pause(name: String, message: String) = { + node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) { + println(message) + if (Pause) { + println("Press enter to continue (timeout of %s) ..." 
format PauseTimeout) + System.in.read + } + } + } + + pause("start", "Ready to start all nodes") + println("Starting nodes ...") + + node.start + + node.barrier("start", NrOfNodes) { + // wait for others to start + } + + // ----------------------------------------------- + // Store pong actors in the cluster + // ----------------------------------------------- + + pause("create", "Ready to create all actors") + println("Creating actors ...") + + // store the ping actor in the cluster, but do not deploy it anywhere + node.store(classOf[PingActor]) + + // store the pong actor in the cluster and replicate it on all nodes + node.store(classOf[PongActor], NrOfNodes) + + // give some time for the deployment + Thread.sleep(3000) + + // ----------------------------------------------- + // Get actor references + // ----------------------------------------------- + + // check out a local ping actor + val ping = node.use[PingActor](ActorAddress(actorId = PingService)).head + + // get a reference to all the pong actors through a round-robin router actor ref + val pong = node.ref(ActorAddress(actorId = PongService), router = Router.RoundRobin) + + // ----------------------------------------------- + // Play the game + // ----------------------------------------------- + + pause("play", "Ready to play ping pong") + + ping ! Serve(pong) + + // let them play for 3 seconds + Thread.sleep(3000) + + ping ! Stop + + // give some time for the game to finish + Thread.sleep(3000) + + // ----------------------------------------------- + // Stop actors + // ----------------------------------------------- + + pause("stop", "Ready to stop actors") + println("Stopping actors ...") + + ping.stop + pong.stop + + // give remote actors time to stop + Thread.sleep(5000) + + // ----------------------------------------------- + // Stop everything + // ----------------------------------------------- + + pause("shutdown", "Ready to shutdown") + println("Stopping everything ...") + + Monitoring.stopLocalDaemons + MonitoringServer.stop + + Actor.remote.shutdown + Actor.registry.shutdownAll + + node.stop + + Cluster.shutdownLocalCluster + } +} + +object PingPongMultiJvmNode2 extends PongNode(2) +object PingPongMultiJvmNode3 extends PongNode(3) +object PingPongMultiJvmNode4 extends PongNode(4) +object PingPongMultiJvmNode5 extends PongNode(5) + +class PongNode(number: Int) { + import PingPong._ + + def main(args: Array[String]) { run } + + def run = { + val node = Cluster.newNode(NodeAddress(ClusterName, "node" + number, port = 9990 + number)) + + def pause(name: String) = { + node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) { + // wait for user prompt + } + } + + pause("start") + + node.barrier("start", NrOfNodes) { + node.start + } + + pause("create") + + pause("play") + + pause("stop") + + pause("shutdown") + + // clean up and stop + + Actor.remote.shutdown + Actor.registry.shutdownAll + + node.stop + } +} + diff --git a/akka-cluster/src/test/scala/akka/cloud/cluster/ReplicationSpec.scala b/akka-cluster/src/test/scala/akka/cloud/cluster/ReplicationSpec.scala new file mode 100644 index 0000000000..d21875a72f --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cloud/cluster/ReplicationSpec.scala @@ -0,0 +1,281 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ +package akka.cloud.cluster + +import org.apache.bookkeeper.client.{BookKeeper, BKException} +import BKException._ +import org.apache.zookeeper.server.ZooKeeperServer + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers 
+import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach, Spec } + +import akka.serialization._ +import akka.actor._ +import ActorSerialization._ +import Actor._ + +import java.util.concurrent.{ CyclicBarrier, TimeUnit } +import java.io.File +import java.nio.ByteBuffer + +import com.eaio.uuid.UUID + +import scala.collection.JavaConversions._ + +class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll { + private var bookKeeper: BookKeeper = _ + private var localBookKeeper: LocalBookKeeper = _ + + // synchronous API + "A Transaction Log" should { + "be able to record entries - synchronous" in { + val uuid = (new UUID).toString + val txlog = TransactionLog.newLogFor(uuid) + val entry = "hello".getBytes("UTF-8") + txlog.recordEntry(entry) + } + + "be able to record and delete entries - synchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid) + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.delete + txlog1.close + intercept[BKNoSuchLedgerExistsException](TransactionLog.logFor(uuid)) + } + + "be able to record entries and read entries with 'entriesInRange' - synchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid) + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.close + + val txlog2 = TransactionLog.logFor(uuid) + val entries = txlog2.entriesInRange(0, 1).map(bytes => new String(bytes, "UTF-8")) + entries.size must equal (2) + entries(0) must equal ("hello") + entries(1) must equal ("hello") + txlog2.close + } + + "be able to record entries and read entries with 'entries' - synchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid) + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.close + + val txlog2 = TransactionLog.logFor(uuid) + val entries = txlog2.entries.map(bytes => new String(bytes, "UTF-8")) + entries.size must equal (4) + entries(0) must equal ("hello") + entries(1) must equal ("hello") + entries(2) must equal ("hello") + entries(3) must equal ("hello") + txlog2.close + } + + "be able to record a snapshot - synchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid) + val snapshot = "snapshot".getBytes("UTF-8") + txlog1.recordSnapshot(snapshot) + txlog1.close + } + + "be able to record and read a snapshot and following entries - synchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid) + val snapshot = "snapshot".getBytes("UTF-8") + txlog1.recordSnapshot(snapshot) + + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.close + + val txlog2 = TransactionLog.logFor(uuid) + val (snapshotAsBytes, entriesAsBytes) = txlog2.entriesFromLatestSnapshot + new String(snapshotAsBytes, "UTF-8") must equal ("snapshot") + + val entries = entriesAsBytes.map(bytes => new String(bytes, "UTF-8")) + entries.size must equal (4) + entries(0) must equal ("hello") + entries(1) must equal ("hello") + entries(2) must equal ("hello") + entries(3) must equal ("hello") + txlog2.close + } + + "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - synchronous" in { + val uuid = (new UUID).toString + val 
txlog1 = TransactionLog.newLogFor(uuid) + + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + + val snapshot = "snapshot".getBytes("UTF-8") + txlog1.recordSnapshot(snapshot) + + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.close + + val txlog2 = TransactionLog.logFor(uuid) + val (snapshotAsBytes, entriesAsBytes) = txlog2.entriesFromLatestSnapshot + new String(snapshotAsBytes, "UTF-8") must equal ("snapshot") + + val entries = entriesAsBytes.map(bytes => new String(bytes, "UTF-8")) + entries.size must equal (2) + entries(0) must equal ("hello") + entries(1) must equal ("hello") + txlog2.close + } + } + + "A Transaction Log" should { + "be able to record entries - asynchronous" in { + val uuid = (new UUID).toString + val txlog = TransactionLog.newLogFor(uuid, true) + val entry = "hello".getBytes("UTF-8") + txlog.recordEntry(entry) + Thread.sleep(100) + txlog.close + } + + "be able to record and delete entries - asynchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid, true) + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.delete + Thread.sleep(100) + intercept[BKNoSuchLedgerExistsException](TransactionLog.logFor(uuid, true)) + } + "be able to record entries and read entries with 'entriesInRange' - asynchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid, true) + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + Thread.sleep(100) + txlog1.close + + val txlog2 = TransactionLog.logFor(uuid, true) + val entries = txlog2.entriesInRange(0, 1).map(bytes => new String(bytes, "UTF-8")) + entries.size must equal (2) + entries(0) must equal ("hello") + entries(1) must equal ("hello") + Thread.sleep(100) + txlog2.close + } + + "be able to record entries and read entries with 'entries' - asynchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid, true) + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + Thread.sleep(100) + txlog1.close + + val txlog2 = TransactionLog.logFor(uuid, true) + val entries = txlog2.entries.map(bytes => new String(bytes, "UTF-8")) + entries.size must equal (4) + entries(0) must equal ("hello") + entries(1) must equal ("hello") + entries(2) must equal ("hello") + entries(3) must equal ("hello") + Thread.sleep(100) + txlog2.close + } + + "be able to record a snapshot - asynchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid, true) + val snapshot = "snapshot".getBytes("UTF-8") + txlog1.recordSnapshot(snapshot) + Thread.sleep(100) + txlog1.close + } + + "be able to record and read a snapshot and following entries - asynchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid, true) + val snapshot = "snapshot".getBytes("UTF-8") + txlog1.recordSnapshot(snapshot) + + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + Thread.sleep(100) + txlog1.close + + val txlog2 = TransactionLog.logFor(uuid, true) + val (snapshotAsBytes, entriesAsBytes) = txlog2.entriesFromLatestSnapshot + new String(snapshotAsBytes, "UTF-8") must equal ("snapshot") + + val entries = entriesAsBytes.map(bytes => new String(bytes, 
"UTF-8")) + entries.size must equal (4) + entries(0) must equal ("hello") + entries(1) must equal ("hello") + entries(2) must equal ("hello") + entries(3) must equal ("hello") + Thread.sleep(100) + txlog2.close + } + + "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - asynchronous" in { + val uuid = (new UUID).toString + val txlog1 = TransactionLog.newLogFor(uuid, true) + + val entry = "hello".getBytes("UTF-8") + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + val snapshot = "snapshot".getBytes("UTF-8") + txlog1.recordSnapshot(snapshot) + txlog1.recordEntry(entry) + txlog1.recordEntry(entry) + Thread.sleep(100) + txlog1.close + + val txlog2 = TransactionLog.logFor(uuid, true) + val (snapshotAsBytes, entriesAsBytes) = txlog2.entriesFromLatestSnapshot + new String(snapshotAsBytes, "UTF-8") must equal ("snapshot") + val entries = entriesAsBytes.map(bytes => new String(bytes, "UTF-8")) + entries.size must equal (2) + entries(0) must equal ("hello") + entries(1) must equal ("hello") + Thread.sleep(100) + txlog2.close + } + } + + override def beforeAll() = { + LocalBookKeeperEnsemble.start + } + + override def afterAll() = { + TransactionLog.shutdown + LocalBookKeeperEnsemble.shutdown + } +} diff --git a/akka-zookeeper/src/main/java/akka/cloud/zookeeper/DistributedQueue.java b/akka-zookeeper/src/main/java/akka/cloud/zookeeper/DistributedQueue.java new file mode 100644 index 0000000000..47586436d2 --- /dev/null +++ b/akka-zookeeper/src/main/java/akka/cloud/zookeeper/DistributedQueue.java @@ -0,0 +1,312 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package akka.cloud.zookeeper; + +import java.util.List; +import java.util.NoSuchElementException; +import java.util.TreeMap; +import java.util.concurrent.CountDownLatch; + +import org.apache.log4j.Logger; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Stat; + +/** + * + * A protocol to implement a distributed queue. + * + */ + +public class DistributedQueue { + private static final Logger LOG = Logger.getLogger(DistributedQueue.class); + + private final String dir; + + private ZooKeeper zookeeper; + private List acl = ZooDefs.Ids.OPEN_ACL_UNSAFE; + + private final String prefix = "qn-"; + + + public DistributedQueue(ZooKeeper zookeeper, String dir, List acl){ + this.dir = dir; + + if(acl != null){ + this.acl = acl; + } + this.zookeeper = zookeeper; + + } + + + + /** + * Returns a Map of the children, ordered by id. 
+    /**
+     * Returns a Map of the children, ordered by id.
+     *
+     * @param watcher optional watcher on getChildren() operation.
+     * @return map from id to child name for all children
+     */
+    private TreeMap<Long, String> orderedChildren(Watcher watcher) throws KeeperException, InterruptedException {
+        TreeMap<Long, String> orderedChildren = new TreeMap<Long, String>();
+
+        List<String> childNames = null;
+        try {
+            childNames = zookeeper.getChildren(dir, watcher);
+        } catch (KeeperException.NoNodeException e) {
+            throw e;
+        }
+
+        for (String childName : childNames) {
+            try {
+                // Check format
+                if (!childName.regionMatches(0, prefix, 0, prefix.length())) {
+                    LOG.warn("Found child node with improper name: " + childName);
+                    continue;
+                }
+                String suffix = childName.substring(prefix.length());
+                Long childId = new Long(suffix);
+                orderedChildren.put(childId, childName);
+            } catch (NumberFormatException e) {
+                LOG.warn("Found child node with improper format : " + childName + " " + e, e);
+            }
+        }
+
+        return orderedChildren;
+    }
+
+    /**
+     * Find the smallest child node.
+     *
+     * @return The name of the smallest child node.
+     */
+    private String smallestChildName() throws KeeperException, InterruptedException {
+        long minId = Long.MAX_VALUE;
+        String minName = "";
+
+        List<String> childNames = null;
+
+        try {
+            childNames = zookeeper.getChildren(dir, false);
+        } catch (KeeperException.NoNodeException e) {
+            LOG.warn("Caught: " + e, e);
+            return null;
+        }
+
+        for (String childName : childNames) {
+            try {
+                // Check format
+                if (!childName.regionMatches(0, prefix, 0, prefix.length())) {
+                    LOG.warn("Found child node with improper name: " + childName);
+                    continue;
+                }
+                String suffix = childName.substring(prefix.length());
+                long childId = Long.parseLong(suffix);
+                if (childId < minId) {
+                    minId = childId;
+                    minName = childName;
+                }
+            } catch (NumberFormatException e) {
+                LOG.warn("Found child node with improper format : " + childName + " " + e, e);
+            }
+        }
+
+        if (minId < Long.MAX_VALUE) {
+            return minName;
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Return the head of the queue without modifying the queue.
+     *
+     * @return the data at the head of the queue.
+     * @throws NoSuchElementException
+     * @throws KeeperException
+     * @throws InterruptedException
+     */
+    public byte[] element() throws NoSuchElementException, KeeperException, InterruptedException {
+        TreeMap<Long, String> orderedChildren;
+
+        // element, take, and remove follow the same pattern.
+        // We want to return the child node with the smallest sequence number.
+        // Since other clients are remove()ing and take()ing nodes concurrently,
+        // the child with the smallest sequence number in orderedChildren might be gone by the time we check.
+        // We don't call getChildren again until we have tried the rest of the nodes in sequence order.
+        while (true) {
+            try {
+                orderedChildren = orderedChildren(null);
+            } catch (KeeperException.NoNodeException e) {
+                throw new NoSuchElementException();
+            }
+            if (orderedChildren.size() == 0) throw new NoSuchElementException();
+
+            for (String headNode : orderedChildren.values()) {
+                if (headNode != null) {
+                    try {
+                        return zookeeper.getData(dir + "/" + headNode, false, null);
+                    } catch (KeeperException.NoNodeException e) {
+                        // Another client removed the node first, try next
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Attempts to remove the head of the queue and return it.
+     *
+     * @return The former head of the queue
+     * @throws NoSuchElementException
+     * @throws KeeperException
+     * @throws InterruptedException
+     */
+    public byte[] remove() throws NoSuchElementException, KeeperException, InterruptedException {
+        TreeMap<Long, String> orderedChildren;
+        // Same as for element. Should refactor this.
+        while (true) {
+            try {
+                orderedChildren = orderedChildren(null);
+            } catch (KeeperException.NoNodeException e) {
+                throw new NoSuchElementException();
+            }
+            if (orderedChildren.size() == 0) throw new NoSuchElementException();
+
+            for (String headNode : orderedChildren.values()) {
+                String path = dir + "/" + headNode;
+                try {
+                    byte[] data = zookeeper.getData(path, false, null);
+                    zookeeper.delete(path, -1);
+                    return data;
+                } catch (KeeperException.NoNodeException e) {
+                    // Another client deleted the node first.
+                }
+            }
+        }
+    }
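The "Should refactor this" comment above is earned: element(), remove(), and take() all share one shape — list the children in sequence order, try the smallest, fall through when a concurrent client wins the race, and only re-list once every candidate from the previous listing is gone. A hedged sketch of that shared shape in Scala (the names are hypothetical, not part of this patch):

    // Hypothetical sketch: `op` inspects or consumes one child and returns None
    // when another client got the node first; re-list only after all candidates failed.
    def onHead[T](listChildren: () => List[String])(op: String => Option[T]): Option[T] =
      listChildren() match {
        case Nil => None // queue is empty
        case children =>
          children.toStream.flatMap(child => op(child)).headOption match {
            case None   => onHead(listChildren)(op) // every candidate raced away; re-list
            case result => result
          }
      }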
+
+    private class LatchChildWatcher implements Watcher {
+
+        CountDownLatch latch;
+
+        public LatchChildWatcher() {
+            latch = new CountDownLatch(1);
+        }
+
+        public void process(WatchedEvent event) {
+            LOG.debug("Watcher fired on path: " + event.getPath() + " state: " +
+                    event.getState() + " type " + event.getType());
+            latch.countDown();
+        }
+
+        public void await() throws InterruptedException {
+            latch.await();
+        }
+    }
+
+    /**
+     * Removes the head of the queue and returns it, blocks until it succeeds.
+     *
+     * @return The former head of the queue
+     * @throws KeeperException
+     * @throws InterruptedException
+     */
+    public byte[] take() throws KeeperException, InterruptedException {
+        TreeMap<Long, String> orderedChildren;
+        // Same as for element. Should refactor this.
+        while (true) {
+            LatchChildWatcher childWatcher = new LatchChildWatcher();
+            try {
+                orderedChildren = orderedChildren(childWatcher);
+            } catch (KeeperException.NoNodeException e) {
+                // Queue node does not exist yet: create it and retry.
+                zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT);
+                continue;
+            }
+            if (orderedChildren.size() == 0) {
+                // Queue is empty: block on the child watch until something is offered.
+                childWatcher.await();
+                continue;
+            }
+
+            for (String headNode : orderedChildren.values()) {
+                String path = dir + "/" + headNode;
+                try {
+                    byte[] data = zookeeper.getData(path, false, null);
+                    zookeeper.delete(path, -1);
+                    return data;
+                } catch (KeeperException.NoNodeException e) {
+                    // Another client deleted the node first.
+                }
+            }
+        }
+    }
+
+    /**
+     * Inserts data into queue.
+     *
+     * @param data the payload to enqueue
+     * @return true if data was successfully added
+     */
+    public boolean offer(byte[] data) throws KeeperException, InterruptedException {
+        for (;;) {
+            try {
+                zookeeper.create(dir + "/" + prefix, data, acl, CreateMode.PERSISTENT_SEQUENTIAL);
+                return true;
+            } catch (KeeperException.NoNodeException e) {
+                // Queue node does not exist yet: create it and retry the insert.
+                zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT);
+            }
+        }
+    }
+
+    /**
+     * Returns the data at the first element of the queue, or null if the queue is empty.
+     *
+     * @return data at the first element of the queue, or null.
+     * @throws KeeperException
+     * @throws InterruptedException
+     */
+    public byte[] peek() throws KeeperException, InterruptedException {
+        try {
+            return element();
+        } catch (NoSuchElementException e) {
+            return null;
+        }
+    }
+
+    /**
+     * Attempts to remove the head of the queue and return it. Returns null if the queue is empty.
+     *
+     * @return Head of the queue or null.
+     * @throws KeeperException
+     * @throws InterruptedException
+     */
+    public byte[] poll() throws KeeperException, InterruptedException {
+        try {
+            return remove();
+        } catch (NoSuchElementException e) {
+            return null;
+        }
+    }
+}
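End to end, offer() hands a byte payload to a persistent-sequential child node and take() blocks on a child watch until one exists. A minimal usage sketch from Scala (the connection string, paths and payloads are placeholder values, not part of this patch):

    import org.apache.zookeeper.ZooKeeper
    import akka.cloud.zookeeper.DistributedQueue

    // Placeholder connection settings; a null ACL list falls back to OPEN_ACL_UNSAFE.
    val zk    = new ZooKeeper("localhost:2181", 3000, null)
    val queue = new DistributedQueue(zk, "/akka/test-queue", null)

    queue.offer("job-1".getBytes("UTF-8")) // creates e.g. /akka/test-queue/qn-0000000000
    queue.offer("job-2".getBytes("UTF-8"))

    val head = new String(queue.take(), "UTF-8") // "job-1"; blocks while the queue is empty
    val next = Option(queue.poll())              // non-blocking; None once the queue is drained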
diff --git a/akka-zookeeper/src/main/java/akka/cloud/zookeeper/ZooKeeperQueue.java b/akka-zookeeper/src/main/java/akka/cloud/zookeeper/ZooKeeperQueue.java
new file mode 100644
index 0000000000..01a427180b
--- /dev/null
+++ b/akka-zookeeper/src/main/java/akka/cloud/zookeeper/ZooKeeperQueue.java
@@ -0,0 +1,173 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+
+package akka.cloud.zookeeper;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.ArrayList;
+
+import org.I0Itec.zkclient.ExceptionUtil;
+import org.I0Itec.zkclient.IZkChildListener;
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.exception.ZkNoNodeException;
+
+public class ZooKeeperQueue<T extends Serializable> {
+
+    protected static class Element<T> {
+        private String _name;
+        private T _data;
+
+        public Element(String name, T data) {
+            _name = name;
+            _data = data;
+        }
+
+        public String getName() {
+            return _name;
+        }
+
+        public T getData() {
+            return _data;
+        }
+    }
+
+    protected final ZkClient _zkClient;
+    private final String _elementsPath;
+    private final String _rootPath;
+    private final boolean _isBlocking;
+
+    public ZooKeeperQueue(ZkClient zkClient, String rootPath, boolean isBlocking) {
+        _zkClient = zkClient;
+        _rootPath = rootPath;
+        _isBlocking = isBlocking;
+        _elementsPath = rootPath + "/queue";
+        if (!_zkClient.exists(rootPath)) {
+            _zkClient.createPersistent(rootPath, true);
+            _zkClient.createPersistent(_elementsPath, true);
+        }
+    }
+
+    public String enqueue(T element) {
+        try {
+            String sequential = _zkClient.createPersistentSequential(getElementRoughPath(), element);
+            String elementId = sequential.substring(sequential.lastIndexOf('/') + 1);
+            return elementId;
+        } catch (Exception e) {
+            throw ExceptionUtil.convertToRuntimeException(e);
+        }
+    }
+
+    public T dequeue() throws InterruptedException {
+        if (_isBlocking) {
+            Element<T> element = getFirstElement();
+            _zkClient.delete(getElementPath(element.getName()));
+            return element.getData();
+        } else {
+            throw new UnsupportedOperationException("Non-blocking ZooKeeperQueue is not yet supported");
+            /* FIXME DOES NOT WORK
+            try {
+                String headName = getSmallestElement(_zkClient.getChildren(_elementsPath));
+                String headPath = getElementPath(headName);
+                return (T) _zkClient.readData(headPath);
+            } catch (ZkNoNodeException e) {
+                return null;
+            }
+            */
+        }
+    }
+
+    public boolean containsElement(String elementId) {
+        String zkPath = getElementPath(elementId);
+        return _zkClient.exists(zkPath);
+    }
+
+    public T peek() throws InterruptedException {
+        Element<T> element = getFirstElement();
+        if (element == null) {
+            return null;
+        }
+        return element.getData();
+    }
+
+    @SuppressWarnings("unchecked")
+    public List<T> getElements() {
+        List<String> names = _zkClient.getChildren(_elementsPath);
+        List<T> elements = new ArrayList<T>();
+        for (String name : names) {
+            // getChildren returns bare child names; resolve to the full path before reading.
+            elements.add((T) _zkClient.readData(getElementPath(name)));
+        }
+        return elements;
+    }
+
+    public int size() {
+        return _zkClient.getChildren(_elementsPath).size();
+    }
+
+    public void clear() {
+        _zkClient.deleteRecursive(_rootPath);
+    }
+
+    public boolean isEmpty() {
+        return size() == 0;
+    }
+
+    private String getElementRoughPath() {
+        return getElementPath("item" + "-");
+    }
+
+    private String getElementPath(String elementId) {
+        return _elementsPath + "/" + elementId;
+    }
+
+    private String getSmallestElement(List<String> list) {
+        String smallestElement = list.get(0);
+        for (String element : list) {
+            if (element.compareTo(smallestElement) < 0) {
+                smallestElement = element;
+            }
+        }
+        return smallestElement;
+    }
+
+    @SuppressWarnings("unchecked")
+    protected Element<T> getFirstElement() throws InterruptedException {
+        final Object mutex = new Object();
+        IZkChildListener notifyListener = new IZkChildListener() {
+            @Override
+            public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
+                synchronized (mutex) {
+                    mutex.notify();
+                }
+            }
+        };
+        try {
+            while (true) {
+                List<String> elementNames;
+                synchronized (mutex) {
+                    elementNames = _zkClient.subscribeChildChanges(_elementsPath, notifyListener);
+                    while (elementNames == null || elementNames.isEmpty()) {
+                        mutex.wait();
+                        elementNames = _zkClient.getChildren(_elementsPath);
+                    }
+                }
+                String elementName = getSmallestElement(elementNames);
+                try {
+                    String elementPath = getElementPath(elementName);
+                    return new Element<T>(elementName, (T) _zkClient.readData(elementPath));
+                } catch (ZkNoNodeException e) {
+                    // Somebody else picked up the element first, so we have to
+                    // retry with the new first element.
+                }
+            }
+        } catch (InterruptedException e) {
+            throw e;
+        } catch (Exception e) {
+            throw ExceptionUtil.convertToRuntimeException(e);
+        } finally {
+            _zkClient.unsubscribeChildChanges(_elementsPath, notifyListener);
+        }
+    }
+}
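A minimal usage sketch for the blocking variant (the connection string and paths are placeholders; the payload type must be Serializable because ZkClient's default serializer is Java serialization):

    import org.I0Itec.zkclient.ZkClient
    import akka.cloud.zookeeper.ZooKeeperQueue

    val zkClient = new ZkClient("localhost:2181") // placeholder connect string
    val queue    = new ZooKeeperQueue[String](zkClient, "/akka/string-queue", true) // isBlocking = true

    val id = queue.enqueue("hello")    // persistent-sequential node under /akka/string-queue/queue
    assert(queue.containsElement(id))
    val head = queue.dequeue()         // blocks until an element exists, then removes and returns it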
diff --git a/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/AkkaZkClient.scala b/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/AkkaZkClient.scala
new file mode 100644
index 0000000000..ea3e1e4676
--- /dev/null
+++ b/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/AkkaZkClient.scala
@@ -0,0 +1,29 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+package akka.cloud.zookeeper
+
+import org.I0Itec.zkclient._
+import org.I0Itec.zkclient.serialize._
+import org.I0Itec.zkclient.exception._
+
+class AkkaZkClient(zkServers: String,
+                   sessionTimeout: Int,
+                   connectionTimeout: Int,
+                   zkSerializer: ZkSerializer = new SerializableSerializer)
+  extends ZkClient(zkServers, sessionTimeout, connectionTimeout, zkSerializer) {
+
+  def connection: ZkConnection = _connection.asInstanceOf[ZkConnection]
+
+  def reconnect() {
+    getEventLock.lock
+    try {
+      _connection.close
+      _connection.connect(this)
+    } catch {
+      case e: InterruptedException => throw new ZkInterruptedException(e)
+    } finally {
+      getEventLock.unlock
+    }
+  }
+}
diff --git a/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/AkkaZooKeeper.scala b/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/AkkaZooKeeper.scala
new file mode 100644
index 0000000000..ba35be78cc
--- /dev/null
+++ b/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/AkkaZooKeeper.scala
@@ -0,0 +1,32 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+package akka.cloud.zookeeper
+
+import org.I0Itec.zkclient._
+import org.apache.commons.io.FileUtils
+import java.io.File
+
+object AkkaZooKeeper {
+
+  /**
+   * Starts up a local ZooKeeper server on port 2181 with a tick time of 500 ms.
+   * Should only be used for testing purposes.
+   */
+  def startLocalServer(dataPath: String, logPath: String): ZkServer =
+    startLocalServer(dataPath, logPath, 2181, 500)
+
+  /**
+   * Starts up a local ZooKeeper server. Should only be used for testing purposes.
+   * Note that any existing data and log directories are deleted first.
+   */
+  def startLocalServer(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = {
+    FileUtils.deleteDirectory(new File(dataPath))
+    FileUtils.deleteDirectory(new File(logPath))
+    val zkServer = new ZkServer(
+      dataPath, logPath,
+      new IDefaultNameSpace() {
+        def createDefaultNameSpace(zkClient: ZkClient) = {}
+      },
+      port, tickTime)
+    zkServer.start
+    zkServer
+  }
+}
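Together these two helpers give a self-contained test harness: boot a throwaway local server, connect an AkkaZkClient, tear both down. A sketch (the paths and timeouts are arbitrary test values):

    import akka.cloud.zookeeper.{AkkaZooKeeper, AkkaZkClient}

    // Arbitrary test paths; the default overload listens on port 2181.
    val zkServer = AkkaZooKeeper.startLocalServer("/tmp/zk-data", "/tmp/zk-log")
    val zkClient = new AkkaZkClient("localhost:2181", 30000, 30000) // default SerializableSerializer
    try {
      zkClient.createPersistent("/test")
      assert(zkClient.exists("/test"))
    } finally {
      zkClient.close()
      zkServer.shutdown()
    }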
diff --git a/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/ZooKeeperBarrier.scala b/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/ZooKeeperBarrier.scala
new file mode 100644
index 0000000000..134a7a1be4
--- /dev/null
+++ b/akka-zookeeper/src/main/scala/akka/cloud/zookeeper/ZooKeeperBarrier.scala
@@ -0,0 +1,94 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB
+ */
+package akka.cloud.zookeeper
+
+import akka.util.Duration
+import akka.util.duration._
+
+import org.I0Itec.zkclient._
+import org.I0Itec.zkclient.exception._
+
+import java.util.{List => JList}
+import java.util.concurrent.CountDownLatch
+
+class BarrierTimeoutException(message: String) extends RuntimeException(message)
+
+/**
+ * Barrier based on Zookeeper barrier tutorial.
+ */
+object ZooKeeperBarrier {
+  val BarriersNode = "/barriers"
+  val DefaultTimeout = 60 seconds
+
+  def apply(zkClient: ZkClient, name: String, node: String, count: Int) =
+    new ZooKeeperBarrier(zkClient, name, node, count, DefaultTimeout)
+
+  def apply(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration) =
+    new ZooKeeperBarrier(zkClient, name, node, count, timeout)
+
+  def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int) =
+    new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, DefaultTimeout)
+
+  def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int, timeout: Duration) =
+    new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, timeout)
+
+  def ignore[E : Manifest](body: => Unit): Unit =
+    try {
+      body
+    } catch {
+      case e if manifest[E].erasure.isAssignableFrom(e.getClass) => ()
+    }
+}
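Every participant constructs the barrier with the same name and expected count, then wraps its work in apply: enter blocks until count ephemeral entry nodes exist, and leave blocks until all of them are gone again. A sketch for one of three participants (barrier and node names are arbitrary):

    // One of three participants; each uses the same barrier name and count = 3.
    val barrier = ZooKeeperBarrier(zkClient, "startup-barrier", "node-1", 3)

    barrier {
      // Runs once all three participants have entered; apply() then waits
      // (up to the timeout) for everyone to leave before returning.
      println("all nodes ready")
    }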
+
+/**
+ * Barrier based on Zookeeper barrier tutorial.
+ */
+class ZooKeeperBarrier(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration)
+  extends IZkChildListener {
+  import ZooKeeperBarrier.{BarriersNode, ignore}
+
+  val barrier = BarriersNode + "/" + name
+  val entry = barrier + "/" + node
+  val ready = barrier + "/ready"
+
+  val exitBarrier = new CountDownLatch(1)
+
+  ignore[ZkNodeExistsException](zkClient.createPersistent(BarriersNode))
+  ignore[ZkNodeExistsException](zkClient.createPersistent(barrier))
+
+  def apply(body: => Unit) = {
+    enter
+    body
+    leave
+  }
+
+  def enter = {
+    zkClient.createEphemeral(entry)
+    if (zkClient.countChildren(barrier) >= count)
+      ignore[ZkNodeExistsException](zkClient.createPersistent(ready))
+    else
+      zkClient.waitUntilExists(ready, timeout.unit, timeout.length)
+    if (!zkClient.exists(ready)) {
+      throw new BarrierTimeoutException("Timeout (%s) while waiting for entry barrier" format timeout)
+    }
+    zkClient.subscribeChildChanges(barrier, this)
+  }
+
+  def leave = {
+    zkClient.delete(entry)
+    exitBarrier.await(timeout.length, timeout.unit)
+    if (zkClient.countChildren(barrier) > 0) {
+      zkClient.unsubscribeChildChanges(barrier, this)
+      throw new BarrierTimeoutException("Timeout (%s) while waiting for exit barrier" format timeout)
+    }
+    zkClient.unsubscribeChildChanges(barrier, this)
+  }
+
+  def handleChildChange(path: String, children: JList[String]) = {
+    if (children.size <= 1) {
+      ignore[ZkNoNodeException](zkClient.delete(ready))
+      exitBarrier.countDown
+    }
+  }
+}
diff --git a/embedded-repo/org/apache/hadoop/zookeeper/bookkeeper/3.4.0/bookkeeper-3.4.0.jar b/embedded-repo/org/apache/hadoop/zookeeper/bookkeeper/3.4.0/bookkeeper-3.4.0.jar
new file mode 100644
index 0000000000..a19462f609
Binary files /dev/null and b/embedded-repo/org/apache/hadoop/zookeeper/bookkeeper/3.4.0/bookkeeper-3.4.0.jar differ
diff --git a/embedded-repo/org/apache/hadoop/zookeeper/bookkeeper/3.4.0/bookkeeper-3.4.0.pom b/embedded-repo/org/apache/hadoop/zookeeper/bookkeeper/3.4.0/bookkeeper-3.4.0.pom
new file mode 100644
index 0000000000..7ff2d24748
--- /dev/null
+++ b/embedded-repo/org/apache/hadoop/zookeeper/bookkeeper/3.4.0/bookkeeper-3.4.0.pom
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop.zookeeper</groupId>
+  <artifactId>bookkeeper</artifactId>
+  <packaging>jar</packaging>
+  <version>3.4.0</version>
+</project>
\ No newline at end of file
diff --git a/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-lock/3.4.0/zookeeper-recipes-lock-3.4.0.jar b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-lock/3.4.0/zookeeper-recipes-lock-3.4.0.jar
new file mode 100644
index 0000000000..a142d36d76
Binary files /dev/null and b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-lock/3.4.0/zookeeper-recipes-lock-3.4.0.jar differ
diff --git a/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-lock/3.4.0/zookeeper-recipes-lock-3.4.0.pom b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-lock/3.4.0/zookeeper-recipes-lock-3.4.0.pom
new file mode 100644
index 0000000000..5352103353
--- /dev/null
+++ b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-lock/3.4.0/zookeeper-recipes-lock-3.4.0.pom
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop.zookeeper</groupId>
+  <artifactId>zookeeper-recipes-lock</artifactId>
+  <packaging>jar</packaging>
+  <version>3.4.0</version>
+</project>
\ No newline at end of file
diff --git a/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-queue/3.4.0/zookeeper-recipes-queue-3.4.0.jar b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-queue/3.4.0/zookeeper-recipes-queue-3.4.0.jar
new file mode 100644
index 0000000000..f65ee4ea1d
Binary files /dev/null and b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-queue/3.4.0/zookeeper-recipes-queue-3.4.0.jar differ
diff --git a/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-queue/3.4.0/zookeeper-recipes-queue-3.4.0.pom b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-queue/3.4.0/zookeeper-recipes-queue-3.4.0.pom
new file mode 100644
index 0000000000..757c5b87b7
--- /dev/null
+++ b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper-recipes-queue/3.4.0/zookeeper-recipes-queue-3.4.0.pom
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop.zookeeper</groupId>
+  <artifactId>zookeeper-recipes-queue</artifactId>
+  <packaging>jar</packaging>
+  <version>3.4.0</version>
+</project>
\ No newline at end of file
diff --git a/embedded-repo/org/apache/hadoop/zookeeper/zookeeper/3.4.0/zookeeper-3.4.0.jar b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper/3.4.0/zookeeper-3.4.0.jar
new file mode 100644
index 0000000000..757fe76c8b
Binary files /dev/null and b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper/3.4.0/zookeeper-3.4.0.jar differ
diff --git a/embedded-repo/org/apache/hadoop/zookeeper/zookeeper/3.4.0/zookeeper-3.4.0.pom b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper/3.4.0/zookeeper-3.4.0.pom
new file mode 100644
index 0000000000..c7c4fed746
--- /dev/null
+++ b/embedded-repo/org/apache/hadoop/zookeeper/zookeeper/3.4.0/zookeeper-3.4.0.pom
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop.zookeeper</groupId>
+  <artifactId>zookeeper</artifactId>
+  <packaging>jar</packaging>
+  <version>3.4.0</version>
+</project>
\ No newline at end of file
diff --git a/embedded-repo/zkclient/zkclient/0.2/zkclient-0.2.jar b/embedded-repo/zkclient/zkclient/0.2/zkclient-0.2.jar
new file mode 100644
index 0000000000..c4c94b6de3
Binary files /dev/null and b/embedded-repo/zkclient/zkclient/0.2/zkclient-0.2.jar differ
diff --git a/embedded-repo/zkclient/zkclient/0.2/zkclient-0.2.pom b/embedded-repo/zkclient/zkclient/0.2/zkclient-0.2.pom
new file mode 100644
index 0000000000..b5a4e87449
--- /dev/null
+++ b/embedded-repo/zkclient/zkclient/0.2/zkclient-0.2.pom
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project>
+  <modelVersion>4.0.0</modelVersion>
+
+  <name>zkclient</name>
+  <groupId>zkclient</groupId>
+  <artifactId>zkclient</artifactId>
+  <version>0.2</version>
+  <packaging>jar</packaging>
+
+  <prerequisites>
+    <maven>2.0.9</maven>
+  </prerequisites>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.0</version>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>1.2.14</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.7</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <version>1.4</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <version>1.7</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>1.6</source>
+          <target>1.6</target>
+          <encoding>UTF-8</encoding>
+          <maxmem>1024m</maxmem>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-resources-plugin</artifactId>
+        <configuration>
+          <encoding>UTF-8</encoding>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-sources</id>
+            <phase>verify</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-javadoc-plugin</artifactId>
+        <version>2.5</version>
+        <configuration>
+          <source>1.6</source>
+          <target>1.6</target>
+          <encoding>UTF-8</encoding>
+          <maxmemory>1g</maxmemory>
+          <links>
+            <link>http://java.sun.com/javase/6/docs/api/</link>
+          </links>
+        </configuration>
+        <executions>
+          <execution>
+            <id>attach-javadocs</id>
+            <phase>verify</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
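For reference, the artifacts checked into the embedded repository above resolve under the following coordinates. A sketch of the matching sbt dependency declarations (assuming a resolver is pointed at embedded-repo, which this diff does not show):

    // Coordinates taken from the POMs above; the embedded-repo resolver itself is assumed.
    val zookeeper  = "org.apache.hadoop.zookeeper" % "zookeeper"               % "3.4.0"
    val zkLock     = "org.apache.hadoop.zookeeper" % "zookeeper-recipes-lock"  % "3.4.0"
    val zkQueue    = "org.apache.hadoop.zookeeper" % "zookeeper-recipes-queue" % "3.4.0"
    val bookkeeper = "org.apache.hadoop.zookeeper" % "bookkeeper"              % "3.4.0"
    val zkclient   = "zkclient"                    % "zkclient"                % "0.2"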