Merging with master

Viktor Klang 2012-01-31 17:56:49 +01:00
commit 815245a133
211 changed files with 1462 additions and 9696 deletions

View file

@ -14,6 +14,7 @@ import java.util.LinkedList;
import java.lang.Iterable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static akka.japi.Util.manifest;
import akka.testkit.AkkaSpec;
@ -45,7 +46,7 @@ public class JavaFutureTests {
}
}, system.dispatcher());
Future<String> f2 = f1.map(new Function<String, String>() {
Future<String> f2 = f1.map(new Mapper<String, String>() {
public String apply(String s) {
return s + " World";
}
@ -59,8 +60,8 @@ public class JavaFutureTests {
final CountDownLatch latch = new CountDownLatch(1);
Promise<String> cf = Futures.promise(system.dispatcher());
Future<String> f = cf;
f.onSuccess(new Procedure<String>() {
public void apply(String result) {
f.onSuccess(new OnSuccess<String>() {
public void onSuccess(String result) {
if (result.equals("foo"))
latch.countDown();
}
@ -76,8 +77,8 @@ public class JavaFutureTests {
final CountDownLatch latch = new CountDownLatch(1);
Promise<String> cf = Futures.promise(system.dispatcher());
Future<String> f = cf;
f.onFailure(new Procedure<Throwable>() {
public void apply(Throwable t) {
f.onFailure(new OnFailure() {
public void onFailure(Throwable t) {
if (t instanceof NullPointerException)
latch.countDown();
}
@ -94,8 +95,8 @@ public class JavaFutureTests {
final CountDownLatch latch = new CountDownLatch(1);
Promise<String> cf = Futures.promise(system.dispatcher());
Future<String> f = cf;
f.onComplete(new Procedure2<Throwable,String>() {
public void apply(Throwable t, String r) {
f.onComplete(new OnComplete<String>() {
public void onComplete(Throwable t, String r) {
latch.countDown();
}
});
@ -110,8 +111,8 @@ public class JavaFutureTests {
final CountDownLatch latch = new CountDownLatch(1);
Promise<String> cf = Futures.promise(system.dispatcher());
Future<String> f = cf;
f.foreach(new Procedure<String>() {
public void apply(String future) {
f.foreach(new Foreach<String>() {
public void each(String future) {
latch.countDown();
}
});
@ -127,7 +128,7 @@ public class JavaFutureTests {
Promise<String> cf = Futures.promise(system.dispatcher());
cf.success("1000");
Future<String> f = cf;
Future<Integer> r = f.flatMap(new Function<String, Future<Integer>>() {
Future<Integer> r = f.flatMap(new Mapper<String, Future<Integer>>() {
public Future<Integer> apply(String r) {
latch.countDown();
Promise<Integer> cf = Futures.promise(system.dispatcher());
@ -146,8 +147,8 @@ public class JavaFutureTests {
final CountDownLatch latch = new CountDownLatch(1);
Promise<String> cf = Futures.promise(system.dispatcher());
Future<String> f = cf;
Future<String> r = f.filter(new Function<String, Boolean>() {
public Boolean apply(String r) {
Future<String> r = f.filter(new Filter<String>() {
public boolean filter(String r) {
latch.countDown();
return r.equals("foo");
}
@ -267,15 +268,55 @@ public class JavaFutureTests {
}
}, system.dispatcher());
assertEquals(expect, Await.result(f, timeout));
assertEquals(expect, Await.result(f, timeout).get());
}
@Test
public void BlockMustBeCallable() {
public void blockMustBeCallable() {
Promise<String> p = Futures.promise(system.dispatcher());
Duration d = Duration.create(1, TimeUnit.SECONDS);
p.success("foo");
Await.ready(p, d);
assertEquals(Await.result(p, d), "foo");
}
@Test
public void mapToMustBeCallable() {
Promise<Object> p = Futures.promise(system.dispatcher());
Future<String> f = p.future().mapTo(manifest(String.class));
Duration d = Duration.create(1, TimeUnit.SECONDS);
p.success("foo");
Await.ready(p, d);
assertEquals(Await.result(p, d), "foo");
}
@Test
public void recoverToMustBeCallable() {
final IllegalStateException fail = new IllegalStateException("OHNOES");
Promise<Object> p = Futures.promise(system.dispatcher());
Future<Object> f = p.future().recover(new Recover<Object>() {
public Object recover(Throwable t) throws Throwable {
if (t == fail) return "foo";
else throw t;
}
});
Duration d = Duration.create(1, TimeUnit.SECONDS);
p.failure(fail);
assertEquals(Await.result(f, d), "foo");
}
@Test
public void tryRecoverToMustBeCallable() {
final IllegalStateException fail = new IllegalStateException("OHNOES");
Promise<Object> p = Futures.promise(system.dispatcher());
Future<Object> f = p.future().tryRecover(new Recover<Future<Object>>() {
public Future<Object> recover(Throwable t) throws Throwable {
if (t == fail) return Futures.<Object>successful("foo", system.dispatcher()).future();
else throw t;
}
});
Duration d = Duration.create(1, TimeUnit.SECONDS);
p.failure(fail);
assertEquals(Await.result(f, d), "foo");
}
}

View file

@ -13,10 +13,10 @@ import akka.testkit.AkkaSpec
import org.scalatest.junit.JUnitSuite
import akka.testkit.DefaultTimeout
import akka.testkit.TestLatch
import java.util.concurrent.{ TimeoutException, TimeUnit, CountDownLatch }
import scala.runtime.NonLocalReturnControl
import akka.pattern.ask
import java.lang.{ IllegalStateException, ArithmeticException }
import java.util.concurrent._
object FutureSpec {
class TestActor extends Actor {
@ -39,7 +39,6 @@ object FutureSpec {
}
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class JavaFutureSpec extends JavaFutureTests with JUnitSuite
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
@ -303,6 +302,32 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa
}
}
"tryRecover from exceptions" in {
val o = new IllegalStateException("original")
val r = new IllegalStateException("recovered")
intercept[IllegalStateException] {
Await.result(Promise.failed[String](o) tryRecover { case _ if false == true ⇒ Promise.successful("yay!") }, timeout.duration)
} must be(o)
Await.result(Promise.failed[String](o) tryRecover { case _ ⇒ Promise.successful("yay!") }, timeout.duration) must equal("yay!")
intercept[IllegalStateException] {
Await.result(Promise.failed[String](o) tryRecover { case _ ⇒ Promise.failed[String](r) }, timeout.duration)
} must be(r)
}
"andThen like a boss" in {
val q = new LinkedBlockingQueue[Int]
for (i ← 1 to 1000) {
Await.result(Future { q.add(1); 3 } andThen { case _ ⇒ q.add(2) } andThen { case Right(0) ⇒ q.add(Int.MaxValue) } andThen { case _ ⇒ q.add(3); }, timeout.duration) must be(3)
q.poll() must be(1)
q.poll() must be(2)
q.poll() must be(3)
q.clear()
}
}
"firstCompletedOf" in {
val futures = Vector.fill[Future[Int]](10)(Promise[Int]()) :+ Promise.successful[Int](5)
Await.result(Future.firstCompletedOf(futures), timeout.duration) must be(5)
@ -856,7 +881,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa
"be completed" in { f((future, _) future must be('completed)) }
"contain a value" in { f((future, result) future.value must be(Some(Right(result)))) }
"return result with 'get'" in { f((future, result) Await.result(future, timeout.duration) must be(result)) }
"return result with 'Await.sync'" in { f((future, result) Await.result(future, timeout.duration) must be(result)) }
"return result with 'Await.result'" in { f((future, result) Await.result(future, timeout.duration) must be(result)) }
"not timeout" in { f((future, _) Await.ready(future, 0 millis)) }
"filter result" in {
f { (future, result)
@ -907,7 +932,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa
})
}
"throw exception with 'get'" in { f((future, message) (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) }
"throw exception with 'Await.sync'" in { f((future, message) (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) }
"throw exception with 'Await.result'" in { f((future, message) (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) }
"retain exception with filter" in {
f { (future, message)
(evaluating { Await.result(future filter (_ true), timeout.duration) } must produce[E]).getMessage must be(message)

View file

View file

@ -1,6 +1,7 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.actor
import akka.config.ConfigurationException

View file

@ -1,15 +1,7 @@
/*
* Copyright 2007 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
/**
* Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.actor
import akka.util.Duration

View file

@ -340,9 +340,9 @@ object Future {
}
}
sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
sealed trait Future[+T] extends Await.Awaitable[T] {
implicit def executor: ExecutionContext
protected implicit def executor: ExecutionContext
protected final def resolve[X](source: Either[Throwable, X]): Either[Throwable, X] = source match {
case Left(t: scala.runtime.NonLocalReturnControl[_]) ⇒ Right(t.value.asInstanceOf[X])
@ -362,7 +362,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
case Right(r) ⇒ that onSuccess { case r2 ⇒ p success ((r, r2)) }
}
that onFailure { case f ⇒ p failure f }
p
p.future
}
/**
@ -435,7 +435,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
case Left(t) ⇒ p success t
case Right(r) ⇒ p failure new NoSuchElementException("Future.failed not completed with a throwable. Instead completed with: " + r)
}
p
p.future
}
/**
@ -448,7 +448,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
case r @ Right(_) ⇒ p complete r
case _ ⇒ p completeWith that
}
p
p.future
}
/**
@ -463,12 +463,59 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
* </pre>
*/
final def recover[A >: T](pf: PartialFunction[Throwable, A]): Future[A] = {
val future = Promise[A]()
val p = Promise[A]()
onComplete {
case Left(e) if pf isDefinedAt e ⇒ future.complete(try { Right(pf(e)) } catch { case x: Exception ⇒ Left(x) })
case otherwise ⇒ future complete otherwise
case Left(e) if pf isDefinedAt e ⇒ p.complete(try { Right(pf(e)) } catch { case x: Exception ⇒ Left(x) })
case otherwise ⇒ p complete otherwise
}
future
p.future
}
/**
* Returns a new Future that will, in case this future fails,
* be completed with the resulting Future of the given PartialFunction,
* if the given PartialFunction matches the failure of the original Future.
*
* If the PartialFunction throws, that Throwable will be propagated to the returned Future.
*
* Example:
*
* {{{
* val f = Future { Int.MaxValue }
* Future (6 / 0) tryRecover { case e: ArithmeticException => f } // result: Int.MaxValue
* }}}
*/
def tryRecover[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = {
val p = Promise[U]()
onComplete {
case Left(t) if pf isDefinedAt t ⇒
try { p completeWith pf(t) } catch { case t: Throwable ⇒ p complete resolve(Left(t)) }
case otherwise ⇒ p complete otherwise
}
p.future
}
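A minimal usage sketch of the difference between recover and tryRecover, patterned on the FutureSpec cases above; the value names are illustrative and an implicit ExecutionContext is assumed to be in scope:

val failed = Promise.failed[String](new IllegalStateException("original"))
// recover maps the failure directly to a value
val viaRecover = failed recover { case _: IllegalStateException ⇒ "fallback value" }
// tryRecover maps the failure to another Future, so the fallback may itself be asynchronous
val viaTryRecover = failed tryRecover { case _: IllegalStateException ⇒ Promise.successful("fallback future") }
// Await.result(viaRecover, timeout)    yields "fallback value"
// Await.result(viaTryRecover, timeout) yields "fallback future"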
/**
* Returns a new Future that will contain the completed result of this Future,
* and which will invoke the supplied PartialFunction when completed.
*
* This allows for establishing order of side-effects.
*
* {{{
* Future { 5 } andThen {
* case something => assert(something is awesome)
* } andThen {
* case Left(t) => handleProblem(t)
* case Right(v) => dealWithSuccess(v)
* }
* }}}
*/
def andThen[U](pf: PartialFunction[Either[Throwable, T], U]): Future[T] = {
val p = Promise[T]()
onComplete { case r ⇒ try if (pf isDefinedAt r) pf(r) finally p complete r }
p.future
}
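A condensed sketch of the ordering guarantee that the "andThen like a boss" test above verifies; the queue name is illustrative and an implicit ExecutionContext is assumed:

val steps = new java.util.concurrent.ConcurrentLinkedQueue[String]()
val f = Future { steps.add("compute"); 42 } andThen {
  case Right(v) ⇒ steps.add("saw success " + v) // runs first, result is unchanged
} andThen {
  case _ ⇒ steps.add("cleanup")                 // runs after the previous callback
}
// Await.result(f, timeout) is still 42; steps contains "compute", "saw success 42", "cleanup" in order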
/**
@ -503,6 +550,10 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
/**
* Creates a new Future[A] which is completed with this Future's result if
* that conforms to A's erased type or a ClassCastException otherwise.
*
* When used from Java, to create the Manifest, use:
* import static akka.japi.Util.manifest;
* future.mapTo(manifest(MyClass.class));
*/
final def mapTo[A](implicit m: Manifest[A]): Future[A] = {
val fa = Promise[A]()
@ -515,7 +566,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
case e: ClassCastException Left(e)
})
}
fa
fa.future
}
/**
@ -546,7 +597,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
logError("Future.flatMap", e)
}
}
p
p.future
}
/**
@ -586,7 +637,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
Left(e)
})
}
p
p.future
}
protected def logError(msg: String, problem: Throwable): Unit = {
@ -818,3 +869,158 @@ final class KeptPromise[T](suppliedValue: Either[Throwable, T])(implicit val exe
case Right(r) ⇒ r
}
}
/**
* This object contains bridge classes between Scala and Java.
* Internal use only.
*/
object japi {
@deprecated("Do not use this directly, use subclasses of this", "2.0")
class CallbackBridge[-T] extends PartialFunction[T, Unit] {
override final def isDefinedAt(t: T): Boolean = true
override final def apply(t: T): Unit = internal(t)
protected def internal(result: T): Unit = ()
}
@deprecated("Do not use this directly, use 'Recover'", "2.0")
class RecoverBridge[+T] extends PartialFunction[Throwable, T] {
override final def isDefinedAt(t: Throwable): Boolean = true
override final def apply(t: Throwable): T = internal(t)
protected def internal(result: Throwable): T = null.asInstanceOf[T]
}
@deprecated("Do not use this directly, use subclasses of this", "2.0")
class BooleanFunctionBridge[-T] extends scala.Function1[T, Boolean] {
override final def apply(t: T): Boolean = internal(t)
protected def internal(result: T): Boolean = false
}
@deprecated("Do not use this directly, use subclasses of this", "2.0")
class UnitFunctionBridge[-T] extends (T ⇒ Unit) {
override final def apply(t: T): Unit = internal(t)
protected def internal(result: T): Unit = ()
}
}
/**
* Callback for when a Future is completed successfully
* SAM (Single Abstract Method) class
*
* Java API
*/
abstract class OnSuccess[-T] extends japi.CallbackBridge[T] {
protected final override def internal(result: T) = onSuccess(result)
/**
* This method will be invoked once when/if a Future that this callback is registered on
* becomes successfully completed
*/
def onSuccess(result: T): Unit
}
/**
* Callback for when a Future is completed with a failure
* SAM (Single Abstract Method) class
*
* Java API
*/
abstract class OnFailure extends japi.CallbackBridge[Throwable] {
protected final override def internal(failure: Throwable) = onFailure(failure)
/**
* This method will be invoked once when/if a Future that this callback is registered on
* becomes completed with a failure
*/
def onFailure(failure: Throwable): Unit
}
/**
* Callback for when a Future is completed with either failure or a success
* SAM (Single Abstract Method) class
*
* Java API
*/
abstract class OnComplete[-T] extends japi.CallbackBridge[Either[Throwable, T]] {
protected final override def internal(value: Either[Throwable, T]): Unit = value match {
case Left(t) ⇒ onComplete(t, null.asInstanceOf[T])
case Right(r) ⇒ onComplete(null, r)
}
/**
* This method will be invoked once when/if a Future that this callback is registered on
* becomes completed with a failure or a success.
* In the case of success, "failure" will be null; in the case of failure, "success" will be null.
*/
def onComplete(failure: Throwable, success: T): Unit
}
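Since each of these callback classes extends japi.CallbackBridge, which is a PartialFunction defined for every input, an instance can be handed straight to the corresponding Future callback; a sketch from the Scala side, assuming a Future[String] named f:

f onComplete new OnComplete[String] {
  def onComplete(failure: Throwable, success: String): Unit =
    if (failure ne null) println("failed: " + failure.getMessage)
    else println("succeeded: " + success)
}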
/**
* Callback for the Future.recover operation that conditionally turns failures into successes.
*
* SAM (Single Abstract Method) class
*
* Java API
*/
abstract class Recover[+T] extends japi.RecoverBridge[T] {
protected final override def internal(result: Throwable): T = recover(result)
/**
* This method will be invoked once when/if the Future this recover callback is registered on
* becomes completed with a failure.
*
* @return a successful value for the passed-in failure
* @throws Throwable the passed-in failure, to propagate it.
*
* Java API
*/
@throws(classOf[Throwable])
def recover(failure: Throwable): T
}
/**
* Callback for the Future.filter operation that creates a new Future which will
* conditionally contain the success of another Future.
*
* SAM (Single Abstract Method) class
* Java API
*/
abstract class Filter[-T] extends japi.BooleanFunctionBridge[T] {
override final def internal(t: T): Boolean = filter(t)
/**
* This method will be invoked once when/if a Future that this callback is registered on
* becomes completed with a success.
*
* @return true if the successful value should be propagated to the new Future, false otherwise
*/
def filter(result: T): Boolean
}
/**
* Callback for the Future.foreach operation that will be invoked if the Future that this callback
* is registered on becomes completed with a success. This method is essentially the same operation
* as onSuccess.
*
* SAM (Single Abstract Method) class
* Java API
*/
abstract class Foreach[-T] extends japi.UnitFunctionBridge[T] {
override final def internal(t: T): Unit = each(t)
/**
* This method will be invoked once when/if a Future that this callback is registered on
* becomes successfully completed
*/
def each(result: T): Unit
}
/**
* Callback for the Future.map and Future.flatMap operations that will be invoked
* if the Future that this callback is registered on becomes completed with a success.
* This callback is the equivalent of an akka.japi.Function
*
* SAM (Single Abstract Method) class
*
* Java API
*/
abstract class Mapper[-T, +R] extends scala.runtime.AbstractFunction1[T, R]
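Mapper adds nothing beyond AbstractFunction1, so from Scala it behaves as an ordinary function; from Java it is subclassed anonymously as in the map and flatMap tests at the top of this commit. A small sketch (someStringFuture is an assumed Future[String]):

val toLength = new Mapper[String, Int] { def apply(s: String): Int = s.length }
// usable wherever a Function1[String, Int] is expected, e.g. someStringFuture map toLength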

View file

@ -1,62 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.dispatch.japi
import akka.japi.{ Procedure2, Procedure, Function ⇒ JFunc }
/* Java API */
trait Future[+T] { self: akka.dispatch.Future[T] ⇒
/**
* Asynchronously called when this Future gets a successful result
*/
private[japi] final def onSuccess[A >: T](proc: Procedure[A]): this.type = self.onSuccess({ case r ⇒ proc(r.asInstanceOf[A]) }: PartialFunction[T, Unit])
/**
* Asynchronously called when this Future gets a failed result
*/
private[japi] final def onFailure(proc: Procedure[Throwable]): this.type = self.onFailure({ case t: Throwable ⇒ proc(t) }: PartialFunction[Throwable, Unit])
/**
* Asynchronously called when this future is completed with either a failed or a successful result
* In case of a success, the first parameter (Throwable) will be null
* In case of a failure, the second parameter (T) will be null
* Exactly one of the two will be null: never both, and never neither
*/
private[japi] final def onComplete[A >: T](proc: Procedure2[Throwable, A]): this.type = self.onComplete(_.fold(t ⇒ proc(t, null.asInstanceOf[T]), r ⇒ proc(null, r)))
/**
* Asynchronously applies the provided function to the (if any) successful result of this Future
* Any failure of this Future will be propagated to the Future returned by this method.
*/
private[japi] final def map[A >: T, B](f: JFunc[A, B]): akka.dispatch.Future[B] = self.map(f(_))
/**
* Asynchronously applies the provided function to the (if any) successful result of this Future and flattens it.
* Any failure of this Future will be propagated to the Future returned by this method.
*/
private[japi] final def flatMap[A >: T, B](f: JFunc[A, akka.dispatch.Future[B]]): akka.dispatch.Future[B] = self.flatMap(f(_))
/**
* Asynchronously applies the provided Procedure to the (if any) successful result of this Future
* Provided Procedure will not be called in case of no-result or in case of failed result
*/
private[japi] final def foreach[A >: T](proc: Procedure[A]): Unit = self.foreach(proc(_))
/**
* Returns a new Future whose successful result will be the successful result of this Future if that result conforms to the provided predicate
* Any failure of this Future will be propagated to the Future returned by this method.
*/
private[japi] final def filter[A >: T](p: JFunc[A, java.lang.Boolean]): akka.dispatch.Future[A] =
self.filter((a: Any) ⇒ p(a.asInstanceOf[A])).asInstanceOf[akka.dispatch.Future[A]]
/**
* Returns a new Future whose value will be of the specified type if it really is
* Or a failure with a ClassCastException if it wasn't.
*/
private[japi] final def mapTo[A](clazz: Class[A]): akka.dispatch.Future[A] = {
implicit val manifest: Manifest[A] = Manifest.classType(clazz)
self.mapTo[A]
}
}

View file

@ -119,3 +119,13 @@ object Option {
implicit def java2ScalaOption[A](o: Option[A]): scala.Option[A] = o.asScala
implicit def scala2JavaOption[A](o: scala.Option[A]): Option[A] = if (o.isDefined) some(o.get) else none
}
/**
* This object holds common utilities for Java
*/
object Util {
/**
* Given a Class returns a Scala Manifest of that Class
*/
def manifest[T](clazz: Class[T]): Manifest[T] = Manifest.classType(clazz)
}
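A sketch tying this helper to the Manifest-taking mapTo shown earlier; someFuture is an assumed Future[AnyRef], and from Java the same call reads future.mapTo(manifest(String.class)) as noted in the mapTo scaladoc:

val m: Manifest[String] = Util.manifest(classOf[String])
val typed = someFuture.mapTo(m) // explicit Manifest, the way a Java caller supplies it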

View file

@ -3,9 +3,8 @@
*/
package akka.util
import java.{ lang ⇒ jl }
object BoxedType {
import java.{ lang ⇒ jl }
private val toBoxed = Map[Class[_], Class[_]](
classOf[Boolean] -> classOf[jl.Boolean],
@ -18,8 +17,5 @@ object BoxedType {
classOf[Double] -> classOf[jl.Double],
classOf[Unit] -> classOf[scala.runtime.BoxedUnit])
def apply(c: Class[_]): Class[_] = {
if (c.isPrimitive) toBoxed(c) else c
}
final def apply(c: Class[_]): Class[_] = if (c.isPrimitive) toBoxed(c) else c
}
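A quick illustration of the lookup; the Int entry sits in the part of the map elided by the hunk above but follows the same pattern as the entries shown:

BoxedType(classOf[Int])    // classOf[java.lang.Integer]
BoxedType(classOf[Unit])   // classOf[scala.runtime.BoxedUnit]
BoxedType(classOf[String]) // classOf[String], returned unchanged since it is not a primitive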

View file

@ -17,30 +17,35 @@ class TimerException(message: String) extends RuntimeException(message)
* import akka.util.duration._
* import akka.util.Timer
*
* val timer = Timer(30.seconds)
* val timer = Timer(30 seconds)
* while (timer.isTicking) { ... }
* </pre>
*/
case class Timer(duration: Duration, throwExceptionOnTimeout: Boolean = false) {
val startTimeInMillis = System.currentTimeMillis
val timeoutInMillis = duration.toMillis
case class Timer(timeout: Duration, throwExceptionOnTimeout: Boolean = false) {
val startTime = Duration(System.nanoTime, NANOSECONDS)
def timeLeft: Duration = {
val time = timeout.toNanos - (System.nanoTime - startTime.toNanos)
if (time <= 0) Duration(0, NANOSECONDS)
else Duration(time, NANOSECONDS)
}
/**
* Returns true while the timer is ticking. After that it either throws an exception or
* returns false, depending on whether the 'throwExceptionOnTimeout' argument is true or false.
*/
def isTicking: Boolean = {
if (!(timeoutInMillis > (System.currentTimeMillis - startTimeInMillis))) {
if (throwExceptionOnTimeout) throw new TimerException("Time out after " + duration)
if (!(timeout.toNanos > (System.nanoTime - startTime.toNanos))) {
if (throwExceptionOnTimeout) throw new TimerException("Time out after " + timeout)
else false
} else true
}
}
case class Deadline(d: Duration) {
def +(other: Duration): Deadline = copy(d = d + other)
def -(other: Duration): Deadline = copy(d = d - other)
def -(other: Deadline): Duration = d - other.d
case class Deadline(time: Duration) {
def +(other: Duration): Deadline = copy(time = time + other)
def -(other: Duration): Deadline = copy(time = time - other)
def -(other: Deadline): Duration = time - other.time
def timeLeft: Duration = this - Deadline.now
}
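A minimal sketch of the reworked Deadline arithmetic, assuming akka.util.duration._ is imported for the postfix-seconds syntax used in the Timer example above:

val deadline = Deadline.now + (30 seconds)
// ... do some work ...
if (deadline.timeLeft > (0 seconds))
  println("still within budget, " + deadline.timeLeft + " remaining")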
object Deadline {

View file

@ -1,187 +0,0 @@
package akka.cluster;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import org.apache.bookkeeper.proto.BookieServer;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ZooKeeperServer;
public class LocalBookKeeper {
public static final int CONNECTION_TIMEOUT = 30000;
int numberOfBookies;
public LocalBookKeeper() {
numberOfBookies = 3;
}
public LocalBookKeeper(int numberOfBookies) {
this();
this.numberOfBookies = numberOfBookies;
}
private final String HOSTPORT = "127.0.0.1:2181";
NIOServerCnxnFactory serverFactory;
ZooKeeperServer zks;
ZooKeeper zkc;
int ZooKeeperDefaultPort = 2181;
File ZkTmpDir;
//BookKeeper variables
File tmpDirs[];
BookieServer bs[];
Integer initialPort = 5000;
/**
* @param args
*/
public void runZookeeper(int maxCC) throws IOException{
// create a ZooKeeper server(dataDir, dataLogDir, port)
//ServerStats.registerAsConcrete();
//ClientBase.setupTestEnv();
ZkTmpDir = File.createTempFile("zookeeper", "test");
ZkTmpDir.delete();
ZkTmpDir.mkdir();
try {
zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort);
serverFactory = new NIOServerCnxnFactory();
serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), maxCC);
serverFactory.startup(zks);
} catch (Exception e) {
// TODO Auto-generated catch block
}
boolean b = waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT);
}
public void initializeZookeper() {
//initialize the zk client with values
try {
zkc = new ZooKeeper("127.0.0.1", ZooKeeperDefaultPort, new emptyWatcher());
zkc.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
zkc.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
// No need to create an entry for each requested bookie anymore as the
// BookieServers will register themselves with ZooKeeper on startup.
} catch (KeeperException e) {
} catch (InterruptedException e) {
} catch (IOException e) {
}
}
public void runBookies() throws IOException{
// Create Bookie Servers (B1, B2, B3)
tmpDirs = new File[numberOfBookies];
bs = new BookieServer[numberOfBookies];
for(int i = 0; i < numberOfBookies; i++) {
tmpDirs[i] = File.createTempFile("bookie" + Integer.toString(i), "test");
tmpDirs[i].delete();
tmpDirs[i].mkdir();
bs[i] = new BookieServer(initialPort + i, InetAddress.getLocalHost().getHostAddress() + ":"
+ ZooKeeperDefaultPort, tmpDirs[i], new File[]{tmpDirs[i]});
bs[i].start();
}
}
public static void main(String[] args) throws IOException, InterruptedException {
if(args.length < 1) {
usage();
System.exit(-1);
}
LocalBookKeeper lb = new LocalBookKeeper(Integer.parseInt(args[0]));
lb.runZookeeper(1000);
lb.initializeZookeper();
lb.runBookies();
while (true) {
Thread.sleep(5000);
}
}
private static void usage() {
System.err.println("Usage: LocalBookKeeper number-of-bookies");
}
/* User for testing purposes, void */
class emptyWatcher implements Watcher{
public void process(WatchedEvent event) {}
}
public static boolean waitForServerUp(String hp, long timeout) {
long start = System.currentTimeMillis();
String split[] = hp.split(":");
String host = split[0];
int port = Integer.parseInt(split[1]);
while (true) {
try {
Socket sock = new Socket(host, port);
BufferedReader reader = null;
try {
OutputStream outstream = sock.getOutputStream();
outstream.write("stat".getBytes());
outstream.flush();
reader =
new BufferedReader(
new InputStreamReader(sock.getInputStream()));
String line = reader.readLine();
if (line != null && line.startsWith("Zookeeper version:")) {
return true;
}
} finally {
sock.close();
if (reader != null) {
reader.close();
}
}
} catch (IOException e) {
// ignore as this is expected
}
if (System.currentTimeMillis() > start + timeout) {
break;
}
try {
Thread.sleep(250);
} catch (InterruptedException e) {
// ignore
}
}
return false;
}
}

View file

@ -1,312 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akka.cluster.zookeeper;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.TreeMap;
import java.util.concurrent.CountDownLatch;
import org.apache.log4j.Logger;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
/**
*
* A <a href="package.html">protocol to implement a distributed queue</a>.
*
*/
public class DistributedQueue {
private static final Logger LOG = Logger.getLogger(DistributedQueue.class);
private final String dir;
private ZooKeeper zookeeper;
private List<ACL> acl = ZooDefs.Ids.OPEN_ACL_UNSAFE;
private final String prefix = "qn-";
public DistributedQueue(ZooKeeper zookeeper, String dir, List<ACL> acl) {
this.dir = dir;
if(acl != null) {
this.acl = acl;
}
this.zookeeper = zookeeper;
}
/**
* Returns a Map of the children, ordered by id.
* @param watcher optional watcher on getChildren() operation.
* @return map from id to child name for all children
*/
private TreeMap<Long,String> orderedChildren(Watcher watcher) throws KeeperException, InterruptedException {
TreeMap<Long,String> orderedChildren = new TreeMap<Long,String>();
List<String> childNames = null;
try{
childNames = zookeeper.getChildren(dir, watcher);
}catch (KeeperException.NoNodeException e) {
throw e;
}
for(String childName : childNames) {
try{
//Check format
if(!childName.regionMatches(0, prefix, 0, prefix.length())) {
LOG.warn("Found child node with improper name: " + childName);
continue;
}
String suffix = childName.substring(prefix.length());
Long childId = new Long(suffix);
orderedChildren.put(childId,childName);
}catch(NumberFormatException e) {
LOG.warn("Found child node with improper format : " + childName + " " + e,e);
}
}
return orderedChildren;
}
/**
* Find the smallest child node.
* @return The name of the smallest child node.
*/
private String smallestChildName() throws KeeperException, InterruptedException {
long minId = Long.MAX_VALUE;
String minName = "";
List<String> childNames = null;
try{
childNames = zookeeper.getChildren(dir, false);
}catch(KeeperException.NoNodeException e) {
LOG.warn("Caught: " +e,e);
return null;
}
for(String childName : childNames) {
try{
//Check format
if(!childName.regionMatches(0, prefix, 0, prefix.length())) {
LOG.warn("Found child node with improper name: " + childName);
continue;
}
String suffix = childName.substring(prefix.length());
long childId = Long.parseLong(suffix);
if(childId < minId) {
minId = childId;
minName = childName;
}
}catch(NumberFormatException e) {
LOG.warn("Found child node with improper format : " + childName + " " + e,e);
}
}
if(minId < Long.MAX_VALUE) {
return minName;
}else{
return null;
}
}
/**
* Return the head of the queue without modifying the queue.
* @return the data at the head of the queue.
* @throws NoSuchElementException
* @throws KeeperException
* @throws InterruptedException
*/
public byte[] element() throws NoSuchElementException, KeeperException, InterruptedException {
TreeMap<Long,String> orderedChildren;
// element, take, and remove follow the same pattern.
// We want to return the child node with the smallest sequence number.
// Since other clients are remove()ing and take()ing nodes concurrently,
// the child with the smallest sequence number in orderedChildren might be gone by the time we check.
// We don't call getChildren again until we have tried the rest of the nodes in sequence order.
while(true) {
try{
orderedChildren = orderedChildren(null);
}catch(KeeperException.NoNodeException e) {
throw new NoSuchElementException();
}
if(orderedChildren.size() == 0 ) throw new NoSuchElementException();
for(String headNode : orderedChildren.values()) {
if(headNode != null) {
try{
return zookeeper.getData(dir+"/"+headNode, false, null);
}catch(KeeperException.NoNodeException e) {
//Another client removed the node first, try next
}
}
}
}
}
/**
* Attempts to remove the head of the queue and return it.
* @return The former head of the queue
* @throws NoSuchElementException
* @throws KeeperException
* @throws InterruptedException
*/
public byte[] remove() throws NoSuchElementException, KeeperException, InterruptedException {
TreeMap<Long,String> orderedChildren;
// Same as for element. Should refactor this.
while(true) {
try{
orderedChildren = orderedChildren(null);
}catch(KeeperException.NoNodeException e) {
throw new NoSuchElementException();
}
if(orderedChildren.size() == 0) throw new NoSuchElementException();
for(String headNode : orderedChildren.values()) {
String path = dir +"/"+headNode;
try{
byte[] data = zookeeper.getData(path, false, null);
zookeeper.delete(path, -1);
return data;
}catch(KeeperException.NoNodeException e) {
// Another client deleted the node first.
}
}
}
}
private class LatchChildWatcher implements Watcher {
CountDownLatch latch;
public LatchChildWatcher() {
latch = new CountDownLatch(1);
}
public void process(WatchedEvent event) {
LOG.debug("Watcher fired on path: " + event.getPath() + " state: " +
event.getState() + " type " + event.getType());
latch.countDown();
}
public void await() throws InterruptedException {
latch.await();
}
}
/**
* Removes the head of the queue and returns it, blocks until it succeeds.
* @return The former head of the queue
* @throws NoSuchElementException
* @throws KeeperException
* @throws InterruptedException
*/
public byte[] take() throws KeeperException, InterruptedException {
TreeMap<Long,String> orderedChildren;
// Same as for element. Should refactor this.
while(true) {
LatchChildWatcher childWatcher = new LatchChildWatcher();
try{
orderedChildren = orderedChildren(childWatcher);
}catch(KeeperException.NoNodeException e) {
zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT);
continue;
}
if(orderedChildren.size() == 0) {
childWatcher.await();
continue;
}
for(String headNode : orderedChildren.values()) {
String path = dir +"/"+headNode;
try{
byte[] data = zookeeper.getData(path, false, null);
zookeeper.delete(path, -1);
return data;
}catch(KeeperException.NoNodeException e) {
// Another client deleted the node first.
}
}
}
}
/**
* Inserts data into queue.
* @param data
* @return true if data was successfully added
*/
public boolean offer(byte[] data) throws KeeperException, InterruptedException{
for(;;) {
try{
zookeeper.create(dir+"/"+prefix, data, acl, CreateMode.PERSISTENT_SEQUENTIAL);
return true;
}catch(KeeperException.NoNodeException e) {
zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT);
}
}
}
/**
* Returns the data at the first element of the queue, or null if the queue is empty.
* @return data at the first element of the queue, or null.
* @throws KeeperException
* @throws InterruptedException
*/
public byte[] peek() throws KeeperException, InterruptedException{
try{
return element();
}catch(NoSuchElementException e) {
return null;
}
}
/**
* Attempts to remove the head of the queue and return it. Returns null if the queue is empty.
* @return Head of the queue or null.
* @throws KeeperException
* @throws InterruptedException
*/
public byte[] poll() throws KeeperException, InterruptedException {
try{
return remove();
}catch(NoSuchElementException e) {
return null;
}
}
}

View file

@ -1,173 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.zookeeper;
import java.io.Serializable;
import java.util.List;
import java.util.ArrayList;
import org.I0Itec.zkclient.ExceptionUtil;
import org.I0Itec.zkclient.IZkChildListener;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.exception.ZkNoNodeException;
public class ZooKeeperQueue<T extends Object> {
protected static class Element<T> {
private String _name;
private T _data;
public Element(String name, T data) {
_name = name;
_data = data;
}
public String getName() {
return _name;
}
public T getData() {
return _data;
}
}
protected final ZkClient _zkClient;
private final String _elementsPath;
private final String _rootPath;
private final boolean _isBlocking;
public ZooKeeperQueue(ZkClient zkClient, String rootPath, boolean isBlocking) {
_zkClient = zkClient;
_rootPath = rootPath;
_isBlocking = isBlocking;
_elementsPath = rootPath + "/queue";
if (!_zkClient.exists(rootPath)) {
_zkClient.createPersistent(rootPath, true);
_zkClient.createPersistent(_elementsPath, true);
}
}
public String enqueue(T element) {
try {
String sequential = _zkClient.createPersistentSequential(getElementRoughPath(), element);
String elementId = sequential.substring(sequential.lastIndexOf('/') + 1);
return elementId;
} catch (Exception e) {
throw ExceptionUtil.convertToRuntimeException(e);
}
}
public T dequeue() throws InterruptedException {
if (_isBlocking) {
Element<T> element = getFirstElement();
_zkClient.delete(getElementPath(element.getName()));
return element.getData();
} else {
throw new UnsupportedOperationException("Non-blocking ZooKeeperQueue is not yet supported");
/* FIXME DOES NOT WORK
try {
String headName = getSmallestElement(_zkClient.getChildren(_elementsPath));
String headPath = getElementPath(headName);
return (T) _zkClient.readData(headPath);
} catch (ZkNoNodeException e) {
return null;
}
*/
}
}
public boolean containsElement(String elementId) {
String zkPath = getElementPath(elementId);
return _zkClient.exists(zkPath);
}
public T peek() throws InterruptedException {
Element<T> element = getFirstElement();
if (element == null) {
return null;
}
return element.getData();
}
@SuppressWarnings("unchecked")
public List<T> getElements() {
List<String> paths =_zkClient.getChildren(_elementsPath);
List<T> elements = new ArrayList<T>();
for (String path: paths) {
elements.add((T)_zkClient.readData(path));
}
return elements;
}
public int size() {
return _zkClient.getChildren(_elementsPath).size();
}
public void clear() {
_zkClient.deleteRecursive(_rootPath);
}
public boolean isEmpty() {
return size() == 0;
}
private String getElementRoughPath() {
return getElementPath("item" + "-");
}
private String getElementPath(String elementId) {
return _elementsPath + "/" + elementId;
}
private String getSmallestElement(List<String> list) {
String smallestElement = list.get(0);
for (String element : list) {
if (element.compareTo(smallestElement) < 0) {
smallestElement = element;
}
}
return smallestElement;
}
@SuppressWarnings("unchecked")
protected Element<T> getFirstElement() throws InterruptedException {
final Object mutex = new Object();
IZkChildListener notifyListener = new IZkChildListener() {
@Override
public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
synchronized (mutex) {
mutex.notify();
}
}
};
try {
while (true) {
List<String> elementNames;
synchronized (mutex) {
elementNames = _zkClient.subscribeChildChanges(_elementsPath, notifyListener);
while (elementNames == null || elementNames.isEmpty()) {
mutex.wait();
elementNames = _zkClient.getChildren(_elementsPath);
}
}
String elementName = getSmallestElement(elementNames);
try {
String elementPath = getElementPath(elementName);
return new Element<T>(elementName, (T) _zkClient.readData(elementPath));
} catch (ZkNoNodeException e) {
// somebody else picked up the element first, so we have to
// retry with the new first element
}
}
} catch (InterruptedException e) {
throw e;
} catch (Exception e) {
throw ExceptionUtil.convertToRuntimeException(e);
} finally {
_zkClient.unsubscribeChildChanges(_elementsPath, notifyListener);
}
}
}

View file

@ -0,0 +1,33 @@
######################################
# Akka Cluster Reference Config File #
######################################
# This reference config file has all the default settings.
# Make your edits/overrides in your application.conf.
akka {
cluster {
seed-nodes = []
seed-node-connection-timeout = 30s
max-time-to-retry-joining-cluster = 30s
# accrual failure detection config
failure-detector {
# defines the failure detector threshold
# A low threshold is prone to generate many wrong suspicions but ensures
# a quick detection in the event of a real crash. Conversely, a high
# threshold generates fewer mistakes but needs more time to detect
# actual crashes
threshold = 8
max-sample-size = 1000
}
gossip {
initialDelay = 5s
frequency = 1s
}
}
}

View file

@ -2,13 +2,16 @@
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.remote
package akka.cluster
import akka.actor.{ ActorSystem, Address }
import akka.event.Logging
import java.util.concurrent.atomic.AtomicReference
import scala.collection.immutable.Map
import scala.annotation.tailrec
import java.util.concurrent.atomic.AtomicReference
import System.{ currentTimeMillis ⇒ newTimestamp }
import akka.actor.{ ActorSystem, Address }
/**
* Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their paper:
@ -20,12 +23,14 @@ import akka.actor.{ ActorSystem, Address }
* <p/>
* Default threshold is 8, but can be configured in the Akka config.
*/
class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 1000) {
class AccrualFailureDetector(system: ActorSystem, val threshold: Int = 8, val maxSampleSize: Int = 1000) {
private final val PhiFactor = 1.0 / math.log(10.0)
private case class FailureStats(mean: Double = 0.0D, variance: Double = 0.0D, deviation: Double = 0.0D)
private val log = Logging(system, "FailureDetector")
/**
* Implement using optimistic lockless concurrency, all state is represented
* by this immutable case class and managed by an AtomicReference.
@ -49,6 +54,7 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10
*/
@tailrec
final def heartbeat(connection: Address) {
log.debug("Heartbeat from connection [{}] ", connection)
val oldState = state.get
val latestTimestamp = oldState.timestamps.get(connection)
@ -132,12 +138,15 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10
def phi(connection: Address): Double = {
val oldState = state.get
val oldTimestamp = oldState.timestamps.get(connection)
val phi =
if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections
else {
val timestampDiff = newTimestamp - oldTimestamp.get
val mean = oldState.failureStats.get(connection).getOrElse(FailureStats()).mean
PhiFactor * timestampDiff / mean
}
log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection)
phi
}
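A back-of-the-envelope reading of that formula with illustrative numbers: a mean heartbeat interval of 1000 ms and 3000 ms elapsed since the last heartbeat give

val PhiFactor = 1.0 / math.log(10.0)  // ≈ 0.434, as defined above
val phi = PhiFactor * 3000.0 / 1000.0 // ≈ 1.30, well below the default threshold of 8
// phi only crosses 8 after roughly 8 / 0.434 ≈ 18.4 mean intervals without a heartbeat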
/**

View file

@ -1,35 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster
import org.apache.bookkeeper.proto.BookieServer
import java.io.File
/*
A simple use of BookKeeper is to implement a write-ahead transaction log. A server maintains an in-memory data structure
(with periodic snapshots for example) and logs changes to that structure before it applies the change. The system
server creates a ledger at startup and stores the ledger id and password in a well known place (ZooKeeper maybe). When
it needs to make a change, the server adds an entry with the change information to a ledger and applies the change when
BookKeeper adds the entry successfully. The server can even use asyncAddEntry to queue up many changes for high change
throughput. BookKeeper meticulously logs the changes in order and calls the completion functions in order.
When the system server dies, a backup server will come online, get the last snapshot and then it will open the
ledger of the old server and read all the entries from the time the snapshot was taken. (Since it doesn't know the last
entry number it will use MAX_INTEGER). Once all the entries have been processed, it will close the ledger and start a
new one for its use.
*/
object BookKeeperServer {
val port = 3181
val zkServers = "localhost:2181"
val journal = new File("./bk/journal")
val ledgers = Array(new File("./bk/ledger"))
val bookie = new BookieServer(port, zkServers, journal, ledgers)
def start() {
bookie.start()
bookie.join()
}
}

File diff suppressed because it is too large

View file

@ -1,129 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster
import akka.actor._
import akka.util._
import ReflectiveAccess._
import akka.routing._
import akka.cluster._
import FailureDetector._
import akka.event.EventHandler
import akka.config.ConfigurationException
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicReference
import collection.immutable.Map
import annotation.tailrec
/**
* ClusterActorRef factory and locator.
*/
object ClusterActorRef {
import FailureDetectorType._
import RouterType._
def newRef(
actorAddress: String,
routerType: RouterType,
failureDetectorType: FailureDetectorType,
timeout: Long): ClusterActorRef = {
val routerFactory: () ⇒ Router = routerType match {
case Direct ⇒ () ⇒ new DirectRouter
case Random ⇒ () ⇒ new RandomRouter
case RoundRobin ⇒ () ⇒ new RoundRobinRouter
case LeastCPU ⇒ sys.error("Router LeastCPU not supported yet")
case LeastRAM ⇒ sys.error("Router LeastRAM not supported yet")
case LeastMessages ⇒ sys.error("Router LeastMessages not supported yet")
case Custom ⇒ sys.error("Router Custom not supported yet")
}
val failureDetectorFactory: (Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector = failureDetectorType match {
case RemoveConnectionOnFirstFailureLocalFailureDetector ⇒
(connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureLocalFailureDetector(connections.values)
case RemoveConnectionOnFirstFailureRemoteFailureDetector ⇒
(connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureRemoteFailureDetector(connections)
case CustomFailureDetector(implClass) ⇒
(connections: Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector.createCustomFailureDetector(implClass, connections)
}
new ClusterActorRef(
RoutedProps()
.withTimeout(timeout)
.withRouter(routerFactory)
.withFailureDetector(failureDetectorFactory),
actorAddress)
}
/**
* Finds the cluster actor reference that has a specific address.
*/
def actorFor(address: String): Option[ActorRef] =
Actor.registry.local.actorFor(Address.clusterActorRefPrefix + address)
private[cluster] def createRemoteActorRef(actorAddress: String, inetSocketAddress: InetSocketAddress) = {
RemoteActorRef(inetSocketAddress, actorAddress, Actor.TIMEOUT, None)
}
}
/**
* ActorRef representing one or many instances of a clustered, load-balanced and sometimes replicated actor
* where the instances can reside on other nodes in the cluster.
*/
private[akka] class ClusterActorRef(props: RoutedProps, val address: String) extends AbstractRoutedActorRef(props) {
import ClusterActorRef._
ClusterModule.ensureEnabled()
val addresses = Cluster.node.inetSocketAddressesForActor(address)
EventHandler.debug(this,
"Checking out cluster actor ref with address [%s] and router [%s] on [%s] connected to [\n\t%s]"
.format(address, router, Cluster.node.remoteServerAddress, addresses.map(_._2).mkString("\n\t")))
addresses foreach {
case (_, address) Cluster.node.clusterActorRefs.put(address, this)
}
val connections: FailureDetector = {
val remoteConnections = (Map[InetSocketAddress, ActorRef]() /: addresses) {
case (map, (uuid, inetSocketAddress))
map + (inetSocketAddress -> createRemoteActorRef(address, inetSocketAddress))
}
props.failureDetectorFactory(remoteConnections)
}
router.init(connections)
def nrOfConnections: Int = connections.size
private[akka] def failOver(from: InetSocketAddress, to: InetSocketAddress) {
connections.failOver(from, to)
}
def stop() {
synchronized {
if (_status == ActorRefInternals.RUNNING) {
Actor.registry.local.unregisterClusterActorRef(this)
_status = ActorRefInternals.SHUTDOWN
postMessageToMailbox(Terminate, None)
// FIXME here we need to fire off Actor.cluster.remove(address) (which needs to be properly implemented first, see ticket)
connections.stopAll()
}
}
}
/* If you start me up */
if (_status == ActorRefInternals.UNSTARTED) {
_status = ActorRefInternals.RUNNING
Actor.registry.local.registerClusterActorRef(this)
}
}

View file

@ -1,205 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster
import akka.actor.DeploymentConfig._
import akka.actor._
import akka.event.EventHandler
import akka.config.Config
import akka.util.Switch
import akka.util.Helpers._
import akka.cluster.zookeeper.AkkaZkClient
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener }
import org.I0Itec.zkclient.exception.{ ZkNoNodeException, ZkNodeExistsException }
import scala.collection.immutable.Seq
import scala.collection.JavaConversions.collectionAsScalaIterable
import java.util.concurrent.{ CountDownLatch, TimeUnit }
/**
* A ClusterDeployer is responsible for deploying a Deploy.
*/
object ClusterDeployer extends ActorDeployer {
val clusterName = Cluster.name
val nodeName = Config.nodename
val clusterPath = "/%s" format clusterName
val deploymentPath = clusterPath + "/deployment"
val deploymentAddressPath = deploymentPath + "/%s"
val deploymentCoordinationPath = clusterPath + "/deployment-coordination"
val deploymentInProgressLockPath = deploymentCoordinationPath + "/in-progress"
val isDeploymentCompletedInClusterLockPath = deploymentCoordinationPath + "/completed" // should not be part of basePaths
val basePaths = List(clusterPath, deploymentPath, deploymentCoordinationPath, deploymentInProgressLockPath)
private val isConnected = new Switch(false)
private val deploymentCompleted = new CountDownLatch(1)
private val zkClient = new AkkaZkClient(
Cluster.zooKeeperServers,
Cluster.sessionTimeout,
Cluster.connectionTimeout,
Cluster.defaultZooKeeperSerializer)
private val deploymentInProgressLockListener = new LockListener {
def lockAcquired() {
EventHandler.info(this, "Clustered deployment started")
}
def lockReleased() {
EventHandler.info(this, "Clustered deployment completed")
deploymentCompleted.countDown()
}
}
private val deploymentInProgressLock = new WriteLock(
zkClient.connection.getZookeeper,
deploymentInProgressLockPath,
null,
deploymentInProgressLockListener)
private val systemDeployments: List[Deploy] = Nil
def shutdown() {
isConnected switchOff {
// undeploy all
try {
for {
child collectionAsScalaIterable(zkClient.getChildren(deploymentPath))
deployment zkClient.readData(deploymentAddressPath.format(child)).asInstanceOf[Deploy]
} zkClient.delete(deploymentAddressPath.format(deployment.address))
invalidateDeploymentInCluster()
} catch {
case e: Exception
handleError(new DeploymentException("Could not undeploy all deployment data in ZooKeeper due to: " + e))
}
// shut down ZooKeeper client
zkClient.close()
EventHandler.info(this, "ClusterDeployer shut down successfully")
}
}
def lookupDeploymentFor(address: String): Option[Deploy] = ensureRunning {
LocalDeployer.lookupDeploymentFor(address) match { // try local cache
case Some(deployment) // in local cache
deployment
case None // not in cache, check cluster
val deployment =
try {
Some(zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy])
} catch {
case e: ZkNoNodeException None
case e: Exception
EventHandler.warning(this, e.toString)
None
}
deployment foreach (LocalDeployer.deploy(_)) // cache it in local cache
deployment
}
}
def fetchDeploymentsFromCluster: List[Deploy] = ensureRunning {
val addresses =
try {
zkClient.getChildren(deploymentPath).toList
} catch {
case e: ZkNoNodeException List[String]()
}
val deployments = addresses map { address
zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy]
}
EventHandler.info(this, "Fetched deployment plans from cluster [\n\t%s\n]" format deployments.mkString("\n\t"))
deployments
}
private[akka] def init(deployments: Seq[Deploy]) {
isConnected switchOn {
EventHandler.info(this, "Initializing ClusterDeployer")
basePaths foreach { path
try {
ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT))
EventHandler.debug(this, "Created ZooKeeper path for deployment [%s]".format(path))
} catch {
case e
val error = new DeploymentException(e.toString)
EventHandler.error(error, this)
throw error
}
}
val allDeployments = deployments ++ systemDeployments
if (!isDeploymentCompletedInCluster) {
if (deploymentInProgressLock.lock()) {
// try to be the one doing the clustered deployment
EventHandler.info(this, "Pushing clustered deployment plans [\n\t" + allDeployments.mkString("\n\t") + "\n]")
allDeployments foreach (deploy(_)) // deploy
markDeploymentCompletedInCluster()
deploymentInProgressLock.unlock() // signal deployment complete
} else {
deploymentCompleted.await(30, TimeUnit.SECONDS) // wait until deployment is completed by other "master" node
}
}
// fetch clustered deployments and deploy them locally
fetchDeploymentsFromCluster foreach (LocalDeployer.deploy(_))
}
}
private[akka] def deploy(deployment: Deploy) {
ensureRunning {
LocalDeployer.deploy(deployment)
deployment match {
case Deploy(_, _, _, _, Local) | Deploy(_, _, _, _, _: Local) //TODO LocalDeployer.deploy(deployment)??
case Deploy(address, recipe, routing, _, _) // cluster deployment
/*TODO recipe foreach { r ⇒
Deployer.newClusterActorRef(() Actor.actorOf(r.implementationClass), address, deployment)
}*/
val path = deploymentAddressPath.format(address)
try {
ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT))
zkClient.writeData(path, deployment)
} catch {
case e: NullPointerException
handleError(new DeploymentException(
"Could not store deployment data [" + deployment + "] in ZooKeeper since client session is closed"))
case e: Exception
handleError(new DeploymentException(
"Could not store deployment data [" + deployment + "] in ZooKeeper due to: " + e))
}
}
}
}
private def markDeploymentCompletedInCluster() {
ignore[ZkNodeExistsException](zkClient.create(isDeploymentCompletedInClusterLockPath, null, CreateMode.PERSISTENT))
}
private def isDeploymentCompletedInCluster = zkClient.exists(isDeploymentCompletedInClusterLockPath)
// FIXME in future - add watch to this path to be able to trigger redeployment, and use this method to trigger redeployment
private def invalidateDeploymentInCluster() {
ignore[ZkNoNodeException](zkClient.delete(isDeploymentCompletedInClusterLockPath))
}
private def ensureRunning[T](body: T): T = {
if (isConnected.isOn) body
else throw new IllegalStateException("ClusterDeployer is not running")
}
private[akka] def handleError(e: Throwable): Nothing = {
EventHandler.error(e, this, e.toString)
throw e
}
}

View file

@ -0,0 +1,26 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster
import com.typesafe.config.Config
import akka.util.Duration
import java.util.concurrent.TimeUnit.MILLISECONDS
import akka.config.ConfigurationException
import scala.collection.JavaConverters._
import akka.actor.Address
import akka.actor.AddressExtractor
class ClusterSettings(val config: Config, val systemName: String) {
import config._
// cluster config section
val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold")
val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size")
val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS)
val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS)
val InitialDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS)
val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip.frequency"), MILLISECONDS)
val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect {
case AddressExtractor(addr) ⇒ addr
}
}
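A sketch of loading these settings against the reference configuration shown earlier; ConfigFactory.load() and the system name are placeholders:

import com.typesafe.config.ConfigFactory

val settings = new ClusterSettings(ConfigFactory.load(), "MyClusterSystem")
settings.FailureDetectorThreshold // 8, from akka.cluster.failure-detector.threshold
settings.GossipFrequency          // 1 second, from akka.cluster.gossip.frequency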

View file

@ -0,0 +1,438 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster
import akka.actor._
import akka.actor.Status._
import akka.remote._
import akka.event.Logging
import akka.dispatch.Await
import akka.pattern.ask
import akka.util._
import akka.config.ConfigurationException
import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean }
import java.util.concurrent.TimeUnit._
import java.util.concurrent.TimeoutException
import java.security.SecureRandom
import System.{ currentTimeMillis ⇒ newTimestamp }
import scala.collection.immutable.{ Map, SortedSet }
import scala.annotation.tailrec
import com.google.protobuf.ByteString
/**
* Interface for member membership change listener.
*/
trait NodeMembershipChangeListener {
def memberConnected(member: Member)
def memberDisconnected(member: Member)
}
/**
* Base trait for all cluster messages. All ClusterMessage's are serializable.
*/
sealed trait ClusterMessage extends Serializable
/**
* Command to join the cluster.
*/
case object JoinCluster extends ClusterMessage
/**
* Represents the state of the cluster; cluster ring membership, ring convergence, meta data - all versioned by a vector clock.
*/
case class Gossip(
version: VectorClock = VectorClock(),
member: Address,
// sorted set of members with their status, sorted by name
members: SortedSet[Member] = SortedSet.empty[Member](Ordering.fromLessThan[Member](_.address.toString > _.address.toString)),
unavailableMembers: Set[Member] = Set.empty[Member],
// for ring convergence
seen: Map[Member, VectorClock] = Map.empty[Member, VectorClock],
// for handoff
//pendingChanges: Option[Vector[PendingPartitioningChange]] = None,
meta: Option[Map[String, Array[Byte]]] = None)
extends ClusterMessage // is a serializable cluster message
with Versioned // has a vector clock as version
/**
* Represents the address and the current status of a cluster member node.
*/
case class Member(address: Address, status: MemberStatus) extends ClusterMessage
/**
* Defines the current status of a cluster member node
*
* Can be one of: Joining, Up, Leaving, Exiting and Down.
*/
sealed trait MemberStatus extends ClusterMessage with Versioned
object MemberStatus {
case class Joining(version: VectorClock = VectorClock()) extends MemberStatus
case class Up(version: VectorClock = VectorClock()) extends MemberStatus
case class Leaving(version: VectorClock = VectorClock()) extends MemberStatus
case class Exiting(version: VectorClock = VectorClock()) extends MemberStatus
case class Down(version: VectorClock = VectorClock()) extends MemberStatus
}
// sealed trait PendingPartitioningStatus
// object PendingPartitioningStatus {
// case object Complete extends PendingPartitioningStatus
// case object Awaiting extends PendingPartitioningStatus
// }
// case class PendingPartitioningChange(
// owner: Address,
// nextOwner: Address,
// changes: Vector[VNodeMod],
// status: PendingPartitioningStatus)
final class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor {
val log = Logging(system, "ClusterDaemon")
def receive = {
case JoinCluster ⇒ sender ! gossiper.latestGossip
case gossip: Gossip ⇒
gossiper.tell(gossip)
case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]")
}
}
/**
* This module is responsible for gossiping cluster information. The abstraction maintains the list of live
* and dead members. Periodically, i.e. every 1 second, this module chooses a random member and initiates a round
* of gossip with it. Whenever it gets gossip updates it updates the failure detector with the liveness
* information.
* <p/>
* During each of these runs the member initiates a gossip exchange according to the following rules (as defined in the
* Cassandra documentation [http://wiki.apache.org/cassandra/ArchitectureGossip]):
* <pre>
* 1) Gossip to random live member (if any)
* 2) Gossip to random unreachable member with certain probability depending on number of unreachable and live members
* 3) If the member gossiped to at (1) was not seed, or the number of live members is less than number of seeds,
* gossip to random seed with certain probability depending on number of unreachable, seed and live members.
* </pre>
*/
case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) {
/**
* Represents the state for this Gossiper. Implemented using optimistic lockless concurrency,
* all state is represented by this immutable case class and managed by an AtomicReference.
*/
private case class State(
currentGossip: Gossip,
memberMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener])
val remoteSettings = new RemoteSettings(system.settings.config, system.name)
val clusterSettings = new ClusterSettings(system.settings.config, system.name)
val protocol = "akka" // TODO should this be hardcoded?
val address = remote.transport.address
val memberFingerprint = address.##
val initialDelayForGossip = clusterSettings.InitialDelayForGossip
val gossipFrequency = clusterSettings.GossipFrequency
implicit val seedNodeConnectionTimeout = clusterSettings.SeedNodeConnectionTimeout
implicit val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout)
// seed members
private val seeds: Set[Member] = {
if (clusterSettings.SeedNodes.isEmpty) throw new ConfigurationException(
"At least one seed node must be defined in the configuration [akka.cluster.seed-nodes]")
else clusterSettings.SeedNodes map (address ⇒ Member(address, MemberStatus.Up()))
}
private val serialization = remote.serialization
private val failureDetector = new AccrualFailureDetector(system, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize)
private val isRunning = new AtomicBoolean(true)
private val log = Logging(system, "Gossiper")
private val random = SecureRandom.getInstance("SHA1PRNG")
// Is it right to put this guy under the /system path or should we have a top-level /cluster or something else...?
private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster")
private val state = new AtomicReference[State](State(currentGossip = newGossip()))
// FIXME manage connections in some other way so we can delete the RemoteConnectionManager (SINCE IT SUCKS!!!)
private val connectionManager = new RemoteConnectionManager(system, remote, failureDetector, Map.empty[Address, ActorRef])
log.info("Starting cluster Gossiper...")
// join the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip)
joinCluster(Deadline(clusterSettings.MaxTimeToRetryJoiningCluster))
// start periodic gossip and cluster scrutinization
val initateGossipCanceller = system.scheduler.schedule(initialDelayForGossip, gossipFrequency)(initateGossip())
val scrutinizeCanceller = system.scheduler.schedule(initialDelayForGossip, gossipFrequency)(scrutinize())
/**
* Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks.
*/
def shutdown() {
if (isRunning.compareAndSet(true, false)) {
log.info("Shutting down Gossiper for [{}]...", address)
try connectionManager.shutdown() finally {
try system.stop(clusterDaemon) finally {
try initateGossipCanceller.cancel() finally {
try scrutinizeCanceller.cancel() finally {
log.info("Gossiper for [{}] is shut down", address)
}
}
}
}
}
}
def latestGossip: Gossip = state.get.currentGossip
/**
* Tell the gossiper some gossip.
*/
//@tailrec
final def tell(newGossip: Gossip) {
val gossipingNode = newGossip.member
failureDetector heartbeat gossipingNode // update heartbeat in failure detector
// FIXME all below here is WRONG - redesign with cluster convergence in mind
// val oldState = state.get
// println("-------- NEW VERSION " + newGossip)
// println("-------- OLD VERSION " + oldState.currentGossip)
// val latestGossip = VectorClock.latestVersionOf(newGossip, oldState.currentGossip)
// println("-------- WINNING VERSION " + latestGossip)
// val latestAvailableNodes = latestGossip.members
// val latestUnavailableNodes = latestGossip.unavailableMembers
// println("=======>>> gossipingNode: " + gossipingNode)
// println("=======>>> latestAvailableNodes: " + latestAvailableNodes)
// if (!(latestAvailableNodes contains gossipingNode) && !(latestUnavailableNodes contains gossipingNode)) {
// println("-------- NEW NODE")
// // we have a new member
// val newGossip = latestGossip copy (availableNodes = latestAvailableNodes + gossipingNode)
// val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip))
// println("--------- new GOSSIP " + newGossip.members)
// println("--------- new STATE " + newState)
// // if we won the race then update else try again
// if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur
// else {
// println("---------- WON RACE - setting state")
// // create connections for all new members in the latest gossip
// (latestAvailableNodes + gossipingNode) foreach { member
// setUpConnectionToNode(member)
// oldState.memberMembershipChangeListeners foreach (_ memberConnected member) // notify listeners about the new members
// }
// }
// } else if (latestUnavailableNodes contains gossipingNode) {
// // gossip from an old former dead member
// val newUnavailableMembers = latestUnavailableNodes - gossipingNode
// val newMembers = latestAvailableNodes + gossipingNode
// val newGossip = latestGossip copy (availableNodes = newMembers, unavailableNodes = newUnavailableMembers)
// val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip))
// // if we won the race then update else try again
// if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur
// else oldState.memberMembershipChangeListeners foreach (_ memberConnected gossipingNode) // notify listeners on successful update of state
// }
}
/**
* Registers a listener to subscribe to cluster membership changes.
*/
@tailrec
final def registerListener(listener: NodeMembershipChangeListener) {
val oldState = state.get
val newListeners = oldState.memberMembershipChangeListeners + listener
val newState = oldState copy (memberMembershipChangeListeners = newListeners)
if (!state.compareAndSet(oldState, newState)) registerListener(listener) // recur
}
/**
* Unregisters a listener from cluster membership changes.
*/
@tailrec
final def unregisterListener(listener: NodeMembershipChangeListener) {
val oldState = state.get
val newListeners = oldState.memberMembershipChangeListeners - listener
val newState = oldState copy (memberMembershipChangeListeners = newListeners)
if (!state.compareAndSet(oldState, newState)) unregisterListener(listener) // recur
}
/**
* Sets up remote connections to all the members in the argument list.
*/
private def connectToNodes(members: Seq[Member]) {
members foreach { member ⇒
setUpConnectionToNode(member)
state.get.memberMembershipChangeListeners foreach (_ memberConnected member) // notify listeners about the new members
}
}
// FIXME should shuffle the list randomly before starting to traverse it, to avoid every member connecting to the same member first
@tailrec
final private def connectToRandomNodeOf(members: Seq[Member]): ActorRef = {
members match {
case member :: rest ⇒
setUpConnectionToNode(member) match {
case Some(connection) ⇒ connection
case None ⇒ connectToRandomNodeOf(rest) // recur if
}
case Nil ⇒
throw new RemoteConnectionException(
"Could not establish connection to any of the members in the argument list")
}
}
/**
* Joins the cluster by connecting to one of the seed members and retrieving the current cluster state (Gossip).
*/
private def joinCluster(deadline: Deadline) {
val seedNodes = seedNodesWithoutMyself // filter out myself
if (!seedNodes.isEmpty) { // if we have seed members to contact
connectToNodes(seedNodes)
try {
log.info("Trying to join cluster through one of the seed members [{}]", seedNodes.mkString(", "))
Await.result(connectToRandomNodeOf(seedNodes) ? JoinCluster, seedNodeConnectionTimeout) match {
case initialGossip: Gossip ⇒
// just sets/overwrites the state/gossip regardless of what it was before
// since it should be treated as the initial state
state.set(state.get copy (currentGossip = initialGossip))
log.debug("Received initial gossip [{}] from seed member", initialGossip)
case unknown ⇒
throw new IllegalStateException("Expected initial gossip from seed, received [" + unknown + "]")
}
} catch {
case e: Exception ⇒
log.error(
"Could not join cluster through any of the seed members - retrying for another {} seconds",
deadline.timeLeft.toSeconds)
// retry joining the cluster unless
// 1. Gossiper is shut down
// 2. The connection time window has expired
if (isRunning.get) {
if (deadline.timeLeft.toMillis > 0) joinCluster(deadline) // recur
else throw new RemoteConnectionException(
"Could not join cluster (any of the seed members) - giving up after trying for " +
deadline.time.toSeconds + " seconds")
}
}
}
}
/**
* Initiates a new round of gossip.
*/
private def initateGossip() {
val oldState = state.get
val oldGossip = oldState.currentGossip
val oldMembers = oldGossip.members
val oldMembersSize = oldMembers.size
val oldUnavailableMembers = oldGossip.unavailableMembers
val oldUnavailableMembersSize = oldUnavailableMembers.size
// 1. gossip to alive members
val gossipedToSeed =
if (oldUnavailableMembersSize > 0) gossipToRandomNodeOf(oldMembers)
else false
// 2. gossip to dead members
if (oldUnavailableMembersSize > 0) {
val probability: Double = oldUnavailableMembersSize / (oldMembersSize + 1)
if (random.nextDouble() < probability) gossipToRandomNodeOf(oldUnavailableMembers)
}
// 3. gossip to a seed for facilitating partition healing
if ((!gossipedToSeed || oldMembersSize < 1) && (seeds.head != address)) {
if (oldMembersSize == 0) gossipToRandomNodeOf(seeds)
else {
val probability = 1.0 / oldMembersSize + oldUnavailableMembersSize
if (random.nextDouble() <= probability) gossipToRandomNodeOf(seeds)
}
}
}
/**
* Gossips to a random member in the set of members passed in as argument.
*
* @return 'true' if it gossiped to a "seed" member.
*/
private def gossipToRandomNodeOf(members: Set[Member]): Boolean = {
val peers = members filter (_.address != address) // filter out myself
val peer = selectRandomNode(peers)
val oldState = state.get
val oldGossip = oldState.currentGossip
// if connection can't be established/found => ignore it since the failure detector will take care of the potential problem
setUpConnectionToNode(peer) foreach { _ ! newGossip }
seeds exists (peer == _)
}
/**
* Scrutinizes the cluster; marks members detected as unavailable by the failure detector, and notifies all listeners
* of the change in the cluster membership.
*/
@tailrec
final private def scrutinize() {
val oldState = state.get
val oldGossip = oldState.currentGossip
val oldMembers = oldGossip.members
val oldUnavailableMembers = oldGossip.unavailableMembers
val newlyDetectedUnavailableMembers = oldMembers filterNot (member ⇒ failureDetector.isAvailable(member.address))
if (!newlyDetectedUnavailableMembers.isEmpty) { // we have newly detected members marked as unavailable
val newMembers = oldMembers diff newlyDetectedUnavailableMembers
val newUnavailableMembers = oldUnavailableMembers ++ newlyDetectedUnavailableMembers
val newGossip = oldGossip copy (members = newMembers, unavailableMembers = newUnavailableMembers)
val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip))
// if we won the race then update else try again
if (!state.compareAndSet(oldState, newState)) scrutinize() // recur
else {
// notify listeners on successful update of state
for {
deadNode ← newUnavailableMembers
listener ← oldState.memberMembershipChangeListeners
} listener memberDisconnected deadNode
}
}
}
private def setUpConnectionToNode(member: Member): Option[ActorRef] = {
val address = member.address
try {
Some(
connectionManager.putIfAbsent(
address,
() ⇒ system.actorFor(RootActorPath(Address(protocol, system.name)) / "system" / "cluster")))
} catch {
case e: Exception ⇒ None
}
}
private def newGossip(): Gossip = Gossip(member = address)
private def incrementVersionForGossip(from: Gossip): Gossip = {
val newVersion = from.version.increment(memberFingerprint, newTimestamp)
from copy (version = newVersion)
}
private def seedNodesWithoutMyself: List[Member] = seeds.filter(_.address != address).toList
private def selectRandomNode(members: Set[Member]): Member = members.toList(random.nextInt(members.size))
}
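// A minimal sketch of reacting to membership changes through the NodeMembershipChangeListener
// hook above; the `gossiper` value is assumed to already exist, and the println bodies are
// placeholders for real handling code.
class LoggingMembershipListener extends NodeMembershipChangeListener {
  def memberConnected(member: Member) { println("Member connected: " + member.address) }
  def memberDisconnected(member: Member) { println("Member disconnected: " + member.address) }
}
// val listener = new LoggingMembershipListener
// gossiper.registerListener(listener)
// ... later:
// gossiper.unregisterListener(listener)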

View file

@ -1,105 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster
import akka.config.Config
import Config._
import akka.util._
import Helpers._
import akka.actor._
import Actor._
import akka.event.EventHandler
import akka.cluster.zookeeper._
import org.apache.zookeeper._
import org.apache.zookeeper.Watcher.Event._
import org.apache.zookeeper.data.Stat
import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener }
import org.I0Itec.zkclient._
import org.I0Itec.zkclient.serialize._
import org.I0Itec.zkclient.exception._
import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference }
object LocalCluster {
val clusterDirectory = config.getString("akka.cluster.log-directory", "_akka_cluster")
val clusterDataDirectory = clusterDirectory + "/data"
val clusterLogDirectory = clusterDirectory + "/log"
val clusterName = Config.clusterName
val nodename = Config.nodename
val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181")
val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt
val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt
val defaultZooKeeperSerializer = new SerializableSerializer
val zkServer = new AtomicReference[Option[ZkServer]](None)
lazy val zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer)
/**
* Looks up the local hostname.
*/
def lookupLocalhostName = NetworkUtil.getLocalhostName
/**
* Starts up a local ZooKeeper server. Should only be used for testing purposes.
*/
def startLocalCluster(): ZkServer =
startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, 5000)
/**
* Starts up a local ZooKeeper server. Should only be used for testing purposes.
*/
def startLocalCluster(port: Int, tickTime: Int): ZkServer =
startLocalCluster(clusterDataDirectory, clusterLogDirectory, port, tickTime)
/**
* Starts up a local ZooKeeper server. Should only be used for testing purposes.
*/
def startLocalCluster(tickTime: Int): ZkServer =
startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, tickTime)
/**
* Starts up a local ZooKeeper server. Should only be used for testing purposes.
*/
def startLocalCluster(dataPath: String, logPath: String): ZkServer =
startLocalCluster(dataPath, logPath, 2181, 500)
/**
* Starts up a local ZooKeeper server. Should only be used for testing purposes.
*/
def startLocalCluster(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = {
try {
val zk = AkkaZooKeeper.startLocalServer(dataPath, logPath, port, tickTime)
zkServer.set(Some(zk))
zk
} catch {
case e: Throwable ⇒
EventHandler.error(e, this, "Could not start local ZooKeeper cluster")
throw e
}
}
/**
* Shut down the local ZooKeeper server.
*/
def shutdownLocalCluster() {
withPrintStackTraceOnError {
EventHandler.debug(this, "Shuts down local cluster")
zkServer.getAndSet(None).foreach(_.shutdown())
}
}
def createQueue(rootPath: String, blocking: Boolean = true) =
new ZooKeeperQueue(zkClient, rootPath, blocking)
def barrier(name: String, count: Int): ZooKeeperBarrier =
ZooKeeperBarrier(zkClient, clusterName, name, nodename, count)
def barrier(name: String, count: Int, timeout: Duration): ZooKeeperBarrier =
ZooKeeperBarrier(zkClient, clusterName, name, nodename, count, timeout)
}

View file

@ -2,9 +2,10 @@
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.remote
package akka.cluster
import akka.actor._
import akka.remote._
import akka.routing._
import akka.event.Logging
@ -19,6 +20,7 @@ import java.util.concurrent.atomic.AtomicReference
class RemoteConnectionManager(
system: ActorSystemImpl,
remote: RemoteActorRefProvider,
failureDetector: AccrualFailureDetector,
initialConnections: Map[Address, ActorRef] = Map.empty[Address, ActorRef])
extends ConnectionManager {
@ -30,8 +32,6 @@ class RemoteConnectionManager(
def iterable: Iterable[ActorRef] = connections.values
}
def failureDetector = remote.failureDetector
private val state: AtomicReference[State] = new AtomicReference[State](newState())
/**
@ -145,6 +145,6 @@ class RemoteConnectionManager(
}
}
private[remote] def newConnection(remoteAddress: Address, actorPath: ActorPath) =
private[cluster] def newConnection(remoteAddress: Address, actorPath: ActorPath) =
new RemoteActorRef(remote, remote.transport, actorPath, Nobody)
}

View file

@ -1,604 +0,0 @@
package akka.cluster
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
import org.apache.bookkeeper.client.{ BookKeeper, LedgerHandle, LedgerEntry, BKException, AsyncCallback }
import org.apache.zookeeper.CreateMode
import org.I0Itec.zkclient.exception._
import akka.AkkaException
import akka.config._
import Config._
import akka.util._
import akka.actor._
import DeploymentConfig.ReplicationScheme
import akka.event.EventHandler
import akka.dispatch.{ DefaultPromise, Promise, MessageInvocation }
import akka.cluster.zookeeper._
import akka.serialization.ActorSerialization._
import akka.serialization.Compression.LZF
import java.util.Enumeration
// FIXME allow user to choose dynamically between 'async' and 'sync' tx logging (asyncAddEntry(byte[] data, AddCallback cb, Object ctx))
// FIXME clean up old entries in log after doing a snapshot
class ReplicationException(message: String, cause: Throwable = null) extends AkkaException(message) {
def this(msg: String) = this(msg, null)
}
/**
* A TransactionLog makes chunks of data durable.
*/
class TransactionLog private (
ledger: LedgerHandle,
val id: String,
val isAsync: Boolean,
replicationScheme: ReplicationScheme) {
import TransactionLog._
val logId = ledger.getId
val txLogPath = transactionLogPath(id)
val snapshotPath = txLogPath + "/snapshot"
private val isOpen = new Switch(true)
/**
* Record an Actor message invocation.
*
* @param invocation the MessageInvocation to record
* @param actorRef the LocalActorRef that received the message.
* @throws ReplicationException if the TransactionLog already is closed.
*/
def recordEntry(invocation: MessageInvocation, actorRef: LocalActorRef) {
val entryId = ledger.getLastAddPushed + 1
val needsSnapshot = entryId != 0 && (entryId % snapshotFrequency) == 0
if (needsSnapshot) {
//todo: could it be that the message is never persisted when a snapshot is added?
val bytes = toBinary(actorRef, false, replicationScheme)
recordSnapshot(bytes)
} else {
val bytes = MessageSerializer.serialize(invocation.message.asInstanceOf[AnyRef]).toByteArray
recordEntry(bytes)
}
}
/**
* Record an entry.
*
* @param entry the entry in byte form to record.
* @throws ReplicationException if the TransactionLog already is closed.
*/
def recordEntry(entry: Array[Byte]) {
if (isOpen.isOn) {
val entryBytes =
if (shouldCompressData) LZF.compress(entry)
else entry
try {
if (isAsync) {
ledger.asyncAddEntry(
entryBytes,
new AsyncCallback.AddCallback {
def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, entryId: Long, ctx: AnyRef) {
handleReturnCode(returnCode)
EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId))
}
},
null)
} else {
handleReturnCode(ledger.addEntry(entryBytes))
val entryId = ledger.getLastAddPushed
EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId))
}
} catch {
case e: Throwable ⇒ handleError(e)
}
} else transactionClosedError
}
/**
* Record a snapshot.
*
* @param snapshot the snapshot in byteform to record.
* @throws ReplicationException if the TransactionLog already is closed.
*/
def recordSnapshot(snapshot: Array[Byte]) {
if (isOpen.isOn) {
val snapshotBytes =
if (shouldCompressData) LZF.compress(snapshot)
else snapshot
try {
if (isAsync) {
ledger.asyncAddEntry(
snapshotBytes,
new AsyncCallback.AddCallback {
def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, snapshotId: Long, ctx: AnyRef) {
handleReturnCode(returnCode)
EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId))
storeSnapshotMetaDataInZooKeeper(snapshotId)
}
},
null)
} else {
//todo: could this be racy, since writing the snapshot itself and storing the snapshot id is not
//an atomic operation?
//first store the snapshot.
handleReturnCode(ledger.addEntry(snapshotBytes))
val snapshotId = ledger.getLastAddPushed
//this is the location where all previous entries can be removed.
//TODO: how to remove data?
EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId))
//and now store the snapshot metadata.
storeSnapshotMetaDataInZooKeeper(snapshotId)
}
} catch {
case e: Throwable ⇒ handleError(e)
}
} else transactionClosedError
}
/**
* Get all the entries for this transaction log.
*
* @throws ReplicationException if the TransactionLog already is closed.
*/
def entries: Vector[Array[Byte]] = entriesInRange(0, ledger.getLastAddConfirmed)
/**
* Get the latest snapshot and all subsequent entries from this snapshot.
*/
def latestSnapshotAndSubsequentEntries: (Option[Array[Byte]], Vector[Array[Byte]]) = {
latestSnapshotId match {
case Some(snapshotId) ⇒
EventHandler.debug(this, "Reading entries from snapshot id [%s] for log [%s]".format(snapshotId, logId))
val cursor = snapshotId + 1
val lastIndex = ledger.getLastAddConfirmed
val snapshot = Some(entriesInRange(snapshotId, snapshotId).head)
val entries =
if ((cursor - lastIndex) == 0) Vector.empty[Array[Byte]]
else entriesInRange(cursor, lastIndex)
(snapshot, entries)
case None ⇒
(None, entries)
}
}
/**
* Get a range of entries from 'from' to 'to' for this transaction log.
*
* @param from the first index of the range.
* @param to the last index of the range (inclusive).
* @return a Vector containing byte arrays. Each element in the vector is a record.
* @throws IllegalArgumentException if 'from' or 'to' is negative, or if 'from' is bigger than 'to'.
* @throws ReplicationException if the TransactionLog already is closed.
*/
def entriesInRange(from: Long, to: Long): Vector[Array[Byte]] = if (isOpen.isOn) {
try {
if (from < 0) throw new IllegalArgumentException("'from' index can't be negative [" + from + "]")
if (to < 0) throw new IllegalArgumentException("'to' index can't be negative [" + to + "]")
if (to < from) throw new IllegalArgumentException("'to' index can't be smaller than 'from' index [" + from + "," + to + "]")
EventHandler.debug(this, "Reading entries [%s -> %s] for log [%s]".format(from, to, logId))
if (isAsync) {
val future = Promise[Vector[Array[Byte]]]()
ledger.asyncReadEntries(
from, to,
new AsyncCallback.ReadCallback {
def readComplete(returnCode: Int, ledgerHandle: LedgerHandle, enumeration: Enumeration[LedgerEntry], ctx: AnyRef) {
val future = ctx.asInstanceOf[Promise[Vector[Array[Byte]]]]
val entries = toByteArrays(enumeration)
if (returnCode == BKException.Code.OK) future.success(entries)
else future.failure(BKException.create(returnCode))
}
},
future)
await(future)
} else {
toByteArrays(ledger.readEntries(from, to))
}
} catch {
case e: Throwable ⇒ handleError(e)
}
} else transactionClosedError
/**
* Get the last entry written to this transaction log.
*
* Returns -1 if there has never been an entry.
*/
def latestEntryId: Long = ledger.getLastAddConfirmed
/**
* Get the id for the last snapshot written to this transaction log.
*/
def latestSnapshotId: Option[Long] = {
try {
val snapshotId = zkClient.readData(snapshotPath).asInstanceOf[Long]
EventHandler.debug(this, "Retrieved latest snapshot id [%s] from transaction log [%s]".format(snapshotId, logId))
Some(snapshotId)
} catch {
case e: ZkNoNodeException ⇒ None
case e: Throwable ⇒ handleError(e)
}
}
/**
* Deletes this transaction log, removing all entries as well as all metadata.
*
* TODO: Behavior unclear what happens when already deleted (what happens to the ledger).
* TODO: Behavior unclear what happens when already closed.
*/
def delete() {
if (isOpen.isOn) {
EventHandler.debug(this, "Deleting transaction log [%s]".format(logId))
try {
if (isAsync) {
bookieClient.asyncDeleteLedger(
logId,
new AsyncCallback.DeleteCallback {
def deleteComplete(returnCode: Int, ctx: AnyRef) {
(returnCode)
}
},
null)
} else {
bookieClient.deleteLedger(logId)
}
//also remove everything else that belongs to this TransactionLog.
zkClient.delete(snapshotPath)
zkClient.delete(txLogPath)
} catch {
case e: Throwable ⇒ handleError(e)
}
}
}
/**
* Close this transaction log.
*
* If already closed, the call is ignored.
*/
def close() {
isOpen switchOff {
EventHandler.debug(this, "Closing transaction log [%s]".format(logId))
try {
if (isAsync) {
ledger.asyncClose(
new AsyncCallback.CloseCallback {
def closeComplete(
returnCode: Int,
ledgerHandle: LedgerHandle,
ctx: AnyRef) {
handleReturnCode(returnCode)
}
},
null)
} else {
ledger.close()
}
} catch {
case e: Throwable ⇒ handleError(e)
}
}
}
private def toByteArrays(enumeration: Enumeration[LedgerEntry]): Vector[Array[Byte]] = {
var entries = Vector[Array[Byte]]()
while (enumeration.hasMoreElements) {
val bytes = enumeration.nextElement.getEntry
val entry =
if (shouldCompressData) LZF.uncompress(bytes)
else bytes
entries = entries :+ entry
}
entries
}
private def storeSnapshotMetaDataInZooKeeper(snapshotId: Long) {
if (isOpen.isOn) {
try {
zkClient.create(snapshotPath, null, CreateMode.PERSISTENT)
} catch {
case e: ZkNodeExistsException ⇒ {} // do nothing
case e: Throwable ⇒ handleError(e)
}
try {
zkClient.writeData(snapshotPath, snapshotId)
} catch {
case e: Throwable ⇒
handleError(new ReplicationException(
"Could not store transaction log snapshot meta-data in ZooKeeper for UUID [" + id + "]"))
}
EventHandler.debug(this, "Writing snapshot [%s] to log [%s]".format(snapshotId, logId))
} else transactionClosedError
}
private def handleReturnCode(block: Long) {
val code = block.toInt
if (code == BKException.Code.OK) {} // all fine
else handleError(BKException.create(code))
}
private def transactionClosedError: Nothing = {
handleError(new ReplicationException(
"Transaction log [" + logId +
"] is closed. You need to open up new a new one with 'TransactionLog.logFor(id)'"))
}
}
/**
* TODO: Documentation.
*/
object TransactionLog {
val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181")
val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt
val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt
val digestType = config.getString("akka.cluster.replication.digest-type", "CRC32") match {
case "CRC32" BookKeeper.DigestType.CRC32
case "MAC" BookKeeper.DigestType.MAC
case unknown throw new ConfigurationException(
"akka.cluster.replication.digest-type is invalid [" + unknown + "], must be either 'CRC32' or 'MAC'")
}
val password = config.getString("akka.cluster.replication.password", "secret").getBytes("UTF-8")
val ensembleSize = config.getInt("akka.cluster.replication.ensemble-size", 3)
val quorumSize = config.getInt("akka.cluster.replication.quorum-size", 2)
val snapshotFrequency = config.getInt("akka.cluster.replication.snapshot-frequency", 1000)
val timeout = Duration(config.getInt("akka.cluster.replication.timeout", 30), TIME_UNIT).toMillis
val shouldCompressData = config.getBool("akka.remote.use-compression", false)
private[akka] val transactionLogNode = "/transaction-log-ids"
private val isConnected = new Switch(false)
@volatile
private[akka] var bookieClient: BookKeeper = _
@volatile
private[akka] var zkClient: AkkaZkClient = _
private[akka] def apply(
ledger: LedgerHandle,
id: String,
isAsync: Boolean,
replicationScheme: ReplicationScheme) =
new TransactionLog(ledger, id, isAsync, replicationScheme)
/**
* Starts up the transaction log.
*/
def start() {
isConnected switchOn {
bookieClient = new BookKeeper(zooKeeperServers)
zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout)
try {
zkClient.create(transactionLogNode, null, CreateMode.PERSISTENT)
} catch {
case e: ZkNodeExistsException ⇒ {} // do nothing
case e: Throwable ⇒ handleError(e)
}
EventHandler.info(this,
("Transaction log service started with" +
"\n\tdigest type [%s]" +
"\n\tensemble size [%s]" +
"\n\tquorum size [%s]" +
"\n\tlogging time out [%s]").format(
digestType,
ensembleSize,
quorumSize,
timeout))
}
}
/**
* Shuts down the transaction log.
*/
def shutdown() {
isConnected switchOff {
try {
EventHandler.info(this, "Shutting down transaction log...")
zkClient.close()
bookieClient.halt()
EventHandler.info(this, "Transaction log shut down successfully")
} catch {
case e: Throwable ⇒ handleError(e)
}
}
}
def transactionLogPath(id: String): String = transactionLogNode + "/" + id
/**
* Checks if a TransactionLog for the given id already exists.
*/
def exists(id: String): Boolean = {
val txLogPath = transactionLogPath(id)
zkClient.exists(txLogPath)
}
/**
* Creates a new transaction log for the 'id' specified. If a TransactionLog already exists for the id,
* it will be overwritten.
*/
def newLogFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = {
val txLogPath = transactionLogPath(id)
val ledger = try {
if (exists(id)) {
//if it exists, we need to delete it first. This gives it the overwrite semantics we are looking for.
try {
val ledger = bookieClient.createLedger(ensembleSize, quorumSize, digestType, password)
val txLog = TransactionLog(ledger, id, false, null)
txLog.delete()
txLog.close()
} catch {
case e: Throwable ⇒ handleError(e)
}
}
val future = Promise[LedgerHandle]()
if (isAsync) {
bookieClient.asyncCreateLedger(
ensembleSize, quorumSize, digestType, password,
new AsyncCallback.CreateCallback {
def createComplete(
returnCode: Int,
ledgerHandle: LedgerHandle,
ctx: AnyRef) {
val future = ctx.asInstanceOf[Promise[LedgerHandle]]
if (returnCode == BKException.Code.OK) future.success(ledgerHandle)
else future.failure(BKException.create(returnCode))
}
},
future)
await(future)
} else {
bookieClient.createLedger(ensembleSize, quorumSize, digestType, password)
}
} catch {
case e: Throwable ⇒ handleError(e)
}
val logId = ledger.getId
try {
zkClient.create(txLogPath, null, CreateMode.PERSISTENT)
zkClient.writeData(txLogPath, logId)
logId //TODO: does this have any effect?
} catch {
case e: Throwable ⇒
bookieClient.deleteLedger(logId) // clean up
handleError(new ReplicationException(
"Could not store transaction log [" + logId +
"] meta-data in ZooKeeper for UUID [" + id + "]", e))
}
EventHandler.info(this, "Created new transaction log [%s] for UUID [%s]".format(logId, id))
TransactionLog(ledger, id, isAsync, replicationScheme)
}
/**
* Fetches an existing transaction log for the 'id' specified.
*
* @throws ReplicationException if the log with the given id doesn't exist.
*/
def logFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = {
val txLogPath = transactionLogPath(id)
val logId = try {
val logId = zkClient.readData(txLogPath).asInstanceOf[Long]
EventHandler.debug(this,
"Retrieved transaction log [%s] for UUID [%s]".format(logId, id))
logId
} catch {
case e: ZkNoNodeException ⇒
handleError(new ReplicationException(
"Transaction log for UUID [" + id + "] does not exist in ZooKeeper"))
case e: Throwable ⇒ handleError(e)
}
val ledger = try {
if (isAsync) {
val future = Promise[LedgerHandle]()
bookieClient.asyncOpenLedger(
logId, digestType, password,
new AsyncCallback.OpenCallback {
def openComplete(returnCode: Int, ledgerHandle: LedgerHandle, ctx: AnyRef) {
val future = ctx.asInstanceOf[Promise[LedgerHandle]]
if (returnCode == BKException.Code.OK) future.success(ledgerHandle)
else future.failure(BKException.create(returnCode))
}
},
future)
await(future)
} else {
bookieClient.openLedger(logId, digestType, password)
}
} catch {
case e: Throwable ⇒ handleError(e)
}
TransactionLog(ledger, id, isAsync, replicationScheme)
}
private[akka] def await[T](future: Promise[T]): T = {
future.await.value.get match {
case Right(result) => result
case Left(throwable) => handleError(throwable)
}
}
private[akka] def handleError(e: Throwable): Nothing = {
EventHandler.error(e, this, e.toString)
throw e
}
}
/**
* TODO: Documentation.
*/
object LocalBookKeeperEnsemble {
private val isRunning = new Switch(false)
//TODO: should probably come from the config file.
private val port = 5555
@volatile
private var localBookKeeper: LocalBookKeeper = _
/**
* Starts the LocalBookKeeperEnsemble.
*
* Call can safely be made when already started.
*
* This call will block until it is started.
*/
def start() {
isRunning switchOn {
EventHandler.info(this, "Starting up LocalBookKeeperEnsemble...")
localBookKeeper = new LocalBookKeeper(TransactionLog.ensembleSize)
localBookKeeper.runZookeeper(port)
localBookKeeper.initializeZookeper()
localBookKeeper.runBookies()
EventHandler.info(this, "LocalBookKeeperEnsemble started up successfully")
}
}
/**
* Shuts down the LocalBookKeeperEnsemble.
*
* Call can safely be made when already shut down.
*
* This call will block until the shutdown completes.
*/
def shutdown() {
isRunning switchOff {
EventHandler.info(this, "Shutting down LocalBookKeeperEnsemble...")
localBookKeeper.bs.foreach(_.shutdown()) // stop bookies
localBookKeeper.zkc.close() // stop zk client
localBookKeeper.zks.shutdown() // stop zk server
localBookKeeper.serverFactory.shutdown() // stop zk NIOServer
EventHandler.info(this, "LocalBookKeeperEnsemble shut down successfully")
}
}
}
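// A minimal usage sketch tying the pieces above together: start the local BookKeeper ensemble
// and the transaction log service, create a log, record two entries and read back the latest
// snapshot plus subsequent entries. The log id and payloads are illustrative, passing null as
// replication scheme mirrors the internal use above, and error handling is omitted.
object TransactionLogExample {
  def main(args: Array[String]) {
    LocalBookKeeperEnsemble.start() // testing only
    TransactionLog.start()
    try {
      val txLog = TransactionLog.newLogFor("example-actor-uuid", isAsync = false, replicationScheme = null)
      txLog.recordEntry("entry-1".getBytes("UTF-8"))
      txLog.recordEntry("entry-2".getBytes("UTF-8"))
      val (snapshot, entries) = txLog.latestSnapshotAndSubsequentEntries
      println("snapshot defined: " + snapshot.isDefined + ", entries read: " + entries.size)
      txLog.close()
    } finally {
      TransactionLog.shutdown()
      LocalBookKeeperEnsemble.shutdown()
    }
  }
}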

View file

@ -2,18 +2,39 @@
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.remote
package akka.cluster
import akka.AkkaException
class VectorClockException(message: String) extends AkkaException(message)
/**
* Trait to be extended by classes that wants to be versioned using a VectorClock.
*/
trait Versioned {
def version: VectorClock
}
/**
* Utility methods for comparing Versioned instances.
*/
object Versioned {
def latestVersionOf[T <: Versioned](versioned1: T, versioned2: T): T = {
(versioned1.version compare versioned2.version) match {
case VectorClock.Before ⇒ versioned2 // version 1 is BEFORE (older), use version 2
case VectorClock.After ⇒ versioned1 // version 1 is AFTER (newer), use version 1
case VectorClock.Concurrent ⇒ versioned1 // can't establish a causal relationship between versions => conflict - keeping version 1
}
}
}
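// A minimal sketch of resolving two versions of a value with Versioned.latestVersionOf; the
// Counter type is illustrative, and the increment(fingerprint, timestamp) call assumes the
// signature used by the Gossiper elsewhere in this commit.
object VersionedExample {
  case class Counter(value: Int, version: VectorClock = VectorClock()) extends Versioned

  val first = Counter(1)
  val second = Counter(2, first.version.increment("nodeA".##, System.currentTimeMillis))

  // second's clock dominates first's empty clock, so the newest version should be second
  val newest = Versioned.latestVersionOf(first, second)
}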
/**
* Representation of a Vector-based clock (counting clock), inspired by Lamport logical clocks.
*
* {{
* Reference:
* Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565.
* Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226
* 1) Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565.
* 2) Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226
* }}
*/
case class VectorClock(
versions: Vector[VectorClock.Entry] = Vector.empty[VectorClock.Entry],
@ -55,9 +76,11 @@ object VectorClock {
/**
* The result of comparing two vector clocks.
* Either:
* {{
* 1) v1 is BEFORE v2
* 2) v1 is AFTER v2
* 3) v1 happens CONCURRENTLY to v2
* }}
*/
sealed trait Ordering
case object Before extends Ordering
@ -74,9 +97,11 @@ object VectorClock {
/**
* Compare two vector clocks. The outcomes will be one of the following:
* <p/>
* {{
* 1. Clock 1 is BEFORE clock 2 if there exists an i such that c1(i) <= c2(i) and there does not exist a j such that c1(j) > c2(j).
* 2. Clock 1 is CONCURRENT to clock 2 if there exists an i, j such that c1(i) < c2(i) and c1(j) > c2(j).
* 3. Clock 1 is AFTER clock 2 otherwise.
* }}
*
* @param v1 The first VectorClock
* @param v2 The second VectorClock

View file

@ -1,226 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.metrics
import akka.cluster._
import Cluster._
import akka.cluster.zookeeper._
import akka.actor._
import Actor._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import java.util.concurrent.{ ConcurrentHashMap, ConcurrentSkipListSet }
import java.util.concurrent.atomic.AtomicReference
import akka.util.{ Duration, Switch }
import akka.util.Helpers._
import akka.util.duration._
import org.I0Itec.zkclient.exception.ZkNoNodeException
import akka.event.EventHandler
/*
* Instance of the metrics manager running on the node. To keep performance up, metrics of all the
* nodes in the cluster are cached internally, and refreshed from monitoring MBeans / Sigar (when it's the local node),
* or ZooKeeper (if it's metrics of all the nodes in the cluster) after a specified timeout -
* <code>metricsRefreshTimeout</code>.
* <code>metricsRefreshTimeout</code> defaults to 2 seconds, and can be declaratively defined through
* akka.conf:
*
* @example {{{
* akka.cluster.metrics-refresh-timeout = 2
* }}}
*/
class LocalNodeMetricsManager(zkClient: AkkaZkClient, private val metricsRefreshTimeout: Duration)
extends NodeMetricsManager {
/*
* Provides metrics of the system that the node is running on, through monitoring MBeans, Hyperic Sigar
* and other systems
*/
lazy private val metricsProvider = SigarMetricsProvider(refreshTimeout.toMillis.toInt) fold ((thrw) ⇒ {
EventHandler.warning(this, """Hyperic Sigar library failed to load due to %s: %s.
All the metrics will be retrieved from monitoring MBeans, and may be incorrect on some platforms.
In order to get better metrics, please put "sigar.jar" on the classpath, and add the platform-specific native library to "java.library.path"."""
.format(thrw.getClass.getName, thrw.getMessage))
new JMXMetricsProvider
},
sigar ⇒ sigar)
/*
* Metrics of all nodes in the cluster
*/
private val localNodeMetricsCache = new ConcurrentHashMap[String, NodeMetrics]
@volatile
private var _refreshTimeout = metricsRefreshTimeout
/*
* Plugged monitors (both local and cluster-wide)
*/
private val alterationMonitors = new ConcurrentSkipListSet[MetricsAlterationMonitor]
private val _isRunning = new Switch(false)
/*
* If the value is <code>true</code>, the metrics manager is started and running; otherwise it is stopped
*/
def isRunning = _isRunning.isOn
/*
* Starts metrics manager. When metrics manager is started, it refreshes cache from ZooKeeper
* after <code>refreshTimeout</code>, and invokes plugged monitors
*/
def start() = {
_isRunning.switchOn { refresh() }
this
}
private[cluster] def metricsForNode(nodeName: String): String = "%s/%s".format(node.NODE_METRICS, nodeName)
/*
* Adds monitor that reacts, when specific conditions are satisfied
*/
def addMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors add monitor
def removeMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors remove monitor
def refreshTimeout_=(newValue: Duration) = _refreshTimeout = newValue
/*
* Timeout after which metrics, cached in the metrics manager, will be refreshed from ZooKeeper
*/
def refreshTimeout = _refreshTimeout
/*
* Stores metrics of the node in ZooKeeper
*/
private[akka] def storeMetricsInZK(metrics: NodeMetrics) = {
val metricsPath = metricsForNode(metrics.nodeName)
if (zkClient.exists(metricsPath)) {
zkClient.writeData(metricsPath, metrics)
} else {
ignore[ZkNoNodeException](zkClient.createEphemeral(metricsPath, metrics))
}
}
/*
* Gets metrics of the node from ZooKeeper
*/
private[akka] def getMetricsFromZK(nodeName: String) = {
zkClient.readData[NodeMetrics](metricsForNode(nodeName))
}
/*
* Removes metrics of the node from the local cache and ZooKeeper
*/
def removeNodeMetrics(nodeName: String) = {
val metricsPath = metricsForNode(nodeName)
if (zkClient.exists(metricsPath)) {
ignore[ZkNoNodeException](zkClient.delete(metricsPath))
}
localNodeMetricsCache.remove(nodeName)
}
/*
* Gets metrics of a local node directly from JMX monitoring beans/Hyperic Sigar
*/
def getLocalMetrics = metricsProvider.getLocalMetrics
/*
* Gets metrics of the node specified by the name. If <code>useCached</code> is true (default value),
* the metrics snapshot is taken from the local cache; otherwise, it's retrieved from ZooKeeper
*/
def getMetrics(nodeName: String, useCached: Boolean = true): Option[NodeMetrics] =
if (useCached)
Option(localNodeMetricsCache.get(nodeName))
else
try {
Some(getMetricsFromZK(nodeName))
} catch {
case ex: ZkNoNodeException ⇒ None
}
/*
* Return metrics of all nodes in the cluster from ZooKeeper
*/
private[akka] def getAllMetricsFromZK: Map[String, NodeMetrics] = {
val metricsPaths = zkClient.getChildren(node.NODE_METRICS).toList.toArray.asInstanceOf[Array[String]]
metricsPaths.flatMap { nodeName ⇒ getMetrics(nodeName, false).map((nodeName, _)) } toMap
}
/*
* Gets cached metrics of all nodes in the cluster
*/
def getAllMetrics: Array[NodeMetrics] = localNodeMetricsCache.values.asScala.toArray
/*
* Refreshes locally cached metrics from ZooKeeper, and invokes plugged monitors
*/
private[akka] def refresh() {
storeMetricsInZK(getLocalMetrics)
refreshMetricsCacheFromZK()
if (isRunning) {
Scheduler.schedule({ () ⇒ refresh() }, refreshTimeout.length, refreshTimeout.length, refreshTimeout.unit)
invokeMonitors()
}
}
/*
* Refreshes metrics manager cache from ZooKeeper
*/
private def refreshMetricsCacheFromZK() {
val allMetricsFromZK = getAllMetricsFromZK
localNodeMetricsCache.keySet.foreach { key ⇒
if (!allMetricsFromZK.contains(key))
localNodeMetricsCache.remove(key)
}
// RACY: metrics for the node might have been removed both from ZK and local cache by the moment,
// but will be re-cached, since they're still present in allMetricsFromZK snapshot. Not important, because
// cache will be fixed soon, at the next iteration of refresh
allMetricsFromZK map {
case (node, metrics) ⇒
localNodeMetricsCache.put(node, metrics)
}
}
/*
* Invokes monitors with the cached metrics
*/
private def invokeMonitors(): Unit = if (!alterationMonitors.isEmpty) {
// RACY: metrics for some nodes might have been removed/added by that moment. Not important,
// because monitors will be fed with up-to-date metrics shortly, at the next iteration of refresh
val clusterNodesMetrics = getAllMetrics
val localNodeMetrics = clusterNodesMetrics.find(_.nodeName == nodeAddress.nodeName)
val iterator = alterationMonitors.iterator
// RACY: there might be new monitors added after the iterator has been obtained. Not important,
// because the refresh interval is meant to be very short, and all the new monitors will be called at the
// next refresh iteration
while (iterator.hasNext) {
val monitor = iterator.next
monitor match {
case localMonitor: LocalMetricsAlterationMonitor ⇒
localNodeMetrics.map { metrics ⇒
if (localMonitor reactsOn metrics)
localMonitor react metrics
}
case clusterMonitor: ClusterMetricsAlterationMonitor ⇒
if (clusterMonitor reactsOn clusterNodesMetrics)
clusterMonitor react clusterNodesMetrics
}
}
}
def stop() = _isRunning.switchOff
}

View file

@ -1,154 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.metrics
import akka.cluster._
import akka.event.EventHandler
import java.lang.management.ManagementFactory
import akka.util.ReflectiveAccess._
import akka.util.Switch
/*
* Snapshot of the JVM / system that the node is running on
*
* @param nodeName name of the node where metrics are gathered
* @param usedHeapMemory amount of heap memory currently used
* @param committedHeapMemory amount of heap memory guaranteed to be available
* @param maxHeapMemory maximum amount of heap memory that can be used
* @param avaiableProcessors number of the processors available to the JVM
* @param systemLoadAverage system load average. If the OS-specific Sigar native library is plugged in,
* it's used to calculate the average load on the CPUs in the system. Otherwise, the value is retrieved from monitoring
* MBeans. Hyperic Sigar provides more precise values, and, thus, if the library is provided, it's used by default.
*
*/
case class DefaultNodeMetrics(nodeName: String,
usedHeapMemory: Long,
committedHeapMemory: Long,
maxHeapMemory: Long,
avaiableProcessors: Int,
systemLoadAverage: Double) extends NodeMetrics
object MetricsProvider {
/*
* Maximum value of system load average
*/
val MAX_SYS_LOAD_AVG = 1
/*
* Minimum value of system load average
*/
val MIN_SYS_LOAD_AVG = 0
/*
* Default value of system load average
*/
val DEF_SYS_LOAD_AVG = 0.5
}
/*
* Abstracts a metrics provider that returns metrics of the system the node is running on
*/
trait MetricsProvider {
/*
* Gets metrics of the local system
*/
def getLocalMetrics: NodeMetrics
}
/*
* Loads JVM metrics through JMX monitoring beans
*/
class JMXMetricsProvider extends MetricsProvider {
import MetricsProvider._
private val memoryMXBean = ManagementFactory.getMemoryMXBean
private val osMXBean = ManagementFactory.getOperatingSystemMXBean
/*
* Validates and calculates system load average
*
* @param avg system load average obtained from a specific monitoring provider (may be incorrect)
* @return system load average, or default value(<code>0.5</code>), if passed value was out of permitted
* bounds (0.0 to 1.0)
*/
@inline
protected final def calcSystemLoadAverage(avg: Double) =
if (avg >= MIN_SYS_LOAD_AVG && avg <= MAX_SYS_LOAD_AVG) avg else DEF_SYS_LOAD_AVG
protected def systemLoadAverage = calcSystemLoadAverage(osMXBean.getSystemLoadAverage)
def getLocalMetrics =
DefaultNodeMetrics(Cluster.nodeAddress.nodeName,
memoryMXBean.getHeapMemoryUsage.getUsed,
memoryMXBean.getHeapMemoryUsage.getCommitted,
memoryMXBean.getHeapMemoryUsage.getMax,
osMXBean.getAvailableProcessors,
systemLoadAverage)
}
/*
* Loads wider range of metrics of a better quality with Hyperic Sigar (native library)
*
* @param refreshTimeout Sigar gathers metrics during this interval
*/
class SigarMetricsProvider private (private val sigarInstance: AnyRef) extends JMXMetricsProvider {
private val reportErrors = new Switch(true)
private val getCpuPercMethod = sigarInstance.getClass.getMethod("getCpuPerc")
private val sigarCpuCombinedMethod = getCpuPercMethod.getReturnType.getMethod("getCombined")
/*
* Wraps reflective calls to Hyperic Sigar
*
* @param callSigar reflective call to Hyperic Sigar
* @param fallback function which is invoked if the call to Sigar finished with an exception
*/
private def callSigarMethodOrElse[T](callSigar: T, fallback: T): T =
try callSigar catch {
case thrw ⇒
reportErrors.switchOff {
EventHandler.warning(this, "Failed to get metrics from Hyperic Sigar. %s: %s"
.format(thrw.getClass.getName, thrw.getMessage))
}
fallback
}
/*
* Obtains system load average from Sigar
* If the value cannot be obtained, falls back to system load average taken from JMX
*/
override def systemLoadAverage = callSigarMethodOrElse(
calcSystemLoadAverage(sigarCpuCombinedMethod
.invoke(getCpuPercMethod.invoke(sigarInstance)).asInstanceOf[Double]),
super.systemLoadAverage)
}
object SigarMetricsProvider {
/*
* Instantiates the Sigar metrics provider through reflection, in order to avoid creating a compile-time dependency on the
* Hyperic Sigar library
*/
def apply(refreshTimeout: Int): Either[Throwable, MetricsProvider] = try {
for {
sigarInstance ← createInstance[AnyRef]("org.hyperic.sigar.Sigar", noParams, noArgs).right
sigarProxyCacheClass: Class[_] ← getClassFor("org.hyperic.sigar.SigarProxyCache").right
} yield new SigarMetricsProvider(sigarProxyCacheClass
.getMethod("newInstance", Array(sigarInstance.getClass, classOf[Int]): _*)
.invoke(null, sigarInstance, new java.lang.Integer(refreshTimeout)))
} catch {
case thrw ⇒ Left(thrw)
}
}
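// A minimal sketch of choosing a MetricsProvider: try the Sigar-backed provider first and fall
// back to plain JMX when the native library cannot be loaded. Taking a snapshot via
// getLocalMetrics assumes the node (and hence Cluster.nodeAddress) has been initialized.
object MetricsProviderExample {
  val provider: MetricsProvider = SigarMetricsProvider(refreshTimeout = 2000) match {
    case Right(sigarBacked) ⇒ sigarBacked
    case Left(_)            ⇒ new JMXMetricsProvider
  }

  // one snapshot of the local node's metrics (heap usage, system load average, ...)
  val snapshot = provider.getLocalMetrics
}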

View file

@ -1,366 +0,0 @@
package akka.cluster.storage
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
import akka.cluster.zookeeper.AkkaZkClient
import akka.AkkaException
import org.apache.zookeeper.{ KeeperException, CreateMode }
import org.apache.zookeeper.data.Stat
import java.util.concurrent.ConcurrentHashMap
import annotation.tailrec
import java.lang.{ RuntimeException, UnsupportedOperationException }
/**
* Simple abstraction to store an Array of bytes based on some String key.
*
* Nothing is being said about ACID, transactions etc. It depends on the implementation
* of this Storage interface what is and isn't done at the lowest level.
*
* The amount of data that is allowed to be inserted/updated is implementation specific. The InMemoryStorage
* has no limits, but the ZooKeeperStorage has a maximum size of 1 MB.
*
* TODO: Class is up for better names.
* TODO: Instead of a String as key, perhaps also a byte-array.
*/
trait Storage {
/**
* Loads the VersionedData for the given key.
*
* This call doesn't care about the actual version of the data.
*
* @param key: the key of the VersionedData to load.
* @return the VersionedData for the given entry.
* @throws MissingDataException if the entry with the given key doesn't exist.
* @throws StorageException if anything goes wrong while accessing the storage
*/
def load(key: String): VersionedData
/**
* Loads the VersionedData for the given key and expectedVersion.
*
* This call can be used for optimistic locking since the version is included.
*
* @param key: the key of the VersionedData to load
* @param expectedVersion the version the data to load should have.
* @throws MissingDataException if the data with the given key doesn't exist.
* @throws BadVersionException if the version is not the expected version.
* @throws StorageException if anything goes wrong while accessing the storage
*/
def load(key: String, expectedVersion: Long): VersionedData
/**
* Checks if a VersionedData with the given key exists.
*
* @param key the key to check the existence for.
* @return true if exists, false if not.
* @throws StorageException if anything goes wrong while accessing the storage
*/
def exists(key: String): Boolean
/**
* Inserts a byte-array based on some key.
*
* @param key the key of the Data to insert.
* @param bytes the data to insert.
* @return the version of the written data (can be used for optimistic locking).
* @throws DataExistsException when VersionedData with the given Key already exists.
* @throws StorageException if anything goes wrong while accessing the storage
*/
def insert(key: String, bytes: Array[Byte]): Long
/**
* Inserts the data if there is no data for that key, or overwrites it if it is there.
*
* This is the method you want to call if you just want to save something and don't
* care about any lost update issues.
*
* @param key the key of the data
* @param bytes the data to insert
* @return the version of the written data (can be used for optimistic locking).
* @throws StorageException if anything goes wrong while accessing the storage
*/
def insertOrOverwrite(key: String, bytes: Array[Byte]): Long
/**
* Overwrites the current data for the given key. This call doesn't care about the version of the existing data.
*
* @param key the key of the data to overwrite
* @param bytes the data to insert.
* @return the version of the written data (can be used for optimistic locking).
* @throws MissingDataException when the entry with the given key doesn't exist.
* @throws StorageException if anything goes wrong while accessing the storage
*/
def overwrite(key: String, bytes: Array[Byte]): Long
/**
* Updates an existing value using an optimistic lock. It expects the current data to have the expectedVersion,
* and only then will it do the update.
*
* @param key the key of the data to update
* @param bytes the content to write for the given key
* @param expectedVersion the version of the content that is expected to be there.
* @return the version of the written data (can be used for optimistic locking).
* @throws MissingDataException if no data for the given key exists
* @throws BadVersionException if the version of the found data doesn't match the expected version. So essentially
* if another update was already done.
* @throws StorageException if anything goes wrong while accessing the storage
*/
def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long
}
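// A minimal sketch of an optimistic-locking update loop on top of the Storage trait: read the
// current version, transform the bytes, and retry when another writer got in first. The key
// and the transform function are illustrative.
object StorageExample {
  def atomicUpdate(storage: Storage, key: String)(transform: Array[Byte] ⇒ Array[Byte]): Long = {
    val current = storage.load(key)
    val written =
      try Some(storage.update(key, transform(current.data), current.version))
      catch { case e: BadVersionException ⇒ None } // someone else updated the key first
    written match {
      case Some(version) ⇒ version
      case None          ⇒ atomicUpdate(storage, key)(transform) // reload and retry
    }
  }
}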
/**
* The VersionedData is a container of data (some bytes) and a version (a Long).
*/
class VersionedData(val data: Array[Byte], val version: Long) {}
/**
* An AkkaException thrown by the Storage module.
*/
class StorageException(msg: String = null, cause: java.lang.Throwable = null) extends AkkaException(msg, cause) {
def this(msg: String) = this(msg, null);
}
/**
* A StorageException thrown when an operation is done on a non-existing node.
*/
class MissingDataException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
def this(msg: String) = this(msg, null);
}
/**
* A StorageException thrown when an operation is done on an existing node, but no node was expected.
*/
class DataExistsException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
def this(msg: String) = this(msg, null);
}
/**
* A StorageException thrown when an operation causes an optimistic locking failure.
*/
class BadVersionException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
def this(msg: String) = this(msg, null);
}
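//Hedged sketch (assumption, not part of the original source): retrying on BadVersionException
//turns update into a compare-and-swap loop over any Storage implementation.
object StorageRetrySketch {
  @scala.annotation.tailrec
  def compareAndSwap(storage: Storage, key: String)(f: Array[Byte] ⇒ Array[Byte]): Long = {
    val current = storage.load(key)
    val attempt =
      try Some(storage.update(key, f(current.data), current.version))
      catch { case _: BadVersionException ⇒ None }
    attempt match {
      case Some(newVersion) ⇒ newVersion
      case None             ⇒ compareAndSwap(storage, key)(f) //lost the race, re-read and retry
    }
  }
}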
/**
* A Storage implementation based on ZooKeeper.
*
* The store methods are atomic:
* - either everything is written or nothing is written
* - they are isolated and thus thread-safe,
* but they will not participate in any transactions.
*
*/
class ZooKeeperStorage(zkClient: AkkaZkClient, root: String = "/peter/storage") extends Storage {
var path = ""
//makes sure that the complete root exists on zookeeper.
root.split("/").foreach(
item ⇒ if (item.size > 0) {
path = path + "/" + item
if (!zkClient.exists(path)) {
//it could be that another thread is going to create this root node as well, so ignore it when it happens.
try {
zkClient.create(path, "".getBytes, CreateMode.PERSISTENT)
} catch {
case ignore: KeeperException.NodeExistsException ⇒
}
}
})
def toZkPath(key: String): String = {
root + "/" + key
}
def load(key: String) = try {
val stat = new Stat
val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false)
new VersionedData(arrayOfBytes, stat.getVersion)
} catch {
case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
String.format("Failed to load key [%s]: no data was found", key), e)
case e: KeeperException ⇒ throw new StorageException(
String.format("Failed to load key [%s]", key), e)
}
def load(key: String, expectedVersion: Long) = try {
val stat = new Stat
val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false)
if (stat.getVersion != expectedVersion) throw new BadVersionException(
"Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" +
" but found [" + stat.getVersion + "]")
new VersionedData(arrayOfBytes, stat.getVersion)
} catch {
case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
String.format("Failed to load key [%s]: no data was found", key), e)
case e: KeeperException ⇒ throw new StorageException(
String.format("Failed to load key [%s]", key), e)
}
def insertOrOverwrite(key: String, bytes: Array[Byte]) = {
try {
throw new UnsupportedOperationException()
} catch {
case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException(
String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e)
case e: KeeperException ⇒ throw new StorageException(
String.format("Failed to insert key [%s]", key), e)
}
}
def insert(key: String, bytes: Array[Byte]): Long = {
try {
zkClient.connection.create(root + "/" + key, bytes, CreateMode.PERSISTENT)
//todo: how to get hold of the version.
val version: Long = 0
version
} catch {
case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException(
String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e)
case e: KeeperException ⇒ throw new StorageException(
String.format("Failed to insert key [%s]", key), e)
}
}
def exists(key: String) = try {
zkClient.connection.exists(toZkPath(key), false)
} catch {
case e: KeeperException ⇒ throw new StorageException(
String.format("Failed to check existence for key [%s]", key), e)
}
def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = {
try {
zkClient.connection.writeData(root + "/" + key, bytes, expectedVersion.asInstanceOf[Int])
//ZooKeeper increments the data version by one on every successful conditional write.
expectedVersion + 1
} catch {
case e: KeeperException.BadVersionException ⇒ throw new BadVersionException(
String.format("Failed to update key [%s]: version mismatch", key), e)
case e: KeeperException ⇒ throw new StorageException(
String.format("Failed to update key [%s]", key), e)
}
}
def overwrite(key: String, bytes: Array[Byte]): Long = {
try {
zkClient.connection.writeData(root + "/" + key, bytes)
-1L
} catch {
case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
String.format("Failed to overwrite key [%s]: no previous entry exists", key), e)
case e: KeeperException ⇒ throw new StorageException(
String.format("Failed to overwrite key [%s]", key), e)
}
}
}
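//Hedged sketch (assumption, not part of the original source), relating to the "how to get hold of
//the version" todo above: one way to recover the ZooKeeper version right after a create is to read
//the node's Stat back. Note that this read is racy if another writer touches the node in between.
object ZooKeeperVersionSketch {
  def currentVersion(zkClient: AkkaZkClient, path: String): Long = {
    val stat = new Stat
    zkClient.connection.readData(path, stat, false)
    stat.getVersion.toLong
  }
}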
object InMemoryStorage {
val InitialVersion = 0;
}
/**
* An in-memory Storage implementation. Useful for testing purposes.
*/
final class InMemoryStorage extends Storage {
private val map = new ConcurrentHashMap[String, VersionedData]()
def load(key: String) = {
val result = map.get(key)
if (result == null) throw new MissingDataException(
String.format("Failed to load key [%s]: no data was found", key))
result
}
def load(key: String, expectedVersion: Long) = {
val result = load(key)
if (result.version != expectedVersion) throw new BadVersionException(
"Failed to load key [" + key + "]: version mismatch, expected [" + result.version + "] " +
"but found [" + expectedVersion + "]")
result
}
def exists(key: String) = map.containsKey(key)
def insert(key: String, bytes: Array[Byte]): Long = {
val version: Long = InMemoryStorage.InitialVersion
val result = new VersionedData(bytes, version)
val previous = map.putIfAbsent(key, result)
if (previous != null) throw new DataExistsException(
String.format("Failed to insert key [%s]: the key already has been inserted previously", key))
version
}
@tailrec
def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = {
val found = map.get(key)
if (found == null) throw new MissingDataException(
String.format("Failed to update key [%s], no previous entry exist", key))
if (expectedVersion != found.version) throw new BadVersionException(
"Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" +
" but found [" + found.version + "]")
val newVersion: Long = expectedVersion + 1
if (map.replace(key, found, new VersionedData(bytes, newVersion))) newVersion
else update(key, bytes, expectedVersion)
}
@tailrec
def overwrite(key: String, bytes: Array[Byte]): Long = {
val current = map.get(key)
if (current == null) throw new MissingDataException(
String.format("Failed to overwrite key [%s], no previous entry exist", key))
val update = new VersionedData(bytes, current.version + 1)
if (map.replace(key, current, update)) update.version
else overwrite(key, bytes)
}
def insertOrOverwrite(key: String, bytes: Array[Byte]): Long = {
val version = InMemoryStorage.InitialVersion
val result = new VersionedData(bytes, version)
val previous = map.putIfAbsent(key, result)
if (previous == null) result.version
else overwrite(key, bytes)
}
}
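//Hedged sketch (assumption, not part of the original source): exercising the optimistic-locking
//contract against the in-memory store.
object InMemoryStorageUsageSketch {
  def roundTrip(): Long = {
    val storage = new InMemoryStorage
    val v0 = storage.insert("greeting", "hello".getBytes("UTF-8")) //returns InitialVersion
    val v1 = storage.update("greeting", "hello world".getBytes("UTF-8"), v0)
    storage.load("greeting", v1).version //== v1; reusing v0 here would throw BadVersionException
  }
}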
//TODO: To minimize the number of dependencies, should the Storage not be placed in a separate module?
//class VoldemortRawStorage(storeClient: StoreClient) extends Storage {
//
// def load(Key: String) = {
// try {
//
// } catch {
// case
// }
// }
//
// override def insert(key: String, bytes: Array[Byte]) {
// throw new UnsupportedOperationException()
// }
//
// def update(key: String, bytes: Array[Byte]) {
// throw new UnsupportedOperationException()
// }
//}

View file

@ -1,34 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.zookeeper
import org.I0Itec.zkclient._
import org.I0Itec.zkclient.serialize._
import org.I0Itec.zkclient.exception._
/**
* ZooKeeper client. Holds the ZooKeeper connection and manages its session.
*/
class AkkaZkClient(zkServers: String,
sessionTimeout: Int,
connectionTimeout: Int,
zkSerializer: ZkSerializer = new SerializableSerializer)
extends ZkClient(zkServers, sessionTimeout, connectionTimeout, zkSerializer) {
def connection: ZkConnection = _connection.asInstanceOf[ZkConnection]
def reconnect() {
val zkLock = getEventLock
zkLock.lock()
try {
_connection.close()
_connection.connect(this)
} catch {
case e: InterruptedException ⇒ throw new ZkInterruptedException(e)
} finally {
zkLock.unlock()
}
}
}
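//Hedged sketch (assumption, not part of the original source): typical construction against a local
//ensemble; the connect string and timeouts are illustrative, the serializer keeps its default.
object AkkaZkClientUsageSketch {
  def newClient(): AkkaZkClient =
    new AkkaZkClient("localhost:2181", sessionTimeout = 60000, connectionTimeout = 30000)
}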

View file

@ -1,32 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.zookeeper
import org.I0Itec.zkclient._
import org.apache.commons.io.FileUtils
import java.io.File
object AkkaZooKeeper {
/**
* Starts up a local ZooKeeper server. Should only be used for testing purposes.
*/
def startLocalServer(dataPath: String, logPath: String): ZkServer =
startLocalServer(dataPath, logPath, 2181, 500)
/**
* Starts up a local ZooKeeper server. Should only be used for testing purposes.
*/
def startLocalServer(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = {
FileUtils.deleteDirectory(new File(dataPath))
FileUtils.deleteDirectory(new File(logPath))
val zkServer = new ZkServer(
dataPath, logPath,
new IDefaultNameSpace() {
def createDefaultNameSpace(zkClient: ZkClient) {}
},
port, tickTime)
zkServer.start()
zkServer
}
}
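//Hedged sketch (assumption, not part of the original source): wrapping a test run in an embedded
//server; ZkServer.shutdown() stops it again. The paths are illustrative values.
object LocalZooKeeperServerSketch {
  def withLocalServer[T](dataPath: String, logPath: String)(body: ⇒ T): T = {
    val server = AkkaZooKeeper.startLocalServer(dataPath, logPath)
    try body finally server.shutdown()
  }
}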

View file

@ -1,104 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.zookeeper
import akka.util.Duration
import akka.util.duration._
import org.I0Itec.zkclient._
import org.I0Itec.zkclient.exception._
import java.util.{ List ⇒ JList }
import java.util.concurrent.CountDownLatch
class BarrierTimeoutException(message: String) extends RuntimeException(message)
/**
* Barrier based on Zookeeper barrier tutorial.
*/
object ZooKeeperBarrier {
val BarriersNode = "/barriers"
val DefaultTimeout = 60 seconds
def apply(zkClient: ZkClient, name: String, node: String, count: Int) =
new ZooKeeperBarrier(zkClient, name, node, count, DefaultTimeout)
def apply(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration) =
new ZooKeeperBarrier(zkClient, name, node, count, timeout)
def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int) =
new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, DefaultTimeout)
def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int, timeout: Duration) =
new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, timeout)
def ignore[E: Manifest](body: ⇒ Unit) {
try {
body
} catch {
case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ ()
}
}
}
/**
* Barrier based on Zookeeper barrier tutorial.
*/
class ZooKeeperBarrier(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration)
extends IZkChildListener {
import ZooKeeperBarrier.{ BarriersNode, ignore }
val barrier = BarriersNode + "/" + name
val entry = barrier + "/" + node
val ready = barrier + "/ready"
val exitBarrier = new CountDownLatch(1)
ignore[ZkNodeExistsException](zkClient.createPersistent(BarriersNode))
ignore[ZkNodeExistsException](zkClient.createPersistent(barrier))
def apply(body: ⇒ Unit) {
enter()
body
leave()
}
/**
* An await does an enter/leave, making this barrier a 'single' barrier instead of a double barrier.
*/
def await() {
enter()
leave()
}
def enter() = {
zkClient.createEphemeral(entry)
if (zkClient.countChildren(barrier) >= count)
ignore[ZkNodeExistsException](zkClient.createPersistent(ready))
else
zkClient.waitUntilExists(ready, timeout.unit, timeout.length)
if (!zkClient.exists(ready)) {
throw new BarrierTimeoutException("Timeout (%s) while waiting for entry barrier" format timeout)
}
zkClient.subscribeChildChanges(barrier, this)
}
def leave() {
zkClient.delete(entry)
exitBarrier.await(timeout.length, timeout.unit)
if (zkClient.countChildren(barrier) > 0) {
zkClient.unsubscribeChildChanges(barrier, this)
throw new BarrierTimeoutException("Timeout (%s) while waiting for exit barrier" format timeout)
}
zkClient.unsubscribeChildChanges(barrier, this)
}
def handleChildChange(path: String, children: JList[String]) {
if (children.size <= 1) {
ignore[ZkNoNodeException](zkClient.delete(ready))
exitBarrier.countDown()
}
}
}
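//Hedged sketch (assumption, not part of the original source): a two-node double barrier guarding a
//critical section; both nodes must enter before the body runs and both must leave before continuing.
object ZooKeeperBarrierUsageSketch {
  def inLockStep(zkClient: ZkClient, nodeName: String)(body: ⇒ Unit): Unit = {
    val barrier = ZooKeeperBarrier(zkClient, "usage-sketch", nodeName, 2)
    barrier(body)
  }
}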

View file

@ -1,4 +1,4 @@
// package akka.remote
// package akka.cluster
// import akka.actor.Actor
// import akka.remote._

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992

View file

@ -1,63 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.api.changelisteners.newleader
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
import akka.cluster._
import ChangeListener._
import Cluster._
import akka.cluster.LocalCluster._
import java.util.concurrent._
object NewLeaderChangeListenerMultiJvmSpec {
var NrOfNodes = 2
}
class NewLeaderChangeListenerMultiJvmNode1 extends MasterClusterTestNode {
import NewLeaderChangeListenerMultiJvmSpec._
val testNodes = NrOfNodes
"A NewLeader change listener" must {
"be invoked after leader election is completed" ignore {
barrier("start-node1", NrOfNodes) {
Cluster.node.start()
}
barrier("start-node2", NrOfNodes).await()
System.exit(0)
}
}
}
class NewLeaderChangeListenerMultiJvmNode2 extends ClusterTestNode {
import NewLeaderChangeListenerMultiJvmSpec._
"A NewLeader change listener" must {
"be invoked after leader election is completed" ignore {
val latch = new CountDownLatch(1)
barrier("start-node1", NrOfNodes).await()
barrier("start-node2", NrOfNodes) {
node.register(new ChangeListener {
override def newLeader(node: String, client: ClusterNode) {
latch.countDown
}
})
}
latch.await(10, TimeUnit.SECONDS) must be === true
node.shutdown()
}
}
}

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1,65 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.api.changelisteners.nodeconnected
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
import akka.cluster._
import ChangeListener._
import Cluster._
import akka.cluster.LocalCluster._
import java.util.concurrent._
object NodeConnectedChangeListenerMultiJvmSpec {
var NrOfNodes = 2
}
class NodeConnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode {
import NodeConnectedChangeListenerMultiJvmSpec._
val testNodes = NrOfNodes
"A NodeConnected change listener" must {
"be invoked when a new node joins the cluster" in {
val latch = new CountDownLatch(1)
node.register(new ChangeListener {
override def nodeConnected(node: String, client: ClusterNode) {
latch.countDown
}
})
barrier("start-node1", NrOfNodes) {
Cluster.node.start()
}
barrier("start-node2", NrOfNodes) {
latch.await(5, TimeUnit.SECONDS) must be === true
}
node.shutdown()
}
}
}
class NodeConnectedChangeListenerMultiJvmNode2 extends ClusterTestNode {
import NodeConnectedChangeListenerMultiJvmSpec._
"A NodeConnected change listener" must {
"be invoked when a new node joins the cluster" in {
barrier("start-node1", NrOfNodes).await()
barrier("start-node2", NrOfNodes) {
Cluster.node.start()
}
node.shutdown()
}
}
}

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1,65 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.api.changelisteners.nodedisconnected
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
import akka.cluster._
import ChangeListener._
import Cluster._
import akka.cluster.LocalCluster._
import java.util.concurrent._
object NodeDisconnectedChangeListenerMultiJvmSpec {
var NrOfNodes = 2
}
class NodeDisconnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode {
import NodeDisconnectedChangeListenerMultiJvmSpec._
val testNodes = NrOfNodes
"A NodeDisconnected change listener" must {
"be invoked when a new node leaves the cluster" in {
val latch = new CountDownLatch(1)
node.register(new ChangeListener {
override def nodeDisconnected(node: String, client: ClusterNode) {
latch.countDown
}
})
barrier("start-node1", NrOfNodes) {
Cluster.node.start()
}
barrier("start-node2", NrOfNodes).await()
latch.await(10, TimeUnit.SECONDS) must be === true
node.shutdown()
}
}
}
class NodeDisconnectedChangeListenerMultiJvmNode2 extends ClusterTestNode {
import NodeDisconnectedChangeListenerMultiJvmSpec._
"A NodeDisconnected change listener" must {
"be invoked when a new node leaves the cluster" in {
barrier("start-node1", NrOfNodes).await()
barrier("start-node2", NrOfNodes) {
Cluster.node.start()
}
node.shutdown()
}
}
}

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992

View file

@ -1,89 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.api.configuration
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
import akka.cluster._
import Cluster._
import akka.cluster.LocalCluster._
object ConfigurationStorageMultiJvmSpec {
var NrOfNodes = 2
}
class ConfigurationStorageMultiJvmNode1 extends MasterClusterTestNode {
import ConfigurationStorageMultiJvmSpec._
val testNodes = NrOfNodes
"A cluster" must {
"be able to store, read and remove custom configuration data" in {
barrier("start-node-1", NrOfNodes) {
Cluster.node.start()
}
barrier("start-node-2", NrOfNodes).await()
barrier("store-config-data-node-1", NrOfNodes) {
node.setConfigElement("key1", "value1".getBytes)
}
barrier("read-config-data-node-2", NrOfNodes).await()
barrier("remove-config-data-node-2", NrOfNodes).await()
barrier("try-read-config-data-node-1", NrOfNodes) {
val option = node.getConfigElement("key1")
option.isDefined must be(false)
val elements = node.getConfigElementKeys
elements.size must be(0)
}
node.shutdown()
}
}
}
class ConfigurationStorageMultiJvmNode2 extends ClusterTestNode {
import ConfigurationStorageMultiJvmSpec._
"A cluster" must {
"be able to store, read and remove custom configuration data" in {
barrier("start-node-1", NrOfNodes).await()
barrier("start-node-2", NrOfNodes) {
Cluster.node.start()
}
barrier("store-config-data-node-1", NrOfNodes).await()
barrier("read-config-data-node-2", NrOfNodes) {
val option = node.getConfigElement("key1")
option.isDefined must be(true)
option.get must be("value1".getBytes)
val elements = node.getConfigElementKeys
elements.size must be(1)
elements.head must be("key1")
}
barrier("remove-config-data-node-2", NrOfNodes) {
node.removeConfigElement("key1")
}
barrier("try-read-config-data-node-1", NrOfNodes).await()
node.shutdown()
}
}
}

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992

View file

@ -1,71 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.api.leader.election
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
import akka.cluster._
import ChangeListener._
import Cluster._
import akka.cluster.LocalCluster._
import java.util.concurrent._
object LeaderElectionMultiJvmSpec {
var NrOfNodes = 2
}
/*
class LeaderElectionMultiJvmNode1 extends MasterClusterTestNode {
import LeaderElectionMultiJvmSpec._
val testNodes = NrOfNodes
"A cluster" must {
"be able to elect a single leader in the cluster and perform re-election if leader resigns" in {
barrier("start-node1", NrOfNodes) {
Cluster.node.start()
}
node.isLeader must be === true
barrier("start-node2", NrOfNodes) {
}
node.isLeader must be === true
barrier("stop-node1", NrOfNodes) {
node.resign()
}
}
}
}
class LeaderElectionMultiJvmNode2 extends ClusterTestNode {
import LeaderElectionMultiJvmSpec._
"A cluster" must {
"be able to elect a single leader in the cluster and perform re-election if leader resigns" in {
barrier("start-node1", NrOfNodes) {
}
node.isLeader must be === false
barrier("start-node2", NrOfNodes) {
Cluster.node.start()
}
node.isLeader must be === false
barrier("stop-node1", NrOfNodes) {
}
Thread.sleep(1000) // wait for re-election
node.isLeader must be === true
}
}
}
*/

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992

View file

@ -1,116 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.api.registry
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
import akka.actor._
import Actor._
import akka.cluster._
import ChangeListener._
import Cluster._
import akka.config.Config
import akka.serialization.Serialization
import akka.cluster.LocalCluster._
import java.util.concurrent._
object RegistryStoreMultiJvmSpec {
var NrOfNodes = 2
class HelloWorld1 extends Actor with Serializable {
def receive = {
case "Hello"
reply("World from node [" + Config.nodename + "]")
}
}
class HelloWorld2 extends Actor with Serializable {
var counter = 0
def receive = {
case "Hello"
Thread.sleep(1000)
counter += 1
case "Count"
reply(counter)
}
}
}
class RegistryStoreMultiJvmNode1 extends MasterClusterTestNode {
import RegistryStoreMultiJvmSpec._
val testNodes = NrOfNodes
"A cluster" must {
"be able to store an ActorRef in the cluster without a replication strategy and retrieve it with 'use'" in {
barrier("start-node-1", NrOfNodes) {
Cluster.node.start()
}
barrier("start-node-2", NrOfNodes).await()
barrier("store-1-in-node-1", NrOfNodes) {
node.store("hello-world-1", classOf[HelloWorld1], Serialization.serializerFor(classOf[HelloWorld1]))
}
barrier("use-1-in-node-2", NrOfNodes).await()
barrier("store-2-in-node-1", NrOfNodes) {
node.store("hello-world-2", classOf[HelloWorld1], false, Serialization.serializerFor(classOf[HelloWorld1]))
}
barrier("use-2-in-node-2", NrOfNodes).await()
node.shutdown()
}
}
}
class RegistryStoreMultiJvmNode2 extends ClusterTestNode {
import RegistryStoreMultiJvmSpec._
"A cluster" must {
"be able to store an actor in the cluster with 'store' and retrieve it with 'use'" in {
barrier("start-node-1", NrOfNodes).await()
barrier("start-node-2", NrOfNodes) {
Cluster.node.start()
}
barrier("store-1-in-node-1", NrOfNodes).await()
barrier("use-1-in-node-2", NrOfNodes) {
val actorOrOption = node.use("hello-world-1")
if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
val actorRef = actorOrOption.get
actorRef.address must be("hello-world-1")
(actorRef ? "Hello").as[String].get must be("World from node [node2]")
}
barrier("store-2-in-node-1", NrOfNodes).await()
barrier("use-2-in-node-2", NrOfNodes) {
val actorOrOption = node.use("hello-world-2")
if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
val actorRef = actorOrOption.get
actorRef.address must be("hello-world-2")
(actorRef ? "Hello").as[String].get must be("World from node [node2]")
}
node.shutdown()
}
}
}

View file

@ -1,4 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.service-hello.router = "round-robin"
akka.actor.deployment.service-hello.nr-of-instances = 1

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,4 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.service-hello.router = "round-robin"
akka.actor.deployment.service-hello.nr-of-instances = 1

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992

View file

@ -1,75 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.deployment
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
import akka.actor._
import Actor._
import akka.cluster._
import Cluster._
import akka.cluster.LocalCluster._
object DeploymentMultiJvmSpec {
var NrOfNodes = 2
}
class DeploymentMultiJvmNode1 extends MasterClusterTestNode {
import DeploymentMultiJvmSpec._
val testNodes = NrOfNodes
"A ClusterDeployer" must {
"be able to deploy deployments in akka.conf and lookup the deployments by 'address'" in {
barrier("start-node-1", NrOfNodes) {
Cluster.node.start()
}
barrier("start-node-2", NrOfNodes).await()
barrier("perform-deployment-on-node-1", NrOfNodes) {
Deployer.start()
}
barrier("lookup-deployment-node-2", NrOfNodes).await()
node.shutdown()
}
}
}
class DeploymentMultiJvmNode2 extends ClusterTestNode {
import DeploymentMultiJvmSpec._
"A cluster" must {
"be able to store, read and remove custom configuration data" in {
barrier("start-node-1", NrOfNodes).await()
barrier("start-node-2", NrOfNodes) {
Cluster.node.start()
}
barrier("perform-deployment-on-node-1", NrOfNodes).await()
barrier("lookup-deployment-node-2", NrOfNodes) {
Deployer.start()
val deployments = Deployer.deploymentsInConfig
deployments map { oldDeployment ⇒
val newDeployment = ClusterDeployer.lookupDeploymentFor(oldDeployment.address)
newDeployment must be('defined)
oldDeployment must equal(newDeployment.get)
}
}
node.shutdown()
}
}
}

View file

@ -1,4 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handlers = ["akka.testkit.TestEventListener"]
akka.event-handler-level = "WARNING"
akka.cluster.metrics-refresh-timeout = 1

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,134 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.metrics.local
import akka.cluster._
import akka.actor._
import Actor._
import Cluster._
import akka.dispatch._
import akka.util.Duration
import akka.util.duration._
import akka.cluster.metrics._
import java.util.concurrent.atomic.AtomicInteger
object LocalMetricsMultiJvmSpec {
val NrOfNodes = 1
}
class LocalMetricsMultiJvmNode1 extends MasterClusterTestNode {
import LocalMetricsMultiJvmSpec._
val testNodes = NrOfNodes
override def beforeAll = {
super.beforeAll()
node
}
override def afterAll = {
node.shutdown()
super.afterAll()
}
"Metrics manager" must {
def timeout = node.metricsManager.refreshTimeout
"be initialized with refresh timeout value, specified in akka.conf" in {
timeout must be(1.second)
}
"return up-to-date local node metrics straight from MBeans/Sigar" in {
node.metricsManager.getLocalMetrics must not be (null)
node.metricsManager.getLocalMetrics.systemLoadAverage must be(0.5 plusOrMinus 0.5)
}
"return metrics cached in the MetricsManagerLocalMetrics" in {
node.metricsManager.getMetrics(nodeAddress.nodeName) must not be (null)
}
"return local node metrics from ZNode" in {
node.metricsManager.getMetrics(nodeAddress.nodeName, false) must not be (null)
}
"return cached metrics of all nodes in the cluster" in {
node.metricsManager.getAllMetrics.size must be(1)
node.metricsManager.getAllMetrics.find(_.nodeName == "node1") must not be (null)
}
"throw no exceptions, when user attempts to get metrics of a non-existing node" in {
node.metricsManager.getMetrics("nonexisting") must be(None)
node.metricsManager.getMetrics("nonexisting", false) must be(None)
}
"regularly update cached metrics" in {
val oldMetrics = node.metricsManager.getLocalMetrics
Thread sleep timeout.toMillis
node.metricsManager.getLocalMetrics must not be (oldMetrics)
}
"allow to track JVM state and bind handles through MetricsAlterationMonitors" in {
val monitorResponse = Promise[String]()
node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
val id = "heapMemoryThresholdMonitor"
def reactsOn(metrics: NodeMetrics) = metrics.usedHeapMemory > 1
def react(metrics: NodeMetrics) = monitorResponse.success("Too much memory is used!")
})
Await.result(monitorResponse, 5 seconds) must be("Too much memory is used!")
}
class FooMonitor(monitorWorked: AtomicInteger) extends LocalMetricsAlterationMonitor {
val id = "fooMonitor"
def reactsOn(metrics: NodeMetrics) = true
def react(metrics: NodeMetrics) = monitorWorked.set(monitorWorked.get + 1)
}
"allow to unregister the monitor" in {
val monitorWorked = new AtomicInteger(0)
val fooMonitor = new FooMonitor(monitorWorked)
node.metricsManager.addMonitor(fooMonitor)
node.metricsManager.removeMonitor(fooMonitor)
val oldValue = monitorWorked.get
Thread sleep timeout.toMillis
monitorWorked.get must be(oldValue)
}
"stop notifying monitors, when stopped" in {
node.metricsManager.stop()
val monitorWorked = new AtomicInteger(0)
node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
val id = "fooMonitor"
def reactsOn(metrics: NodeMetrics) = true
def react(metrics: NodeMetrics) = monitorWorked.set(monitorWorked.get + 1)
})
monitorWorked.get must be(0)
node.metricsManager.start()
Thread sleep (timeout.toMillis * 2)
monitorWorked.get must be > (1)
}
}
}

View file

@ -1,3 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handlers = ["akka.testkit.TestEventListener"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,3 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handlers = ["akka.testkit.TestEventListener"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992

View file

@ -1,133 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.metrics.remote
import akka.cluster._
import akka.actor._
import Actor._
import Cluster._
import akka.dispatch._
import akka.util.Duration
import akka.util.duration._
import akka.cluster.metrics._
import java.util.concurrent._
import atomic.AtomicInteger
object RemoteMetricsMultiJvmSpec {
val NrOfNodes = 2
val MetricsRefreshTimeout = 100.millis
}
class AllMetricsAvailableMonitor(_id: String, completionLatch: CountDownLatch, clusterSize: Int) extends ClusterMetricsAlterationMonitor {
val id = _id
def reactsOn(allMetrics: Array[NodeMetrics]) = allMetrics.size == clusterSize
def react(allMetrics: Array[NodeMetrics]) = completionLatch.countDown
}
class RemoteMetricsMultiJvmNode1 extends MasterClusterTestNode {
import RemoteMetricsMultiJvmSpec._
val testNodes = NrOfNodes
"Metrics manager" must {
"provide metrics of all nodes in the cluster" in {
val allMetricsAvailable = new CountDownLatch(1)
node.metricsManager.refreshTimeout = MetricsRefreshTimeout
node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvailable, NrOfNodes))
LocalCluster.barrier("node-start", NrOfNodes).await()
allMetricsAvailable.await()
LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) {
node.metricsManager.getAllMetrics.size must be(2)
}
val cachedMetrics = node.metricsManager.getMetrics("node2")
val metricsFromZnode = node.metricsManager.getMetrics("node2", false)
LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) {
cachedMetrics must not be (null)
metricsFromZnode must not be (null)
}
Thread sleep MetricsRefreshTimeout.toMillis
LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) {
node.metricsManager.getMetrics("node2") must not be (cachedMetrics)
node.metricsManager.getMetrics("node2", false) must not be (metricsFromZnode)
}
val someMetricsGone = new CountDownLatch(1)
node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("some-metrics-gone", someMetricsGone, 1))
LocalCluster.barrier("some-nodes-leave", NrOfNodes).await()
someMetricsGone.await(10, TimeUnit.SECONDS) must be(true)
node.metricsManager.getMetrics("node2") must be(None)
node.metricsManager.getMetrics("node2", false) must be(None)
node.metricsManager.getAllMetrics.size must be(1)
node.shutdown()
}
}
}
class RemoteMetricsMultiJvmNode2 extends ClusterTestNode {
import RemoteMetricsMultiJvmSpec._
val testNodes = NrOfNodes
"Metrics manager" must {
"provide metrics of all nodes in the cluster" in {
val allMetricsAvailable = new CountDownLatch(1)
node.metricsManager.refreshTimeout = MetricsRefreshTimeout
node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvailable, NrOfNodes))
LocalCluster.barrier("node-start", NrOfNodes).await()
allMetricsAvailable.await()
LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) {
node.metricsManager.getAllMetrics.size must be(2)
}
val cachedMetrics = node.metricsManager.getMetrics("node1")
val metricsFromZnode = node.metricsManager.getMetrics("node1", false)
LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) {
cachedMetrics must not be (null)
metricsFromZnode must not be (null)
}
Thread sleep MetricsRefreshTimeout.toMillis
LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) {
node.metricsManager.getMetrics("node1") must not be (cachedMetrics)
node.metricsManager.getMetrics("node1", false) must not be (metricsFromZnode)
}
LocalCluster.barrier("some-nodes-leave", NrOfNodes) {
node.shutdown()
}
}
}
}

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,2 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992

View file

@ -1,112 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*
*
* package akka.cluster.migration
*
* import org.scalatest.WordSpec
* import org.scalatest.matchers.MustMatchers
* import org.scalatest.BeforeAndAfterAll
*
* import akka.actor._
* import Actor._
* import akka.cluster._
* import ChangeListener._
* import Cluster._
* import akka.config.Config
* import akka.serialization.Serialization
* import akka.cluster.LocalCluster._
*
* import java.util.concurrent._
*
* object MigrationExplicitMultiJvmSpec {
* var NrOfNodes = 2
*
* class HelloWorld extends Actor with Serializable {
* def receive = {
* case "Hello"
* reply("World from node [" + Config.nodename + "]")
* }
* }
* }
*
* class MigrationExplicitMultiJvmNode1 extends MasterClusterTestNode {
* import MigrationExplicitMultiJvmSpec._
*
* val testNodes = NrOfNodes
*
* "A cluster" must {
*
* "be able to migrate an actor from one node to another" in {
*
* barrier("start-node-1", NrOfNodes) {
* Cluster.node.start()
* }
*
* barrier("start-node-2", NrOfNodes) {
* }
*
* barrier("store-1-in-node-1", NrOfNodes) {
* val serializer = Serialization.serializerFor(classOf[HelloWorld]).fold(x ⇒ fail("No serializer found"), s ⇒ s)
* node.store("hello-world", classOf[HelloWorld], serializer)
* }
*
* barrier("use-1-in-node-2", NrOfNodes) {
* }
*
* barrier("migrate-from-node2-to-node1", NrOfNodes) {
* }
*
* barrier("check-actor-is-moved-to-node1", NrOfNodes) {
* node.isInUseOnNode("hello-world") must be(true)
*
* val actorRef = Actor.registry.local.actorFor("hello-world").getOrElse(fail("Actor should have been in the local actor registry"))
* actorRef.address must be("hello-world")
* (actorRef ? "Hello").as[String].get must be("World from node [node1]")
* }
*
* node.shutdown()
* }
* }
* }
*
* class MigrationExplicitMultiJvmNode2 extends ClusterTestNode {
* import MigrationExplicitMultiJvmSpec._
*
* "A cluster" must {
*
* "be able to migrate an actor from one node to another" in {
*
* barrier("start-node-1", NrOfNodes) {
* }
*
* barrier("start-node-2", NrOfNodes) {
* Cluster.node.start()
* }
*
* barrier("store-1-in-node-1", NrOfNodes) {
* }
*
* barrier("use-1-in-node-2", NrOfNodes) {
* val actorOrOption = node.use("hello-world")
* if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
*
* val actorRef = actorOrOption.get
* actorRef.address must be("hello-world")
*
* (actorRef ? "Hello").as[String].get must be("World from node [node2]")
* }
*
* barrier("migrate-from-node2-to-node1", NrOfNodes) {
* node.migrate(NodeAddress(node.nodeAddress.clusterName, "node1"), "hello-world")
* Thread.sleep(2000)
* }
*
* barrier("check-actor-is-moved-to-node1", NrOfNodes) {
* }
*
* node.shutdown()
* }
* }
* }
*/

View file

@ -1,6 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handlers = ["akka.testkit.TestEventListener"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.service-test.router = "round-robin"
akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
akka.actor.deployment.service-test.nr-of-instances = 2

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991

View file

@ -1,5 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.service-test.router = "round-robin"
akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
akka.actor.deployment.service-test.nr-of-instances = 2

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992

View file

@ -1,5 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.service-test.router = "round-robin"
akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
akka.actor.deployment.service-test.nr-of-instances = 2

View file

@ -1 +0,0 @@
-Dakka.cluster.nodename=node3 -Dakka.remote.port=9993

View file

@ -1,154 +0,0 @@
/*
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.cluster.reflogic
import akka.cluster._
import akka.cluster.Cluster._
import akka.actor.Actor
import akka.event.EventHandler
import akka.testkit.{ EventFilter, TestEvent }
import akka.routing.RoutingException
import java.net.ConnectException
import java.nio.channels.{ ClosedChannelException, NotYetConnectedException }
import akka.cluster.LocalCluster._
object ClusterActorRefCleanupMultiJvmSpec {
val NrOfNodes = 3
class TestActor extends Actor with Serializable {
def receive = {
case _ ⇒ {}
}
}
}
class ClusterActorRefCleanupMultiJvmNode1 extends MasterClusterTestNode {
import ClusterActorRefCleanupMultiJvmSpec._
val testNodes = NrOfNodes
"ClusterActorRef" must {
"cleanup itself" ignore {
Cluster.node.start()
barrier("awaitStarted", NrOfNodes).await()
val ref = Actor.actorOf(Props[ClusterActorRefCleanupMultiJvmSpec.TestActor]("service-test"))
ref.isInstanceOf[ClusterActorRef] must be(true)
val clusteredRef = ref.asInstanceOf[ClusterActorRef]
barrier("awaitActorCreated", NrOfNodes).await()
//verify that all remote actors are there.
clusteredRef.nrOfConnections must be(2)
// ignore exceptions from killing nodes
val ignoreExceptions = Seq(
EventFilter[ClosedChannelException],
EventFilter[NotYetConnectedException],
EventFilter[RoutingException],
EventFilter[ConnectException])
EventHandler.notify(TestEvent.Mute(ignoreExceptions))
//just some waiting to make sure that the node has died.
Thread.sleep(5000)
//send some request, this should trigger the cleanup
try {
clusteredRef ! "hello"
clusteredRef ! "hello"
} catch {
case e: ClosedChannelException ⇒
case e: NotYetConnectedException ⇒
case e: RoutingException ⇒
}
barrier("node-3-dead", NrOfNodes - 1).await()
//since the call to the node failed, the node must have been removed from the list.
clusteredRef.nrOfConnections must be(1)
//just some waiting to make sure that the node has died.
Thread.sleep(5000)
//trigger the cleanup.
try {
clusteredRef ! "hello"
clusteredRef ! "hello"
} catch {
case e: ClosedChannelException ⇒
case e: NotYetConnectedException ⇒
case e: RoutingException ⇒
}
//now there must not be any remaining connections after the death of the last actor.
clusteredRef.nrOfConnections must be(0)
//and lets make sure we now get the correct exception if we try to use the ref.
intercept[RoutingException] {
clusteredRef ! "Hello"
}
node.shutdown()
}
}
}
class ClusterActorRefCleanupMultiJvmNode2 extends ClusterTestNode {
import ClusterActorRefCleanupMultiJvmSpec._
val testNodes = NrOfNodes
//we are only using the nodes for their capacity, not for testing on this node itself.
"___" must {
"___" ignore {
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run() {
ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode2].getName)
}
})
Cluster.node.start()
barrier("awaitStarted", NrOfNodes).await()
barrier("awaitActorCreated", NrOfNodes).await()
barrier("node-3-dead", NrOfNodes - 1).await()
System.exit(0)
}
}
}
class ClusterActorRefCleanupMultiJvmNode3 extends ClusterTestNode {
import ClusterActorRefCleanupMultiJvmSpec._
val testNodes = NrOfNodes
//we are only using the nodes for their capacity, not for testing on this node itself.
"___" must {
"___" ignore {
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run() {
ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode3].getName)
}
})
Cluster.node.start()
barrier("awaitStarted", NrOfNodes).await()
barrier("awaitActorCreated", NrOfNodes).await()
System.exit(0)
}
}
}

View file

@ -1,7 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct"
akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1
akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log"
akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind"
akka.cluster.replication.snapshot-frequency = 1000

View file

@ -1,7 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct"
akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1
akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log"
akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind"
akka.cluster.replication.snapshot-frequency = 1000

View file

@ -1,99 +0,0 @@
/*
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
// package akka.cluster.replication.transactionlog.writebehind.nosnapshot
// import akka.actor._
// import akka.cluster._
// import Cluster._
// import akka.config.Config
// import akka.cluster.LocalCluster._
// object ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec {
// var NrOfNodes = 2
// sealed trait TransactionLogMessage extends Serializable
// case class Count(nr: Int) extends TransactionLogMessage
// case class Log(full: String) extends TransactionLogMessage
// case object GetLog extends TransactionLogMessage
// class HelloWorld extends Actor with Serializable {
// var log = ""
// def receive = {
// case Count(nr) ⇒
// log += nr.toString
// reply("World from node [" + Config.nodename + "]")
// case GetLog ⇒
// reply(Log(log))
// }
// }
// }
// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1 extends ClusterTestNode {
// import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._
// "A cluster" must {
// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
// barrier("start-node1", NrOfNodes) {
// Cluster.node.start()
// }
// barrier("create-actor-on-node1", NrOfNodes) {
// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-nosnapshot"))
// // node.isInUseOnNode("hello-world") must be(true)
// actorRef.address must be("hello-world-write-behind-nosnapshot")
// for (i ← 0 until 10) {
// (actorRef ? Count(i)).as[String] must be(Some("World from node [node1]"))
// }
// }
// barrier("start-node2", NrOfNodes).await()
// node.shutdown()
// }
// }
// }
// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2 extends MasterClusterTestNode {
// import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._
// val testNodes = NrOfNodes
// "A cluster" must {
// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
// barrier("start-node1", NrOfNodes).await()
// barrier("create-actor-on-node1", NrOfNodes).await()
// barrier("start-node2", NrOfNodes) {
// Cluster.node.start()
// }
// Thread.sleep(5000) // wait for fail-over from node1 to node2
// barrier("check-fail-over-to-node2", NrOfNodes - 1) {
// // both remaining nodes should now have the replica
// node.isInUseOnNode("hello-world-write-behind-nosnapshot") must be(true)
// val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry"))
// actorRef.address must be("hello-world-write-behind-nosnapshot")
// (actorRef ? GetLog).as[Log].get must be(Log("0123456789"))
// }
// node.shutdown()
// }
// }
// override def onReady() {
// LocalBookKeeperEnsemble.start()
// }
// override def onShutdown() {
// TransactionLog.shutdown()
// LocalBookKeeperEnsemble.shutdown()
// }
// }

View file

@ -1,7 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.hello-world.router = "direct"
akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1
akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log"
akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind"
akka.cluster.replication.snapshot-frequency = 7

View file

@ -1,7 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"
akka.actor.deployment.hello-world.router = "direct"
akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1
akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log"
akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind"
akka.cluster.replication.snapshot-frequency = 7

View file

@ -1,118 +0,0 @@
/*
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
// package akka.cluster.replication.transactionlog.writebehind.snapshot
// import akka.actor._
// import akka.cluster._
// import Cluster._
// import akka.config.Config
// import akka.cluster.LocalCluster._
// object ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec {
// var NrOfNodes = 2
// sealed trait TransactionLogMessage extends Serializable
// case class Count(nr: Int) extends TransactionLogMessage
// case class Log(full: String) extends TransactionLogMessage
// case object GetLog extends TransactionLogMessage
// class HelloWorld extends Actor with Serializable {
// var log = ""
// //println("Creating HelloWorld log =======> " + log)
// def receive = {
// case Count(nr) ⇒
// log += nr.toString
// //println("Message to HelloWorld log =======> " + log)
// reply("World from node [" + Config.nodename + "]")
// case GetLog ⇒
// reply(Log(log))
// }
// }
// }
// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1 extends ClusterTestNode {
// import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._
// "A cluster" must {
// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
// barrier("start-node1", NrOfNodes) {
// Cluster.node.start()
// }
// barrier("create-actor-on-node1", NrOfNodes) {
// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-snapshot"))
// node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true)
// actorRef.address must be("hello-world-write-behind-snapshot")
// var counter = 0
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// counter += 1
// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
// }
// barrier("start-node2", NrOfNodes).await()
// node.shutdown()
// }
// }
// }
// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2 extends MasterClusterTestNode {
// import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._
// val testNodes = NrOfNodes
// "A cluster" must {
// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
// barrier("start-node1", NrOfNodes).await()
// barrier("create-actor-on-node1", NrOfNodes).await()
// barrier("start-node2", NrOfNodes) {
// Cluster.node.start()
// }
// Thread.sleep(5000) // wait for fail-over from node1 to node2
// barrier("check-fail-over-to-node2", NrOfNodes - 1) {
// // both remaining nodes should now have the replica
// node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true)
// val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-snapshot").getOrElse(fail("Actor should have been in the local actor registry"))
// actorRef.address must be("hello-world-write-behind-snapshot")
// (actorRef ? GetLog).as[Log].get must be(Log("0123456789"))
// }
// node.shutdown()
// }
// }
// override def onReady() {
// LocalBookKeeperEnsemble.start()
// }
// override def onShutdown() {
// TransactionLog.shutdown()
// LocalBookKeeperEnsemble.shutdown()
// }
// }

View file

@ -1,7 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "DEBUG"
akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct"
akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log"
akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through"
akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
akka.cluster.replication.snapshot-frequency = 1000

View file

@ -1,7 +0,0 @@
akka.enabled-modules = ["cluster"]
akka.event-handler-level = "DEBUG"
akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct"
akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log"
akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through"
akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
akka.cluster.replication.snapshot-frequency = 1000

Some files were not shown because too many files have changed in this diff.