Merge branch 'remove-cluster'

Viktor Klang 2010-11-12 14:16:07 +01:00
commit ae319f56a2
7 changed files with 1 addition and 439 deletions


@@ -1,56 +0,0 @@
/**
* Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
*/
package akka.comet
import akka.actor.Actor
import akka.remote.Cluster
import scala.reflect.BeanProperty
import org.atmosphere.cpr.{BroadcastFilter, ClusterBroadcastFilter, Broadcaster}
sealed trait ClusterCometMessageType
case class ClusterCometBroadcast(name: String, msg: AnyRef) extends ClusterCometMessageType
/**
* Enables explicit clustering of Atmosphere (Comet) resources.
* Annotate the endpoint that carries the @Broadcast annotation with
* @org.atmosphere.annotation.Cluster(value = Array(classOf[AkkaClusterBroadcastFilter]), name = "someUniqueName")
* and that's all there is to it.
* Note: In the future, clustering Comet will be transparent.
*/
class AkkaClusterBroadcastFilter extends Actor with ClusterBroadcastFilter {
@BeanProperty var clusterName = ""
@BeanProperty var broadcaster : Broadcaster = null
def init() {
//Since this class is instantiated by Atmosphere, we need to make sure it's started
self.start
}
/**
* Stops the actor
*/
def destroy(): Unit = self.stop
/**
* Relays all non-ClusterCometBroadcast messages to the other AkkaClusterBroadcastFilters in the cluster.
* ClusterCometBroadcasts are not re-broadcast because they originate from the cluster;
* re-broadcasting them would start a chain reaction.
*/
def filter(o : AnyRef) = new BroadcastFilter.BroadcastAction(o match {
case ClusterCometBroadcast(_,m) => m //Do not re-broadcast, just unbox and pass along
case m : AnyRef => { //Relay message to the cluster and pass along
Cluster.relayMessage(classOf[AkkaClusterBroadcastFilter],ClusterCometBroadcast(clusterName,m))
m
}
})
def receive = {
//Only handle messages intended for this particular instance
case b @ ClusterCometBroadcast(c, _) if (c == clusterName) && (broadcaster ne null) => broadcaster broadcast b
case _ =>
}
}
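For context on what this file provided: an Atmosphere endpoint opted into cluster-wide broadcasts by pairing @Broadcast with Atmosphere's @Cluster annotation, as in the sketch below. It mirrors the FIXME'd annotations removed from the sample further down in this commit; the resource class, paths, and imports are illustrative, not taken from this diff.

import javax.ws.rs.{GET, Path, PathParam, Produces}
import org.atmosphere.annotation.{Broadcast, Cluster}
import org.atmosphere.cpr.Broadcaster
import org.atmosphere.jersey.Broadcastable
import akka.comet.AkkaClusterBroadcastFilter

@Path("/topic/{topic}/{message}/")
class ClusteredTopic {
  @GET
  @Broadcast
  @Cluster(value = Array(classOf[AkkaClusterBroadcastFilter]), name = "someUniqueName") // one filter instance per unique name
  @Produces(Array("text/plain;charset=ISO-8859-1"))
  def say(@PathParam("topic") topic: Broadcaster,
          @PathParam("message") message: String): Broadcastable =
    new Broadcastable(message, topic) // broadcast locally; the filter relays it to the rest of the cluster
}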


@@ -27,7 +27,6 @@ trait BootableRemoteActorService extends Bootable with Logging {
abstract override def onLoad = {
if (config.getBool("akka.remote.server.service", true)) {
if (config.getBool("akka.remote.cluster.service", true)) Cluster.start(self.applicationLoader)
log.info("Initializing Remote Actors Service...")
startRemoteService
log.info("Remote Actors Service initialized")
@@ -39,8 +38,6 @@ trait BootableRemoteActorService extends Bootable with Logging {
log.info("Shutting down Remote Actors Service")
RemoteNode.shutdown
if (remoteServerThread.isAlive) remoteServerThread.join(1000)
log.info("Shutting down Cluster")
Cluster.shutdown
log.info("Remote Actors Service has been shut down")
super.onUnload
}
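The two getBool checks in onLoad above imply an akka.conf section like the following sketch. The keys and defaults are taken from the code in this diff; the layout and comments are illustrative, and the serializer default is simply whatever Serializer.Java's class name resolves to.

akka.remote.server.service = on       # start the remote server on boot (default: on)
akka.remote.cluster.service = on      # start the Cluster service alongside it (default: on)
akka.remote.cluster.name = "default"  # logical cluster name; JGroupsClusterActor also uses it as the group name
akka.remote.cluster.actor = "akka.remote.JGroupsClusterActor"   # ClusterActor implementation to load
# akka.remote.cluster.serializer selects the Serializer class; it defaults to the Java serializer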


@@ -1,306 +0,0 @@
/**
* Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
*/
package akka.remote
import akka.config.Config.config
import akka.config.Supervision._
import akka.serialization.Serializer
import akka.actor.{Supervisor, SupervisorFactory, Actor, ActorRef, ActorRegistry}
import akka.util.Logging
import scala.collection.immutable.{Map, HashMap}
import akka.config.Supervision.{Permanent}
import akka.config.{RemoteAddress}
/**
* Interface for interacting with the Cluster Membership API.
*
* @author Viktor Klang
*/
trait Cluster {
/**
* Specifies the cluster name
*/
def name: String
/**
* Traverses all known remote addresses available at all other nodes in the cluster
* and applies the given PartialFunction to the first address at which it is defined.
* The order of traversal is undefined and may vary.
*/
def lookup[T](pf: PartialFunction[RemoteAddress, T]): Option[T]
/**
* Applies the specified function to all known remote addresses on all other nodes in the cluster.
* The order of application is undefined and may vary.
*/
def foreach(f: (RemoteAddress) => Unit): Unit
/**
* Returns all the endpoints in the cluster.
*/
def endpoints: Array[RemoteAddress]
}
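A sketch of how this membership API was consumed through the Cluster singleton defined below. The port predicate is illustrative, and the pattern match assumes RemoteAddress is a case class, as its construction elsewhere in this file suggests.

// Find any endpoint in the cluster listening on port 9999 (illustrative predicate)
val someEndpoint: Option[RemoteAddress] = Cluster.lookup {
  case address @ RemoteAddress(_, 9999) => address
}

// Visit every remote endpoint known to the other nodes
Cluster.foreach(address => println("cluster endpoint: " + address))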
/**
* Base trait for Cluster implementations
*
* @author Viktor Klang
*/
trait ClusterActor extends Actor with Cluster {
val name = config.getString("akka.remote.cluster.name", "default")
@volatile protected var serializer : Serializer = _
}
/**
* Companion object to ClusterActor that defines some common messages
*
* @author Viktor Klang
*/
private[akka] object ClusterActor {
sealed trait ClusterMessage
private[akka] case class RelayedMessage(actorClassFQN: String, msg: AnyRef) extends ClusterMessage
private[akka] case class Message[ADDR_T](sender: ADDR_T, msg: Array[Byte])
private[akka] case object PapersPlease extends ClusterMessage
private[akka] case class Papers(addresses: List[RemoteAddress]) extends ClusterMessage
private[akka] case object Block extends ClusterMessage
private[akka] case object Unblock extends ClusterMessage
private[akka] case class View[ADDR_T](othersPresent: Set[ADDR_T]) extends ClusterMessage
private[akka] case class Zombie[ADDR_T](address: ADDR_T) extends ClusterMessage
private[akka] case class RegisterLocalNode(server: RemoteAddress) extends ClusterMessage
private[akka] case class DeregisterLocalNode(server: RemoteAddress) extends ClusterMessage
private[akka] case class Node(endpoints: List[RemoteAddress])
private[akka] case class InitClusterActor(serializer : Serializer)
}
/**
* Base class for cluster actor implementations.
* Provides most of the behavior out of the box;
* it only needs to be given hooks into the underlying cluster implementation.
*/
abstract class BasicClusterActor extends ClusterActor with Logging {
import ClusterActor._
type ADDR_T
@volatile private var local: Node = Node(Nil)
@volatile private var remotes: Map[ADDR_T, Node] = Map()
override def preStart = {
remotes = new HashMap[ADDR_T, Node]
}
override def postStop = {
remotes = Map()
}
def receive = {
case v: View[ADDR_T] => {
// No longer present in the cluster = presumed zombies
// Nodes we had no prior knowledge of = unknowns
val zombies = Set[ADDR_T]() ++ remotes.keySet -- v.othersPresent
val unknown = v.othersPresent -- remotes.keySet
log debug ("Updating view")
log debug ("Other memebers: [%s]", v.othersPresent)
log debug ("Zombies: [%s]", zombies)
log debug ("Unknowns: [%s]", unknown)
// Tell the zombies and unknowns to provide papers and prematurely treat the zombies as dead
broadcast(zombies ++ unknown, PapersPlease)
remotes = remotes -- zombies
}
case z: Zombie[ADDR_T] => { //Ask the presumed zombie for papers and prematurely treat it as dead
log debug ("Killing Zombie Node: %s", z.address)
broadcast(z.address :: Nil, PapersPlease)
remotes = remotes - z.address
}
case rm@RelayedMessage(_, _) => {
log debug ("Relaying message: %s", rm)
broadcast(rm)
}
case m: Message[ADDR_T] => {
val (src, msg) = (m.sender, m.msg)
(serializer fromBinary (msg, None)) match {
case PapersPlease => {
log debug ("Asked for papers by %s", src)
broadcast(src :: Nil, Papers(local.endpoints))
if (remotes.get(src).isEmpty) // If we were asked for papers from someone we don't know, ask them!
broadcast(src :: Nil, PapersPlease)
}
case Papers(x) => remotes = remotes + (src -> Node(x))
case RelayedMessage(c, m) => ActorRegistry.actorsFor(c).foreach(_ ! m)
case unknown => log debug ("Unknown message: %s", unknown.toString)
}
}
case RegisterLocalNode(s) => {
log debug ("RegisterLocalNode: %s", s)
local = Node(s :: local.endpoints)
broadcast(Papers(local.endpoints))
}
case DeregisterLocalNode(s) => {
log debug ("DeregisterLocalNode: %s", s)
local = Node(local.endpoints.filterNot(_ == s))
broadcast(Papers(local.endpoints))
}
case InitClusterActor(s) => {
serializer = s
boot
}
}
/**
* Implement this in a subclass to boot up the cluster implementation
*/
protected def boot: Unit
/**
* Implement this in a subclass to add node-to-node messaging
*/
protected def toOneNode(dest: ADDR_T, msg: Array[Byte]): Unit
/**
* Implement this in a subclass to add node-to-many-nodes messaging
*/
protected def toAllNodes(msg: Array[Byte]): Unit
/**
* Sends the specified message to the given recipients using the serializer
* that's been set in the akka-conf
*/
protected def broadcast[T <: AnyRef](recipients: Iterable[ADDR_T], msg: T): Unit = {
lazy val m = serializer toBinary msg
for (r <- recipients) toOneNode(r, m)
}
/**
* Sends the specified message to all other nodes using the serializer
* that's been set in the akka-conf
*/
protected def broadcast[T <: AnyRef](msg: T): Unit =
if (!remotes.isEmpty) toAllNodes(serializer toBinary msg)
/**
* Applies the given PartialFunction to all known RemoteAddresses
*/
def lookup[T](handleRemoteAddress: PartialFunction[RemoteAddress, T]): Option[T] =
remotes.valuesIterator.toList.flatMap(_.endpoints).find(handleRemoteAddress isDefinedAt _).map(handleRemoteAddress)
/**
* Applies the given function to all known remote addresses
*/
def foreach(f: (RemoteAddress) => Unit): Unit = remotes.valuesIterator.toList.flatMap(_.endpoints).foreach(f)
/**
* Returns all the endpoints in the cluster.
*/
def endpoints: Array[RemoteAddress] = remotes.valuesIterator.toList.flatMap(_.endpoints).toArray // flatten the endpoints of every known node
}
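JGroupsClusterActor below is the one real implementation, but the contract is small enough to illustrate with a toy transport. A minimal sketch against an entirely hypothetical in-process bus (everything named LocalBus or nodeId is invented for illustration; a real implementation also needs a no-arg constructor, since Cluster instantiates it reflectively):

class LoopbackClusterActor extends BasicClusterActor {
  import ClusterActor._
  type ADDR_T = String // toy node addresses are plain strings

  private val nodeId = java.util.UUID.randomUUID.toString

  // Hook up the bus so that inbound bytes become Message(sender, payload) for the base class
  protected def boot: Unit =
    LocalBus.subscribe(nodeId) { (from, bytes) => self ! Message(from, bytes) }

  protected def toOneNode(dest: String, msg: Array[Byte]): Unit = LocalBus.send(nodeId, dest, msg)
  protected def toAllNodes(msg: Array[Byte]): Unit = LocalBus.sendToAll(nodeId, msg)
}

// The hypothetical bus itself: a process-local registry of subscribers
object LocalBus {
  import scala.collection.mutable
  private val subs = mutable.Map[String, (String, Array[Byte]) => Unit]()
  def subscribe(id: String)(f: (String, Array[Byte]) => Unit): Unit = synchronized { subs(id) = f }
  def send(from: String, dest: String, msg: Array[Byte]): Unit =
    synchronized { subs.get(dest) }.foreach(_(from, msg))
  def sendToAll(from: String, msg: Array[Byte]): Unit =
    synchronized { subs.toList }.foreach { case (id, f) => if (id != from) f(from, msg) }
}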
/**
* A singleton representing the Cluster.
* <p/>
* Loads a specified ClusterActor and delegates to that instance.
*/
object Cluster extends Cluster with Logging {
//Import messages
import ClusterActor._
lazy val DEFAULT_SERIALIZER_CLASS_NAME = Serializer.Java.getClass.getName
lazy val DEFAULT_CLUSTER_ACTOR_CLASS_NAME = classOf[JGroupsClusterActor].getName
@volatile private[remote] var clusterActorRef: Option[ActorRef] = None
@volatile private[akka] var classLoader: Option[ClassLoader] = Some(getClass.getClassLoader)
private[remote] def createClusterActor(): Option[ActorRef] = {
val name = config.getString("akka.remote.cluster.actor", DEFAULT_CLUSTER_ACTOR_CLASS_NAME)
if (name.isEmpty) throw new IllegalArgumentException(
"Can't start cluster since the 'akka.remote.cluster.actor' configuration option is not defined")
try {
Some(Actor.actorOf(Class.forName(name).newInstance.asInstanceOf[ClusterActor]))
} catch {
case e =>
log.error(e, "Couldn't load Cluster provider: [%s]", name)
None
}
}
private[akka] def createSupervisor(actor: ActorRef): Option[Supervisor] =
Some(Supervisor(
SupervisorConfig(OneForOneStrategy(List(classOf[Exception]), 5, 1000),
Supervise(actor, Permanent) :: Nil)))
private[this] def clusterActor = if (clusterActorRef.isEmpty) None else Some(clusterActorRef.get.actor.asInstanceOf[ClusterActor])
def name = clusterActor.map(_.name).getOrElse("No cluster")
def lookup[T](pf: PartialFunction[RemoteAddress, T]): Option[T] = clusterActor.flatMap(_.lookup(pf))
/** Adds the specified hostname + port as a local node.
* This information will be propagated to other nodes in the cluster
* and will be available at the other nodes through lookup and foreach
*/
def registerLocalNode(hostname: String, port: Int): Unit = clusterActorRef.foreach(_ ! RegisterLocalNode(RemoteAddress(hostname, port)))
/** Removes the specified hostname + port from the local node.
* This information will be propagated to other nodes in the cluster
* and will no longer be available at the other nodes through lookup and foreach
*/
def deregisterLocalNode(hostname: String, port: Int): Unit = clusterActorRef.foreach(_ ! DeregisterLocalNode(RemoteAddress(hostname, port)))
/** Sends the message to all Actors of the specified type on all other nodes in the cluster.
*/
def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit = clusterActorRef.foreach(_ ! RelayedMessage(to.getName, msg))
def foreach(f: (RemoteAddress) => Unit): Unit = clusterActor.foreach(_.foreach(f))
def endpoints: Array[RemoteAddress] = clusterActor
.getOrElse(throw new IllegalStateException("No cluster actor is defined"))
.endpoints
def start(): Unit = start(None)
def start(serializerClassLoader: Option[ClassLoader]): Unit = synchronized {
log.info("Starting up Cluster Service...")
if (clusterActorRef.isEmpty) {
for {
actorRef <- createClusterActor()
sup <- createSupervisor(actorRef)
} {
val serializer = Class.forName(config.getString(
"akka.remote.cluster.serializer", DEFAULT_SERIALIZER_CLASS_NAME))
.newInstance.asInstanceOf[Serializer]
classLoader = serializerClassLoader orElse classLoader
serializer.classLoader = classLoader
actorRef.start
sup.start
actorRef ! InitClusterActor(serializer)
clusterActorRef = Some(actorRef)
}
}
}
def shutdown(): Unit = synchronized {
log.info("Shutting down Cluster Service...")
for {
c <- clusterActorRef
s <- c.supervisor
} s.stop
classLoader = Some(getClass.getClassLoader)
}
}
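Pulled together, the lifecycle that the rest of this commit unwires looked roughly like this. Every call exists verbatim in the object above; SomeActor stands for any Actor subclass registered in the ActorRegistry, and the hostname and port are placeholders.

Cluster.start()                                    // done by BootableRemoteActorService.onLoad
Cluster.registerLocalNode("localhost", 9999)       // done by RemoteServer once it has bound its port
Cluster.relayMessage(classOf[SomeActor], "hello")  // "hello" reaches every SomeActor on the other nodes
Cluster.foreach(address => println("peer endpoint: " + address))
Cluster.shutdown()                                 // done by BootableRemoteActorService.onUnload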


@@ -1,64 +0,0 @@
package akka.remote
import org.jgroups.{JChannel, View => JG_VIEW, Address, Message => JG_MSG, ExtendedMembershipListener, Receiver}
import org.jgroups.util.Util
/**
* Clustering support via JGroups.
*
* @author Viktor Klang
*/
class JGroupsClusterActor extends BasicClusterActor {
import scala.collection.JavaConversions._
import akka.remote.ClusterActor._
type ADDR_T = Address
@volatile private var isActive = false
@volatile private var channel: Option[JChannel] = None
protected def boot = {
log info "Booting JGroups-based cluster"
isActive = true
// Set up the JGroups local endpoint
channel = Some(new JChannel {
setReceiver(new Receiver with ExtendedMembershipListener {
def getState: Array[Byte] = null
def setState(state: Array[Byte]): Unit = ()
def receive(m: JG_MSG): Unit =
if (isActive && m.getSrc != channel.map(_.getAddress).getOrElse(m.getSrc)) self ! Message(m.getSrc,m.getRawBuffer)
def viewAccepted(view: JG_VIEW): Unit =
if (isActive) self ! View(Set[ADDR_T]() ++ view.getMembers - channel.get.getAddress)
def suspect(a: Address): Unit =
if (isActive) self ! Zombie(a)
def block(): Unit =
log debug "UNSUPPORTED: JGroupsClusterActor::block" //TODO HotSwap to a buffering body
def unblock(): Unit =
log debug "UNSUPPORTED: JGroupsClusterActor::unblock" //TODO HotSwap back and flush the buffer
})
})
channel.foreach(_.connect(name))
}
protected def toOneNode(dest : Address, msg: Array[Byte]): Unit =
for (c <- channel) c.send(new JG_MSG(dest, null, msg))
protected def toAllNodes(msg : Array[Byte]): Unit =
for (c <- channel) c.send(new JG_MSG(null, null, msg))
override def postStop = {
super.postStop
log info ("Shutting down %s", toString)
isActive = false
channel.foreach(Util shutdown _)
channel = None
}
}


@@ -243,7 +243,6 @@ class RemoteServer extends Logging with ListenerManagement {
openChannels.add(bootstrap.bind(new InetSocketAddress(hostname, port)))
_isRunning = true
Cluster.registerLocalNode(hostname, port)
notifyListeners(RemoteServerStarted(this))
}
} catch {
@@ -261,7 +260,6 @@ class RemoteServer extends Logging with ListenerManagement {
openChannels.disconnect
openChannels.close.awaitUninterruptibly
bootstrap.releaseExternalResources
Cluster.deregisterLocalNode(hostname, port)
notifyListeners(RemoteServerShutdown(this))
} catch {
case e: java.nio.channels.ClosedChannelException => {}
@@ -595,7 +593,7 @@ class RemoteServerHandler(
if (request.getOneWay) messageReceiver.invoke(typedActor, args: _*)
else {
val result = messageReceiver.invoke(typedActor, args: _*) match {
- case f: Future[_] => f.await.result.get
+ case f: Future[_] => f.await.result.get //TODO replace this with a Listener on the Future to avoid blocking
case other => other
}
log.debug("Returning result from remote typed actor invocation [%s]", result)


@@ -10,7 +10,6 @@ import akka.stm.TransactionalMap
import akka.persistence.cassandra.CassandraStorage
import akka.config.Supervision._
import akka.util.Logging
import akka.comet.AkkaClusterBroadcastFilter
import scala.xml.NodeSeq
import java.lang.Integer
import java.nio.ByteBuffer
@@ -90,7 +89,6 @@ class PubSub {
@Broadcast
@Path("/topic/{topic}/{message}/")
@Produces(Array("text/plain;charset=ISO-8859-1"))
//FIXME @Cluster(value = Array(classOf[AkkaClusterBroadcastFilter]),name = "foo")
def say(@PathParam("topic") topic: Broadcaster, @PathParam("message") message: String): Broadcastable = new Broadcastable(message, topic)
}
@@ -148,7 +146,6 @@ class Chat {
@POST
@Broadcast(Array(classOf[XSSHtmlFilter], classOf[JsonpFilter]))
//FIXME @Cluster(value = Array(classOf[AkkaClusterBroadcastFilter]),name = "bar")
@Consumes(Array("application/x-www-form-urlencoded"))
@Produces(Array("text/html"))
def publishMessage(form: MultivaluedMap[String, String]) = {


@@ -101,7 +101,6 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
lazy val jmxModuleConfig = ModuleConfiguration("com.sun.jmx", SunJDMKRepo)
lazy val jerseyContrModuleConfig = ModuleConfiguration("com.sun.jersey.contribs", JavaNetRepo)
lazy val jerseyModuleConfig = ModuleConfiguration("com.sun.jersey", JavaNetRepo)
lazy val jgroupsModuleConfig = ModuleConfiguration("jgroups", JBossRepo)
lazy val multiverseModuleConfig = ModuleConfiguration("org.multiverse", CodehausRepo)
lazy val nettyModuleConfig = ModuleConfiguration("org.jboss.netty", JBossRepo)
lazy val scalaTestModuleConfig = ModuleConfiguration("org.scalatest", ScalaToolsRelRepo)
@@ -191,8 +190,6 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
lazy val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile" //CDDL v1
lazy val jersey_contrib = "com.sun.jersey.contribs" % "jersey-scala" % JERSEY_VERSION % "compile" //CDDL v1
lazy val jgroups = "jgroups" % "jgroups" % "2.9.0.GA" % "compile" //LGPL 2.1
lazy val jsr166x = "jsr166x" % "jsr166x" % "1.0" % "compile" //CC Public Domain
lazy val jsr250 = "javax.annotation" % "jsr250-api" % "1.0" % "compile" //CDDL v1
@@ -453,7 +450,6 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
val h2_lzf = Dependencies.h2_lzf
val jackson = Dependencies.jackson
val jackson_core = Dependencies.jackson_core
val jgroups = Dependencies.jgroups
val jta_1_1 = Dependencies.jta_1_1
val netty = Dependencies.netty
val protobuf = Dependencies.protobuf