merge master
commit 16a796205a
43 changed files with 2143 additions and 543 deletions
@@ -45,11 +45,11 @@ object Kernel extends Logging {
  private var applicationLoader: Option[ClassLoader] = None

  def main(args: Array[String]) = boot

  def boot = synchronized {
    if (!hasBooted) {
      printBanner
      log.info("Starting Akka kernel...")
      log.info("Starting Akka...")

      runApplicationBootClasses

@@ -68,7 +68,9 @@ object Kernel extends Logging {
      if (RUN_REST_SERVICE) startJersey

      log.info("Akka kernel started successfully")
      runApplicationBootClasses

      log.info("Akka started successfully")
      hasBooted = true
    }
  }

@@ -77,17 +79,27 @@ object Kernel extends Logging {
  def setupConfig: Config = {
    try {
      Configgy.configure(akka.Boot.CONFIG + "/akka.conf")
      //runtime.load(args)
      val config = Configgy.config
      config.registerWithJmx("se.scalablesolutions.akka")

      // FIXME fix Configgy JMX subscription to allow management
      // config.subscribe { c => configure(c.getOrElse(new Config)) }
      config
      Configgy.configureFromResource("akka.conf", getClass.getClassLoader)
      log.info("Config loaded from the application classpath.")
    } catch {
      case e: ParseException => throw new Error("Could not retrieve the akka.conf config file. Make sure you have set the AKKA_HOME environment variable to the root of the distribution.")
      case e: ParseException =>
        try {
          if (HOME.isDefined) {
            val configFile = HOME.get + "/config/akka.conf"
            log.info("AKKA_HOME is defined to [%s], loading config from [%s].", HOME.get, configFile)
            Configgy.configure(configFile)
          } else throw new IllegalStateException("AKKA_HOME is not defined and no 'akka.conf' can be found on the classpath, aborting")
        } catch {
          case e: ParseException => throw new IllegalStateException("AKKA_HOME is not defined and no 'akka.conf' can be found on the classpath, aborting")
        }
    }
    //val runtime = new RuntimeEnvironment(getClass)
    //runtime.load(args)
    val config = Configgy.config
    config.registerWithJmx("com.scalablesolutions.akka.config")
    // FIXME fix Configgy JMX subscription to allow management
    // config.subscribe { c => configure(c.getOrElse(new Config)) }
    config
  }

  private[akka] def runApplicationBootClasses = {

@@ -104,7 +116,7 @@ object Kernel extends Logging {
    val loader = new URLClassLoader(toDeploy.toArray, getClass.getClassLoader)
    if (BOOT_CLASSES.isEmpty) throw new IllegalStateException("No boot class specified. Add an application boot class to the 'akka.conf' file, such as 'boot = \"com.biz.myapp.Boot\"'")
    for (clazz <- BOOT_CLASSES) {
      log.info("Booting with boot class [%s]", clazz)
      log.info("Loading boot class [%s]", clazz)
      loader.loadClass(clazz).newInstance
    }
    applicationLoader = Some(loader)

@@ -125,7 +137,8 @@ object Kernel extends Logging {
  private[akka] def startCassandra = if (config.getBool("akka.storage.cassandra.service", true)) {
    System.setProperty("cassandra", "")
    System.setProperty("storage-config", akka.Boot.CONFIG + "/")
    if (HOME.isDefined) System.setProperty("storage-config", HOME.get + "/config/")
    else if (System.getProperty("storage-config", "NIL") == "NIL") throw new IllegalStateException("Neither AKKA_HOME nor -Dstorage-config=... is set. Can't start up Cassandra. Either set AKKA_HOME or set the -Dstorage-config=... variable to the directory with the Cassandra storage-conf.xml file.")
    CassandraStorage.start
  }

@@ -139,7 +152,7 @@ object Kernel extends Logging {
    adapter.setHandleStaticResources(true)
    adapter.setServletInstance(new AkkaCometServlet)
    adapter.setContextPath(uri.getPath)
    adapter.setRootFolder(System.getenv("AKKA_HOME") + "/deploy/root")
    if (HOME.isDefined) adapter.setRootFolder(HOME.get + "/deploy/root")
    log.info("REST service root path: [" + adapter.getRootFolder + "] and context path [" + adapter.getContextPath + "]")

    val ah = new com.sun.grizzly.arp.DefaultAsyncHandler

@@ -165,7 +178,7 @@ object Kernel extends Logging {
     (____  /__|_ \__|_ \(____  /
          \/     \/    \/     \/
  """)
    log.info(" Running version " + kernel.Kernel.config.getString("akka.version", "awesome"))
    log.info(" Running version " + config.getString("akka.version", "Awesome"))
    log.info("==============================")
  }
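Note on the boot hook above: `runApplicationBootClasses` instantiates every class listed under the `boot` key in `akka.conf` via `loadClass(clazz).newInstance`, so an application plugs into kernel startup with a plain no-argument class whose constructor does the work. A minimal sketch of such a class; the `com.biz.myapp` name is just the placeholder taken from the error message in the hunk above:

    // In akka.conf:  boot = "com.biz.myapp.Boot"
    package com.biz.myapp

    class Boot {
      // Runs at kernel startup, inside the constructor body:
      // register supervisors, start actors, wire up services, etc.
      println("MyApp booted by the Akka kernel")
    }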
@@ -141,6 +141,7 @@ class ActiveObjectFactory {
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
object ActiveObject {

  val MATCH_ALL = "execution(* *.*(..))"
  val AKKA_CAMEL_ROUTING_SCHEME = "akka"
@@ -77,7 +77,6 @@ object Serializer {
      message.toBuilder().mergeFrom(bytes).build
    }

    // For Java
    def in(bytes: Array[Byte], clazz: Class[_]): AnyRef = {
      if (clazz == null) throw new IllegalArgumentException("Protobuf message can't be null")
      in(bytes, Some(clazz))
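The new `in(bytes, clazz)` overload is a Java-friendly entry point: Java callers cannot conveniently construct a Scala `Option`, so the class argument is wrapped in `Some` internally. A hedged round-trip sketch, assuming a hypothetical Protobuf-generated `MyMessage` class and the `out`/`in` pair shown above:

    // Sketch only: MyMessage is a hypothetical Protobuf-generated message class.
    val bytes: Array[Byte] = Serializer.Protobuf.out(message)            // serialize
    val copy: AnyRef = Serializer.Protobuf.in(bytes, classOf[MyMessage]) // Java-friendly overload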
kernel/src/main/scala/state/CassandraSession.scala (new file, 240 lines)
@@ -0,0 +1,240 @@
/**
 * Copyright (C) 2009 Scalable Solutions.
 */

package se.scalablesolutions.akka.kernel.state

import java.io.{Flushable, Closeable}

import util.Logging
import util.Helpers._
import serialization.Serializer
import kernel.Kernel.config

import org.apache.cassandra.db.ColumnFamily
import org.apache.cassandra.service._

import org.apache.thrift.transport._
import org.apache.thrift.protocol._

/**
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
trait CassandraSession extends Closeable with Flushable {
  import scala.collection.jcl.Conversions._
  import org.scala_tools.javautils.Imports._
  import java.util.{Map => JMap}

  protected val client: Cassandra.Client
  protected val keyspace: String

  val obtainedAt: Long
  val consistencyLevel: Int
  val schema: JMap[String, JMap[String, String]]

  /**
   * Count is always the max number of results to return.
   *
   * So it means, starting with `start`, or the first one if start is
   * empty, go until you hit `finish` or `count`, whichever comes first.
   * Empty is not a legal column name, so if finish is empty it is ignored
   * and only count is used.
   *
   * We don't offer a numeric offset since that can't be supported
   * efficiently with a log-structured merge disk format.
   */
  def /(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int): List[Column] =
    /(key, columnParent, start, end, ascending, count, consistencyLevel)

  def /(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int, consistencyLevel: Int): List[Column] =
    client.get_slice(keyspace, key, columnParent, start, end, ascending, count, consistencyLevel).toList

  def /(key: String, columnParent: ColumnParent, colNames: List[Array[Byte]]): List[Column] =
    /(key, columnParent, colNames, consistencyLevel)

  def /(key: String, columnParent: ColumnParent, colNames: List[Array[Byte]], consistencyLevel: Int): List[Column] =
    client.get_slice_by_names(keyspace, key, columnParent, colNames.asJava, consistencyLevel).toList

  def |(key: String, colPath: ColumnPath): Option[Column] =
    |(key, colPath, consistencyLevel)

  def |(key: String, colPath: ColumnPath, consistencyLevel: Int): Option[Column] =
    client.get_column(keyspace, key, colPath, consistencyLevel)

  def |#(key: String, columnParent: ColumnParent): Int =
    |#(key, columnParent, consistencyLevel)

  def |#(key: String, columnParent: ColumnParent, consistencyLevel: Int): Int =
    client.get_column_count(keyspace, key, columnParent, consistencyLevel)

  def ++|(key: String, colPath: ColumnPath, value: Array[Byte]): Unit =
    ++|(key, colPath, value, obtainedAt, consistencyLevel)

  def ++|(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long): Unit =
    ++|(key, colPath, value, timestamp, consistencyLevel)

  def ++|(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long, consistencyLevel: Int) =
    client.insert(keyspace, key, colPath, value, timestamp, consistencyLevel)

  def ++|(batch: BatchMutation): Unit =
    ++|(batch, consistencyLevel)

  def ++|(batch: BatchMutation, consistencyLevel: Int): Unit =
    client.batch_insert(keyspace, batch, consistencyLevel)

  def --(key: String, columnPathOrParent: ColumnPathOrParent, timestamp: Long): Unit =
    --(key, columnPathOrParent, timestamp, consistencyLevel)

  def --(key: String, columnPathOrParent: ColumnPathOrParent, timestamp: Long, consistencyLevel: Int): Unit =
    client.remove(keyspace, key, columnPathOrParent, timestamp, consistencyLevel)

  def /^(key: String, columnFamily: String, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int): List[SuperColumn] =
    /^(key, columnFamily, start, end, ascending, count, consistencyLevel)

  def /^(key: String, columnFamily: String, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int, consistencyLevel: Int): List[SuperColumn] =
    client.get_slice_super(keyspace, key, columnFamily, start, end, ascending, count, consistencyLevel).toList

  def /^(key: String, columnFamily: String, superColNames: List[Array[Byte]]): List[SuperColumn] =
    /^(key, columnFamily, superColNames, consistencyLevel)

  def /^(key: String, columnFamily: String, superColNames: List[Array[Byte]], consistencyLevel: Int): List[SuperColumn] =
    client.get_slice_super_by_names(keyspace, key, columnFamily, superColNames.asJava, consistencyLevel).toList

  def |^(key: String, superColumnPath: SuperColumnPath): Option[SuperColumn] =
    |^(key, superColumnPath, consistencyLevel)

  def |^(key: String, superColumnPath: SuperColumnPath, consistencyLevel: Int): Option[SuperColumn] =
    client.get_super_column(keyspace, key, superColumnPath, consistencyLevel)

  def ++|^(batch: BatchMutationSuper): Unit =
    ++|^(batch, consistencyLevel)

  def ++|^(batch: BatchMutationSuper, consistencyLevel: Int): Unit =
    client.batch_insert_super_column(keyspace, batch, consistencyLevel)

  def getRange(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int): List[Column] =
    getRange(key, columnParent, start, end, ascending, count, consistencyLevel)

  def getRange(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int, consistencyLevel: Int): List[Column] =
    client.get_slice(keyspace, key, columnParent, start, end, ascending, count, consistencyLevel).toList

  def getRange(key: String, columnParent: ColumnParent, colNames: List[Array[Byte]]): List[Column] =
    getRange(key, columnParent, colNames, consistencyLevel)

  def getRange(key: String, columnParent: ColumnParent, colNames: List[Array[Byte]], consistencyLevel: Int): List[Column] =
    client.get_slice_by_names(keyspace, key, columnParent, colNames.asJava, consistencyLevel).toList

  def getColumn(key: String, colPath: ColumnPath): Option[Column] =
    getColumn(key, colPath, consistencyLevel)

  def getColumn(key: String, colPath: ColumnPath, consistencyLevel: Int): Option[Column] =
    client.get_column(keyspace, key, colPath, consistencyLevel)

  def getColumnCount(key: String, columnParent: ColumnParent): Int =
    getColumnCount(key, columnParent, consistencyLevel)

  def getColumnCount(key: String, columnParent: ColumnParent, consistencyLevel: Int): Int =
    client.get_column_count(keyspace, key, columnParent, consistencyLevel)

  def insertColumn(key: String, colPath: ColumnPath, value: Array[Byte]): Unit =
    insertColumn(key, colPath, value, obtainedAt, consistencyLevel)

  def insertColumn(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long): Unit =
    insertColumn(key, colPath, value, timestamp, consistencyLevel)

  def insertColumn(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long, consistencyLevel: Int) =
    client.insert(keyspace, key, colPath, value, timestamp, consistencyLevel)

  def insertColumn(batch: BatchMutation): Unit =
    insertColumn(batch, consistencyLevel)

  def insertColumn(batch: BatchMutation, consistencyLevel: Int): Unit =
    client.batch_insert(keyspace, batch, consistencyLevel)

  def removeColumn(key: String, columnPathOrParent: ColumnPathOrParent, timestamp: Long): Unit =
    removeColumn(key, columnPathOrParent, timestamp, consistencyLevel)

  def removeColumn(key: String, columnPathOrParent: ColumnPathOrParent, timestamp: Long, consistencyLevel: Int): Unit =
    client.remove(keyspace, key, columnPathOrParent, timestamp, consistencyLevel)

  def getSuperRange(key: String, columnFamily: String, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int): List[SuperColumn] =
    getSuperRange(key, columnFamily, start, end, ascending, count, consistencyLevel)

  def getSuperRange(key: String, columnFamily: String, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int, consistencyLevel: Int): List[SuperColumn] =
    client.get_slice_super(keyspace, key, columnFamily, start, end, ascending, count, consistencyLevel).toList

  def getSuperRange(key: String, columnFamily: String, superColNames: List[Array[Byte]]): List[SuperColumn] =
    getSuperRange(key, columnFamily, superColNames, consistencyLevel)

  def getSuperRange(key: String, columnFamily: String, superColNames: List[Array[Byte]], consistencyLevel: Int): List[SuperColumn] =
    client.get_slice_super_by_names(keyspace, key, columnFamily, superColNames.asJava, consistencyLevel).toList

  def getSuperColumn(key: String, superColumnPath: SuperColumnPath): Option[SuperColumn] =
    getSuperColumn(key, superColumnPath, consistencyLevel)

  def getSuperColumn(key: String, superColumnPath: SuperColumnPath, consistencyLevel: Int): Option[SuperColumn] =
    client.get_super_column(keyspace, key, superColumnPath, consistencyLevel)

  def insertSuperColumn(batch: BatchMutationSuper): Unit =
    insertSuperColumn(batch, consistencyLevel)

  def insertSuperColumn(batch: BatchMutationSuper, consistencyLevel: Int): Unit =
    client.batch_insert_super_column(keyspace, batch, consistencyLevel)

  def keys(columnFamily: String, startsWith: String, stopsAt: String, maxResults: Option[Int]): List[String] =
    client.get_key_range(keyspace, columnFamily, startsWith, stopsAt, maxResults.getOrElse(-1)).toList
}

class CassandraSessionPool[T <: TTransport](
  space: String,
  transportPool: Pool[T],
  inputProtocol: Protocol,
  outputProtocol: Protocol,
  consistency: Int) extends Closeable with Logging {

  def this(space: String, transportPool: Pool[T], ioProtocol: Protocol, consistency: Int) =
    this(space, transportPool, ioProtocol, ioProtocol, consistency)

  def newSession: CassandraSession = newSession(consistency)

  def newSession(consistencyLevel: Int): CassandraSession = {
    val socket = transportPool.borrowObject
    val cassandraClient = new Cassandra.Client(inputProtocol(socket), outputProtocol(socket))
    val cassandraSchema = cassandraClient.describe_keyspace(space)
    new CassandraSession {
      val keyspace = space
      val client = cassandraClient
      val obtainedAt = System.currentTimeMillis
      val consistencyLevel = consistency
      val schema = cassandraSchema
      log.debug("Creating %s", toString)

      def flush = socket.flush
      def close = transportPool.returnObject(socket)
      override def toString = "[CassandraSession]\n\tkeyspace = " + keyspace + "\n\tschema = " + schema
    }
  }

  def withSession[T](body: CassandraSession => T) = {
    val session = newSession(consistency)
    try {
      val result = body(session)
      session.flush
      result
    } finally {
      session.close
    }
  }

  def close = transportPool.close
}

sealed abstract class Protocol(val factory: TProtocolFactory) {
  def apply(transport: TTransport) = factory.getProtocol(transport)
}

object Protocol {
  object Binary extends Protocol(new TBinaryProtocol.Factory)
  object SimpleJSON extends Protocol(new TSimpleJSONProtocol.Factory)
  object JSON extends Protocol(new TJSONProtocol.Factory)
}
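Taken together, the pieces in this new file compose as follows: a `CassandraSessionPool` borrows a Thrift transport from a `Pool`, wraps it in a `Cassandra.Client`, and hands out `CassandraSession` instances whose symbolic operators (`/`, `|`, `|#`, `++|`, `--`) mirror the named methods beneath them. A rough usage sketch built only from the APIs defined in this file; the hostname, port, and key values are illustrative:

    val sessions = new CassandraSessionPool(
      "akka",                                       // keyspace
      StackPool(SocketProvider("127.0.0.1", 9160)), // pooled TSocket transports
      Protocol.Binary,                              // Thrift wire protocol
      1)                                            // consistency level

    // withSession borrows a session, flushes on success, and always returns it
    val count: Int = sessions.withSession { s =>
      s.getColumnCount("some-key", new ColumnParent("map", null))
    }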
@@ -4,42 +4,46 @@

package se.scalablesolutions.akka.kernel.state

import java.io.File
import java.io.{Flushable, Closeable}

import kernel.util.Logging
import serialization.{Serializer, Serializable, SerializationProtocol}
import util.Logging
import util.Helpers._
import serialization.Serializer
import kernel.Kernel.config

import org.apache.cassandra.config.DatabaseDescriptor
import org.apache.cassandra.db.ColumnFamily
import org.apache.cassandra.service._

import org.apache.thrift.server.TThreadPoolServer
import org.apache.thrift.protocol.TBinaryProtocol
import org.apache.thrift.transport.TServerSocket
import org.apache.thrift.transport.TTransportFactory
import org.apache.thrift.TProcessorFactory
import org.apache.thrift.transport._
import org.apache.thrift.protocol._

/**
 * NOTE: requires command line options:
 * <br/>
 * <code>-Dcassandra -Dstorage-config=config/ -Dpidfile=akka.pid</code>
 * <p/>
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
object CassandraStorage extends Logging {
  val TABLE_NAME = "akka"
  val MAP_COLUMN_FAMILY = "map"
  val VECTOR_COLUMN_FAMILY = "vector"
  val REF_COLUMN_FAMILY = "ref:item"
  val KEYSPACE = "akka"
  val MAP_COLUMN_PARENT = new ColumnParent("map", null)
  val VECTOR_COLUMN_PARENT = new ColumnParent("vector", null)
  val REF_COLUMN_PARENT = new ColumnParent("ref", null)
  val REF_KEY = "item".getBytes("UTF-8")

  val CASSANDRA_SERVER_HOSTNAME = config.getString("akka.storage.cassandra.hostname", "127.0.0.1")
  val CASSANDRA_SERVER_PORT = config.getInt("akka.storage.cassandra.port", 9160)
  val CONSISTENCY_LEVEL = config.getInt("akka.storage.cassandra.consistency-level", 1)
  val IS_ASCENDING = true

  val RUN_THRIFT_SERVICE = kernel.Kernel.config.getBool("akka.storage.cassandra.thrift-server.service", false)
  val BLOCKING_CALL = {
    if (kernel.Kernel.config.getBool("akka.storage.cassandra.blocking", true)) 0
    else 1
  }

  @volatile private[this] var isRunning = false
  private[this] val protocol: Protocol = Protocol.Binary
  /* {
    config.getString("akka.storage.cassandra.protocol", "binary") match {
      case "binary" => Protocol.Binary
      case "json" => Protocol.JSON
      case "simple-json" => Protocol.SimpleJSON
      case unknown => throw new UnsupportedOperationException("Unknown storage serialization protocol [" + unknown + "]")
    }
  }
  */

  private[this] val serializer: Serializer = {
    kernel.Kernel.config.getString("akka.storage.cassandra.storage-format", "java") match {
      case "scala-json" => Serializer.ScalaJSON

@@ -51,193 +55,400 @@ object CassandraStorage extends Logging {
      case unknown => throw new UnsupportedOperationException("Unknown storage serialization protocol [" + unknown + "]")
    }
  }

  // TODO: is this server thread-safe, or does it need to be wrapped up in an actor?
  private[this] val server = classOf[CassandraServer].newInstance.asInstanceOf[CassandraServer]

  private[this] var thriftServer: CassandraThriftServer = _

  private[this] var sessions: Option[CassandraSessionPool[_]] = None

  def start = synchronized {
    if (!isRunning) {
      try {
        server.start
        sessions = Some(new CassandraSessionPool(
          KEYSPACE,
          StackPool(SocketProvider(CASSANDRA_SERVER_HOSTNAME, CASSANDRA_SERVER_PORT)),
          protocol,
          CONSISTENCY_LEVEL))
        log.info("Cassandra persistent storage has started up successfully");
      } catch {
        case e =>
          log.error("Could not start up Cassandra persistent storage")
          throw e
      }
      if (RUN_THRIFT_SERVICE) {
        thriftServer = new CassandraThriftServer(server)
        thriftServer.start
      }
      isRunning
    }
  }

  def stop = if (isRunning) {
    //server.storageService.shutdown
    if (RUN_THRIFT_SERVICE) thriftServer.stop
  def stop = synchronized {
    if (isRunning && sessions.isDefined) sessions.get.close
  }

  // ===============================================================
  // For Ref
  // ===============================================================

  def insertRefStorageFor(name: String, element: AnyRef) = {
    server.insert(
      TABLE_NAME,
      name,
      REF_COLUMN_FAMILY,
      serializer.out(element),
      System.currentTimeMillis,
      BLOCKING_CALL)
  }

  def getRefStorageFor(name: String): Option[AnyRef] = {
    try {
      val column = server.get_column(TABLE_NAME, name, REF_COLUMN_FAMILY)
      Some(serializer.in(column.value, None))
    } catch {
      case e =>
        e.printStackTrace
        None
  def insertRefStorageFor(name: String, element: AnyRef) = if (sessions.isDefined) {
    sessions.get.withSession {
      _ ++| (name,
        new ColumnPath(REF_COLUMN_PARENT.getColumn_family, null, REF_KEY),
        serializer.out(element),
        System.currentTimeMillis,
        CONSISTENCY_LEVEL)
    }
  }
  } else throw new IllegalStateException("CassandraStorage is not started")

  // ===============================================================
  // For Vector
  // ===============================================================

  def insertVectorStorageEntryFor(name: String, element: AnyRef) = {
    server.insert(
      TABLE_NAME,
      name,
      VECTOR_COLUMN_FAMILY + ":" + getVectorStorageSizeFor(name),
      serializer.out(element),
      System.currentTimeMillis,
      BLOCKING_CALL)
  }

  def getVectorStorageEntryFor(name: String, index: Int): AnyRef = {
  def getRefStorageFor(name: String): Option[AnyRef] = if (sessions.isDefined) {
    try {
      val column = server.get_column(TABLE_NAME, name, VECTOR_COLUMN_FAMILY + ":" + index)
      serializer.in(column.value, None)
    } catch {
      case e =>
        e.printStackTrace
        throw new Predef.NoSuchElementException(e.getMessage)
    }
  }

  def getVectorStorageRangeFor(name: String, start: Int, count: Int): List[AnyRef] =
    server.get_slice(TABLE_NAME, name, VECTOR_COLUMN_FAMILY, IS_ASCENDING, count)
      .toArray.toList.asInstanceOf[List[Tuple2[String, AnyRef]]].map(tuple => tuple._2)

  def getVectorStorageSizeFor(name: String): Int =
    server.get_column_count(TABLE_NAME, name, VECTOR_COLUMN_FAMILY)

  // ===============================================================
  // For Map
  // ===============================================================

  def insertMapStorageEntryFor(name: String, key: String, value: AnyRef) = {
    server.insert(
      TABLE_NAME,
      name,
      MAP_COLUMN_FAMILY + ":" + key,
      serializer.out(value),
      System.currentTimeMillis,
      BLOCKING_CALL)
  }

  def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[String, AnyRef]]) = {
    import java.util.{Map, HashMap, List, ArrayList}
    val columns: Map[String, List[column_t]] = new HashMap
    for (entry <- entries) {
      val cls: List[column_t] = new ArrayList
      cls.add(new column_t(entry._1, serializer.out(entry._2), System.currentTimeMillis))
      columns.put(MAP_COLUMN_FAMILY, cls)
    }
    server.batch_insert(new batch_mutation_t(
      TABLE_NAME,
      name,
      columns),
      BLOCKING_CALL)
  }

  def getMapStorageEntryFor(name: String, key: AnyRef): Option[AnyRef] = {
    try {
      val column = server.get_column(TABLE_NAME, name, MAP_COLUMN_FAMILY + ":" + key)
      Some(serializer.in(column.value, None))
      val column: Option[Column] = sessions.get.withSession {
        _ | (name, new ColumnPath(REF_COLUMN_PARENT.getColumn_family, null, REF_KEY))
      }
      if (column.isDefined) Some(serializer.in(column.get.value, None))
      else None
    } catch {
      case e =>
        e.printStackTrace
        None
    }
  }
  } else throw new IllegalStateException("CassandraStorage is not started")

  def getMapStorageFor(name: String): List[Tuple2[String, AnyRef]] = {
    val columns = server.get_columns_since(TABLE_NAME, name, MAP_COLUMN_FAMILY, -1)
  // ===============================================================
  // For Vector
  // ===============================================================

  def insertVectorStorageEntryFor(name: String, element: AnyRef) = if (sessions.isDefined) {
    sessions.get.withSession {
      _ ++| (name,
        new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family, null, intToBytes(getVectorStorageSizeFor(name))),
        serializer.out(element),
        System.currentTimeMillis,
        CONSISTENCY_LEVEL)
    }
  } else throw new IllegalStateException("CassandraStorage is not started")

  def getVectorStorageEntryFor(name: String, index: Int): AnyRef = if (sessions.isDefined) {
    val column: Option[Column] = sessions.get.withSession {
      _ | (name, new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family, null, intToBytes(index)))
    }
    if (column.isDefined) serializer.in(column.get.value, None)
    else throw new NoSuchElementException("No element for vector [" + name + "] and index [" + index + "]")
  } else throw new IllegalStateException("CassandraStorage is not started")

  def getVectorStorageRangeFor(name: String, start: Option[Int], finish: Option[Int], count: Int): List[AnyRef] = if (sessions.isDefined) {
    val startBytes = if (start.isDefined) intToBytes(start.get) else null
    val finishBytes = if (finish.isDefined) intToBytes(finish.get) else null
    val columns: List[Column] = sessions.get.withSession {
      _ / (name,
        VECTOR_COLUMN_PARENT,
        startBytes, finishBytes,
        IS_ASCENDING,
        count,
        CONSISTENCY_LEVEL)
    }
    columns.map(column => serializer.in(column.value, None))
  } else throw new IllegalStateException("CassandraStorage is not started")

  def getVectorStorageSizeFor(name: String): Int = if (sessions.isDefined) {
    sessions.get.withSession {
      _ |# (name, VECTOR_COLUMN_PARENT)
    }
  } else throw new IllegalStateException("CassandraStorage is not started")

  // ===============================================================
  // For Map
  // ===============================================================

  def insertMapStorageEntryFor(name: String, key: AnyRef, element: AnyRef) = if (sessions.isDefined) {
    sessions.get.withSession {
      _ ++| (name,
        new ColumnPath(MAP_COLUMN_PARENT.getColumn_family, null, serializer.out(key)),
        serializer.out(element),
        System.currentTimeMillis,
        CONSISTENCY_LEVEL)
    }
  } else throw new IllegalStateException("CassandraStorage is not started")

  def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[AnyRef, AnyRef]]) = if (sessions.isDefined) {
    val cf2columns: java.util.Map[String, java.util.List[Column]] = new java.util.HashMap
    for (entry <- entries) {
      val columns: java.util.List[Column] = new java.util.ArrayList
      columns.add(new Column(serializer.out(entry._1), serializer.out(entry._2), System.currentTimeMillis))
      cf2columns.put(MAP_COLUMN_PARENT.getColumn_family, columns)
    }
    sessions.get.withSession {
      _ ++| (new BatchMutation(name, cf2columns), CONSISTENCY_LEVEL)
    }
  } else throw new IllegalStateException("CassandraStorage is not started")

  def getMapStorageEntryFor(name: String, key: AnyRef): Option[AnyRef] = if (sessions.isDefined) {
    try {
      val column: Option[Column] = sessions.get.withSession {
        _ | (name, new ColumnPath(MAP_COLUMN_PARENT.getColumn_family, null, serializer.out(key)))
      }
      if (column.isDefined) Some(serializer.in(column.get.value, None))
      else None
    } catch {
      case e =>
        e.printStackTrace
        None
    }
  } else throw new IllegalStateException("CassandraStorage is not started")

  def getMapStorageFor(name: String): List[Tuple2[AnyRef, AnyRef]] = if (sessions.isDefined) {
    throw new UnsupportedOperationException
    /*
    val columns = server.get_columns_since(name, MAP_COLUMN_FAMILY, -1)
      .toArray.toList.asInstanceOf[List[org.apache.cassandra.service.column_t]]
    for {
      column <- columns
      col = (column.columnName, serializer.in(column.value, None))
      col = (column.columnName, column.value)
    } yield col
  }

  def getMapStorageSizeFor(name: String): Int =
    server.get_column_count(TABLE_NAME, name, MAP_COLUMN_FAMILY)
    */
  } else throw new IllegalStateException("CassandraStorage is not started")

  def removeMapStorageFor(name: String) =
    server.remove(TABLE_NAME, name, MAP_COLUMN_FAMILY, System.currentTimeMillis, BLOCKING_CALL)

  def getMapStorageRangeFor(name: String, start: Int, count: Int): List[Tuple2[String, AnyRef]] = {
    server.get_slice(TABLE_NAME, name, MAP_COLUMN_FAMILY, IS_ASCENDING, count)
      .toArray.toList.asInstanceOf[List[Tuple2[String, AnyRef]]]
  }
}

class CassandraThriftServer(server: CassandraServer) extends Logging {
  case object Start
  case object Stop

  private[this] val serverEngine: TThreadPoolServer = try {
    val pidFile = kernel.Kernel.config.getString("akka.storage.cassandra.thrift-server.pidfile", "akka.pid")
    if (pidFile != null) new File(pidFile).deleteOnExit();
    val listenPort = DatabaseDescriptor.getThriftPort

    val processor = new Cassandra.Processor(server)
    val tServerSocket = new TServerSocket(listenPort)
    val tProtocolFactory = new TBinaryProtocol.Factory

    val options = new TThreadPoolServer.Options
    options.minWorkerThreads = 64
    new TThreadPoolServer(new TProcessorFactory(processor),
      tServerSocket,
      new TTransportFactory,
      new TTransportFactory,
      tProtocolFactory,
      tProtocolFactory,
      options)
  } catch {
    case e =>
      log.error("Could not start up Cassandra thrift service")
      throw e
  }

  import scala.actors.Actor._
  private[this] val serverDaemon = actor {
    receive {
      case Start =>
        serverEngine.serve
        log.info("Cassandra thrift service has started up successfully")
      case Stop =>
        log.info("Cassandra thrift service is shutting down...")
        serverEngine.stop
  def getMapStorageSizeFor(name: String): Int = if (sessions.isDefined) {
    sessions.get.withSession {
      _ |# (name, MAP_COLUMN_PARENT)
    }
  }
  } else throw new IllegalStateException("CassandraStorage is not started")

  def start = serverDaemon ! Start
  def stop = serverDaemon ! Stop
  def removeMapStorageFor(name: String): Unit = removeMapStorageFor(name, null)

  def removeMapStorageFor(name: String, key: AnyRef): Unit = if (sessions.isDefined) {
    val keyBytes = if (key == null) null else serializer.out(key)
    sessions.get.withSession {
      _ -- (name,
        new ColumnPathOrParent(MAP_COLUMN_PARENT.getColumn_family, null, keyBytes),
        System.currentTimeMillis,
        CONSISTENCY_LEVEL)
    }
  } else throw new IllegalStateException("CassandraStorage is not started")

  def getMapStorageRangeFor(name: String, start: Option[AnyRef], finish: Option[AnyRef], count: Int):
    List[Tuple2[AnyRef, AnyRef]] = if (sessions.isDefined) {
    val startBytes = if (start.isDefined) serializer.out(start.get) else null
    val finishBytes = if (finish.isDefined) serializer.out(finish.get) else null
    val columns: List[Column] = sessions.get.withSession {
      _ / (name, MAP_COLUMN_PARENT, startBytes, finishBytes, IS_ASCENDING, count, CONSISTENCY_LEVEL)
    }
    columns.map(column => (column.name, serializer.in(column.value, None)))
  } else throw new IllegalStateException("CassandraStorage is not started")
}

/**
 * NOTE: requires command line options:
 * <br/>
 * <code>-Dcassandra -Dstorage-config=config/ -Dpidfile=akka.pid</code>
 * <p/>
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 *
object EmbeddedCassandraStorage extends Logging {
  val KEYSPACE = "akka"
  val MAP_COLUMN_FAMILY = "map"
  val VECTOR_COLUMN_FAMILY = "vector"
  val REF_COLUMN_FAMILY = "ref:item"

  val IS_ASCENDING = true

  val RUN_THRIFT_SERVICE = kernel.Kernel.config.getBool("akka.storage.cassandra.thrift-server.service", false)
  val CONSISTENCY_LEVEL = {
    if (kernel.Kernel.config.getBool("akka.storage.cassandra.blocking", true)) 0
    else 1
  }

  @volatile private[this] var isRunning = false
  private[this] val serializer: Serializer = {
    kernel.Kernel.config.getString("akka.storage.cassandra.storage-format", "java") match {
      case "scala-json" => Serializer.ScalaJSON
      case "java-json" => Serializer.JavaJSON
      case "protobuf" => Serializer.Protobuf
      case "java" => Serializer.Java
      case "sbinary" => throw new UnsupportedOperationException("SBinary serialization protocol is not yet supported for storage")
      case "avro" => throw new UnsupportedOperationException("Avro serialization protocol is not yet supported for storage")
      case unknown => throw new UnsupportedOperationException("Unknown storage serialization protocol [" + unknown + "]")
    }
  }

  // TODO: is this server thread-safe, or does it need to be wrapped up in an actor?
  private[this] val server = classOf[CassandraServer].newInstance.asInstanceOf[CassandraServer]

  private[this] var thriftServer: CassandraThriftServer = _

  def start = synchronized {
    if (!isRunning) {
      try {
        server.start
        log.info("Cassandra persistent storage has started up successfully");
      } catch {
        case e =>
          log.error("Could not start up Cassandra persistent storage")
          throw e
      }
      if (RUN_THRIFT_SERVICE) {
        thriftServer = new CassandraThriftServer(server)
        thriftServer.start
      }
      isRunning
    }
  }

  def stop = if (isRunning) {
    //server.storageService.shutdown
    if (RUN_THRIFT_SERVICE) thriftServer.stop
  }

  // ===============================================================
  // For Ref
  // ===============================================================

  def insertRefStorageFor(name: String, element: AnyRef) = {
    server.insert(
      KEYSPACE,
      name,
      REF_COLUMN_FAMILY,
      element,
      System.currentTimeMillis,
      CONSISTENCY_LEVEL)
  }

  def getRefStorageFor(name: String): Option[AnyRef] = {
    try {
      val column = server.get_column(KEYSPACE, name, REF_COLUMN_FAMILY)
      Some(serializer.in(column.value, None))
    } catch {
      case e =>
        e.printStackTrace
        None
    }
  }

  // ===============================================================
  // For Vector
  // ===============================================================

  def insertVectorStorageEntryFor(name: String, element: AnyRef) = {
    server.insert(
      KEYSPACE,
      name,
      VECTOR_COLUMN_FAMILY + ":" + getVectorStorageSizeFor(name),
      element,
      System.currentTimeMillis,
      CONSISTENCY_LEVEL)
  }

  def getVectorStorageEntryFor(name: String, index: Int): AnyRef = {
    try {
      val column = server.get_column(KEYSPACE, name, VECTOR_COLUMN_FAMILY + ":" + index)
      serializer.in(column.value, None)
    } catch {
      case e =>
        e.printStackTrace
        throw new Predef.NoSuchElementException(e.getMessage)
    }
  }

  def getVectorStorageRangeFor(name: String, start: Int, count: Int): List[AnyRef] =
    server.get_slice(KEYSPACE, name, VECTOR_COLUMN_FAMILY, IS_ASCENDING, count)
      .toArray.toList.asInstanceOf[List[Tuple2[String, AnyRef]]].map(tuple => tuple._2)

  def getVectorStorageSizeFor(name: String): Int =
    server.get_column_count(KEYSPACE, name, VECTOR_COLUMN_FAMILY)

  // ===============================================================
  // For Map
  // ===============================================================

  def insertMapStorageEntryFor(name: String, key: String, value: AnyRef) = {
    server.insert(
      KEYSPACE, name,
      MAP_COLUMN_FAMILY + ":" + key,
      serializer.out(value),
      System.currentTimeMillis,
      CONSISTENCY_LEVEL)
  }

  def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[String, AnyRef]]) = {
    import java.util.{Map, HashMap, List, ArrayList}
    val columns: Map[String, List[column_t]] = new HashMap
    for (entry <- entries) {
      val cls: List[column_t] = new ArrayList
      cls.add(new column_t(entry._1, serializer.out(entry._2), System.currentTimeMillis))
      columns.put(MAP_COLUMN_FAMILY, cls)
    }
    server.batch_insert(new BatchMutation(
      KEYSPACE, name,
      columns),
      CONSISTENCY_LEVEL)
  }

  def getMapStorageEntryFor(name: String, key: AnyRef): Option[AnyRef] = {
    try {
      val column = server.get_column(KEYSPACE, name, MAP_COLUMN_FAMILY + ":" + key)
      Some(serializer.in(column.value, None))
    } catch {
      case e =>
        e.printStackTrace
        None
    }
  }

  def getMapStorageFor(name: String): List[Tuple2[String, AnyRef]] = {
    val columns = server.get_columns_since(KEYSPACE, name, MAP_COLUMN_FAMILY, -1)
      .toArray.toList.asInstanceOf[List[org.apache.cassandra.service.column_t]]
    for {
      column <- columns
      col = (column.columnName, serializer.in(column.value, None))
    } yield col
  }

  def getMapStorageSizeFor(name: String): Int =
    server.get_column_count(KEYSPACE, name, MAP_COLUMN_FAMILY)

  def removeMapStorageFor(name: String) =
    server.remove(KEYSPACE, name, MAP_COLUMN_FAMILY, System.currentTimeMillis, CONSISTENCY_LEVEL)

  def getMapStorageRangeFor(name: String, start: Int, count: Int): List[Tuple2[String, AnyRef]] = {
    server.get_slice(KEYSPACE, name, MAP_COLUMN_FAMILY, IS_ASCENDING, count)
      .toArray.toList.asInstanceOf[List[Tuple2[String, AnyRef]]]
  }
}

class CassandraThriftServer(server: CassandraServer) extends Logging {
  case object Start
  case object Stop

  private[this] val serverEngine: TThreadPoolServer = try {
    val pidFile = kernel.Kernel.config.getString("akka.storage.cassandra.thrift-server.pidfile", "akka.pid")
    if (pidFile != null) new File(pidFile).deleteOnExit();
    val listenPort = DatabaseDescriptor.getThriftPort

    val processor = new Cassandra.Processor(server)
    val tServerSocket = new TServerSocket(listenPort)
    val tProtocolFactory = new TBinaryProtocol.Factory

    val options = new TThreadPoolServer.Options
    options.minWorkerThreads = 64
    new TThreadPoolServer(new TProcessorFactory(processor),
      tServerSocket,
      new TTransportFactory,
      new TTransportFactory,
      tProtocolFactory,
      tProtocolFactory,
      options)
  } catch {
    case e =>
      log.error("Could not start up Cassandra thrift service")
      throw e
  }

  import scala.actors.Actor._
  private[this] val serverDaemon = actor {
    receive {
      case Start =>
        serverEngine.serve
        log.info("Cassandra thrift service has started up successfully")
      case Stop =>
        log.info("Cassandra thrift service is shutting down...")
        serverEngine.stop
    }
  }

  def start = serverDaemon ! Start
  def stop = serverDaemon ! Stop
}
*/
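In the rewritten object every accessor follows the same shape: guard on `sessions.isDefined`, run one operation through `withSession`, and address data with `ColumnPath`/`ColumnParent` coordinates instead of the old `"family:column"` strings. A rough round-trip sketch for the Ref storage, using only the methods defined above; the uuid-style key is illustrative:

    CassandraStorage.start                                           // boots Cassandra, opens the session pool
    CassandraStorage.insertRefStorageFor("actor-uuid-1", "hello")    // serialized and written under the ref column
    val restored = CassandraStorage.getRefStorageFor("actor-uuid-1") // => Some("hello") after deserialization
    CassandraStorage.stop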
kernel/src/main/scala/state/Pool.scala (new file, 96 lines)
@@ -0,0 +1,96 @@
/**
 * Copyright (C) 2009 Scalable Solutions.
 */

package se.scalablesolutions.akka.kernel.state

import org.apache.commons.pool._
import org.apache.commons.pool.impl._

import org.apache.thrift.transport._

trait Pool[T] extends java.io.Closeable {
  def borrowObject: T
  def returnObject(t: T): Unit
  def invalidateObject(t: T): Unit
  def addObject: Unit
  def getNumIdle: Int
  def getNumActive: Int
  def clear: Unit
  def setFactory(factory: PoolItemFactory[T]): Unit
}

trait PoolFactory[T] {
  def createPool: Pool[T]
}

trait PoolItemFactory[T] {
  def makeObject: T
  def destroyObject(t: T): Unit
  def validateObject(t: T): Boolean
  def activateObject(t: T): Unit
  def passivateObject(t: T): Unit
}

trait PoolBridge[T, OP <: ObjectPool] extends Pool[T] {
  val impl: OP
  override def borrowObject: T = impl.borrowObject.asInstanceOf[T]
  override def returnObject(t: T) = impl.returnObject(t)
  override def invalidateObject(t: T) = impl.invalidateObject(t)
  override def addObject = impl.addObject
  override def getNumIdle: Int = impl.getNumIdle
  override def getNumActive: Int = impl.getNumActive
  override def clear: Unit = impl.clear
  override def close: Unit = impl.close
  override def setFactory(factory: PoolItemFactory[T]) = impl.setFactory(toPoolableObjectFactory(factory))

  def toPoolableObjectFactory[T](pif: PoolItemFactory[T]) = new PoolableObjectFactory {
    def makeObject: Object = pif.makeObject.asInstanceOf[Object]
    def destroyObject(o: Object): Unit = pif.destroyObject(o.asInstanceOf[T])
    def validateObject(o: Object): Boolean = pif.validateObject(o.asInstanceOf[T])
    def activateObject(o: Object): Unit = pif.activateObject(o.asInstanceOf[T])
    def passivateObject(o: Object): Unit = pif.passivateObject(o.asInstanceOf[T])
  }
}

object StackPool {
  def apply[T](factory: PoolItemFactory[T]) = new PoolBridge[T, StackObjectPool] {
    val impl = new StackObjectPool(toPoolableObjectFactory(factory))
  }

  def apply[T](factory: PoolItemFactory[T], maxIdle: Int) = new PoolBridge[T, StackObjectPool] {
    val impl = new StackObjectPool(toPoolableObjectFactory(factory), maxIdle)
  }

  def apply[T](factory: PoolItemFactory[T], maxIdle: Int, initIdleCapacity: Int) = new PoolBridge[T, StackObjectPool] {
    val impl = new StackObjectPool(toPoolableObjectFactory(factory), maxIdle, initIdleCapacity)
  }
}

object SoftRefPool {
  def apply[T](factory: PoolItemFactory[T]) = new PoolBridge[T, SoftReferenceObjectPool] {
    val impl = new SoftReferenceObjectPool(toPoolableObjectFactory(factory))
  }

  def apply[T](factory: PoolItemFactory[T], initSize: Int) = new PoolBridge[T, SoftReferenceObjectPool] {
    val impl = new SoftReferenceObjectPool(toPoolableObjectFactory(factory), initSize)
  }
}

trait TransportFactory[T <: TTransport] extends PoolItemFactory[T] {
  def createTransport: T
  def makeObject: T = createTransport
  def destroyObject(transport: T): Unit = transport.close
  def validateObject(transport: T) = transport.isOpen
  def activateObject(transport: T): Unit = if (!transport.isOpen) transport.open else ()
  def passivateObject(transport: T): Unit = transport.flush
}

case class SocketProvider(val host: String, val port: Int) extends TransportFactory[TSocket] {
  def createTransport = {
    val t = new TSocket(host, port)
    t.open
    t
  }
}
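`PoolBridge` adapts Commons Pool's untyped `ObjectPool` to the typed `Pool[T]` trait, while `TransportFactory` maps the pool lifecycle callbacks onto Thrift transport lifecycle: validate is `isOpen`, passivate is `flush`, destroy is `close`. A small usage sketch built from the factories defined above; host and port are illustrative:

    val pool: Pool[TSocket] = StackPool(SocketProvider("127.0.0.1", 9160))

    val socket = pool.borrowObject   // opens (or reuses) a connected TSocket
    try {
      // ... use the transport ...
    } finally {
      pool.returnObject(socket)      // flushed via passivateObject, parked for reuse
    }
    pool.close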
@@ -37,7 +37,7 @@ object TransactionalState extends TransactionalState
 * </pre>
 */
class TransactionalState {
  def newPersistentMap(config: PersistentStorageConfig): TransactionalMap[String, AnyRef] = config match {
  def newPersistentMap(config: PersistentStorageConfig): TransactionalMap[AnyRef, AnyRef] = config match {
    case CassandraStorageConfig() => new CassandraPersistentTransactionalMap
    case TerracottaStorageConfig() => throw new UnsupportedOperationException
    case TokyoCabinetStorageConfig() => throw new UnsupportedOperationException

@@ -174,7 +174,7 @@ abstract class PersistentTransactionalMap[K, V] extends TransactionalMap[K, V] {
  // FIXME: need to handle remove in another changeSet
  protected[kernel] val changeSet = new HashMap[K, V]

  def getRange(start: Int, count: Int)
  def getRange(start: Option[AnyRef], count: Int)

  // ---- For Transactional ----
  override def begin = {}

@@ -188,11 +188,6 @@ abstract class PersistentTransactionalMap[K, V] extends TransactionalMap[K, V] {
    None // always return None to speed up writes (else need to go to DB to get
  }

  override def remove(key: K) = {
    verifyTransaction
    changeSet -= key
  }

  override def -=(key: K) = remove(key)

  override def update(key: K, value: V) = put(key, value)

@@ -203,12 +198,21 @@ abstract class PersistentTransactionalMap[K, V] extends TransactionalMap[K, V] {
 *
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
class CassandraPersistentTransactionalMap extends PersistentTransactionalMap[String, AnyRef] {
class CassandraPersistentTransactionalMap extends PersistentTransactionalMap[AnyRef, AnyRef] {

  override def getRange(start: Int, count: Int) = {
  override def remove(key: AnyRef) = {
    verifyTransaction
    if (changeSet.contains(key)) changeSet -= key
    else CassandraStorage.removeMapStorageFor(uuid, key)
  }

  override def getRange(start: Option[AnyRef], count: Int) =
    getRange(start, None, count)

  def getRange(start: Option[AnyRef], finish: Option[AnyRef], count: Int) = {
    verifyTransaction
    try {
      CassandraStorage.getMapStorageRangeFor(uuid, start, count)
      CassandraStorage.getMapStorageRangeFor(uuid, start, finish, count)
    } catch {
      case e: Exception => Nil
    }

@@ -230,7 +234,7 @@ class CassandraPersistentTransactionalMap extends PersistentTransactionalMap[Str
    }
  }

  override def contains(key: String): Boolean = {
  override def contains(key: AnyRef): Boolean = {
    try {
      verifyTransaction
      CassandraStorage.getMapStorageEntryFor(uuid, key).isDefined

@@ -249,7 +253,7 @@ class CassandraPersistentTransactionalMap extends PersistentTransactionalMap[Str
  }

  // ---- For scala.collection.mutable.Map ----
  override def get(key: String): Option[AnyRef] = {
  override def get(key: AnyRef): Option[AnyRef] = {
    verifyTransaction
    // if (changeSet.contains(key)) changeSet.get(key)
    // else {

@@ -262,16 +266,16 @@ class CassandraPersistentTransactionalMap extends PersistentTransactionalMap[Str
    //}
  }

  override def elements: Iterator[Tuple2[String, AnyRef]] = {
  override def elements: Iterator[Tuple2[AnyRef, AnyRef]] = {
    //verifyTransaction
    new Iterator[Tuple2[String, AnyRef]] {
      private val originalList: List[Tuple2[String, AnyRef]] = try {
    new Iterator[Tuple2[AnyRef, AnyRef]] {
      private val originalList: List[Tuple2[AnyRef, AnyRef]] = try {
        CassandraStorage.getMapStorageFor(uuid)
      } catch {
        case e: Throwable => Nil
      }
      private var elements = originalList.reverse
      override def next: Tuple2[String, AnyRef] = synchronized {
      override def next: Tuple2[AnyRef, AnyRef] = synchronized {
        val element = elements.head
        elements = elements.tail
        element

@@ -391,9 +395,12 @@ class CassandraPersistentTransactionalVector extends PersistentTransactionalVect
    else CassandraStorage.getVectorStorageEntryFor(uuid, index)
  }

  override def getRange(start: Int, count: Int): List[AnyRef] = {
  override def getRange(start: Int, count: Int): List[AnyRef] =
    getRange(Some(start), None, count)

  def getRange(start: Option[Int], finish: Option[Int], count: Int): List[AnyRef] = {
    verifyTransaction
    CassandraStorage.getVectorStorageRangeFor(uuid, start, count)
    CassandraStorage.getVectorStorageRangeFor(uuid, start, finish, count)
  }

  override def length: Int = {

@@ -478,4 +485,4 @@ class CassandraPersistentTransactionalRef extends TransactionalRef[AnyRef] {
    if (ref.isDefined) ref
    else default
  }
}
}
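With the key type widened from `String` to `AnyRef`, any serializable object can now index a persistent map. A brief sketch of how the factory above is meant to be used; note that the `verifyTransaction` calls mean these operations only work inside a running transaction, and `put`/`get` are assumed from the `Map` contract the class overrides:

    // Inside a transactional (e.g. transactional actor) context:
    val map: TransactionalMap[AnyRef, AnyRef] =
      TransactionalState.newPersistentMap(CassandraStorageConfig())

    map.put("user:1", "Jonas")            // keys and values are AnyRef now
    val name: Option[AnyRef] = map.get("user:1")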
@@ -4,15 +4,9 @@

package se.scalablesolutions.akka.kernel.util

import java.io.UnsupportedEncodingException
import java.security.{NoSuchAlgorithmException, MessageDigest}
import java.security.MessageDigest
import java.util.concurrent.locks.ReentrantReadWriteLock

import scala.actors._
import scala.actors.Actor._

import net.lag.logging.Logger

class SystemFailure(cause: Throwable) extends RuntimeException(cause)

/**

@@ -20,7 +14,18 @@ class SystemFailure(cause: Throwable) extends RuntimeException(cause)
 */
object Helpers extends Logging {

  def getDigestFor(s: String) = {
  implicit def null2Option[T](t: T): Option[T] = if (t != null) Some(t) else None

  def intToBytes(value: Int): Array[Byte] = {
    val bytes = new Array[Byte](4)
    bytes(0) = (value >>> 24).asInstanceOf[Byte]
    bytes(1) = (value >>> 16).asInstanceOf[Byte]
    bytes(2) = (value >>> 8).asInstanceOf[Byte]
    bytes(3) = value.asInstanceOf[Byte]
    bytes
  }

  def getMD5For(s: String) = {
    val digest = MessageDigest.getInstance("MD5")
    digest.update(s.getBytes("ASCII"))
    val bytes = digest.digest

@@ -59,51 +64,5 @@ object Helpers extends Logging {
      }
    }
  }

  // ================================================
  // implicit conversion between a regular actor and an actor with a typed future
  implicit def actorWithFuture(a: Actor) = new ActorWithTypedFuture(a)

  abstract class FutureWithTimeout[T](ch: InputChannel[T]) extends Future[T](ch) {
    def receiveWithin(timeout: Int): Option[T]
    override def respond(f: T => Unit): Unit = throw new UnsupportedOperationException("Does not support the Responder API")
  }

  def receiveOrFail[T](future: => FutureWithTimeout[T], timeout: Int, errorHandler: => T): T = {
    future.receiveWithin(timeout) match {
      case None => errorHandler
      case Some(reply) => reply
    }
  }

  class ActorWithTypedFuture(a: Actor) {
    require(a != null)

    def !![A](msg: Any): FutureWithTimeout[A] = {
      val ftch = new Channel[A](Actor.self)
      a.send(msg, ftch.asInstanceOf[OutputChannel[Any]])
      new FutureWithTimeout[A](ftch) {
        def apply() =
          if (isSet) value.get.asInstanceOf[A]
          else ch.receive {
            case a =>
              value = Some(a)
              value.get.asInstanceOf[A]
          }
        def isSet = receiveWithin(0).isDefined
        def receiveWithin(timeout: Int): Option[A] = value match {
          case None => ch.receiveWithin(timeout) {
            case TIMEOUT =>
              log.debug("Future timed out while waiting for actor [%s]", a)
              None
            case a =>
              value = Some(a)
              value.asInstanceOf[Option[A]]
          }
          case a => a.asInstanceOf[Option[A]]
        }
      }
    }
  }
}
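The new `intToBytes` helper packs an `Int` big-endian, which is what gives the vector indices stored by `CassandraStorage` a lexicographic byte order that matches numeric order in Cassandra slices. A quick sanity check (not from the source):

    // 258 = 0x00000102, so the big-endian encoding is [0, 0, 1, 2]
    val bytes = Helpers.intToBytes(258)   // => Array[Byte](0, 0, 1, 2)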
@@ -16,7 +16,7 @@ object AllTest extends TestCase {
    suite.addTestSuite(classOf[EventBasedThreadPoolDispatcherTest])
    suite.addTestSuite(classOf[ActorSpec])
    suite.addTestSuite(classOf[RemoteActorSpec])
    suite.addTestSuite(classOf[PersistentActorSpec])
    //suite.addTestSuite(classOf[PersistentActorSpec])
    suite.addTestSuite(classOf[InMemoryActorSpec])
    //suite.addTestSuite(classOf[TransactionClasherSpec])
    suite