/**
 * Copyright (C) 2009 Scalable Solutions.
 */

package se.scalablesolutions.akka.kernel.state

import java.io.File

import kernel.util.{Serializer, JavaSerializationSerializer, Logging}

import org.apache.cassandra.config.DatabaseDescriptor
import org.apache.cassandra.service._

import org.apache.thrift.server.TThreadPoolServer
import org.apache.thrift.protocol.TBinaryProtocol
import org.apache.thrift.transport.TServerSocket
import org.apache.thrift.transport.TTransportFactory
import org.apache.thrift.TProcessorFactory

/**
 * Embedded Cassandra-backed persistent storage for Ref, Vector and Map structures.
 * <p/>
 * NOTE: requires the command line options:
 * <br/>
 * <code>-Dcassandra -Dstorage-config=config/ -Dpidfile=akka.pid</code>
 * <p/>
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
object CassandraStorage extends Logging {
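  // Cassandra table plus the column families backing the three storage
  // structures. REF_COLUMN_FAMILY is a full "family:column" path, since a
  // ref only ever stores a single item.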
  val TABLE_NAME = "akka"
  val MAP_COLUMN_FAMILY = "map"
  val VECTOR_COLUMN_FAMILY = "vector"
  val REF_COLUMN_FAMILY = "ref:item"
  val IS_ASCENDING = true
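  // Settings read from the Akka config. BLOCKING_CALL is passed straight
  // through to the Cassandra thrift calls below; presumably the number of
  // replicas to block for on each write (0 = do not block).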
  val RUN_THRIFT_SERVICE = kernel.Kernel.config.getBool("akka.storage.cassandra.thrift-server.service", false)
  val BLOCKING_CALL = kernel.Kernel.config.getInt("akka.storage.cassandra.blocking", 0)
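  // Written only inside the synchronized 'start'; @volatile makes the flag
  // visible to unsynchronized readers such as 'stop'.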
  @volatile private[this] var isRunning = false
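  // Pluggable wire format for stored values; only Java serialization is
  // implemented so far.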
  private[this] val serializer: Serializer = {
    kernel.Kernel.config.getString("akka.storage.cassandra.storage-format", "serialization") match {
      case "serialization" => new JavaSerializationSerializer
      case "json" => throw new UnsupportedOperationException("json storage protocol is not yet supported")
      case "avro" => throw new UnsupportedOperationException("avro storage protocol is not yet supported")
      case "thrift" => throw new UnsupportedOperationException("thrift storage protocol is not yet supported")
      case "protobuf" => throw new UnsupportedOperationException("protobuf storage protocol is not yet supported")
      case unknown => throw new IllegalArgumentException("Unknown storage-format: " + unknown)
    }
  }

  // TODO: is this server thread-safe or needed to be wrapped up in an actor?
  private[this] val server = classOf[CassandraServer].newInstance.asInstanceOf[CassandraServer]

  private[this] var thriftServer: CassandraThriftServer = _

  def start = synchronized {
    if (!isRunning) {
      try {
        server.start
        log.info("Cassandra persistent storage has started up successfully")
      } catch {
        case e =>
          log.error("Could not start up Cassandra persistent storage")
          throw e
      }
      if (RUN_THRIFT_SERVICE) {
        thriftServer = new CassandraThriftServer(server)
        thriftServer.start
      }
      isRunning = true
    }
  }

  def stop = if (isRunning) {
    //server.storageService.shutdown
    if (RUN_THRIFT_SERVICE) thriftServer.stop
  }
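  // Usage sketch (hypothetical names; requires the JVM options listed in the
  // scaladoc above):
  //
  //   CassandraStorage.start
  //   CassandraStorage.insertMapStorageEntryFor("users", "jonas", someSerializableValue)
  //   val v = CassandraStorage.getMapStorageEntryFor("users", "jonas")
  //   CassandraStorage.stop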
  // ===============================================================
  // For Ref
  // ===============================================================
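  // Stores one serialized element under the given name; a later insert for
  // the same name simply overwrites the previous column value.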
  def insertRefStorageFor(name: String, element: AnyRef) = {
    server.insert(
      TABLE_NAME,
      name,
      REF_COLUMN_FAMILY,
      serializer.out(element),
      System.currentTimeMillis,
      BLOCKING_CALL)
  }
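  // Returns None if the ref is missing or the read fails; the underlying
  // exception is only printed, never propagated.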
  def getRefStorageFor(name: String): Option[AnyRef] = {
    try {
      val column = server.get_column(TABLE_NAME, name, REF_COLUMN_FAMILY)
      Some(serializer.in(column.value))
    } catch {
      case e =>
        e.printStackTrace
        None //throw new Predef.NoSuchElementException(e.getMessage)
    }
  }

  // ===============================================================
  // For Vector
  // ===============================================================
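  // Appends by writing to column "vector:<current-size>". The size is read
  // with a separate call, so concurrent appends to the same vector can race.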
  def insertVectorStorageEntryFor(name: String, element: AnyRef) = {
    server.insert(
      TABLE_NAME,
      name,
      VECTOR_COLUMN_FAMILY + ":" + getVectorStorageSizeFor(name),
      serializer.out(element),
      System.currentTimeMillis,
      BLOCKING_CALL)
  }

  def getVectorStorageEntryFor(name: String, index: Int): AnyRef = {
    try {
      val column = server.get_column(TABLE_NAME, name, VECTOR_COLUMN_FAMILY + ":" + index)
      serializer.in(column.value)
    } catch {
      case e =>
        e.printStackTrace
        throw new Predef.NoSuchElementException(e.getMessage)
    }
  }
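  // FIXME: 'start' is currently ignored; the slice always begins at the
  // first column and returns 'count' elements.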
  def getVectorStorageRangeFor(name: String, start: Int, count: Int): List[AnyRef] =
    server.get_slice(TABLE_NAME, name, VECTOR_COLUMN_FAMILY, IS_ASCENDING, count)
      .toArray.toList.asInstanceOf[List[Tuple2[String, AnyRef]]].map(tuple => tuple._2)

  def getVectorStorageSizeFor(name: String): Int =
    server.get_column_count(TABLE_NAME, name, VECTOR_COLUMN_FAMILY)

  // ===============================================================
  // For Map
  // ===============================================================
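  // One column per map entry: the map name is the row key and the map key
  // becomes the column name under the "map" column family.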
  def insertMapStorageEntryFor(name: String, key: String, value: AnyRef) = {
    server.insert(
      TABLE_NAME,
      name,
      MAP_COLUMN_FAMILY + ":" + key,
      serializer.out(value),
      System.currentTimeMillis,
      BLOCKING_CALL)
  }
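  // Inserts all entries as columns of a single row in one batch mutation.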
  def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[String, AnyRef]]) = {
    import java.util.{Map, HashMap, List, ArrayList}
    val columns: Map[String, List[column_t]] = new HashMap
    val cls: List[column_t] = new ArrayList
    for (entry <- entries) {
      cls.add(new column_t(entry._1, serializer.out(entry._2), System.currentTimeMillis))
    }
    columns.put(MAP_COLUMN_FAMILY, cls)
    server.batch_insert(new batch_mutation_t(
      TABLE_NAME,
      name,
      columns),
      BLOCKING_CALL)
  }

  def getMapStorageEntryFor(name: String, key: AnyRef): Option[AnyRef] = {
    try {
      val column = server.get_column(TABLE_NAME, name, MAP_COLUMN_FAMILY + ":" + key)
      Some(serializer.in(column.value))
    } catch {
      case e =>
        e.printStackTrace
        None
    }
  }
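  // Fetches every column of the row (-1 presumably meaning "since the
  // beginning of time") and deserializes them into (key, value) pairs.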
  def getMapStorageFor(name: String): List[Tuple2[String, AnyRef]] = {
    val columns = server.get_columns_since(TABLE_NAME, name, MAP_COLUMN_FAMILY, -1)
      .toArray.toList.asInstanceOf[List[org.apache.cassandra.service.column_t]]
    for {
      column <- columns
      col = (column.columnName, serializer.in(column.value))
    } yield col
  }

  def getMapStorageSizeFor(name: String): Int =
    server.get_column_count(TABLE_NAME, name, MAP_COLUMN_FAMILY)

  def removeMapStorageFor(name: String) =
    server.remove(TABLE_NAME, name, MAP_COLUMN_FAMILY, System.currentTimeMillis, BLOCKING_CALL)
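  // FIXME: as in getVectorStorageRangeFor, 'start' is currently ignored.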
  def getMapStorageRangeFor(name: String, start: Int, count: Int): List[Tuple2[String, AnyRef]] = {
    server.get_slice(TABLE_NAME, name, MAP_COLUMN_FAMILY, IS_ASCENDING, count)
      .toArray.toList.asInstanceOf[List[Tuple2[String, AnyRef]]]
  }
}
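/**
 * Runs Cassandra's thrift service on top of the embedded CassandraServer,
 * so that external clients can reach the same storage instance.
 */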
class CassandraThriftServer(server: CassandraServer) extends Logging {
  case object Start
  case object Stop
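  // Bootstraps the thrift engine: binds a Cassandra.Processor around the
  // embedded server to the thrift port from the Cassandra configuration and
  // serves it from a thread pool (64 worker threads minimum).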
  private[this] val serverEngine: TThreadPoolServer = try {
    val pidFile = kernel.Kernel.config.getString("akka.storage.cassandra.thrift-server.pidfile", "akka.pid")
    if (pidFile != null) new File(pidFile).deleteOnExit()
    val listenPort = DatabaseDescriptor.getThriftPort

    val processor = new Cassandra.Processor(server)
    val tServerSocket = new TServerSocket(listenPort)
    val tProtocolFactory = new TBinaryProtocol.Factory

    val options = new TThreadPoolServer.Options
    options.minWorkerThreads = 64
    new TThreadPoolServer(new TProcessorFactory(processor),
      tServerSocket,
      new TTransportFactory,
      new TTransportFactory,
      tProtocolFactory,
      tProtocolFactory,
      options)
  } catch {
    case e =>
      log.error("Could not start up Cassandra thrift service")
      throw e
  }
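  // The daemon actor handles a single lifecycle message: Start runs the
  // (blocking) serve loop, Stop shuts the engine down.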
  import scala.actors.Actor._
  private[this] val serverDaemon = actor {
    receive {
      case Start =>
        log.info("Cassandra thrift service is starting up...")
        serverEngine.serve
      case Stop =>
        log.info("Cassandra thrift service is shutting down...")
        serverEngine.stop
    }
  }

  def start = serverDaemon ! Start
  def stop = serverDaemon ! Stop
}