externalized MongoDB configurations in akka-reference.conf

This commit is contained in:
debasishg 2009-08-13 22:55:39 +05:30
parent 7eeede9ac3
commit cee665ba9a
2 changed files with 92 additions and 68 deletions

View file

@ -1,62 +1,73 @@
#################### ####################
# Akka Config File # # Akka Config File #
#################### ####################
# This file has all the default settings, so all these could be removed with no visible effect. # This file has all the default settings, so all these could be removed with no visible effect.
# Modify as needed. # Modify as needed.
<log> <log>
filename = "./logs/akka.log" filename = "./logs/akka.log"
roll = "daily" # Options: never, hourly, daily, sunday/monday/... roll = "daily" # Options: never, hourly, daily, sunday/monday/...
level = "debug" # Options: fatal, critical, error, warning, info, debug, trace level = "debug" # Options: fatal, critical, error, warning, info, debug, trace
console = on console = on
# syslog_host = "" # syslog_host = ""
# syslog_server_name = "" # syslog_server_name = ""
</log> </log>
<akka> <akka>
version = "v0.5" version = "v0.5"
#boot = ["sample.scala.Boot"] # FQN to the class doing initial active object/actor #boot = ["sample.scala.Boot"] # FQN to the class doing initial active object/actor
boot = ["sample.java.Boot", "sample.scala.Boot"] # FQN to the class doing initial active object/actor boot = ["sample.java.Boot", "sample.scala.Boot"] # FQN to the class doing initial active object/actor
# supervisor bootstrap, should be defined in default constructor # supervisor bootstrap, should be defined in default constructor
<actor> <actor>
timeout = 5000 # default timeout for future based invocations timeout = 5000 # default timeout for future based invocations
serialize-messages = off # does a deep clone of (non-primitive) messages to ensure immutability serialize-messages = off # does a deep clone of (non-primitive) messages to ensure immutability
</actor> </actor>
<stm> <stm>
service = on service = on
restart-on-collision = off # (not implemented yet) if 'on' then it reschedules the transaction, restart-on-collision = off # (not implemented yet) if 'on' then it reschedules the transaction,
# if 'off' then throws an exception or rollback for user to handle # if 'off' then throws an exception or rollback for user to handle
wait-for-completion = 100 # how long in millis a transaction is given to complete when a collision is detected wait-for-completion = 100 # how long in millis a transaction is given to complete when a collision is detected
wait-nr-of-times = 3 # the number of times it should check for completion of a pending transaction upon collision wait-nr-of-times = 3 # the number of times it should check for completion of a pending transaction upon collision
distributed = off # not implemented yet distributed = off # not implemented yet
</stm> </stm>
<remote> <remote>
service = on service = on
hostname = "localhost" hostname = "localhost"
port = 9999 port = 9999
connection-timeout = 1000 # in millis connection-timeout = 1000 # in millis
</remote> </remote>
<rest> <rest>
service = on service = on
hostname = "localhost" hostname = "localhost"
port = 9998 port = 9998
</rest> </rest>
<storage> <storage>
system = "cassandra" # Options: cassandra (coming: terracotta, mongodb, redis, tokyo-cabinet, voldemort, memcached) system = "cassandra" # Options: cassandra (coming: terracotta, mongodb, redis, tokyo-cabinet, voldemort, memcached)
<cassandra> <cassandra>
service = on service = on
hostname = "127.0.0.1" # ip address or hostname of one of the Cassandra cluster's seeds hostname = "127.0.0.1" # ip address or hostname of one of the Cassandra cluster's seeds
port = 9160 port = 9160
storage-format = "java" # Options: java, scala-json, java-json, protobuf storage-format = "java" # Options: java, scala-json, java-json, protobuf
consistency-level = 1 # consistency-level = 1 #
</cassandra> </cassandra>
</rest> </storage>
</akka> <storage>
system = "mongodb"
<mongodb>
service = on
hostname = "127.0.0.1" # ip address or hostname of the MongoDB server
port = 27017
dbname = "mydb"
storage-format = "scala-json" # Options: java, scala-json, java-json, protobuf
</mongodb>
</storage>
</akka>

View file

@ -3,6 +3,7 @@ package se.scalablesolutions.akka.kernel.state
import com.mongodb._ import com.mongodb._
import se.scalablesolutions.akka.kernel.util.Logging import se.scalablesolutions.akka.kernel.util.Logging
import serialization.{Serializer} import serialization.{Serializer}
import kernel.Kernel.config
import java.util.{Map=>JMap, List=>JList, ArrayList=>JArrayList} import java.util.{Map=>JMap, List=>JList, ArrayList=>JArrayList}
@ -23,18 +24,28 @@ object MongoStorage extends MapStorage
val KEY = "key" val KEY = "key"
val VALUE = "value" val VALUE = "value"
val db = new Mongo("mydb"); // @fixme: need to externalize
val COLLECTION = "akka_coll" val COLLECTION = "akka_coll"
val MONGODB_SERVER_HOSTNAME =
config.getString("akka.storage.mongodb.hostname", "127.0.0.1")
val MONGODB_SERVER_DBNAME =
config.getString("akka.storage.mongodb.dbname", "testdb")
val MONGODB_SERVER_PORT =
config.getInt("akka.storage.mongodb.port", 27017)
val db = new Mongo(MONGODB_SERVER_HOSTNAME,
MONGODB_SERVER_PORT, MONGODB_SERVER_DBNAME)
val coll = db.getCollection(COLLECTION) val coll = db.getCollection(COLLECTION)
// @fixme: make this pluggable // @fixme: make this pluggable
private[this] val serializer: Serializer = Serializer.ScalaJSON private[this] val serializer: Serializer = Serializer.ScalaJSON
override def insertMapStorageEntryFor(name: String, key: AnyRef, value: AnyRef) { override def insertMapStorageEntryFor(name: String,
key: AnyRef, value: AnyRef) {
insertMapStorageEntriesFor(name, List((key, value))) insertMapStorageEntriesFor(name, List((key, value)))
} }
override def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[AnyRef, AnyRef]]) { override def insertMapStorageEntriesFor(name: String,
entries: List[Tuple2[AnyRef, AnyRef]]) {
import java.util.{Map, HashMap} import java.util.{Map, HashMap}
val m: Map[AnyRef, AnyRef] = new HashMap val m: Map[AnyRef, AnyRef] = new HashMap
@ -79,7 +90,8 @@ object MongoStorage extends MapStorage
} }
} }
override def getMapStorageEntryFor(name: String, key: AnyRef): Option[AnyRef] = { override def getMapStorageEntryFor(name: String,
key: AnyRef): Option[AnyRef] = {
getValueForKey(name, key.asInstanceOf[String]) getValueForKey(name, key.asInstanceOf[String])
} }
@ -206,7 +218,8 @@ object MongoStorage extends MapStorage
} }
} }
override def getVectorStorageRangeFor(name: String, start: Option[Int], finish: Option[Int], count: Int): List[AnyRef] = { override def getVectorStorageRangeFor(name: String,
start: Option[Int], finish: Option[Int], count: Int): List[AnyRef] = {
try { try {
val o = val o =
nullSafeFindOne(name) match { nullSafeFindOne(name) match {