Fixed issues with config - Ticket #535
commit 1ee3a54372
parent 80adb71850

22 changed files with 97 additions and 101 deletions
@@ -26,7 +26,7 @@ trait CamelService extends Bootable with Logging {
   private[camel] val consumerPublisher = actorOf[ConsumerPublisher]
   private[camel] val publishRequestor = actorOf[PublishRequestor]
 
-  private val serviceEnabled = config.getBool("akka.camel.service", true)
+  private val serviceEnabled = config.getList("akka.enabled-modules").exists(_ == "camel")
 
   /**
    * Starts this CamelService unless <code>akka.camel.service</code> is set to <code>false</code>.
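This hunk shows the pattern the commit applies to every optional module: the per-module boolean (here akka.camel.service) is replaced by a membership test against the new akka.enabled-modules list. A minimal sketch of that check, assuming the Configgy-backed akka.config.Config.config object this code base uses; the isModuleEnabled helper is illustrative and not part of the commit:

import akka.config.Config.config

object ModuleCheckSketch {
  // akka.conf would contain, for example: enabled-modules = ["camel", "http"]
  def isModuleEnabled(module: String): Boolean =
    config.getList("akka.enabled-modules").exists(_ == module)

  val camelEnabled = isModuleEnabled("camel") // stands in for the old akka.camel.service flag
  val httpEnabled  = isModuleEnabled("http")  // stands in for the old akka.rest.service flag
}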
@@ -12,7 +12,7 @@ import akka.dispatch.Dispatchers
 import org.atmosphere.jersey.util.JerseyBroadcasterUtil
 
 object AkkaBroadcaster {
-  val broadcasterDispatcher = Dispatchers.fromConfig("akka.rest.comet-dispatcher")
+  val broadcasterDispatcher = Dispatchers.fromConfig("akka.http.comet-dispatcher")
 
   type Event = AtmosphereResourceEvent[_,_]
   type Resource = AtmosphereResource[_,_]
@@ -51,11 +51,11 @@ class AkkaServlet extends AtmosphereServlet {
     addInitParameter(AtmosphereServlet.DISABLE_ONSTATE_EVENT,"true")
     addInitParameter(AtmosphereServlet.BROADCASTER_CLASS,classOf[AkkaBroadcaster].getName)
     addInitParameter(AtmosphereServlet.PROPERTY_USE_STREAM,"true")
-    addInitParameter("com.sun.jersey.config.property.packages",c.getList("akka.rest.resource_packages").mkString(";"))
-    addInitParameter("com.sun.jersey.spi.container.ResourceFilters",c.getList("akka.rest.filters").mkString(","))
+    addInitParameter("com.sun.jersey.config.property.packages",c.getList("akka.http.resource_packages").mkString(";"))
+    addInitParameter("com.sun.jersey.spi.container.ResourceFilters",c.getList("akka.http.filters").mkString(","))
 
-    c.getInt("akka.rest.maxInactiveActivity") foreach { value => addInitParameter(CometSupport.MAX_INACTIVE,value.toString) }
-    c.getString("akka.rest.cometSupport") foreach { value => addInitParameter("cometSupport",value) }
+    c.getInt("akka.http.maxInactiveActivity") foreach { value => addInitParameter(CometSupport.MAX_INACTIVE,value.toString) }
+    c.getString("akka.http.cometSupport") foreach { value => addInitParameter("cometSupport",value) }
 
     /*
      * Provide a fallback for default values
@@ -24,15 +24,17 @@ trait EmbeddedAppServer extends Bootable with Logging {
 
   import akka.config.Config._
 
-  val REST_HOSTNAME = config.getString("akka.rest.hostname", "localhost")
-  val REST_PORT = config.getInt("akka.rest.port", 9998)
+  val REST_HOSTNAME = config.getString("akka.http.hostname", "localhost")
+  val REST_PORT = config.getInt("akka.http.port", 9998)
 
+  val isRestEnabled = config.getList("akka.enabled-modules").exists(_ == "http")
+
   protected var server: Option[Server] = None
 
   abstract override def onLoad = {
     super.onLoad
-    if (config.getBool("akka.rest.service", true)) {
-      log.info("Attempting to start Akka REST service (Jersey)")
+    if (isRestEnabled) {
+      log.info("Attempting to start Akka HTTP service")
 
       System.setProperty("jetty.port", REST_PORT.toString)
       System.setProperty("jetty.host", REST_HOSTNAME)
@@ -57,16 +59,15 @@ trait EmbeddedAppServer extends Bootable with Logging {
         s.start()
         s
       }
-      log.info("Akka REST service started (Jersey)")
+      log.info("Akka HTTP service started")
     }
   }
 
   abstract override def onUnload = {
     super.onUnload
-    server foreach { t => {
+    server foreach { t =>
       log.info("Shutting down REST service (Jersey)")
       t.stop()
     }
-    }
   }
 }
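Taken together, the two hunks above change EmbeddedAppServer in the same two ways as the rest of the commit: the akka.rest.* keys become akka.http.* (defaults unchanged), and startup is gated on the "http" module being listed in akka.enabled-modules. A hedged, standalone sketch of that boot-time gating (illustrative only, not the actual trait):

import akka.config.Config.config

object HttpBootSketch {
  val hostname      = config.getString("akka.http.hostname", "localhost")
  val port          = config.getInt("akka.http.port", 9998)
  val isRestEnabled = config.getList("akka.enabled-modules").exists(_ == "http")

  def onLoad(): Unit =
    if (isRestEnabled) println("would start the Akka HTTP service on " + hostname + ":" + port)
    else println("'http' is not in akka.enabled-modules; skipping HTTP service startup")
}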
@@ -1,10 +1,11 @@
 /**
  * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
  */
-package akka.rest
+package akka.http
+
+import akka.serialization.Serializer
 
 import java.io.OutputStream
-import akka.serialization.Serializer
 import javax.ws.rs.core.{MultivaluedMap, MediaType}
 import javax.ws.rs.ext.{MessageBodyWriter, Provider}
 import javax.ws.rs.Produces
@@ -101,8 +101,8 @@ class AkkaSecurityFilterFactory extends ResourceFilterFactory with Logging {
   }
 
   lazy val authenticatorFQN = {
-    val auth = Config.config.getString("akka.rest.authenticator", "N/A")
-    if (auth == "N/A") throw new IllegalActorStateException("The config option 'akka.rest.authenticator' is not defined in 'akka.conf'")
+    val auth = Config.config.getString("akka.http.authenticator", "N/A")
+    if (auth == "N/A") throw new IllegalActorStateException("The config option 'akka.http.authenticator' is not defined in 'akka.conf'")
     auth
   }
 
@@ -399,8 +399,8 @@ trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] w
   * principal name for the HTTP kerberos service, i.e HTTP/ { server } @ { realm }
   */
  lazy val servicePrincipal = {
-   val p = Config.config.getString("akka.rest.kerberos.servicePrincipal", "N/A")
-   if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.rest.kerberos.servicePrincipal' is not defined in 'akka.conf'")
+   val p = Config.config.getString("akka.http.kerberos.servicePrincipal", "N/A")
+   if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.servicePrincipal' is not defined in 'akka.conf'")
    p
  }
 
@@ -408,21 +408,21 @@ trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] w
   * keytab location with credentials for the service principal
   */
  lazy val keyTabLocation = {
-   val p = Config.config.getString("akka.rest.kerberos.keyTabLocation", "N/A")
-   if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.rest.kerberos.keyTabLocation' is not defined in 'akka.conf'")
+   val p = Config.config.getString("akka.http.kerberos.keyTabLocation", "N/A")
+   if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.keyTabLocation' is not defined in 'akka.conf'")
    p
  }
 
 lazy val kerberosDebug = {
-   val p = Config.config.getString("akka.rest.kerberos.kerberosDebug", "N/A")
-   if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.rest.kerberos.kerberosDebug' is not defined in 'akka.conf'")
+   val p = Config.config.getString("akka.http.kerberos.kerberosDebug", "N/A")
+   if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.kerberosDebug' is not defined in 'akka.conf'")
    p
  }
 
  /**
   * is not used by this authenticator, so accept an empty value
   */
-  lazy val realm = Config.config.getString("akka.rest.kerberos.realm", "")
+  lazy val realm = Config.config.getString("akka.http.kerberos.realm", "")
 
  /**
   * verify the kerberos token from a client with the server
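The security hunks are a mechanical akka.rest.* to akka.http.* rename of the mandatory authenticator and Kerberos settings. The underlying idiom is a required key read with an "N/A" sentinel that fails fast when the option is missing; a hedged generic sketch of that idiom (a plain IllegalStateException stands in for the IllegalActorStateException used above):

import akka.config.Config

object RequiredConfigSketch {
  def requiredString(key: String): String = {
    val value = Config.config.getString(key, "N/A")
    if (value == "N/A")
      throw new IllegalStateException("The config option '" + key + "' is not defined in 'akka.conf'")
    value
  }

  // Example uses with the renamed keys:
  // requiredString("akka.http.authenticator")
  // requiredString("akka.http.kerberos.servicePrincipal")
}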
@@ -34,10 +34,10 @@ private[akka] object CassandraStorageBackend extends CommonStorageBackend {
   val REF_KEY = "item".getBytes("UTF-8")
   val EMPTY_BYTE_ARRAY = new Array[Byte](0)
 
-  val CASSANDRA_SERVER_HOSTNAME = config.getString("akka.storage.cassandra.hostname", "127.0.0.1")
-  val CASSANDRA_SERVER_PORT = config.getInt("akka.storage.cassandra.port", 9160)
+  val CASSANDRA_SERVER_HOSTNAME = config.getString("akka.persistence.cassandra.hostname", "127.0.0.1")
+  val CASSANDRA_SERVER_PORT = config.getInt("akka.persistence.cassandra.port", 9160)
   val CONSISTENCY_LEVEL = {
-    config.getString("akka.storage.cassandra.consistency-level", "QUORUM") match {
+    config.getString("akka.persistence.cassandra.consistency-level", "QUORUM") match {
      case "ZERO" => ConsistencyLevel.ZERO
      case "ONE" => ConsistencyLevel.ONE
      case "QUORUM" => ConsistencyLevel.QUORUM
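For the persistence backends the commit only renames the key prefix: akka.storage.* becomes akka.persistence.*, with defaults untouched. Settings left under the old prefix are silently ignored after this change, so a deployment check can be useful; a hedged sketch using the same getConfigMap API the Voldemort backend relies on further below (the helper itself is illustrative, not part of the commit):

import akka.config.Config.config

object StaleConfigCheckSketch {
  // Warn if akka.conf still carries a section under the retired prefix.
  def warnOnStaleStorageSection(): Unit =
    config.getConfigMap("akka.storage") match {
      case Some(_) => println("WARNING: 'akka.storage' is no longer read; move these settings to 'akka.persistence'")
      case None    => () // nothing to migrate
    }
}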
@@ -31,8 +31,8 @@ private [akka] object CouchDBStorageBackend extends
   }
 
   lazy val URL = config.
-    getString("akka.storage.couchdb.url").
-    getOrElse(throw new IllegalArgumentException("'akka.storage.couchdb.url' not found in config"))
+    getString("akka.persistence.couchdb.url").
+    getOrElse(throw new IllegalArgumentException("'akka.persistence.couchdb.url' not found in config"))
 
   def drop() = {
     val client = new HttpClient()
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.util.Bytes
  */
 private[akka] object HbaseStorageBackend extends MapStorageBackend[Array[Byte], Array[Byte]] with VectorStorageBackend[Array[Byte]] with RefStorageBackend[Array[Byte]] with Logging {
 
-  val HBASE_ZOOKEEPER_QUORUM = config.getString("akka.storage.hbase.zookeeper-quorum", "localhost")
+  val HBASE_ZOOKEEPER_QUORUM = config.getString("akka.persistence.hbase.zookeeper-quorum", "localhost")
   val CONFIGURATION = new HBaseConfiguration
   val REF_TABLE_NAME = "__REF_TABLE"
   val VECTOR_TABLE_NAME = "__VECTOR_TABLE"
@@ -48,7 +48,7 @@ class SimpleHbaseSpecTestIntegration extends Spec with BeforeAndAfterAll with Sh
     import org.apache.hadoop.hbase.client.HBaseAdmin
     import org.apache.hadoop.hbase.client.HTable
 
-    val HBASE_ZOOKEEPER_QUORUM = config.getString("akka.storage.hbase.zookeeper-quorum", "0")
+    val HBASE_ZOOKEEPER_QUORUM = config.getString("akka.persistence.hbase.zookeeper-quorum", "0")
     HBASE_ZOOKEEPER_QUORUM should not equal ("0")
     HBASE_ZOOKEEPER_QUORUM should equal("localhost")
 
@@ -20,7 +20,7 @@ private[akka] object MemcachedStorageBackend extends CommonStorageBackend {
   import KVStorageBackend._
   import org.apache.commons.codec.binary.Base64
 
-  val clientAddresses = config.getString("akka.storage.memcached.client.addresses", "localhost:11211")
+  val clientAddresses = config.getString("akka.persistence.memcached.client.addresses", "localhost:11211")
   val factory = new KetamaConnectionFactory
   val client = new MemcachedClient(factory, AddrUtil.getAddresses(clientAddresses))
   val base64 = new Base64(76, Array.empty[Byte], true)
@@ -31,9 +31,9 @@ private[akka] object MongoStorageBackend extends
   val REF = "__ref"
   val COLLECTION = "akka_coll"
 
-  val HOSTNAME = config.getString("akka.storage.mongodb.hostname", "127.0.0.1")
-  val DBNAME = config.getString("akka.storage.mongodb.dbname", "testdb")
-  val PORT = config.getInt("akka.storage.mongodb.port", 27017)
+  val HOSTNAME = config.getString("akka.persistence.mongodb.hostname", "127.0.0.1")
+  val DBNAME = config.getString("akka.persistence.mongodb.dbname", "testdb")
+  val PORT = config.getInt("akka.persistence.mongodb.port", 27017)
 
   val db: MongoDB = MongoConnection(HOSTNAME, PORT)(DBNAME)
   val coll: MongoCollection = db(COLLECTION)
@@ -52,14 +52,14 @@ private [akka] object RedisStorageBackend extends
   Logging {
 
   // need an explicit definition in akka-conf
-  val nodes = config.getList("akka.storage.redis.cluster")
+  val nodes = config.getList("akka.persistence.redis.cluster")
 
   def connect() =
     nodes match {
       case Seq() =>
         // no cluster defined
-        val REDIS_SERVER_HOSTNAME = config.getString("akka.storage.redis.hostname", "127.0.0.1")
-        val REDIS_SERVER_PORT = config.getInt("akka.storage.redis.port", 6379)
+        val REDIS_SERVER_HOSTNAME = config.getString("akka.persistence.redis.hostname", "127.0.0.1")
+        val REDIS_SERVER_PORT = config.getInt("akka.persistence.redis.port", 6379)
         new RedisClient(REDIS_SERVER_HOSTNAME, REDIS_SERVER_PORT)
 
       case s =>
@@ -18,12 +18,12 @@ import com.trifork.riak.{RequestMeta, RiakObject, RiakClient}
 
 
 private[akka] object RiakStorageBackend extends CommonStorageBackend {
-  val refBucket = config.getString("akka.storage.riak.bucket.ref", "Refs")
-  val mapBucket = config.getString("akka.storage.riak.bucket.map", "Maps")
-  val vectorBucket = config.getString("akka.storage.riak.bucket.vector", "Vectors")
-  val queueBucket = config.getString("akka.storage.riak.bucket.queue", "Queues")
-  val clientHost = config.getString("akka.storage.riak.client.host", "localhost")
-  val clientPort = config.getInt("akka.storage.riak.client.port", 8087)
+  val refBucket = config.getString("akka.persistence.riak.bucket.ref", "Refs")
+  val mapBucket = config.getString("akka.persistence.riak.bucket.map", "Maps")
+  val vectorBucket = config.getString("akka.persistence.riak.bucket.vector", "Vectors")
+  val queueBucket = config.getString("akka.persistence.riak.bucket.queue", "Queues")
+  val clientHost = config.getString("akka.persistence.riak.client.host", "localhost")
+  val clientPort = config.getInt("akka.persistence.riak.client.port", 8087)
   val riakClient: RiakClient = new RiakClient(clientHost, clientPort);
 
   import CommonStorageBackendAccess._
@@ -28,42 +28,42 @@ private[akka] object SimpledbStorageBackend extends CommonStorageBackend {
   val ownerAtt = "owner"
   val base64 = new Base64(1024, seperatorBytes, true)
   val base64key = new Base64(1024, Array.empty[Byte], true)
-  val id = config.getString("akka.storage.simpledb.account.id").getOrElse{
+  val id = config.getString("akka.persistence.simpledb.account.id").getOrElse{
     val e = new IllegalStateException("You must provide an AWS id")
     log.error(e, "You Must Provide an AWS id to use the SimpledbStorageBackend")
     throw e
   }
-  val secretKey = config.getString("akka.storage.simpledb.account.secretKey").getOrElse{
+  val secretKey = config.getString("akka.persistence.simpledb.account.secretKey").getOrElse{
     val e = new IllegalStateException("You must provide an AWS secretKey")
     log.error(e, "You Must Provide an AWS secretKey to use the SimpledbStorageBackend")
     throw e
   }
-  val refDomain = config.getString("akka.storage.simpledb.domain.ref", "ref")
-  val mapDomain = config.getString("akka.storage.simpledb.domain.map", "map")
-  val queueDomain = config.getString("akka.storage.simpledb.domain.queue", "queue")
-  val vectorDomain = config.getString("akka.storage.simpledb.domain.vector", "vector")
+  val refDomain = config.getString("akka.persistence.simpledb.domain.ref", "ref")
+  val mapDomain = config.getString("akka.persistence.simpledb.domain.map", "map")
+  val queueDomain = config.getString("akka.persistence.simpledb.domain.queue", "queue")
+  val vectorDomain = config.getString("akka.persistence.simpledb.domain.vector", "vector")
   val credentials = new BasicAWSCredentials(id, secretKey);
   val clientConfig = new ClientConfiguration()
-  for (i <- config.getInt("akka.storage.simpledb.client.timeout")) {
+  for (i <- config.getInt("akka.persistence.simpledb.client.timeout")) {
     clientConfig.setConnectionTimeout(i)
   }
-  for (i <- config.getInt("akka.storage.simpledb.client.maxconnections")) {
+  for (i <- config.getInt("akka.persistence.simpledb.client.maxconnections")) {
     clientConfig.setMaxConnections(i)
   }
-  clientConfig.setMaxErrorRetry(config.getInt("akka.storage.simpledb.client.maxretries", 10))
+  clientConfig.setMaxErrorRetry(config.getInt("akka.persistence.simpledb.client.maxretries", 10))
 
-  for (s <- config.getString("akka.storage.simpledb.client.protocol")) {
+  for (s <- config.getString("akka.persistence.simpledb.client.protocol")) {
     clientConfig.setProtocol(Protocol.valueOf(s))
   }
-  for (i <- config.getInt("akka.storage.simpledb.client.sockettimeout")) {
+  for (i <- config.getInt("akka.persistence.simpledb.client.sockettimeout")) {
     clientConfig.setSocketTimeout(i)
   }
-  for {s <- config.getInt("akka.storage.simpledb.client.sendbuffer")
-       r <- config.getInt("akka.storage.simpledb.client.receivebuffer")} {
+  for {s <- config.getInt("akka.persistence.simpledb.client.sendbuffer")
+       r <- config.getInt("akka.persistence.simpledb.client.receivebuffer")} {
     clientConfig.setSocketBufferSizeHints(s, r)
   }
 
-  for (s <- config.getString("akka.storage.simpledb.client.useragent")) {
+  for (s <- config.getString("akka.persistence.simpledb.client.useragent")) {
     clientConfig.setUserAgent(s)
   }
 
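Beyond the prefix rename, this hunk illustrates the optional-tuning idiom used throughout the SimpleDB backend: getInt and getString without a default return an Option, so each AWS client knob is applied only when the corresponding akka.persistence.simpledb.client.* key is present in akka.conf. A hedged standalone sketch of the same idiom (the ClientTuning class is illustrative, not the AWS ClientConfiguration):

import akka.config.Config.config

object OptionalTuningSketch {
  final class ClientTuning {
    var connectionTimeout = 50000 // placeholder defaults, only overridden when a key is set
    var maxConnections    = 50
  }

  val tuning = new ClientTuning
  // Each knob is only touched when the key exists; otherwise the default stands.
  for (i <- config.getInt("akka.persistence.simpledb.client.timeout")) tuning.connectionTimeout = i
  for (i <- config.getInt("akka.persistence.simpledb.client.maxconnections")) tuning.maxConnections = i
}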
@@ -28,14 +28,14 @@ private[akka] object VoldemortStorageBackend extends CommonStorageBackend {
   import VoldemortAccess._
 
   val bootstrapUrlsProp = "bootstrap_urls"
-  val clientConfig = config.getConfigMap("akka.storage.voldemort.client") match {
+  val clientConfig = config.getConfigMap("akka.persistence.voldemort.client") match {
     case Some(configMap) => getClientConfig(configMap.asMap)
     case None => getClientConfig(new HashMap[String, String] + (bootstrapUrlsProp -> "tcp://localhost:6666"))
   }
-  val refStore = config.getString("akka.storage.voldemort.store.ref", "Refs")
-  val mapStore = config.getString("akka.storage.voldemort.store.map", "Maps")
-  val vectorStore = config.getString("akka.storage.voldemort.store.vector", "Vectors")
-  val queueStore = config.getString("akka.storage.voldemort.store.queue", "Queues")
+  val refStore = config.getString("akka.persistence.voldemort.store.ref", "Refs")
+  val mapStore = config.getString("akka.persistence.voldemort.store.map", "Maps")
+  val vectorStore = config.getString("akka.persistence.voldemort.store.vector", "Vectors")
+  val queueStore = config.getString("akka.persistence.voldemort.store.queue", "Queues")
 
 
   var storeClientFactory: StoreClientFactory = null
@@ -26,7 +26,7 @@ trait BootableRemoteActorService extends Bootable with Logging {
   def startRemoteService = remoteServerThread.start
 
   abstract override def onLoad = {
-    if (config.getBool("akka.remote.server.service", true)) {
+    if (RemoteServer.isRemotingEnabled) {
       log.info("Initializing Remote Actors Service...")
       startRemoteService
       log.info("Remote Actors Service initialized")
@@ -66,12 +66,14 @@ object RemoteNode extends RemoteServer
  * @author <a href="http://jonasboner.com">Jonas Bonér</a>
  */
 object RemoteServer {
+  val isRemotingEnabled = config.getList("akka.enabled-modules").exists(_ == "remote")
+
   val UUID_PREFIX = "uuid:"
   val MESSAGE_FRAME_SIZE = config.getInt("akka.remote.server.message-frame-size", 1048576)
   val SECURE_COOKIE = config.getString("akka.remote.secure-cookie")
   val REQUIRE_COOKIE = {
     val requireCookie = config.getBool("akka.remote.server.require-cookie", true)
-    if (requireCookie && RemoteServer.SECURE_COOKIE.isEmpty) throw new ConfigurationException(
+    if (isRemotingEnabled && requireCookie && RemoteServer.SECURE_COOKIE.isEmpty) throw new ConfigurationException(
       "Configuration option 'akka.remote.server.require-cookie' is turned on but no secure cookie is defined in 'akka.remote.secure-cookie'.")
     requireCookie
   }
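The second change here is more than a rename: the secure-cookie sanity check now fires only when the "remote" module is actually enabled, so a node that does not run remoting can keep the default require-cookie = on without defining a cookie. A hedged sketch of the tightened guard in isolation (a plain IllegalStateException stands in for the ConfigurationException used above):

import akka.config.Config.config

object RequireCookieSketch {
  val isRemotingEnabled = config.getList("akka.enabled-modules").exists(_ == "remote")
  val secureCookie      = config.getString("akka.remote.secure-cookie")             // optional, no default
  val requireCookie     = config.getBool("akka.remote.server.require-cookie", true)

  // Only a remoting-enabled node with require-cookie = on must define a secure cookie.
  if (isRemotingEnabled && requireCookie && secureCookie.isEmpty)
    throw new IllegalStateException(
      "'akka.remote.server.require-cookie' is turned on but no secure cookie is defined in 'akka.remote.secure-cookie'")
}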
@@ -39,7 +39,7 @@ class Boot {
 }
 
 /*
- * In akka.conf you can set the FQN of any AuthenticationActor of your wish, under the property name: akka.rest.authenticator
+ * In akka.conf you can set the FQN of any AuthenticationActor of your wish, under the property name: akka.http.authenticator
  */
 class DigestAuthenticationService extends DigestAuthenticationActor {
   //If you want to have a distributed nonce-map, you can use something like below,
@@ -8,7 +8,7 @@
 
   <servlet>
     <servlet-name>AkkaServlet</servlet-name>
-    <servlet-class>akka.rest.AkkaServlet</servlet-class>
+    <servlet-class>akka.http.AkkaServlet</servlet-class>
   </servlet>
 
   <servlet-mapping>
@@ -8,7 +8,9 @@
 akka {
   version = "1.0-SNAPSHOT"   # Akka version, checked against the runtime version of Akka.
 
-  time-unit = "seconds"      # Default timeout time unit for all timeout properties throughout the config
+  enabled-modules = []       # Comma separated list of the enabled modules. Options: ["remote", "remote.ssl", "camel", "http"]
+
+  time-unit = "seconds"      # Time unit for all timeout properties throughout the config
 
   # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up
   # Can be used to bootstrap your application(s)
@@ -84,8 +86,7 @@ akka {
     timeout = 60
   }
 
-  rest {
-    service = on
+  http {
     hostname = "localhost"
     port = 9998
     #cometSupport = "org.atmosphere.container.Jetty7CometSupport" # Disregard autodetection, for valid values: http://doc.akkasource.org/comet
@@ -133,13 +134,12 @@ akka {
     }
 
     server {
-      service = on
       hostname = "localhost"        # The hostname or IP that clients should connect to
-      port = 2552                   # The port clients should connect to
-      message-frame-size = 1048576
+      port = 2552                   # The port clients should connect to. Default is 2552 (AKKA)
+      message-frame-size = 1048576  # Increase this if you want to be able to send messages with large payloads
       connection-timeout = 1
-      require-cookie = on
-      untrusted-mode = off
+      require-cookie = on           # Should the remote server require that it peers share the same secure-cookie (defined in the 'remote' section)?
+      untrusted-mode = off          # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect.
     }
 
     client {
@@ -148,15 +148,9 @@ akka {
       message-frame-size = 1048576
       reconnection-time-window = 600  # Maximum time window that a client should try to reconnect for
     }
-
-    cluster {
-      service = on
-      name = "default"                                    # The name of the cluster
-      serializer = "akka.serialization.Serializer$Java$"  # FQN of the serializer class
-    }
   }
 
-  storage {
+  persistence {
     cassandra {
       hostname = "127.0.0.1"        # IP address or hostname of one of the Cassandra cluster's seeds
       port = 9160                   # Port to Cassandra
@@ -220,6 +214,7 @@ akka {
         id = "YOU NEED TO PROVIDE AN AWS ID"
         secretKey = "YOU NEED TO PROVIDE AN AWS SECRETKEY"
       }
+
       client {
         #Defaults to default AWS ClientConfiguration
         timeout =50000
@@ -231,6 +226,7 @@ akka {
         #receivebuffer = 0
         #useragent = "AWS Java SDK-1.0.14"
       }
+
       domain {
         ref = "ref"
         map = "map"
@@ -239,8 +235,4 @@ akka {
       }
     }
   }
-
-  camel {
-    service = on
-  }
 }
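Summing up the reference-config changes for existing akka.conf files, in a hedged, illustrative form (the values below are a checklist distilled from this diff, not an API provided by the commit):

object ConfigMigrationChecklist {
  // Renamed prefixes (old -> new)
  val renamedPrefixes = Map(
    "akka.rest."    -> "akka.http.",
    "akka.storage." -> "akka.persistence."
  )

  // Per-module on/off flags removed in favour of the akka.enabled-modules list
  val removedServiceFlags = Seq(
    "akka.camel.service",         // enable with enabled-modules = ["camel"]
    "akka.rest.service",          // enable with enabled-modules = ["http"]
    "akka.remote.server.service"  // enable with enabled-modules = ["remote"]
  )

  // The akka.remote.cluster section is dropped from the reference config in this commit.
}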