Fixed issues with config - Ticket #535

This commit is contained in:
Jonas Bonér 2010-11-22 15:32:54 +01:00
parent 80adb71850
commit 1ee3a54372
22 changed files with 97 additions and 101 deletions

View file

@ -26,7 +26,7 @@ trait CamelService extends Bootable with Logging {
private[camel] val consumerPublisher = actorOf[ConsumerPublisher]
private[camel] val publishRequestor = actorOf[PublishRequestor]
private val serviceEnabled = config.getBool("akka.camel.service", true)
private val serviceEnabled = config.getList("akka.enabled-modules").exists(_ == "camel")
/**
* Starts this CamelService unless <code>akka.camel.service</code> is set to <code>false</code>.

View file

@ -12,7 +12,7 @@ import akka.dispatch.Dispatchers
import org.atmosphere.jersey.util.JerseyBroadcasterUtil
object AkkaBroadcaster {
val broadcasterDispatcher = Dispatchers.fromConfig("akka.rest.comet-dispatcher")
val broadcasterDispatcher = Dispatchers.fromConfig("akka.http.comet-dispatcher")
type Event = AtmosphereResourceEvent[_,_]
type Resource = AtmosphereResource[_,_]

View file

@ -51,11 +51,11 @@ class AkkaServlet extends AtmosphereServlet {
addInitParameter(AtmosphereServlet.DISABLE_ONSTATE_EVENT,"true")
addInitParameter(AtmosphereServlet.BROADCASTER_CLASS,classOf[AkkaBroadcaster].getName)
addInitParameter(AtmosphereServlet.PROPERTY_USE_STREAM,"true")
addInitParameter("com.sun.jersey.config.property.packages",c.getList("akka.rest.resource_packages").mkString(";"))
addInitParameter("com.sun.jersey.spi.container.ResourceFilters",c.getList("akka.rest.filters").mkString(","))
addInitParameter("com.sun.jersey.config.property.packages",c.getList("akka.http.resource_packages").mkString(";"))
addInitParameter("com.sun.jersey.spi.container.ResourceFilters",c.getList("akka.http.filters").mkString(","))
c.getInt("akka.rest.maxInactiveActivity") foreach { value => addInitParameter(CometSupport.MAX_INACTIVE,value.toString) }
c.getString("akka.rest.cometSupport") foreach { value => addInitParameter("cometSupport",value) }
c.getInt("akka.http.maxInactiveActivity") foreach { value => addInitParameter(CometSupport.MAX_INACTIVE,value.toString) }
c.getString("akka.http.cometSupport") foreach { value => addInitParameter("cometSupport",value) }
/*
* Provide a fallback for default values

View file

@ -24,15 +24,17 @@ trait EmbeddedAppServer extends Bootable with Logging {
import akka.config.Config._
val REST_HOSTNAME = config.getString("akka.rest.hostname", "localhost")
val REST_PORT = config.getInt("akka.rest.port", 9998)
val REST_HOSTNAME = config.getString("akka.http.hostname", "localhost")
val REST_PORT = config.getInt("akka.http.port", 9998)
val isRestEnabled = config.getList("akka.enabled-modules").exists(_ == "http")
protected var server: Option[Server] = None
abstract override def onLoad = {
super.onLoad
if (config.getBool("akka.rest.service", true)) {
log.info("Attempting to start Akka REST service (Jersey)")
if (isRestEnabled) {
log.info("Attempting to start Akka HTTP service")
System.setProperty("jetty.port", REST_PORT.toString)
System.setProperty("jetty.host", REST_HOSTNAME)
@ -57,16 +59,15 @@ trait EmbeddedAppServer extends Bootable with Logging {
s.start()
s
}
log.info("Akka REST service started (Jersey)")
log.info("Akka HTTP service started")
}
}
abstract override def onUnload = {
super.onUnload
server foreach { t => {
server foreach { t =>
log.info("Shutting down REST service (Jersey)")
t.stop()
}
}
}
}

View file

@ -1,10 +1,11 @@
/**
* Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
*/
package akka.rest
package akka.http
import akka.serialization.Serializer
import java.io.OutputStream
import akka.serialization.Serializer
import javax.ws.rs.core.{MultivaluedMap, MediaType}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}
import javax.ws.rs.Produces

View file

@ -101,8 +101,8 @@ class AkkaSecurityFilterFactory extends ResourceFilterFactory with Logging {
}
lazy val authenticatorFQN = {
val auth = Config.config.getString("akka.rest.authenticator", "N/A")
if (auth == "N/A") throw new IllegalActorStateException("The config option 'akka.rest.authenticator' is not defined in 'akka.conf'")
val auth = Config.config.getString("akka.http.authenticator", "N/A")
if (auth == "N/A") throw new IllegalActorStateException("The config option 'akka.http.authenticator' is not defined in 'akka.conf'")
auth
}
@ -399,8 +399,8 @@ trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] w
* principal name for the HTTP kerberos service, i.e. HTTP/ { server } @ { realm }
*/
lazy val servicePrincipal = {
val p = Config.config.getString("akka.rest.kerberos.servicePrincipal", "N/A")
if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.rest.kerberos.servicePrincipal' is not defined in 'akka.conf'")
val p = Config.config.getString("akka.http.kerberos.servicePrincipal", "N/A")
if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.servicePrincipal' is not defined in 'akka.conf'")
p
}
@ -408,21 +408,21 @@ trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] w
* keytab location with credentials for the service principal
*/
lazy val keyTabLocation = {
val p = Config.config.getString("akka.rest.kerberos.keyTabLocation", "N/A")
if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.rest.kerberos.keyTabLocation' is not defined in 'akka.conf'")
val p = Config.config.getString("akka.http.kerberos.keyTabLocation", "N/A")
if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.keyTabLocation' is not defined in 'akka.conf'")
p
}
lazy val kerberosDebug = {
val p = Config.config.getString("akka.rest.kerberos.kerberosDebug", "N/A")
if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.rest.kerberos.kerberosDebug' is not defined in 'akka.conf'")
val p = Config.config.getString("akka.http.kerberos.kerberosDebug", "N/A")
if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.kerberosDebug' is not defined in 'akka.conf'")
p
}
/**
* is not used by this authenticator, so accept an empty value
*/
lazy val realm = Config.config.getString("akka.rest.kerberos.realm", "")
lazy val realm = Config.config.getString("akka.http.kerberos.realm", "")
/**
* verify the kerberos token from a client with the server

View file

@ -34,10 +34,10 @@ private[akka] object CassandraStorageBackend extends CommonStorageBackend {
val REF_KEY = "item".getBytes("UTF-8")
val EMPTY_BYTE_ARRAY = new Array[Byte](0)
val CASSANDRA_SERVER_HOSTNAME = config.getString("akka.storage.cassandra.hostname", "127.0.0.1")
val CASSANDRA_SERVER_PORT = config.getInt("akka.storage.cassandra.port", 9160)
val CASSANDRA_SERVER_HOSTNAME = config.getString("akka.persistence.cassandra.hostname", "127.0.0.1")
val CASSANDRA_SERVER_PORT = config.getInt("akka.persistence.cassandra.port", 9160)
val CONSISTENCY_LEVEL = {
config.getString("akka.storage.cassandra.consistency-level", "QUORUM") match {
config.getString("akka.persistence.cassandra.consistency-level", "QUORUM") match {
case "ZERO" => ConsistencyLevel.ZERO
case "ONE" => ConsistencyLevel.ONE
case "QUORUM" => ConsistencyLevel.QUORUM

View file

@ -31,8 +31,8 @@ private [akka] object CouchDBStorageBackend extends
}
lazy val URL = config.
getString("akka.storage.couchdb.url").
getOrElse(throw new IllegalArgumentException("'akka.storage.couchdb.url' not found in config"))
getString("akka.persistence.couchdb.url").
getOrElse(throw new IllegalArgumentException("'akka.persistence.couchdb.url' not found in config"))
def drop() = {
val client = new HttpClient()

View file

@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.util.Bytes
*/
private[akka] object HbaseStorageBackend extends MapStorageBackend[Array[Byte], Array[Byte]] with VectorStorageBackend[Array[Byte]] with RefStorageBackend[Array[Byte]] with Logging {
val HBASE_ZOOKEEPER_QUORUM = config.getString("akka.storage.hbase.zookeeper-quorum", "localhost")
val HBASE_ZOOKEEPER_QUORUM = config.getString("akka.persistence.hbase.zookeeper-quorum", "localhost")
val CONFIGURATION = new HBaseConfiguration
val REF_TABLE_NAME = "__REF_TABLE"
val VECTOR_TABLE_NAME = "__VECTOR_TABLE"

View file

@ -48,7 +48,7 @@ class SimpleHbaseSpecTestIntegration extends Spec with BeforeAndAfterAll with Sh
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.client.HTable
val HBASE_ZOOKEEPER_QUORUM = config.getString("akka.storage.hbase.zookeeper-quorum", "0")
val HBASE_ZOOKEEPER_QUORUM = config.getString("akka.persistence.hbase.zookeeper-quorum", "0")
HBASE_ZOOKEEPER_QUORUM should not equal ("0")
HBASE_ZOOKEEPER_QUORUM should equal("localhost")

View file

@ -20,7 +20,7 @@ private[akka] object MemcachedStorageBackend extends CommonStorageBackend {
import KVStorageBackend._
import org.apache.commons.codec.binary.Base64
val clientAddresses = config.getString("akka.storage.memcached.client.addresses", "localhost:11211")
val clientAddresses = config.getString("akka.persistence.memcached.client.addresses", "localhost:11211")
val factory = new KetamaConnectionFactory
val client = new MemcachedClient(factory, AddrUtil.getAddresses(clientAddresses))
val base64 = new Base64(76, Array.empty[Byte], true)

View file

@ -31,9 +31,9 @@ private[akka] object MongoStorageBackend extends
val REF = "__ref"
val COLLECTION = "akka_coll"
val HOSTNAME = config.getString("akka.storage.mongodb.hostname", "127.0.0.1")
val DBNAME = config.getString("akka.storage.mongodb.dbname", "testdb")
val PORT = config.getInt("akka.storage.mongodb.port", 27017)
val HOSTNAME = config.getString("akka.persistence.mongodb.hostname", "127.0.0.1")
val DBNAME = config.getString("akka.persistence.mongodb.dbname", "testdb")
val PORT = config.getInt("akka.persistence.mongodb.port", 27017)
val db: MongoDB = MongoConnection(HOSTNAME, PORT)(DBNAME)
val coll: MongoCollection = db(COLLECTION)

View file

@ -52,14 +52,14 @@ private [akka] object RedisStorageBackend extends
Logging {
// need an explicit definition in akka-conf
val nodes = config.getList("akka.storage.redis.cluster")
val nodes = config.getList("akka.persistence.redis.cluster")
def connect() =
nodes match {
case Seq() =>
// no cluster defined
val REDIS_SERVER_HOSTNAME = config.getString("akka.storage.redis.hostname", "127.0.0.1")
val REDIS_SERVER_PORT = config.getInt("akka.storage.redis.port", 6379)
val REDIS_SERVER_HOSTNAME = config.getString("akka.persistence.redis.hostname", "127.0.0.1")
val REDIS_SERVER_PORT = config.getInt("akka.persistence.redis.port", 6379)
new RedisClient(REDIS_SERVER_HOSTNAME, REDIS_SERVER_PORT)
case s =>

View file

@ -18,12 +18,12 @@ import com.trifork.riak.{RequestMeta, RiakObject, RiakClient}
private[akka] object RiakStorageBackend extends CommonStorageBackend {
val refBucket = config.getString("akka.storage.riak.bucket.ref", "Refs")
val mapBucket = config.getString("akka.storage.riak.bucket.map", "Maps")
val vectorBucket = config.getString("akka.storage.riak.bucket.vector", "Vectors")
val queueBucket = config.getString("akka.storage.riak.bucket.queue", "Queues")
val clientHost = config.getString("akka.storage.riak.client.host", "localhost")
val clientPort = config.getInt("akka.storage.riak.client.port", 8087)
val refBucket = config.getString("akka.persistence.riak.bucket.ref", "Refs")
val mapBucket = config.getString("akka.persistence.riak.bucket.map", "Maps")
val vectorBucket = config.getString("akka.persistence.riak.bucket.vector", "Vectors")
val queueBucket = config.getString("akka.persistence.riak.bucket.queue", "Queues")
val clientHost = config.getString("akka.persistence.riak.client.host", "localhost")
val clientPort = config.getInt("akka.persistence.riak.client.port", 8087)
val riakClient: RiakClient = new RiakClient(clientHost, clientPort);
import CommonStorageBackendAccess._

View file

@ -28,42 +28,42 @@ private[akka] object SimpledbStorageBackend extends CommonStorageBackend {
val ownerAtt = "owner"
val base64 = new Base64(1024, seperatorBytes, true)
val base64key = new Base64(1024, Array.empty[Byte], true)
val id = config.getString("akka.storage.simpledb.account.id").getOrElse{
val id = config.getString("akka.persistence.simpledb.account.id").getOrElse{
val e = new IllegalStateException("You must provide an AWS id")
log.error(e, "You Must Provide an AWS id to use the SimpledbStorageBackend")
throw e
}
val secretKey = config.getString("akka.storage.simpledb.account.secretKey").getOrElse{
val secretKey = config.getString("akka.persistence.simpledb.account.secretKey").getOrElse{
val e = new IllegalStateException("You must provide an AWS secretKey")
log.error(e, "You Must Provide an AWS secretKey to use the SimpledbStorageBackend")
throw e
}
val refDomain = config.getString("akka.storage.simpledb.domain.ref", "ref")
val mapDomain = config.getString("akka.storage.simpledb.domain.map", "map")
val queueDomain = config.getString("akka.storage.simpledb.domain.queue", "queue")
val vectorDomain = config.getString("akka.storage.simpledb.domain.vector", "vector")
val refDomain = config.getString("akka.persistence.simpledb.domain.ref", "ref")
val mapDomain = config.getString("akka.persistence.simpledb.domain.map", "map")
val queueDomain = config.getString("akka.persistence.simpledb.domain.queue", "queue")
val vectorDomain = config.getString("akka.persistence.simpledb.domain.vector", "vector")
val credentials = new BasicAWSCredentials(id, secretKey);
val clientConfig = new ClientConfiguration()
for (i <- config.getInt("akka.storage.simpledb.client.timeout")) {
for (i <- config.getInt("akka.persistence.simpledb.client.timeout")) {
clientConfig.setConnectionTimeout(i)
}
for (i <- config.getInt("akka.storage.simpledb.client.maxconnections")) {
for (i <- config.getInt("akka.persistence.simpledb.client.maxconnections")) {
clientConfig.setMaxConnections(i)
}
clientConfig.setMaxErrorRetry(config.getInt("akka.storage.simpledb.client.maxretries", 10))
clientConfig.setMaxErrorRetry(config.getInt("akka.persistence.simpledb.client.maxretries", 10))
for (s <- config.getString("akka.storage.simpledb.client.protocol")) {
for (s <- config.getString("akka.persistence.simpledb.client.protocol")) {
clientConfig.setProtocol(Protocol.valueOf(s))
}
for (i <- config.getInt("akka.storage.simpledb.client.sockettimeout")) {
for (i <- config.getInt("akka.persistence.simpledb.client.sockettimeout")) {
clientConfig.setSocketTimeout(i)
}
for {s <- config.getInt("akka.storage.simpledb.client.sendbuffer")
r <- config.getInt("akka.storage.simpledb.client.receivebuffer")} {
for {s <- config.getInt("akka.persistence.simpledb.client.sendbuffer")
r <- config.getInt("akka.persistence.simpledb.client.receivebuffer")} {
clientConfig.setSocketBufferSizeHints(s, r)
}
for (s <- config.getString("akka.storage.simpledb.client.useragent")) {
for (s <- config.getString("akka.persistence.simpledb.client.useragent")) {
clientConfig.setUserAgent(s)
}

View file

@ -28,14 +28,14 @@ private[akka] object VoldemortStorageBackend extends CommonStorageBackend {
import VoldemortAccess._
val bootstrapUrlsProp = "bootstrap_urls"
val clientConfig = config.getConfigMap("akka.storage.voldemort.client") match {
val clientConfig = config.getConfigMap("akka.persistence.voldemort.client") match {
case Some(configMap) => getClientConfig(configMap.asMap)
case None => getClientConfig(new HashMap[String, String] + (bootstrapUrlsProp -> "tcp://localhost:6666"))
}
val refStore = config.getString("akka.storage.voldemort.store.ref", "Refs")
val mapStore = config.getString("akka.storage.voldemort.store.map", "Maps")
val vectorStore = config.getString("akka.storage.voldemort.store.vector", "Vectors")
val queueStore = config.getString("akka.storage.voldemort.store.queue", "Queues")
val refStore = config.getString("akka.persistence.voldemort.store.ref", "Refs")
val mapStore = config.getString("akka.persistence.voldemort.store.map", "Maps")
val vectorStore = config.getString("akka.persistence.voldemort.store.vector", "Vectors")
val queueStore = config.getString("akka.persistence.voldemort.store.queue", "Queues")
var storeClientFactory: StoreClientFactory = null

View file

@ -26,7 +26,7 @@ trait BootableRemoteActorService extends Bootable with Logging {
def startRemoteService = remoteServerThread.start
abstract override def onLoad = {
if (config.getBool("akka.remote.server.service", true)) {
if (RemoteServer.isRemotingEnabled) {
log.info("Initializing Remote Actors Service...")
startRemoteService
log.info("Remote Actors Service initialized")

View file

@ -66,12 +66,14 @@ object RemoteNode extends RemoteServer
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
object RemoteServer {
val isRemotingEnabled = config.getList("akka.enabled-modules").exists(_ == "remote")
val UUID_PREFIX = "uuid:"
val MESSAGE_FRAME_SIZE = config.getInt("akka.remote.server.message-frame-size", 1048576)
val SECURE_COOKIE = config.getString("akka.remote.secure-cookie")
val REQUIRE_COOKIE = {
val requireCookie = config.getBool("akka.remote.server.require-cookie", true)
if (requireCookie && RemoteServer.SECURE_COOKIE.isEmpty) throw new ConfigurationException(
if (isRemotingEnabled && requireCookie && RemoteServer.SECURE_COOKIE.isEmpty) throw new ConfigurationException(
"Configuration option 'akka.remote.server.require-cookie' is turned on but no secure cookie is defined in 'akka.remote.secure-cookie'.")
requireCookie
}

View file

@ -39,7 +39,7 @@ class Boot {
}
/*
* In akka.conf you can set the FQN of any AuthenticationActor of your wish, under the property name: akka.rest.authenticator
* In akka.conf you can set the FQN of any AuthenticationActor of your wish, under the property name: akka.http.authenticator
*/
class DigestAuthenticationService extends DigestAuthenticationActor {
//If you want to have a distributed nonce-map, you can use something like below,

View file

@ -8,7 +8,7 @@
<servlet>
<servlet-name>AkkaServlet</servlet-name>
<servlet-class>akka.rest.AkkaServlet</servlet-class>
<servlet-class>akka.http.AkkaServlet</servlet-class>
</servlet>
<servlet-mapping>

View file

@ -8,7 +8,9 @@
akka {
version = "1.0-SNAPSHOT" # Akka version, checked against the runtime version of Akka.
time-unit = "seconds" # Default timeout time unit for all timeout properties throughout the config
enabled-modules = [] # Comma separated list of the enabled modules. Options: ["remote", "remote.ssl", "camel", "http"]
time-unit = "seconds" # Time unit for all timeout properties throughout the config
# These boot classes are loaded (and created) automatically when the Akka Microkernel boots up
# Can be used to bootstrap your application(s)
@ -84,8 +86,7 @@ akka {
timeout = 60
}
rest {
service = on
http {
hostname = "localhost"
port = 9998
#cometSupport = "org.atmosphere.container.Jetty7CometSupport" # Disregard autodetection, for valid values: http://doc.akkasource.org/comet
@ -133,13 +134,12 @@ akka {
}
server {
service = on
hostname = "localhost" # The hostname or IP that clients should connect to
port = 2552 # The port clients should connect to
message-frame-size = 1048576
port = 2552 # The port clients should connect to. Default is 2552 (AKKA)
message-frame-size = 1048576 # Increase this if you want to be able to send messages with large payloads
connection-timeout = 1
require-cookie = on
untrusted-mode = off
require-cookie = on # Should the remote server require that its peers share the same secure-cookie (defined in the 'remote' section)?
untrusted-mode = off # Enable untrusted mode for full security of server-managed actors; allows untrusted clients to connect.
}
client {
@ -148,15 +148,9 @@ akka {
message-frame-size = 1048576
reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for
}
cluster {
service = on
name = "default" # The name of the cluster
serializer = "akka.serialization.Serializer$Java$" # FQN of the serializer class
}
}
storage {
persistence {
cassandra {
hostname = "127.0.0.1" # IP address or hostname of one of the Cassandra cluster's seeds
port = 9160 # Port to Cassandra
@ -220,6 +214,7 @@ akka {
id = "YOU NEED TO PROVIDE AN AWS ID"
secretKey = "YOU NEED TO PROVIDE AN AWS SECRETKEY"
}
client {
#Defaults to default AWS ClientConfiguration
timeout =50000
@ -231,6 +226,7 @@ akka {
#receivebuffer = 0
#useragent = "AWS Java SDK-1.0.14"
}
domain {
ref = "ref"
map = "map"
@ -239,8 +235,4 @@ akka {
}
}
}
camel {
service = on
}
}