Removed trailing whitespace

Jonas Bonér 2010-06-01 18:41:39 +02:00
parent c8732569a7
commit 56e7428e47
34 changed files with 273 additions and 273 deletions

View file

@ -66,23 +66,23 @@ final class ActiveObjectConfiguration {
}
/**
* Holds RTTI (runtime type information) for the Active Object, e.g. the current 'sender'
* reference, the 'senderFuture' reference etc.
* <p/>
* In order to make use of this context you have to create a member field in your
* Active Object that has the type 'ActiveObjectContext', then an instance will
* be injected for you to use.
* <p/>
* This class does not contain static information but is updated by the runtime system
* at runtime.
* <p/>
* Here is an example of usage:
* <pre>
* class Ping {
* // This context will be injected, holds RTTI (runtime type information)
* // for the current message send
* private ActiveObjectContext context = null;
*
* public void hit(int count) {
* Pong pong = (Pong) context.getSender();
* pong.hit(count++)
@ -100,19 +100,19 @@ final class ActiveObjectContext {
* Returns the current sender Active Object reference.
* Scala style getter.
*/
def sender: AnyRef = {
if (_sender eq null) throw new IllegalStateException("Sender reference should not be null.")
else _sender
}
/**
* Returns the current sender Active Object reference.
* Java style getter.
*/
def getSender: AnyRef = {
if (_sender eq null) throw new IllegalStateException("Sender reference should not be null.")
else _sender
}
/**
* Returns the current sender future Active Object reference.
@ -364,7 +364,7 @@ object ActiveObject extends Logging {
proxy.asInstanceOf[T]
}
private[akka] def newInstance[T](intf: Class[T], target: AnyRef, actorRef: ActorRef,
remoteAddress: Option[InetSocketAddress], timeout: Long): T = {
val context = injectActiveObjectContext(target)
val proxy = Proxy.newInstance(Array(intf), Array(target), false, false)
@ -462,7 +462,7 @@ object ActiveObject extends Logging {
if (parent != null) injectActiveObjectContext0(activeObject, parent)
else {
log.warning(
"Can't set 'ActiveObjectContext' for ActiveObject [%s] since no field of this type could be found.",
"Can't set 'ActiveObjectContext' for ActiveObject [%s] since no field of this type could be found.",
activeObject.getClass.getName)
None
}
@ -522,7 +522,7 @@ private[akka] sealed class ActiveObjectAspect {
remoteAddress = init.remoteAddress
timeout = init.timeout
isInitialized = true
}
dispatch(joinPoint)
}
@ -583,7 +583,7 @@ private[akka] sealed class ActiveObjectAspect {
} else future.result
private def isVoid(rtti: MethodRtti) = rtti.getMethod.getReturnType == java.lang.Void.TYPE
private def escapeArguments(args: Array[AnyRef]): Tuple2[Array[AnyRef], Boolean] = {
var isEscaped = false
val escapedArgs = for (arg <- args) yield {
@ -606,11 +606,11 @@ private[akka] sealed class ActiveObjectAspect {
joinPoint: JoinPoint, isOneWay: Boolean, isVoid: Boolean, sender: AnyRef, senderFuture: CompletableFuture[Any]) {
override def toString: String = synchronized {
"Invocation [joinPoint: " + joinPoint.toString +
", isOneWay: " + isOneWay +
"Invocation [joinPoint: " + joinPoint.toString +
", isOneWay: " + isOneWay +
", isVoid: " + isVoid +
", sender: " + sender +
", senderFuture: " + senderFuture +
", sender: " + sender +
", senderFuture: " + senderFuture +
"]"
}
@ -653,11 +653,11 @@ private[akka] class Dispatcher(transactionalRequired: Boolean, val callbacks: Op
private var postRestart: Option[Method] = None
private var initTxState: Option[Method] = None
private var context: Option[ActiveObjectContext] = None
def this(transactionalRequired: Boolean) = this(transactionalRequired,None)
private[actor] def initialize(targetClass: Class[_], targetInstance: AnyRef, ctx: Option[ActiveObjectContext]) = {
if (transactionalRequired || targetClass.isAnnotationPresent(Annotations.transactionrequired))
self.makeTransactionRequired
self.id = targetClass.getName
target = Some(targetInstance)
@ -705,7 +705,7 @@ private[akka] class Dispatcher(transactionalRequired: Boolean, val callbacks: Op
def receive = {
case Invocation(joinPoint, isOneWay, _, sender, senderFuture) =>
context.foreach { ctx =>
if (sender ne null) ctx._sender = sender
if (senderFuture ne null) ctx._senderFuture = senderFuture
}

View file

@ -329,7 +329,7 @@ trait Actor extends Logging {
* <pre>
* self ! message
* </pre>
* Here you also find most of the Actor API.
* <p/>
* For example fields like:
* <pre>
@ -384,7 +384,7 @@ trait Actor extends Logging {
* Is called when an Actor is started by invoking 'actor.start'.
*/
def init {}
/**
* User overridable callback.
* <p/>

View file

@ -222,7 +222,7 @@ trait ActorRef extends TransactionManagement {
* Is defined if the message was sent with '!!' or '!!!', else None.
*/
def senderFuture: Option[CompletableFuture[Any]] = guard.withGuard { _senderFuture }
/**
* Is the actor being restarted?
*/
@ -356,7 +356,7 @@ trait ActorRef extends TransactionManagement {
"\n\tYou have probably: " +
"\n\t\t1. Sent a message to an Actor from an instance that is NOT an Actor." +
"\n\t\t2. Invoked a method on an Active Object from an instance NOT an Active Object.")
/**
* Use <code>reply_?(..)</code> to reply with a message to the original sender of the message currently
* being processed.
@ -1224,7 +1224,7 @@ private[akka] case class RemoteActorRef private[akka] (
extends ActorRef {
_uuid = uuuid
timeout = _timeout
start
lazy val remoteClient = RemoteClient.clientFor(hostname, port, loader)

View file

@ -82,19 +82,19 @@ object RemoteClient extends Logging {
private[akka] def actorFor(uuid: String, className: String, timeout: Long, hostname: String, port: Int, loader: Option[ClassLoader]): ActorRef =
RemoteActorRef(uuid, className, hostname, port, timeout, loader)
def clientFor(hostname: String, port: Int): RemoteClient =
clientFor(new InetSocketAddress(hostname, port), None)
def clientFor(hostname: String, port: Int, loader: ClassLoader): RemoteClient =
clientFor(new InetSocketAddress(hostname, port), Some(loader))
def clientFor(address: InetSocketAddress): RemoteClient =
clientFor(address, None)
def clientFor(address: InetSocketAddress, loader: ClassLoader): RemoteClient =
clientFor(address, Some(loader))
private[akka] def clientFor(hostname: String, port: Int, loader: Option[ClassLoader]): RemoteClient =
clientFor(new InetSocketAddress(hostname, port), loader)
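A one-line usage sketch (not part of this commit), exercising the simplest public overload shown above:

    // Obtain (or reuse) a client connection for the given remote host and port
    val client: RemoteClient = RemoteClient.clientFor("localhost", 9999)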
private[akka] def clientFor(address: InetSocketAddress, loader: Option[ClassLoader]): RemoteClient = synchronized {
@ -330,7 +330,7 @@ class RemoteClientHandler(val name: String,
client.connection = bootstrap.connect(remoteAddress)
client.connection.awaitUninterruptibly // Wait until the connection attempt succeeds or fails.
if (!client.connection.isSuccess) {
client.listeners.toArray.foreach(l =>
l.asInstanceOf[ActorRef] ! RemoteClientError(client.connection.getCause))
log.error(client.connection.getCause, "Reconnection to [%s] has failed", remoteAddress)
}
@ -339,13 +339,13 @@ class RemoteClientHandler(val name: String,
}
override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = {
client.listeners.toArray.foreach(l =>
l.asInstanceOf[ActorRef] ! RemoteClientConnected(client.hostname, client.port))
log.debug("Remote client connected to [%s]", ctx.getChannel.getRemoteAddress)
}
override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = {
client.listeners.toArray.foreach(l =>
l.asInstanceOf[ActorRef] ! RemoteClientDisconnected(client.hostname, client.port))
log.debug("Remote client disconnected from [%s]", ctx.getChannel.getRemoteAddress)
}

View file

@ -184,7 +184,7 @@ class RemoteServer extends Logging {
def start(_hostname: String, _port: Int): RemoteServer =
start(_hostname, _port, None)
private def start(_hostname: String, _port: Int, loader: ClassLoader): RemoteServer =
start(_hostname, _port, Some(loader))
private def start(_hostname: String, _port: Int, loader: Option[ClassLoader]): RemoteServer = synchronized {

View file

@ -13,7 +13,7 @@ case class Listen(listener: ActorRef) extends ListenerMessage
case class Deafen(listener: ActorRef) extends ListenerMessage
case class WithListeners(f: List[ActorRef] => Unit) extends ListenerMessage
/**
* Listeners is a generic trait to implement listening capability on an Actor.
* <p/>
* Use the <code>gossip(msg)</code> method to have it sent to the listeners.
@ -34,6 +34,6 @@ trait Listeners { self: Actor =>
}
protected def gossip(msg: Any) = listenersAsList foreach (_ ! msg)
private def listenersAsList: List[ActorRef] = listeners.toArray.toList.asInstanceOf[List[ActorRef]]
}
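A minimal usage sketch (not part of this commit). The handler name 'listenerManagement' is an assumption: the part of the trait that consumes Listen/Deafen lies outside the hunk shown above, and only gossip and the message types are visible here.

    import se.scalablesolutions.akka.actor.Actor

    class StockTicker extends Actor with Listeners {
      // Compose the trait's registration handling (assumed to be exposed as a
      // Receive named 'listenerManagement') with this actor's own messages.
      def receive = listenerManagement orElse {
        case price: Double => gossip(price) // fan the update out to every registered listener
      }
    }

    // Registering and unregistering a listener is done by message:
    //   ticker ! Listen(listenerRef)
    //   ticker ! Deafen(listenerRef)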

View file

@ -282,10 +282,10 @@ object Transaction {
setTransaction(Some(tx))
mtx.registerLifecycleListener(new TransactionLifecycleListener() {
def notify(mtx: MultiverseTransaction, event: TransactionLifecycleEvent) = event.name match {
case "postCommit" =>
case "postCommit" =>
log.trace("Committing transaction [%s]", mtx)
tx.commit
case "postAbort" =>
case "postAbort" =>
log.trace("Aborting transaction [%s]", mtx)
tx.abort
case _ => {}

View file

@ -43,7 +43,7 @@ class ActorPatternsTest extends junit.framework.TestCase with Suite with MustMat
b <- (d.!![Int](testMsg2,5000))
c <- (d.!![Int](testMsg3,5000))
} yield a + b + c
result.get must be(21)
for(a <- List(t1,t2,d)) a.stop
}

View file

@ -95,7 +95,7 @@ class StmSpec extends
val size2: Int = (actor !! Size).getOrElse(fail("Could not get Vector::size"))
size2 should equal(3)
} catch {
case e =>
e.printStackTrace
fail(e.toString)
}
@ -122,7 +122,7 @@ class StmSpec extends
val size4: Int = (actor !! Size).getOrElse(fail("Could not get size"))
size4 should equal(3)
} catch {
case e =>
fail(e.toString)
}
}
@ -130,7 +130,7 @@ class StmSpec extends
/*
describe("Multiverse API") {
it("should blablabla") {
import org.multiverse.api.programmatic._
// import org.multiverse.api._
import org.multiverse.templates._
@ -139,13 +139,13 @@ class StmSpec extends
import org.multiverse.api.{GlobalStmInstance, ThreadLocalTransaction, Transaction => MultiverseTransaction}
import org.multiverse.api.lifecycle.{TransactionLifecycleListener, TransactionLifecycleEvent}
import org.multiverse.commitbarriers._
def createRef[T]: ProgrammaticReference[T] = GlobalStmInstance
.getGlobalStmInstance
.getProgrammaticReferenceFactoryBuilder
.build
.atomicCreateReference(null.asInstanceOf[T])
val ref1 = Ref(0)//createRef[Int]
val ref2 = Ref(0)//createRef[Int]
@ -185,13 +185,13 @@ class GlobalTransactionVectorTestActor extends Actor {
import se.scalablesolutions.akka.stm.Transaction.Global
private val vector: TransactionalVector[Int] = Global.atomic { TransactionalVector(1) }
def receive = {
case Add(value) =>
Global.atomic { vector + value}
self.reply(Success)
case Size =>
val size = Global.atomic { vector.size }
self.reply(size)
}
@ -200,12 +200,12 @@ class GlobalTransactionVectorTestActor extends Actor {
class NestedTransactorLevelOneActor extends Actor {
import GlobalTransactionVectorTestActor._
private val nested = actorOf[NestedTransactorLevelTwoActor].start
def receive = {
case add @ Add(_) =>
self.reply((nested !! add).get)
case Size =>
self.reply((nested !! Size).get)
case "HiLevelOne" => println("HiLevelOne")
@ -216,15 +216,15 @@ class NestedTransactorLevelOneActor extends Actor {
class NestedTransactorLevelTwoActor extends Actor {
import GlobalTransactionVectorTestActor._
private val ref = Ref(0)
def receive = {
case Add(value) =>
ref.swap(value)
self.reply(Success)
case Size =>
self.reply(ref.getOrElse(-1))
case "HiLevelTwo" => println("HiLevelTwo")
}
}

View file

@ -47,29 +47,29 @@ class AkkaLoader extends Logging {
private def printBanner = {
log.info(
"""
t
t t t
t t tt t
tt t t tt t
t ttttttt t ttt t
t tt ttt t ttt t
t t ttt t ttt t t
tt t ttt ttt ttt t
t t ttt ttt t tt t
t ttt ttt t t
tt ttt ttt t
ttt ttt
tttttttt ttt ttt ttt ttt tttttttt
ttt tt ttt ttt ttt ttt ttt ttt
ttt ttt ttt ttt ttt ttt ttt ttt
ttt ttt ttt ttt ttt tt ttt ttt
tttt ttttttttt tttttttt tttt
ttttttttt ttt ttt ttt ttt ttttttttt
ttt ttt ttt ttt ttt ttt ttt ttt
ttt ttt ttt ttt ttt ttt ttt ttt
ttt tt ttt ttt ttt ttt ttt ttt
tttttttt ttt ttt ttt ttt tttttttt
==================================================
""")
log.info(" Running version %s", Config.VERSION)

View file

@ -50,14 +50,14 @@ trait EmbeddedAppServer extends Bootable with Logging {
Thread.currentThread.setContextClassLoader(applicationLoader.get)
super.init(sc)
}
finally {
Thread.currentThread.setContextClassLoader(cl)
}
}
})
adapter.setContextPath(uri.getPath)
adapter.addInitParameter("cometSupport",
"org.atmosphere.container.GrizzlyCometSupport")
adapter.addInitParameter("com.sun.jersey.config.property.resourceConfigClass",
"com.sun.jersey.api.core.PackagesResourceConfig")

View file

@ -42,7 +42,7 @@ private[akka] object CassandraStorageBackend extends
case "ALL" => ConsistencyLevel.ALL
case "ANY" => ConsistencyLevel.ANY
case unknown => throw new IllegalArgumentException(
"Cassandra consistency level [" + unknown + "] is not supported." +
"Cassandra consistency level [" + unknown + "] is not supported." +
"\n\tExpected one of [ZERO, ONE, QUORUM, DCQUORUM, DCQUORUMSYNC, ALL, ANY] in the akka.conf configuration file.")
}
}
@ -105,9 +105,9 @@ private[akka] object CassandraStorageBackend extends
}
}
def insertVectorStorageEntriesFor(name: String, elements: List[Array[Byte]]) =
elements.foreach(insertVectorStorageEntryFor(name, _))
def updateVectorStorageEntryFor(name: String, index: Int, elem: Array[Byte]) = {
val columnPath = new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family)
columnPath.setColumn(intToBytes(index))

View file

@ -171,4 +171,4 @@ object EmbeddedCassandraService {
def start: Unit = {}
}
*/

View file

@ -30,7 +30,7 @@ class StorageException(message: String) extends RuntimeException(message)
* <pre>
* val myMap = CassandraStorage.getMap(id)
* </pre>
*
* Example Java usage:
* <pre>
* PersistentMap<Object, Object> myMap = MongoStorage.newMap();
@ -72,7 +72,7 @@ trait Storage {
}
/**
* Implementation of <tt>PersistentMap</tt> for every concrete
* storage will have the same workflow. This abstracts the workflow.
*
* Subclasses just need to provide the actual concrete instance for the
@ -117,23 +117,23 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
put(key, value)
this
}
override def put(key: K, value: V): Option[V] = {
register
newAndUpdatedEntries.put(key, value)
}
override def update(key: K, value: V) = {
register
newAndUpdatedEntries.update(key, value)
}
override def remove(key: K) = {
register
removedEntries.add(key)
newAndUpdatedEntries.get(key)
}
def slice(start: Option[K], count: Int): List[Tuple2[K, V]] =
slice(start, None, count)
@ -141,11 +141,11 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
storage.getMapStorageRangeFor(uuid, start, finish, count)
} catch { case e: Exception => Nil }
override def clear = {
register
shouldClearOnCommit.swap(true)
}
override def contains(key: K): Boolean = try {
newAndUpdatedEntries.contains(key) ||
storage.getMapStorageEntryFor(uuid, key).isDefined
@ -163,9 +163,9 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
storage.getMapStorageEntryFor(uuid, key)
} catch { case e: Exception => None }
}
def iterator = elements
override def elements: Iterator[Tuple2[K, V]] = {
new Iterator[Tuple2[K, V]] {
private val originalList: List[Tuple2[K, V]] = try {
@ -173,10 +173,10 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
} catch {
case e: Throwable => Nil
}
private var elements = newAndUpdatedEntries.toList union originalList.reverse
override def next: Tuple2[K, V]= synchronized {
val element = elements.head
elements = elements.tail
element
}
override def hasNext: Boolean = synchronized { !elements.isEmpty }
@ -217,12 +217,12 @@ trait PersistentVector[T] extends IndexedSeq[T] with Transactional with Committa
}
def +(elem: T) = add(elem)
def add(elem: T) = {
register
newElems + elem
}
def apply(index: Int): T = get(index)
def get(index: Int): T = {
@ -231,7 +231,7 @@ trait PersistentVector[T] extends IndexedSeq[T] with Transactional with Committa
}
override def slice(start: Int, finish: Int): IndexedSeq[T] = slice(Some(start), Some(finish))
def slice(start: Option[Int], finish: Option[Int], count: Int = 0): IndexedSeq[T] = {
val buffer = new scala.collection.mutable.ArrayBuffer[T]
storage.getVectorStorageRangeFor(uuid, start, finish, count).foreach(buffer.append(_))
@ -277,21 +277,21 @@ trait PersistentVector[T] extends IndexedSeq[T] with Transactional with Committa
*/
trait PersistentRef[T] extends Transactional with Committable with Abortable {
protected val ref = new TransactionalRef[T]
val storage: RefStorageBackend[T]
def commit = if (ref.isDefined) {
storage.insertRefStorageFor(uuid, ref.get.get)
ref.swap(null.asInstanceOf[T])
}
def abort = ref.swap(null.asInstanceOf[T])
def swap(elem: T) = {
register
ref.swap(elem)
}
def get: Option[T] = if (ref.isDefined) ref.get else storage.getRefStorageFor(uuid)
def isDefined: Boolean = ref.isDefined || storage.getRefStorageFor(uuid).isDefined
@ -309,7 +309,7 @@ trait PersistentRef[T] extends Transactional with Committable with Abortable {
}
/**
* Implementation of <tt>PersistentQueue</tt> for every concrete
* storage will have the same workflow. This abstracts the workflow.
* <p/>
* Enqueue is simpler, we just have to record the operation in a local
@ -410,13 +410,13 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
}
}
override def clear = {
register
shouldClearOnCommit.swap(true)
localQ.swap(Queue.empty)
pickMeForDQ.swap(0)
}
override def size: Int = try {
storage.size(uuid) + localQ.get.get.length
} catch { case e: Exception => 0 }
@ -424,11 +424,11 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
override def isEmpty: Boolean =
size == 0
override def +=(elem: A) = {
enqueue(elem)
this
}
def ++=(elems: Iterator[A]) = {
enqueue(elems.toList: _*)
this
}
@ -450,7 +450,7 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
* Implements a template for a concrete persistent transactional sorted set based storage.
* <p/>
* Sorting is done based on a <i>zscore</i>. But the computation of zscore has been kept
* outside the abstraction.
* <p/>
* zscore can be implemented in a variety of ways by the calling class:
* <pre>
@ -467,7 +467,7 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
* class Foo {
* //..
* }
*
* implicit def Foo2Scorable(foo: Foo): ZScorable = new ZScorable {
* def toZScore = {
* //..
@ -526,7 +526,7 @@ trait PersistentSortedSet[A] extends Transactional with Committable with Abortab
}
}
}
def size: Int = newElems.size + storage.zcard(uuid) - removedElems.size
def zscore(elem: A): Float = {
@ -541,9 +541,9 @@ trait PersistentSortedSet[A] extends Transactional with Committable with Abortab
implicit def order(x: (A, Float)) = new Ordered[(A, Float)] {
def compare(that: (A, Float)) = x._2 compare that._2
}
implicit def ordering = new scala.math.Ordering[(A,Float)] {
def compare(x: (A, Float),y : (A,Float)) = x._2 compare y._2
}
@ -556,7 +556,7 @@ trait PersistentSortedSet[A] extends Transactional with Committable with Abortab
// -1 means the last element, -2 means the second last
val s = if (start < 0) start + l else start
val e =
if (end < 0) end + l
else if (end >= l) (l - 1)
else end

View file

@ -26,7 +26,7 @@ trait VectorStorageBackend[T] extends StorageBackend {
def updateVectorStorageEntryFor(name: String, index: Int, elem: T)
def getVectorStorageEntryFor(name: String, index: Int): T
def getVectorStorageRangeFor(name: String, start: Option[Int], finish: Option[Int], count: Int): List[T]
def getVectorStorageSizeFor(name: String): Int
}
// for Ref
@ -47,17 +47,17 @@ trait RefStorageBackend[T] extends StorageBackend {
trait QueueStorageBackend[T] extends StorageBackend {
// add to the end of the queue
def enqueue(name: String, item: T): Boolean
// pop from the front of the queue
def dequeue(name: String): Option[T]
// get the size of the queue
def size(name: String): Int
// return an array of items currently stored in the queue
// start is the item to begin, count is how many items to return
def peek(name: String, start: Int, count: Int): List[T]
// completely delete the queue
def remove(name: String): Boolean
}
@ -65,19 +65,19 @@ trait QueueStorageBackend[T] extends StorageBackend {
trait SortedSetStorageBackend[T] extends StorageBackend {
// add item to sorted set identified by name
def zadd(name: String, zscore: String, item: T): Boolean
// remove item from sorted set identified by name
def zrem(name: String, item: T): Boolean
// cardinality of the set identified by name
def zcard(name: String): Int
// zscore of the item from sorted set identified by name
def zscore(name: String, item: T): Option[Float]
// zrange from the sorted set identified by name
def zrange(name: String, start: Int, end: Int): List[T]
// zrange with score from the sorted set identified by name
def zrangeWithScore(name: String, start: Int, end: Int): List[(T, Float)]
}
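As a hedged illustration (not part of this commit), the trait above could be satisfied by a toy in-memory backend, assuming StorageBackend itself adds no abstract members; each named set is kept as item -> score and sorted by score on reads (Redis-style negative end indices are not handled in this sketch):

    import scala.collection.mutable

    class InMemorySortedSetStorageBackend[T] extends SortedSetStorageBackend[T] {
      // one score map per named sorted set
      private val sets = mutable.Map[String, mutable.Map[T, Float]]()
      private def setFor(name: String) = sets.getOrElseUpdate(name, mutable.Map[T, Float]())

      def zadd(name: String, zscore: String, item: T): Boolean = {
        setFor(name).put(item, zscore.toFloat); true
      }
      def zrem(name: String, item: T): Boolean = setFor(name).remove(item).isDefined
      def zcard(name: String): Int = setFor(name).size
      def zscore(name: String, item: T): Option[Float] = setFor(name).get(item)
      def zrange(name: String, start: Int, end: Int): List[T] =
        zrangeWithScore(name, start, end).map(_._1)
      def zrangeWithScore(name: String, start: Int, end: Int): List[(T, Float)] =
        setFor(name).toList.sortBy(_._2).slice(start, end + 1) // end is treated as inclusive here
    }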

View file

@ -280,7 +280,7 @@ private[akka] object MongoStorageBackend extends
}
val currentList = dbobj.get(VALUE).asInstanceOf[JArrayList[AnyRef]]
currentList.set(index, serializer.out(elem))
coll.update(q,
new BasicDBObject().append(KEY, name).append(VALUE, currentList))
}

View file

@ -87,7 +87,7 @@ object World {
pingEvery(evaporator, EvapMillis)
}
private def pingEvery(actor: ActorRef, millis: Long) =
Scheduler.schedule(actor, "ping", Config.StartDelay, millis, TimeUnit.MILLISECONDS)
}

View file

@ -26,4 +26,4 @@ object Application1 {
println(actor2 !! Message("actor2"))
}
}

View file

@ -19,4 +19,4 @@ object Application2 {
RemoteNode.start("localhost", 7777)
RemoteNode.register("remote2", actorOf[RemoteActor2].start)
}
}

View file

@ -74,4 +74,4 @@ class CustomRouteBuilder extends RouteBuilder {
}
})
}
}

View file

@ -27,17 +27,17 @@ First we need to download, build and start up Redis:
4. Run: ./redis-server.
For details on how to set up the Redis server, have a look at http://code.google.com/p/redis/wiki/QuickStart.
Then to run the sample:
1. Fire up two shells. For each of them:
- Step down into the root of the Akka distribution.
- Set 'export AKKA_HOME=<root of distribution>'.
- Run 'sbt console' to start up a REPL (interpreter).
2. In the first REPL, execute:
- scala> import sample.chat._
- scala> import se.scalablesolutions.akka.actor.Actor._
- scala> val chatService = actorOf[ChatService].start
3. In the second REPL, execute:
- scala> import sample.chat._
- scala> Runner.run
4. See the chat simulation run.
@ -60,12 +60,12 @@ case class ChatMessage(from: String, message: String) extends Event
/**
* Chat client.
*/
class ChatClient(val name: String) {
val chat = RemoteClient.actorFor("chat:service", "localhost", 9999)
def login = chat ! Login(name)
def logout = chat ! Logout(name)
def post(message: String) = chat ! ChatMessage(name, name + ": " + message)
def chatLog: ChatLog = (chat !! GetChatLog(name)).getOrElse(throw new Exception("Couldn't get the chat log from ChatServer"))
}
@ -75,15 +75,15 @@ class ChatClient(val name: String) {
class Session(user: String, storage: ActorRef) extends Actor {
private val loginTime = System.currentTimeMillis
private var userLog: List[String] = Nil
log.info("New session for user [%s] has been created at [%s]", user, loginTime)
def receive = {
case msg @ ChatMessage(from, message) =>
userLog ::= message
storage ! msg
case msg @ GetChatLog(_) =>
storage forward msg
}
}
@ -97,24 +97,24 @@ trait ChatStorage extends Actor
* Redis-backed chat storage implementation.
*/
class RedisChatStorage extends ChatStorage {
self.lifeCycle = Some(LifeCycle(Permanent))
val CHAT_LOG = "akka.chat.log"
private var chatLog = atomic { RedisStorage.getVector(CHAT_LOG) }
log.info("Redis-based chat storage is starting up...")
def receive = {
case msg @ ChatMessage(from, message) =>
log.debug("New chat message [%s]", message)
atomic { chatLog + message.getBytes("UTF-8") }
case GetChatLog(_) =>
val messageList = atomic { chatLog.map(bytes => new String(bytes, "UTF-8")).toList }
self.reply(ChatLog(messageList))
}
override def postRestart(reason: Throwable) = chatLog = RedisStorage.getVector(CHAT_LOG)
}
/**
@ -122,27 +122,27 @@ class RedisChatStorage extends ChatStorage {
* <p/>
* Uses self-type annotation (this: Actor =>) to declare that it needs to be mixed in with an Actor.
*/
trait SessionManagement { this: Actor =>
val storage: ActorRef // needs someone to provide the ChatStorage
val sessions = new HashMap[String, ActorRef]
protected def sessionManagement: Receive = {
case Login(username) =>
log.info("User [%s] has logged in", username)
val session = actorOf(new Session(username, storage))
session.start
sessions += (username -> session)
case Logout(username) =>
log.info("User [%s] has logged out", username)
val session = sessions(username)
session.stop
sessions -= username
}
protected def shutdownSessions =
sessions.foreach { case (_, session) => session.stop }
}
/**
@ -152,7 +152,7 @@ trait SessionManagement { this: Actor =>
*/
trait ChatManagement { this: Actor =>
val sessions: HashMap[String, ActorRef] // needs someone to provide the Session map
protected def chatManagement: Receive = {
case msg @ ChatMessage(from, _) => sessions(from) ! msg
case msg @ GetChatLog(from) => sessions(from) forward msg
@ -172,20 +172,20 @@ trait RedisChatStorageFactory { this: Actor =>
trait ChatServer extends Actor {
self.faultHandler = Some(OneForOneStrategy(5, 5000))
self.trapExit = List(classOf[Exception])
val storage: ActorRef
log.info("Chat server is starting up...")
// actor message handler
def receive = sessionManagement orElse chatManagement
// abstract methods to be defined somewhere else
protected def chatManagement: Receive
protected def sessionManagement: Receive
protected def shutdownSessions: Unit
override def shutdown = {
log.info("Chat server is shutting down...")
shutdownSessions
self.unlink(storage)
@ -200,10 +200,10 @@ trait ChatServer extends Actor {
* val chatService = Actor.actorOf[ChatService].start
* </pre>
*/
class ChatService extends
ChatServer with
SessionManagement with
ChatManagement with
RedisChatStorageFactory {
override def init = {
RemoteNode.start("localhost", 9999)
@ -217,7 +217,7 @@ class ChatService extends
object Runner {
def run = {
val client = new ChatClient("jonas")
client.login
client.post("Hi there")
@ -228,4 +228,4 @@ object Runner {
client.logout
}
}

View file

@ -23,7 +23,7 @@ class Boot extends Logging {
def boot {
// where to search snippet
LiftRules.addToPackages("sample.lift")
LiftRules.httpAuthProtectedResource.prepend {
case (Req("liftcount" :: Nil, _, _)) => Full(AuthRole("admin"))
}
@ -35,9 +35,9 @@ class Boot extends Logging {
true
}
}
LiftRules.passNotFoundToChain = true
val factory = SupervisorFactory(
SupervisorConfig(
RestartStrategy(OneForOne, 3, 100, List(classOf[Exception])),
@ -49,7 +49,7 @@ class Boot extends Logging {
LifeCycle(Permanent)) ::
Nil))
factory.newInstance.start
// Build SiteMap
// val entries = Menu(Loc("Home", List("index"), "Home")) :: Nil
// LiftRules.setSiteMap(SiteMap(entries:_*))

View file

@ -13,4 +13,4 @@ object LiftConsole {
exit(0)
}
}
*/

View file

@ -10,7 +10,7 @@ import se.scalablesolutions.akka.actor.Actor._
/**
* Sample Akka application for Redis PubSub
*
* Prerequisite: Need Redis Server running (the version that supports pubsub)
* <pre>
* 1. Download redis from http://github.com/antirez/redis
@ -65,7 +65,7 @@ object Sub {
val r = new RedisClient("localhost", 6379)
val s = actorOf(new Subscriber(r))
s.start
s ! Register(callback)
def sub(channels: String*) = {
s ! Subscribe(channels.toArray)
@ -78,29 +78,29 @@ object Sub {
def callback(pubsub: PubSubMessage) = pubsub match {
case S(channel, no) => println("subscribed to " + channel + " and count = " + no)
case U(channel, no) => println("unsubscribed from " + channel + " and count = " + no)
case M(channel, msg) =>
msg match {
// exit will unsubscribe from all channels and stop subscription service
case "exit" =>
case "exit" =>
println("unsubscribe all ..")
r.unsubscribe
// message "+x" will subscribe to channel x
case x if x startsWith "+" =>
case x if x startsWith "+" =>
val s: Seq[Char] = x
s match {
case Seq('+', rest @ _*) => r.subscribe(rest.toString){ m => }
}
// message "-x" will unsubscribe from channel x
case x if x startsWith "-" =>
case x if x startsWith "-" =>
val s: Seq[Char] = x
s match {
case Seq('-', rest @ _*) => r.unsubscribe(rest.toString)
}
// other message receive
case x =>
println("received message on channel " + channel + " as : " + x)
}
}

View file

@ -11,7 +11,7 @@ import se.scalablesolutions.akka.util.Logging
class RemoteHelloWorldActor extends RemoteActor("localhost", 9999) {
def receive = {
case "Hello" =>
case "Hello" =>
log.info("Received 'Hello'")
self.reply("World")
}
@ -27,7 +27,7 @@ object ClientManagedRemoteActorServer extends Logging {
}
object ClientManagedRemoteActorClient extends Logging {
def run = {
val actor = actorOf[RemoteHelloWorldActor].start
log.info("Remote actor created, moved to the server")

View file

@ -11,7 +11,7 @@ import se.scalablesolutions.akka.util.Logging
class HelloWorldActor extends Actor {
def receive = {
case "Hello" =>
case "Hello" =>
log.info("Received 'Hello'")
self.reply("World")
}
@ -30,7 +30,7 @@ object ServerManagedRemoteActorServer extends Logging {
}
object ServerManagedRemoteActorClient extends Logging {
def run = {
val actor = RemoteClient.actorFor("hello-service", "localhost", 9999)
log.info("Remote client created")

View file

@ -158,7 +158,7 @@ class Chat {
}
object ChatActor {
case class ChatMsg(val who: String, val what: String, val msg: String)
}
class ChatActor extends Actor with Logging {

View file

@ -147,4 +147,4 @@ class SecureTickActor extends Transactor with Logging {
self.reply(new Integer(0))
}
}
}

View file

@ -1,10 +1,10 @@
####################
# Akka Config File #
####################
# This file has all the default settings, so all these could be removed with no visible effect.
# Modify as needed.
<log>
filename = "./logs/akka.log"
roll = "daily" # Options: never, hourly, daily, sunday/monday/...
@ -13,14 +13,14 @@
# syslog_host = ""
# syslog_server_name = ""
</log>
<akka>
version = "0.9"
# FQN (Fully Qualified Name) to the class doing initial active object/actor
# supervisor bootstrap, should be defined in default constructor
boot = ["sample.camel.Boot",
"sample.rest.java.Boot",
"sample.rest.java.Boot",
"sample.rest.scala.Boot",
"sample.security.Boot"]
@ -41,10 +41,10 @@
jta-aware = off # 'on' means that if there is a JTA Transaction Manager available then the STM will
# begin (or join), commit or rollback the JTA transaction. Default is 'off'.
</stm>
<jta>
provider = "from-jndi" # Options: "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI)
# "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta',
provider = "from-jndi" # Options: "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI)
# "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta',
# e.g. you need the akka-jta JARs on classpath).
timeout = 60000
</jta>
@ -56,7 +56,7 @@
filters = ["se.scalablesolutions.akka.security.AkkaSecurityFilterFactory"] # List with all jersey filters to use
resource_packages = ["sample.rest.scala","sample.rest.java","sample.security"] # List with all resource packages for your Jersey services
authenticator = "sample.security.BasicAuthenticationService" # The authentication service to use. Need to be overridden (uses sample now)
# If you are using a KerberosAuthenticationActor
# <kerberos>
# servicePrincipal = "HTTP/localhost@EXAMPLE.COM"
@ -75,8 +75,8 @@
name = "default" # The name of the cluster
serializer = "se.scalablesolutions.akka.serialization.Serializer$Java$" # FQN of the serializer class
</cluster>
<server>
service = on
hostname = "localhost"
port = 9999
@ -88,14 +88,14 @@
read-timeout = 10000 # in millis (10 sec default)
</client>
</remote>
<storage>
<cassandra>
hostname = "127.0.0.1" # IP address or hostname of one of the Cassandra cluster's seeds
port = 9160
consistency-level = "QUORUM" # Options: ZERO, ONE, QUORUM, DCQUORUM, DCQUORUMSYNC, ALL, ANY
</cassandra>
<mongodb>
hostname = "127.0.0.1" # IP address or hostname of the MongoDB DB instance
port = 27017

View file

@ -1,20 +1,20 @@
# for production, you should probably set the root to INFO
# and the pattern to %c instead of %l. (%l is slower.)
# output messages into a rolling log file as well as stdout
log4j.rootLogger=INFO,stdout,R
# stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
# rolling log file ("system.log
log4j.appender.R=org.apache.log4j.DailyRollingFileAppender
log4j.appender.R.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
# Edit the next line to point to your logs directory
log4j.appender.R.File=./logs/akka.log
log4j.logger.org.atmosphere=DEBUG

View file

@ -21,15 +21,15 @@
<!-- Basic Configuration -->
<!--======================================================================-->
<!--
~ The name of this cluster. This is mainly used to prevent machines in
~ one logical cluster from joining another.
-->
<ClusterName>akka</ClusterName>
<!--
~ Turn on to make new [non-seed] nodes automatically migrate the right data
~ to themselves. (If no InitialToken is specified, they will pick one
~ such that they will get half the range of the most-loaded node.)
~ If a node starts up without bootstrapping, it will mark itself bootstrapped
~ so that you can't subsequently accidentally bootstrap a node with
@ -66,11 +66,11 @@
~ and LongType. You can also specify the fully-qualified class
~ name to a class of your choice extending
~ org.apache.cassandra.db.marshal.AbstractType.
~
~ SuperColumns have a similar CompareSubcolumnsWith attribute.
~
~ BytesType: Simple sort by byte value. No validation is performed.
~ AsciiType: Like BytesType, but validates that the input can be
~ parsed as US-ASCII.
~ UTF8Type: A string encoded as UTF8
~ LongType: A 64bit long
@ -82,7 +82,7 @@
~
~ An optional `Comment` attribute may be used to attach additional
~ human-readable information about the column family to its definition.
~
~ The optional KeysCached attribute specifies
~ the number of keys per sstable whose locations we keep in
~ memory in "mostly LRU" order. (JUST the key locations, NOT any
@ -94,25 +94,25 @@
~ whose entire contents we cache in memory. Do not use this on
~ ColumnFamilies with large rows, or ColumnFamilies with high write:read
~ ratios. Specify a fraction (value less than 1), a percentage (ending in
~ a % sign) or an absolute number of rows to cache.
~ RowsCached defaults to 0, i.e., row cache is off by default.
~
~ Remember, when using caches as a percentage, they WILL grow with
~ your data set!
-->
<ColumnFamily Name="map"
CompareWith="UTF8Type"
<ColumnFamily Name="map"
CompareWith="UTF8Type"
KeysCached="100%" />
<!-- FIXME: change vector to a super column -->
<ColumnFamily Name="vector"
CompareWith="UTF8Type"
<ColumnFamily Name="vector"
CompareWith="UTF8Type"
KeysCached="100%" />
<ColumnFamily Name="ref"
CompareWith="UTF8Type"
<ColumnFamily Name="ref"
CompareWith="UTF8Type"
KeysCached="100%" />
<!--ColumnFamily Name="Standard1" CompareWith="BytesType"/>
<ColumnFamily Name="Standard2"
<ColumnFamily Name="Standard2"
CompareWith="UTF8Type"
KeysCached="100%"/>
<ColumnFamily Name="StandardByUUID1" CompareWith="TimeUUIDType" />
@ -150,7 +150,7 @@
~ and PropertyFileEndPointSnitch is available in contrib/.
-->
<EndPointSnitch>org.apache.cassandra.locator.EndPointSnitch</EndPointSnitch>
</Keyspace>
</Keyspaces>
@ -158,7 +158,7 @@
~ Authenticator: any IAuthenticator may be used, including your own as long
~ as it is on the classpath. Out of the box, Cassandra provides
~ org.apache.cassandra.auth.AllowAllAuthenticator and,
~ org.apache.cassandra.auth.SimpleAuthenticator
~ (SimpleAuthenticator uses access.properties and passwd.properties by
~ default).
~
@ -188,7 +188,7 @@
~ are sent to the node with the "closest" token, so distributing your
~ tokens equally along the key distribution space will spread keys
~ evenly across your cluster.) This setting is only checked the first
~ time a node is started.
~ This can also be useful with RandomPartitioner to force equal spacing
~ of tokens around the hash space, especially for clusters with a small
@ -227,9 +227,9 @@
<!-- Local hosts and ports -->
<!--
~ Address to bind to and tell other nodes to connect to. You _must_
~ change this if you want multiple nodes to be able to communicate!
~
~ Leaving it blank leaves it up to InetAddress.getLocalHost(). This
~ will always do the Right Thing *if* the node is properly configured
@ -251,9 +251,9 @@
<ThriftAddress>localhost</ThriftAddress>
<!-- Thrift RPC port (the port clients connect to). -->
<ThriftPort>9160</ThriftPort>
<!--
~ Whether or not to use a framed transport for Thrift. If this option
~ is set to true then you must also use a framed transport on the
~ client-side, (framed and non-framed transports are not compatible).
-->
<ThriftFramedTransport>false</ThriftFramedTransport>
@ -285,16 +285,16 @@
<!--
~ Buffer size to use when performing contiguous column slices. Increase
~ this to the size of the column slices you typically perform.
~ (Name-based queries are performed with a buffer size of
~ ColumnIndexSizeInKB.)
-->
<SlicedBufferSizeInKB>64</SlicedBufferSizeInKB>
<!--
~ Buffer size to use when flushing memtables to disk. (Only one
~ memtable is ever flushed at a time.) Increase (decrease) the index
~ buffer size relative to the data buffer if you have few (many)
~ columns per key. Bigger is only better _if_ your memtables get large
~ enough to use the space. (Check in your data directory after your
~ app has been running long enough.) -->
@ -314,7 +314,7 @@
<!--
~ Flush memtable after this much data has been inserted, including
~ overwritten data. There is one memtable per column family, and
~ this threshold is based solely on the amount of data stored, not
~ actual heap memory usage (there is some overhead in indexing the
~ columns).
@ -379,7 +379,7 @@
~ individually). Reasonable values range from a minimal 0.1 to 10 or
~ even more if throughput matters more than latency.
-->
<!-- <CommitLogSyncBatchWindowInMS>1</CommitLogSyncBatchWindowInMS> -->
<!--
~ Time to wait before garbage-collection deletion markers. Set this to

View file

@ -5,7 +5,7 @@
import sbt._
import sbt.CompileOrder._
import spde._
import java.util.jar.Attributes
import java.util.jar.Attributes.Name._
import java.io.File
@ -26,11 +26,11 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
lazy val distPath = info.projectPath / "dist"
override def compileOptions = super.compileOptions ++
Seq("-deprecation",
"-Xmigration",
"-Xcheckinit",
"-Xstrict-warnings",
"-Xwarninit",
Seq("-deprecation",
"-Xmigration",
"-Xcheckinit",
"-Xstrict-warnings",
"-Xwarninit",
"-encoding", "utf8")
.map(x => CompileOption(x))
@ -151,21 +151,21 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
<distribution>repo</distribution>
</license>
</licenses>
// publish to local mvn
import Process._
lazy val publishLocalMvn = runMvnInstall
def runMvnInstall = task {
for (absPath <- akkaArtifacts.getPaths) {
val artifactRE = """(.*)/dist/(.*)-(.*).jar""".r
val artifactRE(path, artifactId, artifactVersion) = absPath
val command = "mvn install:install-file" +
" -Dfile=" + absPath +
" -DgroupId=se.scalablesolutions.akka" +
" -DartifactId=" + artifactId +
" -DgroupId=se.scalablesolutions.akka" +
" -DartifactId=" + artifactId +
" -Dversion=" + version +
" -Dpackaging=jar -DgeneratePom=true"
command ! log
}
None
} dependsOn(dist) describedAs("Run mvn install for artifacts in dist.")
@ -192,7 +192,7 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
val protobuf = "com.google.protobuf" % "protobuf-java" % "2.3.0" % "compile"
val multiverse = "org.multiverse" % "multiverse-alpha" % MULTIVERSE_VERSION % "compile"
val jgroups = "jgroups" % "jgroups" % "2.9.0.GA" % "compile"
// testing
val scalatest = "org.scalatest" % "scalatest" % SCALATEST_VERSION % "test"
val junit = "junit" % "junit" % "4.5" % "test"
@ -220,7 +220,7 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
val annotation = "javax.annotation" % "jsr250-api" % "1.0" % "compile"
val lift_common = "net.liftweb" % "lift-common" % LIFT_VERSION % "compile"
val lift_util = "net.liftweb" % "lift-util" % LIFT_VERSION % "compile"
// testing
val scalatest = "org.scalatest" % "scalatest" % SCALATEST_VERSION % "test"
val junit = "junit" % "junit" % "4.5" % "test"
@ -302,7 +302,7 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
val junit = "junit" % "junit" % "4.5" % "test"
val jmock = "org.jmock" % "jmock" % "2.4.0" % "test"
}
// ================= EXAMPLES ==================
class AkkaSampleAntsProject(info: ProjectInfo) extends DefaultSpdeProject(info) {
val scalaToolsSnapshots = ScalaToolsSnapshots
@ -389,9 +389,9 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
!jar.toString.endsWith("scala-library-2.7.7.jar")
)
}
def akkaArtifacts = descendents(info.projectPath / "dist", "*" + buildScalaVersion + "-" + version + ".jar")
// ------------------------------------------------------------
class AkkaDefaultProject(info: ProjectInfo, val deployPath: Path) extends DefaultProject(info) with DeployProject
@ -402,7 +402,7 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
lazy val dist = distAction
def distAction = deployTask(jarPath, packageDocsJar, packageSrcJar, deployPath, true, true, true) dependsOn(
`package`, packageDocs, packageSrc) describedAs("Deploying")
def deployTask(jar: Path, docs: Path, src: Path, toDir: Path,
genJar: Boolean, genDocs: Boolean, genSource: Boolean) = task {
gen(jar, toDir, genJar, "Deploying bits") orElse
gen(docs, toDir, genDocs, "Deploying docs") orElse

View file

@ -5,4 +5,4 @@ class Plugins(info: ProjectInfo) extends PluginDefinition(info) {
val spdeSbt = "us.technically.spde" % "spde-sbt-plugin" % "0.4.1"
// val repo = "GH-pages repo" at "http://mpeltonen.github.com/maven/"
// val idea = "com.github.mpeltonen" % "sbt-idea-plugin" % "0.1-SNAPSHOT"
}

View file

@ -1,2 +1,2 @@
#!/bin/sh
sed -i '' 's/[[:space:]]*$//g' **/*.*
sed -i '' 's/[[:space:]]*$//g' **/*.scala