diff --git a/akka-core/src/main/scala/stm/HashTrie.scala b/akka-core/src/main/scala/stm/HashTrie.scala
index b26b31bba1..b1cd992428 100644
--- a/akka-core/src/main/scala/stm/HashTrie.scala
+++ b/akka-core/src/main/scala/stm/HashTrie.scala
@@ -218,7 +218,7 @@ private[stm] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bi
new BitmappedNode(shift)(newTable, bits)
}
} else {
- val newTable = new Array[Node[K, A]](Math.max(table.length, i + 1))
+ val newTable = new Array[Node[K, A]](math.max(table.length, i + 1))
Array.copy(table, 0, newTable, 0, table.length)
newTable(i) = new LeafNode(key, hash, value)
@@ -244,7 +244,7 @@ private[stm] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bi
} else if (node.isInstanceOf[EmptyNode[_]]) {
if (size == 1) new EmptyNode[K] else {
val adjustedBits = bits ^ mask
- val log = Math.log(adjustedBits) / Math.log(2)
+ val log = math.log(adjustedBits) / math.log(2)
if (log.toInt.toDouble == log) { // last one
table(log.toInt)
@@ -286,7 +286,7 @@ private[stm] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bi
private[stm] object BitmappedNode {
def apply[K, V](shift: Int)(node: SingleNode[K, V], key: K, hash: Int, value: V) = {
- val table = new Array[Node[K, V]](Math.max((hash >>> shift) & 0x01f, (node.hash >>> shift) & 0x01f) + 1)
+ val table = new Array[Node[K, V]](math.max((hash >>> shift) & 0x01f, (node.hash >>> shift) & 0x01f) + 1)
val preBits = {
val i = (node.hash >>> shift) & 0x01f
diff --git a/akka-core/src/main/scala/stm/Vector.scala b/akka-core/src/main/scala/stm/Vector.scala
index 3f1f866692..7d524cd2a8 100644
--- a/akka-core/src/main/scala/stm/Vector.scala
+++ b/akka-core/src/main/scala/stm/Vector.scala
@@ -276,7 +276,7 @@ class Vector[+T] private (val length: Int, shift: Int, root: Array[AnyRef], tail
var back = new Vector[(T, A)]
var i = 0
- val limit = Math.min(length, that.length)
+ val limit = math.min(length, that.length)
while (i < limit) {
back += (apply(i), that(i))
i += 1
diff --git a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala
index 78527ca3e8..34ef7eca62 100644
--- a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala
+++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala
@@ -15,7 +15,7 @@ import se.scalablesolutions.akka.config.Config.config
import scala.collection.mutable.Map
import org.apache.cassandra.db.ColumnFamily
-import org.apache.cassandra.service._
+import org.apache.cassandra.thrift._
import org.apache.thrift.transport._
import org.apache.thrift.protocol._
@@ -31,7 +31,7 @@ trait CassandraSession extends Closeable with Flushable {
protected val keyspace: String
val obtainedAt: Long
- val consistencyLevel: Int
+ val consistencyLevel: ConsistencyLevel
val schema: JMap[String, JMap[String, String]]
/**
@@ -53,43 +53,46 @@ trait CassandraSession extends Closeable with Flushable {
def /(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int): List[ColumnOrSuperColumn] =
/(key, columnParent, start, end, ascending, count, consistencyLevel)
- def /(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int, consistencyLevel: Int): List[ColumnOrSuperColumn] =
- client.get_slice(keyspace, key, columnParent, new SlicePredicate(null, new SliceRange(start, end, ascending, count)), consistencyLevel).toList
+ def /(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int, consistencyLevel: ConsistencyLevel): List[ColumnOrSuperColumn] = {
+ val slicePredicate = new SlicePredicate
+ slicePredicate.setSlice_range(new SliceRange(start, end, ascending, count))
+ client.get_slice(keyspace, key, columnParent, slicePredicate, consistencyLevel).toList
+ }
def /(key: String, columnParent: ColumnParent, slicePredicate: SlicePredicate): List[ColumnOrSuperColumn] =
client.get_slice(keyspace, key, columnParent, slicePredicate, consistencyLevel).toList
- def /(key: String, columnParent: ColumnParent, slicePredicate: SlicePredicate, consistencyLevel: Int): List[ColumnOrSuperColumn] =
+ def /(key: String, columnParent: ColumnParent, slicePredicate: SlicePredicate, consistencyLevel: ConsistencyLevel): List[ColumnOrSuperColumn] =
client.get_slice(keyspace, key, columnParent, slicePredicate, consistencyLevel).toList
def |(key: String, colPath: ColumnPath): Option[ColumnOrSuperColumn] =
|(key, colPath, consistencyLevel)
- def |(key: String, colPath: ColumnPath, consistencyLevel: Int): Option[ColumnOrSuperColumn] =
+ def |(key: String, colPath: ColumnPath, consistencyLevel: ConsistencyLevel): Option[ColumnOrSuperColumn] =
client.get(keyspace, key, colPath, consistencyLevel)
def |#(key: String, columnParent: ColumnParent): Int =
|#(key, columnParent, consistencyLevel)
- def |#(key: String, columnParent: ColumnParent, consistencyLevel: Int): Int =
+ def |#(key: String, columnParent: ColumnParent, consistencyLevel: ConsistencyLevel): Int =
client.get_count(keyspace, key, columnParent, consistencyLevel)
def ++|(key: String, colPath: ColumnPath, value: Array[Byte]): Unit =
++|(key, colPath, value, obtainedAt, consistencyLevel)
- def ++|(key: String, colPath: ColumnPath, value: Array[Byte], consistencyLevel: Int): Unit =
+ def ++|(key: String, colPath: ColumnPath, value: Array[Byte], consistencyLevel: ConsistencyLevel): Unit =
++|(key, colPath, value, obtainedAt, consistencyLevel)
def ++|(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long): Unit =
++|(key, colPath, value, timestamp, consistencyLevel)
- def ++|(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long, consistencyLevel: Int) =
+ def ++|(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long, consistencyLevel: ConsistencyLevel) =
client.insert(keyspace, key, colPath, value, timestamp, consistencyLevel)
def ++|(key: String, batch: Map[String, List[ColumnOrSuperColumn]]): Unit =
++|(key, batch, consistencyLevel)
- def ++|(key: String, batch: Map[String, List[ColumnOrSuperColumn]], consistencyLevel: Int): Unit = {
+ def ++|(key: String, batch: Map[String, List[ColumnOrSuperColumn]], consistencyLevel: ConsistencyLevel): Unit = {
val jmap = new java.util.HashMap[String, JList[ColumnOrSuperColumn]]
for (entry <- batch; (key, value) = entry) jmap.put(key, new java.util.ArrayList(value))
client.batch_insert(keyspace, key, jmap, consistencyLevel)
@@ -98,7 +101,7 @@ trait CassandraSession extends Closeable with Flushable {
def --(key: String, columnPath: ColumnPath, timestamp: Long): Unit =
--(key, columnPath, timestamp, consistencyLevel)
- def --(key: String, columnPath: ColumnPath, timestamp: Long, consistencyLevel: Int): Unit =
+ def --(key: String, columnPath: ColumnPath, timestamp: Long, consistencyLevel: ConsistencyLevel): Unit =
client.remove(keyspace, key, columnPath, timestamp, consistencyLevel)
// ====================================
@@ -107,37 +110,37 @@ trait CassandraSession extends Closeable with Flushable {
def getSlice(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int) = / (key, columnParent, start, end, ascending, count, consistencyLevel)
- def getSlice(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int, consistencyLevel: Int) = / (key, columnParent, start, end, ascending, count, consistencyLevel)
+ def getSlice(key: String, columnParent: ColumnParent, start: Array[Byte], end: Array[Byte], ascending: Boolean, count: Int, consistencyLevel: ConsistencyLevel) = / (key, columnParent, start, end, ascending, count, consistencyLevel)
def getSlice(key: String, columnParent: ColumnParent, slicePredicate: SlicePredicate) = / (key, columnParent, slicePredicate)
- def getSlice(key: String, columnParent: ColumnParent, slicePredicate: SlicePredicate, consistencyLevel: Int) = / (key, columnParent, slicePredicate, consistencyLevel)
+ def getSlice(key: String, columnParent: ColumnParent, slicePredicate: SlicePredicate, consistencyLevel: ConsistencyLevel) = / (key, columnParent, slicePredicate, consistencyLevel)
def get(key: String, colPath: ColumnPath) = |(key, colPath)
- def get(key: String, colPath: ColumnPath, consistencyLevel: Int) = |(key, colPath, consistencyLevel)
+ def get(key: String, colPath: ColumnPath, consistencyLevel: ConsistencyLevel) = |(key, colPath, consistencyLevel)
def getCount(key: String, columnParent: ColumnParent)= |#(key, columnParent)
- def getCount(key: String, columnParent: ColumnParent, consistencyLevel: Int) = |#(key, columnParent, consistencyLevel)
+ def getCount(key: String, columnParent: ColumnParent, consistencyLevel: ConsistencyLevel) = |#(key, columnParent, consistencyLevel)
def insert(key: String, colPath: ColumnPath, value: Array[Byte]): Unit = ++|(key, colPath, value)
- def insert(key: String, colPath: ColumnPath, value: Array[Byte], consistencyLevel: Int): Unit = ++|(key, colPath, value, consistencyLevel)
+ def insert(key: String, colPath: ColumnPath, value: Array[Byte], consistencyLevel: ConsistencyLevel): Unit = ++|(key, colPath, value, consistencyLevel)
def insert(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long): Unit = ++|(key, colPath, value, timestamp)
- def insert(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long, consistencyLevel: Int) = ++|(key, colPath, value, timestamp, consistencyLevel)
+ def insert(key: String, colPath: ColumnPath, value: Array[Byte], timestamp: Long, consistencyLevel: ConsistencyLevel) = ++|(key, colPath, value, timestamp, consistencyLevel)
def insert(key: String, batch: Map[String, List[ColumnOrSuperColumn]]): Unit = ++|(key, batch)
- def insert(key: String, batch: Map[String, List[ColumnOrSuperColumn]], consistencyLevel: Int): Unit = ++|(key, batch, consistencyLevel)
+ def insert(key: String, batch: Map[String, List[ColumnOrSuperColumn]], consistencyLevel: ConsistencyLevel): Unit = ++|(key, batch, consistencyLevel)
def remove(key: String, columnPath: ColumnPath, timestamp: Long): Unit = --(key, columnPath, timestamp)
- def remove(key: String, columnPath: ColumnPath, timestamp: Long, consistencyLevel: Int): Unit = --(key, columnPath, timestamp, consistencyLevel)
+ def remove(key: String, columnPath: ColumnPath, timestamp: Long, consistencyLevel: ConsistencyLevel): Unit = --(key, columnPath, timestamp, consistencyLevel)
}
@@ -146,14 +149,14 @@ class CassandraSessionPool[T <: TTransport](
transportPool: Pool[T],
inputProtocol: Protocol,
outputProtocol: Protocol,
- consistency: Int) extends Closeable with Logging {
+ consistency: ConsistencyLevel) extends Closeable with Logging {
- def this(space: String, transportPool: Pool[T], ioProtocol: Protocol, consistency: Int) =
+ def this(space: String, transportPool: Pool[T], ioProtocol: Protocol, consistency: ConsistencyLevel) =
this (space, transportPool, ioProtocol, ioProtocol, consistency)
def newSession: CassandraSession = newSession(consistency)
- def newSession(consistencyLevel: Int): CassandraSession = {
+ def newSession(consistencyLevel: ConsistencyLevel): CassandraSession = {
val socket = transportPool.borrowObject
val cassandraClient = new Cassandra.Client(inputProtocol(socket), outputProtocol(socket))
val cassandraSchema = cassandraClient.describe_keyspace(space)
diff --git a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala
index d380953159..420ca88429 100644
--- a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala
+++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala
@@ -10,7 +10,7 @@ import se.scalablesolutions.akka.util.Logging
import se.scalablesolutions.akka.util.Helpers._
import se.scalablesolutions.akka.config.Config.config
-import org.apache.cassandra.service._
+import org.apache.cassandra.thrift._
/**
* @author Jonas Bonér
@@ -23,23 +23,27 @@ private[akka] object CassandraStorageBackend extends
type ElementType = Array[Byte]
- val KEYSPACE = "akka"
- val MAP_COLUMN_PARENT = new ColumnParent("map", null)
- val VECTOR_COLUMN_PARENT = new ColumnParent("vector", null)
- val REF_COLUMN_PARENT = new ColumnParent("ref", null)
- val REF_KEY = "item".getBytes("UTF-8")
- val EMPTY_BYTE_ARRAY = new Array[Byte](0)
+ val KEYSPACE = "akka"
+ val MAP_COLUMN_PARENT = new ColumnParent("map")
+ val VECTOR_COLUMN_PARENT = new ColumnParent("vector")
+ val REF_COLUMN_PARENT = new ColumnParent("ref")
+ val REF_KEY = "item".getBytes("UTF-8")
+ val EMPTY_BYTE_ARRAY = new Array[Byte](0)
val CASSANDRA_SERVER_HOSTNAME = config.getString("akka.storage.cassandra.hostname", "127.0.0.1")
- val CASSANDRA_SERVER_PORT = config.getInt("akka.storage.cassandra.port", 9160)
+ val CASSANDRA_SERVER_PORT = config.getInt("akka.storage.cassandra.port", 9160)
val CONSISTENCY_LEVEL = {
config.getString("akka.storage.cassandra.consistency-level", "QUORUM") match {
- case "ZERO" => 0
- case "ONE" => 1
- case "QUORUM" => 2
- case "ALL" => 3
- case unknown => throw new IllegalArgumentException(
- "Cassandra consistency level [" + unknown + "] is not supported. Expected one of [ZERO, ONE, QUORUM, ALL]")
+ case "ZERO" => ConsistencyLevel.ZERO
+ case "ONE" => ConsistencyLevel.ONE
+ case "QUORUM" => ConsistencyLevel.QUORUM
+ case "DCQUORUM" => ConsistencyLevel.DCQUORUM
+ case "DCQUORUMSYNC" => ConsistencyLevel.DCQUORUMSYNC
+ case "ALL" => ConsistencyLevel.ALL
+ case "ANY" => ConsistencyLevel.ANY
+ case unknown => throw new IllegalArgumentException(
+ "Cassandra consistency level [" + unknown + "] is not supported." +
+ "\n\tExpected one of [ZERO, ONE, QUORUM, DCQUORUM, DCQUORUMSYNC, ALL, ANY] in the akka.conf configuration file.")
}
}
val IS_ASCENDING = true
@@ -58,9 +62,11 @@ private[akka] object CassandraStorageBackend extends
// ===============================================================
def insertRefStorageFor(name: String, element: Array[Byte]) = {
+ val columnPath = new ColumnPath(REF_COLUMN_PARENT.getColumn_family)
+ columnPath.setColumn(REF_KEY)
sessions.withSession {
_ ++| (name,
- new ColumnPath(REF_COLUMN_PARENT.getColumn_family, null, REF_KEY),
+ columnPath,
element,
System.currentTimeMillis,
CONSISTENCY_LEVEL)
@@ -68,9 +74,11 @@ private[akka] object CassandraStorageBackend extends
}
def getRefStorageFor(name: String): Option[Array[Byte]] = {
+ val columnPath = new ColumnPath(REF_COLUMN_PARENT.getColumn_family)
+ columnPath.setColumn(REF_KEY)
try {
val column: Option[ColumnOrSuperColumn] = sessions.withSession {
- _ | (name, new ColumnPath(REF_COLUMN_PARENT.getColumn_family, null, REF_KEY))
+ _ | (name, columnPath)
}
if (column.isDefined) Some(column.get.getColumn.value)
else None
@@ -86,9 +94,11 @@ private[akka] object CassandraStorageBackend extends
// ===============================================================
def insertVectorStorageEntryFor(name: String, element: Array[Byte]) = {
+ val columnPath = new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family)
+ columnPath.setColumn(intToBytes(getVectorStorageSizeFor(name)))
sessions.withSession {
_ ++| (name,
- new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family, null, intToBytes(getVectorStorageSizeFor(name))),
+ columnPath,
element,
System.currentTimeMillis,
CONSISTENCY_LEVEL)
@@ -100,9 +110,11 @@ private[akka] object CassandraStorageBackend extends
}
def updateVectorStorageEntryFor(name: String, index: Int, elem: Array[Byte]) = {
+ val columnPath = new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family)
+ columnPath.setColumn(intToBytes(index))
sessions.withSession {
_ ++| (name,
- new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family, null, intToBytes(index)),
+ columnPath,
elem,
System.currentTimeMillis,
CONSISTENCY_LEVEL)
@@ -110,8 +122,10 @@ private[akka] object CassandraStorageBackend extends
}
def getVectorStorageEntryFor(name: String, index: Int): Array[Byte] = {
+ val columnPath = new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family)
+ columnPath.setColumn(intToBytes(index))
val column: Option[ColumnOrSuperColumn] = sessions.withSession {
- _ | (name, new ColumnPath(VECTOR_COLUMN_PARENT.getColumn_family, null, intToBytes(index)))
+ _ | (name, columnPath)
}
if (column.isDefined) column.get.column.value
else throw new NoSuchElementException("No element for vector [" + name + "] and index [" + index + "]")
@@ -149,9 +163,11 @@ private[akka] object CassandraStorageBackend extends
// ===============================================================
def insertMapStorageEntryFor(name: String, key: Array[Byte], element: Array[Byte]) = {
+ val columnPath = new ColumnPath(MAP_COLUMN_PARENT.getColumn_family)
+ columnPath.setColumn(key)
sessions.withSession {
_ ++| (name,
- new ColumnPath(MAP_COLUMN_PARENT.getColumn_family, null, key),
+ columnPath,
element,
System.currentTimeMillis,
CONSISTENCY_LEVEL)
@@ -172,8 +188,10 @@ private[akka] object CassandraStorageBackend extends
def getMapStorageEntryFor(name: String, key: Array[Byte]): Option[Array[Byte]] = {
try {
+ val columnPath = new ColumnPath(MAP_COLUMN_PARENT.getColumn_family)
+ columnPath.setColumn(key)
val column: Option[ColumnOrSuperColumn] = sessions.withSession {
- _ | (name, new ColumnPath(MAP_COLUMN_PARENT.getColumn_family, null, key))
+ _ | (name, columnPath)
}
if (column.isDefined) Some(column.get.getColumn.value)
else None
@@ -206,9 +224,11 @@ private[akka] object CassandraStorageBackend extends
def removeMapStorageFor(name: String, key: Array[Byte]): Unit = {
val keyBytes = if (key eq null) null else key
+ val columnPath = new ColumnPath(MAP_COLUMN_PARENT.getColumn_family)
+ columnPath.setColumn(keyBytes)
sessions.withSession {
_ -- (name,
- new ColumnPath(MAP_COLUMN_PARENT.getColumn_family, null, keyBytes),
+ columnPath,
System.currentTimeMillis,
CONSISTENCY_LEVEL)
}
diff --git a/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala b/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala
index 0a16f9e286..6746fca529 100644
--- a/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala
+++ b/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala
@@ -105,7 +105,7 @@ class CassandraPersistentActorSpec extends JUnitSuite {
stateful.start
stateful !! SetVectorState("init") // set init state
stateful !! Success("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess", "new state") // transactionrequired
- assertEquals(2, (stateful !! GetVectorSize).get)
+ assertEquals(2, (stateful !! GetVectorSize).get.asInstanceOf[java.lang.Integer].intValue)
}
@Test
@@ -119,7 +119,7 @@ class CassandraPersistentActorSpec extends JUnitSuite {
stateful !! Failure("testShouldRollbackStateForStatefulServerInCaseOfFailure", "new state", failer) // call failing transactionrequired method
fail("should have thrown an exception")
} catch {case e: RuntimeException => {}}
- assertEquals(1, (stateful !! GetVectorSize).get)
+ assertEquals(1, (stateful !! GetVectorSize).get.asInstanceOf[java.lang.Integer].intValue)
}
@Test
@@ -148,7 +148,7 @@ class CassandraPersistentActorSpec extends JUnitSuite {
}
}
-
+/*
import org.apache.cassandra.service.CassandraDaemon
object EmbeddedCassandraService {
@@ -171,3 +171,4 @@ object EmbeddedCassandraService {
def start: Unit = {}
}
+*/
\ No newline at end of file
diff --git a/config/akka-reference.conf b/config/akka-reference.conf
index 48f91b4d8c..b61bdd773f 100644
--- a/config/akka-reference.conf
+++ b/config/akka-reference.conf
@@ -8,7 +8,7 @@
filename = "./logs/akka.log"
roll = "daily" # Options: never, hourly, daily, sunday/monday/...
- level = "trace" # Options: fatal, critical, error, warning, info, debug, trace
+ level = "debug" # Options: fatal, critical, error, warning, info, debug, trace
console = on
# syslog_host = ""
# syslog_server_name = ""
@@ -89,7 +89,7 @@
hostname = "127.0.0.1" # IP address or hostname of one of the Cassandra cluster's seeds
port = 9160
- consistency-level = "QUORUM" # Options: ZERO, ONE, QUORUM, ALL
+ consistency-level = "QUORUM" # Options: ZERO, ONE, QUORUM, DCQUORUM, DCQUORUMSYNC, ALL, ANY
diff --git a/config/storage-conf.xml b/config/storage-conf.xml
index 8ffdce0a5f..06ba8007a2 100644
--- a/config/storage-conf.xml
+++ b/config/storage-conf.xml
@@ -55,54 +55,117 @@
-->
-
- 0.01
-
+
-
-
+
+
-
+
+
+
+ org.apache.cassandra.locator.RackUnawareStrategy
+
+
+ 1
+
+
+ org.apache.cassandra.locator.EndPointSnitch
+
+
+ org.apache.cassandra.auth.AllowAllAuthenticator
+
-
- org.apache.cassandra.locator.EndPointSnitch
-
-
- org.apache.cassandra.locator.RackUnawareStrategy
-
-
- 1
-
- 5000
+ 10000
128
@@ -198,10 +237,8 @@
~ address associated with the hostname (it might not be).
-->
localhost
-
+
7000
-
- 7001
+
+ auto
+
+
+ 512
+
- 64
+ 64
+
+ 256
- 0.1
+ 0.3
@@ -327,11 +389,4 @@
~ ten days.
-->
864000
-
-
- 256
-
diff --git a/embedded-repo/com/facebook/thrift/r917130/thrift-r917130.jar b/embedded-repo/com/facebook/thrift/r917130/thrift-r917130.jar
new file mode 100644
index 0000000000..896cdb2af8
Binary files /dev/null and b/embedded-repo/com/facebook/thrift/r917130/thrift-r917130.jar differ
diff --git a/embedded-repo/com/facebook/thrift/r917130/thrift-r917130.pom b/embedded-repo/com/facebook/thrift/r917130/thrift-r917130.pom
new file mode 100644
index 0000000000..23b4177b38
--- /dev/null
+++ b/embedded-repo/com/facebook/thrift/r917130/thrift-r917130.pom
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>com.facebook</groupId>
+  <artifactId>thrift</artifactId>
+  <version>r917130</version>
+  <packaging>jar</packaging>
+</project>
\ No newline at end of file
diff --git a/embedded-repo/org/apache/cassandra/cassandra/0.6.1/cassandra-0.6.1.jar b/embedded-repo/org/apache/cassandra/cassandra/0.6.1/cassandra-0.6.1.jar
new file mode 100644
index 0000000000..c7c71b1750
Binary files /dev/null and b/embedded-repo/org/apache/cassandra/cassandra/0.6.1/cassandra-0.6.1.jar differ
diff --git a/embedded-repo/org/apache/cassandra/cassandra/0.6.1/cassandra-0.6.1.pom b/embedded-repo/org/apache/cassandra/cassandra/0.6.1/cassandra-0.6.1.pom
new file mode 100755
index 0000000000..4969b74564
--- /dev/null
+++ b/embedded-repo/org/apache/cassandra/cassandra/0.6.1/cassandra-0.6.1.pom
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>cassandra</artifactId>
+  <version>0.6.1</version>
+  <packaging>jar</packaging>
+</project>
\ No newline at end of file
diff --git a/embedded-repo/org/apache/cassandra/clhm-production/0.6.1/clhm-production-0.6.1.jar b/embedded-repo/org/apache/cassandra/clhm-production/0.6.1/clhm-production-0.6.1.jar
new file mode 100644
index 0000000000..028f505bb9
Binary files /dev/null and b/embedded-repo/org/apache/cassandra/clhm-production/0.6.1/clhm-production-0.6.1.jar differ
diff --git a/embedded-repo/org/apache/cassandra/clhm-production/0.6.1/clhm-production-0.6.1.pom b/embedded-repo/org/apache/cassandra/clhm-production/0.6.1/clhm-production-0.6.1.pom
new file mode 100755
index 0000000000..432c1c225d
--- /dev/null
+++ b/embedded-repo/org/apache/cassandra/clhm-production/0.6.1/clhm-production-0.6.1.pom
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>clhm-production</artifactId>
+  <version>0.6.1</version>
+  <packaging>jar</packaging>
+</project>
\ No newline at end of file
diff --git a/embedded-repo/org/apache/cassandra/high-scale-lib/0.6.1/high-scale-lib-0.6.1.jar b/embedded-repo/org/apache/cassandra/high-scale-lib/0.6.1/high-scale-lib-0.6.1.jar
new file mode 100644
index 0000000000..421a436eed
Binary files /dev/null and b/embedded-repo/org/apache/cassandra/high-scale-lib/0.6.1/high-scale-lib-0.6.1.jar differ
diff --git a/embedded-repo/org/apache/cassandra/high-scale-lib/0.6.1/high-scale-lib-0.6.1.pom b/embedded-repo/org/apache/cassandra/high-scale-lib/0.6.1/high-scale-lib-0.6.1.pom
new file mode 100755
index 0000000000..c361dbef9f
--- /dev/null
+++ b/embedded-repo/org/apache/cassandra/high-scale-lib/0.6.1/high-scale-lib-0.6.1.pom
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>high-scale-lib</artifactId>
+  <version>0.6.1</version>
+  <packaging>jar</packaging>
+</project>
\ No newline at end of file
diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala
index 0864963800..1986727fef 100644
--- a/project/build/AkkaProject.scala
+++ b/project/build/AkkaProject.scala
@@ -16,7 +16,7 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
// project versions
val JERSEY_VERSION = "1.1.5"
val ATMO_VERSION = "0.5.4"
- val CASSANDRA_VERSION = "0.5.0"
+ val CASSANDRA_VERSION = "0.6.1"
val LIFT_VERSION = "2.0-scala280-SNAPSHOT"
val SCALATEST_VERSION = "1.2-for-scala-2.8.0.RC2-SNAPSHOT"
@@ -25,8 +25,12 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
lazy val distPath = info.projectPath / "dist"
override def compileOptions = super.compileOptions ++
- Seq("-deprecation", "-Xmigration", "-Xcheckinit",
- "-Xstrict-warnings", "-Xwarninit", "-encoding", "utf8")
+ Seq("-deprecation",
+ "-Xmigration",
+ "-Xcheckinit",
+ "-Xstrict-warnings",
+ "-Xwarninit",
+ "-encoding", "utf8")
.map(x => CompileOption(x))
override def javaCompileOptions = JavaCompileOption("-Xlint:unchecked") :: super.javaCompileOptions.toList
@@ -194,7 +198,7 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
}
class AkkaPersistenceCommonProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) {
- val thrift = "com.facebook" % "thrift" % "1.0" % "compile"
+ val thrift = "com.facebook" % "thrift" % "r917130" % "compile"
val commons_pool = "commons-pool" % "commons-pool" % "1.5.4" % "compile"
}
@@ -218,7 +222,7 @@ class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
val cassandra_clhm = "org.apache.cassandra" % "clhm-production" % CASSANDRA_VERSION % "test"
val commons_coll = "commons-collections" % "commons-collections" % "3.2.1" % "test"
val google_coll = "com.google.collections" % "google-collections" % "1.0" % "test"
- override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil
+ //override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil
}
class AkkaPersistenceParentProject(info: ProjectInfo) extends ParentProject(info) {