diff --git a/akka-amqp/src/main/scala/akka/amqp/ConsumerActor.scala b/akka-amqp/src/main/scala/akka/amqp/ConsumerActor.scala
index b339cf4727..d0324f05c6 100644
--- a/akka-amqp/src/main/scala/akka/amqp/ConsumerActor.scala
+++ b/akka-amqp/src/main/scala/akka/amqp/ConsumerActor.scala
@@ -89,7 +89,7 @@ private[amqp] class ConsumerActor(consumerParameters: ConsumerParameters)
           case Some(params) => params.configurationArguments
           case _ => Map.empty
         }
-        ch.queueDeclare(queueName, durable, exclusive, autoDelete, JavaConversions.asMap(configurationArguments.toMap))
+        ch.queueDeclare(queueName, durable, exclusive, autoDelete, JavaConversions.asJavaMap(configurationArguments.toMap))
       case NoActionDeclaration => new com.rabbitmq.client.impl.AMQImpl.Queue.DeclareOk(queueName, 0, 0) // do nothing here
     }
   }
diff --git a/akka-amqp/src/main/scala/akka/amqp/FaultTolerantChannelActor.scala b/akka-amqp/src/main/scala/akka/amqp/FaultTolerantChannelActor.scala
index 78b532623a..057ceab257 100644
--- a/akka-amqp/src/main/scala/akka/amqp/FaultTolerantChannelActor.scala
+++ b/akka-amqp/src/main/scala/akka/amqp/FaultTolerantChannelActor.scala
@@ -68,7 +68,7 @@ abstract private[amqp] class FaultTolerantChannelActor(
     exchangeDeclaration match {
       case PassiveDeclaration => ch.exchangeDeclarePassive(exchangeName)
       case ActiveDeclaration(durable, autoDelete, _) =>
-        ch.exchangeDeclare(exchangeName, exchangeType.toString, durable, autoDelete, JavaConversions.asMap(configurationArguments))
+        ch.exchangeDeclare(exchangeName, exchangeType.toString, durable, autoDelete, JavaConversions.asJavaMap(configurationArguments))
       case NoActionDeclaration => // ignore
     }
   }
diff --git a/akka-http/src/main/scala/Mist.scala b/akka-http/src/main/scala/Mist.scala
index 44c66c8f05..f37beb6437 100644
--- a/akka-http/src/main/scala/Mist.scala
+++ b/akka-http/src/main/scala/Mist.scala
@@ -368,10 +368,20 @@ trait RequestMethod extends Logging
     case s => s
   }
 
-
   def complete(status: Int, body: String): Boolean = complete(status, body, Headers())
 
   def complete(status: Int, body: String, headers: Headers): Boolean =
+    rawComplete {
+      res => {
+        res.setStatus(status)
+        headers foreach {h => response.setHeader(h._1, h._2)}
+        res.getWriter.write(body)
+        res.getWriter.close
+        res.flushBuffer
+      }
+    }
+
+  def rawComplete(completion: HttpServletResponse => Unit): Boolean =
     context match {
       case Some(pipe) => {
         try {
@@ -380,11 +390,7 @@
             false
           }
           else {
-            response.setStatus(status)
-            headers foreach {h => response.setHeader(h._1, h._2)}
-            response.getWriter.write(body)
-            response.getWriter.close
-            response.flushBuffer
+            completion(response)
            pipe.complete
            true
          }
@@ -396,7 +402,7 @@
         }
 
       case None =>
-        log.error("Attempt to complete request with no context. STATUS (" + status + ") BODY (" + body + ") HEADERS (" + headers + ")")
+        log.error("Attempt to complete request with no context.")
         false
     }
 
diff --git a/akka-persistence/akka-persistence-cassandra/src/main/scala/akka/CassandraStorageBackend.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/akka/CassandraStorageBackend.scala
index d077215a7f..a835866713 100644
--- a/akka-persistence/akka-persistence-cassandra/src/main/scala/akka/CassandraStorageBackend.scala
+++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/akka/CassandraStorageBackend.scala
@@ -79,7 +79,7 @@ private[akka] object CassandraStorageBackend extends CommonStorageBackend {
   override def getAll(owner: String, keys: Iterable[Array[Byte]]): Map[Array[Byte], Array[Byte]] = {
     sessions.withSession{
       session => {
-        var predicate = new SlicePredicate().setColumn_names(JavaConversions.asList(keys.toList))
+        var predicate = new SlicePredicate().setColumn_names(JavaConversions.asJavaList(keys.toList))
         val cols = session / (owner, parent, predicate, CONSISTENCY_LEVEL)
         var map = new TreeMap[Array[Byte], Array[Byte]]()(ordering)
         cols.foreach{
@@ -124,10 +124,10 @@ private[akka] object CassandraStorageBackend extends CommonStorageBackend {
                      new KeyRange().setStart_key("").setEnd_key(""),
                      CONSISTENCY_LEVEL)
     val mutations = new JHMap[String, JMap[String, JList[Mutation]]]
-    JavaConversions.asIterable(slices).foreach{
+    JavaConversions.asScalaIterable(slices).foreach{
       keySlice: KeySlice => {
         val key = keySlice.getKey
-        val keyMutations = JavaConversions.asMap(mutations).getOrElse(key, {
+        val keyMutations = JavaConversions.asScalaMap(mutations).getOrElse(key, {
           val km = new JHMap[String, JList[Mutation]]
           mutations.put(key, km)
           km
@@ -135,7 +135,7 @@ private[akka] object CassandraStorageBackend extends CommonStorageBackend {
         val amutation = new JAList[Mutation]
         val cols = new JAList[Array[Byte]]
         keyMutations.put(parent.getColumn_family, amutation)
-        JavaConversions.asIterable(keySlice.getColumns) foreach {
+        JavaConversions.asScalaIterable(keySlice.getColumns) foreach {
           cosc: ColumnOrSuperColumn => {
             cols.add(cosc.getColumn.getName)
           }
diff --git a/akka-persistence/akka-persistence-memcached/src/main/scala/akka/MemcachedStorageBackend.scala b/akka-persistence/akka-persistence-memcached/src/main/scala/akka/MemcachedStorageBackend.scala
index 0859e6d88e..21c4772fe1 100644
--- a/akka-persistence/akka-persistence-memcached/src/main/scala/akka/MemcachedStorageBackend.scala
+++ b/akka-persistence/akka-persistence-memcached/src/main/scala/akka/MemcachedStorageBackend.scala
@@ -63,11 +63,11 @@ private[akka] object MemcachedStorageBackend extends CommonStorageBackend {
   }
 
   def getAll(keys: Iterable[Array[Byte]]) = {
-    val jmap = client.getBulk(JavaConversions.asList(keys.map{
+    val jmap = client.getBulk(JavaConversions.asJavaList(keys.map{
       k: Array[Byte] => keyStr(encodeKey(k))
     }.toList))
 
-    JavaConversions.asMap(jmap).map{
+    JavaConversions.asScalaMap(jmap).map{
       kv => kv match {
         case (key, value) => (base64.decode(key) -> value.asInstanceOf[Array[Byte]])
       }
diff --git a/akka-persistence/akka-persistence-riak/src/main/scala/akka/RiakStorageBackend.scala b/akka-persistence/akka-persistence-riak/src/main/scala/akka/RiakStorageBackend.scala
index 149576da05..e18890e93e 100644
--- a/akka-persistence/akka-persistence-riak/src/main/scala/akka/RiakStorageBackend.scala
+++ b/akka-persistence/akka-persistence-riak/src/main/scala/akka/RiakStorageBackend.scala
@@ -112,7 +112,7 @@ private[akka] object RiakStorageBackend extends CommonStorageBackend {
 
   def drop() {
     val keys = riakClient.listKeys(bucket)
-    JavaConversions.asIterable(keys) foreach {
+    JavaConversions.asScalaIterable(keys) foreach {
      delete(_)
    }
    keys.close
diff --git a/akka-persistence/akka-persistence-simpledb/src/main/scala/akka/SimpledbStorageBackend.scala b/akka-persistence/akka-persistence-simpledb/src/main/scala/akka/SimpledbStorageBackend.scala
index dba6579e41..3addda797f 100644
--- a/akka-persistence/akka-persistence-simpledb/src/main/scala/akka/SimpledbStorageBackend.scala
+++ b/akka-persistence/akka-persistence-simpledb/src/main/scala/akka/SimpledbStorageBackend.scala
@@ -113,7 +113,7 @@ private[akka] object SimpledbStorageBackend extends CommonStorageBackend {
     var res = getClient.select(req)
     var continue = true
     do {
-      JavaConversions.asIterable(res.getItems) foreach {
+      JavaConversions.asScalaIterable(res.getItems) foreach {
        item => map += (base64key.decode(item.getName) -> recomposeValue(item.getAttributes).get)
      }
      if (res.getNextToken ne null) {
@@ -272,7 +272,7 @@ private[akka] object SimpledbStorageBackend extends CommonStorageBackend {
   }
 
   def recomposeValue(atts: JList[Attribute]): Option[Array[Byte]] = {
-    val itemSnapshot = JavaConversions.asIterable(atts).foldLeft(new MMap[String, String]) {
+    val itemSnapshot = JavaConversions.asScalaIterable(atts).foldLeft(new MMap[String, String]) {
      (map, att) => {
        map += (att.getName -> att.getValue)
      }
diff --git a/akka-persistence/akka-persistence-voldemort/src/main/scala/akka/VoldemortStorageBackend.scala b/akka-persistence/akka-persistence-voldemort/src/main/scala/akka/VoldemortStorageBackend.scala
index 8f2779eb06..ad9c8d4df8 100644
--- a/akka-persistence/akka-persistence-voldemort/src/main/scala/akka/VoldemortStorageBackend.scala
+++ b/akka-persistence/akka-persistence-voldemort/src/main/scala/akka/VoldemortStorageBackend.scala
@@ -77,8 +77,8 @@ private[akka] object VoldemortStorageBackend extends CommonStorageBackend {
   }
 
   def getAll(keys: Iterable[Array[Byte]]): Map[Array[Byte], Array[Byte]] = {
-    val jmap = client.getAll(JavaConversions.asIterable(keys))
-    JavaConversions.asMap(jmap).map{
+    val jmap = client.getAll(JavaConversions.asJavaIterable(keys))
+    JavaConversions.asScalaMap(jmap).map{
      kv => kv match {
        case (key: Array[Byte], versioned: Versioned[Array[Byte]]) =>
          (key -> versioned.getValue)
diff --git a/akka-persistence/akka-persistence-voldemort/src/test/scala/EmbeddedVoldemort.scala b/akka-persistence/akka-persistence-voldemort/src/test/scala/EmbeddedVoldemort.scala
index 23940fbadf..e3f956396d 100644
--- a/akka-persistence/akka-persistence-voldemort/src/test/scala/EmbeddedVoldemort.scala
+++ b/akka-persistence/akka-persistence-voldemort/src/test/scala/EmbeddedVoldemort.scala
@@ -22,7 +22,7 @@ trait EmbeddedVoldemort extends BeforeAndAfterAll with Logging {
     val home = new File(dir)
     log.info("Creating Voldemort Config")
    val config = VoldemortConfig.loadFromVoldemortHome(home.getCanonicalPath)
-    config.setStorageConfigurations(JavaConversions.asList(List(classOf[InMemoryStorageConfiguration].getName)))
+    config.setStorageConfigurations(JavaConversions.asJavaList(List(classOf[InMemoryStorageConfiguration].getName)))
     log.info("Starting Voldemort")
     server = new VoldemortServer(config)
     server.start
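
Note, not part of the patch: the bulk of these changes replace the direction-ambiguous JavaConversions overloads (asMap, asList, asIterable) with the explicitly named asJava*/asScala* variants used above; each call keeps the conversion direction it already had. A minimal sketch of the pattern, assuming the same scala.collection.JavaConversions API these files compile against and using hypothetical sample values:

    import scala.collection.JavaConversions

    object JavaConversionsSketch {
      // Scala -> Java: hand Scala collections to Java APIs
      // (cf. queueDeclare and setStorageConfigurations above)
      val args = Map("x-sample-arg" -> "value")   // hypothetical data
      val javaMap: java.util.Map[String, String] = JavaConversions.asJavaMap(args)
      val javaList: java.util.List[String] = JavaConversions.asJavaList(List("a", "b"))

      // Java -> Scala: wrap Java collections for Scala-side iteration
      // (cf. the getBulk/getAll result handling above)
      val scalaMap = JavaConversions.asScalaMap(javaMap)
      val values = JavaConversions.asScalaIterable(javaList)
      values foreach { v => println(v) }
    }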