// (VCS artifact, commented out during cleanup) 2014-04-17 16:38:48 +02:00
package akka
import sbt._
import Keys._
// (VCS artifact, commented out during cleanup) 2014-05-05 16:45:48 +02:00
import com.timgroup.statsd. { StatsDClientErrorHandler , NonBlockingStatsDClient }
// (VCS artifact, commented out during cleanup) 2014-04-17 16:38:48 +02:00
import sbt.testing. { TestSelector , Status , Event }
import scala.util.Try
// (VCS artifact, commented out during cleanup) 2014-05-05 16:45:48 +02:00
import java.io.{ BufferedReader, DataOutputStream, InputStreamReader, OutputStreamWriter }
import java.net.{ HttpURLConnection, InetAddress, Socket, URL, URLEncoder }
import com.typesafe.sbt.SbtGit
import com.typesafe.sbt.SbtGit.GitKeys._
// (VCS artifact, commented out during cleanup) 2014-04-17 16:38:48 +02:00
object TestExtras {
object JUnitFileReporting {
  // Emit junit-style XML reports (consumed by CI) for both junit-interface
  // and ScalaTest, in addition to the regular console output.
  val settings = Seq(
    testOptions ++= {
      val reportsDir = (target.value / "test-reports").getAbsolutePath
      Seq(
        // junit-interface: -v verbose, -a show assertion stack traces, -u write XML reports
        Tests.Argument(TestFrameworks.JUnit, "-v", "-a", "-u", reportsDir),
        // ScalaTest: -u writes junit-style XML reports to the given directory
        Tests.Argument(TestFrameworks.ScalaTest, "-u", reportsDir)
      )
    }
  )
}
// (VCS artifact, commented out during cleanup) 2014-05-05 16:45:48 +02:00
object GraphiteBuildEvents {

  val graphite = config("graphite")

  val enabled = settingKey[Boolean]("Set to true when you want to send build events to graphite; Enable with `-Dakka.sbt.graphite=true`")
  val host = settingKey[String]("Host where graphite is located (ip, or hostname)")
  val port = settingKey[Int]("Port on which graphite is listening, defaults to 80")

  private val notifier = settingKey[Option[GraphiteBuildNotifier]]("Notifies graphite about this build")

  val settings = SbtGit.settings ++ SbtGit.projectSettings ++ Seq(
    // FIX: sys.props(key) may throw / yield null for an absent property; use
    // `.get` so an unset flag simply means "disabled" instead of failing the load.
    enabled in graphite := sys.props.get("akka.sbt.graphite").exists(_ == "true"),
    host in graphite := sys.props.get("akka.sbt.graphite.host").getOrElse("54.72.154.120"),
    // a malformed port value silently falls back to the default instead of failing the build
    port in graphite := sys.props.get("akka.sbt.graphite.port").flatMap(p => Try(p.toInt).toOption).getOrElse(80),

    notifier := {
      if (enabled.in(graphite).value)
        Some(new GraphiteBuildNotifier(gitCurrentBranch.value, gitHeadCommit.value, host.in(graphite).value, port.in(graphite).value))
      else None
    },

    // this wraps the test task in order to send events before and after it
    test in Test := Def.settingDyn {
      val g = notifier.value
      g.foreach(_.start())
      // todo support complete(failed / successful)
      val task = (test in Test).taskValue andFinally { g.foreach(_.complete()) }
      Def.setting(task)
    }.value
  )

  /**
   * Notifies graphite by sending an *event*, when a build starts.
   * It will be tagged as "akka-build" and "branch:...", for filtering in UIs.
   *
   * Event includes branch and commit id of the build that is running.
   */
  class GraphiteBuildNotifier(branch: String, commitId: Option[String], host: String, port: Int) {

    // FIX: the interpolation previously contained literal spaces
    // ("http:// $host : $port /events/"), which is not a valid URL.
    private val url = new URL(s"http://$host:$port/events/")
    private val hostname = InetAddress.getLocalHost.getHostName

    // e.g. "master@abc123", or just the branch name when the commit id is unknown
    private val marker = branch + commitId.fold("")(id => s"@$id")

    // Graphite events JSON payload; `data` is optional free-form text.
    private def json(what: String, tag: String, data: String = "") =
      s"""{"what": "$what", "tags": "akka-build,branch:${sanitize(branch)},$tag", "data": "$data"}"""

    def start(): Unit = send(s"Build started: $marker", data = "host = " + hostname, tag = "started")

    def complete(): Unit = send(s"Build completed: $marker", data = "host = " + hostname, tag = "completed")

    /**
     * POSTs one event to graphite. Best-effort: a graphite outage must never
     * fail the build, so non-fatal errors are swallowed.
     */
    def send(msg: String, data: String, tag: String) = try {
      // specifically not using Akka-IO (even though I'd love to), in order to not make the akka build depend on akka itself
      val con = url.openConnection().asInstanceOf[HttpURLConnection]
      try {
        // FIX: arguments were passed positionally as (msg, data, tag) against the
        // signature (what, tag, data), swapping the tag and data fields.
        val bytes = json(msg, tag = tag, data = data).getBytes("UTF-8")
        con.setDoOutput(true) // triggers POST
        con.connect()
        val out = new DataOutputStream(con.getOutputStream)
        try {
          out.write(bytes)
          out.flush()
          // sigh, if left un-consumed graphite wouldn't take the write (*really*)!
          consume(con)
        } finally {
          out.close()
        }
      } finally {
        con.disconnect()
      }
    } catch {
      // best-effort notification only — never fail the build over metrics
      case scala.util.control.NonFatal(_) => ()
    }

    private def sanitize(s: String): String = s.replaceAll("""[^\w]+""", "-")

    // Drain the response body; graphite drops the event if the response is left unread.
    private def consume(con: HttpURLConnection): Unit = {
      val in = new BufferedReader(new InputStreamReader(con.getInputStream))
      try {
        var inputLine = in.readLine()
        while (inputLine != null) {
          inputLine = in.readLine()
        }
      } finally {
        in.close()
      }
    }
  }
}
// (VCS artifact, commented out during cleanup) 2014-04-17 16:38:48 +02:00
object StatsDMetrics {
  // NOTE(review): this span previously contained four duplicated commit-message
  // blobs (merge/paste corruption) interleaved with the code; they have been removed
  // so the object compiles again.

  val statsd = config("statsd")

  val enabled = settingKey[Boolean]("Set to true when you want to send stats to statsd; Enable with `-Dakka.sbt.statsd=true`")
  val prefix = settingKey[String]("Prefix given to all metrics sent to statsd")
  val host = settingKey[String]("Host where statsd is located (ip, or hostname)")
  val port = settingKey[Int]("Port on which statsd is listening, defaults to 8125")

  val settings = Seq(
    // configuration
    // FIX: sys.props(key) may throw / yield null for an absent property;
    // use `.get` consistently (host/port already did).
    enabled in statsd := sys.props.get("akka.sbt.statsd").exists(_ == "true"),
    prefix in statsd := sys.props.get("akka.sbt.statsd.prefix").getOrElse("akka_master"),
    host in statsd := sys.props.get("akka.sbt.statsd.host").getOrElse("54.72.154.120"),
    port in statsd := sys.props.get("akka.sbt.statsd.port").flatMap(p => Try(p.toInt).toOption).getOrElse(8125),

    testListeners in (Test, test) ++= {
      // for `test`
      if (enabled.in(statsd).value)
        Seq(StatsDTestListener(streams.value.log, prefix.in(statsd).value, host.in(statsd).value, port.in(statsd).value))
      else Nil
    },
    testListeners ++= {
      // for `testOnly`
      if (enabled.in(statsd).value)
        Seq(StatsDTestListener(streams.value.log, prefix.in(statsd).value, host.in(statsd).value, port.in(statsd).value))
      else Nil
    }
  )

  /**
   * Test listener that reports per-test counters and execution times to a
   * statsd instance, keyed by sanitized fully-qualified test names.
   */
  case class StatsDTestListener(log: Logger, prefix: String, host: String, port: Int) extends TestsListener {

    // initialised in doInit(); sbt invokes doInit before any test events are delivered
    var client: NonBlockingStatsDClient = _

    override def doInit(): Unit = {
      log.info(s"Initialised StatsDTestsListener (sending stats to $host:$port)")
      client = new NonBlockingStatsDClient(prefix, host, port, new StatsDClientErrorHandler {
        override def handle(exception: Exception): Unit = log.error(exception.toString)
      })
    }

    override def testEvent(event: TestEvent): Unit = {
      event.detail foreach { det =>
        det.status match {
          case Status.Success =>
            // successes additionally record a timer, not just a counter
            client.incrementCounter(testCounterKey(det, det.status))
            client.recordExecutionTime(testTimerKey(det), det.duration.toInt)
          case status =>
            client.incrementCounter(testCounterKey(det, status))
        }
      }
    }

    override def endGroup(name: String, result: TestResult.Value): Unit = {
      // manual switch instead of toStringing class name all the time
      result match {
        case TestResult.Passed => client.incrementCounter(keySuccess(name))
        case TestResult.Failed => client.incrementCounter(keyFail(name))
        case TestResult.Error  => client.incrementCounter(keyError(name))
      }
    }

    override def endGroup(name: String, t: Throwable): Unit =
      client.incrementCounter(keyError(name))

    override def startGroup(name: String): Unit = {
      // do nothing
    }

    override def doComplete(finalResult: TestResult.Value): Unit = {
      log.debug("Final test run result: " + finalResult)
      log.info("Shutting down StatsDTestsListener client...")
      if (client != null)
        client.stop()
    }

    // FIX: the interpolations previously contained literal spaces around the dots,
    // which produced malformed statsd keys ("name . id" instead of "name.id").
    private def testTimerKey(det: Event): String =
      s"${det.fullyQualifiedName}.${testSelectorToId(det.selector)}"

    // FIX: was a blind asInstanceOf[TestSelector], which would throw a
    // ClassCastException (and abort reporting) for any other selector kind.
    private def testSelectorToId(sel: testing.Selector): String = sel match {
      case ts: TestSelector => sanitize(ts.testName())
      case other            => sanitize(other.toString)
    }

    private def testCounterKey(det: Event, status: Status): String =
      s"${sanitize(det.fullyQualifiedName)}.${status.toString.toLowerCase}"

    private def keySuccess(fullyQualifiedName: String): String = fullyQualifiedName + ".success"
    private def keyFail(fullyQualifiedName: String): String = fullyQualifiedName + ".fail"
    private def keyError(fullyQualifiedName: String): String = fullyQualifiedName + ".error"

    // statsd key segments may only contain word characters; everything else becomes '_'
    private def sanitize(s: String): String = s.replaceAll("""[^\w]""", "_")
  }
}
// (VCS artifact, commented out during cleanup) 2014-05-07 14:49:35 +02:00
object Filter {

  object Keys {
    val excludeTestNames = settingKey[Set[String]]("Names of tests to be excluded. Not supported by MultiJVM tests. Example usage: -Dakka.test.names.exclude=TimingSpec")
    val excludeTestTags = settingKey[Set[String]]("Tags of tests to be excluded. It will not be used if you specify -Dakka.test.tags.only. Example usage: -Dakka.test.tags.exclude=long-running")
    val onlyTestTags = settingKey[Set[String]]("Tags of tests to be ran. Example usage: -Dakka.test.tags.only=long-running")
  }

  import Keys._

  // Wires the exclude/only system properties into sbt's testOptions.
  def settings = Seq(
    excludeTestNames := systemPropertyAsSeq("akka.test.names.exclude").toSet,
    excludeTestTags := {
      // tag exclusions are ignored entirely when an explicit tag whitelist is set
      if (onlyTestTags.value.isEmpty) systemPropertyAsSeq("akka.test.tags.exclude").toSet
      else Set.empty
    },
    onlyTestTags := systemPropertyAsSeq("akka.test.tags.only").toSet,
    // add filters for tests excluded by name
    testOptions in Test <++= excludeTestNames map { names =>
      names.toSeq.map(excluded => Tests.Filter(testName => !testName.contains(excluded)))
    },
    // add arguments for tests excluded by tag
    testOptions in Test <++= excludeTestTags map { tags =>
      if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-l", tags.mkString(" ")))
    },
    // add arguments for running only tests by tag
    testOptions in Test <++= onlyTestTags map { tags =>
      if (tags.isEmpty) Seq.empty else Seq(Tests.Argument("-n", tags.mkString(" ")))
    }
  )

  // Reads a comma-separated system property; absent or empty yields Seq.empty.
  def systemPropertyAsSeq(name: String): Seq[String] = {
    val raw = sys.props.getOrElse(name, "")
    if (raw.isEmpty) Seq.empty else raw.split(",").toSeq
  }
}
}