diff --git a/build.sbt b/build.sbt
index 9c60ff7fa5..1397effc47 100644
--- a/build.sbt
+++ b/build.sbt
@@ -504,7 +504,6 @@ lazy val perfServerJavaOptions = List(
   "-Xmx16g",
   "-XX:+AlwaysPreTouch"
 )
-lazy val flightRecordingJavaOpts = "-XX:StartFlightRecording=filename=recording.jfr,dumponexit=true,duration=120s"
 
 lazy val perfTests: ProjectMatrix = (projectMatrix in file("perf-tests"))
   .enablePlugins(GatlingPlugin)
@@ -535,8 +534,8 @@ lazy val perfTests: ProjectMatrix = (projectMatrix in file("perf-tests"))
   .settings(
     fork := true,
     connectInput := true,
-    Compile / run / javaOptions ++= flightRecordingJavaOpts :: perfServerJavaOptions,
-    Test / run / javaOptions --= flightRecordingJavaOpts :: perfServerJavaOptions
+    Compile / run / javaOptions ++= perfServerJavaOptions,
+    Test / run / javaOptions --= perfServerJavaOptions
   )
   .jvmPlatform(scalaVersions = List(scala2_13))
   .dependsOn(core, pekkoHttpServer, http4sServer, nettyServer, nettyServerCats, playServer, vertxServer, vertxServerCats)
diff --git a/perf-tests/README.md b/perf-tests/README.md
index 11da164988..4d58194842 100644
--- a/perf-tests/README.md
+++ b/perf-tests/README.md
@@ -34,8 +34,25 @@ If you want to run a test server separately from simulations, use a separate
 sbt perfTests/runMain sttp.tapir.perf.apis.ServerRunner http4s.TapirMulti
 ```
 
-This is useful when profiling, as `perfTests/runMain` will be a forked JVM isolated from the JVM that runs Gatling, configured with additional options like `"-XX:StartFlightRecording=filename=recording.jfr,...`
-After the simulations, you can open `recording.jfr` in Java Mission Control to analyze performance metrics like heap and CPU usage.
+This is useful when profiling, as `perfTests/runMain` will be a forked JVM isolated from the JVM that runs Gatling.
+
+## Profiling
+
+To attach a profiler to a running server, it is recommended to use [async-profiler](https://github.com/async-profiler/async-profiler).
+Start the profiler by calling:
+```
+asprof -e cpu,alloc,lock -f profile.jfr <pid>
+```
+
+After the simulations, you can open `profile.jfr` in IntelliJ IDEA or Java Mission Control to analyze performance metrics like heap and CPU usage.
+It's also useful to build CPU flamegraphs with the [async-profiler converter](https://github.com/async-profiler/async-profiler?tab=readme-ov-file#download):
+```
+java -cp ./converter.jar jfr2flame ./profile.jfr flamegraph.html
+```
+
+After opening the flamegraph in your browser, use the spyglass icon to search by regular expression and see the total % of recorded samples matching the query. For example, searching for `tapir` shows Tapir's total share of the load, which is a useful metric to compare before and after a performance improvement.
+
+Note that profiling noticeably affects performance, so it's recommended to measure throughput and latency without the profiler attached.
 
 ## Examples