Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TelemetryAPIJourney - Retrieving the telemetry payload (cached vs. fresh) #211

Merged
merged 4 commits into from
Feb 24, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 34 additions & 0 deletions src/test/scala/org/kibanaLoadTest/scenario/TelemetryAPI.scala
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
package org.kibanaLoadTest.scenario

import cats.implicits.catsSyntaxSemigroup

import io.gatling.core.Predef.{exec, _}
import io.gatling.http.Predef._

object TelemetryAPI {

  /** Builds a Gatling step that POSTs to the telemetry cluster-stats endpoint.
    *
    * @param baseUrl      Kibana base URL, used to build the Referer header
    * @param headers      default request headers supplied by the caller
    * @param refreshCache when true, asks Kibana to regenerate the telemetry
    *                     report instead of serving the cached one
    * @param requestName  name under which the request is reported by Gatling
    */
  private def fetchClusterStats(
      baseUrl: String,
      headers: Map[String, String],
      refreshCache: Boolean,
      requestName: String
  ) = {
    // Telemetry calls normally originate from the Kibana home app,
    // so mimic that with the Referer header.
    val defaultHeaders =
      headers.combine(Map("Referer" -> s"$baseUrl/app/home"))

    exec(
      http(requestName)
        .post("/api/telemetry/v2/clusters/_stats")
        .body(StringBody(s"""{ "refreshCache": $refreshCache }"""))
        .headers(defaultHeaders)
        .check(status.is(200))
    )
  }

  /** Requests a fresh telemetry report (`refreshCache: true`) so we can
    * measure the cost of generating the report repeatedly.
    */
  def load(baseUrl: String, headers: Map[String, String]) =
    fetchClusterStats(
      baseUrl,
      headers,
      refreshCache = true,
      requestName = "telemetry: /api/telemetry/v2/clusters/_stats"
    )

  /** Requests the cached telemetry report (`refreshCache: false`). */
  def cached(baseUrl: String, headers: Map[String, String]) =
    fetchClusterStats(
      baseUrl,
      headers,
      refreshCache = false,
      requestName = "telemetry: cached /api/telemetry/v2/clusters/_stats"
    )
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
package org.kibanaLoadTest.simulation.branch

import io.gatling.core.Predef._
import io.gatling.core.structure.ScenarioBuilder
import org.kibanaLoadTest.scenario.{Login, Home, TelemetryAPI}
import org.kibanaLoadTest.simulation.BaseSimulation

/** Closed-workload journey that logs in and repeatedly fetches the CACHED
  * telemetry payload (`refreshCache: false`), to measure cache-hit latency.
  */
class TelemetryAPICachedJourney extends BaseSimulation {
  val scenarioName = s"Branch cached telemetry journey ${appConfig.buildVersion}"

  props.maxUsers = 400

  val scn: ScenarioBuilder = scenario(scenarioName)
    .exec(
      Login
        .doLogin(
          appConfig.isSecurityEnabled,
          appConfig.loginPayload,
          appConfig.loginStatusCode
        )
        .pause(5)
    )
    .exec(TelemetryAPI.cached(appConfig.baseUrl, defaultHeaders).pause(1))

  // Closed workload injection profile:
  // 1) hold 20 concurrent users for 3 minutes, then
  // 2) ramp from 20 up to props.maxUsers (400) over 3 minutes.
  setUp(
    scn
      .inject(
        constantConcurrentUsers(20) during (3 * 60), // 1
        rampConcurrentUsers(20) to props.maxUsers during (3 * 60) // 2
      )
      .protocols(httpProtocol)
  ).maxDuration(props.simulationTimeout * 2)
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
package org.kibanaLoadTest.simulation.branch

import io.gatling.core.Predef._
import io.gatling.core.structure.ScenarioBuilder
import org.kibanaLoadTest.scenario.{Login, Home, TelemetryAPI}
import org.kibanaLoadTest.simulation.BaseSimulation

/** Closed-workload journey that logs in and repeatedly requests a FRESH
  * telemetry payload (`refreshCache: true`), to measure report-generation cost.
  */
class TelemetryAPIJourney extends BaseSimulation {
  val scenarioName = s"Branch telemetry journey ${appConfig.buildVersion}"

  props.maxUsers = 400

  val scn: ScenarioBuilder = scenario(scenarioName)
    .exec(
      Login
        .doLogin(
          appConfig.isSecurityEnabled,
          appConfig.loginPayload,
          appConfig.loginStatusCode
        )
        .pause(5)
    )
    .exec(TelemetryAPI.load(appConfig.baseUrl, defaultHeaders).pause(1))

  // Closed workload injection profile:
  // 1) hold 20 concurrent users for 3 minutes, then
  // 2) ramp from 20 up to props.maxUsers (400) over 3 minutes.
  setUp(
    scn
      .inject(
        constantConcurrentUsers(20) during (3 * 60), // 1
        rampConcurrentUsers(20) to props.maxUsers during (3 * 60) // 2
      )
      .protocols(httpProtocol)
  ).maxDuration(props.simulationTimeout * 2)
}