Hi folks. I need another dozen pairs of eyes on this. In a script I’m using hand-crafted options.

In-script options:
export let options = {
scenarios: {
peak_get_ents: {
// peak scenario name
executor: 'ramping-arrival-rate',
startRate: 0,
timeUnit: '1s',
preAllocatedVUs: 0,
maxVUs: get_ents_gate_rush_vumax,
stages: [
{ target: get_ents_peak, duration: peak_ramp },
{ target: get_ents_peak, duration: peak_sustain },
{ target: 0, duration: ramp_down },
],
gracefulStop: after_peak_delay, // how long to let in-flight iterations finish at the end before interrupting them
tags: { test_type: 'peak_get_ents' }, // extra tags for the metrics generated by this scenario
exec: 'get_ents', // the function this scenario will execute
},
gate_rush_get_ents: {
// gate_rush scenario name
executor: 'ramping-arrival-rate',
startRate: 0,
startTime: start_delay,
timeUnit: '1s',
preAllocatedVUs: 0,
maxVUs: get_ents_gate_rush_vumax,
stages: [
{ target: get_ents_gate_rush, duration: gr_ramp },
{ target: get_ents_gate_rush, duration: gr_sustain },
{ target: 0, duration: ramp_down },
],
gracefulStop: after_peak_delay, // how long to let in-flight iterations finish at the end before interrupting them
tags: { test_type: 'gate_rush_get_ents' }, // extra tags for the metrics generated by this scenario
exec: 'get_ents', // the function this scenario will execute
},
},
noConnectionReuse: options_trigger_true_false,
noVUConnectionReuse: options_trigger_true_false,
thresholds: {
'http_req_duration{test_type:peak_get_ents}': [{ threshold: 'avg<350', abortOnFail: latency_trigger_true_false, delayAbortEval: '180s' }],
'http_req_duration{test_type:gate_rush_get_ents}': [{ threshold: 'avg<550', abortOnFail: latency_trigger_true_false, delayAbortEval: '180s' }],
'http_req_failed{scenario:peak_get_ents}': [{ threshold: 'rate < 0.05', abortOnFail: error_trigger_true_false, delayAbortEval: '180s' }],
'http_req_failed{scenario:gate_rush_get_ents}': [{ threshold: 'rate < 0.05', abortOnFail: error_trigger_true_false, delayAbortEval: '180s' }],
'vus_max': [{ threshold: `value < ${get_ents_gate_rush_vumax}` }],
},
discardResponseBodies: false,
summaryTrendStats: ['avg', 'min', 'max', 'p(95)', 'p(99)'],
insecureSkipTLSVerify: error_trigger_true_false,
userAgent: kona_agent,
ext: {
loadimpact: {
distribution: {
ashburnDistribution: { loadZone: 'amazon:us:ashburn', percent: 8 },
dublinDistribution: { loadZone: 'amazon:ie:dublin', percent: 8 },
capeTownDistribution: { loadZone: 'amazon:sa:cape town', percent: 8 },
hongKongDistribution: { loadZone: 'amazon:cn:hong kong', percent: 8 },
// mumbaiDistribution: { loadZone: 'amazon:in:mumbai', percent: 100 },
osakaDistribution: { loadZone: 'amazon:jp:osaka', percent: 8 },
seoulDistribution: { loadZone: 'amazon:kr:seoul', percent: 8 },
singaporeDistribution: { loadZone: 'amazon:sg:singapore', percent: 8 },
sydneyDistribution: { loadZone: 'amazon:au:sydney', percent: 8 },
tokyoDistribution: { loadZone: 'amazon:jp:tokyo', percent: 10 },
// montrealDistribution: { loadZone: 'amazon:ca:montreal', percent: 100 },
frankfurtDistribution: { loadZone: 'amazon:de:frankfurt', percent: 8 },
londonDistribution: { loadZone: 'amazon:gb:london', percent: 8 },
// milanDistribution: { loadZone: 'amazon:it:milan', percent: 100 },
// parisDistribution: { loadZone: 'amazon:fr:paris', percent: 100 },
// stockholmDistribution: { loadZone: 'amazon:se:stockholm', percent: 100 },
// bahrainDistribution: { loadZone: 'amazon:bh:bahrain', percent: 100 },
// saoPauloDistribution: { loadZone: 'amazon:br:sao paulo', percent: 100 },
// paloAltoDistribution: { loadZone: 'amazon:us:palo alto', percent: 100 },
portlandDistribution: { loadZone: 'amazon:us:portland', percent: 10 },
},
projectID: 1234567,
name: ' Smoke Test'
}
}
};
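For context, the identifiers referenced in those options (arrival rates, ramp durations, the VU cap, the *_trigger_true_false flags, kona_agent) are defined earlier in the script. Roughly they look like the sketch below; the env-var names and fallback values here are illustrative, not my real numbers.

// Illustrative sketch only; the real script reads these from __ENV lookups or constants.
const get_ents_peak = Number(__ENV.GET_ENTS_PEAK || 500); // peak arrival rate (iterations/s)
const get_ents_gate_rush = Number(__ENV.GET_ENTS_GATE_RUSH || 800); // gate-rush arrival rate (iterations/s)
const get_ents_gate_rush_vumax = Number(__ENV.GET_ENTS_VUMAX || 8000); // cap on allocated VUs
const peak_ramp = __ENV.PEAK_RAMP || '1m'; // ramp up to peak
const peak_sustain = __ENV.PEAK_SUSTAIN || '3m'; // hold at peak
const gr_ramp = __ENV.GR_RAMP || '1m'; // ramp up to gate rush
const gr_sustain = __ENV.GR_SUSTAIN || '3m'; // hold at gate rush
const ramp_down = __ENV.RAMP_DOWN || '1m'; // ramp back down to 0
const start_delay = __ENV.START_DELAY || '5m10s'; // offset before the gate_rush scenario starts
const after_peak_delay = __ENV.AFTER_PEAK_DELAY || '10s'; // gracefulStop window
const options_trigger_true_false = (__ENV.NO_CONN_REUSE || 'true') === 'true'; // toggles connection reuse
const latency_trigger_true_false = (__ENV.ABORT_ON_LATENCY || 'false') === 'true'; // abortOnFail for latency thresholds
const error_trigger_true_false = (__ENV.ABORT_ON_ERRORS || 'true') === 'true'; // abortOnFail for error-rate thresholds (also reused for insecureSkipTLSVerify)
const kona_agent = __ENV.USER_AGENT || 'loadTest_12345_KENTSk6'; // custom User-Agent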
So far I’ve replicated this into a config.json file as follows:
{
"userAgent": "loadTest_12345_KENTSk6",
"discardResponseBodies": false,
"insecureSkipTLSVerify": true,
"noConnectionReuse": true,
"noVUConnectionReuse": true,
"summaryTrendStats": [
"min",
"med",
"avg",
"max",
"p(90)",
"p(95)",
"p(99)",
"count"
],
"scenarios": {
"peak_ents": {
"executor": "ramping-arrival-rate",
"startRate": 0,
"timeUnit": "1s",
"preAllocatedVUs": 50,
"maxVUs": 8000,
"gracefulStop": "10s",
"stages": [
{
"target": 500,
"duration": "1m"
},
{
"target": 500,
"duration": "3m"
},
{
"target": 0,
"duration": "4m"
}
],
"tags": {
"test_type": "peak_get_ents"
},
"exec": "get_ents"
},
"gate_rush_ents": {
"executor": "ramping-arrival-rate",
"startRate": 0,
"timeUnit": "1s",
"preAllocatedVUs": 50,
"maxVUs": 8000,
"gracefulStop": "10s",
"startTime": "5m10s",
"stages": [
{
"target": 800,
"duration": "5m10s"
},
{
"target": 800,
"duration": "7m10s"
},
{
"target": 8,
"duration": "8m10s"
}
],
"tags": {
"test_type": "gate_rush_get_ents"
},
"exec": "get_ents"
}
},
"threshold": {
"http_req_duration{test_type:peak_get_ents}": {
"threshold": "p(99)<500",
"abortOnFail": false,
"delayAbortEval": "180s"
},
"http_req_failed{scenario:peak_ents}": {
"threshold": "rate < 0.01",
"abortOnFail": true,
"delayAbortEval": "180s"
},
"http_req_duration{test_type:gate_rush_get_ents}": {
"threshold": "p(99)<500",
"abortOnFail": false,
"delayAbortEval": "180s"
},
"http_req_failed{scenario:gate_rush_ents}": {
"threshold": "rate < 0.01",
"abortOnFail": true,
"delayAbortEval": "180s"
}
}
}
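In case it matters, a config file like this gets picked up by passing it to k6 run with the --config flag (the script filename below is illustrative):

k6 run --config ./config.json ./get_ents.js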
The output from the in-script options produces individual sub-metrics per scenario for both http_req_duration and http_req_failed:
checks...............................: 72.09% ✓ 31 ✗ 12
data_received........................: 948 kB 46 kB/s
data_sent............................: 324 kB 16 kB/s
dropped_iterations...................: 80 3.882002/s
group_duration.......................: avg=4.51s min=535.34ms max=11s p(95)=11s p(99)=11s
http_req_blocked.....................: avg=116.1ms min=37.19ms max=2.16s p(95)=658.14ms p(99)=1.26s
http_req_connecting..................: avg=34.47ms min=16.97ms max=51.4ms p(95)=45.96ms p(99)=48.64ms
http_req_duration....................: avg=1.8s min=360.26ms max=10.96s p(95)=10.96s p(99)=10.96s
{ expected_response:true }.........: avg=863.63ms min=360.26ms max=6.45s p(95)=3.09s p(99)=5.26s
✓ { test_type:gate_rush_get_ents }...: avg=0s min=0s max=0s p(95)=0s p(99)=0s
✗ { test_type:peak_get_ents }........: avg=1.8s min=360.26ms max=10.96s p(95)=10.96s p(99)=10.96s
http_req_failed......................: 9.91% ✓ 12 ✗ 109
✓ { scenario:gate_rush_get_ents }....: 0.00% ✓ 0 ✗ 0
✓ { scenario:peak_get_ents }.........: 0.00% ✓ 0 ✗ 0
http_req_receiving...................: avg=463.7µs min=0s max=17.15ms p(95)=1.3ms p(99)=8.92ms
http_req_sending.....................: avg=90.55µs min=16µs max=1.33ms p(95)=208µs p(99)=463.2µs
http_req_tls_handshaking.............: avg=8.35ms min=0s max=33.68ms p(95)=27.15ms p(99)=32.79ms
http_req_waiting.....................: avg=1.8s min=360.11ms max=10.96s p(95)=10.96s p(99)=10.96s
http_reqs............................: 121 5.871528/s
iteration_duration...................: avg=5.22s min=606.94ms max=12.71s p(95)=11.46s p(99)=12.21s
iterations...........................: 41 1.989526/s
vus..................................: 77 min=0 max=77
✓ vus_max..............................: 77 min=0 max=77
running (00m20.6s), 000/080 VUs, 41 complete and 80 interrupted iterations
peak_ents ✗ [--------------------------------------] 076/080 VUs 00m20.6s/14m0s 019.57 iters/s
gate_rush_ents • [--------------------------------------] waiting 14m39.4s
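My understanding (happy to be corrected) is that those tagged sub-metric rows show up in the summary because the in-script options declare thresholds against the tagged sub-metrics, for example:

thresholds: {
'http_req_duration{test_type:peak_get_ents}': [{ threshold: 'avg<350', abortOnFail: latency_trigger_true_false, delayAbortEval: '180s' }],
'http_req_failed{scenario:peak_get_ents}': [{ threshold: 'rate < 0.05', abortOnFail: error_trigger_true_false, delayAbortEval: '180s' }],
// ...plus the matching gate_rush_get_ents entries and the vus_max threshold
},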
The config.json version I’m trying to create does not seem to expose the test_type or scenario tagging, so the per-scenario metrics shown above are missing:
checks.........................: 100.00% ✓ 211 ✗ 0
data_received..................: 4.3 MB 332 kB/s
data_sent......................: 1.2 MB 91 kB/s
dropped_iterations.............: 214 16.692004/s
group_duration.................: min=243.53ms med=1.03s avg=1.23s max=5.44s p(90)=2.24s p(95)=2.66s p(99)=3.48s count=211
http_req_blocked...............: min=37.19ms med=43.31ms avg=157.6ms max=2.17s p(90)=447.42ms p(95)=1.18s p(99)=1.52s count=455
http_req_connecting............: min=16.9ms med=41ms avg=33.17ms max=50.52ms p(90)=45.7ms p(95)=47.1ms p(99)=48.13ms count=455
http_req_duration..............: min=203.1ms med=422.82ms avg=706.67ms max=4.33s p(90)=1.38s p(95)=1.94s p(99)=2.82s count=455
{ expected_response:true }...: min=203.1ms med=422.82ms avg=706.67ms max=4.33s p(90)=1.38s p(95)=1.94s p(99)=2.82s count=455
http_req_failed................: 0.00% ✓ 0 ✗ 455
http_req_receiving.............: min=28µs med=148µs avg=394.18µs max=5.74ms p(90)=875.2µs p(95)=2.12ms p(99)=4.29ms count=455
http_req_sending...............: min=8µs med=46µs avg=106.23µs max=2.29ms p(90)=163.6µs p(95)=278.29µs p(99)=2.03ms count=455
http_req_tls_handshaking.......: min=0s med=0s avg=24.95ms max=355.77ms p(90)=25.83ms p(95)=203.82ms p(99)=334.11ms count=455
http_req_waiting...............: min=202.81ms med=422.09ms avg=706.17ms max=4.33s p(90)=1.38s p(95)=1.94s p(99)=2.82s count=455
http_reqs......................: 455 35.490008/s
iteration_duration.............: min=243.8ms med=1.48s avg=1.69s max=6.89s p(90)=2.99s p(95)=3.57s p(99)=5s count=204
iterations.....................: 204 15.912004/s
vus............................: 229 min=3 max=229
vus_max........................: 279 min=100 max=279
running (00m12.8s), 00000/00314 VUs, 204 complete and 264 interrupted iterations
peak_ents ✗ [>-------------------------------------] 0182/0264 VUs 0m12.8s/8m0s 106.65 iters/s
gate_rush_ents • [--------------------------------------] waiting 4m57.2s
Can anyone see what I’m missing in the config.json block to replicate the per-scenario summary metrics I get from the in-script options?

Thanks in advance.