Skip to content

Commit

Permalink
Added measurements for each sub-test on CDash, and prefixed performance cmdline options with hpx:
Browse files Browse the repository at this point in the history

Signed-off-by: Vedant <vedantnimjed@gmail.com>
  • Loading branch information
vrnimje committed Aug 22, 2024
1 parent ff62d46 commit 17dfd82
Show file tree
Hide file tree
Showing 6 changed files with 68 additions and 20 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/linux_with_bench_test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,4 +43,4 @@ jobs:
shell: bash
run: |
cd build
./bin/minmax_element_performance_test --detailed_bench
./bin/minmax_element_performance_test --hpx:detailed_bench
2 changes: 1 addition & 1 deletion .github/workflows/linux_with_nanobench_test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -45,4 +45,4 @@ jobs:
shell: bash
run: |
cd build
./bin/minmax_element_performance_test --detailed_bench
./bin/minmax_element_performance_test --hpx:detailed_bench
4 changes: 1 addition & 3 deletions .jenkins/lsu-perftests/comment_github.sh
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,4 @@ curl \
-X POST \
-H "Authorization: token ${GITHUB_TOKEN}" \
https://api.github.com/repos/STEllAR-GROUP/hpx/issues/${ghprbPullId}/comments \
-d "{\"body\": \"<details><summary>Performance test report<\/summary><table><tr><th>Test name<\/th><th>Executor<\/th><th>Mean Percentage Diff.<\/th><th>Coefficient of variation<\/th><\/tr>${report}<\/table><\/details>\"}"

# popd
-d "{\"body\": \"<details><summary>Performance test report<\/summary><table><tr><th>Test name<\/th><th>Executor<\/th><th>Percentage Diff.<\/th><th>Classification<\/th><\/tr>${report}<\/table><\/details>\"}"
4 changes: 2 additions & 2 deletions cmake/HPX_AddTest.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -300,7 +300,7 @@ function(add_hpx_performance_report_test subcategory name)
${name}
${ARGN}
RUN_SERIAL
"--print_cdash_img_path"
"--hpx:print_cdash_img_path"
)
find_package(Python REQUIRED)

Expand All @@ -314,7 +314,7 @@ function(add_hpx_performance_report_test subcategory name)
${name}_cdash_results
COMMAND
sh -c
"${CMAKE_BINARY_DIR}/bin/${name}_test ${ARGN} --detailed_bench >${CMAKE_BINARY_DIR}/${name}.json"
"${CMAKE_BINARY_DIR}/bin/${name}_test ${ARGN} --hpx:detailed_bench >${CMAKE_BINARY_DIR}/${name}.json"
COMMAND
${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tools/perftests_plot.py
${CMAKE_BINARY_DIR}/${name}.json
Expand Down
26 changes: 20 additions & 6 deletions libs/core/testing/src/performance.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,20 +24,20 @@ namespace hpx::util {

void perftests_cfg(hpx::program_options::options_description& cmdline)
{
cmdline.add_options()("detailed_bench",
cmdline.add_options()("hpx:detailed_bench",
"Use if detailed benchmarks are required, showing the execution "
"time taken for each epoch")("print_cdash_img_path",
"time taken for each epoch")("hpx:print_cdash_img_path",
"Print the path to the images to be uploaded, in CDash XML format");
}

void perftests_init(const hpx::program_options::variables_map& vm,
const std::string test_name)
{
if (vm.count("detailed_bench"))
if (vm.count("hpx:detailed_bench"))
{
detailed_ = true;
}
if (vm.count("print_cdash_img_path"))
if (vm.count("hpx:print_cdash_img_path"))
{
print_cdash_img = true;
}
Expand All @@ -56,8 +56,18 @@ namespace hpx::util {
{{#result}}
name: {{name}},
executor: {{context(executor)}},
average: {{average(elapsed)}}{{^-last}}
{{/-last}}
average: {{average(elapsed)}}
{{/result}})DELIM";
}

// Mustache-style template handed to nanobench for CDash runs: after the
// usual name/executor/average lines it emits one <CTestMeasurement> XML tag
// per benchmark result, so CDash records each sub-test as a separate
// numeric measurement keyed by "<name>_<executor>".
// NOTE: the \" sequences are literal backslash-quote characters (raw
// string) -- the consumer is expected to unescape them.
char const* nanobench_hpx_cdash_template() noexcept
{
    return R"TPL(Results:
{{#result}}
name: {{name}},
executor: {{context(executor)}},
average: {{average(elapsed)}}
<CTestMeasurement type=\"numeric/double\" name=\"{{name}}_{{context(executor)}}\">{{average(elapsed)}}</CTestMeasurement>
{{/result}})TPL";
}

Expand Down Expand Up @@ -227,9 +237,11 @@ average: {{average(elapsed)}}{{^-last}}
if (!detailed_ && print_cdash_img)
{
for (long unsigned int i = 0; i < detail::bench().results().size(); i++)
{
strm << "<CTestMeasurementFile type=\"image/png\" "
"name=\"perftest\">"
<< "./" << test_name_ << "_" << i << ".png</CTestMeasurementFile>\n";
}
}
}

Expand All @@ -244,6 +256,8 @@ average: {{average(elapsed)}}{{^-last}}
{
if (detailed_)
perftests_print_times(detail::nanobench_hpx_template(), std::cout);
else if (print_cdash_img)
perftests_print_times(detail::nanobench_hpx_cdash_template(), std::cout);
else
perftests_print_times(
detail::nanobench_hpx_simple_template(), std::cout);
Expand Down
50 changes: 43 additions & 7 deletions tools/perftests_plot.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,46 @@

sns.set_style("ticks",{'axes.grid' : True})

def classify(lower, upper):
    """Classify a bootstrap confidence interval of the performance
    difference into a short marker for the report table.

    Markers: '+' variants mean faster, '-' variants mean slower,
    '=' variants mean no significant change, '?' variants mean the
    interval is too uncertain to decide.

    BUG FIX: the parameter was named ``higher`` while the body referenced
    ``upper``, so every call raised ``NameError``.  Both call sites pass
    the bounds positionally, so renaming the parameter is safe.

    :param lower: lower bound of the confidence interval
    :param upper: upper bound of the confidence interval
    :return: classification marker string
    """
    # Interval wider than 10 percentage points: no conclusion possible.
    if upper - lower > 0.1:
        return '??'

    if -0.01 <= lower <= 0 <= upper <= 0.01:
        return '='
    if -0.02 <= lower <= upper <= 0.02:
        return '(=)'

    # probably no change, but quite large uncertainty
    if -0.05 <= lower <= 0 <= upper <= 0.05:
        return '?'

    # faster
    if -0.01 <= lower <= 0.0:
        return '(+)'
    if -0.05 <= lower <= -0.01:
        return '+'
    if -0.1 <= lower <= -0.05:
        return '++'
    if lower <= -0.1:
        return '+++'

    # slower
    if 0.01 >= upper >= 0.0:
        return '(-)'
    if 0.05 >= upper >= 0.01:
        return '-'
    if 0.1 >= upper >= 0.05:
        return '--'
    if upper >= 0.1:
        return '---'

    # no idea
    return '???'

def median_statistic(sample1, sample2, axis=-1):
    # Statistic handed to scipy.stats.bootstrap: compares the two samples
    # via their medians along `axis`.
    # NOTE(review): the second `return` below is unreachable dead code --
    # this looks like leftover of an edit (relative vs. absolute median
    # difference).  Confirm which of the two formulas is the intended
    # statistic and delete the other line.
    median1 = np.median(sample1, axis=axis)
    median2 = np.median(sample2, axis=axis)
    return (median2 - median1) / median1
    return (median1 - median2)

rng = np.random.default_rng()

Expand Down Expand Up @@ -47,11 +83,11 @@ def median_statistic(sample1, sample2, axis=-1):
category.append("current")
samples.append(test1["series"])

data = (test2["series"], test1["series"])
data = (test2["series"], test1["series"]) / np.median(test1["series"])
res = scipy.stats.bootstrap(data, median_statistic, method='basic', random_state=rng)

mean2 = np.mean(test2["series"])
mean1 = np.mean(test1["series"])
mean2 = np.median(test2["series"])
mean1 = np.median(test1["series"])

# if n != 1:
# curr_plot = ax[i // n, i % n]
Expand All @@ -74,16 +110,16 @@ def median_statistic(sample1, sample2, axis=-1):

lower, upper = res.confidence_interval

if not (-0.02 <= lower <= 0 <= upper <= 0.02 or -0.01 <= lower <= 0.0 or 0.01 >= upper >= 0.0):
if ('=' not in classify(lower, upper)):
if header_flag:
html_file.writelines("<tr><th scope=\"row\" colspan=\"5\">{}</th></tr>".format(sys.argv[3].split('/')[-1]))
html_file.writelines("<tr><th scope=\"row\" colspan=\"4\">{}</th></tr>".format(sys.argv[3].split('/')[-1]))
header_flag = False
if flag:
html_file.writelines("<tr><th>{}</th>".format(test1["name"]))
flag = False
html_file.writelines("<td>{}</td>".format(test1["executor"].replace('<', '&lt;').replace('>', '&gt;')))
html_file.writelines("<td>{:.2f} %</td>".format(percentage_diff))
html_file.writelines("<td>{:.5f}</td>".format(abs(res.standard_error/np.mean(res.bootstrap_distribution))))
html_file.writelines("<td>{}</td>".format(classify(lower, upper)))
if not flag:
html_file.writelines("</tr>")
else:
Expand Down

0 comments on commit 17dfd82

Please sign in to comment.