Commit

Add new GHA workflow for running micro benchmarks on a nightly basis and test it.

Kami committed Apr 20, 2021
1 parent a628574 commit 174417b
Showing 4 changed files with 190 additions and 12 deletions.
175 changes: 175 additions & 0 deletions .github/workflows/microbenchmarks.yaml
@@ -0,0 +1,175 @@
# Workflow which runs micro benchmarks on a nightly basis.
# We run it nightly instead of on every PR / merge to master since some of those benchmarks take quite a while.
name: Micro Benchmarks

on:
push:
branches:
- message_bus_compression_support
pull_request:
types: [opened, reopened, edited]
branches:
- master
schedule:
- cron: '30 3 * * *'

jobs:
# Special job which automatically cancels old runs for the same branch, skips runs for a
# file set which has already passed, etc.
pre_job:
name: Skip Duplicate Jobs Pre Job
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@4c656bbdb6906310fa6213604828008bc28fe55d # v3.3.0
with:
cancel_others: 'true'
github_token: ${{ github.token }}

micro-benchmarks:
needs: pre_job
# NOTE: We always want to run this job on master since we run some additional checks there
# (code coverage, etc.)
if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }}
name: '${{ matrix.name }} - Python ${{ matrix.python-version }}'
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- name: 'Microbenchmarks'
task: 'micro-benchmarks'
nosetests_node_total: 1
nosetests_node_index: 0
python-version: '3.6'
services:
mongo:
image: mongo:4.4
ports:
- 27017:27017

# In GHA, these services are started before the code is checked out.
# We use bitnami images to facilitate reconfiguring RabbitMQ during ci-integration tests.
# We rely on custom config and SSL certs that are in the repo.
# Many images require config in env vars (which we can't change during the test job)
# or in entrypoint args (which we can't override for GHA services);
# the bitnami images can instead load config files from mounted volumes.
rabbitmq:
image: bitnami/rabbitmq:3.8
volumes:
- /home/runner/rabbitmq_conf:/bitnami/conf # RABBITMQ_MOUNTED_CONF_DIR
env:
# tell bitnami/rabbitmq to enable this by default
RABBITMQ_PLUGINS: rabbitmq_management
RABBITMQ_USERNAME: guest
RABBITMQ_PASSWORD: guest

# These are strictly docker options, not entrypoint args (GHA restriction)
options: >-
--name rabbitmq
ports:
# These 6 ports are exposed by bitnami/rabbitmq (see https://www.rabbitmq.com/networking.html#ports)
# host_port:container_port/protocol
- 5671:5671/tcp # AMQP SSL port
- 5672:5672/tcp # AMQP standard port
- 15672:15672/tcp # Management: HTTP, CLI
#- 15671:15671/tcp # Management: SSL port
#- 25672:25672/tcp # inter-node or CLI
#- 4369:4369/tcp # epmd

env:
TASK: '${{ matrix.task }}'

NODE_TOTAL: '${{ matrix.nosetests_node_total }}'
NODE_INDEX: '${{ matrix.nosetests_node_index }}'

COLUMNS: '120'
ST2_CI: 'true'

# GitHub is juggling how to set vars for multiple shells. Protect our PATH assumptions.
PATH: /home/runner/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: 'Set up Python (${{ matrix.python-version }})'
uses: actions/setup-python@v2
with:
python-version: '${{ matrix.python-version }}'
- name: Cache Python Dependencies
uses: actions/cache@v2
with:
path: |
~/.cache/pip
virtualenv
~/virtualenv
key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }}
restore-keys: |
${{ runner.os }}-python-${{ matrix.python-version }}-
- name: Cache APT Dependencies
id: cache-apt-deps
uses: actions/cache@v2
with:
path: |
~/apt_cache
key: ${{ runner.os }}-apt-v5-${{ hashFiles('scripts/github/apt-packages.txt') }}
restore-keys: |
${{ runner.os }}-apt-v5-
- name: Install APT Dependencies
env:
CACHE_HIT: ${{ steps.cache-apt-deps.outputs.cache-hit }}
run: |
./scripts/github/install-apt-packages-use-cache.sh
- name: Install virtualenv
run: |
./scripts/github/install-virtualenv.sh
- name: Install requirements
run: |
./scripts/ci/install-requirements.sh
- name: Print versions
run: |
./scripts/ci/print-versions.sh
- name: Run Micro Benchmarks
timeout-minutes: 45
# use: script -e -c to print colors
run: |
make ${TASK}
- name: Codecov
# NOTE: We only generate and submit a coverage report for master and version branches, and only
# when the build succeeds (the default on GitHub Actions; on Travis we had to check for success
# explicitly).
if: "${{ success() && env.ENABLE_COVERAGE == 'yes' }}"
run: |
./scripts/ci/submit-codecov-coverage.sh
- name: Compress Service Logs Before upload
if: ${{ failure() }}
run: |
tar cvzpf logs.tar.gz logs/*
- name: Upload Histograms
if: ${{ failure() }}
uses: actions/upload-artifact@v2
with:
name: benchmark_histograms
path: benchmark_histograms/
retention-days: 30

slack-notification:
name: Slack notification for failed builds
if: always()
needs:
- micro-benchmarks
runs-on: ubuntu-latest
steps:
- name: Workflow conclusion
# this step creates an environment variable WORKFLOW_CONCLUSION and is the most reliable way to check the status of previous jobs
uses: technote-space/workflow-conclusion-action@v2
- name: CI Run Failure Slack Notification
if: ${{ env.WORKFLOW_CONCLUSION == 'failure' }}
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
uses: voxmedia/github-action-slack-notify-build@v1
with:
channel: development
status: FAILED
color: danger
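
The matrix exports nosetests_node_total / nosetests_node_index as NODE_TOTAL and NODE_INDEX. With a single benchmark job they are 1 and 0, so nothing is actually split, but they keep the job consistent with the repo's sharded test workflows. Below is a hypothetical Python sketch of how this style of env-var based sharding typically selects a node's share of tests; the repo's actual scripts may differ.

# Hypothetical sketch of NODE_TOTAL / NODE_INDEX sharding (not from the repo).
import os

def select_shard(tests):
    total = int(os.environ.get("NODE_TOTAL", "1"))
    index = int(os.environ.get("NODE_INDEX", "0"))
    # Deterministic round-robin split: node i runs every total-th test.
    return [t for i, t in enumerate(sorted(tests)) if i % total == index]

# With NODE_TOTAL=1 / NODE_INDEX=0 (this workflow), all tests are selected.
print(select_shard(["test_a", "test_b", "test_c"]))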
20 changes: 11 additions & 9 deletions Makefile
@@ -558,15 +558,17 @@ micro-benchmarks: requirements .micro-benchmarks
@echo
@echo "==================== micro-benchmarks ===================="
@echo
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_save_large_execution"
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_read_large_execution"
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_save_multiple_fields"
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_save_large_string_value"
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_read_large_string_value"
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:dict_keys_count_and_depth -s -v st2common/benchmarks/micro/test_fast_deepcopy.py -k "test_fast_deepcopy_with_dict_values"
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_fast_deepcopy.py -k "test_fast_deepcopy_with_json_fixture_file"
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file,param:indent_sort_keys_tuple -s -v st2common/benchmarks/micro/test_json_serialization_and_deserialization.py -k "test_json_dumps"
- . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_json_serialization_and_deserialization.py -k "test_json_loads"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_save_large_execution"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_read_large_execution"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_save_multiple_fields"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_save_large_string_value"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_mongo_field_types.py -k "test_read_large_string_value"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:dict_keys_count_and_depth -s -v st2common/benchmarks/micro/test_fast_deepcopy.py -k "test_fast_deepcopy_with_dict_values"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_fast_deepcopy.py -k "test_fast_deepcopy_with_json_fixture_file"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file,param:indent_sort_keys_tuple -s -v st2common/benchmarks/micro/test_json_serialization_and_deserialization.py -k "test_json_dumps"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_json_serialization_and_deserialization.py -k "test_json_loads"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_publisher_compression.py -k "test_pickled_object_compression"
+ . $(VIRTUALENV_DIR)/bin/activate; pytest --benchmark-histogram=benchmark_histograms/benchmark --benchmark-only --benchmark-name=short --benchmark-columns=min,max,mean,stddev,median,ops,rounds --benchmark-group-by=group,param:fixture_file -s -v st2common/benchmarks/micro/test_publisher_compression.py -k "test_pickled_object_compression_publish"

.PHONY: .cleanmongodb
.cleanmongodb:
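
Each invocation now passes --benchmark-histogram=benchmark_histograms/benchmark, which tells pytest-benchmark to write an SVG histogram per benchmark group into benchmark_histograms/ (the directory the workflow uploads as an artifact on failure). A minimal sketch of the kind of micro benchmark these targets run (a hypothetical test, not one from the repo):

# Hypothetical pytest-benchmark micro benchmark. Run with e.g.:
#   pytest --benchmark-only --benchmark-histogram=benchmark_histograms/benchmark test_example.py
import copy

import pytest

@pytest.mark.parametrize("depth", [1, 10])
def test_deepcopy_nested_dict(benchmark, depth):
    data = {"key": "value"}
    for _ in range(depth):
        data = {"nested": data}

    # The benchmark fixture calls the target repeatedly and records timings.
    result = benchmark(copy.deepcopy, data)
    assert result == data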
6 changes: 3 additions & 3 deletions st2common/benchmarks/micro/test_mongo_field_types.py
@@ -190,7 +190,7 @@ def run_benchmark():
        inserted_live_action_db = LiveAction.add_or_update(live_action_db)
        return inserted_live_action_db

-    inserted_live_action_db = benchmark.pedantic(run_benchmark, iterations=3, rounds=3)
+    inserted_live_action_db = benchmark.pedantic(run_benchmark)
    retrieved_live_action_db = LiveAction.get_by_id(inserted_live_action_db.id)
    # Assert that result is correctly converted back to dict on retrieval
    assert inserted_live_action_db.result == data
@@ -257,7 +257,7 @@ def run_benchmark():
        inserted_live_action_db = LiveAction.add_or_update(live_action_db)
        return inserted_live_action_db

-    inserted_live_action_db = benchmark.pedantic(run_benchmark, iterations=3, rounds=3)
+    inserted_live_action_db = benchmark.pedantic(run_benchmark)
    retrieved_live_action_db = LiveAction.get_by_id(inserted_live_action_db.id)
    # Assert that result is correctly converted back to dict on retrieval
    assert inserted_live_action_db.field1 == data
@@ -325,7 +325,7 @@ def run_benchmark():
        retrieved_live_action_db = LiveAction.get_by_id(inserted_live_action_db.id)
        return retrieved_live_action_db

-    retrieved_live_action_db = benchmark.pedantic(run_benchmark, iterations=3, rounds=3)
+    retrieved_live_action_db = benchmark.pedantic(run_benchmark)
    # Assert that result is correctly converted back to dict on retrieval
    assert retrieved_live_action_db == inserted_live_action_db
    assert retrieved_live_action_db.result == data
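
Dropping the explicit arguments means benchmark.pedantic() falls back to its defaults (rounds=1, iterations=1), so each benchmark now times a single execution of the target instead of 3 x 3. A sketch of the difference, assuming pytest-benchmark 3.x semantics:

# Sketch of the behavior change, assuming pytest-benchmark 3.x defaults.
def test_pedantic_defaults(benchmark):
    def run_benchmark():
        return sum(range(1000))  # stand-in for the real DB round trip

    # Old call: 3 rounds x 3 iterations = 9 timed executions.
    # inserted = benchmark.pedantic(run_benchmark, iterations=3, rounds=3)

    # New call: defaults to rounds=1, iterations=1, i.e. one timed execution.
    inserted = benchmark.pedantic(run_benchmark)
    assert inserted == sum(range(1000))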
1 change: 1 addition & 0 deletions test-requirements.txt
@@ -38,6 +38,7 @@ prance==0.15.0
pip-tools>=5.4,<6.1
pytest==5.4.3
pytest-benchmark==3.2.3
+ pytest-benchmark[histogram]==3.2.3
# zstandard is used for micro benchmarks
zstandard==0.15.2
# ujson is used for micro benchmarks
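
The [histogram] extra pulls in the pygal / pygaljs dependencies that pytest-benchmark needs to render the SVG histograms requested by the new --benchmark-histogram flag. A minimal sketch of the same kind of run driven from Python instead of the Makefile:

# Sketch: drive a benchmark run from Python instead of the Makefile.
# Assumes pytest and pytest-benchmark[histogram] are installed.
import pytest

exit_code = pytest.main([
    "--benchmark-only",
    "--benchmark-histogram=benchmark_histograms/benchmark",
    "st2common/benchmarks/micro/test_fast_deepcopy.py",
])
raise SystemExit(exit_code)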
