From d59d0f9fa41d63c8f22fdba1eb56cecfc527caaa Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Tue, 30 Apr 2024 20:42:36 +0200 Subject: [PATCH] chore(ci): add macrobenchmarks to pipeline (#9131) Enables first stage (only tracing) macrobenchmarks on gitlab CI. ## Checklist - [x] Change(s) are motivated and described in the PR description - [x] Testing strategy is described if automated tests are not included in the PR - [x] Risks are described (performance impact, potential for breakage, maintainability) - [x] Change is maintainable (easy to change, telemetry, documentation) - [x] [Library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) are followed or label `changelog/no-changelog` is set - [x] Documentation is included (in-code, generated user docs, [public corp docs](https://github.com/DataDog/documentation/)) - [x] Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) - [x] If this PR changes the public interface, I've notified `@DataDog/apm-tees`. 
## Reviewer Checklist - [x] Title is accurate - [x] All changes are related to the pull request's stated goal - [x] Description motivates each change - [x] Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - [x] Testing strategy adequately addresses listed risks - [x] Change is maintainable (easy to change, telemetry, documentation) - [x] Release note makes sense to a user of the library - [x] Author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - [x] Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .gitlab-ci.yml | 2 + .gitlab/macrobenchmarks.yml | 86 +++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) create mode 100644 .gitlab/macrobenchmarks.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a2cd2e1ff53..071dde14005 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -3,10 +3,12 @@ stages: - deploy - benchmarks - benchmarks-pr-comment + - macrobenchmarks include: - remote: https://gitlab-templates.ddbuild.io/apm/packaging.yml - local: ".gitlab/benchmarks.yml" + - local: ".gitlab/macrobenchmarks.yml" variables: DOWNSTREAM_BRANCH: diff --git a/.gitlab/macrobenchmarks.yml b/.gitlab/macrobenchmarks.yml new file mode 100644 index 00000000000..16cf2b3b9be --- /dev/null +++ b/.gitlab/macrobenchmarks.yml @@ -0,0 +1,86 @@ +variables: + BASE_CI_IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/benchmarking-platform:dd-trace-py-macrobenchmarks + +.macrobenchmarks: + stage: macrobenchmarks + needs: [] + tags: ["runner:apm-k8s-same-cpu"] + timeout: 1h + rules: + - if: $CI_PIPELINE_SOURCE == "schedule" + when: always + - when: manual + ## Next step, enable: + # - if: $CI_COMMIT_REF_NAME == "main" + # when: always + # If you have a problem with Gitlab cache, see Troubleshooting section in Benchmarking Platform 
docs + image: $BASE_CI_IMAGE + script: | + git clone --branch python/macrobenchmarks https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/benchmarking-platform platform && cd platform + if [ "$BP_PYTHON_SCENARIO_DIR" == "flask-realworld" ]; then + bp-runner bp-runner.flask-realworld.yml --debug + else + bp-runner bp-runner.simple.yml --debug + fi + artifacts: + name: "artifacts" + when: always + paths: + - platform/artifacts/ + expire_in: 3 months + variables: + # Benchmark's env variables. Modify to tweak benchmark parameters. + DD_TRACE_DEBUG: "false" + DD_RUNTIME_METRICS_ENABLED: "true" + DD_REMOTE_CONFIGURATION_ENABLED: "false" + DD_INSTRUMENTATION_TELEMETRY_ENABLED: "false" + + K6_OPTIONS_NORMAL_OPERATION_RATE: 40 + K6_OPTIONS_NORMAL_OPERATION_DURATION: 5m + K6_OPTIONS_NORMAL_OPERATION_GRACEFUL_STOP: 1m + K6_OPTIONS_NORMAL_OPERATION_PRE_ALLOCATED_VUS: 4 + K6_OPTIONS_NORMAL_OPERATION_MAX_VUS: 4 + + K6_OPTIONS_HIGH_LOAD_RATE: 500 + K6_OPTIONS_HIGH_LOAD_DURATION: 1m + K6_OPTIONS_HIGH_LOAD_GRACEFUL_STOP: 30s + K6_OPTIONS_HIGH_LOAD_PRE_ALLOCATED_VUS: 4 + K6_OPTIONS_HIGH_LOAD_MAX_VUS: 4 + + # Gitlab and BP specific env vars. Do not modify. + FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY: "true" + + # Workaround: Currently we're not running the benchmarks on every PR, but GitHub still shows them as pending. + # By marking the benchmarks as allow_failure, this should go away. 
(This workaround should be removed once the + # benchmarks get changed to run on every PR) + allow_failure: true + +macrobenchmarks: + extends: .macrobenchmarks + parallel: + matrix: + - DD_BENCHMARKS_CONFIGURATION: baseline + BP_PYTHON_SCENARIO_DIR: flask-realworld + DDTRACE_INSTALL_VERSION: "git+https://github.com/Datadog/dd-trace-py@${CI_COMMIT_SHA}" + + - DD_BENCHMARKS_CONFIGURATION: only-tracing + BP_PYTHON_SCENARIO_DIR: flask-realworld + DDTRACE_INSTALL_VERSION: "git+https://github.com/Datadog/dd-trace-py@${CI_COMMIT_SHA}" + + - DD_BENCHMARKS_CONFIGURATION: only-tracing + BP_PYTHON_SCENARIO_DIR: flask-realworld + DDTRACE_INSTALL_VERSION: "git+https://github.com/Datadog/dd-trace-py@${CI_COMMIT_SHA}" + DD_REMOTE_CONFIGURATION_ENABLED: "false" + DD_INSTRUMENTATION_TELEMETRY_ENABLED: "true" + + - DD_BENCHMARKS_CONFIGURATION: only-tracing + BP_PYTHON_SCENARIO_DIR: flask-realworld + DDTRACE_INSTALL_VERSION: "git+https://github.com/Datadog/dd-trace-py@${CI_COMMIT_SHA}" + DD_REMOTE_CONFIGURATION_ENABLED: "false" + DD_INSTRUMENTATION_TELEMETRY_ENABLED: "false" + + - DD_BENCHMARKS_CONFIGURATION: only-tracing + BP_PYTHON_SCENARIO_DIR: flask-realworld + DDTRACE_INSTALL_VERSION: "git+https://github.com/Datadog/dd-trace-py@${CI_COMMIT_SHA}" + DD_REMOTE_CONFIGURATION_ENABLED: "true" + DD_INSTRUMENTATION_TELEMETRY_ENABLED: "true"