Add Service Extensions macro benchmarks
e-n-0 committed Jan 22, 2025
1 parent 45ce7b6 commit bd304cc
Showing 1 changed file with 85 additions and 4 deletions: .gitlab/macrobenchmarks.yml
@@ -1,7 +1,7 @@
variables:
-  BENCHMARKS_CI_IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/benchmarking-platform:go-go-prof-app
+  BENCHMARKS_CI_IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/benchmarking-platform:go-go-prof-app-and-serviceextensions-0001

-.benchmarks:
+.benchmarks-default:
stage: macrobenchmarks
needs: []
tags: ["runner:apm-k8s-same-cpu"]
@@ -57,12 +57,12 @@ variables:
#

.go123-benchmarks:
-  extends: .benchmarks
+  extends: .benchmarks-default
variables:
GO_VERSION: "1.23.0"

.go122-benchmarks:
-  extends: .benchmarks
+  extends: .benchmarks-default
variables:
GO_VERSION: "1.22.5"

@@ -176,3 +176,84 @@ go123-profile-trace-asm:
ENABLE_PROFILING: "true"
ENABLE_APPSEC: "true"
DD_PROFILING_EXECUTION_TRACE_ENABLED: "false"

#
# Macro benchmarks for Service Extensions
# (using Envoy External Processing)
#

.benchmarks-serviceextensions:
stage: macrobenchmarks
needs: []
tags: ["runner:apm-k8s-same-cpu"]
timeout: 1h
rules:
- if: $CI_COMMIT_REF_NAME == "main"
when: always
- when: manual
  # If you have a problem with the GitLab cache, see the Troubleshooting section in the Benchmarking Platform docs
image: $BENCHMARKS_CI_IMAGE
script:
- git clone --branch go/go-prof-app https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/benchmarking-platform platform && cd platform
- bp-runner bp-runner.envoy_serviceextension.yml --debug
artifacts:
name: "artifacts"
when: always
paths:
- platform/artifacts-se/
expire_in: 3 months
variables:
FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY: "true" # Important tweak for stability of benchmarks
GO_VERSION: "1.23.0"
ARTIFACTS_DIR: "./artifacts-se"

  # Workaround: the benchmarks do not currently run on every PR, but GitHub still shows them as pending.
  # Marking them as allow_failure makes that status go away. (Remove this workaround once the benchmarks
  # are changed to run on every PR.)
allow_failure: true

retry:
max: 2
when:
- unknown_failure
- data_integrity_failure
- runner_system_failure
- scheduler_failure
- api_failure

# Scenarios with external processor, webserver without tracer
se-ext_proc-appsec:
extends: .benchmarks-serviceextensions
variables:
EXT_PROC: true
ENABLE_APPSEC: true
TRACER: false

se-ext_proc-only-tracing:
extends: .benchmarks-serviceextensions
variables:
EXT_PROC: true
ENABLE_APPSEC: false
TRACER: false

# Scenarios without external processor, webserver with tracer
se-tracer-no-ext_proc-appsec:
extends: .benchmarks-serviceextensions
variables:
EXT_PROC: false
ENABLE_APPSEC: true
TRACER: true

se-tracer-no-ext_proc-only-tracing:
extends: .benchmarks-serviceextensions
variables:
EXT_PROC: false
ENABLE_APPSEC: false
TRACER: true

# Scenario without tracer, only a direct connection through Envoy to the webserver
se-no-tracer-no-ext_proc:
extends: .benchmarks-serviceextensions
variables:
EXT_PROC: false
TRACER: false
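
Note: additional Service Extensions scenarios would follow the same pattern, extending the shared .benchmarks-serviceextensions base job and toggling the EXT_PROC / ENABLE_APPSEC / TRACER variables. The sketch below is illustrative only; the job name and variable combination are hypothetical and not part of this commit.

# Hypothetical example (not in this commit): a scenario with the external
# processor, AppSec, and the tracer all enabled.
se-ext_proc-tracer-appsec:
  extends: .benchmarks-serviceextensions
  variables:
    EXT_PROC: true
    ENABLE_APPSEC: true
    TRACER: true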
