From fa9d2cb0cb62f9fd550a812279f172d373e4406b Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sun, 23 Jun 2024 05:08:26 +0800 Subject: [PATCH 1/2] Cherry pick changes for 2.8.5 release (#13269) Co-authored-by: Zachary Hu <6426329+outsinre@users.noreply.github.com> Co-authored-by: Niklaus Schen <8458369+Water-Melon@users.noreply.github.com> Co-authored-by: Zhongwei Yao Co-authored-by: samugi Co-authored-by: Water-Melon Co-authored-by: Michael Martin Co-authored-by: Kong Team Gateway Bot <98048765+team-gateway-bot@users.noreply.github.com> Co-authored-by: Zachary Hu Co-authored-by: Qi Co-authored-by: Mayo Co-authored-by: Wangchong Zhou Co-authored-by: Harry Co-authored-by: Datong Sun Co-authored-by: Vinicius Mignot fix directory when stop_kong called (#12713) fix directory when stop_kong called (#12691) fix the workflow that comments the docker image on the commit (#12693) (#12711) fix(ci): replace "cpio" rpm extraction (#13233) (#13263) fix label-check test (#9717) --- .bazelignore | 37 + .bazelrc | 51 + .bazelversion | 1 + .ci/luacov-stats-aggregator.lua | 62 + .devcontainer/devcontainer.json | 2 +- .devcontainer/docker-compose.yml | 8 +- .github/ISSUE_TEMPLATE/bug_report.yaml | 5 +- .github/ISSUE_TEMPLATE/config.yml | 2 + .github/PULL_REQUEST_TEMPLATE.md | 12 +- .github/actions/build-cache-key/action.yml | 62 + .github/dependabot.yml | 4 +- .github/labeler.yml | 53 +- .github/matrix-commitly.yml | 24 + .github/matrix-full.yml | 209 +++ .github/stale.yml | 24 - .github/workflows/auto-assignee.yml | 5 +- .github/workflows/autodocs.yml | 24 +- .github/workflows/backport-fail-bot.yml | 37 + .github/workflows/backport.yml | 24 + .github/workflows/build.yml | 81 + .github/workflows/build_and_test.yml | 463 ++--- .github/workflows/buildifier.yml | 55 + .github/workflows/cherry-picks.yml | 41 + .github/workflows/community-stale.yml | 53 + .github/workflows/label-check.yml | 16 + .github/workflows/label-community-pr.yml | 34 + .github/workflows/label-schema.yml | 14 + .github/workflows/label.yml | 2 +- .github/workflows/package.yml | 128 -- .github/workflows/perf.yml | 280 ++- .../workflows/release-and-tests-fail-bot.yml | 47 + .github/workflows/release.yml | 694 ++++++++ .gitignore | 4 + .requirements | 6 + .travis.yml | 2 +- BUILD.bazel | 239 +++ Makefile | 80 +- WORKSPACE | 53 + bin/busted | 19 +- build/BUILD.bazel | 176 ++ build/README.md | 137 ++ build/build_system.bzl | 209 +++ build/cross_deps/BUILD.bazel | 0 build/cross_deps/libxcrypt/BUILD.bazel | 6 + .../libxcrypt/BUILD.libxcrypt.bazel | 60 + build/cross_deps/libxcrypt/repositories.bzl | 18 + build/cross_deps/libyaml/BUILD.bazel | 16 + build/cross_deps/libyaml/BUILD.libyaml.bazel | 40 + build/cross_deps/libyaml/repositories.bzl | 15 + build/cross_deps/repositories.bzl | 8 + build/cross_deps/zlib/BUILD.bazel | 16 + build/cross_deps/zlib/BUILD.zlib.bazel | 49 + build/cross_deps/zlib/repositories.bzl | 18 + build/dockerfiles/apk.Dockerfile | 56 + build/dockerfiles/deb.Dockerfile | 47 + build/dockerfiles/entrypoint.sh | 57 + build/dockerfiles/rpm.Dockerfile | 60 + build/kong_bindings.bzl | 108 ++ build/luarocks/BUILD.bazel | 21 + build/luarocks/BUILD.luarocks.bazel | 110 ++ build/luarocks/luarocks_repositories.bzl | 19 + build/luarocks/luarocks_wrap_script.lua | 40 + build/luarocks/templates/luarocks_exec.sh | 83 + build/luarocks/templates/luarocks_make.sh | 21 + build/luarocks/templates/luarocks_target.sh | 59 + build/nfpm/BUILD.bazel | 5 + build/nfpm/repositories.bzl | 55 + build/nfpm/rules.bzl | 90 + build/openresty/BUILD.bazel | 6 + 
build/openresty/BUILD.openresty.bazel | 262 +++ build/openresty/atc_router/BUILD.bazel | 0 build/openresty/lua-resty-lmdb-cross.patch | 51 + build/openresty/openssl/BUILD.bazel | 5 + build/openresty/openssl/README.md | 10 + build/openresty/openssl/openssl.bzl | 82 + .../openssl/openssl_repositories.bzl | 21 + ...arm64-macos-fix-vararg-call-handling.patch | 62 + ...210510_02-arm64-fix-pcall-error-case.patch | 29 + .../LuaJIT-2.1-20210510_04_pass_cc_env.patch | 40 + ...rt_Detect_SSE4.2_support_dynamically.patch | 562 ++++++ ...xed_compatibility_regression_with_Mi.patch | 30 + ...E4.1_str_hash_to_replace_hash_sparse.patch | 1145 ++++++++++++ .../lua-cjson-2.1.0.8_01-empty_array.patch | 12 + ...a-resty-core-0.1.22_01-cosocket-mtls.patch | 566 ++++++ ...ore-0.1.22_02-dyn_upstream_keepalive.patch | 230 +++ ...resty.core.shdict-compatible-with-m1.patch | 270 +++ ...sty.core.response-compatible-with-m1.patch | 101 ++ ...-resty-websocket-0.08_01-client-mtls.patch | 92 + ...am_client_certificate_and_ssl_verify.patch | 52 + ...tokens-from-special-responses-output.patch | 37 + ...x-1.19.9_03-stream_proxy_ssl_disable.patch | 33 + ...nx-1.19.9_04-grpc_authority_override.patch | 25 + ...eaders-from-ngx-header-filter-module.patch | 70 + .../patches/nginx-cross-endianness-fix.patch | 79 + build/openresty/patches/nginx-cross.patch | 214 +++ .../ngx_lua-0.10.20_01-cosocket-mtls.patch | 1554 +++++++++++++++++ ...ua-0.10.20_02-dyn_upstream_keepalive.patch | 1319 ++++++++++++++ ..._lua-0.0.10_01-expose_request_struct.patch | 26 + .../openresty-custom_prefix_and_cc.patch | 107 ++ build/openresty/pcre/BUILD.bazel | 16 + build/openresty/pcre/BUILD.pcre.bazel | 36 + build/openresty/pcre/README.md | 5 + build/openresty/pcre/pcre_repositories.bzl | 20 + build/openresty/repositories.bzl | 66 + build/package/kong.logrotate | 15 + build/package/kong.service | 25 + build/package/nfpm.yaml | 73 + build/package/postinstall.sh | 30 + build/platforms/distro/BUILD | 37 + build/repositories.bzl | 73 + build/templates/venv-commons | 65 + build/templates/venv.fish | 93 + build/templates/venv.sh | 61 + build/tests/01-base.sh | 129 ++ build/tests/02-admin-api.sh | 38 + build/tests/03-http2-admin-api.sh | 18 + build/tests/util.sh | 174 ++ build/toolchain/.gitignore | 1 + build/toolchain/BUILD | 75 + build/toolchain/cc_toolchain_config.bzl | 213 +++ build/toolchain/generate_wrappers.sh | 31 + build/toolchain/managed_toolchain.bzl | 152 ++ build/toolchain/repositories.bzl | 70 + build/toolchain/templates/wrapper | 14 + changelog/Makefile | 95 + changelog/README.md | 137 ++ changelog/create_pr | 25 + changelog/unreleased/kong/add_zlib1g-dev.yml | 2 + changelog/unreleased/kong/fix_hash.yml | 3 + changelog/verify-prs | 464 +++++ ...-2.8.4-0.rockspec => kong-2.8.5-0.rockspec | 8 +- kong/meta.lua | 2 +- scripts/autodoc | 16 +- scripts/check-labeler.pl | 40 + scripts/check_spec_files_spelling.sh | 26 + .../dependency_services/00-create-pg-db.sh | 22 + scripts/dependency_services/common.sh | 80 + .../docker-compose-test-services.yml | 48 + scripts/dependency_services/up.fish | 35 + scripts/dependency_services/up.sh | 40 + scripts/explain_manifest/.gitignore | 1 + scripts/explain_manifest/config.py | 203 +++ .../docker_image_filelist.txt | 21 + scripts/explain_manifest/expect.py | 333 ++++ scripts/explain_manifest/explain.py | 246 +++ scripts/explain_manifest/filelist.txt | 10 + .../fixtures/alpine-amd64.txt | 116 ++ .../fixtures/amazonlinux-2-amd64.txt | 135 ++ .../fixtures/amazonlinux-2023-amd64.txt | 128 ++ 
.../fixtures/debian-10-amd64.txt | 135 ++ .../fixtures/debian-11-amd64.txt | 126 ++ .../explain_manifest/fixtures/el7-amd64.txt | 135 ++ .../explain_manifest/fixtures/el8-amd64.txt | 135 ++ .../fixtures/ubuntu-20.04-amd64.txt | 130 ++ .../fixtures/ubuntu-22.04-amd64.txt | 123 ++ scripts/explain_manifest/main.py | 215 +++ scripts/explain_manifest/requirements.txt | 4 + scripts/explain_manifest/suites.py | 128 ++ scripts/grep-kong-version.sh | 13 + scripts/release-kong.sh | 159 ++ scripts/upgrade-tests/docker-compose.yml | 62 + scripts/validate-rockspec | 106 ++ spec/02-integration/02-cmd/03-reload_spec.lua | 4 +- spec/02-integration/02-cmd/12-hybrid_spec.lua | 9 +- .../04-admin_api/03-consumers_routes_spec.lua | 2 +- .../04-admin_api/04-plugins_routes_spec.lua | 2 +- .../04-admin_api/09-routes_routes_spec.lua | 4 +- .../04-admin_api/10-services_routes_spec.lua | 2 +- .../04-admin_api/15-off_spec.lua | 8 +- .../04-admin_api/17-foreign-entity_spec.lua | 2 +- .../04-admin_api/18-worker-events.lua | 2 +- .../04-admin_api/19-vaults_spec.lua | 2 +- .../21-truncated_arguments_spec.lua | 2 +- .../05-proxy/04-plugins_triggering_spec.lua | 18 +- .../05-proxy/09-websockets_spec.lua | 2 +- .../05-proxy/11-handler_spec.lua | 6 +- .../05-proxy/13-error_handlers_spec.lua | 2 +- .../05-proxy/25-upstream_keepalive_spec.lua | 2 +- .../02-core_entities_invalidations_spec.lua | 12 +- .../11-dbless/01-respawn_spec.lua | 2 +- .../11-dbless/02-workers_spec.lua | 2 +- .../03-plugins/09-key-auth/02-access_spec.lua | 4 +- .../19-hmac-auth/04-invalidations_spec.lua | 2 +- .../20-ldap-auth/02-invalidations_spec.lua | 2 +- .../23-rate-limiting/03-api_spec.lua | 2 +- .../04-access_spec.lua | 12 +- .../31-proxy-cache/02-access_spec.lua | 2 +- .../03-plugins/31-proxy-cache/03-api_spec.lua | 2 +- .../31-proxy-cache/04-invalidations_spec.lua | 4 +- spec/03-plugins/34-zipkin/zipkin_spec.lua | 1 + 190 files changed, 16447 insertions(+), 554 deletions(-) create mode 100644 .bazelignore create mode 100644 .bazelrc create mode 100644 .bazelversion create mode 100644 .ci/luacov-stats-aggregator.lua create mode 100644 .github/actions/build-cache-key/action.yml create mode 100644 .github/matrix-commitly.yml create mode 100644 .github/matrix-full.yml delete mode 100644 .github/stale.yml create mode 100644 .github/workflows/backport-fail-bot.yml create mode 100644 .github/workflows/backport.yml create mode 100644 .github/workflows/build.yml create mode 100644 .github/workflows/buildifier.yml create mode 100644 .github/workflows/cherry-picks.yml create mode 100644 .github/workflows/community-stale.yml create mode 100644 .github/workflows/label-check.yml create mode 100644 .github/workflows/label-community-pr.yml create mode 100644 .github/workflows/label-schema.yml delete mode 100644 .github/workflows/package.yml create mode 100644 .github/workflows/release-and-tests-fail-bot.yml create mode 100644 .github/workflows/release.yml create mode 100644 BUILD.bazel create mode 100644 WORKSPACE create mode 100644 build/BUILD.bazel create mode 100644 build/README.md create mode 100644 build/build_system.bzl create mode 100644 build/cross_deps/BUILD.bazel create mode 100644 build/cross_deps/libxcrypt/BUILD.bazel create mode 100644 build/cross_deps/libxcrypt/BUILD.libxcrypt.bazel create mode 100644 build/cross_deps/libxcrypt/repositories.bzl create mode 100644 build/cross_deps/libyaml/BUILD.bazel create mode 100644 build/cross_deps/libyaml/BUILD.libyaml.bazel create mode 100644 build/cross_deps/libyaml/repositories.bzl create mode 100644 
build/cross_deps/repositories.bzl create mode 100644 build/cross_deps/zlib/BUILD.bazel create mode 100644 build/cross_deps/zlib/BUILD.zlib.bazel create mode 100644 build/cross_deps/zlib/repositories.bzl create mode 100644 build/dockerfiles/apk.Dockerfile create mode 100644 build/dockerfiles/deb.Dockerfile create mode 100755 build/dockerfiles/entrypoint.sh create mode 100644 build/dockerfiles/rpm.Dockerfile create mode 100644 build/kong_bindings.bzl create mode 100644 build/luarocks/BUILD.bazel create mode 100644 build/luarocks/BUILD.luarocks.bazel create mode 100644 build/luarocks/luarocks_repositories.bzl create mode 100644 build/luarocks/luarocks_wrap_script.lua create mode 100644 build/luarocks/templates/luarocks_exec.sh create mode 100644 build/luarocks/templates/luarocks_make.sh create mode 100644 build/luarocks/templates/luarocks_target.sh create mode 100644 build/nfpm/BUILD.bazel create mode 100644 build/nfpm/repositories.bzl create mode 100644 build/nfpm/rules.bzl create mode 100644 build/openresty/BUILD.bazel create mode 100644 build/openresty/BUILD.openresty.bazel create mode 100644 build/openresty/atc_router/BUILD.bazel create mode 100644 build/openresty/lua-resty-lmdb-cross.patch create mode 100644 build/openresty/openssl/BUILD.bazel create mode 100644 build/openresty/openssl/README.md create mode 100644 build/openresty/openssl/openssl.bzl create mode 100644 build/openresty/openssl/openssl_repositories.bzl create mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_01-ffi-arm64-macos-fix-vararg-call-handling.patch create mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_02-arm64-fix-pcall-error-case.patch create mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch create mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_05_Revert_Detect_SSE4.2_support_dynamically.patch create mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_06_Revert_bugfix_fixed_compatibility_regression_with_Mi.patch create mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_07_Revert_Adjust_SSE4.1_str_hash_to_replace_hash_sparse.patch create mode 100644 build/openresty/patches/lua-cjson-2.1.0.8_01-empty_array.patch create mode 100644 build/openresty/patches/lua-resty-core-0.1.22_01-cosocket-mtls.patch create mode 100644 build/openresty/patches/lua-resty-core-0.1.22_02-dyn_upstream_keepalive.patch create mode 100644 build/openresty/patches/lua-resty-core-0.1.22_03-make-resty.core.shdict-compatible-with-m1.patch create mode 100644 build/openresty/patches/lua-resty-core-0.1.22_04-make-resty.core.response-compatible-with-m1.patch create mode 100644 build/openresty/patches/lua-resty-websocket-0.08_01-client-mtls.patch create mode 100644 build/openresty/patches/nginx-1.19.9_01-upstream_client_certificate_and_ssl_verify.patch create mode 100644 build/openresty/patches/nginx-1.19.9_02-remove-server-tokens-from-special-responses-output.patch create mode 100644 build/openresty/patches/nginx-1.19.9_03-stream_proxy_ssl_disable.patch create mode 100644 build/openresty/patches/nginx-1.19.9_04-grpc_authority_override.patch create mode 100644 build/openresty/patches/nginx-1.19.9_05-remove-server-headers-from-ngx-header-filter-module.patch create mode 100644 build/openresty/patches/nginx-cross-endianness-fix.patch create mode 100644 build/openresty/patches/nginx-cross.patch create mode 100644 build/openresty/patches/ngx_lua-0.10.20_01-cosocket-mtls.patch create mode 100644 build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch create mode 100644 
build/openresty/patches/ngx_stream_lua-0.0.10_01-expose_request_struct.patch create mode 100644 build/openresty/patches/openresty-custom_prefix_and_cc.patch create mode 100644 build/openresty/pcre/BUILD.bazel create mode 100644 build/openresty/pcre/BUILD.pcre.bazel create mode 100644 build/openresty/pcre/README.md create mode 100644 build/openresty/pcre/pcre_repositories.bzl create mode 100644 build/openresty/repositories.bzl create mode 100644 build/package/kong.logrotate create mode 100644 build/package/kong.service create mode 100644 build/package/nfpm.yaml create mode 100644 build/package/postinstall.sh create mode 100644 build/platforms/distro/BUILD create mode 100644 build/repositories.bzl create mode 100644 build/templates/venv-commons create mode 100644 build/templates/venv.fish create mode 100644 build/templates/venv.sh create mode 100755 build/tests/01-base.sh create mode 100755 build/tests/02-admin-api.sh create mode 100755 build/tests/03-http2-admin-api.sh create mode 100755 build/tests/util.sh create mode 100644 build/toolchain/.gitignore create mode 100644 build/toolchain/BUILD create mode 100644 build/toolchain/cc_toolchain_config.bzl create mode 100755 build/toolchain/generate_wrappers.sh create mode 100644 build/toolchain/managed_toolchain.bzl create mode 100644 build/toolchain/repositories.bzl create mode 100644 build/toolchain/templates/wrapper create mode 100644 changelog/Makefile create mode 100644 changelog/README.md create mode 100644 changelog/create_pr create mode 100644 changelog/unreleased/kong/add_zlib1g-dev.yml create mode 100644 changelog/unreleased/kong/fix_hash.yml create mode 100755 changelog/verify-prs rename kong-2.8.4-0.rockspec => kong-2.8.5-0.rockspec (99%) create mode 100755 scripts/check-labeler.pl create mode 100755 scripts/check_spec_files_spelling.sh create mode 100755 scripts/dependency_services/00-create-pg-db.sh create mode 100644 scripts/dependency_services/common.sh create mode 100644 scripts/dependency_services/docker-compose-test-services.yml create mode 100755 scripts/dependency_services/up.fish create mode 100755 scripts/dependency_services/up.sh create mode 100644 scripts/explain_manifest/.gitignore create mode 100644 scripts/explain_manifest/config.py create mode 100644 scripts/explain_manifest/docker_image_filelist.txt create mode 100644 scripts/explain_manifest/expect.py create mode 100644 scripts/explain_manifest/explain.py create mode 100644 scripts/explain_manifest/filelist.txt create mode 100644 scripts/explain_manifest/fixtures/alpine-amd64.txt create mode 100644 scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt create mode 100644 scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt create mode 100644 scripts/explain_manifest/fixtures/debian-10-amd64.txt create mode 100644 scripts/explain_manifest/fixtures/debian-11-amd64.txt create mode 100644 scripts/explain_manifest/fixtures/el7-amd64.txt create mode 100644 scripts/explain_manifest/fixtures/el8-amd64.txt create mode 100644 scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt create mode 100644 scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt create mode 100755 scripts/explain_manifest/main.py create mode 100644 scripts/explain_manifest/requirements.txt create mode 100644 scripts/explain_manifest/suites.py create mode 100755 scripts/grep-kong-version.sh create mode 100755 scripts/release-kong.sh create mode 100644 scripts/upgrade-tests/docker-compose.yml create mode 100755 scripts/validate-rockspec diff --git a/.bazelignore b/.bazelignore new file 
mode 100644 index 00000000000..d40366529ee --- /dev/null +++ b/.bazelignore @@ -0,0 +1,37 @@ +# NB: semantics here are not the same as .gitignore +# see https://github.com/bazelbuild/bazel/issues/8106 +# Ignore backup files. +*~ +# Ignore Vim swap files. +.*.swp +# Ignore files generated by IDEs. +/.aswb/ +/.cache/ +/.classpath +/.clwb/ +/.factorypath +/.idea/ +/.ijwb/ +/.project +/.settings +/.vscode/ +/bazel.iml +# Ignore all bazel-* symlinks. There is no full list since this can change +# based on the name of the directory bazel is cloned into. +/bazel-* +# Ignore outputs generated during Bazel bootstrapping. +/output/ +# Ignore jekyll build output. +/production +/.sass-cache +# Bazelisk version file +.bazelversion +# User-specific .bazelrc +user.bazelrc + +/t/ +/spec/ +/spec-ee/ +/servroot/ +/autodoc/ +/.github/ diff --git a/.bazelrc b/.bazelrc new file mode 100644 index 00000000000..50990b60936 --- /dev/null +++ b/.bazelrc @@ -0,0 +1,51 @@ +# Bazel doesn't need more than 200MB of memory for local build based on memory profiling: +# https://docs.bazel.build/versions/master/skylark/performance.html#memory-profiling +# The default JVM max heapsize is 1/4 of physical memory up to 32GB which could be large +# enough to consume all memory constrained by cgroup in large host. +# Limiting JVM heapsize here to let it do GC more when approaching the limit to +# leave room for compiler/linker. +# The limit here is chosen heuristically to support both large VMs and small VMs with RBE. +# Startup options cannot be selected via config. +startup --host_jvm_args=-Xmx512m + +run --color=yes + +common --color=yes +common --curses=auto + +build --experimental_ui_max_stdouterr_bytes=10485760 + +build --show_progress_rate_limit=0 +build --show_timestamps +build --worker_verbose + +build --incompatible_strict_action_env + +# Enable --platforms API based cpu,compiler,crosstool_top selection; remove this in 7.0.0 as it's enabled by default +build --incompatible_enable_cc_toolchain_resolution + +# Pass PATH, CC, CXX variables from the environment. +build --action_env=CC --host_action_env=CC +build --action_env=CXX --host_action_env=CXX +build --action_env=PATH --host_action_env=PATH + +build --action_env=BAZEL_BUILD=1 + +# temporary fix for https://github.com/bazelbuild/bazel/issues/12905 on macOS +build --features=-debug_prefix_map_pwd_is_dot + +# Build flags. +build --action_env=BUILD_NAME=kong-dev +build --action_env=INSTALL_DESTDIR=MANAGED +build --strip=never + +# Release flags +build:release --//:debug=false +build:release --action_env=BUILD_NAME=kong-dev +build:release --action_env=INSTALL_DESTDIR=/usr/local +build:release --copt="-g" +build:release --strip=never + +build --spawn_strategy=local + +build --action_env=GITHUB_TOKEN --host_action_env=GITHUB_TOKEN diff --git a/.bazelversion b/.bazelversion new file mode 100644 index 00000000000..dfda3e0b4f0 --- /dev/null +++ b/.bazelversion @@ -0,0 +1 @@ +6.1.0 diff --git a/.ci/luacov-stats-aggregator.lua b/.ci/luacov-stats-aggregator.lua new file mode 100644 index 00000000000..f64e4f9a779 --- /dev/null +++ b/.ci/luacov-stats-aggregator.lua @@ -0,0 +1,62 @@ +-- Aggregates stats from multiple luacov stat files.
+-- Example stats for a 12 lines file `my/file.lua` +-- that received hits on lines 3, 4, 9: +-- +-- ["my/file.lua"] = { +-- [3] = 1, +-- [4] = 3, +-- [9] = 2, +-- max = 12, +-- max_hits = 3 +-- } +-- + +local luacov_stats = require "luacov.stats" +local luacov_reporter = require "luacov.reporter" +local luacov_runner = require "luacov.runner" +local lfs = require "lfs" + + +-- load parameters +local params = {...} +local stats_folders_prefix = params[1] or "luacov-stats-out-" +local file_name = params[2] or "luacov.stats.out" +local strip_prefix = params[3] or "" +local base_path = "." + + +-- load stats from different folders named using the format: +-- luacov-stats-out-${timestamp} +local loaded_stats = {} +for folder in lfs.dir(base_path) do + if folder:find(stats_folders_prefix, 1, true) then + local stats_file = folder .. "/" .. file_name + local loaded = luacov_stats.load(stats_file) + if loaded then + loaded_stats[#loaded_stats + 1] = loaded + print("loading file: " .. stats_file) + end + end +end + + +-- aggregate +luacov_runner.load_config() +for _, stat_data in ipairs(loaded_stats) do + -- make all paths relative to ensure file keys have the same format + -- and avoid having separate counters for the same file + local rel_stat_data = {} + for f_name, data in pairs(stat_data) do + if f_name:sub(0, #strip_prefix) == strip_prefix then + f_name = f_name:sub(#strip_prefix + 1) + end + rel_stat_data[f_name] = data + end + + luacov_runner.data = rel_stat_data + luacov_runner.save_stats() +end + + +-- generate report +luacov_reporter.report() diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 50512dad137..9f7cd2201e2 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -16,7 +16,7 @@ // Use 'forwardPorts' to make a list of ports inside the container available locally. "forwardPorts": [8000, 8001, "db:5432"], - "postCreateCommand": "make dev", + "postCreateCommand": "make venv-dev", // Set *default* container specific settings.json values on container create. // "settings": {}, diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 0b69da3395d..48e238d3f9c 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -19,8 +19,8 @@ services: - ..:/workspace:cached # Uncomment the next line to use Docker from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker-compose for details. - - /var/run/docker.sock:/var/run/docker.sock - + - /var/run/docker.sock:/var/run/docker.sock + # Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust. cap_add: - SYS_PTRACE @@ -37,12 +37,12 @@ services: CRYPTO_DIR: /usr/local/kong # Overrides default command so things don't shut down after the process ends. - command: /bin/sh -c "while sleep 1000; do :; done" + command: /bin/sh -c "while sleep 1000; do :; done" # Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function. network_mode: service:db - # Use "forwardPorts" in **devcontainer.json** to forward an app port locally. + # Use "forwardPorts" in **devcontainer.json** to forward an app port locally. # (Adding the "ports" property to this file will not forward from a Codespace.) # Uncomment the next line to use a non-root user for all processes - See https://aka.ms/vscode-remote/containers/non-root for details. 
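-- Illustrative sketch: .ci/luacov-stats-aggregator.lua above documents the luacov stats
-- shape (per-file tables of line number -> hit count plus `max` and `max_hits`) and merges
-- several runs by loading each one and replaying it through luacov.runner's save_stats().
-- The hypothetical `merge` helper and sample tables below are not part of the patch; they
-- only illustrate, under the assumption that repeated save_stats() calls accumulate
-- per-line hits, what the combined table for a file ends up looking like.
local run1 = { ["my/file.lua"] = { [3] = 1, [4] = 3, [9] = 2, max = 12, max_hits = 3 } }
local run2 = { ["my/file.lua"] = { [4] = 1, [7] = 5, max = 12, max_hits = 5 } }

local function merge(a, b)
  local out = {}
  for _, run in ipairs({ a, b }) do
    for file, lines in pairs(run) do
      local acc = out[file] or { max = 0, max_hits = 0 }
      for k, v in pairs(lines) do
        if type(k) == "number" then
          acc[k] = (acc[k] or 0) + v                          -- hits for the same line add up
          if acc[k] > acc.max_hits then acc.max_hits = acc[k] end
        elseif k == "max" and v > acc.max then
          acc.max = v                                         -- keep the largest line count seen
        end
      end
      out[file] = acc
    end
  end
  return out
end

-- merge(run1, run2)["my/file.lua"] --> { [3] = 1, [4] = 4, [7] = 5, [9] = 2, max = 12, max_hits = 5 }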
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index fd76c149b9a..969c944eb5f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -4,7 +4,7 @@ body: - type: checkboxes attributes: label: Is there an existing issue for this? - description: Please search to see if an issue already exists for the bug you encountered. Make sure you upgrade to the latest version of Kong. + description: Please search to see if an issue already exists for the bug you encountered. Make sure you are also using the latest version of Kong. options: - label: I have searched the existing issues required: true @@ -12,7 +12,7 @@ body: attributes: label: Kong version (`$ kong version`) description: 'example: Kong 2.5' - placeholder: 'Please put the Kong Gateway version here.' + placeholder: 'Please provide the current Kong Gateway version you are using here.' validations: required: true - type: textarea @@ -40,7 +40,6 @@ body: 2. With this config... 3. Run '...' 4. See error... - render: markdown validations: required: false - type: textarea diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 3dc15d9267c..87022f12a3b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,5 +1,7 @@ blank_issues_enabled: true contact_links: + - name: Kong Gateway Open Source Community Pledge + url: https://github.com/Kong/kong/blob/master/COMMUNITY_PLEDGE.md - name: Feature Request url: https://github.com/Kong/kong/discussions/categories/ideas about: Propose your cool ideas and feature requests at the Kong discussion forum diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d83e17e39ad..ba036d07043 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,17 +2,25 @@ NOTE: Please read the CONTRIBUTING.md guidelines before submitting your patch, and ensure you followed them all: https://github.com/Kong/kong/blob/master/CONTRIBUTING.md#contributing + +Refer to the Kong Gateway Community Pledge to understand how we work +with the open source community: +https://github.com/Kong/kong/blob/master/COMMUNITY_PLEDGE.md --> ### Summary +### Checklist + +- [ ] The Pull Request has tests +- [ ] A changelog file has been created under `changelog/unreleased/kong` or `skip-changelog` label added on PR if changelog is unnecessary. [README.md](https://github.com/Kong/gateway-changelog/README.md) +- [ ] There is a user-facing docs PR against https://github.com/Kong/docs.konghq.com - PUT DOCS PR HERE + ### Full changelog * [Implement ...] -* [Add related tests] -* ... ### Issue reference diff --git a/.github/actions/build-cache-key/action.yml b/.github/actions/build-cache-key/action.yml new file mode 100644 index 00000000000..3edde92a342 --- /dev/null +++ b/.github/actions/build-cache-key/action.yml @@ -0,0 +1,62 @@ +name: Build Cache Key + +description: > + Generates a cache key suitable for save/restore of Kong builds. 
+ +inputs: + prefix: + description: 'String prefix applied to the build cache key' + required: false + default: 'build' + extra: + description: 'Additional values/file hashes to use in the cache key' + required: false + +outputs: + cache-key: + description: 'The generated cache key' + value: ${{ steps.cache-key.outputs.CACHE_KEY }} + +runs: + using: composite + steps: + - name: Generate cache key + id: cache-key + shell: bash + env: + PREFIX: ${{ inputs.prefix }} + EXTRA: ${{ inputs.extra }} + run: | + # please keep these sorted + FILE_HASHES=( + ${{ hashFiles('.bazelignore') }} + ${{ hashFiles('.bazelrc') }} + ${{ hashFiles('.bazelversion') }} + ${{ hashFiles('.github/actions/build-cache-key/**') }} + ${{ hashFiles('.github/workflows/build.yml') }} + ${{ hashFiles('.requirements') }} + ${{ hashFiles('BUILD.bazel') }} + ${{ hashFiles('WORKSPACE') }} + ${{ hashFiles('bin/kong') }} + ${{ hashFiles('bin/kong-health') }} + ${{ hashFiles('build/**') }} + ${{ hashFiles('kong-*.rockspec') }} + ${{ hashFiles('kong.conf.default') }} + ) + + if [[ -n ${EXTRA:-} ]]; then + readarray \ + -O "${#FILE_HASHES[@]}" \ + -t \ + FILE_HASHES \ + <<< "$EXTRA" + fi + + HASH=$(printf '%s\n' "${FILE_HASHES[@]}" \ + | grep -vE '^$' \ + | sort --stable --unique \ + | sha256sum - \ + | awk '{print $1}' + ) + + echo "CACHE_KEY=${PREFIX}::${HASH}" | tee -a $GITHUB_OUTPUT diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 5737055179c..dfd0e308618 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,5 +6,5 @@ updates: - package-ecosystem: "github-actions" directory: "/" schedule: - # Check for updates to GitHub Actions every weekday - interval: "daily" + # Check for updates to GitHub Actions every week + interval: "weekly" diff --git a/.github/labeler.yml b/.github/labeler.yml index f401408e9cf..7f6b10e3093 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -20,10 +20,14 @@ core/db/migrations: core/db: - any: ['kong/db/**/*', '!kong/db/migrations/**/*'] +changelog: +- CHANGELOG.md + core/docs: -- kong/autodoc/**/* -- ./**/*.md -- ./*.md +- any: ['**/*.md', '!CHANGELOG.md'] + +autodoc: +- 'autodoc/**/*' core/language/go: - kong/runloop/plugin_servers/* @@ -49,6 +53,10 @@ core/router: core/templates: - kong/templates/* +core/tracing: +- kong/tracing/**/* +- kong/pdk/tracing.lua + chore: - .github/**/* - .devcontainer/**/* @@ -107,21 +115,12 @@ plugins/key-auth: plugins/ldap-auth: - kong/plugins/ldap-auth/**/* -plugins/log-serializers: -- kong/plugins/log-serializers/**/* - plugins/loggly: - kong/plugins/loggly/**/* plugins/oauth2: - kong/plugins/oauth2/**/* -plugins/post-function: -- kong/plugins/post-function/**/* - -plugins/pre-function: -- kong/plugins/pre-function/**/* - plugins/prometheus: - kong/plugins/prometheus/**/* @@ -149,6 +148,10 @@ plugins/response-transformer: plugins/session: - kong/plugins/session/**/* +plugins/serverless-functions: +- kong/plugins/post-function/**/* +- kong/plugins/pre-function/**/* + plugins/statsd: - kong/plugins/statsd/**/* @@ -164,4 +167,30 @@ plugins/udp-log: plugins/zipkin: - kong/plugins/zipkin/**/* +plugins/opentelemetry: +- kong/plugins/opentelemetry/**/* + +schema-change-noteworthy: +- kong/db/schema/**/*.lua +- kong/**/schema.lua +- kong/plugins/**/daos.lua +- plugins-ee/**/daos.lua +- plugins-ee/**/schema.lua +- kong/db/dao/*.lua +- kong/enterprise_edition/redis/init.lua + +build/bazel: +- '**/*.bazel' +- '**/*.bzl' +- build/**/* +- WORKSPACE +- .bazelignore +- .bazelrc +- .bazelversion +- scripts/build-*.sh + 
+plugins/base_plugin.lua: +- kong/plugins/base_plugin.lua/**/* +plugins/log-serializers: +- kong/plugins/log-serializers/**/* diff --git a/.github/matrix-commitly.yml b/.github/matrix-commitly.yml new file mode 100644 index 00000000000..7685340597c --- /dev/null +++ b/.github/matrix-commitly.yml @@ -0,0 +1,24 @@ +# please see matrix-full.yml for meaning of each field +build-packages: +- label: ubuntu-22.04 + os: ubuntu-22.04 + package: deb + check-manifest-suite: ubuntu-22.04-amd64 + +build-images: +- label: ubuntu + base-image: ubuntu:22.04 + package: deb + artifact-from: ubuntu-22.04 + +smoke-tests: +- label: ubuntu + +scan-vulnerabilities: +- label: ubuntu + +release-packages: + +release-images: +- label: ubuntu + package: deb diff --git a/.github/matrix-full.yml b/.github/matrix-full.yml new file mode 100644 index 00000000000..8c957abb5f9 --- /dev/null +++ b/.github/matrix-full.yml @@ -0,0 +1,209 @@ +build-packages: +# label: used to distinguish artifacts for later use +# image: docker image name if the build is running inside a container +# package: package type +# package-type: the nfpm packaging target, //:kong_{package} target; only used when package is rpm +# bazel-args: additional bazel build flags +# check-manifest-suite: the check manifest suite as defined in scripts/explain_manifest/config.py + +# Ubuntu +- label: ubuntu-20.04 + image: ubuntu:20.04 + package: deb + check-manifest-suite: ubuntu-20.04-amd64 +- label: ubuntu-22.04 + package: deb + check-manifest-suite: ubuntu-22.04-amd64 + +# Debian +- label: debian-10 + image: debian:10 + package: deb + check-manifest-suite: debian-10-amd64 +- label: debian-11 + image: debian:11 + package: deb + check-manifest-suite: debian-11-amd64 + +# Alpine +- label: alpine + os: vars.RELEASE_RUNS_ON + package: apk + bazel-args: --platforms=//:alpine-crossbuild-x86_64 + check-manifest-suite: alpine-amd64 + +# CentOS +- label: centos-7 + os: vars.RELEASE_RUNS_ON + image: centos:7 + package: rpm + package-type: el7 + check-manifest-suite: el7-amd64 + +# RHEL +- label: rhel-7 + image: centos:7 + package: rpm + package-type: el7 + check-manifest-suite: el7-amd64 +- label: rhel-8 + image: rockylinux:8 + package: rpm + package-type: el8 + check-manifest-suite: el8-amd64 + + # Amazon Linux +- label: amazonlinux-2 + image: amazonlinux:2 + package: rpm + package-type: aws2 + check-manifest-suite: amazonlinux-2-amd64 +- label: amazonlinux-2023 + image: amazonlinux:2023 + package: rpm + package-type: aws2023 + check-manifest-suite: amazonlinux-2023-amd64 + +build-images: +# Only build images for the latest version of each major release.
+ +# label: used as compose docker image label ${github.sha}-${label} +# base-image: docker image to use as base +# package: package type +# artifact-from: label of build-packages to use +# artifact-from-alt: another label of build-packages to use for downloading package (to build multi-arch image) +# docker-platforms: comma separated list of docker buildx platforms to build for + +# Ubuntu +- label: ubuntu + base-image: ubuntu:22.04 + package: deb + artifact-from: ubuntu-22.04 + docker-platforms: linux/amd64 + +# Centos +- label: centos7 + base-image: centos:7 + package: rpm + package-distro: el7 + artifact-from: centos-7 + +- label: rhel7 + base-image: centos:7 + package: rpm + package-distro: el7 + artifact-from: rhel-7 + +# Alpine +- label: alpine + base-image: alpine:3.16 + package: apk + artifact-from: alpine +# Debian +- label: debian + base-image: debian:11-slim + package: deb + artifact-from: debian-11 + +# RHEL +- label: rhel + base-image: centos:7 + package: rpm + rpm_platform: el7 + artifact-from: rhel-7 + docker-platforms: linux/amd64 + +smoke-tests: +- label: ubuntu +- label: debian +- label: rhel +- label: alpine + +scan-vulnerabilities: +- label: ubuntu +- label: debian +- label: rhel +- label: alpine + +release-packages: +# Ubuntu +- label: ubuntu-20.04 + package: deb + artifact-from: ubuntu-20.04 + artifact-version: 20.04 + artifact-type: ubuntu + artifact: kong.amd64.deb +- label: ubuntu-22.04 + package: deb + artifact-from: ubuntu-22.04 + artifact-version: 22.04 + artifact-type: ubuntu + artifact: kong.amd64.deb + +# Debian +- label: debian-10 + package: deb + artifact-from: debian-10 + artifact-version: 10 + artifact-type: debian + artifact: kong.amd64.deb +- label: debian-11 + package: deb + artifact-from: debian-11 + artifact-version: 11 + artifact-type: debian + artifact: kong.amd64.deb + +# CentOS +- label: centos-7 + package: rpm + artifact-from: centos-7 + artifact-version: 7 + artifact-type: centos + artifact: kong.el7.amd64.rpm + +# RHEL +- label: rhel-7 + package: rpm + artifact-from: rhel-7 + artifact-version: 7 + artifact-type: rhel + artifact: kong.el7.amd64.rpm +- label: rhel-8 + package: rpm + artifact-from: rhel-8 + artifact-version: 8 + artifact-type: rhel + artifact: kong.el8.amd64.rpm + +# Amazon Linux +- label: amazonlinux-2 + package: rpm + artifact-from: amazonlinux-2 + artifact-version: 2 + artifact-type: amazonlinux + artifact: kong.aws2.amd64.rpm +- label: amazonlinux-2023 + package: rpm + artifact-from: amazonlinux-2023 + artifact-version: 2023 + artifact-type: amazonlinux + artifact: kong.aws2023.amd64.rpm + +# Alpine +- label: alpine + package: apk + artifact-from: alpine + artifact-type: alpine + artifact: kong.amd64.apk.tar.gz + +release-images: +- label: centos7 + package: rpm +- label: rhel7 + package: rpm +- label: alpine + package: apk +- label: ubuntu +- label: debian +- label: rhel diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index db9908d6de1..00000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,24 +0,0 @@ -# Configuration for probot-stale - https://github.com/probot/stale - -# Number of days of inactivity before an Issue or Pull Request becomes stale -daysUntilStale: 14 - -# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. -daysUntilClose: 7 - -onlyLabels: - - "pending author feedback" - -# Issues or Pull Requests with these labels will never be considered stale. 
Set to `[]` to disable -exemptLabels: - - pinned - - security - -# Label to use when marking as stale -staleLabel: stale - -# Comment to post when marking as stale. Set to `false` to disable -markComment: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank you - for your contributions. diff --git a/.github/workflows/auto-assignee.yml b/.github/workflows/auto-assignee.yml index 12fa2933c44..dcd8f1c4c34 100644 --- a/.github/workflows/auto-assignee.yml +++ b/.github/workflows/auto-assignee.yml @@ -8,5 +8,8 @@ jobs: assign-author: runs-on: ubuntu-latest steps: - - uses: toshimaru/auto-author-assign@2daaeb2988aef24bf37e636fe733f365c046aba0 + - name: assign-author + # ignore the pull requests opened from PR because token is not correct + if: github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]' + uses: toshimaru/auto-author-assign@c1ffd6f64e20f8f5f61f4620a1e5f0b0908790ef diff --git a/.github/workflows/autodocs.yml b/.github/workflows/autodocs.yml index 6a511ebfc3d..453a7d6c0b9 100644 --- a/.github/workflows/autodocs.yml +++ b/.github/workflows/autodocs.yml @@ -19,7 +19,7 @@ on: jobs: build: name: Build dependencies - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 env: DOWNLOAD_ROOT: $HOME/download-root @@ -32,10 +32,10 @@ jobs: echo "LD_LIBRARY_PATH=$INSTALL_ROOT/openssl/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV - name: Checkout Kong source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Lookup build cache - uses: actions/cache@v2 + uses: actions/cache@v3 id: cache-deps with: path: ${{ env.INSTALL_ROOT }} @@ -43,7 +43,7 @@ jobs: - name: Checkout kong-build-tools if: steps.cache-deps.outputs.cache-hit != 'true' || github.event.inputs.force_build == 'true' - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: repository: Kong/kong-build-tools path: kong-build-tools @@ -51,7 +51,7 @@ jobs: - name: Checkout go-pluginserver if: steps.cache-deps.outputs.cache-hit != 'true' || github.event.inputs.force_build == 'true' - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: repository: Kong/go-pluginserver path: go-pluginserver @@ -68,9 +68,9 @@ jobs: if: steps.cache-deps.outputs.cache-hit != 'true' || github.event.inputs.force_build == 'true' run: | source .ci/setup_env_github.sh - make dev + make venv-dev autodoc: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: [build] steps: - name: Set environment variables @@ -80,13 +80,13 @@ jobs: echo "LD_LIBRARY_PATH=$INSTALL_ROOT/openssl/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV - name: Checkout Kong source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: path: kong ref: ${{ github.event.inputs.source_branch }} - name: Checkout Kong Docs - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: repository: kong/docs.konghq.com path: docs.konghq.com @@ -94,7 +94,7 @@ jobs: ref: ${{ github.event.inputs.target_branch }} - name: Lookup build cache - uses: actions/cache@v2 + uses: actions/cache@v3 id: cache-deps with: path: ${{ env.INSTALL_ROOT }} @@ -114,7 +114,7 @@ jobs: run: | cd kong output="$(git branch --show-current)" - echo "::set-output name=name::$output" + echo "name=$output" >> $GITHUB_OUTPUT - name: Show Docs status run: | @@ -123,7 +123,7 @@ jobs: git checkout -b "autodocs-${{ steps.kong-branch.outputs.name }}" - name: Commit autodoc changes - uses: stefanzweifel/git-auto-commit-action@v4 + uses: stefanzweifel/git-auto-commit-action@v5 with: repository: 
"./docs.konghq.com" commit_message: "Autodocs update" diff --git a/.github/workflows/backport-fail-bot.yml b/.github/workflows/backport-fail-bot.yml new file mode 100644 index 00000000000..f8393da0352 --- /dev/null +++ b/.github/workflows/backport-fail-bot.yml @@ -0,0 +1,37 @@ +name: Forward failed backport alert to Slack + +on: + issue_comment: + types: [created] + +jobs: + check_comment: + runs-on: ubuntu-latest + if: github.event.issue.pull_request != null && contains(github.event.comment.body, 'To backport manually, run these commands in your terminal') + steps: + - name: Generate Slack Payload + id: generate-payload + uses: actions/github-script@v7 + with: + script: | + const slack_mapping = JSON.parse(process.env.SLACK_MAPPING); + const pr_url = "${{ github.event.issue.pull_request.html_url}}"; + const pr_author_github_id = "${{ github.event.issue.user.login }}" + const pr_author_slack_id = slack_mapping[pr_author_github_id]; + const author = (pr_author_slack_id ? `<@${pr_author_slack_id}>` : pr_author_github_id); + const payload = { + text: `Backport failed in PR: ${pr_url}. Please check it ${author}.`, + channel: process.env.SLACK_CHANNEL, + }; + return JSON.stringify(payload); + result-encoding: string + env: + SLACK_CHANNEL: gateway-notifications + SLACK_MAPPING: "${{ vars.GH_ID_2_SLACK_ID_MAPPING }}" + + - name: Send Slack Message + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + with: + payload: ${{ steps.generate-payload.outputs.result }} + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_GATEWAY_NOTIFICATIONS_WEBHOOK }} diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml new file mode 100644 index 00000000000..7cc4b9c134a --- /dev/null +++ b/.github/workflows/backport.yml @@ -0,0 +1,24 @@ +name: Backport +on: + pull_request_target: + types: + - closed + - labeled + +jobs: + backport: + name: Backport + runs-on: ubuntu-latest + if: > + github.event.pull_request.merged + && ( + github.event.action == 'closed' + || ( + github.event.action == 'labeled' + && contains(github.event.label.name, 'backport') + ) + ) + steps: + - uses: tibdex/backport@9565281eda0731b1d20c4025c43339fb0a23812e # v2.0.4 + with: + github_token: ${{ secrets.PAT }} diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000000..456d7280913 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,81 @@ +name: Build +on: + workflow_call: + inputs: + relative-build-root: + required: true + type: string + outputs: + cache-key: + description: 'Computed cache key, used for restoring cache in other workflows' + value: ${{ jobs.build.outputs.cache-key }} + +env: + BUILD_ROOT: ${{ github.workspace }}/${{ inputs.relative-build-root }} + +jobs: + build: + name: Build dependencies + runs-on: ubuntu-22.04 + + outputs: + cache-key: ${{ steps.cache-key.outputs.cache-key }} + + steps: + - name: Checkout Kong source code + uses: actions/checkout@v4 + + - name: Generate cache key + id: cache-key + uses: ./.github/actions/build-cache-key + + - name: Lookup build cache + id: cache-deps + uses: actions/cache@v3 + with: + path: ${{ env.BUILD_ROOT }} + key: ${{ steps.cache-key.outputs.cache-key }} + + - name: Install packages + if: steps.cache-deps.outputs.cache-hit != 'true' + run: sudo apt update && sudo apt install libyaml-dev valgrind libprotobuf-dev + + - name: Build Kong + if: steps.cache-deps.outputs.cache-hit != 'true' + env: + GH_TOKEN: ${{ github.token }} + run: | + make build-kong + chmod +rw -R 
"$BUILD_ROOT/kong-dev" + + - name: Update PATH + run: | + echo "$BUILD_ROOT/kong-dev/bin" >> $GITHUB_PATH + echo "$BUILD_ROOT/kong-dev/openresty/nginx/sbin" >> $GITHUB_PATH + echo "$BUILD_ROOT/kong-dev/openresty/bin" >> $GITHUB_PATH + + - name: Debug (nginx) + run: | + echo nginx: $(which nginx) + nginx -V 2>&1 | sed -re 's/ --/\n--/g' + ldd $(which nginx) + + - name: Debug (luarocks) + run: | + echo luarocks: $(which luarocks) + luarocks --version + luarocks config + + - name: Bazel Outputs + uses: actions/upload-artifact@v3 + if: failure() + with: + name: bazel-outputs + path: | + bazel-out/_tmp/actions + retention-days: 3 + + - name: Build Dev Kong dependencies + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + make install-dev-rocks diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 1a7137a62eb..614d26d8c48 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1,68 +1,50 @@ name: Build & Test -on: [push, pull_request] +on: + pull_request: + paths-ignore: + # ignore markdown files (CHANGELOG.md, README.md, etc.) + - '**/*.md' + - '.github/workflows/release.yml' + - 'changelog/**' + - 'kong.conf.default' + push: + paths-ignore: + # ignore markdown files (CHANGELOG.md, README.md, etc.) + - '**/*.md' + # ignore PRs for the generated COPYRIGHT file + - 'COPYRIGHT' + branches: + - master + - release/* + - test-please/* + workflow_dispatch: + inputs: + coverage: + description: 'Coverage enabled' + required: false + type: boolean + default: false + +# cancel previous runs if new commits are pushed to the PR, but run for each commit on master +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +env: + BUILD_ROOT: ${{ github.workspace }}/bazel-bin/build + KONG_TEST_COVERAGE: ${{ inputs.coverage == true || github.event_name == 'schedule' }} jobs: build: - name: Build dependencies - runs-on: ubuntu-20.04 - - env: - DOWNLOAD_ROOT: $HOME/download-root - - steps: - - name: Set environment variables - run: | - echo "INSTALL_ROOT=$HOME/install-root" >> $GITHUB_ENV - echo "DOWNLOAD_ROOT=$HOME/download-root" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=$INSTALL_ROOT/openssl/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV - - name: Checkout Kong source code - uses: actions/checkout@v2 - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: ${{ env.INSTALL_ROOT }} - key: ${{ hashFiles('.ci/setup_env_github.sh') }}-${{ hashFiles('.github/workflows/build_and_test.yml') }}-${{ hashFiles('.requirements') }}-${{ hashFiles('kong-*.rockspec') }} - - - name: Checkout kong-build-tools - if: steps.cache-deps.outputs.cache-hit != 'true' - uses: actions/checkout@v2 - with: - repository: Kong/kong-build-tools - path: kong-build-tools - ref: master - - - name: Checkout go-pluginserver - if: steps.cache-deps.outputs.cache-hit != 'true' - uses: actions/checkout@v2 - with: - repository: Kong/go-pluginserver - path: go-pluginserver - - - name: Add to Path - if: steps.cache-deps.outputs.cache-hit != 'true' - run: echo "$INSTALL_ROOT/openssl/bin:$INSTALL_ROOT/openresty/nginx/sbin:$INSTALL_ROOT/openresty/bin:$INSTALL_ROOT/luarocks/bin:$GITHUB_WORKSPACE/kong-build-tools/openresty-build-tools" >> $GITHUB_PATH - - - name: Install packages - if: steps.cache-deps.outputs.cache-hit != 'true' - run: sudo apt update && sudo apt install libyaml-dev valgrind libprotobuf-dev - - - name: Build Kong dependencies - if: steps.cache-deps.outputs.cache-hit != 'true' - 
run: | - source .ci/setup_env_github.sh - make dev + uses: ./.github/workflows/build.yml + with: + relative-build-root: bazel-bin/build lint-doc-and-unit-tests: name: Lint, Doc and Unit tests - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: build - env: - KONG_TEST_PG_DATABASE: kong - KONG_TEST_PG_USER: kong - services: postgres: image: postgres:13 @@ -75,62 +57,78 @@ jobs: options: --health-cmd pg_isready --health-interval 5s --health-timeout 5s --health-retries 8 steps: - - name: Set environment variables - run: | - echo "INSTALL_ROOT=$HOME/install-root" >> $GITHUB_ENV - echo "DOWNLOAD_ROOT=$HOME/download-root" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=$INSTALL_ROOT/openssl/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV - - name: Checkout Kong source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Lookup build cache - uses: actions/cache@v2 id: cache-deps + uses: actions/cache@v3 with: - path: ${{ env.INSTALL_ROOT }} - key: ${{ hashFiles('.ci/setup_env_github.sh') }}-${{ hashFiles('.github/workflows/build_and_test.yml') }}-${{ hashFiles('.requirements') }}-${{ hashFiles('kong-*.rockspec') }} + path: ${{ env.BUILD_ROOT }} + key: ${{ needs.build.outputs.cache-key }} - - name: Add to Path - run: echo "$INSTALL_ROOT/openssl/bin:$INSTALL_ROOT/openresty/nginx/sbin:$INSTALL_ROOT/openresty/bin:$INSTALL_ROOT/luarocks/bin:$GITHUB_WORKSPACE/kong-build-tools/openresty-build-tools" >> $GITHUB_PATH + - name: Check test-helpers doc generation + run: | + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh + pushd ./spec && ldoc . - name: Check autodoc generation run: | - eval `luarocks path` + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh scripts/autodoc - - name: Check Admin API definition generation + - name: Lint Lua code run: | - eval `luarocks path` - scripts/gen-admin-api-def.sh + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh + make lint - - name: Lint Lua code + - name: Validate rockspec file run: | - eval `luarocks path` - luacheck -q . 
+ source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh + scripts/validate-rockspec + + - name: Check spec file misspelling + run: | + scripts/check_spec_files_spelling.sh + + - name: Check labeler configuration + run: scripts/check-labeler.pl .github/labeler.yml - name: Unit tests + env: + KONG_TEST_PG_DATABASE: kong + KONG_TEST_PG_USER: kong run: | - eval `luarocks path` - make dev - bin/busted -v -o htest spec/01-unit + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh + TEST_CMD="bin/busted -v -o htest spec/01-unit" + if [[ $KONG_TEST_COVERAGE = true ]]; then + TEST_CMD="$TEST_CMD --coverage" + fi + $TEST_CMD + + - name: Archive coverage stats file + uses: actions/upload-artifact@v3 + if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} + with: + name: luacov-stats-out-${{ github.job }}-${{ github.run_id }} + retention-days: 1 + path: | + luacov.stats.out + + - name: Get kernel message + if: failure() + run: | + sudo dmesg -T integration-tests-postgres: name: Postgres ${{ matrix.suite }} - ${{ matrix.split }} tests - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: build - strategy: + fail-fast: false matrix: suite: [integration, plugins] - split: [first (01-04), second (>= 05)] - - env: - KONG_TEST_PG_DATABASE: kong - KONG_TEST_PG_USER: kong - KONG_TEST_DATABASE: postgres - TEST_SUITE: ${{ matrix.suite }} - TEST_SPLIT: ${{ matrix.split }} + split: [first, second] services: postgres: @@ -144,7 +142,7 @@ jobs: options: --health-cmd pg_isready --health-interval 5s --health-timeout 5s --health-retries 8 grpcbin: - image: moul/grpcbin + image: kong/grpcbin ports: - 15002:9000 - 15003:9001 @@ -153,210 +151,237 @@ jobs: image: redis ports: - 6379:6379 - options: --entrypoint redis-server + - 6380:6380 + options: >- + --name kong_redis zipkin: - image: openzipkin/zipkin:2.19 + image: openzipkin/zipkin:2 ports: - 9411:9411 steps: - - name: Set environment variables - run: | - echo "INSTALL_ROOT=$HOME/install-root" >> $GITHUB_ENV - echo "DOWNLOAD_ROOT=$HOME/download-root" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=$INSTALL_ROOT/openssl/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV - - name: Checkout Kong source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Lookup build cache - uses: actions/cache@v2 id: cache-deps + uses: actions/cache@v3 with: - path: ${{ env.INSTALL_ROOT }} - key: ${{ hashFiles('.ci/setup_env_github.sh') }}-${{ hashFiles('.github/workflows/build_and_test.yml') }}-${{ hashFiles('.requirements') }}-${{ hashFiles('kong-*.rockspec') }} - - - name: Add to Path - run: echo "$INSTALL_ROOT/openssl/bin:$INSTALL_ROOT/openresty/nginx/sbin:$INSTALL_ROOT/openresty/bin:$INSTALL_ROOT/luarocks/bin:$GITHUB_WORKSPACE/kong-build-tools/openresty-build-tools:$INSTALL_ROOT/go-pluginserver" >> $GITHUB_PATH + path: ${{ env.BUILD_ROOT }} + key: ${{ needs.build.outputs.cache-key }} - name: Add gRPC test host names run: | echo "127.0.0.1 grpcs_1.test" | sudo tee -a /etc/hosts echo "127.0.0.1 grpcs_2.test" | sudo tee -a /etc/hosts - - name: Tests + - name: Install AWS SAM cli tool + if: ${{ matrix.suite == 'plugins' }} run: | - eval `luarocks path` - make dev - .ci/run_tests.sh - - integration-tests-dbless: - name: DB-less integration tests - runs-on: ubuntu-20.04 - needs: build - - env: - KONG_TEST_PG_DATABASE: kong - KONG_TEST_PG_USER: kong - KONG_TEST_DATABASE: 'off' - TEST_SUITE: dbless + curl -L -s -o /tmp/aws-sam-cli.zip https://github.com/aws/aws-sam-cli/releases/latest/download/aws-sam-cli-linux-x86_64.zip + unzip -o /tmp/aws-sam-cli.zip -d /tmp/aws-sam-cli + 
sudo /tmp/aws-sam-cli/install --update - services: - grpcbin: - image: moul/grpcbin - ports: - - 15002:9000 - - 15003:9001 - - steps: - - name: Set environment variables + - name: Update PATH run: | - echo "INSTALL_ROOT=$HOME/install-root" >> $GITHUB_ENV - echo "DOWNLOAD_ROOT=$HOME/download-root" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=$INSTALL_ROOT/openssl/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV - - - name: Checkout Kong source code - uses: actions/checkout@v2 + echo "$BUILD_ROOT/kong-dev/bin" >> $GITHUB_PATH + echo "$BUILD_ROOT/kong-dev/openresty/nginx/sbin" >> $GITHUB_PATH + echo "$BUILD_ROOT/kong-dev/openresty/bin" >> $GITHUB_PATH - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: ${{ env.INSTALL_ROOT }} - key: ${{ hashFiles('.ci/setup_env_github.sh') }}-${{ hashFiles('.github/workflows/build_and_test.yml') }}-${{ hashFiles('.requirements') }}-${{ hashFiles('kong-*.rockspec') }} + - name: Debug (nginx) + run: | + echo nginx: $(which nginx) + nginx -V 2>&1 | sed -re 's/ --/\n--/g' + ldd $(which nginx) - - name: Add to Path - run: echo "$INSTALL_ROOT/openssl/bin:$INSTALL_ROOT/openresty/nginx/sbin:$INSTALL_ROOT/openresty/bin:$INSTALL_ROOT/luarocks/bin:$GITHUB_WORKSPACE/kong-build-tools/openresty-build-tools:$INSTALL_ROOT/go-pluginserver" >> $GITHUB_PATH + - name: Debug (luarocks) + run: | + echo luarocks: $(which luarocks) + luarocks --version + luarocks config - - name: Add gRPC test host names + - name: Tune up postgres max_connections run: | - echo "127.0.0.1 grpcs_1.test" | sudo tee -a /etc/hosts - echo "127.0.0.1 grpcs_2.test" | sudo tee -a /etc/hosts + # arm64 runners may use more connections due to more worker cores + psql -hlocalhost -Ukong kong -tAc 'alter system set max_connections = 5000;' - name: Tests + env: + KONG_TEST_PG_DATABASE: kong + KONG_TEST_PG_USER: kong + KONG_TEST_DATABASE: postgres + KONG_SPEC_TEST_GRPCBIN_PORT: "15002" + KONG_SPEC_TEST_GRPCBIN_SSL_PORT: "15003" + KONG_SPEC_TEST_OTELCOL_FILE_EXPORTER_PATH: ${{ github.workspace }}/tmp/otel/file_exporter.json + TEST_SUITE: ${{ matrix.suite }} + TEST_SPLIT: ${{ matrix.split }} run: | - eval `luarocks path` - make dev + make venv-dev # required to install other dependencies like bin/grpcurl + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh .ci/run_tests.sh - integration-tests-cassandra: - name: C* ${{ matrix.cassandra_version }} ${{ matrix.suite }} - ${{ matrix.split }} tests - runs-on: ubuntu-20.04 - needs: build + - name: Archive coverage stats file + uses: actions/upload-artifact@v3 + if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} + with: + name: luacov-stats-out-${{ github.job }}-${{ github.run_id }}-${{ matrix.suite }}-${{ contains(matrix.split, 'first') && '1' || '2' }} + retention-days: 1 + path: | + luacov.stats.out - strategy: - matrix: - suite: [integration, plugins] - cassandra_version: [3] - split: [first (01-04), second (>= 05)] + - name: Get kernel message + if: failure() + run: | + sudo dmesg -T - env: - KONG_TEST_DATABASE: cassandra - TEST_SUITE: ${{ matrix.suite }} - TEST_SPLIT: ${{ matrix.split }} + integration-tests-dbless: + name: DB-less integration tests + runs-on: ubuntu-22.04 + needs: build services: - cassandra: - image: cassandra:${{ matrix.cassandra_version }} - ports: - - 7199:7199 - - 7000:7000 - - 9160:9160 - - 9042:9042 - options: --health-cmd "cqlsh -e 'describe cluster'" --health-interval 5s --health-timeout 5s --health-retries 8 - grpcbin: - image: moul/grpcbin + image: kong/grpcbin ports: - 15002:9000 - 15003:9001 - 
redis: - image: redis - ports: - - 6379:6379 - options: --entrypoint redis-server - - zipkin: - image: openzipkin/zipkin:2.19 - ports: - - 9411:9411 - steps: - - name: Set environment variables - run: | - echo "INSTALL_ROOT=$HOME/install-root" >> $GITHUB_ENV - echo "DOWNLOAD_ROOT=$HOME/download-root" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=$INSTALL_ROOT/openssl/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV - - name: Checkout Kong source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Lookup build cache - uses: actions/cache@v2 id: cache-deps + uses: actions/cache@v3 with: - path: ${{ env.INSTALL_ROOT }} - key: ${{ hashFiles('.ci/setup_env_github.sh') }}-${{ hashFiles('.github/workflows/build_and_test.yml') }}-${{ hashFiles('.requirements') }}-${{ hashFiles('kong-*.rockspec') }} - - - name: Add to Path - run: echo "$INSTALL_ROOT/openssl/bin:$INSTALL_ROOT/openresty/nginx/sbin:$INSTALL_ROOT/openresty/bin:$INSTALL_ROOT/luarocks/bin:$GITHUB_WORKSPACE/kong-build-tools/openresty-build-tools:$INSTALL_ROOT/go-pluginserver" >> $GITHUB_PATH + path: ${{ env.BUILD_ROOT }} + key: ${{ needs.build.outputs.cache-key }} - name: Add gRPC test host names run: | echo "127.0.0.1 grpcs_1.test" | sudo tee -a /etc/hosts echo "127.0.0.1 grpcs_2.test" | sudo tee -a /etc/hosts + - name: Run OpenTelemetry Collector + run: | + mkdir -p ${{ github.workspace }}/tmp/otel + touch ${{ github.workspace }}/tmp/otel/file_exporter.json + sudo chmod 777 -R ${{ github.workspace }}/tmp/otel + docker run -p 4317:4317 -p 4318:4318 -p 55679:55679 \ + -v ${{ github.workspace }}/spec/fixtures/opentelemetry/otelcol.yaml:/etc/otel-collector-config.yaml \ + -v ${{ github.workspace }}/tmp/otel:/etc/otel \ + --name opentelemetry-collector -d \ + otel/opentelemetry-collector-contrib:0.52.0 \ + --config=/etc/otel-collector-config.yaml + sleep 2 + docker logs opentelemetry-collector + - name: Tests + env: + KONG_TEST_PG_DATABASE: kong + KONG_TEST_PG_USER: kong + KONG_TEST_DATABASE: 'off' + KONG_SPEC_TEST_GRPCBIN_PORT: "15002" + KONG_SPEC_TEST_GRPCBIN_SSL_PORT: "15003" + KONG_SPEC_TEST_OTELCOL_FILE_EXPORTER_PATH: ${{ github.workspace }}/tmp/otel/file_exporter.json + TEST_SUITE: dbless run: | - eval `luarocks path` - make dev + make venv-dev # required to install other dependencies like bin/grpcurl + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh .ci/run_tests.sh + - name: Archive coverage stats file + uses: actions/upload-artifact@v3 + if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} + with: + name: luacov-stats-out-${{ github.job }}-${{ github.run_id }} + retention-days: 1 + path: | + luacov.stats.out + + - name: Get kernel message + if: failure() + run: | + sudo dmesg -T + pdk-tests: name: PDK tests - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: build - env: - TEST_SUITE: pdk - steps: - - name: Set environment variables - run: | - echo "INSTALL_ROOT=$HOME/install-root" >> $GITHUB_ENV - echo "DOWNLOAD_ROOT=$HOME/download-root" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=$INSTALL_ROOT/openssl/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV - - name: Checkout Kong source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Lookup build cache - uses: actions/cache@v2 id: cache-deps + uses: actions/cache@v3 with: - path: ${{ env.INSTALL_ROOT }} - key: ${{ hashFiles('.ci/setup_env_github.sh') }}-${{ hashFiles('.github/workflows/build_and_test.yml') }}-${{ hashFiles('.requirements') }}-${{ hashFiles('kong-*.rockspec') }} - - - name: Add to Path - run: echo 
"$INSTALL_ROOT/openssl/bin:$INSTALL_ROOT/openresty/nginx/sbin:$INSTALL_ROOT/openresty/bin:$INSTALL_ROOT/luarocks/bin:$GITHUB_WORKSPACE/kong-build-tools/openresty-build-tools:$DOWNLOAD_ROOT/cpanm" >> $GITHUB_PATH + path: ${{ env.BUILD_ROOT }} + key: ${{ needs.build.outputs.cache-key }} - name: Install Test::Nginx run: | - CPAN_DOWNLOAD=$DOWNLOAD_ROOT/cpanm + CPAN_DOWNLOAD=./cpanm mkdir -p $CPAN_DOWNLOAD curl -o $CPAN_DOWNLOAD/cpanm https://cpanmin.us chmod +x $CPAN_DOWNLOAD/cpanm echo "Installing CPAN dependencies..." - cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - cpanm --notest Test::Nginx + $CPAN_DOWNLOAD/cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + $CPAN_DOWNLOAD/cpanm --notest Test::Nginx - name: Tests + env: + TEST_SUITE: pdk run: | - eval `luarocks path` - make dev - + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh + if [[ $KONG_TEST_COVERAGE = true ]]; then + export PDK_LUACOV=1 + fi eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) .ci/run_tests.sh + + - name: Archive coverage stats file + uses: actions/upload-artifact@v3 + if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} + with: + name: luacov-stats-out-${{ github.job }}-${{ github.run_id }} + retention-days: 1 + path: | + luacov.stats.out + + - name: Get kernel message + if: failure() + run: | + sudo dmesg -T + + aggregator: + needs: [lint-doc-and-unit-tests,pdk-tests,integration-tests-postgres,integration-tests-dbless] + name: Luacov stats aggregator + if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} + runs-on: ubuntu-22.04 + + steps: + - name: Checkout source code + uses: actions/checkout@v4 + + - name: Install requirements + run: | + sudo apt-get update && sudo apt-get install -y luarocks + sudo luarocks install luacov + sudo luarocks install luafilesystem + + # Download all archived coverage stats files + - uses: actions/download-artifact@v3 + + - name: Stats aggregation + shell: bash + run: | + lua .ci/luacov-stats-aggregator.lua "luacov-stats-out-" "luacov.stats.out" ${{ github.workspace }}/ + # The following prints a report with each file sorted by coverage percentage, and the total coverage + printf "\n\nCoverage File\n\n" + awk -v RS='Coverage\n-+\n' 'NR>1{print $0}' luacov.report.out | grep -vE "^-|^$" > summary.out + cat summary.out | grep -v "^Total" | awk '{printf "%7d%% %s\n", $4, $1}' | sort -n + cat summary.out | grep "^Total" | awk '{printf "%7d%% %s\n", $4, $1}' diff --git a/.github/workflows/buildifier.yml b/.github/workflows/buildifier.yml new file mode 100644 index 00000000000..85d3aaab0c2 --- /dev/null +++ b/.github/workflows/buildifier.yml @@ -0,0 +1,55 @@ +name: Buildifier + +on: + pull_request: + paths: + - '**/*.bzl' + - '**/*.bazel' + - 'BUILD*' + - 'WORKSPACE*' + push: + paths: + - '**/*.bzl' + - '**/*.bazel' + - 'BUILD*' + - 'WORKSPACE*' + branches: + - master + - release/* + +jobs: + + autoformat: + name: Auto-format and Check + runs-on: ubuntu-22.04 + + steps: + - name: Check out code + uses: actions/checkout@v4 + + - name: Install Dependencies + run: | + sudo wget -O /bin/buildifier https://github.com/bazelbuild/buildtools/releases/download/5.1.0/buildifier-linux-amd64 + sudo chmod +x /bin/buildifier + + - name: Run buildifier + run: | + buildifier -mode=fix $(find . 
-name 'BUILD*' -o -name 'WORKSPACE*' -o -name '*.bzl' -o -name '*.bazel' -type f) + + - name: Verify buildifier + shell: bash + run: | + # From: https://backreference.org/2009/12/23/how-to-match-newlines-in-sed/ + # This is to leverage this workaround: + # https://github.com/actions/toolkit/issues/193#issuecomment-605394935 + function urlencode() { + sed ':begin;$!N;s/\n/%0A/;tbegin' + } + if [[ $(git diff-index --name-only HEAD --) ]]; then + for x in $(git diff-index --name-only HEAD --); do + echo "::error file=$x::Please run buildifier.%0A$(git diff $x | urlencode)" + done + echo "${{ github.repository }} is out of style. Please run buildifier." + exit 1 + fi + echo "${{ github.repository }} is formatted correctly." diff --git a/.github/workflows/cherry-picks.yml b/.github/workflows/cherry-picks.yml new file mode 100644 index 00000000000..6383c1d5fd6 --- /dev/null +++ b/.github/workflows/cherry-picks.yml @@ -0,0 +1,41 @@ +name: Cherry Pick to remote repository +on: + pull_request_target: + types: [closed, labeled] + issue_comment: + types: [created] +jobs: + cross-repo-cherrypick: + name: Cherry pick to remote repository + runs-on: ubuntu-latest + # Only run when pull request is merged, or labeled + # or when a comment containing `/cherry-pick` is created + # and the author is a member, collaborator or owner + if: > + ( + github.event_name == 'pull_request_target' && + github.event.pull_request.merged + ) || ( + github.event_name == 'issue_comment' && + github.event.issue.pull_request && + contains(fromJSON('["MEMBER", "COLLABORATOR", "OWNER"]'), github.event.comment.author_association) && + contains(github.event.comment.body, '/cherry-pick') + ) + steps: + - uses: actions/checkout@v4 + with: + token: ${{ secrets.CHERRY_PICK_TOKEN }} + - name: Create backport pull requests + uses: jschmid1/cross-repo-cherrypick-action@2366f50fd85e8966aa024a4dd6fbf70e7019d7e1 + with: + token: ${{ secrets.CHERRY_PICK_TOKEN }} + pull_title: '[cherry-pick -> ${target_branch}] ${pull_title}' + merge_commits: 'skip' + trigger_label: 'cherry-pick kong-ee' # trigger based on this label + pull_description: |- + Automated cherry-pick to `${target_branch}`, triggered by a label in https://github.com/${owner}/${repo}/pull/${pull_number} :robot:. + upstream_repo: 'kong/kong-ee' + branch_map: |- + { + "master": "master" + } diff --git a/.github/workflows/community-stale.yml b/.github/workflows/community-stale.yml new file mode 100644 index 00000000000..395aa82978e --- /dev/null +++ b/.github/workflows/community-stale.yml @@ -0,0 +1,53 @@ +name: Close inactive issues +on: + schedule: + - cron: "30 1 * * *" + +jobs: + close-issues: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v8 + with: + days-before-stale: 14 + days-before-close: 7 + only-labels: "pending author feedback" + exempt-pr-labels: "pinned,security" + exempt-issue-labels: "pinned,security" + stale-issue-label: "stale" + stale-issue-message: "This issue is marked as stale because it has been open for 14 days with no activity." + close-issue-message: | + Dear contributor, + + We are automatically closing this issue because it has not seen any activity for three weeks. + We're sorry that your issue could not be resolved. If any new information comes up that could + help resolving it, please feel free to reopen it. + + Your contribution is greatly appreciated! 
+ + Please have a look at + [our pledge to the community](https://github.com/Kong/kong/blob/master/COMMUNITY_PLEDGE.md) + for more information. + + Sincerely, + Your Kong Gateway team + stale-pr-message: "This PR is marked as stale because it has been open for 14 days with no activity." + close-pr-message: | + Dear contributor, + + We are automatically closing this pull request because it has not seen any activity for three weeks. + We're sorry that we could not merge it. If you still want to pursue your patch, please feel free to + reopen it and address any remaining issues. + + Your contribution is greatly appreciated! + + Please have a look at + [our pledge to the community](https://github.com/Kong/kong/blob/master/COMMUNITY_PLEDGE.md) + for more information. + + Sincerely, + Your Kong Gateway team + repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/label-check.yml b/.github/workflows/label-check.yml new file mode 100644 index 00000000000..bfa8b67a798 --- /dev/null +++ b/.github/workflows/label-check.yml @@ -0,0 +1,16 @@ +name: Pull Request Label Checker +on: + pull_request: + types: [opened, edited, synchronize, labeled, unlabeled] +jobs: + check-labels: + name: prevent merge labels + runs-on: ubuntu-latest + + steps: + - name: do-not-merge label found + run: echo "do-not-merge label found, this PR will not be merged"; exit 1 + if: ${{ contains(github.event.*.labels.*.name, 'pr/do not merge') || contains(github.event.*.labels.*.name, 'DO NOT MERGE') }} + - name: backport master label found + run: echo "Please do not backport into master; instead, create a PR targeting master and backport from it."; exit 1 + if: ${{ contains(github.event.*.labels.*.name, 'backport master') }} diff --git a/.github/workflows/label-community-pr.yml b/.github/workflows/label-community-pr.yml new file mode 100644 index 00000000000..b1eb9d1fdda --- /dev/null +++ b/.github/workflows/label-community-pr.yml @@ -0,0 +1,34 @@ +name: Label community PRs + +on: + schedule: + - cron: '*/30 * * * *' + +permissions: + pull-requests: write + +jobs: + check_author: + runs-on: ubuntu-latest + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v4 + - name: Label Community PR + env: + GH_TOKEN: ${{ secrets.COMMUNITY_PRS_TOKEN }} + LABEL: "author/community" + BOTS: "team-gateway-bot app/dependabot" + run: | + set +e + for id in `gh pr list -S 'draft:false' -s 'open'|awk '{print $1}'` + do + name=`gh pr view $id --json author -q '.author.login'` + ret=`gh api orgs/Kong/members --paginate -q '.[].login'|grep "^${name}$"` + if [[ -z $ret && ! 
"${BOTS[@]}" =~ $name ]]; then + gh pr edit $id --add-label "${{ env.LABEL }}" + else + gh pr edit $id --remove-label "${{ env.LABEL }}" + fi + done diff --git a/.github/workflows/label-schema.yml b/.github/workflows/label-schema.yml new file mode 100644 index 00000000000..38af629d9aa --- /dev/null +++ b/.github/workflows/label-schema.yml @@ -0,0 +1,14 @@ +name: Pull Request Schema Labeler +on: + pull_request: + types: [opened, edited, synchronize, labeled, unlabeled] +jobs: + schema-change-labels: + if: "${{ contains(github.event.*.labels.*.name, 'schema-change-noteworthy') }}" + runs-on: ubuntu-latest + steps: + - name: Schema change label found + uses: rtCamp/action-slack-notify@v2 + continue-on-error: true + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_SCHEMA_CHANGE }} diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml index d102b8c96e4..4613569074b 100644 --- a/.github/workflows/label.yml +++ b/.github/workflows/label.yml @@ -17,6 +17,6 @@ jobs: pull-requests: write steps: - - uses: actions/labeler@v3.0.2 + - uses: actions/labeler@v4 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/package.yml b/.github/workflows/package.yml deleted file mode 100644 index 7a75989bf67..00000000000 --- a/.github/workflows/package.yml +++ /dev/null @@ -1,128 +0,0 @@ -name: Package & Smoke Test - -on: # yamllint disable-line rule:truthy - pull_request: - push: - branches: - - master - - next/* - - release/* - -env: - DOCKER_REPOSITORY: kong/kong-build-tools - -jobs: - package-and-test: - if: github.event_name == 'pull_request' - name: Build & Smoke Test Packages - runs-on: ubuntu-22.04 - - steps: - - name: Swap git with https - run: git config --global url."https://github".insteadOf git://github - - - name: Setup some environment variables - run: | - echo "KONG_SOURCE_LOCATION=$GITHUB_WORKSPACE/kong-src" >> $GITHUB_ENV - echo "KONG_BUILD_TOOLS_LOCATION=$GITHUB_WORKSPACE/kong-build-tools" >> $GITHUB_ENV - - - name: Checkout Kong source code - uses: actions/checkout@v3 - with: - path: ${{ env.KONG_SOURCE_LOCATION }} - submodules: recursive - token: ${{ secrets.GHA_KONG_BOT_READ_TOKEN }} - - - name: Setup kong-build-tools - run: | - pushd ${{ env.KONG_SOURCE_LOCATION }} - make setup-kong-build-tools - - - name: Setup package naming environment variables - run: | - grep -v '^#' ${{ env.KONG_SOURCE_LOCATION}}/.requirements >> $GITHUB_ENV - - - name: Package & Test - env: - GITHUB_TOKEN: ${{ secrets.GHA_KONG_BOT_READ_TOKEN }} - run: | - pushd ${{ env.KONG_SOURCE_LOCATION }} - make package/test/deb - - package-test-and-unofficial-release: - if: github.event_name == 'push' - name: Build & Smoke & Unofficial Release Packages - runs-on: ubuntu-22.04 - strategy: - matrix: - package_type: [deb, rpm, apk] - - steps: - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.GHA_DOCKERHUB_PUSH_USER }} - password: ${{ secrets.GHA_KONG_ORG_DOCKERHUB_PUSH_TOKEN }} - - - name: Swap git with https - run: git config --global url."https://github".insteadOf git://github - - - name: Setup directory environment variables - run: | - echo "KONG_SOURCE_LOCATION=$GITHUB_WORKSPACE/kong-src" >> $GITHUB_ENV - echo "KONG_BUILD_TOOLS_LOCATION=$GITHUB_WORKSPACE/kong-build-tools" >> $GITHUB_ENV - - - name: Checkout Kong source code - uses: actions/checkout@v3 - with: - path: ${{ env.KONG_SOURCE_LOCATION }} - submodules: recursive - token: ${{ secrets.GHA_KONG_BOT_READ_TOKEN }} - - - name: Setup kong-build-tools - run: | - pushd ${{ 
env.KONG_SOURCE_LOCATION }} - make setup-kong-build-tools - - - name: Setup package naming environment variables - run: | - grep -v '^#' ${{ env.KONG_SOURCE_LOCATION}}/.requirements >> $GITHUB_ENV - echo "DOCKER_RELEASE_REPOSITORY=kong/kong" >> $GITHUB_ENV - echo "KONG_TEST_CONTAINER_TAG=${GITHUB_REF_NAME##*/}-${{ matrix.package_type }}" >> $GITHUB_ENV - if [[ ${{matrix.package_type }} == "apk" ]]; then - echo "ADDITIONAL_TAG_LIST=${GITHUB_REF_NAME##*/}-alpine" >> $GITHUB_ENV - fi - if [[ ${{matrix.package_type }} == "deb" ]]; then - echo "ADDITIONAL_TAG_LIST=${GITHUB_REF_NAME##*/}-debian ${GITHUB_REF_NAME##*/} $GITHUB_SHA" >> $GITHUB_ENV - fi - - - name: Package & Test - env: - GITHUB_TOKEN: ${{ secrets.GHA_KONG_BOT_READ_TOKEN }} - run: | - pushd ${{ env.KONG_SOURCE_LOCATION }} - make package/test/${{ matrix.package_type }} - - - name: Push Docker Image - continue-on-error: true - env: - SKIP_TESTS: true - run: | - pushd ${{ env.KONG_SOURCE_LOCATION }} - make release/docker/${{ matrix.package_type }} - - - name: Store the package artifacts - continue-on-error: true - uses: actions/upload-artifact@v3 - with: - name: ${{ matrix.package_type }} - path: ${{ env.KONG_BUILD_TOOLS_LOCATION }}/output/* - - - name: Comment on commit - continue-on-error: true - uses: peter-evans/commit-comment@v2 - with: - token: ${{ secrets.GHA_COMMENT_TOKEN }} - body: | - Docker image avaialble ${{ env.DOCKER_RELEASE_REPOSITORY }}:${{ env.KONG_TEST_CONTAINER_TAG }} - Artifacts availabe https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} diff --git a/.github/workflows/perf.yml b/.github/workflows/perf.yml index f681199a397..f30b1b95a13 100644 --- a/.github/workflows/perf.yml +++ b/.github/workflows/perf.yml @@ -2,18 +2,91 @@ name: Performance Test on: pull_request: - issue_comment: schedule: # don't know the timezone but it's daily at least - cron: '0 7 * * *' env: - terraform_version: '1.1.2' + terraform_version: '1.2.4' + HAS_ACCESS_TO_GITHUB_TOKEN: ${{ github.event_name != 'pull_request' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]') }} + BUILD_ROOT: ${{ github.workspace }}/bazel-bin/build + + # only for pr + GHA_CACHE: ${{ github.event_name == 'pull_request' }} jobs: + build-packages: + name: Build dependencies + runs-on: ubuntu-22.04 + if: | + github.event_name == 'schedule' || + (github.event_name == 'pull_request' && startsWith(github.event.pull_request.title, 'perf(')) || + (github.event_name == 'issue_comment' && github.event.action == 'created' && + github.event.issue.pull_request && + contains('["OWNER", "COLLABORATOR", "MEMBER"]', github.event.comment.author_association) && + (startsWith(github.event.comment.body, '/perf') || startsWith(github.event.comment.body, '/flamegraph')) + ) + + outputs: + cache-key: ${{ steps.cache-key.outputs.cache-key }} + + steps: + - name: Checkout Kong source code + uses: actions/checkout@v4 + + - name: Generate cache key + id: cache-key + uses: ./.github/actions/build-cache-key + with: + prefix: perf + + - name: Lookup build cache + id: cache-deps + uses: actions/cache@v3 + with: + path: ${{ env.BUILD_ROOT }} + key: ${{ steps.cache-key.outputs.cache-key }} + + - name: Install packages + if: steps.cache-deps.outputs.cache-hit != 'true' + run: sudo apt update && sudo apt install libyaml-dev valgrind libprotobuf-dev + + - name: Build Kong + if: steps.cache-deps.outputs.cache-hit != 'true' + env: + GH_TOKEN: ${{ github.token }} + run: | + make build-kong + 
BUILD_PREFIX=$BUILD_ROOT/kong-dev + export PATH="$BUILD_PREFIX/bin:$BUILD_PREFIX/openresty/nginx/sbin:$BUILD_PREFIX/openresty/bin:$PATH" + chmod +rw -R $BUILD_PREFIX + nginx -V + ldd $(which nginx) + luarocks + + - name: Bazel Outputs + uses: actions/upload-artifact@v3 + if: failure() + with: + name: bazel-outputs + path: | + bazel-out/_tmp/actions + retention-days: 3 + + - name: Build Dev Kong dependencies + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + make install-dev-rocks + perf: name: RPS, latency and flamegraphs - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 + needs: build-packages + + permissions: + # required to send comment of graphs and results in the PR + pull-requests: write + if: | github.event_name == 'schedule' || (github.event_name == 'pull_request' && startsWith(github.event.pull_request.title, 'perf(')) || @@ -23,45 +96,37 @@ jobs: (startsWith(github.event.comment.body, '/perf') || startsWith(github.event.comment.body, '/flamegraph')) ) + # perf test can only run one at a time per repo for now + concurrency: + group: perf-ce + steps: + # set up mutex across CE and EE to avoid resource race + - name: Set up mutex + uses: ben-z/gh-action-mutex@9709ba4d8596ad4f9f8bbe8e0f626ae249b1b3ac # v1.0-alpha-6 + with: + repository: "Kong/kong-perf-mutex-lock" + branch: "gh-mutex" + repo-token: ${{ secrets.PAT }} + - name: Checkout Kong source code - uses: actions/checkout@v2 - # Fetch all history for all tags and branches + uses: actions/checkout@v4 with: + # Fetch all history for all tags and branches fetch-depth: 0 - - - name: Install OpenResty - run: | - openresty_version=$(cat .requirements | grep RESTY_VERSION= | cut -d= -f2) - sudo apt-get -y install --no-install-recommends wget gnupg ca-certificates - wget -O - https://openresty.org/package/pubkey.gpg | sudo apt-key add - - echo "deb http://openresty.org/package/ubuntu $(lsb_release -sc) main" | \ - sudo tee /etc/apt/sources.list.d/openresty.list - sudo apt-get update - sudo apt-get install "openresty=${openresty_version}*" "openresty-resty=${openresty_version}*" -y - sudo apt-mark hold openresty + - name: Load Cached Packages + id: cache-deps + if: env.GHA_CACHE == 'true' + uses: actions/cache@v3 + with: + path: ${{ env.BUILD_ROOT }} + key: ${{ needs.build-packages.outputs.cache-key }} - - name: Install Dependencies + - name: Install performance test Dependencies run: | - wget https://luarocks.org/releases/luarocks-3.7.0.tar.gz -O - |tar zxvf - - pushd luarocks-*/ - ./configure --with-lua=/usr/local/openresty/luajit/ \ - --lua-suffix=jit \ - --with-lua-include=/usr/local/openresty/luajit/include/luajit-2.1 - sudo make install - popd - - # just need the lua files to let all imports happy - # the CI won't actually run Kong locally - git clone https://github.com/kong/lua-kong-nginx-module /tmp/lua-kong-nginx-module - pushd /tmp/lua-kong-nginx-module - sudo make LUA_LIB_DIR=/usr/local/share/lua/5.1/ install - popd - # in Kong repository - sudo apt install libyaml-dev -y - sudo make dev + sudo apt update && sudo apt install inkscape -y # terraform! 
wget https://releases.hashicorp.com/terraform/${{ env.terraform_version }}/terraform_${{ env.terraform_version }}_linux_amd64.zip @@ -71,8 +136,8 @@ jobs: - name: Choose perf suites id: choose_perf run: | - suites=$(echo "${{ github.event.comment.body }}" | awk '{print $1}') - tags=$(echo "${{ github.event.comment.body }}" | awk '{print $2}') + suites="$(printf '%s' "${{ github.event.comment.body }}" | awk '{print $1}')" + tags="$(printf '%s' "${{ github.event.comment.body }}" | awk '{print $2}')" if [[ $suite == "/flamegraph" ]]; then suites="02-flamegraph" @@ -92,63 +157,137 @@ jobs: fi fi - echo ::set-output name=suites::"$suites" - echo ::set-output name=tags::"$tags" + echo "suites=$suites" >> $GITHUB_OUTPUT + echo "tags=$tags" >> $GITHUB_OUTPUT + + - uses: xt0rted/pull-request-comment-branch@d97294d304604fa98a2600a6e2f916a84b596dc7 # v1.4.1 + id: comment-branch + if: github.event_name == 'issue_comment' && github.event.action == 'created' + + - name: Find compared versions + id: compare_versions + run: | + pr_ref=$(echo "${{ github.event.pull_request.base.ref }}") + custom_vers="$(printf '%s' "${{ github.event.comment.body }}" | awk '{print $3}')" + + if [[ ! -z "${pr_ref}" ]]; then + vers="git:${{ github.head_ref }},git:${pr_ref}" + elif [[ ! -z "${custom_vers}" ]]; then + vers="${custom_vers}" + elif [[ ! -z "${{ github.event.comment.body }}" ]]; then + vers="git:${{ steps.comment-branch.outputs.head_ref}},git:${{ steps.comment-branch.outputs.base_ref}}" + else # is cron job/on master + vers="git:master,git:origin/master~10,git:origin/master~50" + fi + + echo $vers + + echo "vers=$vers" >> $GITHUB_OUTPUT + - name: Run Tests env: - PERF_TEST_VERSIONS: git:${{ github.sha }},git:master - PERF_TEST_METAL_PROJECT_ID: ${{ secrets.PERF_TEST_PACKET_PROJECT_ID }} - PERF_TEST_METAL_AUTH_TOKEN: ${{ secrets.PERF_TEST_PACKET_AUTH_TOKEN }} + PERF_TEST_VERSIONS: ${{ steps.compare_versions.outputs.vers }} PERF_TEST_DRIVER: terraform - timeout-minutes: 60 + PERF_TEST_TERRAFORM_PROVIDER: bring-your-own + PERF_TEST_BYO_KONG_IP: ${{ secrets.PERF_TEST_BYO_KONG_IP }} + PERF_TEST_BYO_WORKER_IP: ${{ secrets.PERF_TEST_BYO_WORKER_IP }} + PERF_TEST_BYO_SSH_USER: gha + PERF_TEST_USE_DAILY_IMAGE: true + PERF_TEST_DISABLE_EXEC_OUTPUT: true + timeout-minutes: 180 run: | + export PERF_TEST_BYO_SSH_KEY_PATH=$(pwd)/ssh_key + echo "${{ secrets.PERF_TEST_BYO_SSH_KEY }}" > ${PERF_TEST_BYO_SSH_KEY_PATH} + + chmod 600 ${PERF_TEST_BYO_SSH_KEY_PATH} + # setup tunnel for psql and admin port + ssh -o StrictHostKeyChecking=no -o TCPKeepAlive=yes -o ServerAliveInterval=10 \ + -o ExitOnForwardFailure=yes -o ConnectTimeout=5 \ + -L 15432:localhost:5432 -L 39001:localhost:39001 \ + -i ${PERF_TEST_BYO_SSH_KEY_PATH} \ + ${PERF_TEST_BYO_SSH_USER}@${PERF_TEST_BYO_KONG_IP} tail -f /dev/null & + sleep 5 + + sudo iptables -t nat -I OUTPUT -p tcp --dport 5432 -d ${PERF_TEST_BYO_KONG_IP} -j DNAT --to 127.0.0.1:15432 + sudo iptables -t nat -I OUTPUT -p tcp --dport 39001 -d ${PERF_TEST_BYO_KONG_IP} -j DNAT --to 127.0.0.1:39001 + + make venv-dev # required to install other dependencies like bin/grpcurl + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh for suite in ${{ steps.choose_perf.outputs.suites }}; do # Run each test individually, ngx.pipe doesn't like to be imported twice # maybe bin/busted --no-auto-insulate for f in $(find "spec/04-perf/$suite/" -type f); do - bin/busted -o gtest "$f" \ + bin/busted "$f" \ -t "${{ steps.choose_perf.outputs.tags }}" done done - + - name: Teardown # Note: by default each job has if: ${{ success() }} 
if: always() env: - PERF_TEST_VERSIONS: git:${{ github.sha }},git:master - PERF_TEST_METAL_PROJECT_ID: ${{ secrets.PERF_TEST_PACKET_PROJECT_ID }} - PERF_TEST_METAL_AUTH_TOKEN: ${{ secrets.PERF_TEST_PACKET_AUTH_TOKEN }} + PERF_TEST_VERSIONS: git:${{ github.sha }} PERF_TEST_DRIVER: terraform - PERF_TEST_TEARDOWN_ALL: "true" + PERF_TEST_TERRAFORM_PROVIDER: bring-your-own + PERF_TEST_BYO_KONG_IP: ${{ secrets.PERF_TEST_BYO_KONG_IP }} + PERF_TEST_BYO_WORKER_IP: ${{ secrets.PERF_TEST_BYO_WORKER_IP }} + PERF_TEST_BYO_SSH_USER: gha + PERF_TEST_TEARDOWN_ALL: true run: | - bin/busted -o gtest spec/04-perf/99-teardown/ + export PERF_TEST_BYO_SSH_KEY_PATH=$(pwd)/ssh_key + echo "${{ secrets.PERF_TEST_BYO_SSH_KEY }}" > ${PERF_TEST_BYO_SSH_KEY_PATH} - - name: Save results - uses: actions/upload-artifact@v2 + make venv-dev # required to install other dependencies like bin/grpcurl + source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh + bin/busted spec/04-perf/99-teardown/ + + rm -f ${PERF_TEST_BYO_SSH_KEY_PATH} + + - name: Generate high DPI graphs + if: always() + run: | + for i in $(ls output/*.svg); do + inkscape --export-area-drawing --export-png="${i%.*}.png" --export-dpi=300 -b FFFFFF $i + done + + - uses: actions/setup-python@v4 with: - name: rps-and-latency - path: | - output/result.txt - retention-days: 31 + python-version: '3.10' + cache: 'pip' - - name: Save flamegrpahs - uses: actions/upload-artifact@v2 + - name: Generate plots + if: always() + run: | + cwd=$(pwd) + cd spec/helpers/perf/charts/ + pip install -r requirements.txt + for i in $(ls ${cwd}/output/*.data.json); do + python ./charts.py $i -o "${cwd}/output/" + done + + - name: Save results + uses: actions/upload-artifact@v3 + if: always() with: - name: flamegraphs + name: perf-results path: | - output/*.svg + output/ + !output/**/*.log + retention-days: 31 - name: Save error logs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 + if: always() with: name: error_logs path: | - output/*.log + output/**/*.log retention-days: 31 - name: Output + if: always() id: output run: | if [[ "${{ steps.choose_perf.outputs.suites }}" =~ "02-flamegraph" ]]; then @@ -164,13 +303,22 @@ jobs: result="${result//$'\n'/'%0A'}" result="${result//$'\r'/'%0D'}" - echo ::set-output name=result::"$result" - + echo "result=$result" >> $GITHUB_OUTPUT + + - name: Upload charts + if: always() + id: charts + uses: devicons/public-upload-to-imgur@352cf5f2805c692539a96cfe49a09669e6fca88e # v2.2.2 + continue-on-error: true + with: + path: output/*.png + client_id: ${{ secrets.PERF_TEST_IMGUR_CLIENT_ID }} + - name: Comment if: | github.event_name == 'pull_request' || (github.event_name == 'issue_comment' && github.event.issue.pull_request) - uses: actions-ecosystem/action-create-comment@v1 + uses: actions-ecosystem/action-create-comment@e23bc59fbff7aac7f9044bd66c2dc0fe1286f80b # v1.0.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} body: | @@ -178,6 +326,8 @@ jobs: **Test Suite**: ${{ steps.choose_perf.outputs.suites }} (${{ steps.choose_perf.outputs.tags }}) + ${{ join(fromJSON(steps.charts.outputs.markdown_urls), ' ') }} +
Click to expand ``` @@ -188,4 +338,4 @@ jobs:
- [Download Artifacts](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}#artifacts) + [Download Artifacts](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}#artifacts) for detailed results and interactive SVG flamegraphs. diff --git a/.github/workflows/release-and-tests-fail-bot.yml b/.github/workflows/release-and-tests-fail-bot.yml new file mode 100644 index 00000000000..d651bef5290 --- /dev/null +++ b/.github/workflows/release-and-tests-fail-bot.yml @@ -0,0 +1,47 @@ +name: Notify Slack user on workflow failure + +on: + workflow_run: + workflows: ["Package & Release", "Build & Test"] + types: + - completed + branches: + - master + - release/* + - next/* + +jobs: + notify_failure: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'failure' && github.event.workflow_run.event != 'schedule' }} + steps: + - name: Generate Slack Payload + id: generate-payload + env: + SLACK_CHANNEL: gateway-notifications + SLACK_MAPPING: "${{ vars.GH_ID_2_SLACK_ID_MAPPING }}" + uses: actions/github-script@v7 + with: + script: | + const slack_mapping = JSON.parse(process.env.SLACK_MAPPING); + const repo_name = "${{ github.event.workflow_run.repository.full_name }}"; + const run_id = ${{ github.event.workflow_run.id }}; + const run_url = `https://github.com/${repo_name}/actions/runs/${run_id}`; + const workflow_name = "${{ github.event.workflow_run.name }}"; + const branch_name = "${{ github.event.workflow_run.head_branch }}"; + const actor_github_id = "${{ github.event.workflow_run.actor.login }}"; + const actor_slack_id = slack_mapping[actor_github_id]; + const actor = actor_slack_id ? `<@${actor_slack_id}>` : actor_github_id; + const payload = { + text: `Workflow “${workflow_name}” failed in repo: "${repo_name}", branch: "${branch_name}". Run URL: ${run_url}. Please check it ${actor} .`, + channel: process.env.SLACK_CHANNEL, + }; + return JSON.stringify(payload); + result-encoding: string + + - name: Send Slack Message + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + with: + payload: ${{ steps.generate-payload.outputs.result }} + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_GATEWAY_NOTIFICATIONS_WEBHOOK }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..517eb7e1b03 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,694 @@ +name: Package & Release + +# The workflow to build and release official Kong packages and images. +# +# TODO: +# Do not bump the version of actions/checkout to v4 before dropping rhel7 and amazonlinux2. + +on: # yamllint disable-line rule:truthy + pull_request: + paths-ignore: + - '**/*.md' + - '.github/workflows/build_and_test.yml' + - 'changelog/**' + - 'kong.conf.default' + schedule: + - cron: '0 0 * * *' + push: + branches: + - master + workflow_dispatch: + inputs: + official: + description: 'Official release?' + required: true + type: boolean + default: false + version: + description: 'Release version, e.g. `3.0.0.0-beta.2`' + required: true + type: string + +# `commit-ly` is a flag that indicates whether the build should be run per commit. 
+ +env: + # official release repo + DOCKER_REPOSITORY: kong/kong + PRERELEASE_DOCKER_REPOSITORY: kong/kong + FULL_RELEASE: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} + + # only for pr + GHA_CACHE: ${{ github.event_name == 'pull_request' }} + + HAS_ACCESS_TO_GITHUB_TOKEN: ${{ github.event_name != 'pull_request' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]') }} + + +jobs: + metadata: + name: Metadata + runs-on: ubuntu-22.04 + outputs: + kong-version: ${{ steps.build-info.outputs.kong-version }} + prerelease-docker-repository: ${{ env.PRERELEASE_DOCKER_REPOSITORY }} + docker-repository: ${{ steps.build-info.outputs.docker-repository }} + release-desc: ${{ steps.build-info.outputs.release-desc }} + release-label: ${{ steps.build-info.outputs.release-label || '' }} + deploy-environment: ${{ steps.build-info.outputs.deploy-environment }} + matrix: ${{ steps.build-info.outputs.matrix }} + arch: ${{ steps.build-info.outputs.arch }} + + steps: + - uses: actions/checkout@v3 + - name: Build Info + id: build-info + run: | + KONG_VERSION=$(bash scripts/grep-kong-version.sh) + echo "kong-version=$KONG_VERSION" >> $GITHUB_OUTPUT + + if [ "${{ github.event_name == 'schedule' }}" == "true" ]; then + echo "release-label=$(date -u +'%Y%m%d')" >> $GITHUB_OUTPUT + fi + + matrix_file=".github/matrix-commitly.yml" + if [ "$FULL_RELEASE" == "true" ]; then + matrix_file=".github/matrix-full.yml" + fi + + if [ "${{ github.event.inputs.official }}" == "true" ]; then + release_desc="$KONG_VERSION (official)" + echo "docker-repository=$DOCKER_REPOSITORY" >> $GITHUB_OUTPUT + echo "deploy-environment=release" >> $GITHUB_OUTPUT + else + release_desc="$KONG_VERSION (pre-release)" + echo "docker-repository=$PRERELEASE_DOCKER_REPOSITORY" >> $GITHUB_OUTPUT + fi + + echo "release-desc=$release_desc" >> $GITHUB_OUTPUT + + echo "matrix=$(yq -I=0 -o=json $matrix_file)" >> $GITHUB_OUTPUT + + cat $GITHUB_OUTPUT + + echo "### :package: Building and packaging for $release_desc" >> $GITHUB_STEP_SUMMARY + echo >> $GITHUB_STEP_SUMMARY + echo '- event_name: ${{ github.event_name }}' >> $GITHUB_STEP_SUMMARY + echo '- ref_name: ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY + echo '- inputs.version: ${{ github.event.inputs.version }}' >> $GITHUB_STEP_SUMMARY + echo >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + cat $GITHUB_OUTPUT >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + build-packages: + needs: metadata + name: Build & Package - ${{ matrix.label }} + environment: ${{ needs.metadata.outputs.deploy-environment }} + + strategy: + fail-fast: false + matrix: + include: "${{ fromJSON(needs.metadata.outputs.matrix)['build-packages'] }}" + + runs-on: ubuntu-22.04 + container: + image: ${{ matrix.image }} + options: --privileged + + steps: + - name: Early Rpm Setup + if: matrix.package == 'rpm' && matrix.image != '' + run: | + # tar/gzip is needed to restore git cache (if available) + yum install -y tar gzip which file zlib-devel + + - name: Early Deb in Container Setup + if: matrix.package == 'deb' && matrix.image != '' + run: | + # tar/gzip is needed to restore git cache (if available) + apt-get update + apt-get install -y git tar gzip file sudo + + - name: Cache Git + id: cache-git + if: (matrix.package == 'rpm' || matrix.image == 'debian:10') && matrix.image != '' + uses: actions/cache@v3 + with: + path: /usr/local/git + key: ${{ matrix.label }}-git-2.41.0 + + # el-7,8, amazonlinux-2,2023, debian-10 
doesn't have git 2.18+, so we need to install it manually + - name: Install newer Git + if: (matrix.package == 'rpm' || matrix.image == 'debian:10') && matrix.image != '' && steps.cache-git.outputs.cache-hit != 'true' + run: | + if which apt 2>/dev/null; then + apt update + apt install -y wget libz-dev libssl-dev libcurl4-gnutls-dev gettext make gcc autoconf sudo + else + yum update -y + yum groupinstall -y 'Development Tools' + yum install -y wget zlib-devel openssl-devel curl-devel expat-devel gettext-devel perl-CPAN perl-devel + fi + wget https://mirrors.edge.kernel.org/pub/software/scm/git/git-2.41.0.tar.gz + tar xf git-2.41.0.tar.gz + cd git-2.41.0 + + # https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/5948/diffs + if [[ ${{ matrix.image }} == "centos:7" ]]; then + echo 'CFLAGS=-std=gnu99' >> config.mak + fi + + make configure + ./configure --prefix=/usr/local/git + make -j$(nproc) + make install + + - name: Add Git to PATH + if: (matrix.package == 'rpm' || matrix.image == 'debian:10') && matrix.image != '' + run: | + echo "/usr/local/git/bin" >> $GITHUB_PATH + + - name: Debian Git dependencies + if: matrix.image == 'debian:10' + run: | + apt update + # dependencies for git + apt install -y wget libz-dev libssl-dev libcurl4-gnutls-dev sudo + + - name: Checkout Kong source code + uses: actions/checkout@v3 + + - name: Swap git with https + run: git config --global url."https://github".insteadOf git://github + + - name: Generate build cache key + id: cache-key + if: env.GHA_CACHE == 'true' + uses: ./.github/actions/build-cache-key + with: + prefix: ${{ matrix.label }}-build + extra: | + ${{ hashFiles('kong/**') }} + + - name: Cache Packages + id: cache-deps + if: env.GHA_CACHE == 'true' + uses: actions/cache@v3 + with: + path: bazel-bin/pkg + key: ${{ steps.cache-key.outputs.cache-key }} + + - name: Set .requirements into environment variables + run: | + grep -v '^#' .requirements >> $GITHUB_ENV + + - name: Setup Bazel + uses: bazelbuild/setup-bazelisk@95c9bf48d0c570bb3e28e57108f3450cd67c1a44 # v2.0.0 + + - name: Install Deb Dependencies + if: matrix.package == 'deb' && steps.cache-deps.outputs.cache-hit != 'true' + run: | + sudo apt-get update && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \ + automake \ + build-essential \ + curl \ + file \ + libyaml-dev \ + m4 \ + perl \ + pkg-config \ + unzip \ + zlib1g-dev + + - name: Install Rpm Dependencies + if: matrix.package == 'rpm' && matrix.image != '' + run: | + yum groupinstall -y 'Development Tools' + dnf config-manager --set-enabled powertools || true # enable devel packages on rockylinux:8 + dnf config-manager --set-enabled crb || true # enable devel packages on rockylinux:9 + yum install -y libyaml-devel zlib-devel + yum install -y cpanminus || (yum install -y perl && curl -L https://raw.githubusercontent.com/miyagawa/cpanminus/master/cpanm | perl - App::cpanminus) # amazonlinux2023 removed cpanminus + # required for openssl 3.x config + cpanm IPC/Cmd.pm + + - name: Build Kong dependencies + if: steps.cache-deps.outputs.cache-hit != 'true' + env: + GH_TOKEN: ${{ github.token }} + run: | + bazel build --config release //build:kong --verbose_failures ${{ matrix.bazel-args }} + + - name: Package Kong - ${{ matrix.package }} + if: matrix.package != 'rpm' && steps.cache-deps.outputs.cache-hit != 'true' + run: | + bazel build --config release :kong_${{ matrix.package }} --verbose_failures ${{ matrix.bazel-args }} + + - name: Package Kong - rpm + if: matrix.package == 'rpm' && steps.cache-deps.outputs.cache-hit != 
'true' + env: + RELEASE_SIGNING_GPG_KEY: ${{ secrets.RELEASE_SIGNING_GPG_KEY }} + NFPM_RPM_PASSPHRASE: ${{ secrets.RELEASE_SIGNING_GPG_KEY_PASSPHRASE }} + run: | + if [ -n "${RELEASE_SIGNING_GPG_KEY:-}" ]; then + RPM_SIGNING_KEY_FILE=$(mktemp) + echo "$RELEASE_SIGNING_GPG_KEY" > $RPM_SIGNING_KEY_FILE + export RPM_SIGNING_KEY_FILE=$RPM_SIGNING_KEY_FILE + fi + + bazel build --config release :kong_${{ matrix.package-type }} --action_env=RPM_SIGNING_KEY_FILE --action_env=NFPM_RPM_PASSPHRASE ${{ matrix.bazel-args }} + + - name: Bazel Debug Outputs + if: failure() + run: | + cat bazel-out/_tmp/actions/stderr-* + sudo dmesg || true + tail -n500 bazel-out/**/*/CMake.log || true + + - name: Upload artifact + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.label }}-packages + path: bazel-bin/pkg + retention-days: 3 + + verify-manifest-packages: + needs: [metadata, build-packages] + name: Verify Manifest - Package ${{ matrix.label }} + runs-on: ubuntu-22.04 + + strategy: + fail-fast: false + matrix: + include: "${{ fromJSON(needs.metadata.outputs.matrix)['build-packages'] }}" + + steps: + - uses: actions/checkout@v3 + + - name: Download artifact + uses: actions/download-artifact@v3 + with: + name: ${{ matrix.label }}-packages + path: bazel-bin/pkg + + - name: Install Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + cache: 'pip' # caching pip dependencies + + - name: Verify + run: | + cd scripts/explain_manifest + pip install -r requirements.txt + pkg=$(ls ../../bazel-bin/pkg/kong* |head -n1) + python ./main.py -f filelist.txt -p $pkg -o test.txt -s ${{ matrix.check-manifest-suite }} + + build-images: + name: Build Images - ${{ matrix.label }} + needs: [metadata, build-packages] + runs-on: ubuntu-22.04 + + permissions: + # create comments on commits for docker images needs the `write` permission + contents: write + + strategy: + fail-fast: false + matrix: + include: "${{ fromJSON(needs.metadata.outputs.matrix)['build-images'] }}" + + steps: + - uses: actions/checkout@v3 + + - name: Download artifact + uses: actions/download-artifact@v3 + with: + name: ${{ matrix.artifact-from }}-packages + path: bazel-bin/pkg + + - name: Download artifact (alt) + if: matrix.artifact-from-alt != '' + uses: actions/download-artifact@v3 + with: + name: ${{ matrix.artifact-from-alt }}-packages + path: bazel-bin/pkg + + - name: Login to Docker Hub + if: ${{ env.HAS_ACCESS_TO_GITHUB_TOKEN == 'true' }} + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v2.1.0 + with: + username: ${{ secrets.GHA_DOCKERHUB_PUSH_USER }} + password: ${{ secrets.GHA_KONG_ORG_DOCKERHUB_PUSH_TOKEN }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ needs.metadata.outputs.prerelease-docker-repository }} + tags: | + type=raw,${{ github.sha }}-${{ matrix.label }} + type=raw,enable=${{ matrix.label == 'ubuntu' }},${{ github.sha }} + + - name: Set up QEMU + if: matrix.docker-platforms != '' + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Set platforms + id: docker_platforms_arg + run: | + platforms="${{ matrix.docker-platforms }}" + if [[ -z "$platforms" ]]; then + platforms="linux/amd64" + fi + + echo "platforms=$platforms" + echo "platforms=$platforms" >> $GITHUB_OUTPUT + + - name: Set rpm platform + id: docker_rpm_platform_arg + if: matrix.package == 'rpm' + run: | + rpm_platform="${{ matrix.rpm_platform }}" + if [[ -z "$rpm_platform" ]]; then + rpm_platform="el9" + fi + + echo 
"rpm_platform=$rpm_platform" + echo "rpm_platform=$rpm_platform" >> $GITHUB_OUTPUT + + - name: Build Docker Image + uses: docker/build-push-action@v5 + with: + file: build/dockerfiles/${{ matrix.package }}.Dockerfile + context: . + push: ${{ env.HAS_ACCESS_TO_GITHUB_TOKEN == 'true' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: ${{ steps.docker_platforms_arg.outputs.platforms }} + build-args: | + KONG_BASE_IMAGE=${{ matrix.base-image }} + KONG_ARTIFACT_PATH=bazel-bin/pkg/ + RPM_PLATFORM=${{ steps.docker_rpm_platform_arg.outputs.rpm_platform }} + EE_PORTS=8002 8445 8003 8446 8004 8447 + + - name: Comment on commit + if: github.event_name == 'push' && matrix.label == 'ubuntu' + uses: peter-evans/commit-comment@5a6f8285b8f2e8376e41fe1b563db48e6cf78c09 # v3.0.0 + continue-on-error: true # TODO: temporary fix until the token is back + with: + token: ${{ secrets.GITHUB_TOKEN }} + body: | + ### Bazel Build + Docker image available `${{ needs.metadata.outputs.prerelease-docker-repository }}:${{ github.sha }}` + Artifacts available https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + + verify-manifest-images: + needs: [metadata, build-images] + name: Verify Manifest - Image ${{ matrix.label }} + runs-on: ubuntu-22.04 + if: github.event_name != 'pull_request' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]') + + strategy: + fail-fast: false + matrix: + include: "${{ fromJSON(needs.metadata.outputs.matrix)['build-images'] }}" + + steps: + - uses: actions/checkout@v4 + + - name: Install Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + cache: 'pip' # caching pip dependencies + + - name: Verify + run: | + cd scripts/explain_manifest + # docker image verify requires sudo to set correct permissions, so we + # also install deps for root + sudo -E pip install -r requirements.txt + IMAGE=${{ env.PRERELEASE_DOCKER_REPOSITORY }}:${{ github.sha }}-${{ matrix.label }} + + sudo -E python ./main.py --image $IMAGE -f docker_image_filelist.txt -s docker-image + + scan-images: + name: Scan Images - ${{ matrix.label }} + needs: [metadata, build-images] + runs-on: ubuntu-22.04 + if: |- + always() + && fromJSON(needs.metadata.outputs.matrix)['scan-vulnerabilities'] != '' + && needs.build-images.result == 'success' + && (github.event_name != 'pull_request' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]')) + strategy: + fail-fast: false + matrix: + include: "${{ fromJSON(needs.metadata.outputs.matrix)['scan-vulnerabilities'] }}" + env: + IMAGE: ${{ needs.metadata.outputs.prerelease-docker-repository }}:${{ github.sha }}-${{ matrix.label }} + steps: + - name: Install regctl + uses: regclient/actions/regctl-installer@main + + - name: Login to Docker Hub + if: ${{ env.HAS_ACCESS_TO_GITHUB_TOKEN }} + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v2.1.0 + with: + username: ${{ secrets.GHA_DOCKERHUB_PUSH_USER }} + password: ${{ secrets.GHA_KONG_ORG_DOCKERHUB_PUSH_TOKEN }} + + # TODO: Refactor matrix file to support and parse platforms specific to distro + # Workaround: Look for specific amd64 hardcooded architectures + - name: Parse Architecture Specific Image Manifest Digests + id: image_manifest_metadata + run: | + manifest_list_exists="$( + if regctl manifest get "${IMAGE}" --format raw-body --require-list -v panic &> /dev/null; then + echo true + else + echo false + fi + )" + echo 
"manifest_list_exists=$manifest_list_exists" + echo "manifest_list_exists=$manifest_list_exists" >> $GITHUB_OUTPUT + + amd64_sha="$(regctl image digest "${IMAGE}" --platform linux/amd64 || echo '')" + echo "amd64_sha=$amd64_sha" + echo "amd64_sha=$amd64_sha" >> $GITHUB_OUTPUT + + - name: Scan AMD64 Image digest + id: sbom_action_amd64 + if: steps.image_manifest_metadata.outputs.amd64_sha != '' + uses: Kong/public-shared-actions/security-actions/scan-docker-image@v1 + with: + asset_prefix: kong-${{ github.sha }}-${{ matrix.label }}-linux-amd64 + image: ${{ needs.metadata.outputs.prerelease-docker-repository }}:${{ github.sha }}-${{ matrix.label }} + + smoke-tests: + name: Smoke Tests - ${{ matrix.label }} + needs: [metadata, build-images] + runs-on: ubuntu-22.04 + if: |- + fromJSON(needs.metadata.outputs.matrix)['smoke-tests'] != '' + && (github.event_name != 'pull_request' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]')) + + # TODO: test packages + strategy: + fail-fast: false + matrix: + include: "${{ fromJSON(needs.metadata.outputs.matrix)['smoke-tests'] }}" + + services: + postgres: + image: postgres:13 + env: + POSTGRES_USER: kong + POSTGRES_DB: kong + POSTGRES_PASSWORD: kong + ports: + - "5432:5432" + options: --health-cmd pg_isready --health-interval 5s --health-timeout 5s --health-retries 8 + + env: + KONG_ADMIN_URI: http://localhost:8001 + KONG_ADMIN_HTTP2_URI: https://localhost:8444 + KONG_PROXY_URI: http://localhost:8000 + + steps: + - uses: actions/checkout@v4 + + - name: Login to Docker Hub + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v2.1.0 + with: + username: ${{ secrets.GHA_DOCKERHUB_PUSH_USER }} + password: ${{ secrets.GHA_KONG_ORG_DOCKERHUB_PUSH_TOKEN }} + + - name: Setup Kong instance + # always pull the latest image to ensure we're testing the latest version. + run: | + docker run \ + -p 8000:8000 -p 8001:8001 -p 8444:8444\ + -e KONG_PG_PASSWORD=kong \ + -e KONG_ADMIN_LISTEN="0.0.0.0:8001, 0.0.0.0:8444 ssl http2" \ + -e KONG_ANONYMOUS_REPORTS=off \ + --name kong \ + --restart always \ + --network=host -d \ + --pull always \ + ${{ env.PRERELEASE_DOCKER_REPOSITORY }}:${{ github.sha }}-${{ matrix.label }} \ + sh -c "kong migrations bootstrap && kong start" + sleep 3 + docker logs kong + + - name: Smoke Tests - Version Test + run: | + workflow_version="$( + echo '${{ steps.metadata.outputs.kong-version }}' \ + | sed -e 's@\.@\\\.@g' + )" + + # confirm workflow's version and built container version match with + # dots escaped, and end-line delimited + if ! docker exec kong kong version | grep -E "${workflow_version}$"; then + echo "Built container's 'kong version' didn't match workflow's." + echo "Ensure that versions in the meta.lua files are as expected." 
+ exit 1 + fi + + - name: Smoke Tests - Base Tests + env: + VERBOSE: ${{ runner.debug == '1' && '1' || '' }} + run: build/tests/01-base.sh + + - name: Smoke Tests - Admin API + env: + VERBOSE: ${{ runner.debug == '1' && '1' || '' }} + run: build/tests/02-admin-api.sh + + - name: Smoke Tests - HTTP2 Admin API + env: + VERBOSE: ${{ runner.debug == '1' && '1' || '' }} + run: build/tests/03-http2-admin-api.sh + + release-packages: + name: Release Packages - ${{ matrix.label }} - ${{ needs.metadata.outputs.release-desc }} + needs: [metadata, build-packages, build-images, smoke-tests] + runs-on: ubuntu-22.04 + if: fromJSON(needs.metadata.outputs.matrix)['release-packages'] != '' + timeout-minutes: 5 # PULP takes a while to publish + environment: release + + strategy: + # limit to 3 jobs at a time + max-parallel: 3 + fail-fast: false + matrix: + include: "${{ fromJSON(needs.metadata.outputs.matrix)['release-packages'] }}" + + steps: + - uses: actions/checkout@v4 + + - name: Download artifact + uses: actions/download-artifact@v3 + with: + name: ${{ matrix.artifact-from }}-packages + path: bazel-bin/pkg + + - name: Set package architecture + id: pkg-arch + run: | + arch='amd64' + echo "arch=$arch" + echo "arch=$arch" >> $GITHUB_OUTPUT + + - name: Upload Packages + env: + ARCHITECTURE: ${{ steps.pkg-arch.outputs.arch }} + OFFICIAL_RELEASE: ${{ github.event.inputs.official }} + ARTIFACT_VERSION: ${{ matrix.artifact-version }} + ARTIFACT_TYPE: ${{ matrix.artifact-type }} + ARTIFACT: ${{ matrix.artifact }} + INPUT_VERSION: ${{ github.event.inputs.version }} + PACKAGE_TYPE: ${{ matrix.package }} + KONG_RELEASE_LABEL: ${{ needs.metadata.outputs.release-label }} + VERBOSE: ${{ runner.debug == '1' && '1' || '' }} + CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }} + CLOUDSMITH_DRY_RUN: '' + IGNORE_CLOUDSMITH_FAILURES: ${{ vars.IGNORE_CLOUDSMITH_FAILURES }} + USE_CLOUDSMITH: ${{ vars.USE_CLOUDSMITH }} + run: | + sha256sum bazel-bin/pkg/* + + # set the version input as tags passed to release-scripts + # note: release-scripts rejects user tags if missing internal flag + # + # this can be a comma-separated list of tags to apply + if [[ "$OFFICIAL_RELEASE" == 'false' ]]; then + if echo "$INPUT_VERSION" | grep -qs -E 'rc|alpha|beta|nightly'; then + PACKAGE_TAGS="$INPUT_VERSION" + export PACKAGE_TAGS + fi + fi + + scripts/release-kong.sh + + release-images: + name: Release Images - ${{ matrix.label }} - ${{ needs.metadata.outputs.release-desc }} + needs: [metadata, build-images, smoke-tests] + runs-on: ubuntu-22.04 + if: github.repository_owner == 'Kong' && fromJSON(needs.metadata.outputs.matrix)['release-images'] != '' + + strategy: + # limit to 3 jobs at a time + max-parallel: 3 + fail-fast: false + matrix: + include: "${{ fromJSON(needs.metadata.outputs.matrix)['release-images'] }}" + + steps: + - name: Login to Docker Hub + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v2.1.0 + with: + username: ${{ secrets.GHA_DOCKERHUB_PUSH_USER }} + password: ${{ secrets.GHA_KONG_ORG_DOCKERHUB_PUSH_TOKEN }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ needs.metadata.outputs.docker-repository }} + sep-tags: " " + tags: | + type=raw,value=latest,enable=${{ matrix.label == 'ubuntu' }} + type=match,enable=${{ github.event_name == 'workflow_dispatch' }},pattern=\d.\d,value=${{ github.event.inputs.version }} + type=match,enable=${{ github.event_name == 'workflow_dispatch' && matrix.label == 'ubuntu' }},pattern=\d.\d,value=${{ github.event.inputs.version 
}},suffix= + type=raw,enable=${{ github.event_name == 'workflow_dispatch' }},${{ github.event.inputs.version }} + type=raw,enable=${{ github.event_name == 'workflow_dispatch' && matrix.label == 'ubuntu' }},${{ github.event.inputs.version }},suffix= + type=ref,event=branch + type=ref,enable=${{ matrix.label == 'ubuntu' }},event=branch,suffix= + type=ref,event=tag + type=ref,enable=${{ matrix.label == 'ubuntu' }},event=tag,suffix= + type=ref,event=pr + type=schedule,pattern=nightly + type=schedule,enable=${{ matrix.label == 'ubuntu' }},pattern=nightly,suffix= + type=schedule,pattern={{date 'YYYYMMDD'}} + type=schedule,enable=${{ matrix.label == 'ubuntu' }},pattern={{date 'YYYYMMDD'}},suffix= + flavor: | + latest=false + suffix=-${{ matrix.label }} + + - name: Install regctl + uses: regclient/actions/regctl-installer@b6614f5f56245066b533343a85f4109bdc38c8cc + + - name: Push Images + env: + TAGS: "${{ steps.meta.outputs.tags }}" + run: | + PRERELEASE_IMAGE=${{ env.PRERELEASE_DOCKER_REPOSITORY }}:${{ github.sha }}-${{ matrix.label }} + docker pull $PRERELEASE_IMAGE + for tag in $TAGS; do + regctl -v debug image copy $PRERELEASE_IMAGE $tag + done diff --git a/.gitignore b/.gitignore index 8ecd5806197..e5d2a13a8e9 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,7 @@ bin/grpcurl *.so *.bak + +bazel-* +bin/bazel +bin/h2client diff --git a/.requirements b/.requirements index 2dacfc1c3b5..3625afb2c7b 100644 --- a/.requirements +++ b/.requirements @@ -10,3 +10,9 @@ LIBYAML_VERSION=0.2.5 KONG_GO_PLUGINSERVER_VERSION=v0.6.1 KONG_BUILD_TOOLS_VERSION=4.40.1 KONG_NGINX_MODULE_BRANCH=0.2.1 + +PCRE=8.45 +OPENSSL=1.1.1o +OPENRESTY=1.19.9.1 +LUAROCKS=3.8.0 +LUA_KONG_NGINX_MODULE=6b2fa308e091e2daed2407dc38d54fbcd8fae768 # 0.2.1-sr1 diff --git a/.travis.yml b/.travis.yml index d5a0c1afe65..eba13869155 100644 --- a/.travis.yml +++ b/.travis.yml @@ -48,7 +48,7 @@ env: install: - source .ci/setup_env.sh - - make dev + - make venv-dev cache: apt: true diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 00000000000..632194b18c1 --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,239 @@ +load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") +load("@bazel_skylib//lib:selects.bzl", "selects") +load("//build/nfpm:rules.bzl", "nfpm_pkg") +load("//build/toolchain:managed_toolchain.bzl", "aarch64_glibc_distros") + +filegroup( + name = "srcs", + srcs = glob(["**"]), + visibility = ["//visibility:public"], +) + +filegroup( + name = "rockspec_srcs", + srcs = glob(["*.rockspec"]), + visibility = ["//visibility:public"], +) + +nfpm_env = { + "KONG_NAME": "kong", + "KONG_REPLACES_1": "kong-community-edition", + "KONG_REPLACES_2": "kong-enterprise-edition-fips", + "KONG_CONFLICTS_1": "kong-community-edition", + "KONG_CONFLICTS_2": "kong-enterprise-edition-fips", +} + +nfpm_pkg( + name = "kong_deb", + config = "//build:package/nfpm.yaml", + env = nfpm_env, + packager = "deb", + pkg_name = "kong", + visibility = ["//visibility:public"], +) + +nfpm_pkg( + name = "kong_apk", + config = "//build:package/nfpm.yaml", + env = nfpm_env, + packager = "apk", + pkg_name = "kong", + visibility = ["//visibility:public"], +) + +nfpm_pkg( + name = "kong_el8", + config = "//build:package/nfpm.yaml", + env = nfpm_env, + packager = "rpm", + pkg_name = "kong.el8", + visibility = ["//visibility:public"], +) + +nfpm_pkg( + name = "kong_el7", + config = "//build:package/nfpm.yaml", + env = nfpm_env, + extra_env = { + "RPM_EXTRA_DEPS": "hostname", + }, + packager = "rpm", + pkg_name = "kong.el7", + visibility = ["//visibility:public"], 
+) + +nfpm_pkg( + name = "kong_aws2", + config = "//build:package/nfpm.yaml", + env = nfpm_env, + extra_env = { + "RPM_EXTRA_DEPS": "/usr/sbin/useradd", + "RPM_EXTRA_DEPS_2": "/usr/sbin/groupadd", + }, + packager = "rpm", + pkg_name = "kong.aws2", + visibility = ["//visibility:public"], +) + +nfpm_pkg( + name = "kong_aws2023", + config = "//build:package/nfpm.yaml", + env = nfpm_env, + extra_env = { + "RPM_EXTRA_DEPS": "/usr/sbin/useradd", + "RPM_EXTRA_DEPS_2": "/usr/sbin/groupadd", + "RPM_EXTRA_DEPS_3": "libxcrypt-compat", + }, + packager = "rpm", + pkg_name = "kong.aws2023", + visibility = ["//visibility:public"], +) + +###### flags + +# --//:debug=true +bool_flag( + name = "debug", + build_setting_default = True, +) + +config_setting( + name = "debug_flag", + flag_values = { + ":debug": "true", + }, + visibility = ["//visibility:public"], +) + +config_setting( + name = "debug_linux_flag", + constraint_values = [ + "@platforms//os:linux", + ], + flag_values = { + ":debug": "true", + }, + visibility = ["//visibility:public"], +) + +# --//:skip_webui=false +bool_flag( + name = "skip_webui", + build_setting_default = False, +) + +config_setting( + name = "skip_webui_flags", + flag_values = { + ":skip_webui": "true", + }, + visibility = ["//visibility:public"], +) + +##### constraints, platforms and config_settings for cross-compile + +constraint_setting(name = "cross_build_setting") + +constraint_value( + name = "cross_build", + constraint_setting = ":cross_build_setting", +) + +# platform sets the constraint values based on user input (--platform=//:PLATFOTM) +platform( + name = "generic-crossbuild-x86_64", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:x86_64", + "//build/platforms/distro:generic", + ":cross_build", + ], +) + +platform( + name = "generic-crossbuild-aarch64", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:aarch64", + "//build/platforms/distro:generic", + ":cross_build", + ], +) + +# backward compatibility +alias( + name = "ubuntu-22.04-arm64", + actual = ":generic-crossbuild-aarch64", +) + +platform( + name = "alpine-crossbuild-x86_64", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:x86_64", + "//build/platforms/distro:alpine", + ":cross_build", + ], +) + +# backward compatibility +alias( + name = "alpine-x86_64", + actual = ":alpine-crossbuild-x86_64", +) + +platform( + name = "alpine-crossbuild-aarch64", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:aarch64", + "//build/platforms/distro:alpine", + ":cross_build", + ], +) + +[ + platform( + name = vendor + "-crossbuild-aarch64", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:aarch64", + "//build/platforms/distro:" + vendor, + ":cross_build", + ], + ) + for vendor in aarch64_glibc_distros +] + +# config_settings define a select() condition based on user-set constraint_values +# see https://bazel.build/docs/configurable-attributes +config_setting( + name = "aarch64-linux-anylibc-cross", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:aarch64", + ":cross_build", + ], + visibility = ["//visibility:public"], +) + +config_setting( + name = "x86_64-linux-musl-cross", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:x86_64", + "//build/platforms/distro:alpine", + ":cross_build", + ], + visibility = ["//visibility:public"], +) + +selects.config_setting_group( + # matches all cross build platforms + name = "any-cross", + match_any = [ + ":aarch64-linux-anylibc-cross", + 
":x86_64-linux-musl-cross", + ], + visibility = ["//visibility:public"], +) diff --git a/Makefile b/Makefile index f9a23c8bb9d..27832cfea24 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,9 @@ WIN_SCRIPTS = "bin/busted" "bin/kong" BUSTED_ARGS ?= -v TEST_CMD ?= bin/busted $(BUSTED_ARGS) +BUILD_NAME ?= kong-dev +BAZEL_ARGS ?= --verbose_failures --action_env=BUILD_NAME=$(BUILD_NAME) --//:skip_webui=true + ifeq ($(OS), darwin) OPENSSL_DIR ?= /usr/local/opt/openssl GRPCURL_OS ?= osx @@ -16,15 +19,26 @@ endif ifeq ($(MACHINE), aarch64) GRPCURL_MACHINE ?= arm64 +H2CLIENT_MACHINE ?= arm64 else GRPCURL_MACHINE ?= $(MACHINE) +H2CLIENT_MACHINE ?= $(MACHINE) +endif + +ifeq ($(MACHINE), aarch64) +BAZELISK_MACHINE ?= arm64 +else ifeq ($(MACHINE), x86_64) +BAZELISK_MACHINE ?= amd64 +else +BAZELISK_MACHINE ?= $(MACHINE) endif .PHONY: install dependencies dev remove grpcurl \ setup-ci setup-kong-build-tools \ lint test test-integration test-plugins test-all \ pdk-phase-check functional-tests \ - fix-windows release + fix-windows release \ + nightly-release release ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) KONG_SOURCE_LOCATION ?= $(ROOT_DIR) @@ -35,8 +49,18 @@ RESTY_OPENSSL_VERSION ?= `grep RESTY_OPENSSL_VERSION $(KONG_SOURCE_LOCATION)/.re RESTY_PCRE_VERSION ?= `grep RESTY_PCRE_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk -F"=" '{print $$2}'` KONG_BUILD_TOOLS ?= `grep KONG_BUILD_TOOLS_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk -F"=" '{print $$2}'` GRPCURL_VERSION ?= 1.8.5 +BAZLISK_VERSION ?= 1.18.0 OPENRESTY_PATCHES_BRANCH ?= master KONG_NGINX_MODULE_BRANCH ?= master +BAZEL := $(shell command -v bazel 2> /dev/null) +VENV = /dev/null # backward compatibility when no venv is built + +# Use x86_64 grpcurl v1.8.5 for Apple silicon chips +ifeq ($(GRPCURL_OS)_$(MACHINE)_$(GRPCURL_VERSION), osx_arm64_1.8.5) +GRPCURL_MACHINE = x86_64 +endif + +H2CLIENT_VERSION ?= 0.4.0 PACKAGE_TYPE ?= deb REPOSITORY_NAME ?= kong-${PACKAGE_TYPE} @@ -82,6 +106,21 @@ release-docker-images: KONG_SOURCE_LOCATION=${KONG_SOURCE_LOCATION} \ release-kong-docker-images +bin/bazel: + @curl -s -S -L \ + https://github.com/bazelbuild/bazelisk/releases/download/v$(BAZLISK_VERSION)/bazelisk-$(OS)-$(BAZELISK_MACHINE) -o bin/bazel + @chmod +x bin/bazel + +bin/grpcurl: + @curl -s -S -L \ + https://github.com/fullstorydev/grpcurl/releases/download/v$(GRPCURL_VERSION)/grpcurl_$(GRPCURL_VERSION)_$(GRPCURL_OS)_$(GRPCURL_MACHINE).tar.gz | tar xz -C bin; + @$(RM) bin/LICENSE + +bin/h2client: + @curl -s -S -L \ + https://github.com/Kong/h2client/releases/download/v$(H2CLIENT_VERSION)/h2client_$(H2CLIENT_VERSION)_$(OS)_$(H2CLIENT_MACHINE).tar.gz | tar xz -C bin; + @$(RM) bin/README.md + release: ifeq ($(ISTAG),false) sed -i -e '/return string\.format/,/\"\")/c\return "$(KONG_VERSION)\"' kong/meta.lua @@ -188,15 +227,44 @@ dependencies: bin/grpcurl fi \ done; -bin/grpcurl: - @curl -s -S -L \ - https://github.com/fullstorydev/grpcurl/releases/download/v$(GRPCURL_VERSION)/grpcurl_$(GRPCURL_VERSION)_$(GRPCURL_OS)_$(GRPCURL_MACHINE).tar.gz | tar xz -C bin; - @rm bin/LICENSE +build-kong: check-bazel + $(BAZEL) build //build:kong --verbose_failures --action_env=BUILD_NAME=$(BUILD_NAME) + +build-venv: check-bazel + $(eval VENV := bazel-bin/build/$(BUILD_NAME)-venv.sh) + + @if [ ! -e bazel-bin/build/$(BUILD_NAME)-venv.sh ]; then \ + $(BAZEL) build //build:venv $(BAZEL_ARGS); \ + fi + +install-dev-rocks: build-venv + @. 
$(VENV) ;\ + for rock in $(DEV_ROCKS) ; do \ + if luarocks list --porcelain $$rock | grep -q "installed" ; then \ + echo $$rock already installed, skipping ; \ + else \ + echo $$rock not found, installing via luarocks... ; \ + LIBRARY_PREFIX=$$(pwd)/bazel-bin/build/$(BUILD_NAME)/kong ; \ + luarocks install $$rock OPENSSL_DIR=$$LIBRARY_PREFIX CRYPTO_DIR=$$LIBRARY_PREFIX YAML_DIR=$(YAML_DIR) || exit 1; \ + fi \ + done; dev: remove install dependencies +venv-dev: build-venv install-dev-rocks bin/grpcurl bin/h2client + +check-bazel: bin/bazel +ifndef BAZEL + $(eval BAZEL := bin/bazel) +endif + +clean: check-bazel + $(BAZEL) clean + $(RM) bin/bazel bin/grpcurl bin/h2client + + lint: - @luacheck -q . + @luacheck -q . --exclude-files=bazel-* @!(grep -R -E -I -n -w '#only|#o' spec && echo "#only or #o tag detected") >&2 @!(grep -R -E -I -n -- '---\s+ONLY' t && echo "--- ONLY block detected") >&2 diff --git a/WORKSPACE b/WORKSPACE new file mode 100644 index 00000000000..d2f1c961030 --- /dev/null +++ b/WORKSPACE @@ -0,0 +1,53 @@ +workspace(name = "kong") + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "bazel_skylib", + sha256 = "74d544d96f4a5bb630d465ca8bbcfe231e3594e5aae57e1edbf17a6eb3ca2506", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz", + "https://github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz", + ], +) + +load("//build:kong_bindings.bzl", "load_bindings") + +load_bindings(name = "kong_bindings") + +http_archive( + name = "rules_foreign_cc", + sha256 = "2a4d07cd64b0719b39a7c12218a3e507672b82a97b98c6a89d38565894cf7c51", + strip_prefix = "rules_foreign_cc-0.9.0", + url = "https://github.com/bazelbuild/rules_foreign_cc/archive/refs/tags/0.9.0.tar.gz", +) + +load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies") + +# This sets up some common toolchains for building targets. 
For more details, please see +# https://bazelbuild.github.io/rules_foreign_cc/0.9.0/flatten.html#rules_foreign_cc_dependencies +rules_foreign_cc_dependencies( + register_built_tools = False, # don't build toolchains like make + register_default_tools = True, # register cmake and ninja that are managed by bazel + register_preinstalled_tools = True, # use preinstalled toolchains like make +) + +load("//build/openresty:repositories.bzl", "openresty_repositories") + +openresty_repositories() + +load("//build/nfpm:repositories.bzl", "nfpm_repositories") + +nfpm_repositories() + +load("//build:repositories.bzl", "build_repositories") + +build_repositories() + +load("//build/toolchain:repositories.bzl", "toolchain_repositories") + +toolchain_repositories() + +load("//build/toolchain:managed_toolchain.bzl", "register_all_toolchains") + +register_all_toolchains() diff --git a/bin/busted b/bin/busted index 3aa7fa44710..dfc41fec123 100755 --- a/bin/busted +++ b/bin/busted @@ -1,6 +1,12 @@ #!/usr/bin/env resty -local DEFAULT_RESTY_FLAGS="-c 4096" +setmetatable(_G, nil) + +local pl_path = require("pl.path") + +local cert_path = pl_path.abspath("spec/fixtures/kong_spec.crt") + +local DEFAULT_RESTY_FLAGS=string.format(" -c 4096 --http-conf 'lua_ssl_trusted_certificate %s;' ", cert_path) if not os.getenv("KONG_BUSTED_RESPAWNED") then -- initial run, so go update the environment @@ -24,8 +30,9 @@ if not os.getenv("KONG_BUSTED_RESPAWNED") then -- rebuild the invoked commandline, while inserting extra resty-flags local resty_flags = DEFAULT_RESTY_FLAGS - local cmd = { "exec" } - for i = -1, #arg do + local cmd = { "exec", "/usr/bin/env", "resty" } + local cmd_prefix_count = #cmd + for i = 0, #arg do if arg[i]:sub(1, 12) == "RESTY_FLAGS=" then resty_flags = arg[i]:sub(13, -1) @@ -35,7 +42,7 @@ if not os.getenv("KONG_BUSTED_RESPAWNED") then end if resty_flags then - table.insert(cmd, 3, resty_flags) + table.insert(cmd, cmd_prefix_count+1, resty_flags) end table.insert(script, table.concat(cmd, " ")) @@ -45,8 +52,6 @@ if not os.getenv("KONG_BUSTED_RESPAWNED") then os.exit(rc) end -setmetatable(_G, nil) - pcall(require, "luarocks.loader") require("kong.globalpatches")({ @@ -56,3 +61,5 @@ require("kong.globalpatches")({ -- Busted command-line runner require 'busted.runner'({ standalone = false }) + +-- vim: set ft=lua ts=2 sw=2 sts=2 et : diff --git a/build/BUILD.bazel b/build/BUILD.bazel new file mode 100644 index 00000000000..4d7a467a176 --- /dev/null +++ b/build/BUILD.bazel @@ -0,0 +1,176 @@ +load("@kong_bindings//:variables.bzl", "KONG_VAR") +load("//build:build_system.bzl", "kong_directory_genrule", "kong_rules_group", "kong_template_file") + +exports_files([ + "package/nfpm.yaml", + "package/nfpm.enterprise.yaml", +]) + +lib_deps = [] + +install_lib_deps_cmd = "\n".join([ + """ + DEP=${WORKSPACE_PATH}/$(echo $(locations %s) | awk '{print $1}') + # use tar magic to exclude files and create with correct permission + copy_with_filter ${DEP} ${BUILD_DESTDIR}/kong +""" % dep + for dep in lib_deps +]) + +lualib_deps = [ + "@lua-kong-nginx-module//:all_srcs", +] + +install_lualib_deps_cmd = "\n".join([ + """ + DEP=$(pwd)/external/%s + INSTALL=/usr/bin/install make --silent -C ${DEP} LUA_LIB_DIR=${BUILD_DESTDIR}/openresty/lualib install +""" % dep.lstrip("@").split("/")[0] + for dep in lualib_deps +]) + +install_webui_cmd = select({ + "//conditions:default": """ + """, + "@kong//:skip_webui_flags": "\n", +}) + +kong_directory_genrule( + name = "kong", + srcs = [ + "@openresty//:openresty", + "@openresty//:luajit", 
+ "@luarocks//:luarocks_make", + "@luarocks//:luarocks_target", + "@protoc//:all_srcs", + "@openssl", + ] + select({ + "@kong//:skip_webui_flags": [], + "//conditions:default": [], + }) + lib_deps + lualib_deps, + cmd = """ set -e + function copy_with_filter { + mkdir -p $2 + tar -cC $1 --exclude="*.a" --exclude="*.la" \ + --exclude="*/share/*" --exclude="*/bin/*" \ + --exclude="*.log" . | tar -xC $2/. + chmod -R "+rw" $2 + } + function LN { + if [[ "$OSTYPE" == "darwin"* ]]; then + # TODO: support relative path links once we start to cross compile on macOS + ln -sf $@ + else + ln -srf $@ + fi + } + rm -rf ${BUILD_DESTDIR} + mkdir -p ${BUILD_DESTDIR}/kong/lib ${BUILD_DESTDIR}/openresty ${BUILD_DESTDIR}/bin + + if [[ "$OSTYPE" == "darwin"* ]]; then + libext="dylib" + else # assume linux + libext="so" + fi + + OPENRESTY=${WORKSPACE_PATH}/$(echo '$(locations @openresty//:openresty)' | awk '{print $1}') + cp -r ${OPENRESTY}/. ${BUILD_DESTDIR}/openresty/. + LN ${BUILD_DESTDIR}/openresty/bin/resty ${BUILD_DESTDIR}/bin/resty + chmod -R "+rw" ${BUILD_DESTDIR}/openresty + + LUAJIT=${WORKSPACE_PATH}/$(echo '$(locations @openresty//:luajit)' | awk '{print $1}') + copy_with_filter ${LUAJIT} ${BUILD_DESTDIR}/openresty/luajit + cp ${LUAJIT}/bin/luajit ${BUILD_DESTDIR}/openresty/luajit/bin/luajit + tar -cC ${LUAJIT}/share . | tar -xC ${BUILD_DESTDIR}/openresty/luajit/share + chmod -R "+rw" ${BUILD_DESTDIR}/openresty/luajit + + LUAROCKS=${WORKSPACE_PATH}/$(dirname '$(location @luarocks//:luarocks_make)')/luarocks_tree + cp -r ${LUAROCKS}/. ${BUILD_DESTDIR}/. + rm ${BUILD_DESTDIR}/bin/lapis ${BUILD_DESTDIR}/bin/luarocks-admin + + cp -r $(locations @protoc//:all_srcs) ${BUILD_DESTDIR}/kong/. + + OPENSSL=${WORKSPACE_PATH}/$(echo $(locations @openssl) | awk '{print $1}') + # use tar magic to exclude files and create with correct permission + copy_with_filter $OPENSSL ${BUILD_DESTDIR}/kong + + + """ + install_lib_deps_cmd + install_lualib_deps_cmd + install_webui_cmd + + """ + mkdir -p ${BUILD_DESTDIR}/etc/kong + cp kong.conf.default ${BUILD_DESTDIR}/etc/kong/kong.conf.default + + # housecleaning + mv ${BUILD_DESTDIR}/kong/*.${libext}* ${BUILD_DESTDIR}/kong/lib 2>/dev/null || true + if [[ -d ${BUILD_DESTDIR}/kong/lib64 ]]; then + copy_with_filter ${BUILD_DESTDIR}/kong/lib64 ${BUILD_DESTDIR}/kong/lib + rm -rf ${BUILD_DESTDIR}/kong/lib64 + fi + + # remove pkgconfig since they are invalid anyway + find ${BUILD_DESTDIR} -name "*.pc" -delete + + # clean empty directory + find ${BUILD_DESTDIR} -empty -type d -delete + + # foreign_cc rule dereferences symlink, we will dedup them here + # TODO: patch https://github.com/bazelbuild/rules_foreign_cc/blob/main/foreign_cc/private/framework.bzl#L450 to not remove symlink + for f in $(find ${BUILD_DESTDIR}/kong/lib ${BUILD_DESTDIR}/openresty/luajit/lib -type f -name "*.${libext}*" ); do + if [[ -L "$f" ]]; then continue; fi # already a symlink + target=$(ls -r1 $f.* 2>/dev/null | head -n1) + if [[ ! 
-z "$target" && "$f" != "$target" ]]; then + LN "$target" "$f" + fi + done + + cp kong/pluginsocket.proto ${BUILD_DESTDIR}/kong/include/pluginsocket.proto + + LN ${BUILD_DESTDIR}/openresty/nginx/sbin/nginx ${BUILD_DESTDIR}/openresty/bin/openresty + """, + # XXX: bazel forces 0555 as artifact permission, which is not correct for packagin + # here we deliberately use a different directory so file permission is preserved + # see also https://github.com/bazelbuild/bazel/issues/5588 + output_dir = KONG_VAR["BUILD_NAME"] + ".nop", + visibility = ["//visibility:public"], +) + +kong_template_file( + name = "venv.sh", + output = "%s-venv.sh" % KONG_VAR["BUILD_NAME"], + substitutions = { + "{{build_name}}": KONG_VAR["BUILD_NAME"], + "{{workspace_path}}": KONG_VAR["WORKSPACE_PATH"], + }, + template = "//build:templates/venv.sh", +) + +kong_template_file( + name = "venv.fish", + output = "%s-venv.fish" % KONG_VAR["BUILD_NAME"], + substitutions = { + "{{build_name}}": KONG_VAR["BUILD_NAME"], + "{{workspace_path}}": KONG_VAR["WORKSPACE_PATH"], + }, + template = "//build:templates/venv.fish", +) + +kong_template_file( + name = "venv-commons", + is_executable = True, + output = "%s-venv/lib/venv-commons" % KONG_VAR["BUILD_NAME"], + substitutions = { + "{{workspace_path}}": KONG_VAR["WORKSPACE_PATH"], + }, + template = "//build:templates/venv-commons", +) + +kong_rules_group( + name = "venv", + propagates = [ + ":kong", + ":venv.sh", + ":venv.fish", + ":venv-commons", + ], + visibility = ["//visibility:public"], +) diff --git a/build/README.md b/build/README.md new file mode 100644 index 00000000000..7af1d2b7f04 --- /dev/null +++ b/build/README.md @@ -0,0 +1,137 @@ +# Build + +This directory contains the build system for the project. +The build system is designed to be used with the [Bazel](https://bazel.build/). +It is designed to be running on Linux without root privileges, and no virtualization technology is required. + +The build system is tested on Linux (x86_64 and aarch64) and macOS (Intel chip and AppleSilicon Chip). + +## Prerequisites + +The build system requires the following tools to be installed: + +- [Bazel/Bazelisk](https://bazel.build/install/bazelisk), Bazelisk is recommended to ensure the correct version of Bazel is used. +- [Build Dependencies](https://github.com/Kong/kong/blob/master/DEVELOPER.md#prerequisites), the build system requires the same dependencies as Kong itself. + +## Building + +To build Kong and all its dependencies, run the following command: + +Bash/Zsh: + +```bash +git submodule update --init +GITHUB_TOKEN=token bazel build //build:kong --verbose_failures +``` + +The build output is in `bazel-bin/build/kong-dev`. + +To use the build as a virtual development environment, run: + +```bash +bazel build //build:venv --verbose_failures +. ./bazel-bin/build/kong-dev-venv.sh +``` + +Some other targets one might find useful for debugging are: + +- `@openresty//:openresty`: builds openresty +- `@luarocks//:luarocks_make`: builds luarocks for Kong dependencies + +### Build Options + +Following build options can be used to set specific features: + +- **--//:debug=true** turn on debug opitons for OpenResty and LuaJIT, default to true. +- **--action_env=BUILD_NAME=** set the `build_name`, multiple build can exist at same time to allow you +switch between different Kong versions or branches. Default to `kong-dev`; don't set this when you are +building a building an binary package. +- **--action_env=INSTALL_DESTDIR=** set the directory when the build is intended to be installed. 
Bazel won't +actually install files into this directory, but this will make sure certain hard coded paths and RPATH is +correctly set when building a package. Default to `bazel-bin/build/`. + + +### Official build + +`--config release` specifies the build configuration to use for release, it sets following build options: + +``` +build:release --//:debug=false +build:release --action_env=BUILD_NAME=kong-dev +build:release --action_env=INSTALL_DESTDIR=/usr/local +``` + +To build an official release, use: + +```bash +GITHUB_TOKEN=token bazel build --config release //build:kong --verbose_failures +``` + +Supported build targets for binary packages: +- `:kong_deb` +- `:kong_el7` +- `:kong_el8` +- `:kong_aws2` +- `:kong_aws2023` +- `:kong_apk` + +For example, to build the deb package: + +```bash +bazel build --verbose_failures --config release :kong_deb + +``` + +Run `bazel clean` to clean the bazel build cache. + +#### GPG Signing + +GPG singing is supported for the rpm packages (`el*` and `aws*`). + +```bash +bazel build //:kong_el8 --action_env=RPM_SIGNING_KEY_FILE --action_env=NFPM_RPM_PASSPHRASE +``` + +## Cross compiling + +Cross compiling is currently only tested on Ubuntu 22.04 x86_64 with following targeting platforms: + +- **//:generic-crossbuild-aarch64** Use the system installed aarch64 toolchain. + - Requires user to manually install `crossbuild-essential-arm64` on Debian/Ubuntu. +- **//:alpine-crossbuild-x86_64** Alpine Linux x86_64; bazel manages the build toolchain. +- **//:alpine-crossbuild-aarch64** Alpine Linux aarch64; bazel manages the build toolchain. + +Make sure platforms are selected both in building Kong and packaing kong: + +```bash +bazel build --config release //build:kong --platforms=//:generic-crossbuild-aarch64 +bazel build --config release :kong_deb --platforms=//:generic-crossbuild-aarch64 +``` + +## Troubleshooting + +Run `bazel build` with `--sandbox_debug --verbose_failures` to get more information about the error. + +The `.log` files in `bazel-bin` contain the build logs. + +## FAQ + +### Caching + +Bazel utilizes a cache to speed up the build process. You might want to clear the cache actively +if you recently changed `BUILD_NAME` or `INSTALL_DESTDIR`. + +To completely remove the entire working tree created by a Bazel instance, run: + +```shell +bazel clean --expunge +``` + +### Cleanup + +In some cases where the build fails or the build is interrupted, the build system may leave behind some temporary files. To clean up the build system, run the following command or simply rerun the build: + +```shell +bazel clean +``` + diff --git a/build/build_system.bzl b/build/build_system.bzl new file mode 100644 index 00000000000..5b2feeb1fc3 --- /dev/null +++ b/build/build_system.bzl @@ -0,0 +1,209 @@ +""" +Load this file for all Kong-specific build macros +and rules that you'd like to use in your BUILD files. +""" + +load("@bazel_skylib//lib:dicts.bzl", "dicts") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +# A genrule variant that can output a directory. 
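+# Implementation notes: the rule declares `output_dir` as a directory artifact,
+# merges KONG_VAR with the default shell environment (exposing GENRULE_OUTPUT_DIR
+# to the command), and runs `cmd` in a shell with `srcs` and `tools` as inputs and
+# the directory tree as the only declared output.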
+def _kong_directory_genrule_impl(ctx): + tree = ctx.actions.declare_directory(ctx.attr.output_dir) + env = dicts.add(KONG_VAR, ctx.configuration.default_shell_env, { + "GENRULE_OUTPUT_DIR": tree.path, + }) + + # XXX: remove the "env" from KONG_VAR which is a list + env["OPENRESTY_PATCHES"] = "" + + ctx.actions.run_shell( + inputs = ctx.files.srcs, + tools = ctx.files.tools, + outputs = [tree], + command = "mkdir -p " + tree.path + " && " + ctx.expand_location(ctx.attr.cmd), + env = env, + ) + return [DefaultInfo(files = depset([tree]))] + +kong_directory_genrule = rule( + implementation = _kong_directory_genrule_impl, + attrs = { + "srcs": attr.label_list(), + "cmd": attr.string(), + "tools": attr.label_list(), + "output_dir": attr.string(), + }, +) + +# A rule that can be used as a meta rule that propagates multiple other rules +def _kong_rules_group_impl(ctx): + return [DefaultInfo(files = depset(ctx.files.propagates))] + +kong_rules_group = rule( + implementation = _kong_rules_group_impl, + attrs = { + "propagates": attr.label_list(), + }, +) + +_kong_template_attrs = { + "template": attr.label( + mandatory = True, + allow_single_file = True, + ), + "output": attr.output( + mandatory = True, + ), + "substitutions": attr.string_dict(), + "srcs": attr.label_list(allow_files = True, doc = "List of locations to expand the template, in target configuration"), + "tools": attr.label_list(allow_files = True, cfg = "exec", doc = "List of locations to expand the template, in exec configuration"), + "is_executable": attr.bool(default = False), + # hidden attributes + "_cc_toolchain": attr.label( + default = "@bazel_tools//tools/cpp:current_cc_toolchain", + ), +} + +def _render_template(ctx, output): + substitutions = dict(ctx.attr.substitutions) + for l in ctx.attr.srcs + ctx.attr.tools: + files = l.files.to_list() + if len(files) == 1: + p = files[0].path + else: + p = "/".join(files[0].path.split("/")[:-1]) # get the directory + substitutions["{{%s}}" % l.label] = p + + substitutions["{{CC}}"] = ctx.attr._cc_toolchain[cc_common.CcToolchainInfo].compiler_executable + + # yes, not a typo, use gcc for linker + substitutions["{{LD}}"] = substitutions["{{CC}}"] + + ctx.actions.expand_template( + template = ctx.file.template, + output = output, + substitutions = substitutions, + is_executable = ctx.attr.is_executable, + ) + +def _kong_template_file_impl(ctx): + _render_template(ctx, ctx.outputs.output) + + return [ + DefaultInfo(files = depset([ctx.outputs.output])), + ] + +kong_template_file = rule( + implementation = _kong_template_file_impl, + attrs = _kong_template_attrs, +) + +def _kong_template_genrule_impl(ctx): + f = ctx.actions.declare_file(ctx.attr.name + ".rendered.sh") + _render_template(ctx, f) + + ctx.actions.run_shell( + outputs = [ctx.outputs.output], + inputs = ctx.files.srcs + ctx.files.tools + [f], + command = "{} {}".format(f.path, ctx.outputs.output.path), + progress_message = ctx.attr.progress_message, + ) + + return [ + # don't list f as files/real output + DefaultInfo(files = depset([ctx.outputs.output])), + ] + +kong_template_genrule = rule( + implementation = _kong_template_genrule_impl, + attrs = _kong_template_attrs | { + "progress_message": attr.string(doc = "Message to display when running the command"), + }, +) + +def _copyright_header(ctx): + paths = ctx.execute(["find", ctx.path("."), "-type", "f"]).stdout.split("\n") + + copyright_content = ctx.read(ctx.path(Label("@kong//:distribution/COPYRIGHT-HEADER"))).replace("--", " ") + copyright_content_js = "/*\n" + 
copyright_content + "*/\n\n" + copyright_content_html = "\n\n" + for path in paths: + if path.endswith(".js") or path.endswith(".map") or path.endswith(".css"): + content = ctx.read(path) + if not content.startswith(copyright_content_js): + # the default enabled |legacy_utf8| leads to a double-encoded utf-8 + # while writing utf-8 content read by |ctx.read|, let's disable it + ctx.file(path, copyright_content_js + content, legacy_utf8 = False) + + elif path.endswith(".html"): + content = ctx.read(path) + if not content.startswith(copyright_content_html): + # the default enabled |legacy_utf8| leads to a double-encoded utf-8 + # while writing utf-8 content read by |ctx.read|, let's disable it + ctx.file(path, copyright_content_html + content, legacy_utf8 = False) + +def _github_release_impl(ctx): + ctx.file("WORKSPACE", "workspace(name = \"%s\")\n" % ctx.name) + + if ctx.attr.build_file: + ctx.file("BUILD.bazel", ctx.read(ctx.attr.build_file)) + elif ctx.attr.build_file_content: + ctx.file("BUILD.bazel", ctx.attr.build_file_content) + + os_name = ctx.os.name + os_arch = ctx.os.arch + + if os_arch == "aarch64": + os_arch = "arm64" + elif os_arch == "x86_64": + os_arch = "amd64" + elif os_arch != "amd64": + fail("Unsupported arch %s" % os_arch) + + if os_name == "mac os x": + os_name = "macOS" + elif os_name != "linux": + fail("Unsupported OS %s" % os_name) + + gh_bin = "%s" % ctx.path(Label("@gh_%s_%s//:bin/gh" % (os_name, os_arch))) + args = [gh_bin, "release", "download", ctx.attr.tag, "-R", ctx.attr.repo] + downloaded_file = None + if ctx.attr.pattern: + if "/" in ctx.attr.pattern or ".." in ctx.attr.pattern: + fail("/ and .. are not allowed in pattern") + downloaded_file = ctx.attr.pattern.replace("*", "_") + args += ["-p", ctx.attr.pattern] + elif ctx.attr.archive: + args.append("--archive=" + ctx.attr.archive) + downloaded_file = "gh-release." + ctx.attr.archive.split(".")[-1] + else: + fail("at least one of pattern or archive must be set") + + args += ["-O", downloaded_file] + + ret = ctx.execute(args) + + if ret.return_code != 0: + gh_token_set = "GITHUB_TOKEN is set, is it valid?" + if not ctx.os.environ.get("GITHUB_TOKEN", ""): + gh_token_set = "GITHUB_TOKEN is not set, is this a private repo?" 
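+        # report gh's stderr and exit code together with the GITHUB_TOKEN hint computed above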
+ fail("Failed to download release (%s): %s, exit: %d" % (gh_token_set, ret.stderr, ret.return_code)) + + ctx.extract(downloaded_file, stripPrefix = ctx.attr.strip_prefix) + + if not ctx.attr.skip_add_copyright_header: + _copyright_header(ctx) + +github_release = repository_rule( + implementation = _github_release_impl, + attrs = { + "tag": attr.string(mandatory = True), + "pattern": attr.string(mandatory = False), + "archive": attr.string(mandatory = False, values = ["zip", "tar.gz"]), + "strip_prefix": attr.string(default = "", doc = "Strip prefix from downloaded files"), + "repo": attr.string(mandatory = True), + "build_file": attr.label(allow_single_file = True), + "build_file_content": attr.string(), + "skip_add_copyright_header": attr.bool(default = False, doc = "Whether to inject COPYRIGHT-HEADER into downloaded files, only required for webuis"), + }, +) diff --git a/build/cross_deps/BUILD.bazel b/build/cross_deps/BUILD.bazel new file mode 100644 index 00000000000..e69de29bb2d diff --git a/build/cross_deps/libxcrypt/BUILD.bazel b/build/cross_deps/libxcrypt/BUILD.bazel new file mode 100644 index 00000000000..d7862e9d016 --- /dev/null +++ b/build/cross_deps/libxcrypt/BUILD.bazel @@ -0,0 +1,6 @@ +exports_files( + [ + "BUILD.libxcrypt.bazel", + ], + visibility = ["//visibility:public"], +) diff --git a/build/cross_deps/libxcrypt/BUILD.libxcrypt.bazel b/build/cross_deps/libxcrypt/BUILD.libxcrypt.bazel new file mode 100644 index 00000000000..933172eec78 --- /dev/null +++ b/build/cross_deps/libxcrypt/BUILD.libxcrypt.bazel @@ -0,0 +1,60 @@ +load("@rules_foreign_cc//foreign_cc:defs.bzl", "configure_make") +load("@bazel_skylib//lib:selects.bzl", "selects") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +filegroup( + name = "all_srcs", + srcs = glob( + include = ["**"], + exclude = ["*.bazel"], + ), +) + +selects.config_setting_group( + name = "disable-obsolete-api", + # looks like RHEL is aggressive on migrating to libxcrypt + # set this option if any distro is looking for "libcrypto.so.2" + # instead of "libcrypt.so.1" (i.e. 
"error while loading shared libraries: libcrypt.so.1") + match_any = [ + "@kong//build/platforms/distro:rhel9", + "@kong//build/platforms/distro:aws2023", + ], +) + +configure_make( + name = "libxcrypt", + configure_command = "configure", + configure_in_place = True, + configure_options = select({ + "@kong//:aarch64-linux-anylibc-cross": [ + "--host=aarch64-linux", + ], + "@kong//:x86_64-linux-musl-cross": [ + "--host=x86_64-linux-musl", + ], + "//conditions:default": [], + }) + select({ + ":disable-obsolete-api": [ + "--enable-obsolete-api=no", + ], + "//conditions:default": [], + }), + lib_source = ":all_srcs", + # out_lib_dir = "lib", + out_shared_libs = select({ + "@platforms//os:macos": [ + "libcrypt.1.dylib", + ], + ":disable-obsolete-api": [ + "libcrypt.so.2", + ], + "//conditions:default": [ + "libcrypt.so.1", + ], + }), + targets = [ + "-j" + KONG_VAR["NPROC"], + "install -j" + KONG_VAR["NPROC"], + ], + visibility = ["//visibility:public"], +) diff --git a/build/cross_deps/libxcrypt/repositories.bzl b/build/cross_deps/libxcrypt/repositories.bzl new file mode 100644 index 00000000000..f6c28d02244 --- /dev/null +++ b/build/cross_deps/libxcrypt/repositories.bzl @@ -0,0 +1,18 @@ +"""A module defining the third party dependency OpenResty""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def libxcrypt_repositories(): + """Defines the libcrypt repository""" + + # many distros starts replace glibc/libcrypt with libxcrypt + # thus crypt.h and libcrypt.so.1 are missing from cross tool chain + # ubuntu2004: 4.4.10 + # ubuntu2204: 4.4.27 + http_archive( + name = "cross_deps_libxcrypt", + url = "https://github.com/besser82/libxcrypt/releases/download/v4.4.27/libxcrypt-4.4.27.tar.xz", + sha256 = "500898e80dc0d027ddaadb5637fa2bf1baffb9ccd73cd3ab51d92ef5b8a1f420", + strip_prefix = "libxcrypt-4.4.27", + build_file = "//build/cross_deps/libxcrypt:BUILD.libxcrypt.bazel", + ) diff --git a/build/cross_deps/libyaml/BUILD.bazel b/build/cross_deps/libyaml/BUILD.bazel new file mode 100644 index 00000000000..588b8759be7 --- /dev/null +++ b/build/cross_deps/libyaml/BUILD.bazel @@ -0,0 +1,16 @@ +load("@bazel_skylib//rules:build_test.bzl", "build_test") + +exports_files( + [ + "BUILD.libyaml.bazel", + ], + visibility = ["//visibility:public"], +) + +build_test( + name = "build", + targets = [ + "@cross_deps_libyaml//:libyaml", + ], + visibility = ["//:__pkg__"], +) diff --git a/build/cross_deps/libyaml/BUILD.libyaml.bazel b/build/cross_deps/libyaml/BUILD.libyaml.bazel new file mode 100644 index 00000000000..ad4e48560df --- /dev/null +++ b/build/cross_deps/libyaml/BUILD.libyaml.bazel @@ -0,0 +1,40 @@ +load("@rules_foreign_cc//foreign_cc:defs.bzl", "configure_make") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +filegroup( + name = "all_srcs", + srcs = glob( + include = ["**"], + exclude = ["*.bazel"], + ), +) + +configure_make( + name = "libyaml", + configure_command = "configure", + configure_in_place = True, + configure_options = select({ + "@kong//:aarch64-linux-anylibc-cross": [ + "--host=aarch64-linux", + ], + "@kong//:x86_64-linux-musl-cross": [ + "--host=x86_64-linux-musl", + ], + "//conditions:default": [], + }), + lib_source = ":all_srcs", + # out_lib_dir = "lib", + out_shared_libs = select({ + "@platforms//os:macos": [ + "libyaml-0.2.dylib", + ], + "//conditions:default": [ + "libyaml-0.so.2", + ], + }), + targets = [ + "-j" + KONG_VAR["NPROC"], + "install -j" + KONG_VAR["NPROC"], + ], + visibility = ["//visibility:public"], +) diff --git 
a/build/cross_deps/libyaml/repositories.bzl b/build/cross_deps/libyaml/repositories.bzl new file mode 100644 index 00000000000..b7b2800cf96 --- /dev/null +++ b/build/cross_deps/libyaml/repositories.bzl @@ -0,0 +1,15 @@ +"""A module defining the third party dependency OpenResty""" + +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def libyaml_repositories(): + """Defines the libyaml repository""" + + http_archive( + name = "cross_deps_libyaml", + url = "https://pyyaml.org/download/libyaml/yaml-0.2.5.tar.gz", + sha256 = "c642ae9b75fee120b2d96c712538bd2cf283228d2337df2cf2988e3c02678ef4", + strip_prefix = "yaml-0.2.5", + build_file = "//build/cross_deps/libyaml:BUILD.libyaml.bazel", + ) diff --git a/build/cross_deps/repositories.bzl b/build/cross_deps/repositories.bzl new file mode 100644 index 00000000000..a2afddfc9e9 --- /dev/null +++ b/build/cross_deps/repositories.bzl @@ -0,0 +1,8 @@ +load("//build/cross_deps/zlib:repositories.bzl", "zlib_repositories") +load("//build/cross_deps/libyaml:repositories.bzl", "libyaml_repositories") +load("//build/cross_deps/libxcrypt:repositories.bzl", "libxcrypt_repositories") + +def cross_deps_repositories(): + zlib_repositories() + libyaml_repositories() + libxcrypt_repositories() diff --git a/build/cross_deps/zlib/BUILD.bazel b/build/cross_deps/zlib/BUILD.bazel new file mode 100644 index 00000000000..d650c675249 --- /dev/null +++ b/build/cross_deps/zlib/BUILD.bazel @@ -0,0 +1,16 @@ +load("@bazel_skylib//rules:build_test.bzl", "build_test") + +exports_files( + [ + "BUILD.zlib.bazel", + ], + visibility = ["//visibility:public"], +) + +build_test( + name = "build", + targets = [ + "@cross_deps_zlib//:zlib", + ], + visibility = ["//:__pkg__"], +) diff --git a/build/cross_deps/zlib/BUILD.zlib.bazel b/build/cross_deps/zlib/BUILD.zlib.bazel new file mode 100644 index 00000000000..a82ac697781 --- /dev/null +++ b/build/cross_deps/zlib/BUILD.zlib.bazel @@ -0,0 +1,49 @@ +load("@rules_foreign_cc//foreign_cc:defs.bzl", "cmake") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +filegroup( + name = "all_srcs", + srcs = glob( + include = ["**"], + exclude = ["*.bazel"], + ), +) + +cmake( + name = "zlib", + build_args = [ + "--", # <- Pass remaining options to the native tool. + "-j" + KONG_VAR["NPROC"], + ], + # partially from https://github.com/envoyproxy/envoy/blob/main/bazel/foreign_cc/BUILD#L546 + cache_entries = { + "CMAKE_CXX_COMPILER_FORCED": "on", + "CMAKE_C_COMPILER_FORCED": "on", + "SKIP_BUILD_EXAMPLES": "on", + "BUILD_SHARED_LIBS": "ON", + + # The following entries are for zlib-ng. Since zlib and zlib-ng are compatible source + # codes and CMake ignores unknown cache entries, it is fine to combine it into one + # dictionary. + # + # Reference: https://github.com/zlib-ng/zlib-ng#build-options. + "ZLIB_COMPAT": "on", + "ZLIB_ENABLE_TESTS": "off", + + # Warning: Turning WITH_OPTIM to "on" doesn't pass ZlibCompressorImplTest.CallingChecksum. + "WITH_OPTIM": "on", + # However turning off SSE4 fixes it. + "WITH_SSE4": "off", + + # Warning: Turning WITH_NEW_STRATEGIES to "on" doesn't pass gzip compressor fuzz test. + # Turning this off means falling into NO_QUICK_STRATEGY route. + "WITH_NEW_STRATEGIES": "off", + + # Only allow aligned address. + # Reference: https://github.com/zlib-ng/zlib-ng#advanced-build-options. 
+ "UNALIGNED_OK": "off", + }, + lib_source = ":all_srcs", + out_shared_libs = ["libz.so.1"], + visibility = ["//visibility:public"], +) diff --git a/build/cross_deps/zlib/repositories.bzl b/build/cross_deps/zlib/repositories.bzl new file mode 100644 index 00000000000..3185b65222a --- /dev/null +++ b/build/cross_deps/zlib/repositories.bzl @@ -0,0 +1,18 @@ +"""A module defining the third party dependency OpenResty""" + +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def zlib_repositories(): + """Defines the zlib repository""" + + http_archive( + name = "cross_deps_zlib", + urls = [ + "https://zlib.net/zlib-1.2.13.tar.gz", + "https://zlib.net/fossils/zlib-1.2.13.tar.gz", + ], + sha256 = "b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30", + strip_prefix = "zlib-1.2.13", + build_file = "//build/cross_deps/zlib:BUILD.zlib.bazel", + ) diff --git a/build/dockerfiles/apk.Dockerfile b/build/dockerfiles/apk.Dockerfile new file mode 100644 index 00000000000..5dfcb2f37bc --- /dev/null +++ b/build/dockerfiles/apk.Dockerfile @@ -0,0 +1,56 @@ +ARG KONG_BASE_IMAGE=alpine:3.16 +FROM --platform=$TARGETPLATFORM $KONG_BASE_IMAGE + +LABEL maintainer="Kong Docker Maintainers (@team-gateway-bot)" + +ARG KONG_VERSION +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_PREFIX=/usr/local/kong +ENV KONG_PREFIX $KONG_PREFIX + +ARG EE_PORTS + +ARG TARGETARCH + +ARG KONG_ARTIFACT=kong.${TARGETARCH}.apk.tar.gz +ARG KONG_ARTIFACT_PATH= +COPY ${KONG_ARTIFACT_PATH}${KONG_ARTIFACT} /tmp/kong.apk.tar.gz + +RUN apk upgrade --update-cache \ + && apk add --virtual .build-deps tar gzip \ + && tar -C / -xzf /tmp/kong.apk.tar.gz \ + && apk add --no-cache libstdc++ libgcc pcre perl tzdata libcap zlib zlib-dev bash yaml \ + && adduser -u 1000 -S kong \ + && addgroup -g 1000 -S kong \ + && mkdir -p "${KONG_PREFIX}" \ + && chown -R kong:0 ${KONG_PREFIX} \ + && chown kong:0 /usr/local/bin/kong \ + && chmod -R g=u ${KONG_PREFIX} \ + && chown -R kong:kong /usr/local/bin/luarocks \ + && chown -R kong:kong /usr/local/lib/lua \ + && chown -R kong:kong /usr/local/lib/luarocks \ + && chown -R kong:kong /usr/local/openresty \ + && chown -R kong:kong /usr/local/etc/luarocks \ + && chown -R kong:kong /usr/local/share/lua \ + && rm -rf /tmp/kong.apk.tar.gz \ + && ln -sf /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -sf /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -sf /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && apk del .build-deps \ + && kong version + +COPY build/dockerfiles/entrypoint.sh /entrypoint.sh + +USER kong + +ENTRYPOINT ["/entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=60s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/build/dockerfiles/deb.Dockerfile b/build/dockerfiles/deb.Dockerfile new file mode 100644 index 00000000000..4a44abd0cd4 --- /dev/null +++ b/build/dockerfiles/deb.Dockerfile @@ -0,0 +1,47 @@ +ARG KONG_BASE_IMAGE=debian:bullseye-slim +FROM --platform=$TARGETPLATFORM $KONG_BASE_IMAGE + +LABEL maintainer="Kong Docker Maintainers (@team-gateway-bot)" + +ARG KONG_VERSION +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_PREFIX=/usr/local/kong +ENV KONG_PREFIX $KONG_PREFIX + +ARG EE_PORTS + +ARG TARGETARCH + +ARG KONG_ARTIFACT=kong.${TARGETARCH}.deb +ARG KONG_ARTIFACT_PATH= +COPY 
${KONG_ARTIFACT_PATH}${KONG_ARTIFACT} /tmp/kong.deb + +RUN apt-get update \ + && apt-get -y upgrade \ + && apt-get -y autoremove \ + && apt-get install -y --no-install-recommends /tmp/kong.deb \ + && rm -rf /var/lib/apt/lists/* \ + && rm -rf /tmp/kong.deb \ + && chown kong:0 /usr/local/bin/kong \ + && chown -R kong:0 ${KONG_PREFIX} \ + && ln -sf /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -sf /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -sf /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && ln -sf "$(find / -name "libz.*" 2>/dev/null | head -n 1)" /usr/lib/libz.so \ + && kong version + +COPY build/dockerfiles/entrypoint.sh /entrypoint.sh + +USER kong + +ENTRYPOINT ["/entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=60s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/build/dockerfiles/entrypoint.sh b/build/dockerfiles/entrypoint.sh new file mode 100755 index 00000000000..f4f2a499b77 --- /dev/null +++ b/build/dockerfiles/entrypoint.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +set -Eeo pipefail + +# usage: file_env VAR [DEFAULT] +# ie: file_env 'XYZ_DB_PASSWORD' 'example' +# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of +# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) +file_env() { + local var="$1" + local fileVar="${var}_FILE" + local def="${2:-}" + # Do not continue if _FILE env is not set + if ! [ "${!fileVar:-}" ]; then + return + elif [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + echo >&2 "error: both $var and $fileVar are set (but are exclusive)" + exit 1 + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +export KONG_NGINX_DAEMON=${KONG_NGINX_DAEMON:=off} + +if [[ "$1" == "kong" ]]; then + + all_kong_options="/usr/local/share/lua/5.1/kong/templates/kong_defaults.lua" + set +Eeo pipefail + while IFS='' read -r LINE || [ -n "${LINE}" ]; do + opt=$(echo "$LINE" | grep "=" | sed "s/=.*$//" | sed "s/ //" | tr '[:lower:]' '[:upper:]') + file_env "KONG_$opt" + done < $all_kong_options + set -Eeo pipefail + + file_env KONG_PASSWORD + PREFIX=${KONG_PREFIX:=/usr/local/kong} + + if [[ "$2" == "docker-start" ]]; then + kong prepare -p "$PREFIX" "$@" + + ln -sfn /dev/stdout $PREFIX/logs/access.log + ln -sfn /dev/stdout $PREFIX/logs/admin_access.log + ln -sfn /dev/stderr $PREFIX/logs/error.log + + exec /usr/local/openresty/nginx/sbin/nginx \ + -p "$PREFIX" \ + -c nginx.conf + fi +fi + +exec "$@" diff --git a/build/dockerfiles/rpm.Dockerfile b/build/dockerfiles/rpm.Dockerfile new file mode 100644 index 00000000000..51067e2aa78 --- /dev/null +++ b/build/dockerfiles/rpm.Dockerfile @@ -0,0 +1,60 @@ +ARG KONG_BASE_IMAGE=redhat/ubi8 +FROM --platform=$TARGETPLATFORM $KONG_BASE_IMAGE + +LABEL maintainer="Kong Docker Maintainers (@team-gateway-bot)" + +ARG KONG_VERSION +ENV KONG_VERSION $KONG_VERSION + +# RedHat required labels +LABEL name="Kong" \ + vendor="Kong" \ + version="$KONG_VERSION" \ + release="1" \ + url="https://konghq.com" \ + summary="Next-Generation API Platform for Modern Architectures" \ + description="Next-Generation API Platform for Modern Architectures" + +# RedHat required LICENSE file approved path +COPY LICENSE /licenses/ + +ARG PACKAGE_DISTRO=el7 + +ARG KONG_PREFIX=/usr/local/kong +ENV 
KONG_PREFIX $KONG_PREFIX + +ARG EE_PORTS + +ARG TARGETARCH + +ARG KONG_ARTIFACT=kong.${PACKAGE_DISTRO}.${TARGETARCH}.rpm +ARG KONG_ARTIFACT_PATH= +COPY ${KONG_ARTIFACT_PATH}${KONG_ARTIFACT} /tmp/kong.rpm + +# hadolint ignore=DL3015 +RUN yum update -y \ + && yum install -y /tmp/kong.rpm \ + && rm /tmp/kong.rpm \ + && chown kong:0 /usr/local/bin/kong \ + && chown -R kong:0 /usr/local/kong \ + && ln -sf /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -sf /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -sf /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && ln -sf "$(find / -name "libz.*" 2>/dev/null | head -n 1)" /usr/lib/libz.so \ + && ln -sf "$(find / -name "libz.*" 2>/dev/null | head -n 1)" /usr/lib64/libz.so \ + && kong version + +COPY build/dockerfiles/entrypoint.sh /entrypoint.sh + +USER kong + +ENTRYPOINT ["/entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=60s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/build/kong_bindings.bzl b/build/kong_bindings.bzl new file mode 100644 index 00000000000..f81b2230102 --- /dev/null +++ b/build/kong_bindings.bzl @@ -0,0 +1,108 @@ +""" +Global varibles +""" + +def _load_vars(ctx): + # Read env from .requirements + requirements = ctx.read(Label("@kong//:.requirements")) + content = ctx.execute(["bash", "-c", "echo '%s' | " % requirements + + """grep -E '^(\\w*)=(.+)$' | sed -E 's/^(.*)=([^# ]+).*$/"\\1": "\\2",/'"""]).stdout + content = content.replace('""', '"') + + # Workspace path + workspace_path = "%s" % ctx.path(Label("@//:WORKSPACE")).dirname + content += '"WORKSPACE_PATH": "%s",\n' % workspace_path + + # Local env + # Temporarily fix for https://github.com/bazelbuild/bazel/issues/14693#issuecomment-1079006291 + for key in [ + "GITHUB_TOKEN", + "RPM_SIGNING_KEY_FILE", + "NFPM_RPM_PASSPHRASE", + ]: + value = ctx.os.environ.get(key, "") + if value: + content += '"%s": "%s",\n' % (key, value) + + build_name = ctx.os.environ.get("BUILD_NAME", "") + content += '"BUILD_NAME": "%s",\n' % build_name + + build_destdir = workspace_path + "/bazel-bin/build/" + build_name + content += '"BUILD_DESTDIR": "%s",\n' % build_destdir + + install_destdir = ctx.os.environ.get("INSTALL_DESTDIR", "MANAGED") + if install_destdir == "MANAGED": + install_destdir = build_destdir + content += '"INSTALL_DESTDIR": "%s",\n' % install_destdir + + # Kong Version + # TODO: this may not change after a bazel clean if cache exists + kong_version = ctx.execute(["bash", "scripts/grep-kong-version.sh"], working_directory = workspace_path).stdout + content += '"KONG_VERSION": "%s",' % kong_version.strip() + + if ctx.os.name == "mac os x": + nproc = ctx.execute(["sysctl", "-n", "hw.ncpu"]).stdout.strip() + else: # assume linux + nproc = ctx.execute(["nproc"]).stdout.strip() + + content += '"%s": "%s",' % ("NPROC", nproc) + + macos_target = "" + if ctx.os.name == "mac os x": + macos_target = ctx.execute(["sw_vers", "-productVersion"]).stdout.strip() + content += '"MACOSX_DEPLOYMENT_TARGET": "%s",' % macos_target + + # convert them into a list of labels relative to the workspace root + # TODO: this may not change after a bazel clean if cache exists + patches = sorted([ + '"@kong//:%s"' % str(p).replace(workspace_path, "").lstrip("/") + for p in ctx.path(workspace_path + "/build/openresty/patches").readdir() + ]) + + content += '"OPENRESTY_PATCHES": [%s],' % (", 
".join(patches)) + + ctx.file("BUILD.bazel", "") + ctx.file("variables.bzl", "KONG_VAR = {\n" + content + "\n}") + +def _check_sanity(ctx): + if ctx.os.name == "mac os x": + xcode_prefix = ctx.execute(["xcode-select", "-p"]).stdout.strip() + if "CommandLineTools" in xcode_prefix: + fail("Command Line Tools is not supported, please install Xcode from App Store.\n" + + "If you recently installed Xcode, please run `sudo xcode-select -s /Applications/Xcode.app/Contents/Developer` to switch to Xcode,\n" + + "then do a `bazel clean --expunge` and try again.\n" + + "The following command is useful to check if Xcode is picked up by Bazel:\n" + + "eval `find /private/var/tmp/_bazel_*/|grep xcode-locator|head -n1`") + + python = ctx.execute(["which", "python"]).stdout.strip() + if not python: + fail("rules_foreign_cc hasn't migrated to python3 on macOS yet, and your system doens't \n" + + "have a `python` binary. Consider create a symlink to `python3` and include in PATH:\n" + + "ln -s `which python3` /usr/local/bin/python\n" + + "export PATH=/usr/local/bin:$PATH bazel build \n") + + user = ctx.os.environ.get("USER", "") + if "@" in user: + fail("Bazel uses $USER in cache and rule_foreign_cc uses `@` in its sed command.\n" + + "However, your username contains a `@` character, which will cause build failure.\n" + + "Please rerun this build with:\n" + + "export USER=" + user.replace("@", "_") + " bazel build ") + +def _load_bindings_impl(ctx): + _check_sanity(ctx) + + _load_vars(ctx) + +load_bindings = repository_rule( + implementation = _load_bindings_impl, + # force "fetch"/invalidation of this repository every time it runs + # so that environ vars, patches and kong version is up to date + # see https://blog.bazel.build/2017/02/22/repository-invalidation.html + local = True, + environ = [ + "BUILD_NAME", + "INSTALL_DESTDIR", + "RPM_SIGNING_KEY_FILE", + "NFPM_RPM_PASSPHRASE", + ], +) diff --git a/build/luarocks/BUILD.bazel b/build/luarocks/BUILD.bazel new file mode 100644 index 00000000000..79168c44d85 --- /dev/null +++ b/build/luarocks/BUILD.bazel @@ -0,0 +1,21 @@ +load("//build:build_system.bzl", "kong_rules_group") + +exports_files( + [ + "BUILD.luarocks.bazel", + "luarocks_wrap_script.lua", + "templates/luarocks_exec.sh", + "templates/luarocks_make.sh", + "templates/luarocks_target.sh", + ], + visibility = ["//visibility:public"], +) + +kong_rules_group( + name = "luarocks", + propagates = [ + "@luarocks//:luarocks_make", + "@luarocks//:luarocks_target", + ], + visibility = ["//:__pkg__"], +) diff --git a/build/luarocks/BUILD.luarocks.bazel b/build/luarocks/BUILD.luarocks.bazel new file mode 100644 index 00000000000..0d7924e3bef --- /dev/null +++ b/build/luarocks/BUILD.luarocks.bazel @@ -0,0 +1,110 @@ +load("@rules_foreign_cc//foreign_cc:defs.bzl", "configure_make") +load("@kong//build:build_system.bzl", "kong_template_genrule") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +filegroup( + name = "all_srcs", + srcs = glob( + include = ["**"], + exclude = ["*.bazel"], + ), +) + +# This rules is used to bootstrap luarocks to install rocks dependencies +# A different rule is used to install luarocks in the release artifact +# so that we got correct interpreter path, lua paths, etc. 
+configure_make( + name = "luarocks_host", + configure_command = "configure", + configure_in_place = True, + configure_options = [ + "--lua-suffix=jit", + "--with-lua=$$EXT_BUILD_DEPS$$/luajit", + "--with-lua-include=$$EXT_BUILD_DEPS$$/luajit/include/luajit-2.1", + ], + lib_source = ":all_srcs", + out_bin_dir = "", + out_binaries = ["bin/luarocks"], # fake binary + out_data_dirs = ["luarocks"], # mark all files as data + targets = [ + "build", + "install", + ], + visibility = ["//visibility:public"], + deps = [ + "@openresty//:luajit", + ], +) + +kong_template_genrule( + name = "luarocks_exec", + srcs = [ + "@openssl//:openssl", + ] + select({ + "@kong//:any-cross": ["@cross_deps_libyaml//:libyaml"], + "//conditions:default": [ + "@luarocks//:luarocks_host", + "@openresty//:luajit", + ], + }), + is_executable = True, + output = "luarocks_exec.sh", + substitutions = { + "{{lib_rpath}}": "%s/kong/lib" % KONG_VAR["INSTALL_DESTDIR"], + }, + template = "@//build/luarocks:templates/luarocks_exec.sh", + tools = select({ + "@kong//:any-cross": [ + "@luarocks//:luarocks_host", + "@openresty//:luajit", + ], + "//conditions:default": [], + }), + visibility = ["//visibility:public"], +) + +kong_template_genrule( + name = "luarocks_make", + srcs = [ + "@kong//:rockspec_srcs", + "@luarocks//:luarocks_exec", + "@luarocks//:luarocks_target", # to avoid concurrency issue, run this after luarocks_target + ], + is_executable = True, + output = "luarocks_make.log", + progress_message = "Luarocks: Install Kong rocks dependencies", + template = "@//build/luarocks:templates/luarocks_make.sh", + visibility = ["//visibility:public"], +) + +# install luarocks itself in target configuration +kong_template_genrule( + name = "luarocks_target", + srcs = [":luarocks_exec"] + select({ + "@kong//:any-cross": [], + "//conditions:default": [ + "@luarocks//:luarocks_host", + "@openresty//:luajit", + ], + }), + is_executable = True, + output = "luarocks_target.log", + progress_message = "Luarocks: Install luarocks on target system", + substitutions = { + "{{build_destdir}}": KONG_VAR["BUILD_DESTDIR"], + "{{install_destdir}}": KONG_VAR["INSTALL_DESTDIR"], + "{{luarocks_version}}": KONG_VAR["LUAROCKS"], + "{{workspace_path}}": KONG_VAR["WORKSPACE_PATH"], + }, + template = "@//build/luarocks:templates/luarocks_target.sh", + tools = [ + "@//build/luarocks:luarocks_wrap_script.lua", + ] + select({ + "@//:any-cross": [ + "@luarocks//:luarocks_host", + "@openresty//:luajit", + ], + "//conditions:default": [], + }), + visibility = ["//visibility:public"], +) diff --git a/build/luarocks/luarocks_repositories.bzl b/build/luarocks/luarocks_repositories.bzl new file mode 100644 index 00000000000..87ad74f8515 --- /dev/null +++ b/build/luarocks/luarocks_repositories.bzl @@ -0,0 +1,19 @@ +"""A module defining the third party dependency luarocks""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +def luarocks_repositories(): + version = KONG_VAR["LUAROCKS"] + + maybe( + http_archive, + name = "luarocks", + build_file = "//build/luarocks:BUILD.luarocks.bazel", + strip_prefix = "luarocks-" + version, + sha256 = "56ab9b90f5acbc42eb7a94cf482e6c058a63e8a1effdf572b8b2a6323a06d923", + urls = [ + "https://luarocks.org/releases/luarocks-" + version + ".tar.gz", + ], + ) diff --git a/build/luarocks/luarocks_wrap_script.lua b/build/luarocks/luarocks_wrap_script.lua new file mode 100644 index 
00000000000..049f93e115c --- /dev/null +++ b/build/luarocks/luarocks_wrap_script.lua @@ -0,0 +1,40 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. +-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + +local cfg = require("luarocks.core.cfg") +assert(cfg.init()) +-- print(require("inspect")(cfg)) + +local fs = require "luarocks.fs" +fs.init() + +local queries = require("luarocks.queries") +local search = require("luarocks.search") + +local name = arg[1] +local tree = arg[2] +local install_dest = arg[3] + +local query = queries.new(name, nil, nil, true) + +local _, ver = assert(search.pick_installed_rock(query)) + +if install_dest:sub(-1) ~= "/" then + install_dest = install_dest .. "/" +end +-- HACK +cfg.lua_interpreter = "luajit" +cfg.sysconfdir = install_dest .. "etc/luarocks" +cfg.variables["LUA_DIR"] = install_dest .. "openresty/luajit" +cfg.variables["LUA_INCDIR"] = install_dest .. "openresty/luajit/include/luajit-2.1" +cfg.variables["LUA_BINDIR"] = install_dest .. "openresty/luajit/bin" + +local wrap = fs.wrap_script + +wrap( + string.format("%s/lib/luarocks/rocks-5.1/luarocks/%s/bin/%s", tree, ver, name), + string.format("%s/bin/%s", tree, name), "one", name, ver) diff --git a/build/luarocks/templates/luarocks_exec.sh b/build/luarocks/templates/luarocks_exec.sh new file mode 100644 index 00000000000..220d92eb6a4 --- /dev/null +++ b/build/luarocks/templates/luarocks_exec.sh @@ -0,0 +1,83 @@ +#!/bin/bash -e + +# template variables starts +openssl_path="{{@openssl//:openssl}}" +luarocks_host_path="{{@luarocks//:luarocks_host}}" +luajit_path="{{@openresty//:luajit}}" +cross_deps_libyaml_path="{{@cross_deps_libyaml//:libyaml}}" +CC={{CC}} +LD={{LD}} +LIB_RPATH={{lib_rpath}} +# template variables ends + +root_path=$(pwd) + +ROCKS_DIR=$root_path/$(dirname $@)/luarocks_tree +if [ ! -d $ROCKS_DIR ]; then + mkdir -p $ROCKS_DIR +fi +# pre create the dir and file so bsd readlink is happy +mkdir -p "$ROCKS_DIR/../cache" +CACHE_DIR=$(readlink -f "$ROCKS_DIR/../cache") +touch "$ROCKS_DIR/../luarocks_config.lua" +ROCKS_CONFIG=$(readlink -f "$ROCKS_DIR/../luarocks_config.lua") + +OPENSSL_DIR=$root_path/$openssl_path + +# we use system libyaml on macos +if [[ "$OSTYPE" == "darwin"* ]]; then + YAML_DIR=$(HOME=~$(whoami) PATH=/opt/homebrew/bin:$PATH brew --prefix)/opt/libyaml +elif [[ -d $cross_deps_libyaml_path ]]; then + # TODO: is there a good way to use locations but doesn't break non-cross builds? 
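+    # cross builds: point YAML_DIR at the libyaml tree staged by the cross_deps repository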
+ YAML_DIR=$root_path/$cross_deps_libyaml_path +else + YAML_DIR=/usr +fi + +if [[ $CC != /* ]]; then + # point to our relative path of managed toolchain + CC=$root_path/$CC + LD=$root_path/$LD +fi + +echo " +rocks_trees = { + { name = [[system]], root = [[$ROCKS_DIR]] } +} +local_cache = '$CACHE_DIR' +show_downloads = true +gcc_rpath = false -- disable default rpath, add our own +variables = { + CC = '$CC', + LD = '$LD', + LDFLAGS = '-Wl,-rpath,$LIB_RPATH', +} +" > $ROCKS_CONFIG + +LUAROCKS_HOST=$luarocks_host_path + +host_luajit=$root_path/$luajit_path/bin/luajit + +cat << EOF > $@ +LIB_RPATH=$LIB_RPATH +LUAROCKS_HOST=$LUAROCKS_HOST +ROCKS_DIR=$ROCKS_DIR +CACHE_DIR=$CACHE_DIR +ROCKS_CONFIG=$ROCKS_CONFIG + +export LUAROCKS_CONFIG=$ROCKS_CONFIG +export CC=$CC +export LD=$LD +export EXT_BUILD_ROOT=$root_path # for musl + +# no idea why PATH is not preserved in ctx.actions.run_shell +export PATH=$PATH + +# force the interpreter here instead of invoking luarocks directly, +# some distros has BINPRM_BUF_SIZE smaller than the shebang generated, +# which is usually more than 160 bytes +$host_luajit $root_path/$LUAROCKS_HOST/bin/luarocks \$private_rocks_args \$@ \\ + OPENSSL_DIR=$OPENSSL_DIR \\ + CRYPTO_DIR=$OPENSSL_DIR \\ + YAML_DIR=$YAML_DIR +EOF diff --git a/build/luarocks/templates/luarocks_make.sh b/build/luarocks/templates/luarocks_make.sh new file mode 100644 index 00000000000..dc5d6105f3c --- /dev/null +++ b/build/luarocks/templates/luarocks_make.sh @@ -0,0 +1,21 @@ +#!/bin/bash -e + +# template variables starts +luarocks_exec="{{@luarocks//:luarocks_exec}}" +# template variables ends + +if [[ "$OSTYPE" == "darwin"* ]]; then + export DEVELOPER_DIR=$(xcode-select -p) + export SDKROOT=$(xcrun --sdk macosx --show-sdk-path) +fi +mkdir -p $(dirname $@) +# lyaml needs this and doesn't honor --no-doc +# the alternate will populate a non-existent HOME +# env var just to let ldoc happy +# alias LDOC command to true(1) command +export LDOC=true + +$luarocks_exec make --no-doc 2>&1 >$@.tmp + +# only generate the output when the command succeeds +mv $@.tmp $@ \ No newline at end of file diff --git a/build/luarocks/templates/luarocks_target.sh b/build/luarocks/templates/luarocks_target.sh new file mode 100644 index 00000000000..f84d52dcb4c --- /dev/null +++ b/build/luarocks/templates/luarocks_target.sh @@ -0,0 +1,59 @@ +#!/bin/bash -e + +# template variables starts +workspace_path="{{workspace_path}}" +luarocks_version="{{luarocks_version}}" +install_destdir="{{install_destdir}}" +build_destdir="{{build_destdir}}" + +luarocks_exec="{{@luarocks//:luarocks_exec}}" +luajit_path="{{@openresty//:luajit}}" +luarocks_host_path="{{@luarocks//:luarocks_host}}" +luarocks_wrap_script="{{@//build/luarocks:luarocks_wrap_script.lua}}" +# template variables ends + +mkdir -p $(dirname $@) + + +# install luarocks +$luarocks_exec install "luarocks $luarocks_version" + +# use host configuration to invoke luarocks API to wrap a correct bin/luarocks script +rocks_tree=$workspace_path/$(dirname $luarocks_exec)/luarocks_tree +host_luajit=$workspace_path/$luajit_path/bin/luajit + +host_luarocks_tree=$luarocks_host_path +export LUA_PATH="$build_destdir/share/lua/5.1/?.lua;$build_destdir/share/lua/5.1/?/init.lua;$host_luarocks_tree/share/lua/5.1/?.lua;$host_luarocks_tree/share/lua/5.1/?/init.lua;;" + +ROCKS_CONFIG="luarocks_make_config.lua" +cat << EOF > $ROCKS_CONFIG +rocks_trees = { + { name = [[system]], root = [[$rocks_tree]] } +} +EOF +export LUAROCKS_CONFIG=$ROCKS_CONFIG + +$host_luajit $luarocks_wrap_script \ + luarocks 
$rocks_tree $install_destdir 2>&1 > $@.tmp + +# write the luarocks config with host configuration +mkdir -p $rocks_tree/etc/luarocks +cat << EOF > $rocks_tree/etc/luarocks/config-5.1.lua +-- LuaRocks configuration +rocks_trees = { + { name = "user", root = home .. "/.luarocks" }; + { name = "system", root = "$install_destdir" }; + } + lua_interpreter = "luajit"; + variables = { + LUA_DIR = "$install_destdir/openresty/luajit"; + LUA_INCDIR = "$install_destdir/openresty/luajit/include/luajit-2.1"; + LUA_BINDIR = "$install_destdir/openresty/luajit/bin"; +} +EOF + +# TODO: this still doesn't work +sed -i -e "s|$rocks_tree|$install_destdir|g" $rocks_tree/bin/luarocks + +# only generate the output when the command succeeds +mv $@.tmp $@ \ No newline at end of file diff --git a/build/nfpm/BUILD.bazel b/build/nfpm/BUILD.bazel new file mode 100644 index 00000000000..d70ebc0efe1 --- /dev/null +++ b/build/nfpm/BUILD.bazel @@ -0,0 +1,5 @@ +filegroup( + name = "all_srcs", + srcs = glob(["**"]), + visibility = ["//visibility:public"], +) diff --git a/build/nfpm/repositories.bzl b/build/nfpm/repositories.bzl new file mode 100644 index 00000000000..3f4f1a4e974 --- /dev/null +++ b/build/nfpm/repositories.bzl @@ -0,0 +1,55 @@ +"""A module defining the third party dependency OpenResty""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def _nfpm_release_select_impl(ctx): + if ctx.attr.build_file: + ctx.file("BUILD.bazel", ctx.read(ctx.attr.build_file)) + elif ctx.attr.build_file_content: + ctx.file("BUILD.bazel", ctx.attr.build_file_content) + + os_name = ctx.os.name + os_arch = ctx.os.arch + + if os_arch == "aarch64": + os_arch = "arm64" + elif os_arch == "amd64": + os_arch = "x86_64" + else: + fail("Unsupported arch %s" % os_arch) + + if os_name == "mac os x": + os_name = "Darwin" + elif os_name != "linux": + fail("Unsupported OS %s" % os_name) + + nfpm_bin = "%s" % ctx.path(Label("@nfpm_%s_%s//:nfpm" % (os_name, os_arch))) + ctx.symlink(nfpm_bin, "nfpm") + +nfpm_release_select = repository_rule( + implementation = _nfpm_release_select_impl, + attrs = { + "build_file": attr.label(allow_single_file = True), + "build_file_content": attr.string(), + }, +) + +def nfpm_repositories(): + gh_matrix = [ + ["linux", "x86_64", "4c63031ddbef198e21c8561c438dde4c93c3457ffdc868d7d28fa670e0cc14e5"], + ["linux", "arm64", "2af1717cc9d5dcad5a7e42301dabc538acf5d12ce9ee39956c66f30215311069"], + ["Darwin", "x86_64", "fb3b8ab5595117f621c69cc51db71d481fbe733fa3c35500e1b64319dc8fd5b4"], + ["Darwin", "arm64", "9ca3ac6e0c4139a9de214f78040d1d11dd221496471696cc8ab5d357850ccc54"], + ] + for name, arch, sha in gh_matrix: + http_archive( + name = "nfpm_%s_%s" % (name, arch), + url = "https://github.com/goreleaser/nfpm/releases/download/v2.23.0/nfpm_2.23.0_%s_%s.tar.gz" % (name, arch), + sha256 = sha, + build_file = "//build/nfpm:BUILD.bazel", + ) + + nfpm_release_select( + name = "nfpm", + build_file = "//build/nfpm:BUILD.bazel", + ) diff --git a/build/nfpm/rules.bzl b/build/nfpm/rules.bzl new file mode 100644 index 00000000000..23fe413d224 --- /dev/null +++ b/build/nfpm/rules.bzl @@ -0,0 +1,90 @@ +""" +NFPM package rule. 
+""" + +load("@bazel_skylib//lib:dicts.bzl", "dicts") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +def _nfpm_pkg_impl(ctx): + env = dicts.add(ctx.attr.env, ctx.attr.extra_env, KONG_VAR, ctx.configuration.default_shell_env) + + target_cpu = ctx.attr._cc_toolchain[cc_common.CcToolchainInfo].cpu + if target_cpu == "k8" or target_cpu == "x86_64" or target_cpu == "amd64": + target_arch = "amd64" + else: + fail("Unsupported platform cpu: %s" % target_cpu) + env["ARCH"] = target_arch + + # XXX: remove the "env" from KONG_VAR which is a list + env["OPENRESTY_PATCHES"] = "" + + pkg_ext = ctx.attr.packager + if pkg_ext == "apk": + pkg_ext = "apk.tar.gz" + + # create like kong.amd64.deb + out = ctx.actions.declare_file("%s/%s.%s.%s" % ( + ctx.attr.out_dir, + ctx.attr.pkg_name, + target_arch, + pkg_ext, + )) + + nfpm_args = ctx.actions.args() + nfpm_args.add("pkg") + nfpm_args.add("-f", ctx.file.config.path) + nfpm_args.add("-p", ctx.attr.packager) + nfpm_args.add("-t", out.path) + + ctx.actions.run_shell( + inputs = ctx.files._nfpm_bin, + mnemonic = "nFPM", + command = "ln -sf %s nfpm-prefix; external/nfpm/nfpm $@" % KONG_VAR["BUILD_DESTDIR"], + arguments = [nfpm_args], + outputs = [out], + env = env, + ) + + # TODO: fix runfiles so that it can used as a dep + return [DefaultInfo(files = depset([out]), runfiles = ctx.runfiles(files = ctx.files.config + ctx.files.deps))] + +nfpm_pkg = rule( + _nfpm_pkg_impl, + attrs = { + "config": attr.label( + mandatory = True, + allow_single_file = True, + doc = "nFPM configuration file.", + ), + "packager": attr.string( + mandatory = True, + doc = "Packager name.", + ), + "env": attr.string_dict( + doc = "Environment variables to set when running nFPM.", + ), + "extra_env": attr.string_dict( + # https://github.com/bazelbuild/bazel/issues/12457 + doc = "Additional environment variables to set when running nFPM. 
This is a workaround since Bazel doesn't support union operator for select yet.", + ), + "pkg_name": attr.string( + mandatory = True, + doc = "Output package name.", + ), + "out_dir": attr.string( + doc = "Output directory name.", + default = "pkg", + ), + "deps": attr.label_list( + mandatory = False, + doc = "extra deps should exist in the package", + ), + # hidden attributes + "_nfpm_bin": attr.label( + default = "@nfpm//:all_srcs", + ), + "_cc_toolchain": attr.label( + default = "@bazel_tools//tools/cpp:current_cc_toolchain", + ), + }, +) diff --git a/build/openresty/BUILD.bazel b/build/openresty/BUILD.bazel new file mode 100644 index 00000000000..c527359e1f0 --- /dev/null +++ b/build/openresty/BUILD.bazel @@ -0,0 +1,6 @@ +exports_files( + [ + "BUILD.openresty.bazel", + ], + visibility = ["//visibility:public"], +) diff --git a/build/openresty/BUILD.openresty.bazel b/build/openresty/BUILD.openresty.bazel new file mode 100644 index 00000000000..cc4107cabc7 --- /dev/null +++ b/build/openresty/BUILD.openresty.bazel @@ -0,0 +1,262 @@ +load("@rules_foreign_cc//foreign_cc:defs.bzl", "configure_make", "make") +load("@bazel_skylib//lib:selects.bzl", "selects") +load("@kong_bindings//:variables.bzl", "KONG_VAR") +load("@openresty_binding//:variables.bzl", "LUAJIT_VERSION") + +filegroup( + name = "luajit_srcs", + srcs = glob( + include = ["bundle/LuaJIT*/**"], + ), +) + +genrule( + name = "luajit_xcflags", + outs = ["luajit_xcflags.txt"], + cmd = "macos=" + select({ + "@platforms//os:macos": "1", + "//conditions:default": "0", + }) + "\n" + + "aarch64=" + select({ + "@platforms//cpu:aarch64": "1", + "//conditions:default": "0", + }) + "\n" + + "debug=" + select({ + "@kong//:debug_flag": "1", + "//conditions:default": "0", + }) + "\n" + + "cross=" + select({ + "@kong//:any-cross": "1", + "//conditions:default": "0", + }) + + """ + flags="-DLUAJIT_ENABLE_LUA52COMPAT -DLUAJIT_VERSION=\\\\\\"{luajit_version}\\\\\\"" + if [[ $$debug -eq 1 ]]; then + flags="$$flags -DLUA_USE_ASSERT -DLUA_USE_APICHECK" + if [[ $$macos -ne 1 ]]; then + if [[ $$cross -ne 1 ]]; then + flags="$$flags -DLUA_USE_VALGRIND" + fi + if [[ $$aarch64 -ne 1 ]]; then + flags="$$flags -DLUAJIT_USE_SYSMALLOC" + fi + fi + fi + + if [[ $$macos -eq 1 ]]; then + flags="$$flags -fno-stack-check" + fi + + echo "$$flags" >$@ + + """.format(luajit_version = LUAJIT_VERSION), + toolchains = [ + "@bazel_tools//tools/cpp:current_cc_toolchain", + ], +) + +make( + name = "luajit", + args = [ + "LDFLAGS=\"-Wl,-rpath,%s/kong/lib\"" % ( + KONG_VAR["INSTALL_DESTDIR"], + ), # make ffi.load happy, even when it's invoked without nginx + "XCFLAGS=\"$(cat $$EXT_BUILD_ROOT$$/$(execpath :luajit_xcflags))\"", + "LUA_ROOT=%s/openresty/luajit" % KONG_VAR["INSTALL_DESTDIR"].rstrip("/"), + "MACOSX_DEPLOYMENT_TARGET=" + KONG_VAR["MACOSX_DEPLOYMENT_TARGET"], + ] + select({ + "@kong//:any-cross": [ + "HOST_CC=cc", + ], + "@platforms//os:macos": [ + "AR=/usr/bin/ar", + ], + "//conditions:default": [ + ], + }), + data = [ + ":luajit_xcflags", + ], + lib_source = ":luajit_srcs", + out_binaries = [ + "luajit", + ], + out_shared_libs = select({ + "@platforms//os:macos": [ + "libluajit-5.1.2.dylib", + ], + "//conditions:default": [ + "libluajit-5.1.so.2", + ], + }), + targets = [ + "-j" + KONG_VAR["NPROC"], + "install", + ], + visibility = ["//visibility:public"], + deps = [ + ], +) + +selects.config_setting_group( + name = "nogroup-name-as-nobody", + match_any = [ + "@kong//build/platforms/distro:rhel9", + "@kong//build/platforms/distro:rhel8", + 
"@kong//build/platforms/distro:aws2023", + "@kong//build/platforms/distro:aws2", + ], +) + +selects.config_setting_group( + name = "needs-xcrypt2", + match_any = [ + "@kong//build/platforms/distro:generic", + "@kong//build/platforms/distro:rhel9", + "@kong//build/platforms/distro:aws2023", + ], +) + +CONFIGURE_OPTIONS = [ + "--with-pcre-jit", + "--with-http_ssl_module", + "--with-http_sub_module", + "--with-http_realip_module", + "--with-http_stub_status_module", + "--with-http_v2_module", + "--with-stream_realip_module", # >= 1.11.4 + "--with-stream_ssl_preread_module", # >= 1.11.5 + "--without-http_encrypted_session_module", + "--with-luajit=$$EXT_BUILD_DEPS$$/luajit", + "--with-cc-opt=\"-I$$EXT_BUILD_DEPS$$/pcre/include\"", + "--with-cc-opt=\"-I$$EXT_BUILD_DEPS$$/luajit/include\"", + "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/pcre/lib\"", + "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/luajit/lib\"", + "--with-cc-opt=\"-I$$EXT_BUILD_DEPS$$/openssl/include\"", + "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/openssl/lib\"", + # here let's try not having --disable-new-dtags; --disable-new-dtags creates runpath instead of rpath + # note runpath can't handle indirect dependency (nginx -> luajit -> dlopen("other")), so each indirect + # dependency should have its rpath set (luajit, libxslt etc); on the other side, rpath is not + # overridable by LD_LIBRARY_PATH and it may cause trouble debugging, so we _should_ prefer runpath. + # if it doesn't work, then add --disable-new-dtags back + "--with-ld-opt=\"-Wl,-rpath,%s/kong/lib\"" % KONG_VAR["INSTALL_DESTDIR"], + "-j%s" % KONG_VAR["NPROC"], + + # options from our customed patch + "--with-install-prefix=%s" % KONG_VAR["INSTALL_DESTDIR"], + + # Note $$EXT_BUILD_ROOT$$ is bazel variable not from environment variable + # which points to the directory of current WORKSPACE + + # external modules + "--add-module=$$EXT_BUILD_ROOT$$/external/lua-kong-nginx-module", + "--add-module=$$EXT_BUILD_ROOT$$/external/lua-kong-nginx-module/stream", +] + select({ + "@kong//:aarch64-linux-anylibc-cross": [ + "--crossbuild=Linux:aarch64", + "--with-endian=little", + "--with-int=4", + "--with-long=8", + "--with-long-long=8", + "--with-ptr-size=8", + "--with-sig-atomic-t=4", + "--with-size-t=8", + "--with-off-t=8", + "--with-time-t=8", + "--with-sys-nerr=132", + ], + "@kong//:x86_64-linux-musl-cross": [ + "--crossbuild=Linux:x86_64", + "--with-endian=little", + "--with-int=4", + "--with-long=8", + "--with-long-long=8", + "--with-ptr-size=8", + "--with-sig-atomic-t=4", + "--with-size-t=8", + "--with-off-t=8", + "--with-time-t=8", + "--with-sys-nerr=132", + ], + "//conditions:default": [], +}) + select({ + "@kong//:any-cross": [ + "--with-cc-opt=\"-I$$EXT_BUILD_DEPS$$/zlib/include\"", + "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/zlib/lib\"", + ], + "//conditions:default": [], +}) + select({ + # any cross build that migrated to use libxcrypt needs those flags + # alpine uses different libc so doesn't need it + "@kong//:aarch64-linux-anylibc-cross": [ + "--with-cc-opt=\"-I$$EXT_BUILD_DEPS$$/libxcrypt/include\"", + "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/libxcrypt/lib\"", + ], + "//conditions:default": [], +}) + select({ + "@kong//:debug_flag": [ + "--with-debug", + "--with-no-pool-patch", + "--with-cc-opt=\"-DNGX_LUA_USE_ASSERT -DNGX_LUA_ABORT_AT_PANIC -O0\"", + ], + "//conditions:default": [], +}) + select({ + # some distros name "nogroup" group name as "nobody" + ":nogroup-name-as-nobody": [ + "--group=nobody", + ], + "//conditions:default": [], +}) + +# TODO: set prefix to populate pid_path, 
conf_path, log_path etc + +filegroup( + name = "all_srcs", + srcs = glob( + include = [ + "configure", + "bundle/**", + ], + exclude = [ + "bundle/LuaJIT*/**", + ], + ), +) + +configure_make( + name = "openresty", + build_data = [ + "@lua-kong-nginx-module//:all_srcs", + "@openresty_binding//:all_srcs", + ], + configure_command = "configure", + configure_in_place = True, + configure_options = CONFIGURE_OPTIONS, + lib_source = ":all_srcs", + out_bin_dir = "", + out_binaries = [ + "nginx/sbin/nginx", + ], + targets = [ + "-j" + KONG_VAR["NPROC"], + "install -j" + KONG_VAR["NPROC"], + ], + visibility = ["//visibility:public"], + deps = [ + "@pcre", + "@openssl", + "@openresty//:luajit", + ] + select({ + "@kong//:any-cross": [ + "@cross_deps_zlib//:zlib", + ], + "//conditions:default": [], + }) + select({ + # any cross build that migrated to use libxcrypt needs those flags + # alpine uses different libc so doesn't need it + ":needs-xcrypt2": [ + "@cross_deps_libxcrypt//:libxcrypt", + ], + "//conditions:default": [], + }), +) diff --git a/build/openresty/atc_router/BUILD.bazel b/build/openresty/atc_router/BUILD.bazel new file mode 100644 index 00000000000..e69de29bb2d diff --git a/build/openresty/lua-resty-lmdb-cross.patch b/build/openresty/lua-resty-lmdb-cross.patch new file mode 100644 index 00000000000..d1bf0820f57 --- /dev/null +++ b/build/openresty/lua-resty-lmdb-cross.patch @@ -0,0 +1,51 @@ +lua-resty-lmdb is an external repository, previous artifact may carry +thus we always clean here + +diff --git a/config b/config +index 126c78c..1f0b2aa 100644 +--- a/config ++++ b/config +@@ -5,6 +5,8 @@ ngx_module_incs="$ngx_addon_dir/lmdb/libraries/liblmdb $ngx_addon_dir/src" + + . auto/module + ++rm -f $ngx_addon_dir/lmdb/libraries/liblmdb/liblmdb.a ++ + LINK_DEPS="$LINK_DEPS $ngx_addon_dir/lmdb/libraries/liblmdb/liblmdb.a" + CORE_LIBS="$CORE_LIBS $ngx_addon_dir/lmdb/libraries/liblmdb/liblmdb.a" + +diff --git a/config.make b/config.make +index 14d8cc2..cf17251 100644 +--- a/config.make ++++ b/config.make +@@ -3,7 +3,7 @@ cat <>$NGX_MAKEFILE + + $ngx_addon_dir/lmdb/libraries/liblmdb/liblmdb.a: + echo "Building liblmdb"; \\ +- \$(MAKE) -C $ngx_addon_dir/lmdb/libraries/liblmdb; \\ ++ \$(MAKE) -C $ngx_addon_dir/lmdb/libraries/liblmdb CC=\$(CC) AR=\$(AR); \\ + echo "Finished building liblmdb" + + EOF +diff --git a/libraries/liblmdb/Makefile b/libraries/liblmdb/Makefile +index c252b50..1054432 100644 +--- a/lmdb/libraries/liblmdb/Makefile ++++ b/lmdb/libraries/liblmdb/Makefile +@@ -18,13 +18,13 @@ + # There may be other macros in mdb.c of interest. You should + # read mdb.c before changing any of them. 
+ # +-CC = gcc +-AR = ar ++CC ?= gcc ++AR ?= ar + W = -W -Wall -Wno-unused-parameter -Wbad-function-cast -Wuninitialized + THREADS = -pthread + OPT = -O2 -g +-CFLAGS = $(THREADS) $(OPT) $(W) $(XCFLAGS) +-LDFLAGS = $(THREADS) ++CFLAGS += $(THREADS) $(OPT) $(W) $(XCFLAGS) ++LDFLAGS += $(THREADS) + LDLIBS = + SOLIBS = + SOEXT = .so diff --git a/build/openresty/openssl/BUILD.bazel b/build/openresty/openssl/BUILD.bazel new file mode 100644 index 00000000000..5970c67b233 --- /dev/null +++ b/build/openresty/openssl/BUILD.bazel @@ -0,0 +1,5 @@ +load("@kong//build/openresty/openssl:openssl.bzl", "build_openssl") + +build_openssl( + name = "openssl", +) diff --git a/build/openresty/openssl/README.md b/build/openresty/openssl/README.md new file mode 100644 index 00000000000..8cc90c255fa --- /dev/null +++ b/build/openresty/openssl/README.md @@ -0,0 +1,10 @@ +This target is modified from https://github.com/bazelbuild/rules_foreign_cc/tree/main/examples/third_party +with following changes: + +- Read version from requirements.txt +- Updated `build_file` to new path under //build/openresty +- Remove Windows build support +- Removed the bazel mirror as it's missing latest versions +- Remove runnable test for now until cross compile has been sorted out +- Use system Perl for now +- Updated to be reusable \ No newline at end of file diff --git a/build/openresty/openssl/openssl.bzl b/build/openresty/openssl/openssl.bzl new file mode 100644 index 00000000000..62aa72f34dc --- /dev/null +++ b/build/openresty/openssl/openssl.bzl @@ -0,0 +1,82 @@ +"""An openssl build file based on a snippet found in the github issue: +https://github.com/bazelbuild/rules_foreign_cc/issues/337 + +Note that the $(PERL) "make variable" (https://docs.bazel.build/versions/main/be/make-variables.html) +is populated by the perl toolchain provided by rules_perl. +""" + +load("@rules_foreign_cc//foreign_cc:defs.bzl", "configure_make") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +# Read https://wiki.openssl.org/index.php/Compilation_and_Installation + +CONFIGURE_OPTIONS = select({ + "@kong//:aarch64-linux-anylibc-cross": [ + "linux-aarch64", + ], + # no extra args needed for "@kong//:x86_64-linux-musl-cross" or non-cross builds + "//conditions:default": [], +}) + [ + "-g", + "-O3", # force -O3 even we are using --debug (for example on CI) + "shared", + "-DPURIFY", + "no-threads", + "no-tests", + "--prefix=%s/kong" % KONG_VAR["INSTALL_DESTDIR"], + "--openssldir=%s/kong" % KONG_VAR["INSTALL_DESTDIR"], + "--libdir=lib", # force lib instead of lib64 (multilib postfix) + "-Wl,-rpath,%s/kong/lib" % KONG_VAR["INSTALL_DESTDIR"], +] + select({ + "@kong//:debug_flag": ["--debug"], + "//conditions:default": [], +}) + +def build_openssl( + name = "openssl"): + extra_make_targets = [] + extra_configure_options = [] + + native.filegroup( + name = name + "-all_srcs", + srcs = native.glob( + include = ["**"], + exclude = ["*.bazel"], + ), + ) + + configure_make( + name = name, + configure_command = "config", + configure_in_place = True, + configure_options = CONFIGURE_OPTIONS + extra_configure_options, + env = select({ + "@platforms//os:macos": { + "AR": "/usr/bin/ar", + }, + "//conditions:default": {}, + }), + lib_source = ":%s-all_srcs" % name, + out_binaries = ["openssl"], + # Note that for Linux builds, libssl must come before libcrypto on the linker command-line. 
+ # As such, libssl must be listed before libcrypto + out_shared_libs = select({ + "@platforms//os:macos": [ + "libssl.1.1.dylib", + "libcrypto.1.1.dylib", + ], + "//conditions:default": [ + "libssl.so.1.1", + "libcrypto.so.1.1", + ], + }), + targets = [ + "-j" + KONG_VAR["NPROC"], + # don't set the prefix by --prefix switch, but only override the install destdir using INSTALLTOP + # while install. this makes both bazel and openssl (build time generated) paths happy. + "install_sw INSTALLTOP=$BUILD_TMPDIR/$INSTALL_PREFIX", + ] + extra_make_targets, + # TODO: uncomment this to allow bazel build a perl if not installed on system + # toolchains = ["@rules_perl//:current_toolchain"], + visibility = ["//visibility:public"], + ) diff --git a/build/openresty/openssl/openssl_repositories.bzl b/build/openresty/openssl/openssl_repositories.bzl new file mode 100644 index 00000000000..c549b59fb1b --- /dev/null +++ b/build/openresty/openssl/openssl_repositories.bzl @@ -0,0 +1,21 @@ +"""A module defining the third party dependency OpenSSL""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +def openssl_repositories(): + version = KONG_VAR["OPENSSL"] + version_github = version.replace(".", "_") + + maybe( + http_archive, + name = "openssl", + build_file = "//build/openresty/openssl:BUILD.bazel", + sha256 = "9384a2b0570dd80358841464677115df785edb941c71211f75076d72fe6b438f", + strip_prefix = "openssl-" + version, + urls = [ + "https://www.openssl.org/source/openssl-" + version + ".tar.gz", + "https://github.com/openssl/openssl/archive/OpenSSL_" + version_github + ".tar.gz", + ], + ) diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_01-ffi-arm64-macos-fix-vararg-call-handling.patch b/build/openresty/patches/LuaJIT-2.1-20210510_01-ffi-arm64-macos-fix-vararg-call-handling.patch new file mode 100644 index 00000000000..9047d7c86d1 --- /dev/null +++ b/build/openresty/patches/LuaJIT-2.1-20210510_01-ffi-arm64-macos-fix-vararg-call-handling.patch @@ -0,0 +1,62 @@ +From 521b367567dc5d91d7f9ae29c257998953e24e53 Mon Sep 17 00:00:00 2001 +From: Mike Pall +Date: Sun, 2 May 2021 22:11:05 +0200 +Subject: [PATCH] FFI/ARM64/OSX: Fix vararg call handling. + +Thanks to Igor Munkin. +--- + LuaJIT-2.1-20210510/src/lj_ccall.c | 8 ++++---- + LuaJIT-2.1-20210510/src/lj_ccallback.c | 2 +- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/LuaJIT-2.1-20210510/src/lj_ccall.c b/LuaJIT-2.1-20210510/src/lj_ccall.c +index a91ffc7e..3c029823 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_ccall.c ++++ b/bundle/LuaJIT-2.1-20210510/src/lj_ccall.c +@@ -334,7 +334,7 @@ + isfp = sz == 2*sizeof(float) ? 2 : 1; + + #define CCALL_HANDLE_REGARG \ +- if (LJ_TARGET_IOS && isva) { \ ++ if (LJ_TARGET_OSX && isva) { \ + /* IOS: All variadic arguments are on the stack. */ \ + } else if (isfp) { /* Try to pass argument in FPRs. */ \ + int n2 = ctype_isvector(d->info) ? 1 : \ +@@ -345,10 +345,10 @@ + goto done; \ + } else { \ + nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \ +- if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \ ++ if (LJ_TARGET_OSX && d->size < 8) goto err_nyi; \ + } \ + } else { /* Try to pass argument in GPRs. */ \ +- if (!LJ_TARGET_IOS && (d->info & CTF_ALIGN) > CTALIGN_PTR) \ ++ if (!LJ_TARGET_OSX && (d->info & CTF_ALIGN) > CTALIGN_PTR) \ + ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. 
*/ \ + if (ngpr + n <= maxgpr) { \ + dp = &cc->gpr[ngpr]; \ +@@ -356,7 +356,7 @@ + goto done; \ + } else { \ + ngpr = maxgpr; /* Prevent reordering. */ \ +- if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \ ++ if (LJ_TARGET_OSX && d->size < 8) goto err_nyi; \ + } \ + } + +diff --git a/LuaJIT-2.1-20210510/src/lj_ccallback.c b/LuaJIT-2.1-20210510/src/lj_ccallback.c +index 8d6cb737..80d738c6 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_ccallback.c ++++ b/bundle/LuaJIT-2.1-20210510/src/lj_ccallback.c +@@ -460,7 +460,7 @@ void lj_ccallback_mcode_free(CTState *cts) + nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \ + } \ + } else { \ +- if (!LJ_TARGET_IOS && n > 1) \ ++ if (!LJ_TARGET_OSX && n > 1) \ + ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \ + if (ngpr + n <= maxgpr) { \ + sp = &cts->cb.gpr[ngpr]; \ +-- +2.34.1 + diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_02-arm64-fix-pcall-error-case.patch b/build/openresty/patches/LuaJIT-2.1-20210510_02-arm64-fix-pcall-error-case.patch new file mode 100644 index 00000000000..37934cee30d --- /dev/null +++ b/build/openresty/patches/LuaJIT-2.1-20210510_02-arm64-fix-pcall-error-case.patch @@ -0,0 +1,29 @@ +From b4b2dce9fc3ffaaaede39b36d06415311e2aa516 Mon Sep 17 00:00:00 2001 +From: Mike Pall +Date: Wed, 27 Oct 2021 21:56:07 +0200 +Subject: [PATCH] ARM64: Fix pcall() error case. + +Reported by Alex Orlenko. +--- + src/vm_arm64.dasc | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/LuaJIT-2.1-20210510/src/vm_arm64.dasc b/LuaJIT-2.1-20210510/src/vm_arm64.dasc +index c7090ca3..eb87857f 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/vm_arm64.dasc ++++ b/bundle/LuaJIT-2.1-20210510/src/vm_arm64.dasc +@@ -1163,9 +1163,10 @@ static void build_subroutines(BuildCtx *ctx) + |//-- Base library: catch errors ---------------------------------------- + | + |.ffunc pcall ++ | cmp NARGS8:RC, #8 + | ldrb TMP0w, GL->hookmask +- | subs NARGS8:RC, NARGS8:RC, #8 + | blo ->fff_fallback ++ | sub NARGS8:RC, NARGS8:RC, #8 + | mov RB, BASE + | add BASE, BASE, #16 + | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1 +-- +2.34.1 + diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch b/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch new file mode 100644 index 00000000000..afe165ab78a --- /dev/null +++ b/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch @@ -0,0 +1,40 @@ +diff --git a/bundle/LuaJIT-2.1-20210510/src/Makefile b/bundle/LuaJIT-2.1-20210510/src/Makefile +index 47a21c9..c60b94e 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/Makefile ++++ b/bundle/LuaJIT-2.1-20210510/src/Makefile +@@ -27,7 +27,8 @@ NODOTABIVER= 51 + DEFAULT_CC = gcc + # + # LuaJIT builds as a native 32 or 64 bit binary by default. +-CC= $(DEFAULT_CC) ++CC?= $(DEFAULT_CC) ++AR?= ar + # + # Use this if you want to force a 32 bit build on a 64 bit multilib OS. 
+ #CC= $(DEFAULT_CC) -m32 +@@ -211,7 +212,7 @@ TARGET_CC= $(STATIC_CC) + TARGET_STCC= $(STATIC_CC) + TARGET_DYNCC= $(DYNAMIC_CC) + TARGET_LD= $(CROSS)$(CC) +-TARGET_AR= $(CROSS)ar rcus 2>/dev/null ++TARGET_AR= $(CROSS)$(AR) rcus 2>/dev/null + TARGET_STRIP= $(CROSS)strip + + TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib) +@@ -291,11 +292,11 @@ TARGET_XCFLAGS+= $(CCOPT_$(TARGET_LJARCH)) + TARGET_ARCH+= $(patsubst %,-DLUAJIT_TARGET=LUAJIT_ARCH_%,$(TARGET_LJARCH)) + + ifneq (,$(PREFIX)) +-ifneq (/usr/local,$(PREFIX)) +- TARGET_XCFLAGS+= -DLUA_ROOT=\"$(PREFIX)\" +- ifneq (/usr,$(PREFIX)) +- TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH) +- endif ++ifneq (/usr/local,$(LUA_ROOT)) ++ TARGET_XCFLAGS+= -DLUA_ROOT=\"$(LUA_ROOT)\" ++endif ++ifneq (/usr,$(PREFIX)) ++ TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH) + endif + endif + ifneq (,$(MULTILIB)) \ No newline at end of file diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_05_Revert_Detect_SSE4.2_support_dynamically.patch b/build/openresty/patches/LuaJIT-2.1-20210510_05_Revert_Detect_SSE4.2_support_dynamically.patch new file mode 100644 index 00000000000..20cb556d493 --- /dev/null +++ b/build/openresty/patches/LuaJIT-2.1-20210510_05_Revert_Detect_SSE4.2_support_dynamically.patch @@ -0,0 +1,562 @@ +From db0824835876d11bf88b0c8ad9791019ea969ef7 Mon Sep 17 00:00:00 2001 +From: Zhongwei Yao +Date: Fri, 31 May 2024 11:39:51 -0700 +Subject: [PATCH] Revert "Detect SSE4.2 support dynamically" + +This reverts commit 34b63ba83542cad8675f875c9aa849653ead378d. +--- + src/Makefile | 18 +-- + src/lj_arch.h | 4 - + src/lj_init.c | 69 ------------ + src/lj_jit.h | 1 - + src/lj_str.c | 25 ++--- + src/lj_str.h | 12 -- + src/ljamalg.c | 1 + + .../src/lj_str_hash_x64.h} | 106 +++++++----------- + src/x64/test/benchmark.cxx | 13 +-- + src/x64/test/test.cpp | 10 +- + 10 files changed, 64 insertions(+), 195 deletions(-) + delete mode 100644 src/lj_init.c + rename src/{lj_str_hash.c => x64/src/lj_str_hash_x64.h} (76%) + +diff --git a/bundle/LuaJIT-2.1-20210510/src/Makefile b/bundle/LuaJIT-2.1-20210510/src/Makefile +index 287e4cd2..9fcf9316 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/Makefile ++++ b/bundle/LuaJIT-2.1-20210510/src/Makefile +@@ -508,16 +508,10 @@ + lj_ctype.o lj_cdata.o lj_cconv.o lj_ccall.o lj_ccallback.o \ + lj_carith.o lj_clib.o lj_cparse.o \ + lj_lib.o lj_alloc.o lib_aux.o \ +- $(LJLIB_O) lib_init.o lj_str_hash.o +- +-ifeq (x64,$(TARGET_LJARCH)) +- lj_str_hash-CFLAGS = -msse4.2 +-endif +- +-F_CFLAGS = $($(patsubst %.c,%-CFLAGS,$<)) ++ $(LJLIB_O) lib_init.o + + LJVMCORE_O= $(LJVM_O) $(LJCORE_O) +-LJVMCORE_DYNO= $(LJVMCORE_O:.o=_dyn.o) lj_init_dyn.o ++LJVMCORE_DYNO= $(LJVMCORE_O:.o=_dyn.o) + + LIB_VMDEF= jit/vmdef.lua + LIB_VMDEFP= $(LIB_VMDEF) +@@ -539,7 +533,7 @@ + ############################################################################## + + # Mixed mode defaults. 
+-TARGET_O= lj_init.o $(LUAJIT_A) ++TARGET_O= $(LUAJIT_A) + TARGET_T= $(LUAJIT_T) $(LUAJIT_SO) + TARGET_DEP= $(LIB_VMDEF) $(LUAJIT_SO) + +@@ -621,7 +615,7 @@ + default all: $(TARGET_T) + + amalg: +- $(MAKE) all "LJCORE_O=ljamalg.o lj_str_hash.o" ++ $(MAKE) all "LJCORE_O=ljamalg.o" + + clean: + $(HOST_RM) $(ALL_RM) +@@ -698,8 +692,8 @@ + + %.o: %.c + $(E) "CC $@" +- $(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) $(F_CFLAGS) -c -o $(@:.o=_dyn.o) $< +- $(Q)$(TARGET_CC) $(TARGET_ACFLAGS) $(F_CFLAGS) -c -o $@ $< ++ $(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $< ++ $(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $< + + %.o: %.S + $(E) "ASM $@" +diff --git a/bundle/LuaJIT-2.1-20210510/src/lj_arch.h b/bundle/LuaJIT-2.1-20210510/src/lj_arch.h +index 326c7148..1ea68032 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_arch.h ++++ b/bundle/LuaJIT-2.1-20210510/src/lj_arch.h +@@ -209,10 +209,6 @@ + #define LJ_TARGET_GC64 1 + #endif + +-#ifdef __GNUC__ +-#define LJ_HAS_OPTIMISED_HASH 1 +-#endif +- + #elif LUAJIT_TARGET == LUAJIT_ARCH_ARM + + #define LJ_ARCH_NAME "arm" +diff --git a/bundle/LuaJIT-2.1-20210510/src/lj_init.c b/bundle/LuaJIT-2.1-20210510/src/lj_init.c +deleted file mode 100644 +index a6816e1e..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_init.c ++++ /dev/null +@@ -1,69 +0,0 @@ +-#include +-#include "lj_arch.h" +-#include "lj_jit.h" +-#include "lj_vm.h" +-#include "lj_str.h" +- +-#if LJ_TARGET_ARM && LJ_TARGET_LINUX +-#include +-#endif +- +-#ifdef _MSC_VER +-/* +-** Append a function pointer to the static constructor table executed by +-** the C runtime. +-** Based on https://stackoverflow.com/questions/1113409/attribute-constructor-equivalent-in-vc +-** see also https://docs.microsoft.com/en-us/cpp/c-runtime-library/crt-initialization. +-*/ +-#pragma section(".CRT$XCU",read) +-#define LJ_INITIALIZER2_(f,p) \ +- static void f(void); \ +- __declspec(allocate(".CRT$XCU")) void (*f##_)(void) = f; \ +- __pragma(comment(linker,"/include:" p #f "_")) \ +- static void f(void) +-#ifdef _WIN64 +-#define LJ_INITIALIZER(f) LJ_INITIALIZER2_(f,"") +-#else +-#define LJ_INITIALIZER(f) LJ_INITIALIZER2_(f,"_") +-#endif +- +-#else +-#define LJ_INITIALIZER(f) static void __attribute__((constructor)) f(void) +-#endif +- +- +-#ifdef LJ_HAS_OPTIMISED_HASH +-static void str_hash_init(uint32_t flags) +-{ +- if (flags & JIT_F_SSE4_2) +- str_hash_init_sse42 (); +-} +- +-/* CPU detection for interpreter features such as string hash function +- selection. We choose to cherry-pick from lj_cpudetect and not have a single +- initializer to make sure that merges with LuaJIT/LuaJIT remain +- convenient. */ +-LJ_INITIALIZER(lj_init_cpuflags) +-{ +- uint32_t flags = 0; +-#if LJ_TARGET_X86ORX64 +- +- uint32_t vendor[4]; +- uint32_t features[4]; +- if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) { +- flags |= ((features[2] >> 0)&1) * JIT_F_SSE3; +- flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1; +- flags |= ((features[2] >> 20)&1) * JIT_F_SSE4_2; +- if (vendor[0] >= 7) { +- uint32_t xfeatures[4]; +- lj_vm_cpuid(7, xfeatures); +- flags |= ((xfeatures[1] >> 8)&1) * JIT_F_BMI2; +- } +- } +- +-#endif +- +- /* The reason why we initialized early: select our string hash functions. 
*/ +- str_hash_init (flags); +-} +-#endif +diff --git a/bundle/LuaJIT-2.1-20210510/src/lj_jit.h b/bundle/LuaJIT-2.1-20210510/src/lj_jit.h +index 8993354f..c834d80a 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_jit.h ++++ b/bundle/LuaJIT-2.1-20210510/src/lj_jit.h +@@ -22,7 +22,6 @@ + #define JIT_F_SSE3 (JIT_F_CPU << 0) + #define JIT_F_SSE4_1 (JIT_F_CPU << 1) + #define JIT_F_BMI2 (JIT_F_CPU << 2) +-#define JIT_F_SSE4_2 (JIT_F_CPU << 3) + + + #define JIT_F_CPUSTRING "\4SSE3\6SSE4.1\4BMI2" +diff --git a/bundle/LuaJIT-2.1-20210510/src/lj_str.c b/bundle/LuaJIT-2.1-20210510/src/lj_str.c +index d37f3b22..c0e2dfad 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_str.c ++++ b/bundle/LuaJIT-2.1-20210510/src/lj_str.c +@@ -12,6 +12,7 @@ + #include "lj_str.h" + #include "lj_char.h" + #include "lj_prng.h" ++#include "x64/src/lj_str_hash_x64.h" + + /* -- String helpers ------------------------------------------------------ */ + +@@ -82,22 +83,9 @@ int lj_str_haspattern(GCstr *s) + + /* -- String hashing ------------------------------------------------------ */ + +-#ifdef LJ_HAS_OPTIMISED_HASH +-static StrHash hash_sparse_def (uint64_t, const char *, MSize); +-str_sparse_hashfn hash_sparse = hash_sparse_def; +-#if LUAJIT_SECURITY_STRHASH +-static StrHash hash_dense_def(uint64_t, StrHash, const char *, MSize); +-str_dense_hashfn hash_dense = hash_dense_def; +-#endif +-#else +-#define hash_sparse hash_sparse_def +-#if LUAJIT_SECURITY_STRHASH +-#define hash_dense hash_dense_def +-#endif +-#endif +- ++#ifndef ARCH_HASH_SPARSE + /* Keyed sparse ARX string hash. Constant time. */ +-static StrHash hash_sparse_def(uint64_t seed, const char *str, MSize len) ++static StrHash hash_sparse(uint64_t seed, const char *str, MSize len) + { + /* Constants taken from lookup3 hash by Bob Jenkins. */ + StrHash a, b, h = len ^ (StrHash)seed; +@@ -118,11 +106,12 @@ static StrHash hash_sparse_def(uint64_t seed, const char *str, MSize len) + h ^= b; h -= lj_rol(b, 16); + return h; + } ++#endif + +-#if LUAJIT_SECURITY_STRHASH ++#if LUAJIT_SECURITY_STRHASH && !defined(ARCH_HASH_DENSE) + /* Keyed dense ARX string hash. Linear time. 
*/ +-static LJ_NOINLINE StrHash hash_dense_def(uint64_t seed, StrHash h, +- const char *str, MSize len) ++static LJ_NOINLINE StrHash hash_dense(uint64_t seed, StrHash h, ++ const char *str, MSize len) + { + StrHash b = lj_bswap(lj_rol(h ^ (StrHash)(seed >> 32), 4)); + if (len > 12) { +diff --git a/bundle/LuaJIT-2.1-20210510/src/lj_str.h b/bundle/LuaJIT-2.1-20210510/src/lj_str.h +index f7b9234b..28edb5a5 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_str.h ++++ b/bundle/LuaJIT-2.1-20210510/src/lj_str.h +@@ -28,16 +28,4 @@ LJ_FUNC void LJ_FASTCALL lj_str_init(lua_State *L); + #define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1)) + #define lj_str_size(len) (sizeof(GCstr) + (((len)+4) & ~(MSize)3)) + +-#ifdef LJ_HAS_OPTIMISED_HASH +-typedef StrHash (*str_sparse_hashfn) (uint64_t, const char *, MSize); +-extern str_sparse_hashfn hash_sparse; +- +-#if LUAJIT_SECURITY_STRHASH +-typedef StrHash (*str_dense_hashfn) (uint64_t, StrHash, const char *, MSize); +-extern str_dense_hashfn hash_dense; +-#endif +- +-extern void str_hash_init_sse42 (void); +-#endif +- + #endif +diff --git a/bundle/LuaJIT-2.1-20210510/src/ljamalg.c b/bundle/LuaJIT-2.1-20210510/src/ljamalg.c +index 36ad2f6d..34922650 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/ljamalg.c ++++ b/bundle/LuaJIT-2.1-20210510/src/ljamalg.c +@@ -86,3 +86,4 @@ + #include "lib_jit.c" + #include "lib_ffi.c" + #include "lib_init.c" ++ +diff --git a/bundle/LuaJIT-2.1-20210510/src/lj_str_hash.c b/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h +similarity index 76% +rename from src/lj_str_hash.c +rename to src/x64/src/lj_str_hash_x64.h +index 0ee4b5f6..e6538953 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_str_hash.c ++++ b/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h +@@ -5,48 +5,23 @@ + * to 128 bytes of given string. + */ + +-#include "lj_arch.h" ++#ifndef _LJ_STR_HASH_X64_H_ ++#define _LJ_STR_HASH_X64_H_ ++ ++#if defined(__SSE4_2__) && defined(__x86_64) && defined(__GNUC__) + +-#if LJ_HAS_OPTIMISED_HASH == 1 || defined(SMOKETEST) + #include + #include ++#include + #include + #include + +-#if defined(_MSC_VER) +-#include +-/* Silence deprecated name warning */ +-#define getpid _getpid +-#else +-#include +-#endif +- +-#include "lj_def.h" +-#include "lj_str.h" +-#include "lj_jit.h" +- +- +-#if defined(_MSC_VER) +-/* +- * MSVC doesn't seem to restrict intrinsics used based on /arch: value set +- * while clang-cl will error on it. 
+- */ +-#if defined(__clang__) && !defined(__SSE4_2__) +-#error "This file must be built with /arch:AVX1 or higher" +-#endif +-#else +-#if !defined(__SSE4_2__) +-#error "This file must be built with -msse4.2" +-#endif +-#endif +- +-#define lj_crc32_u32 _mm_crc32_u32 +-#define lj_crc32_u64 _mm_crc32_u64 ++#include "../../lj_def.h" + + #undef LJ_AINLINE + #define LJ_AINLINE + +-#if defined(__MINGW32__) || defined(_MSC_VER) ++#ifdef __MINGW32__ + #define random() ((long) rand()) + #define srandom(seed) srand(seed) + #endif +@@ -74,7 +49,7 @@ static LJ_AINLINE uint32_t hash_sparse_1_4(uint64_t seed, const char* str, + v = (v << 8) | str[len >> 1]; + v = (v << 8) | str[len - 1]; + v = (v << 8) | len; +- return lj_crc32_u32(0, v); ++ return _mm_crc32_u32(0, v); + #else + uint32_t a, b, h = len ^ seed; + +@@ -105,9 +80,9 @@ static LJ_AINLINE uint32_t hash_sparse_4_16(uint64_t seed, const char* str, + v2 = *cast_uint32p(str + len - 4); + } + +- h = lj_crc32_u32(0, len ^ seed); +- h = lj_crc32_u64(h, v1); +- h = lj_crc32_u64(h, v2); ++ h = _mm_crc32_u32(0, len ^ seed); ++ h = _mm_crc32_u64(h, v1); ++ h = _mm_crc32_u64(h, v2); + return h; + } + +@@ -118,18 +93,18 @@ static uint32_t hash_16_128(uint64_t seed, const char* str, + uint64_t h1, h2; + uint32_t i; + +- h1 = lj_crc32_u32(0, len ^ seed); ++ h1 = _mm_crc32_u32(0, len ^ seed); + h2 = 0; + + for (i = 0; i < len - 16; i += 16) { +- h1 += lj_crc32_u64(h1, *cast_uint64p(str + i)); +- h2 += lj_crc32_u64(h2, *cast_uint64p(str + i + 8)); ++ h1 += _mm_crc32_u64(h1, *cast_uint64p(str + i)); ++ h2 += _mm_crc32_u64(h2, *cast_uint64p(str + i + 8)); + }; + +- h1 = lj_crc32_u64(h1, *cast_uint64p(str + len - 16)); +- h2 = lj_crc32_u64(h2, *cast_uint64p(str + len - 8)); ++ h1 = _mm_crc32_u64(h1, *cast_uint64p(str + len - 16)); ++ h2 = _mm_crc32_u64(h2, *cast_uint64p(str + len - 8)); + +- return lj_crc32_u32(h1, h2); ++ return _mm_crc32_u32(h1, h2); + } + + /* ************************************************************************** +@@ -172,7 +147,7 @@ static LJ_AINLINE uint32_t log2_floor(uint32_t n) + /* This function is to populate `random_pos` such that random_pos[i][*] + * contains random value in the range of [2**i, 2**(i+1)). + */ +-static void str_hash_init_random(void) ++static void x64_init_random(void) + { + int i, seed, rml; + +@@ -183,8 +158,8 @@ static void str_hash_init_random(void) + } + + /* Init seed */ +- seed = lj_crc32_u32(0, getpid()); +- seed = lj_crc32_u32(seed, time(NULL)); ++ seed = _mm_crc32_u32(0, getpid()); ++ seed = _mm_crc32_u32(seed, time(NULL)); + srandom(seed); + + /* Now start to populate the random_pos[][]. */ +@@ -213,6 +188,11 @@ static void str_hash_init_random(void) + } + #undef POW2_MASK + ++void __attribute__((constructor)) x64_init_random_constructor() ++{ ++ x64_init_random(); ++} ++ + /* Return a pre-computed random number in the range of [1**chunk_sz_order, + * 1**(chunk_sz_order+1)). 
It is "unsafe" in the sense that the return value + * may be greater than chunk-size; it is up to the caller to make sure +@@ -239,7 +219,7 @@ static LJ_NOINLINE uint32_t hash_128_above(uint64_t seed, const char* str, + pos1 = get_random_pos_unsafe(chunk_sz_log2, 0); + pos2 = get_random_pos_unsafe(chunk_sz_log2, 1); + +- h1 = lj_crc32_u32(0, len ^ seed); ++ h1 = _mm_crc32_u32(0, len ^ seed); + h2 = 0; + + /* loop over 14 chunks, 2 chunks at a time */ +@@ -247,29 +227,29 @@ static LJ_NOINLINE uint32_t hash_128_above(uint64_t seed, const char* str, + chunk_ptr += chunk_sz, i++) { + + v = *cast_uint64p(chunk_ptr + pos1); +- h1 = lj_crc32_u64(h1, v); ++ h1 = _mm_crc32_u64(h1, v); + + v = *cast_uint64p(chunk_ptr + chunk_sz + pos2); +- h2 = lj_crc32_u64(h2, v); ++ h2 = _mm_crc32_u64(h2, v); + } + + /* the last two chunks */ + v = *cast_uint64p(chunk_ptr + pos1); +- h1 = lj_crc32_u64(h1, v); ++ h1 = _mm_crc32_u64(h1, v); + + v = *cast_uint64p(chunk_ptr + chunk_sz - 8 - pos2); +- h2 = lj_crc32_u64(h2, v); ++ h2 = _mm_crc32_u64(h2, v); + + /* process the trailing part */ +- h1 = lj_crc32_u64(h1, *cast_uint64p(str)); +- h2 = lj_crc32_u64(h2, *cast_uint64p(str + len - 8)); ++ h1 = _mm_crc32_u64(h1, *cast_uint64p(str)); ++ h2 = _mm_crc32_u64(h2, *cast_uint64p(str + len - 8)); + +- h1 = lj_crc32_u32(h1, h2); ++ h1 = _mm_crc32_u32(h1, h2); + return h1; + } + + /* NOTE: the "len" should not be zero */ +-static StrHash hash_sparse_sse42(uint64_t seed, const char* str, MSize len) ++static uint32_t hash_sparse(uint64_t seed, const char* str, size_t len) + { + if (len < 4 || len >= 128) + return hash_sparse_1_4(seed, str, len); +@@ -280,10 +260,11 @@ static StrHash hash_sparse_sse42(uint64_t seed, const char* str, MSize len) + /* [4, 16) */ + return hash_sparse_4_16(seed, str, len); + } ++#define ARCH_HASH_SPARSE hash_sparse + + #if LUAJIT_SECURITY_STRHASH +-static StrHash hash_dense_sse42(uint64_t seed, uint32_t h, const char* str, +- MSize len) ++static uint32_t hash_dense(uint64_t seed, uint32_t h, const char* str, ++ size_t len) + { + uint32_t b = lj_bswap(lj_rol(h ^ (uint32_t)(seed >> 32), 4)); + +@@ -296,14 +277,11 @@ static StrHash hash_dense_sse42(uint64_t seed, uint32_t h, const char* str, + /* Otherwise, do the slow crc32 randomization for long strings. 
*/ + return hash_128_above(b, str, len); + } ++#define ARCH_HASH_DENSE hash_dense + #endif + +-void str_hash_init_sse42(void) +-{ +- hash_sparse = hash_sparse_sse42; +-#if LUAJIT_SECURITY_STRHASH +- hash_dense = hash_dense_sse42; +-#endif +- str_hash_init_random(); +-} ++#else ++#undef ARCH_HASH_SPARSE ++#undef ARCH_HASH_DENSE + #endif ++#endif /*_LJ_STR_HASH_X64_H_*/ +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/benchmark.cxx b/bundle/LuaJIT-2.1-20210510/src/x64/test/benchmark.cxx +index 1ea8fb6b..ee247c1c 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/benchmark.cxx ++++ b/bundle/LuaJIT-2.1-20210510/src/x64/test/benchmark.cxx +@@ -1,10 +1,7 @@ + #include // for gettimeofday() + extern "C" { + #define LUAJIT_SECURITY_STRHASH 1 +-#include "../../lj_str.h" +-str_sparse_hashfn hash_sparse; +-str_dense_hashfn hash_dense; +-#include "../../lj_str_hash.c" ++#include "lj_str_hash_x64.h" + } + #include + #include +@@ -100,7 +97,7 @@ struct TestFuncWasSparse + struct TestFuncIsSparse + { + uint32_t operator()(uint64_t seed, const char* buf, uint32_t len) { +- return hash_sparse_sse42(seed, buf, len); ++ return hash_sparse(seed, buf, len); + } + }; + +@@ -114,7 +111,7 @@ struct TestFuncWasDense + struct TestFuncIsDense + { + uint32_t operator()(uint64_t seed, const char* buf, uint32_t len) { +- return hash_dense_sse42(seed, 42, buf, len); ++ return hash_dense(seed, 42, buf, len); + } + }; + +@@ -271,9 +268,9 @@ benchmarkConflictHelper(uint64_t seed, uint32_t bucketNum, + for (vector::const_iterator i = strs.begin(), e = strs.end(); + i != e; ++i) { + uint32_t h1 = original_hash_sparse(seed, i->c_str(), i->size()); +- uint32_t h2 = hash_sparse_sse42(seed, i->c_str(), i->size()); ++ uint32_t h2 = hash_sparse(seed, i->c_str(), i->size()); + uint32_t h3 = original_hash_dense(seed, h1, i->c_str(), i->size()); +- uint32_t h4 = hash_dense_sse42(seed, h2, i->c_str(), i->size()); ++ uint32_t h4 = hash_dense(seed, h2, i->c_str(), i->size()); + + conflictWasSparse[h1 & mask]++; + conflictIsSparse[h2 & mask]++; +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/test.cpp b/bundle/LuaJIT-2.1-20210510/src/x64/test/test.cpp +index 432c7bbb..75f34e9f 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/test.cpp ++++ b/bundle/LuaJIT-2.1-20210510/src/x64/test/test.cpp +@@ -4,14 +4,10 @@ + #include + #define LUAJIT_SECURITY_STRHASH 1 + #include "test_util.hpp" +-#include "../../lj_str.h" +-str_sparse_hashfn hash_sparse; +-str_dense_hashfn hash_dense; +-#include "../../lj_str_hash.c" ++#include "lj_str_hash_x64.h" + + using namespace std; + +- + static bool + smoke_test() + { +@@ -28,9 +24,9 @@ smoke_test() + 255, 256, 257}; + for (unsigned i = 0; i < sizeof(lens)/sizeof(lens[0]); i++) { + string s(buf, lens[i]); +- uint32_t h = hash_sparse_sse42(rand(), s.c_str(), lens[i]); ++ uint32_t h = hash_sparse(rand(), s.c_str(), lens[i]); + test_printf("%d", h); +- test_printf("%d", hash_dense_sse42(rand(), h, s.c_str(), lens[i])); ++ test_printf("%d", hash_dense(rand(), h, s.c_str(), lens[i])); + } + + return true; +-- +2.43.0 + diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_06_Revert_bugfix_fixed_compatibility_regression_with_Mi.patch b/build/openresty/patches/LuaJIT-2.1-20210510_06_Revert_bugfix_fixed_compatibility_regression_with_Mi.patch new file mode 100644 index 00000000000..8fd8baa8796 --- /dev/null +++ b/build/openresty/patches/LuaJIT-2.1-20210510_06_Revert_bugfix_fixed_compatibility_regression_with_Mi.patch @@ -0,0 +1,30 @@ +From 01a9e4c7b67adb9f53cee915633bb754bd9300ee Mon Sep 17 00:00:00 
2001 +From: Zhongwei Yao +Date: Fri, 31 May 2024 11:40:58 -0700 +Subject: [PATCH] Revert "bugfix: fixed compatibility regression with MinGW + gcc" + +This reverts commit b882bdb6d65156249fd9119152826e2d1e0ab01c. +--- + src/x64/src/lj_str_hash_x64.h | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h b/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h +index e6538953..8f6b8e1b 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h ++++ b/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h +@@ -21,11 +21,6 @@ + #undef LJ_AINLINE + #define LJ_AINLINE + +-#ifdef __MINGW32__ +-#define random() ((long) rand()) +-#define srandom(seed) srand(seed) +-#endif +- + static const uint64_t* cast_uint64p(const char* str) + { + return (const uint64_t*)(void*)str; +-- +2.43.0 + diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_07_Revert_Adjust_SSE4.1_str_hash_to_replace_hash_sparse.patch b/build/openresty/patches/LuaJIT-2.1-20210510_07_Revert_Adjust_SSE4.1_str_hash_to_replace_hash_sparse.patch new file mode 100644 index 00000000000..1ff29d92233 --- /dev/null +++ b/build/openresty/patches/LuaJIT-2.1-20210510_07_Revert_Adjust_SSE4.1_str_hash_to_replace_hash_sparse.patch @@ -0,0 +1,1145 @@ +From 73811be9a047af819710c71cf2649603bc136cab Mon Sep 17 00:00:00 2001 +From: Zhongwei Yao +Date: Fri, 31 May 2024 11:42:17 -0700 +Subject: [PATCH] Revert "Adjust SSE4.1 str_hash to replace hash_sparse and + hash_dense" + +This reverts commit 5bccde90b1808d1d456f52ae2aa8824487045111. +--- + src/lj_str.c | 5 +- + src/x64/Makefile | 13 -- + src/x64/src/lj_str_hash_x64.h | 282 -------------------------- + src/x64/test/Makefile | 47 ----- + src/x64/test/benchmark.cxx | 357 --------------------------------- + src/x64/test/test.cpp | 77 ------- + src/x64/test/test_str_comp.lua | 67 ------- + src/x64/test/test_util.cxx | 21 -- + src/x64/test/test_util.d | 107 ---------- + src/x64/test/test_util.hpp | 57 ------ + 10 files changed, 1 insertion(+), 1032 deletions(-) + delete mode 100644 src/x64/Makefile + delete mode 100644 src/x64/src/lj_str_hash_x64.h + delete mode 100644 src/x64/test/Makefile + delete mode 100644 src/x64/test/benchmark.cxx + delete mode 100644 src/x64/test/test.cpp + delete mode 100644 src/x64/test/test_str_comp.lua + delete mode 100644 src/x64/test/test_util.cxx + delete mode 100644 src/x64/test/test_util.d + delete mode 100644 src/x64/test/test_util.hpp + +diff --git a/bundle/LuaJIT-2.1-20210510/src/lj_str.c b/bundle/LuaJIT-2.1-20210510/src/lj_str.c +index c0e2dfad..27de2c0d 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/lj_str.c ++++ b/bundle/LuaJIT-2.1-20210510/src/lj_str.c +@@ -12,7 +12,6 @@ + #include "lj_str.h" + #include "lj_char.h" + #include "lj_prng.h" +-#include "x64/src/lj_str_hash_x64.h" + + /* -- String helpers ------------------------------------------------------ */ + +@@ -83,7 +82,6 @@ int lj_str_haspattern(GCstr *s) + + /* -- String hashing ------------------------------------------------------ */ + +-#ifndef ARCH_HASH_SPARSE + /* Keyed sparse ARX string hash. Constant time. */ + static StrHash hash_sparse(uint64_t seed, const char *str, MSize len) + { +@@ -106,9 +104,8 @@ static StrHash hash_sparse(uint64_t seed, const char *str, MSize len) + h ^= b; h -= lj_rol(b, 16); + return h; + } +-#endif + +-#if LUAJIT_SECURITY_STRHASH && !defined(ARCH_HASH_DENSE) ++#if LUAJIT_SECURITY_STRHASH + /* Keyed dense ARX string hash. Linear time. 
*/ + static LJ_NOINLINE StrHash hash_dense(uint64_t seed, StrHash h, + const char *str, MSize len) +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/Makefile b/bundle/LuaJIT-2.1-20210510/src/x64/Makefile +deleted file mode 100644 +index 27277140..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/Makefile ++++ /dev/null +@@ -1,13 +0,0 @@ +-.PHONY: default test benchmark clean +- +-default: +- @echo "make target include: test bechmark clean" +- +-test: +- $(MAKE) -C test test +- +-benchmark: +- $(MAKE) -C test benchmark +- +-clean: +- $(MAKE) -C test clean +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h b/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h +deleted file mode 100644 +index 8f6b8e1b..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/src/lj_str_hash_x64.h ++++ /dev/null +@@ -1,282 +0,0 @@ +-/* +- * This file defines string hash function using CRC32. It takes advantage of +- * Intel hardware support (crc32 instruction, SSE 4.2) to speedup the CRC32 +- * computation. The hash functions try to compute CRC32 of length and up +- * to 128 bytes of given string. +- */ +- +-#ifndef _LJ_STR_HASH_X64_H_ +-#define _LJ_STR_HASH_X64_H_ +- +-#if defined(__SSE4_2__) && defined(__x86_64) && defined(__GNUC__) +- +-#include +-#include +-#include +-#include +-#include +- +-#include "../../lj_def.h" +- +-#undef LJ_AINLINE +-#define LJ_AINLINE +- +-static const uint64_t* cast_uint64p(const char* str) +-{ +- return (const uint64_t*)(void*)str; +-} +- +-static const uint32_t* cast_uint32p(const char* str) +-{ +- return (const uint32_t*)(void*)str; +-} +- +-/* hash string with len in [1, 4) */ +-static LJ_AINLINE uint32_t hash_sparse_1_4(uint64_t seed, const char* str, +- uint32_t len) +-{ +-#if 0 +- /* TODO: The if-1 part (i.e the original algorithm) is working better when +- * the load-factor is high, as revealed by conflict benchmark (via +- * 'make benchmark' command); need to understand why it's so. 
+- */ +- uint32_t v = str[0]; +- v = (v << 8) | str[len >> 1]; +- v = (v << 8) | str[len - 1]; +- v = (v << 8) | len; +- return _mm_crc32_u32(0, v); +-#else +- uint32_t a, b, h = len ^ seed; +- +- a = *(const uint8_t *)str; +- h ^= *(const uint8_t *)(str+len-1); +- b = *(const uint8_t *)(str+(len>>1)); +- h ^= b; h -= lj_rol(b, 14); +- +- a ^= h; a -= lj_rol(h, 11); +- b ^= a; b -= lj_rol(a, 25); +- h ^= b; h -= lj_rol(b, 16); +- +- return h; +-#endif +-} +- +-/* hash string with len in [4, 16) */ +-static LJ_AINLINE uint32_t hash_sparse_4_16(uint64_t seed, const char* str, +- uint32_t len) +-{ +- uint64_t v1, v2, h; +- +- if (len >= 8) { +- v1 = *cast_uint64p(str); +- v2 = *cast_uint64p(str + len - 8); +- } else { +- v1 = *cast_uint32p(str); +- v2 = *cast_uint32p(str + len - 4); +- } +- +- h = _mm_crc32_u32(0, len ^ seed); +- h = _mm_crc32_u64(h, v1); +- h = _mm_crc32_u64(h, v2); +- return h; +-} +- +-/* hash string with length in [16, 128) */ +-static uint32_t hash_16_128(uint64_t seed, const char* str, +- uint32_t len) +-{ +- uint64_t h1, h2; +- uint32_t i; +- +- h1 = _mm_crc32_u32(0, len ^ seed); +- h2 = 0; +- +- for (i = 0; i < len - 16; i += 16) { +- h1 += _mm_crc32_u64(h1, *cast_uint64p(str + i)); +- h2 += _mm_crc32_u64(h2, *cast_uint64p(str + i + 8)); +- }; +- +- h1 = _mm_crc32_u64(h1, *cast_uint64p(str + len - 16)); +- h2 = _mm_crc32_u64(h2, *cast_uint64p(str + len - 8)); +- +- return _mm_crc32_u32(h1, h2); +-} +- +-/* ************************************************************************** +- * +- * Following is code about hashing string with length >= 128 +- * +- * ************************************************************************** +- */ +-static uint32_t random_pos[32][2]; +-static const int8_t log2_tab[128] = { -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4, +- 4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, +- 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6, +- 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, +- 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6 }; +- +-/* return floor(log2(n)) */ +-static LJ_AINLINE uint32_t log2_floor(uint32_t n) +-{ +- if (n <= 127) { +- return log2_tab[n]; +- } +- +- if ((n >> 8) <= 127) { +- return log2_tab[n >> 8] + 8; +- } +- +- if ((n >> 16) <= 127) { +- return log2_tab[n >> 16] + 16; +- } +- +- if ((n >> 24) <= 127) { +- return log2_tab[n >> 24] + 24; +- } +- +- return 31; +-} +- +-#define POW2_MASK(n) ((1L << (n)) - 1) +- +-/* This function is to populate `random_pos` such that random_pos[i][*] +- * contains random value in the range of [2**i, 2**(i+1)). +- */ +-static void x64_init_random(void) +-{ +- int i, seed, rml; +- +- /* Calculate the ceil(log2(RAND_MAX)) */ +- rml = log2_floor(RAND_MAX); +- if (RAND_MAX & (RAND_MAX - 1)) { +- rml += 1; +- } +- +- /* Init seed */ +- seed = _mm_crc32_u32(0, getpid()); +- seed = _mm_crc32_u32(seed, time(NULL)); +- srandom(seed); +- +- /* Now start to populate the random_pos[][]. 
*/ +- for (i = 0; i < 3; i++) { +- /* No need to provide random value for chunk smaller than 8 bytes */ +- random_pos[i][0] = random_pos[i][1] = 0; +- } +- +- for (; i < rml; i++) { +- random_pos[i][0] = random() & POW2_MASK(i+1); +- random_pos[i][1] = random() & POW2_MASK(i+1); +- } +- +- for (; i < 31; i++) { +- int j; +- for (j = 0; j < 2; j++) { +- uint32_t v, scale; +- scale = random_pos[i - rml][0]; +- if (scale == 0) { +- scale = 1; +- } +- v = (random() * scale) & POW2_MASK(i+1); +- random_pos[i][j] = v; +- } +- } +-} +-#undef POW2_MASK +- +-void __attribute__((constructor)) x64_init_random_constructor() +-{ +- x64_init_random(); +-} +- +-/* Return a pre-computed random number in the range of [1**chunk_sz_order, +- * 1**(chunk_sz_order+1)). It is "unsafe" in the sense that the return value +- * may be greater than chunk-size; it is up to the caller to make sure +- * "chunk-base + return-value-of-this-func" has valid virtual address. +- */ +-static LJ_AINLINE uint32_t get_random_pos_unsafe(uint32_t chunk_sz_order, +- uint32_t idx) +-{ +- uint32_t pos = random_pos[chunk_sz_order][idx & 1]; +- return pos; +-} +- +-static LJ_NOINLINE uint32_t hash_128_above(uint64_t seed, const char* str, +- uint32_t len) +-{ +- uint32_t chunk_num, chunk_sz, chunk_sz_log2, i, pos1, pos2; +- uint64_t h1, h2, v; +- const char* chunk_ptr; +- +- chunk_num = 16; +- chunk_sz = len / chunk_num; +- chunk_sz_log2 = log2_floor(chunk_sz); +- +- pos1 = get_random_pos_unsafe(chunk_sz_log2, 0); +- pos2 = get_random_pos_unsafe(chunk_sz_log2, 1); +- +- h1 = _mm_crc32_u32(0, len ^ seed); +- h2 = 0; +- +- /* loop over 14 chunks, 2 chunks at a time */ +- for (i = 0, chunk_ptr = str; i < (chunk_num / 2 - 1); +- chunk_ptr += chunk_sz, i++) { +- +- v = *cast_uint64p(chunk_ptr + pos1); +- h1 = _mm_crc32_u64(h1, v); +- +- v = *cast_uint64p(chunk_ptr + chunk_sz + pos2); +- h2 = _mm_crc32_u64(h2, v); +- } +- +- /* the last two chunks */ +- v = *cast_uint64p(chunk_ptr + pos1); +- h1 = _mm_crc32_u64(h1, v); +- +- v = *cast_uint64p(chunk_ptr + chunk_sz - 8 - pos2); +- h2 = _mm_crc32_u64(h2, v); +- +- /* process the trailing part */ +- h1 = _mm_crc32_u64(h1, *cast_uint64p(str)); +- h2 = _mm_crc32_u64(h2, *cast_uint64p(str + len - 8)); +- +- h1 = _mm_crc32_u32(h1, h2); +- return h1; +-} +- +-/* NOTE: the "len" should not be zero */ +-static uint32_t hash_sparse(uint64_t seed, const char* str, size_t len) +-{ +- if (len < 4 || len >= 128) +- return hash_sparse_1_4(seed, str, len); +- +- if (len >= 16) /* [16, 128) */ +- return hash_16_128(seed, str, len); +- +- /* [4, 16) */ +- return hash_sparse_4_16(seed, str, len); +-} +-#define ARCH_HASH_SPARSE hash_sparse +- +-#if LUAJIT_SECURITY_STRHASH +-static uint32_t hash_dense(uint64_t seed, uint32_t h, const char* str, +- size_t len) +-{ +- uint32_t b = lj_bswap(lj_rol(h ^ (uint32_t)(seed >> 32), 4)); +- +- if (len <= 16) +- return b; +- +- if (len < 128) /* [16, 128), try with a different seed. */ +- return hash_16_128(b, str, len); +- +- /* Otherwise, do the slow crc32 randomization for long strings. 
*/ +- return hash_128_above(b, str, len); +-} +-#define ARCH_HASH_DENSE hash_dense +-#endif +- +-#else +-#undef ARCH_HASH_SPARSE +-#undef ARCH_HASH_DENSE +-#endif +-#endif /*_LJ_STR_HASH_X64_H_*/ +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/Makefile b/bundle/LuaJIT-2.1-20210510/src/x64/test/Makefile +deleted file mode 100644 +index 4326ab3d..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/Makefile ++++ /dev/null +@@ -1,47 +0,0 @@ +-.PHONY: default test benchmark +- +-default: test benchmark +- +-COMMON_OBJ := test_util.o +- +-TEST_PROGRAM := ht_test +-BENCHMARK_PROGRAM := ht_benchmark +- +-TEST_PROGRAM_OBJ := $(COMMON_OBJ) test.o +-BENCHMARK_PROGRAM_OBJ := $(COMMON_OBJ) benchmark.o +- +-ifeq ($(WITH_VALGRIND), 1) +- VALGRIND := valgrind --leak-check=full +-else +- VALGRIND := +-endif +- +-CXXFLAGS := -O3 -MD -g -msse4.2 -Wall -I../src -I../../../src +- +-%.o: %.cxx +- $(CXX) $(CXXFLAGS) -MD -c $< +- +-test: $(TEST_PROGRAM) +- @echo "some unit test" +- $(VALGRIND) ./$(TEST_PROGRAM) +- +- @echo "smoke test" +- ../../luajit test_str_comp.lua +- +-benchmark: $(BENCHMARK_PROGRAM) +- # micro benchmark +- ./$(BENCHMARK_PROGRAM) +- +-$(TEST_PROGRAM) : $(TEST_PROGRAM_OBJ) +- cat $(TEST_PROGRAM_OBJ:.o=.d) > dep1.txt +- $(CXX) $+ $(CXXFLAGS) -lm -o $@ +- +-$(BENCHMARK_PROGRAM): $(BENCHMARK_PROGRAM_OBJ) +- cat $(BENCHMARK_PROGRAM_OBJ:.o=.d) > dep2.txt +- $(CXX) $+ $(CXXFLAGS) -o $@ +- +--include dep1.txt +--include dep2.txt +- +-clean: +- -rm -f *.o *.d dep*.txt $(BENCHMARK_PROGRAM) $(TEST_PROGRAM) +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/benchmark.cxx b/bundle/LuaJIT-2.1-20210510/src/x64/test/benchmark.cxx +deleted file mode 100644 +index ee247c1c..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/benchmark.cxx ++++ /dev/null +@@ -1,357 +0,0 @@ +-#include // for gettimeofday() +-extern "C" { +-#define LUAJIT_SECURITY_STRHASH 1 +-#include "lj_str_hash_x64.h" +-} +-#include +-#include +-#include +-#include +-#include "test_util.hpp" +-#include +-#include +- +-using namespace std; +- +-#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1)))) +-#define lj_ror(x, n) (((x)<<(-(int)(n)&(8*sizeof(x)-1))) | ((x)>>(n))) +- +-const char* separator = "-------------------------------------------"; +- +-static uint32_t LJ_AINLINE +-original_hash_sparse(uint64_t seed, const char *str, size_t len) +-{ +- uint32_t a, b, h = len ^ seed; +- if (len >= 4) { +- a = lj_getu32(str); h ^= lj_getu32(str+len-4); +- b = lj_getu32(str+(len>>1)-2); +- h ^= b; h -= lj_rol(b, 14); +- b += lj_getu32(str+(len>>2)-1); +- a ^= h; a -= lj_rol(h, 11); +- b ^= a; b -= lj_rol(a, 25); +- h ^= b; h -= lj_rol(b, 16); +- } else { +- a = *(const uint8_t *)str; +- h ^= *(const uint8_t *)(str+len-1); +- b = *(const uint8_t *)(str+(len>>1)); +- h ^= b; h -= lj_rol(b, 14); +- } +- +- a ^= h; a -= lj_rol(h, 11); +- b ^= a; b -= lj_rol(a, 25); +- h ^= b; h -= lj_rol(b, 16); +- +- return h; +-} +- +-static uint32_t original_hash_dense(uint64_t seed, uint32_t h, +- const char *str, size_t len) +-{ +- uint32_t b = lj_bswap(lj_rol(h ^ (uint32_t)(seed >> 32), 4)); +- if (len > 12) { +- uint32_t a = (uint32_t)seed; +- const char *pe = str+len-12, *p = pe, *q = str; +- do { +- a += lj_getu32(p); +- b += lj_getu32(p+4); +- h += lj_getu32(p+8); +- p = q; q += 12; +- h ^= b; h -= lj_rol(b, 14); +- a ^= h; a -= lj_rol(h, 11); +- b ^= a; b -= lj_rol(a, 25); +- } while (p < pe); +- h ^= b; h -= lj_rol(b, 16); +- a ^= h; a -= lj_rol(h, 4); +- b ^= a; b -= lj_rol(a, 14); +- } +- return b; +-} +- +- +-template 
double +-BenchmarkHashTmpl(T func, uint64_t seed, char* buf, size_t len) +-{ +- TestClock timer; +- uint32_t h = 0; +- +- timer.start(); +- for(int i = 1; i < 1000000 * 100; i++) { +- // So the buf is not loop invariant, hence the F(...) +- buf[i % 4096] = i; +- h += func(seed, buf, len) ^ i; +- } +- timer.stop(); +- +- // make h alive +- test_printf("%x", h); +- return timer.getElapseInSecond(); +-} +- +-struct TestFuncWasSparse +-{ +- uint32_t operator()(uint64_t seed, const char* buf, uint32_t len) { +- return original_hash_sparse(seed, buf, len); +- } +-}; +- +-struct TestFuncIsSparse +-{ +- uint32_t operator()(uint64_t seed, const char* buf, uint32_t len) { +- return hash_sparse(seed, buf, len); +- } +-}; +- +-struct TestFuncWasDense +-{ +- uint32_t operator()(uint64_t seed, const char* buf, uint32_t len) { +- return original_hash_dense(seed, 42, buf, len); +- } +-}; +- +-struct TestFuncIsDense +-{ +- uint32_t operator()(uint64_t seed, const char* buf, uint32_t len) { +- return hash_dense(seed, 42, buf, len); +- } +-}; +- +-static void +-benchmarkIndividual(uint64_t seed, char* buf) +-{ +- fprintf(stdout,"\n\nCompare performance of particular len (in second)\n"); +- fprintf(stdout, "%-12s%-8s%-8s%s%-8s%-8s%s\n", "len", +- "was (s)", "is (s)", "diff (s)", +- "was (d)", "is (d)", "diff (d)"); +- fprintf(stdout, "-------------------------------------------\n"); +- +- uint32_t lens[] = {3, 4, 7, 10, 15, 16, 20, 32, 36, 63, 80, 100, +- 120, 127, 280, 290, 400}; +- for (unsigned i = 0; i < sizeof(lens)/sizeof(lens[0]); i++) { +- uint32_t len = lens[i]; +- double e1 = BenchmarkHashTmpl(TestFuncWasSparse(), seed, buf, len); +- double e2 = BenchmarkHashTmpl(TestFuncIsSparse(), seed, buf, len); +- double e3 = BenchmarkHashTmpl(TestFuncWasDense(), seed, buf, len); +- double e4 = BenchmarkHashTmpl(TestFuncIsDense(), seed, buf, len); +- fprintf(stdout, "len = %4d: %-7.3lf %-7.3lf %-7.2f%% %-7.3lf %-7.3lf %.2f%%\n", +- len, e1, e2, 100*(e1-e2)/e1, e3, e4, 100*(e3-e4)/e3); +- } +-} +- +-template double +-BenchmarkChangeLenTmpl(T func, uint64_t seed, char* buf, uint32_t* len_vect, +- uint32_t len_num) +-{ +- TestClock timer; +- uint32_t h = 0; +- +- timer.start(); +- for(int i = 1; i < 1000000 * 100; i++) { +- for (int j = 0; j < (int)len_num; j++) { +- // So the buf is not loop invariant, hence the F(...) +- buf[(i + j) % 4096] = i; +- h += func(seed, buf, len_vect[j]) ^ j; +- } +- } +- timer.stop(); +- +- // make h alive +- test_printf("%x", h); +- return timer.getElapseInSecond(); +-} +- +-// It is to measure the performance when length is changing. +-// The purpose is to see how balanced branches impact the performance. 
+-// +-static void +-benchmarkToggleLens(uint64_t seed, char* buf) +-{ +- double e1, e2, e3, e4; +- fprintf(stdout,"\nChanging length (in second):"); +- fprintf(stdout, "\n%-24s%-8s%-8s%s%-8s%-8s%s\n%s\n", "len", +- "was (s)", "is (s)", "diff (s)", +- "was (d)", "is (d)", "diff (d)", +- separator); +- +- uint32_t lens1[] = {4, 9}; +- e1 = BenchmarkChangeLenTmpl(TestFuncWasSparse(), seed, buf, lens1, 2); +- e2 = BenchmarkChangeLenTmpl(TestFuncIsSparse(), seed, buf, lens1, 2); +- e3 = BenchmarkChangeLenTmpl(TestFuncWasDense(), seed, buf, lens1, 2); +- e4 = BenchmarkChangeLenTmpl(TestFuncIsDense(), seed, buf, lens1, 2); +- fprintf(stdout, "%-20s%-7.3lf %-7.3lf %-7.2f%% %-7.3lf %-7.3lf %.2f%%\n", "4,9", +- e1, e2, 100*(e1-e2)/e1, e3, e4, 100*(e3-e4)/e3); +- +- uint32_t lens2[] = {1, 4, 9}; +- e1 = BenchmarkChangeLenTmpl(TestFuncWasSparse(), seed, buf, lens2, 3); +- e2 = BenchmarkChangeLenTmpl(TestFuncIsSparse(), seed, buf, lens2, 3); +- e3 = BenchmarkChangeLenTmpl(TestFuncWasDense(), seed, buf, lens2, 3); +- e4 = BenchmarkChangeLenTmpl(TestFuncIsDense(), seed, buf, lens2, 3); +- fprintf(stdout, "%-20s%-7.3lf %-7.3lf %-7.2f%% %-7.3lf %-7.3lf %.2f%%\n", "1,4,9", +- e1, e2, 100*(e1-e2)/e1, e3, e4, 100*(e3-e4)/e3); +- +- uint32_t lens3[] = {1, 33, 4, 9}; +- e1 = BenchmarkChangeLenTmpl(TestFuncWasSparse(), seed, buf, lens3, 4); +- e2 = BenchmarkChangeLenTmpl(TestFuncIsSparse(), seed, buf, lens3, 4); +- e3 = BenchmarkChangeLenTmpl(TestFuncWasDense(), seed, buf, lens3, 4); +- e4 = BenchmarkChangeLenTmpl(TestFuncIsDense(), seed, buf, lens3, 4); +- fprintf(stdout, "%-20s%-7.3lf %-7.3lf %-7.2f%% %-7.3lf %-7.3lf %.2f%%\n", +- "1,33,4,9", e1, e2, 100*(e1-e2)/e1, e3, e4, 100*(e3-e4)/e3); +- +- uint32_t lens4[] = {16, 33, 64, 89}; +- e1 = BenchmarkChangeLenTmpl(TestFuncWasSparse(), seed, buf, lens4, 4); +- e2 = BenchmarkChangeLenTmpl(TestFuncIsSparse(), seed, buf, lens4, 4); +- e3 = BenchmarkChangeLenTmpl(TestFuncWasDense(), seed, buf, lens4, 4); +- e4 = BenchmarkChangeLenTmpl(TestFuncIsDense(), seed, buf, lens4, 4); +- fprintf(stdout, "%-20s%-7.3lf %-7.3lf %-7.2f%% %-7.3lf %-7.3lf %.2f%%\n", +- "16,33,64,89", e1, e2, 100*(e1-e2)/e1, e3, e4, 100*(e3-e4)/e3); +-} +- +-static void +-genRandomString(uint32_t min, uint32_t max, +- uint32_t num, vector& result) +-{ +- double scale = (max - min) / (RAND_MAX + 1.0); +- result.clear(); +- result.reserve(num); +- for (uint32_t i = 0; i < num; i++) { +- uint32_t len = (rand() * scale) + min; +- +- char* buf = new char[len]; +- for (uint32_t l = 0; l < len; l++) { +- buf[l] = rand() % 255; +- } +- result.push_back(string(buf, len)); +- delete[] buf; +- } +-} +- +-// Return the standard deviation of given array of number +-static double +-standarDeviation(const vector& v) +-{ +- uint64_t total = 0; +- for (vector::const_iterator i = v.begin(), e = v.end(); +- i != e; ++i) { +- total += *i; +- } +- +- double avg = total / (double)v.size(); +- double sd = 0; +- +- for (vector::const_iterator i = v.begin(), e = v.end(); +- i != e; ++i) { +- double t = avg - *i; +- sd = sd + t*t; +- } +- +- return sqrt(sd/v.size()); +-} +- +-static vector +-benchmarkConflictHelper(uint64_t seed, uint32_t bucketNum, +- const vector& strs) +-{ +- if (bucketNum & (bucketNum - 1)) { +- bucketNum = (1L << (log2_floor(bucketNum) + 1)); +- } +- uint32_t mask = bucketNum - 1; +- +- vector conflictWasSparse(bucketNum); +- vector conflictIsSparse(bucketNum); +- vector conflictWasDense(bucketNum); +- vector conflictIsDense(bucketNum); +- +- conflictWasSparse.resize(bucketNum); +- 
conflictIsSparse.resize(bucketNum); +- conflictWasDense.resize(bucketNum); +- conflictIsDense.resize(bucketNum); +- +- for (vector::const_iterator i = strs.begin(), e = strs.end(); +- i != e; ++i) { +- uint32_t h1 = original_hash_sparse(seed, i->c_str(), i->size()); +- uint32_t h2 = hash_sparse(seed, i->c_str(), i->size()); +- uint32_t h3 = original_hash_dense(seed, h1, i->c_str(), i->size()); +- uint32_t h4 = hash_dense(seed, h2, i->c_str(), i->size()); +- +- conflictWasSparse[h1 & mask]++; +- conflictIsSparse[h2 & mask]++; +- conflictWasDense[h3 & mask]++; +- conflictIsDense[h4 & mask]++; +- } +- +-#if 0 +- std::sort(conflictWas.begin(), conflictWas.end(), std::greater()); +- std::sort(conflictIs.begin(), conflictIs.end(), std::greater()); +- +- fprintf(stderr, "%d %d %d %d vs %d %d %d %d\n", +- conflictWas[0], conflictWas[1], conflictWas[2], conflictWas[3], +- conflictIs[0], conflictIs[1], conflictIs[2], conflictIs[3]); +-#endif +- vector ret(4); +- ret[0] = standarDeviation(conflictWasSparse); +- ret[1] = standarDeviation(conflictIsSparse); +- ret[2] = standarDeviation(conflictWasDense); +- ret[3] = standarDeviation(conflictIsDense); +- +- return ret; +-} +- +-static void +-benchmarkConflict(uint64_t seed) +-{ +- float loadFactor[] = { 0.5f, 1.0f, 2.0f, 4.0f, 8.0f }; +- int bucketNum[] = { 512, 1024, 2048, 4096, 8192, 16384}; +- int lenRange[][2] = { {1,3}, {4, 15}, {16, 127}, {128, 1024}, {1, 1024}}; +- +- fprintf(stdout, +- "\nBechmarking conflict (stand deviation of conflict)\n%s\n", +- separator); +- +- for (uint32_t k = 0; k < sizeof(lenRange)/sizeof(lenRange[0]); k++) { +- fprintf(stdout, "\nlen range from %d - %d\n", lenRange[k][0], +- lenRange[k][1]); +- fprintf(stdout, "%-10s %-12s %-10s %-10s diff (s) %-10s %-10s diff (d)\n%s\n", +- "bucket", "load-factor", "was (s)", "is (s)", "was (d)", "is (d)", +- separator); +- for (uint32_t i = 0; i < sizeof(bucketNum)/sizeof(bucketNum[0]); ++i) { +- for (uint32_t j = 0; +- j < sizeof(loadFactor)/sizeof(loadFactor[0]); +- ++j) { +- int strNum = bucketNum[i] * loadFactor[j]; +- vector strs(strNum); +- genRandomString(lenRange[k][0], lenRange[k][1], strNum, strs); +- +- vector p; +- p = benchmarkConflictHelper(seed, bucketNum[i], strs); +- fprintf(stdout, "%-10d %-12.2f %-10.2f %-10.2f %-10.2f %-10.2f %-10.2f %.2f\n", +- bucketNum[i], loadFactor[j], +- p[0], p[1], p[0] - p[1], +- p[2], p[3], p[2] - p[3]); +- } +- } +- } +-} +- +-static void +-benchmarkHashFunc() +-{ +- srand(time(0)); +- +- uint64_t seed = (uint32_t) rand(); +- char buf[4096]; +- char c = getpid() % 'a'; +- for (int i = 0; i < (int)sizeof(buf); i++) { +- buf[i] = (c + i) % 255; +- } +- +- benchmarkConflict(seed); +- benchmarkIndividual(seed, buf); +- benchmarkToggleLens(seed, buf); +-} +- +-int +-main(int argc, char** argv) +-{ +- fprintf(stdout, "========================\nMicro benchmark...\n"); +- benchmarkHashFunc(); +- return 0; +-} +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/test.cpp b/bundle/LuaJIT-2.1-20210510/src/x64/test/test.cpp +deleted file mode 100644 +index 75f34e9f..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/test.cpp ++++ /dev/null +@@ -1,77 +0,0 @@ +-#include +-#include +-#include +-#include +-#define LUAJIT_SECURITY_STRHASH 1 +-#include "test_util.hpp" +-#include "lj_str_hash_x64.h" +- +-using namespace std; +- +-static bool +-smoke_test() +-{ +- fprintf(stdout, "running smoke tests...\n"); +- char buf[1024]; +- char c = getpid() % 'a'; +- srand(time(0)); +- +- for (int i = 0; i < (int)sizeof(buf); i++) { +- buf[i] = (c + i) % 255; 
+- } +- +- uint32_t lens[] = {3, 4, 5, 7, 8, 16, 17, 24, 25, 32, 33, 127, 128, +- 255, 256, 257}; +- for (unsigned i = 0; i < sizeof(lens)/sizeof(lens[0]); i++) { +- string s(buf, lens[i]); +- uint32_t h = hash_sparse(rand(), s.c_str(), lens[i]); +- test_printf("%d", h); +- test_printf("%d", hash_dense(rand(), h, s.c_str(), lens[i])); +- } +- +- return true; +-} +- +-static bool +-verify_log2() +-{ +- fprintf(stdout, "verify log2...\n"); +- bool err = false; +- std::map lm; +- lm[0] =(uint32_t)-1; +- lm[1] = 0; +- lm[2] = 1; +- for (int i = 2; i < 31; i++) { +- lm[(1<::iterator iter = lm.begin(), iter_e = lm.end(); +- iter != iter_e; ++iter) { +- uint32_t v = (*iter).first; +- uint32_t log2_expect = (*iter).second; +- uint32_t log2_get = log2_floor(v); +- if (log2_expect != log2_get) { +- err = true; +- fprintf(stderr, "log2(%u) expect %u, get %u\n", v, log2_expect, log2_get); +- exit(1); +- } +- } +- return !err; +-} +- +-int +-main(int argc, char** argv) +-{ +- fprintf(stdout, "=======================\nRun unit testing...\n"); +- +- ASSERT(smoke_test(), "smoke_test test failed"); +- ASSERT(verify_log2(), "log2 failed"); +- +- fprintf(stdout, TestErrMsgMgr::noError() ? "succ\n\n" : "fail\n\n"); +- +- return TestErrMsgMgr::noError() ? 0 : -1; +-} +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/test_str_comp.lua b/bundle/LuaJIT-2.1-20210510/src/x64/test/test_str_comp.lua +deleted file mode 100644 +index 3a5c3e67..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/test_str_comp.lua ++++ /dev/null +@@ -1,67 +0,0 @@ +---[[ +- Given two content-idental string s1, s2, test if they end up to be the +- same string object. The purpose of this test is to make sure hash function +- do not accidently include extraneous bytes before and after the string in +- question. 
+-]] +- +-local ffi = require("ffi") +-local C = ffi.C +- +-ffi.cdef[[ +- void free(void*); +- char* malloc(size_t); +- void *memset(void*, int, size_t); +- void *memcpy(void*, void*, size_t); +- long time(void*); +- void srandom(unsigned); +- long random(void); +-]] +- +- +-local function test_equal(len_min, len_max) +- -- source string is wrapped by 16-byte-junk both before and after the +- -- string +- local x = C.random() +- local l = len_min + x % (len_max - len_min); +- local buf_len = tonumber(l + 16 * 2) +- +- local src_buf = C.malloc(buf_len) +- for i = 0, buf_len - 1 do +- src_buf[i] = C.random() % 255 +- end +- +- -- dest string is the clone of the source string, but it is sandwiched +- -- by different junk bytes +- local dest_buf = C.malloc(buf_len) +- C.memset(dest_buf, 0x5a, buf_len) +- +- local ofst = 8 + (C.random() % 8) +- C.memcpy(dest_buf + ofst, src_buf + 16, l); +- +- local str1 = ffi.string(src_buf + 16, l) +- local str2 = ffi.string(dest_buf + ofst, l) +- +- C.free(src_buf) +- C.free(dest_buf) +- +- if str1 ~= str2 then +- -- Oops, look like hash function mistakenly include extraneous bytes +- -- close to the string +- return 1 -- wtf +- end +-end +- +---local lens = {1, 4, 16, 128, 1024} +-local lens = {128, 1024} +-local iter = 1000 +- +-for i = 1, #lens - 1 do +- for j = 1, iter do +- if test_equal(lens[i], lens[i+1]) ~= nil then +- os.exit(1) +- end +- end +-end +- +-os.exit(0) +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.cxx b/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.cxx +deleted file mode 100644 +index 34b7d675..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.cxx ++++ /dev/null +@@ -1,21 +0,0 @@ +-#include +-#include +-#include "test_util.hpp" +- +-using namespace std; +- +-std::vector TestErrMsgMgr::_errMsg; +- +-void +-test_printf(const char* format, ...) 
+-{ +- va_list args; +- va_start (args, format); +- +- FILE* devNull = fopen("/dev/null", "w"); +- if (devNull != 0) { +- (void)vfprintf (devNull, format, args); +- } +- fclose(devNull); +- va_end (args); +-} +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.d b/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.d +deleted file mode 100644 +index e539432e..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.d ++++ /dev/null +@@ -1,107 +0,0 @@ +-test_util.o: test_util.cxx /usr/include/stdc-predef.h \ +- /usr/lib/gcc/x86_64-redhat-linux/10/include/stdarg.h \ +- /usr/include/stdio.h /usr/include/bits/libc-header-start.h \ +- /usr/include/features.h /usr/include/sys/cdefs.h \ +- /usr/include/bits/wordsize.h /usr/include/bits/long-double.h \ +- /usr/include/gnu/stubs.h /usr/include/gnu/stubs-64.h \ +- /usr/lib/gcc/x86_64-redhat-linux/10/include/stddef.h \ +- /usr/include/bits/types.h /usr/include/bits/timesize.h \ +- /usr/include/bits/typesizes.h /usr/include/bits/time64.h \ +- /usr/include/bits/types/__fpos_t.h /usr/include/bits/types/__mbstate_t.h \ +- /usr/include/bits/types/__fpos64_t.h /usr/include/bits/types/__FILE.h \ +- /usr/include/bits/types/FILE.h /usr/include/bits/types/struct_FILE.h \ +- /usr/include/bits/types/cookie_io_functions_t.h \ +- /usr/include/bits/stdio_lim.h /usr/include/bits/sys_errlist.h \ +- /usr/include/bits/stdio.h test_util.hpp /usr/include/sys/time.h \ +- /usr/include/bits/types/time_t.h \ +- /usr/include/bits/types/struct_timeval.h /usr/include/sys/select.h \ +- /usr/include/bits/select.h /usr/include/bits/types/sigset_t.h \ +- /usr/include/bits/types/__sigset_t.h \ +- /usr/include/bits/types/struct_timespec.h /usr/include/bits/endian.h \ +- /usr/include/bits/endianness.h /usr/include/c++/10/string \ +- /usr/include/c++/10/x86_64-redhat-linux/bits/c++config.h \ +- /usr/include/c++/10/x86_64-redhat-linux/bits/os_defines.h \ +- /usr/include/c++/10/x86_64-redhat-linux/bits/cpu_defines.h \ +- /usr/include/c++/10/bits/stringfwd.h \ +- /usr/include/c++/10/bits/memoryfwd.h \ +- /usr/include/c++/10/bits/char_traits.h \ +- /usr/include/c++/10/bits/stl_algobase.h \ +- /usr/include/c++/10/bits/functexcept.h \ +- /usr/include/c++/10/bits/exception_defines.h \ +- /usr/include/c++/10/bits/cpp_type_traits.h \ +- /usr/include/c++/10/ext/type_traits.h \ +- /usr/include/c++/10/ext/numeric_traits.h \ +- /usr/include/c++/10/bits/stl_pair.h /usr/include/c++/10/bits/move.h \ +- /usr/include/c++/10/type_traits \ +- /usr/include/c++/10/bits/stl_iterator_base_types.h \ +- /usr/include/c++/10/bits/stl_iterator_base_funcs.h \ +- /usr/include/c++/10/bits/concept_check.h \ +- /usr/include/c++/10/debug/assertions.h \ +- /usr/include/c++/10/bits/stl_iterator.h \ +- /usr/include/c++/10/bits/ptr_traits.h /usr/include/c++/10/debug/debug.h \ +- /usr/include/c++/10/bits/predefined_ops.h \ +- /usr/include/c++/10/bits/postypes.h /usr/include/c++/10/cwchar \ +- /usr/include/wchar.h /usr/include/bits/floatn.h \ +- /usr/include/bits/floatn-common.h /usr/include/bits/wchar.h \ +- /usr/include/bits/types/wint_t.h /usr/include/bits/types/mbstate_t.h \ +- /usr/include/bits/types/locale_t.h /usr/include/bits/types/__locale_t.h \ +- /usr/include/c++/10/cstdint \ +- /usr/lib/gcc/x86_64-redhat-linux/10/include/stdint.h \ +- /usr/include/stdint.h /usr/include/bits/stdint-intn.h \ +- /usr/include/bits/stdint-uintn.h /usr/include/c++/10/bits/allocator.h \ +- /usr/include/c++/10/x86_64-redhat-linux/bits/c++allocator.h \ +- /usr/include/c++/10/ext/new_allocator.h /usr/include/c++/10/new \ 
+- /usr/include/c++/10/exception /usr/include/c++/10/bits/exception.h \ +- /usr/include/c++/10/bits/exception_ptr.h \ +- /usr/include/c++/10/bits/cxxabi_init_exception.h \ +- /usr/include/c++/10/typeinfo /usr/include/c++/10/bits/hash_bytes.h \ +- /usr/include/c++/10/bits/nested_exception.h \ +- /usr/include/c++/10/bits/localefwd.h \ +- /usr/include/c++/10/x86_64-redhat-linux/bits/c++locale.h \ +- /usr/include/c++/10/clocale /usr/include/locale.h \ +- /usr/include/bits/locale.h /usr/include/c++/10/iosfwd \ +- /usr/include/c++/10/cctype /usr/include/ctype.h \ +- /usr/include/c++/10/bits/ostream_insert.h \ +- /usr/include/c++/10/bits/cxxabi_forced.h \ +- /usr/include/c++/10/bits/stl_function.h \ +- /usr/include/c++/10/backward/binders.h \ +- /usr/include/c++/10/bits/range_access.h \ +- /usr/include/c++/10/initializer_list \ +- /usr/include/c++/10/bits/iterator_concepts.h \ +- /usr/include/c++/10/concepts /usr/include/c++/10/bits/range_cmp.h \ +- /usr/include/c++/10/bits/int_limits.h \ +- /usr/include/c++/10/bits/basic_string.h \ +- /usr/include/c++/10/ext/atomicity.h \ +- /usr/include/c++/10/x86_64-redhat-linux/bits/gthr.h \ +- /usr/include/c++/10/x86_64-redhat-linux/bits/gthr-default.h \ +- /usr/include/pthread.h /usr/include/sched.h /usr/include/bits/sched.h \ +- /usr/include/bits/types/struct_sched_param.h /usr/include/bits/cpu-set.h \ +- /usr/include/time.h /usr/include/bits/time.h /usr/include/bits/timex.h \ +- /usr/include/bits/types/clock_t.h /usr/include/bits/types/struct_tm.h \ +- /usr/include/bits/types/clockid_t.h /usr/include/bits/types/timer_t.h \ +- /usr/include/bits/types/struct_itimerspec.h \ +- /usr/include/bits/pthreadtypes.h /usr/include/bits/thread-shared-types.h \ +- /usr/include/bits/pthreadtypes-arch.h /usr/include/bits/struct_mutex.h \ +- /usr/include/bits/struct_rwlock.h /usr/include/bits/setjmp.h \ +- /usr/include/c++/10/x86_64-redhat-linux/bits/atomic_word.h \ +- /usr/include/c++/10/ext/alloc_traits.h \ +- /usr/include/c++/10/bits/alloc_traits.h \ +- /usr/include/c++/10/bits/stl_construct.h \ +- /usr/include/c++/10/ext/string_conversions.h /usr/include/c++/10/cstdlib \ +- /usr/include/stdlib.h /usr/include/bits/waitflags.h \ +- /usr/include/bits/waitstatus.h /usr/include/sys/types.h \ +- /usr/include/endian.h /usr/include/bits/byteswap.h \ +- /usr/include/bits/uintn-identity.h /usr/include/alloca.h \ +- /usr/include/bits/stdlib-bsearch.h /usr/include/bits/stdlib-float.h \ +- /usr/include/c++/10/bits/std_abs.h /usr/include/c++/10/cstdio \ +- /usr/include/c++/10/cerrno /usr/include/errno.h \ +- /usr/include/bits/errno.h /usr/include/linux/errno.h \ +- /usr/include/asm/errno.h /usr/include/asm-generic/errno.h \ +- /usr/include/asm-generic/errno-base.h /usr/include/bits/types/error_t.h \ +- /usr/include/c++/10/bits/charconv.h \ +- /usr/include/c++/10/bits/functional_hash.h \ +- /usr/include/c++/10/bits/basic_string.tcc /usr/include/c++/10/vector \ +- /usr/include/c++/10/bits/stl_uninitialized.h \ +- /usr/include/c++/10/bits/stl_vector.h \ +- /usr/include/c++/10/bits/stl_bvector.h \ +- /usr/include/c++/10/bits/vector.tcc +diff --git a/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.hpp b/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.hpp +deleted file mode 100644 +index 6cc2ea2c..00000000 +--- a/bundle/LuaJIT-2.1-20210510/src/x64/test/test_util.hpp ++++ /dev/null +@@ -1,57 +0,0 @@ +-#ifndef _TEST_UTIL_HPP_ +-#define _TEST_UTIL_HPP_ +- +-#include // gettimeofday() +-#include +-#include +- +-struct TestErrMsg +-{ +- const char* fileName; +- unsigned lineNo; +- 
std::string errMsg; +- +- TestErrMsg(const char* FN, unsigned LN, const char* Err): +- fileName(FN), lineNo(LN), errMsg(Err) {} +-}; +- +-class TestErrMsgMgr +-{ +-public: +- static std::vector getError(); +- static void +- addError(const char* fileName, unsigned lineNo, const char* Err) { +- _errMsg.push_back(TestErrMsg(fileName, lineNo, Err)); +- } +- +- static bool noError() { +- return _errMsg.empty(); +- } +- +-private: +- static std::vector _errMsg; +-}; +- +-#define ASSERT(c, e) \ +- if (!(c)) { TestErrMsgMgr::addError(__FILE__, __LINE__, (e)); } +- +-class TestClock +-{ +-public: +- void start() { gettimeofday(&_start, 0); } +- void stop() { gettimeofday(&_end, 0); } +- double getElapseInSecond() { +- return (_end.tv_sec - _start.tv_sec) +- + ((long)_end.tv_usec - (long)_start.tv_usec) / 1000000.0; +- } +- +-private: +- struct timeval _start, _end; +-}; +- +-// write to /dev/null, the only purpose is to make the data fed to the +-// function alive. +-extern void test_printf(const char* format, ...) +- __attribute__ ((format (printf, 1, 2))); +- +-#endif //_TEST_UTIL_HPP_ +-- +2.43.0 + diff --git a/build/openresty/patches/lua-cjson-2.1.0.8_01-empty_array.patch b/build/openresty/patches/lua-cjson-2.1.0.8_01-empty_array.patch new file mode 100644 index 00000000000..01e413909d1 --- /dev/null +++ b/build/openresty/patches/lua-cjson-2.1.0.8_01-empty_array.patch @@ -0,0 +1,12 @@ +diff -ruN a/lua-cjson-2.1.0.8/lua_cjson.c b/lua-cjson-2.1.0.8/lua_cjson.c +--- a/bundle/lua-cjson-2.1.0.8/lua_cjson.c 2022-01-11 15:11:17.495464192 +0800 ++++ b/bundle/lua-cjson-2.1.0.8/lua_cjson.c 2022-01-11 14:58:55.150669748 +0800 +@@ -796,7 +796,7 @@ + case LUA_TLIGHTUSERDATA: + if (lua_touserdata(l, -1) == NULL) { + strbuf_append_mem(json, "null", 4); +- } else if (lua_touserdata(l, -1) == &json_array) { ++ } else if (lua_touserdata(l, -1) == json_lightudata_mask(&json_array)) { + json_append_array(l, cfg, current_depth, json, 0); + } + break; diff --git a/build/openresty/patches/lua-resty-core-0.1.22_01-cosocket-mtls.patch b/build/openresty/patches/lua-resty-core-0.1.22_01-cosocket-mtls.patch new file mode 100644 index 00000000000..20470b087f5 --- /dev/null +++ b/build/openresty/patches/lua-resty-core-0.1.22_01-cosocket-mtls.patch @@ -0,0 +1,566 @@ +From 4f0f4bf63d23a952179aaf810c10dfffc19ee835 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 28 Jan 2022 20:54:30 +0800 +Subject: [PATCH 1/9] move tcp.lua into socket.lua + +--- + lib/resty/core/socket.lua | 136 +++++++++++++++++++++++++++++++++++++- + 1 file changed, 133 insertions(+), 3 deletions(-) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index 1a504ec..cc0081e 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -6,13 +6,21 @@ local ffi = require 'ffi' + + local error = error + local tonumber = tonumber ++local tostring = tostring ++local type = type ++local select = select + local registry = debug.getregistry() ++ ++local C = ffi.C + local ffi_new = ffi.new + local ffi_string = ffi.string +-local C = ffi.C ++local ffi_gc = ffi.gc ++ + local get_string_buf = base.get_string_buf + local get_size_ptr = base.get_size_ptr +-local tostring = tostring ++local get_request = base.get_request ++ ++local co_yield = coroutine._yield + + + local option_index = { +@@ -35,15 +43,29 @@ ngx_http_lua_ffi_socket_tcp_getoption(ngx_http_lua_socket_tcp_upstream_t *u, + int + 
ngx_http_lua_ffi_socket_tcp_setoption(ngx_http_lua_socket_tcp_upstream_t *u, + int opt, int val, unsigned char *err, size_t *errlen); ++ ++int ngx_http_lua_ffi_socket_tcp_sslhandshake(ngx_http_request_t *r, ++ ngx_http_lua_socket_tcp_upstream_t *u, void *sess, ++ int enable_session_reuse, ngx_str_t *server_name, int verify, ++ int ocsp_status_req, void *chain, void *pkey, char **errmsg); ++ ++int ngx_http_lua_ffi_socket_tcp_get_sslhandshake_result(ngx_http_request_t *r, ++ ngx_http_lua_socket_tcp_upstream_t *u, void **sess, char **errmsg, ++ int *openssl_error_code); ++ ++void ngx_http_lua_ffi_ssl_free_session(void *sess); + ]] + + + local output_value_buf = ffi_new("int[1]") + local FFI_OK = base.FFI_OK ++local FFI_ERROR = base.FFI_ERROR ++local FFI_DONE = base.FFI_DONE ++local FFI_AGAIN = base.FFI_AGAIN ++local FFI_NO_REQ_CTX = base.FFI_NO_REQ_CTX + local SOCKET_CTX_INDEX = 1 + local ERR_BUF_SIZE = 4096 + +- + local function get_tcp_socket(cosocket) + local tcp_socket = cosocket[SOCKET_CTX_INDEX] + if not tcp_socket then +@@ -114,10 +136,118 @@ local function setoption(cosocket, option, value) + end + + ++local errmsg = base.get_errmsg_ptr() ++local session_ptr = ffi_new("void *[1]") ++local server_name_str = ffi_new("ngx_str_t[1]") ++local openssl_error_code = ffi_new("int[1]") ++ ++ ++local function setclientcert(self, cert, pkey) ++ if not cert and not pkey then ++ self.client_cert = nil ++ self.client_pkey = nil ++ return ++ end ++ ++ if not cert or not pkey then ++ error("client certificate must be supplied with corresponding " .. ++ "private key", 2) ++ end ++ ++ if type(cert) ~= "cdata" then ++ error("bad client cert type", 2) ++ end ++ ++ if type(pkey) ~= "cdata" then ++ error("bad client pkey type", 2) ++ end ++ ++ self.client_cert = cert ++ self.client_pkey = pkey ++end ++ ++ ++local function sslhandshake(self, reused_session, server_name, ssl_verify, ++ send_status_req, ...) ++ ++ local n = select("#", ...) ++ if not self or n > 1 then ++ error("ngx.socket sslhandshake: expecting 1 ~ 5 arguments " .. ++ "(including the object), but seen " .. (5 + n)) ++ end ++ ++ local r = get_request() ++ if not r then ++ error("no request found", 2) ++ end ++ ++ session_ptr[0] = type(reused_session) == "cdata" and reused_session or nil ++ ++ if server_name then ++ server_name_str[0].data = server_name ++ server_name_str[0].len = #server_name ++ ++ else ++ server_name_str[0].data = nil ++ server_name_str[0].len = 0 ++ end ++ ++ local u = self[SOCKET_CTX_INDEX] ++ ++ local rc = C.ngx_http_lua_ffi_socket_tcp_sslhandshake(r, u, ++ session_ptr[0], ++ reused_session ~= false, ++ server_name_str, ++ ssl_verify and 1 or 0, ++ send_status_req and 1 or 0, ++ self.client_cert, self.client_pkey, errmsg) ++ ++ if rc == FFI_NO_REQ_CTX then ++ error("no request ctx found", 2) ++ end ++ ++ while true do ++ if rc == FFI_ERROR then ++ if openssl_error_code[0] ~= 0 then ++ return nil, openssl_error_code[0] .. ": " .. 
ffi_string(errmsg[0]) ++ end ++ ++ return nil, ffi_string(errmsg[0]) ++ end ++ ++ if rc == FFI_DONE then ++ return reused_session ++ end ++ ++ if rc == FFI_OK then ++ if reused_session == false then ++ return true ++ end ++ ++ rc = C.ngx_http_lua_ffi_socket_tcp_get_sslhandshake_result(r, u, ++ session_ptr, errmsg, openssl_error_code) ++ ++ if session_ptr[0] == nil then ++ return nil ++ end ++ ++ return ffi_gc(session_ptr[0], C.ngx_http_lua_ffi_ssl_free_session) ++ end ++ ++ co_yield() ++ ++ rc = C.ngx_http_lua_ffi_socket_tcp_get_sslhandshake_result(r, u, ++ session_ptr, errmsg, openssl_error_code) ++ end ++end ++ ++ + do + local method_table = registry.__tcp_cosocket_mt + method_table.getoption = getoption + method_table.setoption = setoption ++ method_table.setclientcert = setclientcert ++ method_table.sslhandshake = sslhandshake + end + + +-- +2.32.0 (Apple Git-132) + + +From 4eab5793d741c739d9c5cfe14e0671c1d70fd6e5 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 28 Jan 2022 21:37:45 +0800 +Subject: [PATCH 2/9] revert assert in sslhandshake + +--- + lib/resty/core/socket.lua | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index cc0081e..7c61d06 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -5,6 +5,7 @@ local ffi = require 'ffi' + + + local error = error ++local assert = assert + local tonumber = tonumber + local tostring = tostring + local type = type +@@ -227,6 +228,8 @@ local function sslhandshake(self, reused_session, server_name, ssl_verify, + rc = C.ngx_http_lua_ffi_socket_tcp_get_sslhandshake_result(r, u, + session_ptr, errmsg, openssl_error_code) + ++ assert(rc == FFI_OK) ++ + if session_ptr[0] == nil then + return nil + end +@@ -234,6 +237,8 @@ local function sslhandshake(self, reused_session, server_name, ssl_verify, + return ffi_gc(session_ptr[0], C.ngx_http_lua_ffi_ssl_free_session) + end + ++ assert(rc == FFI_AGAIN) ++ + co_yield() + + rc = C.ngx_http_lua_ffi_socket_tcp_get_sslhandshake_result(r, u, +-- +2.32.0 (Apple Git-132) + + +From 58de9a44c89f07eda98bb7fd978a9e04a244d2f2 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 28 Jan 2022 21:45:42 +0800 +Subject: [PATCH 3/9] rename ffi_string to ffi_str + +--- + lib/resty/core/socket.lua | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index 7c61d06..14457da 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -14,7 +14,7 @@ local registry = debug.getregistry() + + local C = ffi.C + local ffi_new = ffi.new +-local ffi_string = ffi.string ++local ffi_str = ffi.string + local ffi_gc = ffi.gc + + local get_string_buf = base.get_string_buf +@@ -98,7 +98,7 @@ local function getoption(cosocket, option) + err, + errlen) + if rc ~= FFI_OK then +- return nil, ffi_string(err, errlen[0]) ++ return nil, ffi_str(err, errlen[0]) + end + + return tonumber(output_value_buf[0]) +@@ -130,7 +130,7 @@ local function setoption(cosocket, option, value) + err, + errlen) + if rc ~= FFI_OK then +- return nil, ffi_string(err, errlen[0]) ++ return nil, ffi_str(err, errlen[0]) + end + + return true +@@ -210,10 +210,10 @@ local function sslhandshake(self, reused_session, server_name, ssl_verify, + while true do + if rc == FFI_ERROR then + 
if openssl_error_code[0] ~= 0 then +- return nil, openssl_error_code[0] .. ": " .. ffi_string(errmsg[0]) ++ return nil, openssl_error_code[0] .. ": " .. ffi_str(errmsg[0]) + end + +- return nil, ffi_string(errmsg[0]) ++ return nil, ffi_str(errmsg[0]) + end + + if rc == FFI_DONE then +-- +2.32.0 (Apple Git-132) + + +From ff138619432bda6b9bd4f37403c12600a4739e47 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Sat, 29 Jan 2022 07:23:16 +0800 +Subject: [PATCH 4/9] minor style fix + +--- + lib/resty/core/socket.lua | 15 +++++++++------ + 1 file changed, 9 insertions(+), 6 deletions(-) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index 14457da..3c882af 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -1,7 +1,7 @@ + local base = require "resty.core.base" +-base.allows_subsystem('http') +-local debug = require 'debug' +-local ffi = require 'ffi' ++base.allows_subsystem("http") ++local debug = require "debug" ++local ffi = require "ffi" + + + local error = error +@@ -45,16 +45,19 @@ int + ngx_http_lua_ffi_socket_tcp_setoption(ngx_http_lua_socket_tcp_upstream_t *u, + int opt, int val, unsigned char *err, size_t *errlen); + +-int ngx_http_lua_ffi_socket_tcp_sslhandshake(ngx_http_request_t *r, ++int ++ngx_http_lua_ffi_socket_tcp_sslhandshake(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, void *sess, + int enable_session_reuse, ngx_str_t *server_name, int verify, + int ocsp_status_req, void *chain, void *pkey, char **errmsg); + +-int ngx_http_lua_ffi_socket_tcp_get_sslhandshake_result(ngx_http_request_t *r, ++int ++ngx_http_lua_ffi_socket_tcp_get_sslhandshake_result(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, void **sess, char **errmsg, + int *openssl_error_code); + +-void ngx_http_lua_ffi_ssl_free_session(void *sess); ++void ++ngx_http_lua_ffi_ssl_free_session(void *sess); + ]] + + +-- +2.32.0 (Apple Git-132) + + +From a843a258987efba49f0b6979389f75ee32c2150c Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Sat, 29 Jan 2022 07:28:41 +0800 +Subject: [PATCH 5/9] rename self to cosocket + +--- + lib/resty/core/socket.lua | 18 +++++++++--------- + 1 file changed, 9 insertions(+), 9 deletions(-) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index 3c882af..374d583 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -146,10 +146,10 @@ local server_name_str = ffi_new("ngx_str_t[1]") + local openssl_error_code = ffi_new("int[1]") + + +-local function setclientcert(self, cert, pkey) ++local function setclientcert(cosocket, cert, pkey) + if not cert and not pkey then +- self.client_cert = nil +- self.client_pkey = nil ++ cosocket.client_cert = nil ++ cosocket.client_pkey = nil + return + end + +@@ -166,16 +166,16 @@ local function setclientcert(self, cert, pkey) + error("bad client pkey type", 2) + end + +- self.client_cert = cert +- self.client_pkey = pkey ++ cosocket.client_cert = cert ++ cosocket.client_pkey = pkey + end + + +-local function sslhandshake(self, reused_session, server_name, ssl_verify, ++local function sslhandshake(cosocket, reused_session, server_name, ssl_verify, + send_status_req, ...) + + local n = select("#", ...) +- if not self or n > 1 then ++ if not cosocket or n > 1 then + error("ngx.socket sslhandshake: expecting 1 ~ 5 arguments " .. 
+ "(including the object), but seen " .. (5 + n)) + end +@@ -196,7 +196,7 @@ local function sslhandshake(self, reused_session, server_name, ssl_verify, + server_name_str[0].len = 0 + end + +- local u = self[SOCKET_CTX_INDEX] ++ local u = cosocket[SOCKET_CTX_INDEX] + + local rc = C.ngx_http_lua_ffi_socket_tcp_sslhandshake(r, u, + session_ptr[0], +@@ -204,7 +204,7 @@ local function sslhandshake(self, reused_session, server_name, ssl_verify, + server_name_str, + ssl_verify and 1 or 0, + send_status_req and 1 or 0, +- self.client_cert, self.client_pkey, errmsg) ++ cosocket.client_cert, cosocket.client_pkey, errmsg) + + if rc == FFI_NO_REQ_CTX then + error("no request ctx found", 2) +-- +2.32.0 (Apple Git-132) + + +From db95a049a019ff6f0d3b4e550412e40c25dda41f Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Sat, 29 Jan 2022 07:35:04 +0800 +Subject: [PATCH 6/9] use get_tcp_socket() in sslhandshake + +--- + lib/resty/core/socket.lua | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index 374d583..ecff453 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -196,7 +196,7 @@ local function sslhandshake(cosocket, reused_session, server_name, ssl_verify, + server_name_str[0].len = 0 + end + +- local u = cosocket[SOCKET_CTX_INDEX] ++ local u = get_tcp_socket(cosocket) + + local rc = C.ngx_http_lua_ffi_socket_tcp_sslhandshake(r, u, + session_ptr[0], +-- +2.32.0 (Apple Git-132) + + +From 6767f0c2e8a73fd1a09d727431bed457c5cac4c0 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Sat, 29 Jan 2022 08:58:52 +0800 +Subject: [PATCH 7/9] fix arguments check in sslhandshake + +--- + lib/resty/core/socket.lua | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index ecff453..15e3065 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -177,7 +177,7 @@ local function sslhandshake(cosocket, reused_session, server_name, ssl_verify, + local n = select("#", ...) + if not cosocket or n > 1 then + error("ngx.socket sslhandshake: expecting 1 ~ 5 arguments " .. +- "(including the object), but seen " .. (5 + n)) ++ "(including the object), but seen " .. (cosocket and 5 + n or 0)) + end + + local r = get_request() +-- +2.32.0 (Apple Git-132) + + +From 4eeddcd2114d0097e4b9cb11f2f93d30c70d573e Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Mon, 7 Feb 2022 10:59:35 +0800 +Subject: [PATCH 8/9] setclientcert return err + +--- + lib/resty/core/socket.lua | 13 ++++++++----- + 1 file changed, 8 insertions(+), 5 deletions(-) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index 15e3065..879d678 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -150,24 +150,27 @@ local function setclientcert(cosocket, cert, pkey) + if not cert and not pkey then + cosocket.client_cert = nil + cosocket.client_pkey = nil +- return ++ return true + end + + if not cert or not pkey then +- error("client certificate must be supplied with corresponding " .. +- "private key", 2) ++ return nil, ++ "client certificate must be supplied with corresponding " .. 
++ "private key" + end + + if type(cert) ~= "cdata" then +- error("bad client cert type", 2) ++ return nil, "bad client cert type" + end + + if type(pkey) ~= "cdata" then +- error("bad client pkey type", 2) ++ return nil, "bad client pkey type" + end + + cosocket.client_cert = cert + cosocket.client_pkey = pkey ++ ++ return true + end + + +-- +2.32.0 (Apple Git-132) + + +From fead2a28f409117ad1b6c98d02edb6a38a64fde0 Mon Sep 17 00:00:00 2001 +From: James Hurst +Date: Wed, 9 Feb 2022 16:05:11 +0000 +Subject: [PATCH 9/9] fix(socket) add temporary backwards compatability for + tlshandshake + +--- + lib/resty/core/socket.lua | 22 ++++++++++++++++++++++ + 1 file changed, 22 insertions(+) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/socket.lua b/lua-resty-core-0.1.22/lib/resty/core/socket.lua +index 879d678..448bf36 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/socket.lua +@@ -253,12 +253,34 @@ local function sslhandshake(cosocket, reused_session, server_name, ssl_verify, + end + + ++-- Temporary patch for backwards compatablity with existing Kong tech debt ++local function tlshandshake(cosocket, options) ++ local options = options or {} ++ ++ if options.client_cert then ++ local ok, err = cosocket:setclientcert(options.client_cert, options.client_priv_key) ++ if not ok then ++ return nil, err ++ end ++ end ++ ++ return sslhandshake( ++ cosocket, ++ options.reused_session, ++ options.server_name, ++ options.ssl_verify, ++ options.ocsp_status_req ++ ) ++end ++ ++ + do + local method_table = registry.__tcp_cosocket_mt + method_table.getoption = getoption + method_table.setoption = setoption + method_table.setclientcert = setclientcert + method_table.sslhandshake = sslhandshake ++ method_table.tlshandshake = tlshandshake + end + + +-- +2.32.0 (Apple Git-132) + diff --git a/build/openresty/patches/lua-resty-core-0.1.22_02-dyn_upstream_keepalive.patch b/build/openresty/patches/lua-resty-core-0.1.22_02-dyn_upstream_keepalive.patch new file mode 100644 index 00000000000..f1663e178fc --- /dev/null +++ b/build/openresty/patches/lua-resty-core-0.1.22_02-dyn_upstream_keepalive.patch @@ -0,0 +1,230 @@ +From 37feb95041f183ae4fbafeebc47dc104995e6f27 Mon Sep 17 00:00:00 2001 +From: Thibault Charbonnier +Date: Tue, 17 Sep 2019 11:44:33 -0700 +Subject: [PATCH] feature: implemented the 'balancer.enable_keepalive()' API. 
+ +--- + lua-resty-core-0.1.22/lib/ngx/balancer.lua | 165 +++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 151 insertions(+), 14 deletions(-) + +diff --git a/lua-resty-core-0.1.22/lib/ngx/balancer.lua b/lua-resty-core-0.1.22/lib/ngx/balancer.lua +index d584639..614312f 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/ngx/balancer.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/ngx/balancer.lua +@@ -3,6 +3,7 @@ + + local base = require "resty.core.base" + base.allows_subsystem('http', 'stream') ++require "resty.core.hash" + + + local ffi = require "ffi" +@@ -17,8 +18,10 @@ local error = error + local type = type + local tonumber = tonumber + local max = math.max ++local ngx_crc32_long = ngx.crc32_long + local subsystem = ngx.config.subsystem + local ngx_lua_ffi_balancer_set_current_peer ++local ngx_lua_ffi_balancer_enable_keepalive + local ngx_lua_ffi_balancer_set_more_tries + local ngx_lua_ffi_balancer_get_last_failure + local ngx_lua_ffi_balancer_set_timeouts -- used by both stream and http +@@ -27,7 +30,11 @@ local ngx_lua_ffi_balancer_set_timeouts -- used by both stream and http + if subsystem == 'http' then + ffi.cdef[[ + int ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, +- const unsigned char *addr, size_t addr_len, int port, char **err); ++ const unsigned char *addr, size_t addr_len, int port, ++ unsigned int cpool_crc32, unsigned int cpool_size, char **err); ++ ++ int ngx_http_lua_ffi_balancer_enable_keepalive(ngx_http_request_t *r, ++ unsigned long timeout, unsigned int max_requests, char **err); + + int ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, + int count, char **err); +@@ -46,6 +53,9 @@ if subsystem == 'http' then + ngx_lua_ffi_balancer_set_current_peer = + C.ngx_http_lua_ffi_balancer_set_current_peer + ++ ngx_lua_ffi_balancer_enable_keepalive = ++ C.ngx_http_lua_ffi_balancer_enable_keepalive ++ + ngx_lua_ffi_balancer_set_more_tries = + C.ngx_http_lua_ffi_balancer_set_more_tries + +@@ -96,6 +106,11 @@ else + end + + ++local DEFAULT_KEEPALIVE_POOL_SIZE = 30 ++local DEFAULT_KEEPALIVE_IDLE_TIMEOUT = 60000 ++local DEFAULT_KEEPALIVE_MAX_REQUESTS = 100 ++ ++ + local peer_state_names = { + [1] = "keepalive", + [2] = "next", +@@ -106,25 +121,147 @@ local peer_state_names = { + local _M = { version = base.version } + + +-function _M.set_current_peer(addr, port) +- local r = get_request() +- if not r then +- error("no request found") ++if subsystem == "http" then ++ function _M.set_current_peer(addr, port, opts) ++ local r = get_request() ++ if not r then ++ error("no request found") ++ end ++ ++ local pool_crc32 ++ local pool_size ++ ++ if opts then ++ if type(opts) ~= "table" then ++ error("bad argument #3 to 'set_current_peer' " .. ++ "(table expected, got " .. type(opts) .. ")", 2) ++ end ++ ++ local pool = opts.pool ++ pool_size = opts.pool_size ++ ++ if pool then ++ if type(pool) ~= "string" then ++ error("bad option 'pool' to 'set_current_peer' " .. ++ "(string expected, got " .. type(pool) .. ")", 2) ++ end ++ ++ pool_crc32 = ngx_crc32_long(pool) ++ end ++ ++ if pool_size then ++ if type(pool_size) ~= "number" then ++ error("bad option 'pool_size' to 'set_current_peer' " .. ++ "(number expected, got " .. type(pool_size) .. ")", 2) ++ ++ elseif pool_size < 1 then ++ error("bad option 'pool_size' to 'set_current_peer' " .. 
++ "(expected > 0)", 2) ++ end ++ end ++ end ++ ++ if not port then ++ port = 0 ++ ++ elseif type(port) ~= "number" then ++ port = tonumber(port) ++ end ++ ++ if not pool_crc32 then ++ pool_crc32 = 0 ++ end ++ ++ if not pool_size then ++ pool_size = DEFAULT_KEEPALIVE_POOL_SIZE ++ end ++ ++ local rc = ngx_lua_ffi_balancer_set_current_peer(r, addr, #addr, port, ++ pool_crc32, pool_size, ++ errmsg) ++ if rc == FFI_OK then ++ return true ++ end ++ ++ return nil, ffi_str(errmsg[0]) + end + +- if not port then +- port = 0 +- elseif type(port) ~= "number" then +- port = tonumber(port) ++else ++ function _M.set_current_peer(addr, port, opts) ++ local r = get_request() ++ if not r then ++ error("no request found") ++ end ++ ++ if opts then ++ error("bad argument #3 to 'set_current_peer' ('opts' not yet " .. ++ "implemented in " .. subsystem .. " subsystem)", 2) ++ end ++ ++ if not port then ++ port = 0 ++ ++ elseif type(port) ~= "number" then ++ port = tonumber(port) ++ end ++ ++ local rc = ngx_lua_ffi_balancer_set_current_peer(r, addr, #addr, ++ port, errmsg) ++ if rc == FFI_OK then ++ return true ++ end ++ ++ return nil, ffi_str(errmsg[0]) + end ++end + +- local rc = ngx_lua_ffi_balancer_set_current_peer(r, addr, #addr, +- port, errmsg) +- if rc == FFI_OK then +- return true ++ ++if subsystem == "http" then ++ function _M.enable_keepalive(idle_timeout, max_requests) ++ local r = get_request() ++ if not r then ++ error("no request found") ++ end ++ ++ if not idle_timeout then ++ idle_timeout = DEFAULT_KEEPALIVE_IDLE_TIMEOUT ++ ++ elseif type(idle_timeout) ~= "number" then ++ error("bad argument #1 to 'enable_keepalive' " .. ++ "(number expected, got " .. type(idle_timeout) .. ")", 2) ++ ++ elseif idle_timeout < 0 then ++ error("bad argument #1 to 'enable_keepalive' (expected >= 0)", 2) ++ ++ else ++ idle_timeout = idle_timeout * 1000 ++ end ++ ++ if not max_requests then ++ max_requests = DEFAULT_KEEPALIVE_MAX_REQUESTS ++ ++ elseif type(max_requests) ~= "number" then ++ error("bad argument #2 to 'enable_keepalive' " .. ++ "(number expected, got " .. type(max_requests) .. ")", 2) ++ ++ elseif max_requests < 0 then ++ error("bad argument #2 to 'enable_keepalive' (expected >= 0)", 2) ++ end ++ ++ local rc = ngx_lua_ffi_balancer_enable_keepalive(r, idle_timeout, ++ max_requests, errmsg) ++ if rc == FFI_OK then ++ return true ++ end ++ ++ return nil, ffi_str(errmsg[0]) + end + +- return nil, ffi_str(errmsg[0]) ++else ++ function _M.enable_keepalive() ++ error("'enable_keepalive' not yet implemented in " .. subsystem .. 
++ " subsystem", 2) ++ end + end + + +-- +2.25.2 diff --git a/build/openresty/patches/lua-resty-core-0.1.22_03-make-resty.core.shdict-compatible-with-m1.patch b/build/openresty/patches/lua-resty-core-0.1.22_03-make-resty.core.shdict-compatible-with-m1.patch new file mode 100644 index 00000000000..d394d5b651e --- /dev/null +++ b/build/openresty/patches/lua-resty-core-0.1.22_03-make-resty.core.shdict-compatible-with-m1.patch @@ -0,0 +1,270 @@ +From 85202b4306db143de55926564bf6ce981f3631b4 Mon Sep 17 00:00:00 2001 +From: Aapo Talvensaari +Date: Thu, 16 Dec 2021 19:28:43 +0200 +Subject: [PATCH] fix(shdict) make resty.core.shdict compatible with m1 (using + wrappers) + +--- + lua-resty-core-0.1.22/lib/resty/core/shdict.lua | 174 ++++++++++++++++++++++++++++++++++++++ + 1 file changed, 174 insertions(+) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/shdict.lua b/lua-resty-core-0.1.22/lib/resty/core/shdict.lua +index dedf12c..e501a38 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/shdict.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/shdict.lua +@@ -32,8 +32,11 @@ local subsystem = ngx.config.subsystem + + + local ngx_lua_ffi_shdict_get ++local ngx_lua_ffi_shdict_get_m1 + local ngx_lua_ffi_shdict_incr ++local ngx_lua_ffi_shdict_incr_m1 + local ngx_lua_ffi_shdict_store ++local ngx_lua_ffi_shdict_store_m1 + local ngx_lua_ffi_shdict_flush_all + local ngx_lua_ffi_shdict_get_ttl + local ngx_lua_ffi_shdict_set_expire +@@ -42,6 +45,53 @@ local ngx_lua_ffi_shdict_free_space + local ngx_lua_ffi_shdict_udata_to_zone + + ++local M1 = jit and jit.os == "OSX" and jit.arch == "arm64" ++if M1 then ++ ffi.cdef[[ ++typedef struct { ++ void *zone; ++ const unsigned char *key; ++ size_t key_len; ++ int *value_type; ++ unsigned char **str_value_buf; ++ size_t *str_value_len; ++ double *num_value; ++ int *user_flags; ++ int get_stale; ++ int *is_stale; ++ char **errmsg; ++} ngx_shdict_get_t; ++ ++typedef struct { ++ void *zone; ++ int op; ++ const unsigned char *key; ++ size_t key_len; ++ int value_type; ++ const unsigned char *str_value_buf; ++ size_t str_value_len; ++ double num_value; ++ long exptime; ++ int user_flags; ++ char **errmsg; ++ int *forcible; ++} ngx_shdict_store_t; ++ ++typedef struct { ++ void *zone; ++ const unsigned char *key; ++ size_t key_len; ++ double *num_value; ++ char **errmsg; ++ int has_init; ++ double init; ++ long init_ttl; ++ int *forcible; ++} ngx_shdict_incr_t; ++]] ++end ++ ++ + if subsystem == 'http' then + ffi.cdef[[ + int ngx_http_lua_ffi_shdict_get(void *zone, const unsigned char *key, +@@ -72,6 +122,18 @@ size_t ngx_http_lua_ffi_shdict_capacity(void *zone); + void *ngx_http_lua_ffi_shdict_udata_to_zone(void *zone_udata); + ]] + ++ if M1 then ++ ffi.cdef [[ ++int ngx_http_lua_ffi_shdict_get_m1(ngx_shdict_get_t *s); ++int ngx_http_lua_ffi_shdict_store_m1(ngx_shdict_store_t *s); ++int ngx_http_lua_ffi_shdict_incr_m1(ngx_shdict_incr_t *s); ++ ]] ++ ++ ngx_lua_ffi_shdict_get_m1 = C.ngx_http_lua_ffi_shdict_get_m1 ++ ngx_lua_ffi_shdict_store_m1 = C.ngx_http_lua_ffi_shdict_store_m1 ++ ngx_lua_ffi_shdict_incr_m1 = C.ngx_http_lua_ffi_shdict_incr_m1 ++ end ++ + ngx_lua_ffi_shdict_get = C.ngx_http_lua_ffi_shdict_get + ngx_lua_ffi_shdict_incr = C.ngx_http_lua_ffi_shdict_incr + ngx_lua_ffi_shdict_store = C.ngx_http_lua_ffi_shdict_store +@@ -126,6 +188,17 @@ size_t ngx_stream_lua_ffi_shdict_capacity(void *zone); + void *ngx_stream_lua_ffi_shdict_udata_to_zone(void *zone_udata); + ]] + ++ if M1 then ++ ffi.cdef [[ ++int ngx_stream_lua_ffi_shdict_get_m1(ngx_shdict_get_t 
*s); ++int ngx_stream_lua_ffi_shdict_store_m1(ngx_shdict_store_t *s); ++int ngx_stream_lua_ffi_shdict_incr_m1(ngx_shdict_incr_t *s); ++ ]] ++ ngx_lua_ffi_shdict_get_m1 = C.ngx_stream_lua_ffi_shdict_get_m1 ++ ngx_lua_ffi_shdict_store_m1 = C.ngx_stream_lua_ffi_shdict_store_m1 ++ ngx_lua_ffi_shdict_incr_m1 = C.ngx_stream_lua_ffi_shdict_incr_m1 ++ end ++ + ngx_lua_ffi_shdict_get = C.ngx_stream_lua_ffi_shdict_get + ngx_lua_ffi_shdict_incr = C.ngx_stream_lua_ffi_shdict_incr + ngx_lua_ffi_shdict_store = C.ngx_stream_lua_ffi_shdict_store +@@ -245,6 +318,31 @@ local function shdict_store(zone, op, key, value, exptime, flags) + return nil, "bad value type" + end + ++ local rc ++ if M1 then ++ local q = ffi_new("ngx_shdict_store_t") ++ q.zone = zone ++ q.op = op ++ q.key = key ++ q.key_len = key_len ++ q.value_type = valtyp ++ q.str_value_buf = str_val_buf ++ q.str_value_len = str_val_len ++ q.num_value = num_val ++ q.exptime = exptime * 1000 ++ q.user_flags = flags ++ q.errmsg = errmsg ++ q.forcible = forcible ++ ++ local rc = ngx_lua_ffi_shdict_store_m1(q) ++ if rc == 0 then -- NGX_OK ++ return true, nil, forcible[0] == 1 ++ end ++ ++ -- NGX_DECLINED or NGX_ERROR ++ return false, ffi_str(errmsg[0]), forcible[0] == 1 ++ end ++ + local rc = ngx_lua_ffi_shdict_store(zone, op, key, key_len, + valtyp, str_val_buf, + str_val_len, num_val, +@@ -317,6 +415,30 @@ local function shdict_get(zone, key) + local value_len = get_size_ptr() + value_len[0] = size + ++ if M1 then ++ local q = ffi_new("ngx_shdict_get_t") ++ q.zone = zone ++ q.key = key ++ q.key_len = key_len ++ q.value_type = value_type ++ q.str_value_buf = str_value_buf ++ q.str_value_len = value_len ++ q.num_value = num_value ++ q.user_flags = user_flags ++ q.get_stale = 0 ++ q.is_stale = is_stale ++ q.errmsg = errmsg ++ ++ local rc = ngx_lua_ffi_shdict_get_m1(q) ++ if rc ~= 0 then ++ if errmsg[0] ~= nil then ++ return nil, ffi_str(errmsg[0]) ++ end ++ ++ error("failed to get the key") ++ end ++ else ++ + local rc = ngx_lua_ffi_shdict_get(zone, key, key_len, value_type, + str_value_buf, value_len, + num_value, user_flags, 0, +@@ -329,6 +451,8 @@ local function shdict_get(zone, key) + error("failed to get the key") + end + ++ end ++ + local typ = value_type[0] + + if typ == 0 then -- LUA_TNIL +@@ -392,6 +516,30 @@ local function shdict_get_stale(zone, key) + local value_len = get_size_ptr() + value_len[0] = size + ++ if M1 then ++ local q = ffi_new("ngx_shdict_get_t") ++ q.zone = zone ++ q.key = key ++ q.key_len = key_len ++ q.value_type = value_type ++ q.str_value_buf = str_value_buf ++ q.str_value_len = value_len ++ q.num_value = num_value ++ q.user_flags = user_flags ++ q.get_stale = 1 ++ q.is_stale = is_stale ++ q.errmsg = errmsg ++ ++ local rc = ngx_lua_ffi_shdict_get_m1(q) ++ if rc ~= 0 then ++ if errmsg[0] ~= nil then ++ return nil, ffi_str(errmsg[0]) ++ end ++ ++ error("failed to get the key") ++ end ++ else ++ + local rc = ngx_lua_ffi_shdict_get(zone, key, key_len, value_type, + str_value_buf, value_len, + num_value, user_flags, 1, +@@ -404,6 +552,8 @@ local function shdict_get_stale(zone, key) + error("failed to get the key") + end + ++ end ++ + local typ = value_type[0] + + if typ == 0 then -- LUA_TNIL +@@ -498,6 +648,28 @@ local function shdict_incr(zone, key, value, init, init_ttl) + init_ttl = 0 + end + ++ if M1 then ++ local q = ffi_new("ngx_shdict_incr_t") ++ q.zone = zone ++ q.key = key ++ q.key_len = key_len ++ q.num_value = num_value ++ q.errmsg = errmsg ++ if init then ++ q.has_init = 1 ++ q.init = init ++ else ++ q.has_init = 0 
++ end ++ q.init_ttl = init_ttl * 1000 ++ q.forcible = forcible ++ ++ local rc = ngx_lua_ffi_shdict_incr_m1(q) ++ if rc ~= 0 then -- ~= NGX_OK ++ return nil, ffi_str(errmsg[0]) ++ end ++ else ++ + local rc = ngx_lua_ffi_shdict_incr(zone, key, key_len, num_value, + errmsg, init and 1 or 0, + init or 0, init_ttl * 1000, +@@ -506,6 +678,8 @@ local function shdict_incr(zone, key, value, init, init_ttl) + return nil, ffi_str(errmsg[0]) + end + ++ end ++ + if not init then + return tonumber(num_value[0]) + end +-- +2.34.1 + diff --git a/build/openresty/patches/lua-resty-core-0.1.22_04-make-resty.core.response-compatible-with-m1.patch b/build/openresty/patches/lua-resty-core-0.1.22_04-make-resty.core.response-compatible-with-m1.patch new file mode 100644 index 00000000000..810eb285c0b --- /dev/null +++ b/build/openresty/patches/lua-resty-core-0.1.22_04-make-resty.core.response-compatible-with-m1.patch @@ -0,0 +1,101 @@ +From 94efefb9aaede738ec9e29e639cf5e934e9a1d5a Mon Sep 17 00:00:00 2001 +From: Aapo Talvensaari +Date: Thu, 16 Dec 2021 19:28:13 +0200 +Subject: [PATCH] fix(response) make resty.core.response compatible with m1 + (using kong wrappers) + +--- + lua-resty-core-0.1.22/lib/resty/core/response.lua | 58 +++++++++++++++++++++++++++++++++++++ + 1 file changed, 58 insertions(+) + +diff --git a/lua-resty-core-0.1.22/lib/resty/core/response.lua b/lua-resty-core-0.1.22/lib/resty/core/response.lua +index 891a07e..1efdf56 100644 +--- a/bundle/lua-resty-core-0.1.22/lib/resty/core/response.lua ++++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/response.lua +@@ -45,6 +45,27 @@ ffi.cdef[[ + ]] + + ++local M1 = jit and jit.os == "OSX" and jit.arch == "arm64" ++if M1 then ++ffi.cdef[[ ++ typedef struct { ++ ngx_http_request_t *r; ++ const char *key_data; ++ size_t key_len; ++ int is_nil; ++ const char *sval; ++ size_t sval_len; ++ void *mvals; ++ size_t mvals_len; ++ int override; ++ char **errmsg; ++ } ngx_set_resp_header_t; ++ ++ int ngx_http_lua_ffi_set_resp_header_m1(ngx_set_resp_header_t *s); ++]] ++end ++ ++ + local function set_resp_header(tb, key, value, no_override) + local r = get_request() + if not r then +@@ -61,6 +82,22 @@ local function set_resp_header(tb, key, value, no_override) + error("invalid header value", 3) + end + ++ if M1 then ++ local q = ffi.new("ngx_set_resp_header_t") ++ q.r = r ++ q.key_data = key ++ q.key_len = #key ++ q.is_nil = true ++ q.sval_len = 0 ++ q.mvals_len = 0 ++ q.override = 1 ++ q.errmsg = errmsg ++ ++ rc = C.ngx_http_lua_ffi_set_resp_header_m1(q) ++ ++ goto results ++ end ++ + rc = C.ngx_http_lua_ffi_set_resp_header(r, key, #key, true, nil, 0, nil, + 0, 1, errmsg) + else +@@ -99,11 +136,32 @@ local function set_resp_header(tb, key, value, no_override) + end + + local override_int = no_override and 0 or 1 ++ ++ if M1 then ++ local s = ffi.new("ngx_set_resp_header_t") ++ s.r = r ++ s.key_data = key ++ s.key_len = #key ++ s.is_nil = false ++ s.sval = sval ++ s.sval_len = sval_len ++ s.mvals = mvals ++ s.mvals_len = mvals_len ++ s.override = override_int ++ s.errmsg = errmsg ++ ++ rc = C.ngx_http_lua_ffi_set_resp_header_m1(s) ++ ++ goto results ++ end ++ + rc = C.ngx_http_lua_ffi_set_resp_header(r, key, #key, false, sval, + sval_len, mvals, mvals_len, + override_int, errmsg) + end + ++ ::results:: ++ + if rc == 0 or rc == FFI_DECLINED then + return + end +-- +2.34.1 + diff --git a/build/openresty/patches/lua-resty-websocket-0.08_01-client-mtls.patch b/build/openresty/patches/lua-resty-websocket-0.08_01-client-mtls.patch new file mode 100644 index 
00000000000..da796efb358 --- /dev/null +++ b/build/openresty/patches/lua-resty-websocket-0.08_01-client-mtls.patch @@ -0,0 +1,92 @@ +From 05d0832cf96c216297810cb495706c50309b8c5a Mon Sep 17 00:00:00 2001 +From: James Hurst +Date: Mon, 7 Feb 2022 11:36:25 +0000 +Subject: [PATCH 1/2] feat: add mtls client cert support + +--- + lib/resty/websocket/client.lua | 26 ++++++++++++++++++++++---- + 1 file changed, 22 insertions(+), 4 deletions(-) + +diff --git a/lua-resty-websocket-0.08/lib/resty/websocket/client.lua b/lua-resty-websocket-0.08/lib/resty/websocket/client.lua +index 067b2a5..2ec96dd 100644 +--- a/bundle/lua-resty-websocket-0.08/lib/resty/websocket/client.lua ++++ b/bundle/lua-resty-websocket-0.08/lib/resty/websocket/client.lua +@@ -98,7 +98,8 @@ function _M.connect(self, uri, opts) + path = "/" + end + +- local ssl_verify, headers, proto_header, origin_header, sock_opts = false ++ local ssl_verify, server_name, headers, proto_header, origin_header, sock_opts = false ++ local client_cert, client_priv_key + + if opts then + local protos = opts.protocols +@@ -122,11 +123,20 @@ function _M.connect(self, uri, opts) + sock_opts = { pool = pool } + end + +- if opts.ssl_verify then ++ client_cert = opts.client_cert ++ client_priv_key = opts.client_priv_key ++ ++ if client_cert then ++ assert(client_priv_key, ++ "client_priv_key must be provided with client_cert") ++ end ++ ++ if opts.ssl_verify or opts.server_name then + if not ssl_support then + return nil, "ngx_lua 0.9.11+ required for SSL sockets" + end +- ssl_verify = true ++ ssl_verify = opts.ssl_verify ++ server_name = opts.server_name or host + end + + if opts.headers then +@@ -151,7 +161,15 @@ function _M.connect(self, uri, opts) + if not ssl_support then + return nil, "ngx_lua 0.9.11+ required for SSL sockets" + end +- ok, err = sock:sslhandshake(false, host, ssl_verify) ++ ++ if client_cert then ++ ok, err = sock:setclientcert(client_cert, client_priv_key) ++ if not ok then ++ return nil, "ssl client cert failued: " .. err ++ end ++ end ++ ++ ok, err = sock:sslhandshake(false, server_name, ssl_verify) + if not ok then + return nil, "ssl handshake failed: " .. err + end +-- +2.32.0 (Apple Git-132) + + +From fcf3370eef554cd4e1791ac92c43b420d25d66a1 Mon Sep 17 00:00:00 2001 +From: James Hurst +Date: Mon, 7 Feb 2022 15:20:48 +0000 +Subject: [PATCH 2/2] fix(client) fix typo in error message + +--- + lib/resty/websocket/client.lua | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lua-resty-websocket-0.08/lib/resty/websocket/client.lua b/lua-resty-websocket-0.08/lib/resty/websocket/client.lua +index 2ec96dd..598543f 100644 +--- a/bundle/lua-resty-websocket-0.08/lib/resty/websocket/client.lua ++++ b/bundle/lua-resty-websocket-0.08/lib/resty/websocket/client.lua +@@ -165,7 +165,7 @@ function _M.connect(self, uri, opts) + if client_cert then + ok, err = sock:setclientcert(client_cert, client_priv_key) + if not ok then +- return nil, "ssl client cert failued: " .. err ++ return nil, "ssl client cert failed: " .. 
err + end + end + +-- +2.32.0 (Apple Git-132) + diff --git a/build/openresty/patches/nginx-1.19.9_01-upstream_client_certificate_and_ssl_verify.patch b/build/openresty/patches/nginx-1.19.9_01-upstream_client_certificate_and_ssl_verify.patch new file mode 100644 index 00000000000..480092978b3 --- /dev/null +++ b/build/openresty/patches/nginx-1.19.9_01-upstream_client_certificate_and_ssl_verify.patch @@ -0,0 +1,52 @@ +diff --git a/nginx-1.19.9/src/http/ngx_http_upstream.c b/nginx-1.19.9/src/http/ngx_http_upstream.c +index 90710557..539a4db9 100644 +--- a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c ++++ b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c +@@ -8,6 +8,9 @@ + #include + #include + #include ++#if (NGX_HTTP_LUA_KONG) ++#include ++#endif + + + #if (NGX_HTTP_CACHE) +@@ -1696,7 +1696,14 @@ + c->sendfile = 0; + u->output.sendfile = 0; + ++ ++#if (NGX_HTTP_LUA_KONG) ++ if (u->conf->ssl_server_name ++ || ngx_http_lua_kong_get_upstream_ssl_verify(r, u->conf->ssl_verify)) ++ { ++#else + if (u->conf->ssl_server_name || u->conf->ssl_verify) { ++#endif + if (ngx_http_upstream_ssl_name(r, u, c) != NGX_OK) { + ngx_http_upstream_finalize_request(r, u, + NGX_HTTP_INTERNAL_SERVER_ERROR); +@@ -1724,6 +1727,10 @@ ngx_http_upstream_ssl_init_connection(ngx_http_request_t *r, + } + } + ++#if (NGX_HTTP_LUA_KONG) ++ ngx_http_lua_kong_set_upstream_ssl(r, c); ++#endif ++ + r->connection->log->action = "SSL handshaking to upstream"; + + rc = ngx_ssl_handshake(c); +@@ -1773,7 +1773,11 @@ + + if (c->ssl->handshaked) { + ++#if (NGX_HTTP_LUA_KONG) ++ if (ngx_http_lua_kong_get_upstream_ssl_verify(r, u->conf->ssl_verify)) { ++#else + if (u->conf->ssl_verify) { ++#endif + rc = SSL_get_verify_result(c->ssl->connection); + + if (rc != X509_V_OK) { diff --git a/build/openresty/patches/nginx-1.19.9_02-remove-server-tokens-from-special-responses-output.patch b/build/openresty/patches/nginx-1.19.9_02-remove-server-tokens-from-special-responses-output.patch new file mode 100644 index 00000000000..e8f9e07323c --- /dev/null +++ b/build/openresty/patches/nginx-1.19.9_02-remove-server-tokens-from-special-responses-output.patch @@ -0,0 +1,37 @@ +From 66f96c49ec4a222c4061e18aa8c3f8655b52327d Mon Sep 17 00:00:00 2001 +From: Aapo Talvensaari +Date: Fri, 16 Aug 2019 13:41:49 +0300 +Subject: [PATCH] remove server tokens from special responses output + +--- + nginx-1.19.9/src/http/ngx_http_special_response.c | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/nginx-1.19.9/src/http/ngx_http_special_response.c b/nginx-1.19.9/src/http/ngx_http_special_response.c +index 4b8bbf5..524cc7b 100644 +--- a/bundle/nginx-1.19.9/src/http/ngx_http_special_response.c ++++ b/bundle/nginx-1.19.9/src/http/ngx_http_special_response.c +@@ -19,21 +19,18 @@ static ngx_int_t ngx_http_send_refresh(ngx_http_request_t *r); + + + static u_char ngx_http_error_full_tail[] = +-"
" NGINX_VER "
" CRLF + "" CRLF + "" CRLF + ; + + + static u_char ngx_http_error_build_tail[] = +-"
" NGINX_VER_BUILD "
" CRLF + "" CRLF + "" CRLF + ; + + + static u_char ngx_http_error_tail[] = +-"
<hr><center>openresty</center>
" CRLF + "" CRLF + "" CRLF + ; +-- +2.22.0 diff --git a/build/openresty/patches/nginx-1.19.9_03-stream_proxy_ssl_disable.patch b/build/openresty/patches/nginx-1.19.9_03-stream_proxy_ssl_disable.patch new file mode 100644 index 00000000000..9053745cb89 --- /dev/null +++ b/build/openresty/patches/nginx-1.19.9_03-stream_proxy_ssl_disable.patch @@ -0,0 +1,33 @@ +diff --git a/nginx-1.19.9/src/stream/ngx_stream_proxy_module.c b/nginx-1.19.9/src/stream/ngx_stream_proxy_module.c +index 09493135..fc8f8479 100644 +--- a/bundle/nginx-1.19.9/src/stream/ngx_stream_proxy_module.c ++++ b/bundle/nginx-1.19.9/src/stream/ngx_stream_proxy_module.c +@@ -8,6 +8,9 @@ + #include + #include + #include ++#if (NGX_STREAM_LUA_KONG) ++#include ++#endif + + + typedef struct { +@@ -812,8 +815,18 @@ ngx_stream_proxy_init_upstream(ngx_stream_session_t *s) + + #if (NGX_STREAM_SSL) + ++#if (NGX_STREAM_LUA_KONG) ++ ++ if (pc->type == SOCK_STREAM && pscf->ssl ++ && !ngx_stream_lua_kong_get_proxy_ssl_disable(s)) ++ { ++ ++#else ++ + if (pc->type == SOCK_STREAM && pscf->ssl) { + ++#endif ++ + if (u->proxy_protocol) { + if (ngx_stream_proxy_send_proxy_protocol(s) != NGX_OK) { + return; diff --git a/build/openresty/patches/nginx-1.19.9_04-grpc_authority_override.patch b/build/openresty/patches/nginx-1.19.9_04-grpc_authority_override.patch new file mode 100644 index 00000000000..6822292e60a --- /dev/null +++ b/build/openresty/patches/nginx-1.19.9_04-grpc_authority_override.patch @@ -0,0 +1,25 @@ +diff --git a/nginx-1.19.3/src/http/modules/ngx_http_grpc_module.c b/nginx-1.19.3/src/http/modules/ngx_http_grpc_module.c +index d4af66db..10d3aaed 100644 +--- a/bundle/nginx-1.19.9/src/http/modules/ngx_http_grpc_module.c ++++ b/bundle/nginx-1.19.9/src/http/modules/ngx_http_grpc_module.c +@@ -8,6 +8,9 @@ + #include + #include + #include ++#if (NGX_HTTP_LUA_KONG) ++#include ++#endif + + + typedef struct { +@@ -733,6 +736,10 @@ ngx_http_grpc_create_request(ngx_http_request_t *r) + len = sizeof(ngx_http_grpc_connection_start) - 1 + + sizeof(ngx_http_grpc_frame_t); /* headers frame */ + ++#if (NGX_HTTP_LUA_KONG) ++ ngx_http_lua_kong_set_grpc_authority(r, &ctx->host); ++#endif ++ + /* :method header */ + + if (r->method == NGX_HTTP_GET || r->method == NGX_HTTP_POST) { diff --git a/build/openresty/patches/nginx-1.19.9_05-remove-server-headers-from-ngx-header-filter-module.patch b/build/openresty/patches/nginx-1.19.9_05-remove-server-headers-from-ngx-header-filter-module.patch new file mode 100644 index 00000000000..a12c3192c25 --- /dev/null +++ b/build/openresty/patches/nginx-1.19.9_05-remove-server-headers-from-ngx-header-filter-module.patch @@ -0,0 +1,70 @@ +From 42a44843445e9db12a8fc5eaf1f3e10b22a0065b Mon Sep 17 00:00:00 2001 +From: Aapo Talvensaari +Date: Tue, 15 Jun 2021 16:04:06 +0300 +Subject: [PATCH] remove server headers from nginx header filter module + +--- + nginx-1.19.9/src/http/ngx_http_header_filter_module.c | 34 ------------------- + 1 file changed, 34 deletions(-) + +diff --git a/nginx-1.19.9/src/http/ngx_http_header_filter_module.c b/nginx-1.19.9/src/http/ngx_http_header_filter_module.c +index ca13f2a..1a07dac 100644 +--- a/bundle/nginx-1.19.9/src/http/ngx_http_header_filter_module.c ++++ b/bundle/nginx-1.19.9/src/http/ngx_http_header_filter_module.c +@@ -46,11 +46,6 @@ ngx_module_t ngx_http_header_filter_module = { + }; + + +-static u_char ngx_http_server_string[] = "Server: openresty" CRLF; +-static u_char ngx_http_server_full_string[] = "Server: " NGINX_VER CRLF; +-static u_char ngx_http_server_build_string[] = "Server: " 
NGINX_VER_BUILD CRLF; +- +- + static ngx_str_t ngx_http_status_lines[] = { + + ngx_string("200 OK"), +@@ -279,18 +274,6 @@ ngx_http_header_filter(ngx_http_request_t *r) + + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + +- if (r->headers_out.server == NULL) { +- if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { +- len += sizeof(ngx_http_server_full_string) - 1; +- +- } else if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_BUILD) { +- len += sizeof(ngx_http_server_build_string) - 1; +- +- } else { +- len += sizeof(ngx_http_server_string) - 1; +- } +- } +- + if (r->headers_out.date == NULL) { + len += sizeof("Date: Mon, 28 Sep 1970 06:00:00 GMT" CRLF) - 1; + } +@@ -448,23 +431,6 @@ ngx_http_header_filter(ngx_http_request_t *r) + } + *b->last++ = CR; *b->last++ = LF; + +- if (r->headers_out.server == NULL) { +- if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { +- p = ngx_http_server_full_string; +- len = sizeof(ngx_http_server_full_string) - 1; +- +- } else if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_BUILD) { +- p = ngx_http_server_build_string; +- len = sizeof(ngx_http_server_build_string) - 1; +- +- } else { +- p = ngx_http_server_string; +- len = sizeof(ngx_http_server_string) - 1; +- } +- +- b->last = ngx_cpymem(b->last, p, len); +- } +- + if (r->headers_out.date == NULL) { + b->last = ngx_cpymem(b->last, "Date: ", sizeof("Date: ") - 1); + b->last = ngx_cpymem(b->last, ngx_cached_http_time.data, +-- +2.31.1 diff --git a/build/openresty/patches/nginx-cross-endianness-fix.patch b/build/openresty/patches/nginx-cross-endianness-fix.patch new file mode 100644 index 00000000000..da3d6745705 --- /dev/null +++ b/build/openresty/patches/nginx-cross-endianness-fix.patch @@ -0,0 +1,79 @@ +# http://cgit.openembedded.org/meta-openembedded/tree/meta-webserver/recipes-httpd/nginx/files/0001-Allow-the-overriding-of-the-endianness-via-the-confi.patch +From be9970aa16c5142ef814531d74a07990a8e9eb14 Mon Sep 17 00:00:00 2001 +From: Derek Straka +Date: Fri, 1 Dec 2017 10:32:29 -0500 +Subject: [PATCH] Allow the overriding of the endianness via the configure flag + --with-endian + +The existing configure options contain the --with-endian; however, the command +line flag does not actually function. It does not set the endianness and it +appears to do nothing. + +Upstream-Status: Pending + +Signed-off-by: Derek Straka + +diff --git a/auto/endianness b/auto/endianness +index 1b552b6..be84487 100644 +--- a/bundle/nginx-1.19.9/endianness ++++ b/bundle/nginx-1.19.9/auto/endianness +@@ -13,7 +13,13 @@ checking for system byte ordering + END + + +-cat << END > $NGX_AUTOTEST.c ++if [ ".$NGX_WITH_ENDIAN" = ".little" ]; then ++ echo " little endian" ++ have=NGX_HAVE_LITTLE_ENDIAN . auto/have ++elif [ ".$NGX_WITH_ENDIAN" = ".big" ]; then ++ echo " big endian" ++else ++ cat << END > $NGX_AUTOTEST.c + + int main(void) { + int i = 0x11223344; +@@ -26,25 +32,26 @@ int main(void) { + + END + +-ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ +- -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" ++ ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ ++ -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" + +-eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" ++ eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" + +-if [ -x $NGX_AUTOTEST ]; then +- if $NGX_AUTOTEST >/dev/null 2>&1; then +- echo " little endian" +- have=NGX_HAVE_LITTLE_ENDIAN . 
auto/have +- else +- echo " big endian" +- fi ++ if [ -x $NGX_AUTOTEST ]; then ++ if $NGX_AUTOTEST >/dev/null 2>&1; then ++ echo " little endian" ++ have=NGX_HAVE_LITTLE_ENDIAN . auto/have ++ else ++ echo " big endian" ++ fi + +- rm -rf $NGX_AUTOTEST* ++ rm -rf $NGX_AUTOTEST* + +-else +- rm -rf $NGX_AUTOTEST* ++ else ++ rm -rf $NGX_AUTOTEST* + +- echo +- echo "$0: error: cannot detect system byte ordering" +- exit 1 ++ echo ++ echo "$0: error: cannot detect system byte ordering" ++ exit 1 ++ fi + fi +-- +2.7.4 \ No newline at end of file diff --git a/build/openresty/patches/nginx-cross.patch b/build/openresty/patches/nginx-cross.patch new file mode 100644 index 00000000000..f83c19d0526 --- /dev/null +++ b/build/openresty/patches/nginx-cross.patch @@ -0,0 +1,214 @@ +Rebased from http://cgit.openembedded.org/meta-openembedded/tree/meta-webserver/recipes-httpd/nginx/files/nginx-cross.patch + + +=================================================================== +diff --git a/bundle/nginx-1.19.9/auto/feature b/bundle/nginx-1.19.9/auto/feature +index 3561f59..d6a2889 100644 +--- a/bundle/nginx-1.19.9/auto/feature ++++ b/bundle/nginx-1.19.9/auto/feature +@@ -49,12 +49,20 @@ eval "/bin/sh -c \"$ngx_test\" >> $NGX_AUTOCONF_ERR 2>&1" + + if [ -x $NGX_AUTOTEST ]; then + ++ if [ ".$NGX_CROSS_COMPILE" = ".yes" ]; then ++ NGX_AUTOTEST_EXEC="true" ++ NGX_FOUND_MSG=" (not tested, cross compiling)" ++ else ++ NGX_AUTOTEST_EXEC="$NGX_AUTOTEST" ++ NGX_FOUND_MSG="" ++ fi ++ + case "$ngx_feature_run" in + + yes) + # /bin/sh is used to intercept "Killed" or "Abort trap" messages +- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then +- echo " found" ++ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then ++ echo " found$NGX_FOUND_MSG" + ngx_found=yes + + if test -n "$ngx_feature_name"; then +@@ -68,17 +76,27 @@ if [ -x $NGX_AUTOTEST ]; then + + value) + # /bin/sh is used to intercept "Killed" or "Abort trap" messages +- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then +- echo " found" ++ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then ++ echo " found$NGX_FOUND_MSG" + ngx_found=yes + +- cat << END >> $NGX_AUTO_CONFIG_H ++ if [ ".$NGX_CROSS_COMPILE" = ".yes" ]; then ++ cat << END >> $NGX_AUTO_CONFIG_H + + #ifndef $ngx_feature_name +-#define $ngx_feature_name `$NGX_AUTOTEST` ++#define $ngx_feature_name $(eval "echo \$NGX_WITH_${ngx_feature_name}") + #endif + + END ++ else ++ cat << END >> $NGX_AUTO_CONFIG_H ++ ++#ifndef $ngx_feature_name ++#define $ngx_feature_name `$NGX_AUTOTEST_EXEC` ++#endif ++ ++END ++ fi + else + echo " found but is not working" + fi +@@ -86,7 +104,7 @@ END + + bug) + # /bin/sh is used to intercept "Killed" or "Abort trap" messages +- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then ++ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then + echo " not found" + + else +diff --git a/bundle/nginx-1.19.9/auto/options b/bundle/nginx-1.19.9/auto/options +index 182c799..e9eb7b8 100644 +--- a/bundle/nginx-1.19.9/auto/options ++++ b/bundle/nginx-1.19.9/auto/options +@@ -400,6 +400,18 @@ $0: warning: the \"--with-sha1-asm\" option is deprecated" + --test-build-epoll) NGX_TEST_BUILD_EPOLL=YES ;; + --test-build-solaris-sendfilev) NGX_TEST_BUILD_SOLARIS_SENDFILEV=YES ;; + ++ # cross compile support ++ --with-int=*) NGX_WITH_INT="$value" ;; ++ --with-long=*) NGX_WITH_LONG="$value" ;; ++ --with-long-long=*) NGX_WITH_LONG_LONG="$value" ;; ++ --with-ptr-size=*) NGX_WITH_PTR_SIZE="$value" ;; ++ --with-sig-atomic-t=*) 
NGX_WITH_SIG_ATOMIC_T="$value" ;; ++ --with-size-t=*) NGX_WITH_SIZE_T="$value" ;; ++ --with-off-t=*) NGX_WITH_OFF_T="$value" ;; ++ --with-time-t=*) NGX_WITH_TIME_T="$value" ;; ++ --with-sys-nerr=*) NGX_WITH_NGX_SYS_NERR="$value" ;; ++ --with-endian=*) NGX_WITH_ENDIAN="$value" ;; ++ + *) + echo "$0: error: invalid option \"$option\"" + exit 1 +@@ -590,6 +602,17 @@ cat << END + + --with-debug enable debug logging + ++ --with-int=VALUE force int size ++ --with-long=VALUE force long size ++ --with-long-long=VALUE force long long size ++ --with-ptr-size=VALUE force pointer size ++ --with-sig-atomic-t=VALUE force sig_atomic_t size ++ --with-size-t=VALUE force size_t size ++ --with-off-t=VALUE force off_t size ++ --with-time-t=VALUE force time_t size ++ --with-sys-nerr=VALUE force sys_nerr value ++ --with-endian=VALUE force system endianess ++ + END + + exit 1 +@@ -598,6 +621,8 @@ fi + + if [ ".$NGX_PLATFORM" = ".win32" ]; then + NGX_WINE=$WINE ++elif [ ! -z "$NGX_PLATFORM" ]; then ++ NGX_CROSS_COMPILE="yes" + fi + + +diff --git a/bundle/nginx-1.19.9/auto/types/sizeof b/bundle/nginx-1.19.9/auto/types/sizeof +index 480d8cf..23c5171 100644 +--- a/bundle/nginx-1.19.9/auto/types/sizeof ++++ b/bundle/nginx-1.19.9/auto/types/sizeof +@@ -12,9 +12,12 @@ checking for $ngx_type size + + END + +-ngx_size= ++ngx_size=$(eval "echo \$NGX_WITH_${ngx_param}") + +-cat << END > $NGX_AUTOTEST.c ++if [ ".$ngx_size" != "." ]; then ++ echo " $ngx_size bytes" ++else ++ cat << END > $NGX_AUTOTEST.c + + #include + #include +@@ -33,15 +36,16 @@ int main(void) { + END + + +-ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ +- -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" ++ ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ ++ -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" + +-eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" ++ eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" + + +-if [ -x $NGX_AUTOTEST ]; then +- ngx_size=`$NGX_AUTOTEST` +- echo " $ngx_size bytes" ++ if [ -x $NGX_AUTOTEST ]; then ++ ngx_size=`$NGX_AUTOTEST` ++ echo " $ngx_size bytes" ++ fi + fi + + +diff --git a/bundle/nginx-1.19.9/auto/unix b/bundle/nginx-1.19.9/auto/unix +index b41c70f..febbf3c 100644 +--- a/bundle/nginx-1.19.9/auto/unix ++++ b/bundle/nginx-1.19.9/auto/unix +@@ -592,13 +592,13 @@ ngx_feature_libs= + + # C types + +-ngx_type="int"; . auto/types/sizeof ++ngx_type="int"; ngx_param="INT"; . auto/types/sizeof + +-ngx_type="long"; . auto/types/sizeof ++ngx_type="long"; ngx_param="LONG"; . auto/types/sizeof + +-ngx_type="long long"; . auto/types/sizeof ++ngx_type="long long"; ngx_param="LONG_LONG"; . auto/types/sizeof + +-ngx_type="void *"; . auto/types/sizeof; ngx_ptr_size=$ngx_size ++ngx_type="void *"; ngx_param="PTR_SIZE"; . auto/types/sizeof; ngx_ptr_size=$ngx_size + ngx_param=NGX_PTR_SIZE; ngx_value=$ngx_size; . auto/types/value + + +@@ -609,7 +609,7 @@ NGX_INCLUDE_AUTO_CONFIG_H="#include \"ngx_auto_config.h\"" + ngx_type="uint32_t"; ngx_types="u_int32_t"; . auto/types/typedef + ngx_type="uint64_t"; ngx_types="u_int64_t"; . auto/types/typedef + +-ngx_type="sig_atomic_t"; ngx_types="int"; . auto/types/typedef ++ngx_type="sig_atomic_t"; ngx_param="SIG_ATOMIC_T"; ngx_types="int"; . auto/types/typedef + . auto/types/sizeof + ngx_param=NGX_SIG_ATOMIC_T_SIZE; ngx_value=$ngx_size; . auto/types/value + +@@ -625,15 +625,15 @@ ngx_type="rlim_t"; ngx_types="int"; . auto/types/typedef + + . auto/endianness + +-ngx_type="size_t"; . auto/types/sizeof ++ngx_type="size_t"; ngx_param="SIZE_T"; . 
auto/types/sizeof + ngx_param=NGX_MAX_SIZE_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value + ngx_param=NGX_SIZE_T_LEN; ngx_value=$ngx_max_len; . auto/types/value + +-ngx_type="off_t"; . auto/types/sizeof ++ngx_type="off_t"; ngx_param="OFF_T"; . auto/types/sizeof + ngx_param=NGX_MAX_OFF_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value + ngx_param=NGX_OFF_T_LEN; ngx_value=$ngx_max_len; . auto/types/value + +-ngx_type="time_t"; . auto/types/sizeof ++ngx_type="time_t"; ngx_param="TIME_T"; . auto/types/sizeof + ngx_param=NGX_TIME_T_SIZE; ngx_value=$ngx_size; . auto/types/value + ngx_param=NGX_TIME_T_LEN; ngx_value=$ngx_max_len; . auto/types/value + ngx_param=NGX_MAX_TIME_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value diff --git a/build/openresty/patches/ngx_lua-0.10.20_01-cosocket-mtls.patch b/build/openresty/patches/ngx_lua-0.10.20_01-cosocket-mtls.patch new file mode 100644 index 00000000000..f339a76d9b9 --- /dev/null +++ b/build/openresty/patches/ngx_lua-0.10.20_01-cosocket-mtls.patch @@ -0,0 +1,1554 @@ +From acd53645754ce42b436cecb0d7a10b547d41fef6 Mon Sep 17 00:00:00 2001 +From: lijunlong +Date: Wed, 13 Oct 2021 23:42:17 +0800 +Subject: [PATCH 01/17] bugfix: nginx crash when resolve an not exist domain in + thread create by ngx.thread.spawn.(#1931) + +FIX #1915 +The resolve ctxes were dded to a link list. function ngx_resolver_process_a iterate through the link list when got the DNS reply. +When processing the first resolve ctx, all the three ctxes were freed. So when ngx_resolver_process_a continues to process the second ctx, it will free the ctx again. + +Co-authored-by: doujiang24 +--- + src/ngx_http_lua_socket_tcp.c | 15 ++------------- + 1 file changed, 2 insertions(+), 13 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 55bd203d..b7c3bdd4 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -147,8 +147,6 @@ static void ngx_http_lua_socket_free_pool(ngx_log_t *log, + static int ngx_http_lua_socket_shutdown_pool(lua_State *L); + static void ngx_http_lua_socket_shutdown_pool_helper( + ngx_http_lua_socket_pool_t *spool); +-static void +- ngx_http_lua_socket_empty_resolve_handler(ngx_resolver_ctx_t *ctx); + static int ngx_http_lua_socket_prepare_error_retvals(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, lua_State *L, ngx_uint_t ft_type); + #if (NGX_HTTP_SSL) +@@ -1151,13 +1149,6 @@ ngx_http_lua_socket_tcp_connect(lua_State *L) + } + + +-static void +-ngx_http_lua_socket_empty_resolve_handler(ngx_resolver_ctx_t *ctx) +-{ +- /* do nothing */ +-} +- +- + static void + ngx_http_lua_socket_resolve_handler(ngx_resolver_ctx_t *ctx) + { +@@ -6084,10 +6075,8 @@ ngx_http_lua_tcp_resolve_cleanup(void *data) + return; + } + +- /* just to be safer */ +- rctx->handler = ngx_http_lua_socket_empty_resolve_handler; +- +- ngx_resolve_name_done(rctx); ++ /* postpone free the rctx in the handler */ ++ rctx->handler = ngx_resolve_name_done; + } + + +-- +2.32.0 (Apple Git-132) + + +From 59d39ca2f0963695052c2593f957053f1a1779a2 Mon Sep 17 00:00:00 2001 +From: Josh Soref <2119212+jsoref@users.noreply.github.com> +Date: Mon, 25 Oct 2021 03:07:01 -0400 +Subject: [PATCH 02/17] doc: fixed spelling errors in the docs and code. 
+ (#1947) + +--- + src/ngx_http_lua_socket_tcp.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index b7c3bdd4..ace18a0f 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -3112,7 +3112,7 @@ ngx_http_lua_socket_tcp_settimeout(lua_State *L) + n = lua_gettop(L); + + if (n != 2) { +- return luaL_error(L, "ngx.socket settimout: expecting 2 arguments " ++ return luaL_error(L, "ngx.socket settimeout: expecting 2 arguments " + "(including the object) but seen %d", lua_gettop(L)); + } + +@@ -3159,7 +3159,7 @@ ngx_http_lua_socket_tcp_settimeouts(lua_State *L) + n = lua_gettop(L); + + if (n != 4) { +- return luaL_error(L, "ngx.socket settimout: expecting 4 arguments " ++ return luaL_error(L, "ngx.socket settimeout: expecting 4 arguments " + "(including the object) but seen %d", lua_gettop(L)); + } + +-- +2.32.0 (Apple Git-132) + + +From 617cb5dadc14dddb4796d4fff8821dae325f4229 Mon Sep 17 00:00:00 2001 +From: Sharp Liu +Date: Tue, 26 Oct 2021 17:53:54 +0800 +Subject: [PATCH 03/17] style: removed extra space. (#1952) + +--- + src/ngx_http_lua_socket_tcp.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index ace18a0f..26467fdd 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -2825,7 +2825,7 @@ ngx_http_lua_socket_tcp_send(lua_State *L) + + switch (type) { + case LUA_TNUMBER: +- b->last = ngx_http_lua_write_num(L, 2, b->last); ++ b->last = ngx_http_lua_write_num(L, 2, b->last); + break; + + case LUA_TSTRING: +-- +2.32.0 (Apple Git-132) + + +From 287d58810c450f912a8d31a94a1c86ccc039c0e1 Mon Sep 17 00:00:00 2001 +From: Datong Sun +Date: Wed, 18 Sep 2019 16:39:05 -0700 +Subject: [PATCH 04/17] cosocket: add function `tcpsock:tlshandshake`, retired + the Lua C API based `tcpsock:sslhandshake` implementation. 
+ +--- + src/ngx_http_lua_socket_tcp.c | 387 +++++++++++++++------------------- + src/ngx_http_lua_socket_tcp.h | 3 + + 2 files changed, 177 insertions(+), 213 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 26467fdd..4ef22c11 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -23,6 +23,9 @@ static int ngx_http_lua_socket_tcp(lua_State *L); + static int ngx_http_lua_socket_tcp_connect(lua_State *L); + #if (NGX_HTTP_SSL) + static int ngx_http_lua_socket_tcp_sslhandshake(lua_State *L); ++static void ngx_http_lua_tls_handshake_handler(ngx_connection_t *c); ++static int ngx_http_lua_tls_handshake_retval_handler(ngx_http_request_t *r, ++ ngx_http_lua_socket_tcp_upstream_t *u, lua_State *L); + #endif + static int ngx_http_lua_socket_tcp_receive(lua_State *L); + static int ngx_http_lua_socket_tcp_receiveany(lua_State *L); +@@ -149,12 +152,6 @@ static void ngx_http_lua_socket_shutdown_pool_helper( + ngx_http_lua_socket_pool_t *spool); + static int ngx_http_lua_socket_prepare_error_retvals(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, lua_State *L, ngx_uint_t ft_type); +-#if (NGX_HTTP_SSL) +-static int ngx_http_lua_ssl_handshake_retval_handler(ngx_http_request_t *r, +- ngx_http_lua_socket_tcp_upstream_t *u, lua_State *L); +-static void ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c); +-static int ngx_http_lua_ssl_free_session(lua_State *L); +-#endif + static void ngx_http_lua_socket_tcp_close_connection(ngx_connection_t *c); + + +@@ -324,13 +321,6 @@ ngx_http_lua_inject_socket_tcp_api(ngx_log_t *log, lua_State *L) + lua_pushcfunction(L, ngx_http_lua_socket_tcp_connect); + lua_setfield(L, -2, "connect"); + +-#if (NGX_HTTP_SSL) +- +- lua_pushcfunction(L, ngx_http_lua_socket_tcp_sslhandshake); +- lua_setfield(L, -2, "sslhandshake"); +- +-#endif +- + lua_pushcfunction(L, ngx_http_lua_socket_tcp_receive); + lua_setfield(L, -2, "receive"); + +@@ -404,19 +394,6 @@ ngx_http_lua_inject_socket_tcp_api(ngx_log_t *log, lua_State *L) + lua_setfield(L, -2, "__gc"); + lua_rawset(L, LUA_REGISTRYINDEX); + /* }}} */ +- +-#if (NGX_HTTP_SSL) +- +- /* {{{ssl session userdata metatable */ +- lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( +- ssl_session_metatable_key)); +- lua_createtable(L, 0 /* narr */, 1 /* nrec */); /* metatable */ +- lua_pushcfunction(L, ngx_http_lua_ssl_free_session); +- lua_setfield(L, -2, "__gc"); +- lua_rawset(L, LUA_REGISTRYINDEX); +- /* }}} */ +- +-#endif + } + + +@@ -1559,64 +1536,69 @@ ngx_http_lua_socket_conn_error_retval_handler(ngx_http_request_t *r, + + #if (NGX_HTTP_SSL) + +-static int +-ngx_http_lua_socket_tcp_sslhandshake(lua_State *L) ++static const char * ++ngx_http_lua_socket_tcp_check_busy(ngx_http_request_t *r, ++ ngx_http_lua_socket_tcp_upstream_t *u, unsigned int ops) + { +- int n, top; +- ngx_int_t rc; +- ngx_str_t name = ngx_null_string; +- ngx_connection_t *c; +- ngx_ssl_session_t **psession; +- ngx_http_request_t *r; +- ngx_http_lua_ctx_t *ctx; +- ngx_http_lua_co_ctx_t *coctx; +- +- ngx_http_lua_socket_tcp_upstream_t *u; +- +- /* Lua function arguments: self [,session] [,host] [,verify] +- [,send_status_req] */ ++ if (ops & SOCKET_OP_CONNECT && u->conn_waiting) { ++ return "socket busy connecting"; ++ } + +- n = lua_gettop(L); +- if (n < 1 || n > 5) { +- return luaL_error(L, "ngx.socket sslhandshake: expecting 1 ~ 5 " +- "arguments (including the object), but seen %d", n); ++ if 
(ops & SOCKET_OP_READ && u->read_waiting) { ++ return "socket busy reading"; + } + +- r = ngx_http_lua_get_req(L); +- if (r == NULL) { +- return luaL_error(L, "no request found"); ++ if (ops & SOCKET_OP_WRITE ++ && (u->write_waiting ++ || (u->raw_downstream ++ && (r->connection->buffered & NGX_HTTP_LOWLEVEL_BUFFERED)))) ++ { ++ return "socket busy writing"; + } + +- ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, +- "lua tcp socket ssl handshake"); ++ return NULL; ++} + +- luaL_checktype(L, 1, LUA_TTABLE); ++int ++ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, ++ ngx_http_lua_socket_tcp_upstream_t *u, ngx_ssl_session_t *sess, ++ int enable_session_reuse, ngx_str_t *server_name, int verify, ++ int ocsp_status_req, const char **errmsg) ++{ ++ ngx_int_t rc; ++ ngx_connection_t *c; ++ ngx_http_lua_ctx_t *ctx; ++ ngx_http_lua_co_ctx_t *coctx; ++ const char *busy_rc; + +- lua_rawgeti(L, 1, SOCKET_CTX_INDEX); +- u = lua_touserdata(L, -1); ++ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, ++ "lua tcp socket tls handshake"); + + if (u == NULL + || u->peer.connection == NULL + || u->read_closed + || u->write_closed) + { +- lua_pushnil(L); +- lua_pushliteral(L, "closed"); +- return 2; ++ *errmsg = "closed"; ++ return NGX_ERROR; + } + + if (u->request != r) { +- return luaL_error(L, "bad request"); ++ *errmsg = "bad request"; ++ return NGX_ERROR; + } + +- ngx_http_lua_socket_check_busy_connecting(r, u, L); +- ngx_http_lua_socket_check_busy_reading(r, u, L); +- ngx_http_lua_socket_check_busy_writing(r, u, L); ++ busy_rc = ngx_http_lua_socket_tcp_check_busy(r, u, SOCKET_OP_CONNECT ++ | SOCKET_OP_READ ++ | SOCKET_OP_WRITE); ++ if (busy_rc != NULL) { ++ *errmsg = busy_rc; ++ return NGX_ERROR; ++ } + + if (u->raw_downstream || u->body_downstream) { +- lua_pushnil(L); +- lua_pushliteral(L, "not supported for downstream"); +- return 2; ++ *errmsg = "not supported for downstream"; ++ return NGX_ERROR; + } + + c = u->peer.connection; +@@ -1624,122 +1606,96 @@ ngx_http_lua_socket_tcp_sslhandshake(lua_State *L) + u->ssl_session_reuse = 1; + + if (c->ssl && c->ssl->handshaked) { +- switch (lua_type(L, 2)) { +- case LUA_TUSERDATA: +- lua_pushvalue(L, 2); +- break; ++ if (sess != NULL) { ++ return NGX_DONE; ++ } + +- case LUA_TBOOLEAN: +- if (!lua_toboolean(L, 2)) { +- /* avoid generating the ssl session */ +- lua_pushboolean(L, 1); +- break; +- } +- /* fall through */ ++ u->ssl_session_reuse = enable_session_reuse; + +- default: +- ngx_http_lua_ssl_handshake_retval_handler(r, u, L); +- break; +- } ++ (void) ngx_http_lua_tls_handshake_retval_handler(r, u, NULL); + +- return 1; ++ return NGX_OK; + } + + if (ngx_ssl_create_connection(u->conf->ssl, c, + NGX_SSL_BUFFER|NGX_SSL_CLIENT) + != NGX_OK) + { +- lua_pushnil(L); +- lua_pushliteral(L, "failed to create ssl connection"); +- return 2; ++ *errmsg = "failed to create ssl connection"; ++ return NGX_ERROR; + } + + ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); + if (ctx == NULL) { +- return luaL_error(L, "no ctx found"); ++ ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, ++ "no ngx_lua ctx found while TLS handshaking"); ++ ++ ngx_http_lua_assert(NULL); ++ ++ *errmsg = "no ctx found"; ++ return NGX_ERROR; + } + + coctx = ctx->cur_co_ctx; + + c->sendfile = 0; + +- if (n >= 2) { +- if (lua_type(L, 2) == LUA_TBOOLEAN) { +- u->ssl_session_reuse = lua_toboolean(L, 2); +- +- } else { +- psession = lua_touserdata(L, 2); +- +- if (psession != NULL && *psession != NULL) { +- if (ngx_ssl_set_session(c, *psession) != NGX_OK) { +- 
lua_pushnil(L); +- lua_pushliteral(L, "lua ssl set session failed"); +- return 2; +- } +- +- ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, +- "lua ssl set session: %p", *psession); +- } ++ if (sess != NULL) { ++ if (ngx_ssl_set_session(c, sess) != NGX_OK) { ++ *errmsg = "lua tls set session failed"; ++ return NGX_ERROR; + } + +- if (n >= 3) { +- name.data = (u_char *) lua_tolstring(L, 3, &name.len); ++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, ++ "lua tls set session: %p", sess); + +- if (name.data) { +- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, +- "lua ssl server name: \"%*s\"", name.len, +- name.data); ++ } else { ++ u->ssl_session_reuse = enable_session_reuse; ++ } + +-#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME ++ if (server_name != NULL && server_name->data != NULL) { ++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, ++ "lua tls server name: \"%V\"", server_name); + +- if (SSL_set_tlsext_host_name(c->ssl->connection, +- (char *) name.data) +- == 0) +- { +- lua_pushnil(L); +- lua_pushliteral(L, "SSL_set_tlsext_host_name failed"); +- return 2; +- } ++#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME ++ if (SSL_set_tlsext_host_name(c->ssl->connection, ++ (char *) server_name->data) ++ == 0) ++ { ++ *errmsg = "SSL_set_tlsext_host_name failed"; ++ return NGX_ERROR; ++ } + + #else +- +- ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, +- "lua socket SNI disabled because the current " +- "version of OpenSSL lacks the support"); +- ++ *errmsg = "OpenSSL has no SNI support"; ++ return NGX_ERROR; + #endif +- } ++ } + +- if (n >= 4) { +- u->ssl_verify = lua_toboolean(L, 4); ++ u->ssl_verify = verify; + +- if (n >= 5) { +- if (lua_toboolean(L, 5)) { ++ if (ocsp_status_req) { + #ifdef NGX_HTTP_LUA_USE_OCSP +- SSL_set_tlsext_status_type(c->ssl->connection, +- TLSEXT_STATUSTYPE_ocsp); ++ SSL_set_tlsext_status_type(c->ssl->connection, ++ TLSEXT_STATUSTYPE_ocsp); ++ + #else +- return luaL_error(L, "no OCSP support"); ++ *errmsg = "no OCSP support"; ++ return NGX_ERROR; + #endif +- } +- } +- } +- } + } + +- dd("found sni name: %.*s %p", (int) name.len, name.data, name.data); +- +- if (name.len == 0) { ++ if (server_name->len == 0) { + u->ssl_name.len = 0; + + } else { + if (u->ssl_name.data) { + /* buffer already allocated */ + +- if (u->ssl_name.len >= name.len) { ++ if (u->ssl_name.len >= server_name->len) { + /* reuse it */ +- ngx_memcpy(u->ssl_name.data, name.data, name.len); +- u->ssl_name.len = name.len; ++ ngx_memcpy(u->ssl_name.data, server_name->data, server_name->len); ++ u->ssl_name.len = server_name->len; + + } else { + ngx_free(u->ssl_name.data); +@@ -1750,17 +1706,16 @@ ngx_http_lua_socket_tcp_sslhandshake(lua_State *L) + + new_ssl_name: + +- u->ssl_name.data = ngx_alloc(name.len, ngx_cycle->log); ++ u->ssl_name.data = ngx_alloc(server_name->len, ngx_cycle->log); + if (u->ssl_name.data == NULL) { + u->ssl_name.len = 0; + +- lua_pushnil(L); +- lua_pushliteral(L, "no memory"); +- return 2; ++ *errmsg = "no memory"; ++ return NGX_ERROR; + } + +- ngx_memcpy(u->ssl_name.data, name.data, name.len); +- u->ssl_name.len = name.len; ++ ngx_memcpy(u->ssl_name.data, server_name->data, server_name->len); ++ u->ssl_name.len = server_name->len; + } + } + +@@ -1774,7 +1729,8 @@ new_ssl_name: + + rc = ngx_ssl_handshake(c); + +- dd("ngx_ssl_handshake returned %d", (int) rc); ++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, ++ "ngx_ssl_handshake returned %d", rc); + + if (rc == NGX_AGAIN) { + if (c->write->timer_set) { +@@ -1784,13 +1740,13 @@ new_ssl_name: + ngx_add_timer(c->read, 
u->connect_timeout); + + u->conn_waiting = 1; +- u->write_prepare_retvals = ngx_http_lua_ssl_handshake_retval_handler; ++ u->write_prepare_retvals = ngx_http_lua_tls_handshake_retval_handler; + + ngx_http_lua_cleanup_pending_operation(coctx); + coctx->cleanup = ngx_http_lua_coctx_cleanup; + coctx->data = u; + +- c->ssl->handler = ngx_http_lua_ssl_handshake_handler; ++ c->ssl->handler = ngx_http_lua_tls_handshake_handler; + + if (ctx->entered_content_phase) { + r->write_event_handler = ngx_http_lua_content_wev_handler; +@@ -1799,21 +1755,25 @@ new_ssl_name: + r->write_event_handler = ngx_http_core_run_phases; + } + +- return lua_yield(L, 0); ++ return NGX_AGAIN; ++ } ++ ++ ngx_http_lua_tls_handshake_handler(c); ++ ++ if (rc == NGX_ERROR) { ++ *errmsg = u->error_ret; ++ ++ return NGX_ERROR; + } + +- top = lua_gettop(L); +- ngx_http_lua_ssl_handshake_handler(c); +- return lua_gettop(L) - top; ++ return NGX_OK; + } + + + static void +-ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c) ++ngx_http_lua_tls_handshake_handler(ngx_connection_t *c) + { +- const char *err; + int waiting; +- lua_State *L; + ngx_int_t rc; + ngx_connection_t *dc; /* downstream connection */ + ngx_http_request_t *r; +@@ -1836,11 +1796,9 @@ ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c) + waiting = u->conn_waiting; + + dc = r->connection; +- L = u->write_co_ctx->co; + + if (c->read->timedout) { +- lua_pushnil(L); +- lua_pushliteral(L, "timeout"); ++ u->error_ret = "timeout"; + goto failed; + } + +@@ -1849,19 +1807,18 @@ ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c) + } + + if (c->ssl->handshaked) { +- + if (u->ssl_verify) { + rc = SSL_get_verify_result(c->ssl->connection); + + if (rc != X509_V_OK) { +- lua_pushnil(L); +- err = lua_pushfstring(L, "%d: %s", (int) rc, +- X509_verify_cert_error_string(rc)); ++ u->error_ret = X509_verify_cert_error_string(rc); ++ u->openssl_error_code_ret = rc; + + llcf = ngx_http_get_module_loc_conf(r, ngx_http_lua_module); + if (llcf->log_socket_errors) { +- ngx_log_error(NGX_LOG_ERR, dc->log, 0, "lua ssl " +- "certificate verify error: (%s)", err); ++ ngx_log_error(NGX_LOG_ERR, dc->log, 0, "lua tls " ++ "certificate verify error: (%d: %s)", ++ rc, u->error_ret); + } + + goto failed; +@@ -1872,12 +1829,11 @@ ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c) + if (u->ssl_name.len + && ngx_ssl_check_host(c, &u->ssl_name) != NGX_OK) + { +- lua_pushnil(L); +- lua_pushliteral(L, "certificate host mismatch"); ++ u->error_ret = "certificate host mismatch"; + + llcf = ngx_http_get_module_loc_conf(r, ngx_http_lua_module); + if (llcf->log_socket_errors) { +- ngx_log_error(NGX_LOG_ERR, dc->log, 0, "lua ssl " ++ ngx_log_error(NGX_LOG_ERR, dc->log, 0, "lua tls " + "certificate does not match host \"%V\"", + &u->ssl_name); + } +@@ -1892,7 +1848,7 @@ ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c) + ngx_http_lua_socket_handle_conn_success(r, u); + + } else { +- (void) ngx_http_lua_ssl_handshake_retval_handler(r, u, L); ++ (void) ngx_http_lua_tls_handshake_retval_handler(r, u, NULL); + } + + if (waiting) { +@@ -1902,60 +1858,84 @@ ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c) + return; + } + +- lua_pushnil(L); +- lua_pushliteral(L, "handshake failed"); ++ u->error_ret = "handshake failed"; + + failed: + + if (waiting) { + u->write_prepare_retvals = +- ngx_http_lua_socket_conn_error_retval_handler; +- ngx_http_lua_socket_handle_conn_error(r, u, +- NGX_HTTP_LUA_SOCKET_FT_SSL); ++ ngx_http_lua_socket_conn_error_retval_handler; ++ 
ngx_http_lua_socket_handle_conn_error(r, u, NGX_HTTP_LUA_SOCKET_FT_SSL); + ngx_http_run_posted_requests(dc); + + } else { +- (void) ngx_http_lua_socket_conn_error_retval_handler(r, u, L); ++ u->ft_type |= NGX_HTTP_LUA_SOCKET_FT_SSL; ++ ++ (void) ngx_http_lua_socket_conn_error_retval_handler(r, u, NULL); ++ } ++} ++ ++ ++ ++int ++ngx_http_lua_ffi_socket_tcp_get_tlshandshake_result(ngx_http_request_t *r, ++ ngx_http_lua_socket_tcp_upstream_t *u, ngx_ssl_session_t **sess, ++ const char **errmsg, int *openssl_error_code) ++{ ++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, ++ "lua cosocket get TLS handshake result for upstream: %p", u); ++ ++ if (u->error_ret != NULL) { ++ *errmsg = u->error_ret; ++ *openssl_error_code = u->openssl_error_code_ret; ++ ++ return NGX_ERROR; + } ++ ++ *sess = u->ssl_session_ret; ++ ++ return NGX_OK; + } + + + static int +-ngx_http_lua_ssl_handshake_retval_handler(ngx_http_request_t *r, ++ngx_http_lua_tls_handshake_retval_handler(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, lua_State *L) + { + ngx_connection_t *c; +- ngx_ssl_session_t *ssl_session, **ud; ++ ngx_ssl_session_t *ssl_session; + + if (!u->ssl_session_reuse) { +- lua_pushboolean(L, 1); +- return 1; ++ return 0; + } + +- ud = lua_newuserdata(L, sizeof(ngx_ssl_session_t *)); +- + c = u->peer.connection; + + ssl_session = ngx_ssl_get_session(c); + if (ssl_session == NULL) { +- *ud = NULL; ++ u->ssl_session_ret = NULL; + + } else { +- *ud = ssl_session; ++ u->ssl_session_ret = ssl_session; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, +- "lua ssl save session: %p", ssl_session); +- +- /* set up the __gc metamethod */ +- lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( +- ssl_session_metatable_key)); +- lua_rawget(L, LUA_REGISTRYINDEX); +- lua_setmetatable(L, -2); ++ "lua tls save session: %p", ssl_session); + } + +- return 1; ++ return 0; ++} ++ ++ ++void ++ngx_http_lua_ffi_tls_free_session(ngx_ssl_session_t *sess) ++{ ++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0, ++ "lua tls free session: %p", sess); ++ ++ ngx_ssl_free_session(sess); + } + ++ + #endif /* NGX_HTTP_SSL */ + + +@@ -2008,12 +1988,14 @@ ngx_http_lua_socket_prepare_error_retvals(ngx_http_request_t *r, + u_char errstr[NGX_MAX_ERROR_STR]; + u_char *p; + +- if (ft_type & (NGX_HTTP_LUA_SOCKET_FT_RESOLVER +- | NGX_HTTP_LUA_SOCKET_FT_SSL)) +- { ++ if (ft_type & NGX_HTTP_LUA_SOCKET_FT_RESOLVER) { + return 2; + } + ++ if (ft_type & NGX_HTTP_LUA_SOCKET_FT_SSL) { ++ return 0; ++ } ++ + lua_pushnil(L); + + if (ft_type & NGX_HTTP_LUA_SOCKET_FT_TIMEOUT) { +@@ -6101,27 +6083,6 @@ ngx_http_lua_coctx_cleanup(void *data) + } + + +-#if (NGX_HTTP_SSL) +- +-static int +-ngx_http_lua_ssl_free_session(lua_State *L) +-{ +- ngx_ssl_session_t **psession; +- +- psession = lua_touserdata(L, 1); +- if (psession && *psession != NULL) { +- ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0, +- "lua ssl free session: %p", *psession); +- +- ngx_ssl_free_session(*psession); +- } +- +- return 0; +-} +- +-#endif /* NGX_HTTP_SSL */ +- +- + void + ngx_http_lua_cleanup_conn_pools(lua_State *L) + { +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.h b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.h +index a0a5a518..ee9411bc 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.h ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.h +@@ -120,6 +120,9 @@ struct ngx_http_lua_socket_tcp_upstream_s { + + #if (NGX_HTTP_SSL) + ngx_str_t ssl_name; ++ ngx_ssl_session_t *ssl_session_ret; ++ const char *error_ret; ++ 
int openssl_error_code_ret; + #endif + + unsigned ft_type:16; +-- +2.32.0 (Apple Git-132) + + +From f5ba21d6f742e6b169d972a81b6124b27c076016 Mon Sep 17 00:00:00 2001 +From: Datong Sun +Date: Wed, 18 Sep 2019 16:54:32 -0700 +Subject: [PATCH 05/17] change: better error when request context couldn't be + found. + +--- + src/ngx_http_lua_socket_tcp.c | 8 +------- + 1 file changed, 1 insertion(+), 7 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 4ef22c11..abd487fa 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -1627,13 +1627,7 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + + ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); + if (ctx == NULL) { +- ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, +- "no ngx_lua ctx found while TLS handshaking"); +- +- ngx_http_lua_assert(NULL); +- +- *errmsg = "no ctx found"; +- return NGX_ERROR; ++ return NGX_HTTP_LUA_FFI_NO_REQ_CTX; + } + + coctx = ctx->cur_co_ctx; +-- +2.32.0 (Apple Git-132) + + +From 78a450d571febf7ba918ecc13369144925d02bcb Mon Sep 17 00:00:00 2001 +From: Datong Sun +Date: Wed, 18 Sep 2019 17:24:07 -0700 +Subject: [PATCH 06/17] feature: TCP cosocket client certificate support. + closes #534 + +--- + src/ngx_http_lua_socket_tcp.c | 60 +++++++++++++++++++++++++++++++---- + 1 file changed, 54 insertions(+), 6 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index abd487fa..61671b70 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -22,7 +22,6 @@ + static int ngx_http_lua_socket_tcp(lua_State *L); + static int ngx_http_lua_socket_tcp_connect(lua_State *L); + #if (NGX_HTTP_SSL) +-static int ngx_http_lua_socket_tcp_sslhandshake(lua_State *L); + static void ngx_http_lua_tls_handshake_handler(ngx_connection_t *c); + static int ngx_http_lua_tls_handshake_retval_handler(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, lua_State *L); +@@ -219,9 +218,6 @@ static char ngx_http_lua_upstream_udata_metatable_key; + static char ngx_http_lua_downstream_udata_metatable_key; + static char ngx_http_lua_pool_udata_metatable_key; + static char ngx_http_lua_pattern_udata_metatable_key; +-#if (NGX_HTTP_SSL) +-static char ngx_http_lua_ssl_session_metatable_key; +-#endif + + + #define ngx_http_lua_tcp_socket_metatable_literal_key "__tcp_cosocket_mt" +@@ -1563,13 +1559,16 @@ int + ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, ngx_ssl_session_t *sess, + int enable_session_reuse, ngx_str_t *server_name, int verify, +- int ocsp_status_req, const char **errmsg) ++ int ocsp_status_req, STACK_OF(X509) *chain, EVP_PKEY *pkey, ++ const char **errmsg) + { +- ngx_int_t rc; ++ ngx_int_t rc, i; + ngx_connection_t *c; + ngx_http_lua_ctx_t *ctx; + ngx_http_lua_co_ctx_t *coctx; + const char *busy_rc; ++ ngx_ssl_conn_t *ssl_conn; ++ X509 *x509; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "lua tcp socket tls handshake"); +@@ -1625,6 +1624,8 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + return NGX_ERROR; + } + ++ ssl_conn = c->ssl->connection; ++ + ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); + if (ctx == NULL) { + return NGX_HTTP_LUA_FFI_NO_REQ_CTX; +@@ -1647,6 +1648,53 @@ 
ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + u->ssl_session_reuse = enable_session_reuse; + } + ++ if (chain != NULL) { ++ ngx_http_lua_assert(pkey != NULL); /* ensured by resty.core */ ++ ++ if (sk_X509_num(chain) < 1) { ++ ERR_clear_error(); ++ *errmsg = "invalid client certificate chain"; ++ return NGX_ERROR; ++ } ++ ++ x509 = sk_X509_value(chain, 0); ++ if (x509 == NULL) { ++ ERR_clear_error(); ++ *errmsg = "lua tls fetch client certificate from chain failed"; ++ return NGX_ERROR; ++ } ++ ++ if (SSL_use_certificate(ssl_conn, x509) == 0) { ++ ERR_clear_error(); ++ *errmsg = "lua tls set client certificate failed"; ++ return NGX_ERROR; ++ } ++ ++ /* read rest of the chain */ ++ ++ for (i = 1; i < sk_X509_num(chain); i++) { ++ x509 = sk_X509_value(chain, i); ++ if (x509 == NULL) { ++ ERR_clear_error(); ++ *errmsg = "lua tls fetch client intermediate certificate " ++ "from chain failed"; ++ return NGX_ERROR; ++ } ++ ++ if (SSL_add1_chain_cert(ssl_conn, x509) == 0) { ++ ERR_clear_error(); ++ *errmsg = "lua tls set client intermediate certificate failed"; ++ return NGX_ERROR; ++ } ++ } ++ ++ if (SSL_use_PrivateKey(ssl_conn, pkey) == 0) { ++ ERR_clear_error(); ++ *errmsg = "lua ssl set client private key failed"; ++ return NGX_ERROR; ++ } ++ } ++ + if (server_name != NULL && server_name->data != NULL) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "lua tls server name: \"%V\"", server_name); +-- +2.32.0 (Apple Git-132) + + +From 6cc0c89e946ef42adfbc55e8a461ccc2f367254a Mon Sep 17 00:00:00 2001 +From: Datong Sun +Date: Wed, 18 Sep 2019 17:25:20 -0700 +Subject: [PATCH 07/17] style: style fixes. + +--- + src/ngx_http_lua_socket_tcp.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 61671b70..a7d410c9 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -1736,7 +1736,8 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + + if (u->ssl_name.len >= server_name->len) { + /* reuse it */ +- ngx_memcpy(u->ssl_name.data, server_name->data, server_name->len); ++ ngx_memcpy(u->ssl_name.data, server_name->data, ++ server_name->len); + u->ssl_name.len = server_name->len; + + } else { +-- +2.32.0 (Apple Git-132) + + +From 21cd7779252732a02fa0e596b66a1d4663d2fd64 Mon Sep 17 00:00:00 2001 +From: Thibault Charbonnier +Date: Mon, 6 Jan 2020 17:56:10 -0800 +Subject: [PATCH 08/17] cleanup + +--- + src/ngx_http_lua_socket_tcp.c | 24 +++++++++++------------- + 1 file changed, 11 insertions(+), 13 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index a7d410c9..bd7cc7ca 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -1555,6 +1555,7 @@ ngx_http_lua_socket_tcp_check_busy(ngx_http_request_t *r, + return NULL; + } + ++ + int + ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, ngx_ssl_session_t *sess, +@@ -1596,7 +1597,7 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + } + + if (u->raw_downstream || u->body_downstream) { +- *errmsg = "not supported for downstream"; ++ *errmsg = "not supported for downstream sockets"; + return NGX_ERROR; + } + +@@ -1637,7 +1638,7 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + + if (sess 
!= NULL) { + if (ngx_ssl_set_session(c, sess) != NGX_OK) { +- *errmsg = "lua tls set session failed"; ++ *errmsg = "tls set session failed"; + return NGX_ERROR; + } + +@@ -1660,13 +1661,13 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + x509 = sk_X509_value(chain, 0); + if (x509 == NULL) { + ERR_clear_error(); +- *errmsg = "lua tls fetch client certificate from chain failed"; ++ *errmsg = "tls fetch client certificate from chain failed"; + return NGX_ERROR; + } + + if (SSL_use_certificate(ssl_conn, x509) == 0) { + ERR_clear_error(); +- *errmsg = "lua tls set client certificate failed"; ++ *errmsg = "tls set client certificate failed"; + return NGX_ERROR; + } + +@@ -1676,21 +1677,21 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + x509 = sk_X509_value(chain, i); + if (x509 == NULL) { + ERR_clear_error(); +- *errmsg = "lua tls fetch client intermediate certificate " +- "from chain failed"; ++ *errmsg = "tls fetch client intermediate certificate from " ++ "chain failed"; + return NGX_ERROR; + } + + if (SSL_add1_chain_cert(ssl_conn, x509) == 0) { + ERR_clear_error(); +- *errmsg = "lua tls set client intermediate certificate failed"; ++ *errmsg = "tls set client intermediate certificate failed"; + return NGX_ERROR; + } + } + + if (SSL_use_PrivateKey(ssl_conn, pkey) == 0) { + ERR_clear_error(); +- *errmsg = "lua ssl set client private key failed"; ++ *errmsg = "tls set client private key failed"; + return NGX_ERROR; + } + } +@@ -1709,7 +1710,7 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + } + + #else +- *errmsg = "OpenSSL has no SNI support"; ++ *errmsg = "no TLS extension support"; + return NGX_ERROR; + #endif + } +@@ -1752,7 +1753,6 @@ new_ssl_name: + u->ssl_name.data = ngx_alloc(server_name->len, ngx_cycle->log); + if (u->ssl_name.data == NULL) { + u->ssl_name.len = 0; +- + *errmsg = "no memory"; + return NGX_ERROR; + } +@@ -1773,7 +1773,7 @@ new_ssl_name: + rc = ngx_ssl_handshake(c); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, +- "ngx_ssl_handshake returned %d", rc); ++ "ngx_ssl_handshake returned: %d", rc); + + if (rc == NGX_AGAIN) { + if (c->write->timer_set) { +@@ -1805,7 +1805,6 @@ new_ssl_name: + + if (rc == NGX_ERROR) { + *errmsg = u->error_ret; +- + return NGX_ERROR; + } + +@@ -1919,7 +1918,6 @@ failed: + } + + +- + int + ngx_http_lua_ffi_socket_tcp_get_tlshandshake_result(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, ngx_ssl_session_t **sess, +-- +2.32.0 (Apple Git-132) + + +From 0bcf4d1a955db9218e8b0e50685c1d0de8c90b9a Mon Sep 17 00:00:00 2001 +From: Datong Sun +Date: Tue, 24 Nov 2020 01:49:28 -0800 +Subject: [PATCH 09/17] fixed style according to @spacewander's review + +--- + src/ngx_http_lua_socket_tcp.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index bd7cc7ca..1aa37627 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -1536,15 +1536,15 @@ static const char * + ngx_http_lua_socket_tcp_check_busy(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, unsigned int ops) + { +- if (ops & SOCKET_OP_CONNECT && u->conn_waiting) { ++ if ((ops & SOCKET_OP_CONNECT) && u->conn_waiting) { + return "socket busy connecting"; + } + +- if (ops & SOCKET_OP_READ && u->read_waiting) { ++ if ((ops & SOCKET_OP_READ) && u->read_waiting) { + return "socket busy reading"; + } + +- if (ops 
& SOCKET_OP_WRITE ++ if ((ops & SOCKET_OP_WRITE) + && (u->write_waiting + || (u->raw_downstream + && (r->connection->buffered & NGX_HTTP_LOWLEVEL_BUFFERED)))) +-- +2.32.0 (Apple Git-132) + + +From 9b010940f77bbd486c1192eed23af7c35baf4cdb Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 21 Jan 2022 13:42:06 +0800 +Subject: [PATCH 10/17] resize tcp_socket_metatable to 7 + +--- + src/ngx_http_lua_socket_tcp.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 1aa37627..7cdc45c4 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -160,6 +160,8 @@ enum { + SOCKET_CONNECT_TIMEOUT_INDEX = 2, + SOCKET_SEND_TIMEOUT_INDEX = 4, + SOCKET_READ_TIMEOUT_INDEX = 5, ++ SOCKET_CLIENT_CERT_INDEX = 6, ++ SOCKET_CLIENT_KEY_INDEX = 7, + }; + + +@@ -424,7 +426,7 @@ ngx_http_lua_socket_tcp(lua_State *L) + + ngx_http_lua_check_context(L, ctx, NGX_HTTP_LUA_CONTEXT_YIELDABLE); + +- lua_createtable(L, 5 /* narr */, 1 /* nrec */); ++ lua_createtable(L, 7 /* narr */, 1 /* nrec */); + lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( + tcp_socket_metatable_key)); + lua_rawget(L, LUA_REGISTRYINDEX); +-- +2.32.0 (Apple Git-132) + + +From 36245613be1031b22b0e6b2eec398dac288fe9a5 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 21 Jan 2022 14:12:13 +0800 +Subject: [PATCH 11/17] change errms tls to ssl + +--- + src/ngx_http_lua_socket_tcp.c | 24 ++++++++++++------------ + 1 file changed, 12 insertions(+), 12 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 7cdc45c4..af986364 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -1574,7 +1574,7 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + X509 *x509; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, +- "lua tcp socket tls handshake"); ++ "lua tcp socket ssl handshake"); + + if (u == NULL + || u->peer.connection == NULL +@@ -1640,12 +1640,12 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + + if (sess != NULL) { + if (ngx_ssl_set_session(c, sess) != NGX_OK) { +- *errmsg = "tls set session failed"; ++ *errmsg = "ssl set session failed"; + return NGX_ERROR; + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, +- "lua tls set session: %p", sess); ++ "lua ssl set session: %p", sess); + + } else { + u->ssl_session_reuse = enable_session_reuse; +@@ -1663,13 +1663,13 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + x509 = sk_X509_value(chain, 0); + if (x509 == NULL) { + ERR_clear_error(); +- *errmsg = "tls fetch client certificate from chain failed"; ++ *errmsg = "ssl fetch client certificate from chain failed"; + return NGX_ERROR; + } + + if (SSL_use_certificate(ssl_conn, x509) == 0) { + ERR_clear_error(); +- *errmsg = "tls set client certificate failed"; ++ *errmsg = "ssl set client certificate failed"; + return NGX_ERROR; + } + +@@ -1679,28 +1679,28 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + x509 = sk_X509_value(chain, i); + if (x509 == NULL) { + ERR_clear_error(); +- *errmsg = "tls fetch client intermediate certificate from " ++ *errmsg = "ssl fetch client intermediate certificate from " + "chain failed"; + return NGX_ERROR; + } + + if (SSL_add1_chain_cert(ssl_conn, x509) == 0) { + ERR_clear_error(); +- *errmsg 
= "tls set client intermediate certificate failed"; ++ *errmsg = "ssl set client intermediate certificate failed"; + return NGX_ERROR; + } + } + + if (SSL_use_PrivateKey(ssl_conn, pkey) == 0) { + ERR_clear_error(); +- *errmsg = "tls set client private key failed"; ++ *errmsg = "ssl set client private key failed"; + return NGX_ERROR; + } + } + + if (server_name != NULL && server_name->data != NULL) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, +- "lua tls server name: \"%V\"", server_name); ++ "lua ssl server name: \"%V\"", server_name); + + #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME + if (SSL_set_tlsext_host_name(c->ssl->connection, +@@ -1926,7 +1926,7 @@ ngx_http_lua_ffi_socket_tcp_get_tlshandshake_result(ngx_http_request_t *r, + const char **errmsg, int *openssl_error_code) + { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, +- "lua cosocket get TLS handshake result for upstream: %p", u); ++ "lua cosocket get SSL handshake result for upstream: %p", u); + + if (u->error_ret != NULL) { + *errmsg = u->error_ret; +@@ -1962,7 +1962,7 @@ ngx_http_lua_tls_handshake_retval_handler(ngx_http_request_t *r, + u->ssl_session_ret = ssl_session; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, +- "lua tls save session: %p", ssl_session); ++ "lua ssl save session: %p", ssl_session); + } + + return 0; +@@ -1973,7 +1973,7 @@ void + ngx_http_lua_ffi_tls_free_session(ngx_ssl_session_t *sess) + { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0, +- "lua tls free session: %p", sess); ++ "lua ssl free session: %p", sess); + + ngx_ssl_free_session(sess); + } +-- +2.32.0 (Apple Git-132) + + +From 1f12b89485da6b7ac5dd23810bf094f214dc324e Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 21 Jan 2022 14:38:49 +0800 +Subject: [PATCH 12/17] rename function name from tls to ssl + +--- + src/ngx_http_lua_socket_tcp.c | 28 ++++++++++++++-------------- + 1 file changed, 14 insertions(+), 14 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index af986364..76e98597 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -22,8 +22,8 @@ + static int ngx_http_lua_socket_tcp(lua_State *L); + static int ngx_http_lua_socket_tcp_connect(lua_State *L); + #if (NGX_HTTP_SSL) +-static void ngx_http_lua_tls_handshake_handler(ngx_connection_t *c); +-static int ngx_http_lua_tls_handshake_retval_handler(ngx_http_request_t *r, ++static void ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c); ++static int ngx_http_lua_ssl_handshake_retval_handler(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, lua_State *L); + #endif + static int ngx_http_lua_socket_tcp_receive(lua_State *L); +@@ -1559,7 +1559,7 @@ ngx_http_lua_socket_tcp_check_busy(ngx_http_request_t *r, + + + int +-ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, ++ngx_http_lua_ffi_socket_tcp_sslhandshake(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, ngx_ssl_session_t *sess, + int enable_session_reuse, ngx_str_t *server_name, int verify, + int ocsp_status_req, STACK_OF(X509) *chain, EVP_PKEY *pkey, +@@ -1614,7 +1614,7 @@ ngx_http_lua_ffi_socket_tcp_tlshandshake(ngx_http_request_t *r, + + u->ssl_session_reuse = enable_session_reuse; + +- (void) ngx_http_lua_tls_handshake_retval_handler(r, u, NULL); ++ (void) ngx_http_lua_ssl_handshake_retval_handler(r, u, NULL); + + return NGX_OK; + } +@@ -1785,13 +1785,13 @@ new_ssl_name: + ngx_add_timer(c->read, 
u->connect_timeout); + + u->conn_waiting = 1; +- u->write_prepare_retvals = ngx_http_lua_tls_handshake_retval_handler; ++ u->write_prepare_retvals = ngx_http_lua_ssl_handshake_retval_handler; + + ngx_http_lua_cleanup_pending_operation(coctx); + coctx->cleanup = ngx_http_lua_coctx_cleanup; + coctx->data = u; + +- c->ssl->handler = ngx_http_lua_tls_handshake_handler; ++ c->ssl->handler = ngx_http_lua_ssl_handshake_handler; + + if (ctx->entered_content_phase) { + r->write_event_handler = ngx_http_lua_content_wev_handler; +@@ -1803,7 +1803,7 @@ new_ssl_name: + return NGX_AGAIN; + } + +- ngx_http_lua_tls_handshake_handler(c); ++ ngx_http_lua_ssl_handshake_handler(c); + + if (rc == NGX_ERROR) { + *errmsg = u->error_ret; +@@ -1815,7 +1815,7 @@ new_ssl_name: + + + static void +-ngx_http_lua_tls_handshake_handler(ngx_connection_t *c) ++ngx_http_lua_ssl_handshake_handler(ngx_connection_t *c) + { + int waiting; + ngx_int_t rc; +@@ -1860,7 +1860,7 @@ ngx_http_lua_tls_handshake_handler(ngx_connection_t *c) + + llcf = ngx_http_get_module_loc_conf(r, ngx_http_lua_module); + if (llcf->log_socket_errors) { +- ngx_log_error(NGX_LOG_ERR, dc->log, 0, "lua tls " ++ ngx_log_error(NGX_LOG_ERR, dc->log, 0, "lua ssl " + "certificate verify error: (%d: %s)", + rc, u->error_ret); + } +@@ -1877,7 +1877,7 @@ ngx_http_lua_tls_handshake_handler(ngx_connection_t *c) + + llcf = ngx_http_get_module_loc_conf(r, ngx_http_lua_module); + if (llcf->log_socket_errors) { +- ngx_log_error(NGX_LOG_ERR, dc->log, 0, "lua tls " ++ ngx_log_error(NGX_LOG_ERR, dc->log, 0, "lua ssl " + "certificate does not match host \"%V\"", + &u->ssl_name); + } +@@ -1892,7 +1892,7 @@ ngx_http_lua_tls_handshake_handler(ngx_connection_t *c) + ngx_http_lua_socket_handle_conn_success(r, u); + + } else { +- (void) ngx_http_lua_tls_handshake_retval_handler(r, u, NULL); ++ (void) ngx_http_lua_ssl_handshake_retval_handler(r, u, NULL); + } + + if (waiting) { +@@ -1921,7 +1921,7 @@ failed: + + + int +-ngx_http_lua_ffi_socket_tcp_get_tlshandshake_result(ngx_http_request_t *r, ++ngx_http_lua_ffi_socket_tcp_get_sslhandshake_result(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, ngx_ssl_session_t **sess, + const char **errmsg, int *openssl_error_code) + { +@@ -1942,7 +1942,7 @@ ngx_http_lua_ffi_socket_tcp_get_tlshandshake_result(ngx_http_request_t *r, + + + static int +-ngx_http_lua_tls_handshake_retval_handler(ngx_http_request_t *r, ++ngx_http_lua_ssl_handshake_retval_handler(ngx_http_request_t *r, + ngx_http_lua_socket_tcp_upstream_t *u, lua_State *L) + { + ngx_connection_t *c; +@@ -1970,7 +1970,7 @@ ngx_http_lua_tls_handshake_retval_handler(ngx_http_request_t *r, + + + void +-ngx_http_lua_ffi_tls_free_session(ngx_ssl_session_t *sess) ++ngx_http_lua_ffi_ssl_free_session(ngx_ssl_session_t *sess) + { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0, + "lua ssl free session: %p", sess); +-- +2.32.0 (Apple Git-132) + + +From 84242561aa54ffed3bfab433cfef6f7797e01a47 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 21 Jan 2022 14:46:38 +0800 +Subject: [PATCH 13/17] rename to SOCKET_CLIENT_PRIV_INDEX + +--- + src/ngx_http_lua_socket_tcp.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 76e98597..90da45fc 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -160,8 +160,8 @@ enum { + SOCKET_CONNECT_TIMEOUT_INDEX = 2, + SOCKET_SEND_TIMEOUT_INDEX 
= 4, + SOCKET_READ_TIMEOUT_INDEX = 5, +- SOCKET_CLIENT_CERT_INDEX = 6, +- SOCKET_CLIENT_KEY_INDEX = 7, ++ SOCKET_CLIENT_CERT_INDEX = 6, ++ SOCKET_CLIENT_PRIV_INDEX = 7, + }; + + +-- +2.32.0 (Apple Git-132) + + +From 555166646c525167f9e1e5bb81b6cb100a4834f9 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 21 Jan 2022 14:49:18 +0800 +Subject: [PATCH 14/17] rename to SOCKET_CLIENT_PKEY_INDEX + +--- + src/ngx_http_lua_socket_tcp.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 90da45fc..494486de 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -161,7 +161,7 @@ enum { + SOCKET_SEND_TIMEOUT_INDEX = 4, + SOCKET_READ_TIMEOUT_INDEX = 5, + SOCKET_CLIENT_CERT_INDEX = 6, +- SOCKET_CLIENT_PRIV_INDEX = 7, ++ SOCKET_CLIENT_PKEY_INDEX = 7, + }; + + +-- +2.32.0 (Apple Git-132) + + +From e9b54c43c05b064b831fe67d0e0aaff45b2ec505 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Fri, 21 Jan 2022 17:17:09 +0800 +Subject: [PATCH 15/17] need not to change tcp_socket_metatable + +--- + src/ngx_http_lua_socket_tcp.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 494486de..152d8cbd 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -160,8 +160,6 @@ enum { + SOCKET_CONNECT_TIMEOUT_INDEX = 2, + SOCKET_SEND_TIMEOUT_INDEX = 4, + SOCKET_READ_TIMEOUT_INDEX = 5, +- SOCKET_CLIENT_CERT_INDEX = 6, +- SOCKET_CLIENT_PKEY_INDEX = 7, + }; + + +@@ -426,7 +424,7 @@ ngx_http_lua_socket_tcp(lua_State *L) + + ngx_http_lua_check_context(L, ctx, NGX_HTTP_LUA_CONTEXT_YIELDABLE); + +- lua_createtable(L, 7 /* narr */, 1 /* nrec */); ++ lua_createtable(L, 5 /* narr */, 1 /* nrec */); + lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( + tcp_socket_metatable_key)); + lua_rawget(L, LUA_REGISTRYINDEX); +-- +2.32.0 (Apple Git-132) + + +From 6c47356ddc327a8692260bd6f43ea67cf2787a73 Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Wed, 26 Jan 2022 19:55:29 +0800 +Subject: [PATCH 16/17] increase nrec to 3 in the socket object + +--- + src/ngx_http_lua_socket_tcp.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 152d8cbd..8d71f8b4 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -424,7 +424,7 @@ ngx_http_lua_socket_tcp(lua_State *L) + + ngx_http_lua_check_context(L, ctx, NGX_HTTP_LUA_CONTEXT_YIELDABLE); + +- lua_createtable(L, 5 /* narr */, 1 /* nrec */); ++ lua_createtable(L, 5 /* narr */, 3 /* nrec */); + lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( + tcp_socket_metatable_key)); + lua_rawget(L, LUA_REGISTRYINDEX); +-- +2.32.0 (Apple Git-132) + + +From 1d538552c7629310d850d4360408ddb555afcbcc Mon Sep 17 00:00:00 2001 +From: chronolaw +Date: Sat, 29 Jan 2022 09:18:52 +0800 +Subject: [PATCH 17/17] change tcp_socket_metatable nrec to 15 + +--- + src/ngx_http_lua_socket_tcp.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c b/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +index 8d71f8b4..5dcdef0e 100644 +--- 
a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_tcp.c +@@ -312,7 +312,7 @@ ngx_http_lua_inject_socket_tcp_api(ngx_log_t *log, lua_State *L) + /* {{{tcp object metatable */ + lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( + tcp_socket_metatable_key)); +- lua_createtable(L, 0 /* narr */, 14 /* nrec */); ++ lua_createtable(L, 0 /* narr */, 15 /* nrec */); + + lua_pushcfunction(L, ngx_http_lua_socket_tcp_connect); + lua_setfield(L, -2, "connect"); +-- +2.32.0 (Apple Git-132) + diff --git a/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch b/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch new file mode 100644 index 00000000000..effdd5b517b --- /dev/null +++ b/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch @@ -0,0 +1,1319 @@ +From 2d12ac3e4045258b7a174b0505d92f63c26d82fc Mon Sep 17 00:00:00 2001 +From: Thibault Charbonnier +Date: Tue, 17 Sep 2019 11:43:44 -0700 +Subject: [PATCH 1/3] feature: implemented keepalive pooling in + 'balancer_by_lua*'. + +--- + src/ngx_http_lua_balancer.c | 738 ++++++++++++++++++++++++++++++------ + src/ngx_http_lua_common.h | 4 + + src/ngx_http_lua_module.c | 3 + + 3 files changed, 629 insertions(+), 116 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +index f71a3e00..0d403716 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +@@ -16,46 +16,102 @@ + #include "ngx_http_lua_directive.h" + + ++typedef struct { ++ ngx_uint_t size; ++ ngx_uint_t connections; ++ ++ uint32_t crc32; ++ ++ lua_State *lua_vm; ++ ++ ngx_queue_t cache; ++ ngx_queue_t free; ++} ngx_http_lua_balancer_keepalive_pool_t; ++ ++ ++typedef struct { ++ ngx_queue_t queue; ++ ngx_connection_t *connection; ++ ++ ngx_http_lua_balancer_keepalive_pool_t *cpool; ++} ngx_http_lua_balancer_keepalive_item_t; ++ ++ + struct ngx_http_lua_balancer_peer_data_s { +- /* the round robin data must be first */ +- ngx_http_upstream_rr_peer_data_t rrp; ++ ngx_uint_t cpool_size; ++ ngx_uint_t keepalive_requests; ++ ngx_msec_t keepalive_timeout; ++ ++ ngx_uint_t more_tries; ++ ngx_uint_t total_tries; + +- ngx_http_lua_srv_conf_t *conf; +- ngx_http_request_t *request; ++ int last_peer_state; + +- ngx_uint_t more_tries; +- ngx_uint_t total_tries; ++ uint32_t cpool_crc32; + +- struct sockaddr *sockaddr; +- socklen_t socklen; ++ void *data; + +- ngx_str_t *host; +- in_port_t port; ++ ngx_event_get_peer_pt original_get_peer; ++ ngx_event_free_peer_pt original_free_peer; + +- int last_peer_state; ++#if (NGX_HTTP_SSL) ++ ngx_event_set_peer_session_pt original_set_session; ++ ngx_event_save_peer_session_pt original_save_session; ++#endif ++ ++ ngx_http_request_t *request; ++ ngx_http_lua_srv_conf_t *conf; ++ ngx_http_lua_balancer_keepalive_pool_t *cpool; ++ ++ ngx_str_t *host; ++ ++ struct sockaddr *sockaddr; ++ socklen_t socklen; ++ ++ unsigned keepalive:1; + + #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) +- unsigned cloned_upstream_conf; /* :1 */ ++ unsigned cloned_upstream_conf:1; + #endif + }; + + +-#if (NGX_HTTP_SSL) +-static ngx_int_t ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, +- void *data); +-static void ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, +- void *data); +-#endif ++static ngx_int_t ngx_http_lua_balancer_by_chunk(lua_State *L, ++ ngx_http_request_t *r); + static ngx_int_t ngx_http_lua_balancer_init(ngx_conf_t 
*cf, + ngx_http_upstream_srv_conf_t *us); + static ngx_int_t ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, + ngx_http_upstream_srv_conf_t *us); + static ngx_int_t ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, + void *data); +-static ngx_int_t ngx_http_lua_balancer_by_chunk(lua_State *L, +- ngx_http_request_t *r); + static void ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, + void *data, ngx_uint_t state); ++static ngx_int_t ngx_http_lua_balancer_create_keepalive_pool(lua_State *L, ++ ngx_log_t *log, uint32_t cpool_crc32, ngx_uint_t cpool_size, ++ ngx_http_lua_balancer_keepalive_pool_t **cpool); ++static void ngx_http_lua_balancer_get_keepalive_pool(lua_State *L, ++ uint32_t cpool_crc32, ngx_http_lua_balancer_keepalive_pool_t **cpool); ++static void ngx_http_lua_balancer_free_keepalive_pool(ngx_log_t *log, ++ ngx_http_lua_balancer_keepalive_pool_t *cpool); ++static void ngx_http_lua_balancer_close(ngx_connection_t *c); ++static void ngx_http_lua_balancer_dummy_handler(ngx_event_t *ev); ++static void ngx_http_lua_balancer_close_handler(ngx_event_t *ev); ++#if (NGX_HTTP_SSL) ++static ngx_int_t ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, ++ void *data); ++static void ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, ++ void *data); ++#endif ++ ++ ++#define ngx_http_lua_balancer_keepalive_is_enabled(bp) \ ++ (bp->keepalive) ++ ++#define ngx_http_lua_balancer_peer_set(bp) \ ++ (bp->sockaddr && bp->socklen) ++ ++ ++static char ngx_http_lua_balancer_keepalive_pools_table_key; + + + ngx_int_t +@@ -102,6 +158,61 @@ ngx_http_lua_balancer_handler_inline(ngx_http_request_t *r, + } + + ++static ngx_int_t ++ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) ++{ ++ u_char *err_msg; ++ size_t len; ++ ngx_int_t rc; ++ ++ /* init nginx context in Lua VM */ ++ ngx_http_lua_set_req(L, r); ++ ++#ifndef OPENRESTY_LUAJIT ++ ngx_http_lua_create_new_globals_table(L, 0 /* narr */, 1 /* nrec */); ++ ++ /* {{{ make new env inheriting main thread's globals table */ ++ lua_createtable(L, 0, 1 /* nrec */); /* the metatable for the new env */ ++ ngx_http_lua_get_globals_table(L); ++ lua_setfield(L, -2, "__index"); ++ lua_setmetatable(L, -2); /* setmetatable({}, {__index = _G}) */ ++ /* }}} */ ++ ++ lua_setfenv(L, -2); /* set new running env for the code closure */ ++#endif /* OPENRESTY_LUAJIT */ ++ ++ lua_pushcfunction(L, ngx_http_lua_traceback); ++ lua_insert(L, 1); /* put it under chunk and args */ ++ ++ /* protected call user code */ ++ rc = lua_pcall(L, 0, 1, 1); ++ ++ lua_remove(L, 1); /* remove traceback function */ ++ ++ dd("rc == %d", (int) rc); ++ ++ if (rc != 0) { ++ /* error occurred when running loaded code */ ++ err_msg = (u_char *) lua_tolstring(L, -1, &len); ++ ++ if (err_msg == NULL) { ++ err_msg = (u_char *) "unknown reason"; ++ len = sizeof("unknown reason") - 1; ++ } ++ ++ ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, ++ "failed to run balancer_by_lua*: %*s", len, err_msg); ++ ++ lua_settop(L, 0); /* clear remaining elems on stack */ ++ ++ return NGX_ERROR; ++ } ++ ++ lua_settop(L, 0); /* clear remaining elems on stack */ ++ return rc; ++} ++ ++ + char * + ngx_http_lua_balancer_by_lua_block(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf) +@@ -125,16 +236,16 @@ char * + ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf) + { +- u_char *cache_key = NULL; +- u_char *name; +- ngx_str_t *value; +- ngx_http_lua_srv_conf_t *lscf = conf; +- ++ u_char *cache_key = NULL; ++ u_char *name; ++ ngx_str_t 
*value; + ngx_http_upstream_srv_conf_t *uscf; ++ ngx_http_lua_srv_conf_t *lscf = conf; + + dd("enter"); + +- /* must specify a content handler */ ++ /* content handler setup */ ++ + if (cmd->post == NULL) { + return NGX_CONF_ERROR; + } +@@ -178,11 +289,19 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + + lscf->balancer.src_key = cache_key; + ++ /* balancer setup */ ++ + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); + + if (uscf->peer.init_upstream) { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "load balancing method redefined"); ++ ++ lscf->balancer.original_init_upstream = uscf->peer.init_upstream; ++ ++ } else { ++ lscf->balancer.original_init_upstream = ++ ngx_http_upstream_init_round_robin; + } + + uscf->peer.init_upstream = ngx_http_lua_balancer_init; +@@ -198,14 +317,18 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + + + static ngx_int_t +-ngx_http_lua_balancer_init(ngx_conf_t *cf, +- ngx_http_upstream_srv_conf_t *us) ++ngx_http_lua_balancer_init(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us) + { +- if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { ++ ngx_http_lua_srv_conf_t *lscf; ++ ++ lscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_lua_module); ++ ++ if (lscf->balancer.original_init_upstream(cf, us) != NGX_OK) { + return NGX_ERROR; + } + +- /* this callback is called upon individual requests */ ++ lscf->balancer.original_init_peer = us->peer.init; ++ + us->peer.init = ngx_http_lua_balancer_init_peer; + + return NGX_OK; +@@ -216,33 +339,38 @@ static ngx_int_t + ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, + ngx_http_upstream_srv_conf_t *us) + { +- ngx_http_lua_srv_conf_t *bcf; ++ ngx_http_lua_srv_conf_t *lscf; + ngx_http_lua_balancer_peer_data_t *bp; + +- bp = ngx_pcalloc(r->pool, sizeof(ngx_http_lua_balancer_peer_data_t)); +- if (bp == NULL) { ++ lscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_lua_module); ++ ++ if (lscf->balancer.original_init_peer(r, us) != NGX_OK) { + return NGX_ERROR; + } + +- r->upstream->peer.data = &bp->rrp; +- +- if (ngx_http_upstream_init_round_robin_peer(r, us) != NGX_OK) { ++ bp = ngx_pcalloc(r->pool, sizeof(ngx_http_lua_balancer_peer_data_t)); ++ if (bp == NULL) { + return NGX_ERROR; + } + ++ bp->conf = lscf; ++ bp->request = r; ++ bp->data = r->upstream->peer.data; ++ bp->original_get_peer = r->upstream->peer.get; ++ bp->original_free_peer = r->upstream->peer.free; ++ ++ r->upstream->peer.data = bp; + r->upstream->peer.get = ngx_http_lua_balancer_get_peer; + r->upstream->peer.free = ngx_http_lua_balancer_free_peer; + + #if (NGX_HTTP_SSL) ++ bp->original_set_session = r->upstream->peer.set_session; ++ bp->original_save_session = r->upstream->peer.save_session; ++ + r->upstream->peer.set_session = ngx_http_lua_balancer_set_session; + r->upstream->peer.save_session = ngx_http_lua_balancer_save_session; + #endif + +- bcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_lua_module); +- +- bp->conf = bcf; +- bp->request = r; +- + return NGX_OK; + } + +@@ -250,25 +378,26 @@ ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, + static ngx_int_t + ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + { +- lua_State *L; +- ngx_int_t rc; +- ngx_http_request_t *r; +- ngx_http_lua_ctx_t *ctx; +- ngx_http_lua_srv_conf_t *lscf; +- ngx_http_lua_main_conf_t *lmcf; +- ngx_http_lua_balancer_peer_data_t *bp = data; ++ lua_State *L; ++ ngx_int_t rc; ++ ngx_queue_t *q; ++ ngx_connection_t *c; ++ ngx_http_request_t *r; ++ ngx_http_lua_ctx_t *ctx; ++ 
ngx_http_lua_srv_conf_t *lscf; ++ ngx_http_lua_main_conf_t *lmcf; ++ ngx_http_lua_balancer_keepalive_item_t *item; ++ ngx_http_lua_balancer_peer_data_t *bp = data; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, +- "lua balancer peer, tries: %ui", pc->tries); +- +- lscf = bp->conf; ++ "lua balancer: get peer, tries: %ui", pc->tries); + + r = bp->request; ++ lscf = bp->conf; + + ngx_http_lua_assert(lscf->balancer.handler && r); + + ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); +- + if (ctx == NULL) { + ctx = ngx_http_lua_create_ctx(r); + if (ctx == NULL) { +@@ -286,9 +415,15 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + + ctx->context = NGX_HTTP_LUA_CONTEXT_BALANCER; + ++ bp->cpool = NULL; + bp->sockaddr = NULL; + bp->socklen = 0; + bp->more_tries = 0; ++ bp->cpool_crc32 = 0; ++ bp->cpool_size = 0; ++ bp->keepalive_requests = 0; ++ bp->keepalive_timeout = 0; ++ bp->keepalive = 0; + bp->total_tries++; + + lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +@@ -300,7 +435,6 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + lmcf->balancer_peer_data = bp; + + rc = lscf->balancer.handler(r, lscf, L); +- + if (rc == NGX_ERROR) { + return NGX_ERROR; + } +@@ -322,105 +456,414 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + } + } + +- if (bp->sockaddr && bp->socklen) { ++ if (ngx_http_lua_balancer_peer_set(bp)) { + pc->sockaddr = bp->sockaddr; + pc->socklen = bp->socklen; ++ pc->name = bp->host; + pc->cached = 0; + pc->connection = NULL; +- pc->name = bp->host; +- +- bp->rrp.peers->single = 0; + + if (bp->more_tries) { + r->upstream->peer.tries += bp->more_tries; + } + +- dd("tries: %d", (int) r->upstream->peer.tries); ++ if (ngx_http_lua_balancer_keepalive_is_enabled(bp)) { ++ ngx_http_lua_balancer_get_keepalive_pool(L, bp->cpool_crc32, ++ &bp->cpool); ++ ++ if (bp->cpool == NULL ++ && ngx_http_lua_balancer_create_keepalive_pool(L, pc->log, ++ bp->cpool_crc32, ++ bp->cpool_size, ++ &bp->cpool) ++ != NGX_OK) ++ { ++ return NGX_ERROR; ++ } ++ ++ ngx_http_lua_assert(bp->cpool); ++ ++ if (!ngx_queue_empty(&bp->cpool->cache)) { ++ q = ngx_queue_head(&bp->cpool->cache); ++ ++ item = ngx_queue_data(q, ngx_http_lua_balancer_keepalive_item_t, ++ queue); ++ c = item->connection; ++ ++ ngx_queue_remove(q); ++ ngx_queue_insert_head(&bp->cpool->free, q); ++ ++ c->idle = 0; ++ c->sent = 0; ++ c->log = pc->log; ++ c->read->log = pc->log; ++ c->write->log = pc->log; ++ c->pool->log = pc->log; ++ ++ if (c->read->timer_set) { ++ ngx_del_timer(c->read); ++ } ++ ++ pc->cached = 1; ++ pc->connection = c; ++ ++ ngx_log_debug3(NGX_LOG_DEBUG_HTTP, pc->log, 0, ++ "lua balancer: keepalive reusing connection %p, " ++ "requests: %ui, cpool: %p", ++ c, c->requests, bp->cpool); ++ ++ return NGX_DONE; ++ } ++ ++ bp->cpool->connections++; ++ ++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, ++ "lua balancer: keepalive no free connection, " ++ "cpool: %p", bp->cpool); ++ } + + return NGX_OK; + } + +- return ngx_http_upstream_get_round_robin_peer(pc, &bp->rrp); ++ return bp->original_get_peer(pc, bp->data); + } + + +-static ngx_int_t +-ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) ++static void ++ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, ++ ngx_uint_t state) + { +- u_char *err_msg; +- size_t len; +- ngx_int_t rc; ++ ngx_queue_t *q; ++ ngx_connection_t *c; ++ ngx_http_upstream_t *u; ++ ngx_http_lua_balancer_keepalive_item_t *item; ++ ngx_http_lua_balancer_keepalive_pool_t *cpool; 
++ ngx_http_lua_balancer_peer_data_t *bp = data; + +- /* init nginx context in Lua VM */ +- ngx_http_lua_set_req(L, r); ++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, ++ "lua balancer: free peer, tries: %ui", pc->tries); + +-#ifndef OPENRESTY_LUAJIT +- ngx_http_lua_create_new_globals_table(L, 0 /* narr */, 1 /* nrec */); ++ u = bp->request->upstream; ++ c = pc->connection; + +- /* {{{ make new env inheriting main thread's globals table */ +- lua_createtable(L, 0, 1 /* nrec */); /* the metatable for the new env */ +- ngx_http_lua_get_globals_table(L); +- lua_setfield(L, -2, "__index"); +- lua_setmetatable(L, -2); /* setmetatable({}, {__index = _G}) */ +- /* }}} */ ++ if (ngx_http_lua_balancer_peer_set(bp)) { ++ bp->last_peer_state = (int) state; + +- lua_setfenv(L, -2); /* set new running env for the code closure */ +-#endif /* OPENRESTY_LUAJIT */ ++ if (pc->tries) { ++ pc->tries--; ++ } + +- lua_pushcfunction(L, ngx_http_lua_traceback); +- lua_insert(L, 1); /* put it under chunk and args */ ++ if (ngx_http_lua_balancer_keepalive_is_enabled(bp)) { ++ cpool = bp->cpool; + +- /* protected call user code */ +- rc = lua_pcall(L, 0, 1, 1); ++ if (state & NGX_PEER_FAILED ++ || c == NULL ++ || c->read->eof ++ || c->read->error ++ || c->read->timedout ++ || c->write->error ++ || c->write->timedout) ++ { ++ goto invalid; ++ } + +- lua_remove(L, 1); /* remove traceback function */ ++ if (bp->keepalive_requests ++ && c->requests >= bp->keepalive_requests) ++ { ++ goto invalid; ++ } + +- dd("rc == %d", (int) rc); ++ if (!u->keepalive) { ++ goto invalid; ++ } + +- if (rc != 0) { +- /* error occurred when running loaded code */ +- err_msg = (u_char *) lua_tolstring(L, -1, &len); ++ if (!u->request_body_sent) { ++ goto invalid; ++ } + +- if (err_msg == NULL) { +- err_msg = (u_char *) "unknown reason"; +- len = sizeof("unknown reason") - 1; ++ if (ngx_terminate || ngx_exiting) { ++ goto invalid; ++ } ++ ++ if (ngx_handle_read_event(c->read, 0) != NGX_OK) { ++ goto invalid; ++ } ++ ++ if (ngx_queue_empty(&cpool->free)) { ++ q = ngx_queue_last(&cpool->cache); ++ ngx_queue_remove(q); ++ ++ item = ngx_queue_data(q, ngx_http_lua_balancer_keepalive_item_t, ++ queue); ++ ++ ngx_http_lua_balancer_close(item->connection); ++ ++ } else { ++ q = ngx_queue_head(&cpool->free); ++ ngx_queue_remove(q); ++ ++ item = ngx_queue_data(q, ngx_http_lua_balancer_keepalive_item_t, ++ queue); ++ } ++ ++ ngx_log_debug3(NGX_LOG_DEBUG_HTTP, pc->log, 0, ++ "lua balancer: keepalive saving connection %p, " ++ "cpool: %p, connections: %ui", ++ c, cpool, cpool->connections); ++ ++ ngx_queue_insert_head(&cpool->cache, q); ++ ++ item->connection = c; ++ ++ pc->connection = NULL; ++ ++ if (bp->keepalive_timeout) { ++ c->read->delayed = 0; ++ ngx_add_timer(c->read, bp->keepalive_timeout); ++ ++ } else if (c->read->timer_set) { ++ ngx_del_timer(c->read); ++ } ++ ++ if (c->write->timer_set) { ++ ngx_del_timer(c->write); ++ } ++ ++ c->write->handler = ngx_http_lua_balancer_dummy_handler; ++ c->read->handler = ngx_http_lua_balancer_close_handler; ++ ++ c->data = item; ++ c->idle = 1; ++ c->log = ngx_cycle->log; ++ c->read->log = ngx_cycle->log; ++ c->write->log = ngx_cycle->log; ++ c->pool->log = ngx_cycle->log; ++ ++ if (c->read->ready) { ++ ngx_http_lua_balancer_close_handler(c->read); ++ } ++ ++ return; ++ ++invalid: ++ ++ cpool->connections--; ++ ++ ngx_log_debug3(NGX_LOG_DEBUG_HTTP, pc->log, 0, ++ "lua balancer: keepalive not saving connection %p, " ++ "cpool: %p, connections: %ui", ++ c, cpool, cpool->connections); ++ ++ if 
(cpool->connections == 0) { ++ ngx_http_lua_balancer_free_keepalive_pool(pc->log, cpool); ++ } + } + +- ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, +- "failed to run balancer_by_lua*: %*s", len, err_msg); ++ return; ++ } + +- lua_settop(L, 0); /* clear remaining elems on stack */ ++ bp->original_free_peer(pc, bp->data, state); ++} ++ ++ ++static ngx_int_t ++ngx_http_lua_balancer_create_keepalive_pool(lua_State *L, ngx_log_t *log, ++ uint32_t cpool_crc32, ngx_uint_t cpool_size, ++ ngx_http_lua_balancer_keepalive_pool_t **cpool) ++{ ++ size_t size; ++ ngx_uint_t i; ++ ngx_http_lua_balancer_keepalive_pool_t *upool; ++ ngx_http_lua_balancer_keepalive_item_t *items; ++ ++ /* get upstream connection pools table */ ++ lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( ++ balancer_keepalive_pools_table_key)); ++ lua_rawget(L, LUA_REGISTRYINDEX); /* pools? */ ++ ++ ngx_http_lua_assert(lua_istable(L, -1)); ++ ++ size = sizeof(ngx_http_lua_balancer_keepalive_pool_t) ++ + sizeof(ngx_http_lua_balancer_keepalive_item_t) * cpool_size; + ++ upool = lua_newuserdata(L, size); /* pools upool */ ++ if (upool == NULL) { + return NGX_ERROR; + } + +- lua_settop(L, 0); /* clear remaining elems on stack */ +- return rc; ++ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, log, 0, ++ "lua balancer: keepalive create pool, crc32: %ui, " ++ "size: %ui", cpool_crc32, cpool_size); ++ ++ upool->lua_vm = L; ++ upool->crc32 = cpool_crc32; ++ upool->size = cpool_size; ++ upool->connections = 0; ++ ++ ngx_queue_init(&upool->cache); ++ ngx_queue_init(&upool->free); ++ ++ lua_rawseti(L, -2, cpool_crc32); /* pools */ ++ lua_pop(L, 1); /* orig stack */ ++ ++ items = (ngx_http_lua_balancer_keepalive_item_t *) (&upool->free + 1); ++ ++ ngx_http_lua_assert((void *) items == ngx_align_ptr(items, NGX_ALIGNMENT)); ++ ++ for (i = 0; i < cpool_size; i++) { ++ ngx_queue_insert_head(&upool->free, &items[i].queue); ++ items[i].cpool = upool; ++ } ++ ++ *cpool = upool; ++ ++ return NGX_OK; + } + + + static void +-ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, +- ngx_uint_t state) ++ngx_http_lua_balancer_get_keepalive_pool(lua_State *L, uint32_t cpool_crc32, ++ ngx_http_lua_balancer_keepalive_pool_t **cpool) + { +- ngx_http_lua_balancer_peer_data_t *bp = data; ++ ngx_http_lua_balancer_keepalive_pool_t *upool; ++ ++ /* get upstream connection pools table */ ++ lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( ++ balancer_keepalive_pools_table_key)); ++ lua_rawget(L, LUA_REGISTRYINDEX); /* pools? */ ++ ++ if (lua_isnil(L, -1)) { ++ lua_pop(L, 1); /* orig stack */ ++ ++ /* create upstream connection pools table */ ++ lua_createtable(L, 0, 0); /* pools */ ++ lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( ++ balancer_keepalive_pools_table_key)); ++ lua_pushvalue(L, -2); /* pools pools_table_key pools */ ++ lua_rawset(L, LUA_REGISTRYINDEX); /* pools */ ++ } + +- ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, +- "lua balancer free peer, tries: %ui", pc->tries); ++ ngx_http_lua_assert(lua_istable(L, -1)); + +- if (bp->sockaddr && bp->socklen) { +- bp->last_peer_state = (int) state; ++ lua_rawgeti(L, -1, cpool_crc32); /* pools upool? 
*/ ++ upool = lua_touserdata(L, -1); ++ lua_pop(L, 2); /* orig stack */ + +- if (pc->tries) { +- pc->tries--; ++ *cpool = upool; ++} ++ ++ ++static void ++ngx_http_lua_balancer_free_keepalive_pool(ngx_log_t *log, ++ ngx_http_lua_balancer_keepalive_pool_t *cpool) ++{ ++ lua_State *L; ++ ++ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, log, 0, ++ "lua balancer: keepalive free pool %p, crc32: %ui", ++ cpool, cpool->crc32); ++ ++ ngx_http_lua_assert(cpool->connections == 0); ++ ++ L = cpool->lua_vm; ++ ++ /* get upstream connection pools table */ ++ lua_pushlightuserdata(L, ngx_http_lua_lightudata_mask( ++ balancer_keepalive_pools_table_key)); ++ lua_rawget(L, LUA_REGISTRYINDEX); /* pools? */ ++ ++ if (lua_isnil(L, -1)) { ++ lua_pop(L, 1); /* orig stack */ ++ return; ++ } ++ ++ ngx_http_lua_assert(lua_istable(L, -1)); ++ ++ lua_pushnil(L); /* pools nil */ ++ lua_rawseti(L, -2, cpool->crc32); /* pools */ ++ lua_pop(L, 1); /* orig stack */ ++} ++ ++ ++static void ++ngx_http_lua_balancer_close(ngx_connection_t *c) ++{ ++ ngx_http_lua_balancer_keepalive_item_t *item; ++ ++ item = c->data; ++ ++#if (NGX_HTTP_SSL) ++ if (c->ssl) { ++ c->ssl->no_wait_shutdown = 1; ++ c->ssl->no_send_shutdown = 1; ++ ++ if (ngx_ssl_shutdown(c) == NGX_AGAIN) { ++ c->ssl->handler = ngx_http_lua_balancer_close; ++ return; ++ } ++ } ++#endif ++ ++ ngx_destroy_pool(c->pool); ++ ngx_close_connection(c); ++ ++ item->cpool->connections--; ++ ++ ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0, ++ "lua balancer: keepalive closing connection %p, cpool: %p, " ++ "connections: %ui", ++ c, item->cpool, item->cpool->connections); ++} ++ ++ ++static void ++ngx_http_lua_balancer_dummy_handler(ngx_event_t *ev) ++{ ++ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ev->log, 0, ++ "lua balancer: dummy handler"); ++} ++ ++ ++static void ++ngx_http_lua_balancer_close_handler(ngx_event_t *ev) ++{ ++ ngx_http_lua_balancer_keepalive_item_t *item; ++ ++ int n; ++ char buf[1]; ++ ngx_connection_t *c; ++ ++ c = ev->data; ++ ++ if (c->close || c->read->timedout) { ++ goto close; ++ } ++ ++ n = recv(c->fd, buf, 1, MSG_PEEK); ++ ++ if (n == -1 && ngx_socket_errno == NGX_EAGAIN) { ++ ev->ready = 0; ++ ++ if (ngx_handle_read_event(c->read, 0) != NGX_OK) { ++ goto close; + } + + return; + } + +- /* fallback */ ++close: ++ ++ item = c->data; ++ c->log = ev->log; ++ ++ ngx_http_lua_balancer_close(c); + +- ngx_http_upstream_free_round_robin_peer(pc, data, state); ++ ngx_queue_remove(&item->queue); ++ ngx_queue_insert_head(&item->cpool->free, &item->queue); ++ ++ if (item->cpool->connections == 0) { ++ ngx_http_lua_balancer_free_keepalive_pool(ev->log, item->cpool); ++ } + } + + +@@ -431,12 +874,12 @@ ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, void *data) + { + ngx_http_lua_balancer_peer_data_t *bp = data; + +- if (bp->sockaddr && bp->socklen) { ++ if (ngx_http_lua_balancer_peer_set(bp)) { + /* TODO */ + return NGX_OK; + } + +- return ngx_http_upstream_set_round_robin_peer_session(pc, &bp->rrp); ++ return bp->original_set_session(pc, bp->data); + } + + +@@ -445,13 +888,12 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) + { + ngx_http_lua_balancer_peer_data_t *bp = data; + +- if (bp->sockaddr && bp->socklen) { ++ if (ngx_http_lua_balancer_peer_set(bp)) { + /* TODO */ + return; + } + +- ngx_http_upstream_save_round_robin_peer_session(pc, &bp->rrp); +- return; ++ bp->original_save_session(pc, bp->data); + } + + #endif +@@ -459,14 +901,14 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) + + int + 
ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, +- const u_char *addr, size_t addr_len, int port, char **err) ++ const u_char *addr, size_t addr_len, int port, unsigned int cpool_crc32, ++ unsigned int cpool_size, char **err) + { +- ngx_url_t url; +- ngx_http_lua_ctx_t *ctx; +- ngx_http_upstream_t *u; +- +- ngx_http_lua_main_conf_t *lmcf; +- ngx_http_lua_balancer_peer_data_t *bp; ++ ngx_url_t url; ++ ngx_http_upstream_t *u; ++ ngx_http_lua_ctx_t *ctx; ++ ngx_http_lua_main_conf_t *lmcf; ++ ngx_http_lua_balancer_peer_data_t *bp; + + if (r == NULL) { + *err = "no request found"; +@@ -536,6 +978,70 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + return NGX_ERROR; + } + ++ bp->cpool_crc32 = (uint32_t) cpool_crc32; ++ bp->cpool_size = (ngx_uint_t) cpool_size; ++ ++ return NGX_OK; ++} ++ ++ ++int ++ngx_http_lua_ffi_balancer_enable_keepalive(ngx_http_request_t *r, ++ unsigned long timeout, unsigned int max_requests, char **err) ++{ ++ ngx_http_upstream_t *u; ++ ngx_http_lua_ctx_t *ctx; ++ ngx_http_lua_main_conf_t *lmcf; ++ ngx_http_lua_balancer_peer_data_t *bp; ++ ++ if (r == NULL) { ++ *err = "no request found"; ++ return NGX_ERROR; ++ } ++ ++ u = r->upstream; ++ ++ if (u == NULL) { ++ *err = "no upstream found"; ++ return NGX_ERROR; ++ } ++ ++ ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); ++ if (ctx == NULL) { ++ *err = "no ctx found"; ++ return NGX_ERROR; ++ } ++ ++ if ((ctx->context & NGX_HTTP_LUA_CONTEXT_BALANCER) == 0) { ++ *err = "API disabled in the current context"; ++ return NGX_ERROR; ++ } ++ ++ lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); ++ ++ /* we cannot read r->upstream->peer.data here directly because ++ * it could be overridden by other modules like ++ * ngx_http_upstream_keepalive_module. 
++ */ ++ bp = lmcf->balancer_peer_data; ++ if (bp == NULL) { ++ *err = "no upstream peer data found"; ++ return NGX_ERROR; ++ } ++ ++ if (!ngx_http_lua_balancer_peer_set(bp)) { ++ *err = "no current peer set"; ++ return NGX_ERROR; ++ } ++ ++ if (!bp->cpool_crc32) { ++ bp->cpool_crc32 = ngx_crc32_long(bp->host->data, bp->host->len); ++ } ++ ++ bp->keepalive_timeout = (ngx_msec_t) timeout; ++ bp->keepalive_requests = (ngx_uint_t) max_requests; ++ bp->keepalive = 1; ++ + return NGX_OK; + } + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/ngx_lua-0.10.20/src/ngx_http_lua_common.h +index 781a2454..9ce6836a 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h +@@ -320,6 +320,10 @@ union ngx_http_lua_srv_conf_u { + #endif + + struct { ++ ngx_http_upstream_init_pt original_init_upstream; ++ ngx_http_upstream_init_peer_pt original_init_peer; ++ uintptr_t data; ++ + ngx_http_lua_srv_conf_handler_pt handler; + ngx_str_t src; + u_char *src_key; +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_module.c b/ngx_lua-0.10.20/src/ngx_http_lua_module.c +index 9816d864..5d7cedfd 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c +@@ -1068,6 +1068,9 @@ ngx_http_lua_create_srv_conf(ngx_conf_t *cf) + * lscf->srv.ssl_session_fetch_src = { 0, NULL }; + * lscf->srv.ssl_session_fetch_src_key = NULL; + * ++ * lscf->balancer.original_init_upstream = NULL; ++ * lscf->balancer.original_init_peer = NULL; ++ * lscf->balancer.data = NULL; + * lscf->balancer.handler = NULL; + * lscf->balancer.src = { 0, NULL }; + * lscf->balancer.src_key = NULL; +-- +2.26.2 + + +From 4c5cb29a265b2f9524434322adf15d07deec6c7f Mon Sep 17 00:00:00 2001 +From: Thibault Charbonnier +Date: Tue, 17 Sep 2019 11:43:54 -0700 +Subject: [PATCH 2/3] feature: we now avoid the need for 'upstream' blocks to + define a stub 'server' directive when using 'balancer_by_lua*'. 
+ +--- + src/ngx_http_lua_balancer.c | 42 +++++++++++++++++++++++++++++++++++-- + 1 file changed, 40 insertions(+), 2 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +index 0d403716..5c862d22 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +@@ -111,7 +111,8 @@ static void ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, + (bp->sockaddr && bp->socklen) + + +-static char ngx_http_lua_balancer_keepalive_pools_table_key; ++static char ngx_http_lua_balancer_keepalive_pools_table_key; ++static struct sockaddr *ngx_http_lua_balancer_default_server_sockaddr; + + + ngx_int_t +@@ -239,7 +240,9 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + u_char *cache_key = NULL; + u_char *name; + ngx_str_t *value; ++ ngx_url_t url; + ngx_http_upstream_srv_conf_t *uscf; ++ ngx_http_upstream_server_t *us; + ngx_http_lua_srv_conf_t *lscf = conf; + + dd("enter"); +@@ -293,6 +296,29 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); + ++ if (uscf->servers->nelts == 0) { ++ us = ngx_array_push(uscf->servers); ++ if (us == NULL) { ++ return NGX_CONF_ERROR; ++ } ++ ++ ngx_memzero(us, sizeof(ngx_http_upstream_server_t)); ++ ngx_memzero(&url, sizeof(ngx_url_t)); ++ ++ ngx_str_set(&url.url, "0.0.0.1"); ++ url.default_port = 80; ++ ++ if (ngx_parse_url(cf->pool, &url) != NGX_OK) { ++ return NGX_CONF_ERROR; ++ } ++ ++ us->name = url.url; ++ us->addrs = url.addrs; ++ us->naddrs = url.naddrs; ++ ++ ngx_http_lua_balancer_default_server_sockaddr = us->addrs[0].sockaddr; ++ } ++ + if (uscf->peer.init_upstream) { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "load balancing method redefined"); +@@ -525,7 +551,19 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + return NGX_OK; + } + +- return bp->original_get_peer(pc, bp->data); ++ rc = bp->original_get_peer(pc, bp->data); ++ if (rc == NGX_ERROR) { ++ return rc; ++ } ++ ++ if (pc->sockaddr == ngx_http_lua_balancer_default_server_sockaddr) { ++ ngx_log_error(NGX_LOG_ERR, pc->log, 0, ++ "lua balancer: no peer set"); ++ ++ return NGX_ERROR; ++ } ++ ++ return rc; + } + + +-- +2.26.2 + + +From 941cd893573561574bc6a326d6306f1a30127293 Mon Sep 17 00:00:00 2001 +From: Thibault Charbonnier +Date: Tue, 17 Sep 2019 11:43:58 -0700 +Subject: [PATCH 3/3] refactor: used a simpler way to stash the balancer peer + data. 
+ +--- + src/ngx_http_lua_balancer.c | 91 +++++++++---------------------------- + src/ngx_http_lua_common.h | 7 --- + 2 files changed, 22 insertions(+), 76 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +index 5c862d22..3ea1f067 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +@@ -411,9 +411,9 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + ngx_http_request_t *r; + ngx_http_lua_ctx_t *ctx; + ngx_http_lua_srv_conf_t *lscf; +- ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_keepalive_item_t *item; + ngx_http_lua_balancer_peer_data_t *bp = data; ++ void *pdata; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: get peer, tries: %ui", pc->tries); +@@ -452,15 +452,13 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + bp->keepalive = 0; + bp->total_tries++; + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- /* balancer_by_lua does not support yielding and +- * there cannot be any conflicts among concurrent requests, +- * thus it is safe to store the peer data in the main conf. +- */ +- lmcf->balancer_peer_data = bp; ++ pdata = r->upstream->peer.data; ++ r->upstream->peer.data = bp; + + rc = lscf->balancer.handler(r, lscf, L); ++ ++ r->upstream->peer.data = pdata; ++ + if (rc == NGX_ERROR) { + return NGX_ERROR; + } +@@ -945,7 +943,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + ngx_url_t url; + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; +- ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; + + if (r == NULL) { +@@ -971,18 +968,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + return NGX_ERROR; + } + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- /* we cannot read r->upstream->peer.data here directly because +- * it could be overridden by other modules like +- * ngx_http_upstream_keepalive_module. +- */ +- bp = lmcf->balancer_peer_data; +- if (bp == NULL) { +- *err = "no upstream peer data found"; +- return NGX_ERROR; +- } +- + ngx_memzero(&url, sizeof(ngx_url_t)); + + url.url.data = ngx_palloc(r->pool, addr_len); +@@ -1006,6 +991,8 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + return NGX_ERROR; + } + ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; ++ + if (url.addrs && url.addrs[0].sockaddr) { + bp->sockaddr = url.addrs[0].sockaddr; + bp->socklen = url.addrs[0].socklen; +@@ -1029,7 +1016,6 @@ ngx_http_lua_ffi_balancer_enable_keepalive(ngx_http_request_t *r, + { + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; +- ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; + + if (r == NULL) { +@@ -1055,17 +1041,7 @@ ngx_http_lua_ffi_balancer_enable_keepalive(ngx_http_request_t *r, + return NGX_ERROR; + } + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- /* we cannot read r->upstream->peer.data here directly because +- * it could be overridden by other modules like +- * ngx_http_upstream_keepalive_module. 
+- */ +- bp = lmcf->balancer_peer_data; +- if (bp == NULL) { +- *err = "no upstream peer data found"; +- return NGX_ERROR; +- } ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; + + if (!ngx_http_lua_balancer_peer_set(bp)) { + *err = "no current peer set"; +@@ -1089,14 +1065,13 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, + long connect_timeout, long send_timeout, long read_timeout, + char **err) + { +- ngx_http_lua_ctx_t *ctx; +- ngx_http_upstream_t *u; ++ ngx_http_lua_ctx_t *ctx; ++ ngx_http_upstream_t *u; + + #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) + ngx_http_upstream_conf_t *ucf; +-#endif +- ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; ++#endif + + if (r == NULL) { + *err = "no request found"; +@@ -1121,15 +1096,9 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, + return NGX_ERROR; + } + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- bp = lmcf->balancer_peer_data; +- if (bp == NULL) { +- *err = "no upstream peer data found"; +- return NGX_ERROR; +- } +- + #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; ++ + if (!bp->cloned_upstream_conf) { + /* we clone the upstream conf for the current request so that + * we do not affect other requests at all. */ +@@ -1184,12 +1153,10 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, + int count, char **err) + { + #if (nginx_version >= 1007005) +- ngx_uint_t max_tries, total; ++ ngx_uint_t max_tries, total; + #endif +- ngx_http_lua_ctx_t *ctx; +- ngx_http_upstream_t *u; +- +- ngx_http_lua_main_conf_t *lmcf; ++ ngx_http_lua_ctx_t *ctx; ++ ngx_http_upstream_t *u; + ngx_http_lua_balancer_peer_data_t *bp; + + if (r == NULL) { +@@ -1215,13 +1182,7 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, + return NGX_ERROR; + } + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- bp = lmcf->balancer_peer_data; +- if (bp == NULL) { +- *err = "no upstream peer data found"; +- return NGX_ERROR; +- } ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; + + #if (nginx_version >= 1007005) + max_tries = r->upstream->conf->next_upstream_tries; +@@ -1247,12 +1208,10 @@ int + ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, + int *status, char **err) + { +- ngx_http_lua_ctx_t *ctx; +- ngx_http_upstream_t *u; +- ngx_http_upstream_state_t *state; +- ++ ngx_http_lua_ctx_t *ctx; ++ ngx_http_upstream_t *u; ++ ngx_http_upstream_state_t *state; + ngx_http_lua_balancer_peer_data_t *bp; +- ngx_http_lua_main_conf_t *lmcf; + + if (r == NULL) { + *err = "no request found"; +@@ -1277,13 +1236,7 @@ ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, + return NGX_ERROR; + } + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- bp = lmcf->balancer_peer_data; +- if (bp == NULL) { +- *err = "no upstream peer data found"; +- return NGX_ERROR; +- } ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; + + if (r->upstream_states && r->upstream_states->nelts > 1) { + state = r->upstream_states->elts; +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/ngx_lua-0.10.20/src/ngx_http_lua_common.h +index 9ce6836a..9a4342df 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h +@@ -239,13 +239,6 @@ struct ngx_http_lua_main_conf_s { + ngx_http_lua_main_conf_handler_pt exit_worker_handler; + ngx_str_t exit_worker_src; + +- ngx_http_lua_balancer_peer_data_t 
*balancer_peer_data; +- /* neither yielding nor recursion is possible in +- * balancer_by_lua*, so there cannot be any races among +- * concurrent requests and it is safe to store the peer +- * data pointer in the main conf. +- */ +- + ngx_chain_t *body_filter_chain; + /* neither yielding nor recursion is possible in + * body_filter_by_lua*, so there cannot be any races among +-- +2.26.2 diff --git a/build/openresty/patches/ngx_stream_lua-0.0.10_01-expose_request_struct.patch b/build/openresty/patches/ngx_stream_lua-0.0.10_01-expose_request_struct.patch new file mode 100644 index 00000000000..29b9d8ab2a7 --- /dev/null +++ b/build/openresty/patches/ngx_stream_lua-0.0.10_01-expose_request_struct.patch @@ -0,0 +1,26 @@ +From 0acb7f5ad0fbc9ee037f0c5d689f98861fe9e49b Mon Sep 17 00:00:00 2001 +From: Datong Sun +Date: Tue, 10 Dec 2019 11:51:53 -0800 +Subject: [PATCH] Sync with meta-lua-nginx-module + 1330009671cd86eaf045f9f2c5cda3727a94570f. + +--- + ngx_stream_lua-0.0.10/src/api/ngx_stream_lua_api.h | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/ngx_stream_lua-0.0.10/src/api/ngx_stream_lua_api.h b/ngx_stream_lua-0.0.10/src/api/ngx_stream_lua_api.h +index 0e5a18f..040ef84 100644 +--- a/bundle/ngx_stream_lua-0.0.10/src/api/ngx_stream_lua_api.h ++++ b/bundle/ngx_stream_lua-0.0.10/src/api/ngx_stream_lua_api.h +@@ -21,6 +21,9 @@ + + + ++#include ++#include "../ngx_stream_lua_request.h" ++ + + #include + #include +-- +2.20.1 diff --git a/build/openresty/patches/openresty-custom_prefix_and_cc.patch b/build/openresty/patches/openresty-custom_prefix_and_cc.patch new file mode 100644 index 00000000000..f90925125df --- /dev/null +++ b/build/openresty/patches/openresty-custom_prefix_and_cc.patch @@ -0,0 +1,107 @@ +diff --git a/configure b/configure +index d461294..2e8d3e2 100755 +--- a/configure ++++ b/configure +@@ -128,7 +128,7 @@ my $ngx_sbin; + my %resty_opts; + my $dry_run; + my @ngx_rpaths; +-my $cc; ++my $cc = $ENV{CC}; + my $cores; + my $luajit_xcflags = ''; + my $user_luajit_xcflags; +@@ -356,6 +356,9 @@ for my $opt (@ARGV) { + push @ngx_opts, "--with-$lib-opt=-g $opt"; + $with_ext_lib_opts{$lib} = 1; + ++ } elsif ($opt =~ /^--with-install-prefix=(.*)/) { ++ $resty_opts{install_prefix} = $1; ++ + } elsif ($opt =~ /^--sbin-path=(.*)/) { + $ngx_sbin = $1; + push @ngx_opts, $opt; +@@ -696,7 +699,12 @@ _END_ + #unshift @ngx_ld_opts, "-L$lib"; + #unshift @ngx_cc_opts, "-I$inc"; + +- push @ngx_rpaths, "$luajit_prefix/lib"; ++ my $real_luajit_prefix = $luajit_prefix; ++ if ($opts->{install_prefix}) { ++ $real_luajit_prefix = "$opts->{install_prefix}/openresty/luajit"; ++ } ++ ++ push @ngx_rpaths, "$real_luajit_prefix/lib"; + + } elsif ($opts->{luajit}) { + my $luajit_src = auto_complete 'LuaJIT'; +@@ -862,7 +870,12 @@ _END_ + #unshift @ngx_cc_opts, "-I$inc"; + + if ($platform ne 'msys') { +- push @ngx_rpaths, File::Spec->catfile($luajit_prefix, "lib"); ++ my $real_luajit_prefix = $luajit_prefix; ++ if ($opts->{install_prefix}) { ++ $real_luajit_prefix = "$opts->{install_prefix}/openresty/luajit"; ++ } ++ ++ push @ngx_rpaths, File::Spec->catfile($real_luajit_prefix, "lib"); + } + + cd '..'; +@@ -871,8 +884,13 @@ _END_ + if ($opts->{luajit} || $opts->{luajit_path}) { + # build lua modules + +- $lualib_prefix = File::Spec->catfile($prefix, "lualib"); +- my $site_lualib_prefix = File::Spec->catfile($prefix, "site/lualib"); ++ my $openresty_prefix = $prefix; ++ if ($opts->{install_prefix}) { ++ $openresty_prefix = "$opts->{install_prefix}/openresty"; ++ } ++ ++ $lualib_prefix = 
File::Spec->catfile($openresty_prefix, "lualib"); ++ my $site_lualib_prefix = File::Spec->catfile($openresty_prefix, "site/lualib"); + + { + my $ngx_lua_dir = auto_complete 'ngx_lua'; +@@ -926,6 +944,11 @@ _EOC_ + close $in; + } + ++ # set it back ++ $lualib_prefix = File::Spec->catfile($prefix, "lualib"); ++ $site_lualib_prefix = File::Spec->catfile($prefix, "site/lualib"); ++ ++ + unless ($opts->{no_lua_cjson}) { + my $dir = auto_complete 'lua-cjson'; + if (!defined $dir) { +@@ -1173,10 +1196,16 @@ _EOC_ + open my $in, $resty_bin + or die "Cannot open $resty_bin for reading: $!\n"; + my ($new, $found); ++ ++ my $real_ngx_sbin = $ngx_sbin; ++ if ($opts->{install_prefix}) { ++ $real_ngx_sbin = "$opts->{install_prefix}/openresty/nginx/sbin/nginx"; ++ } ++ + while (<$in>) { + if (/^my \$nginx_path;$/) { + $found = 1; +- $new .= qq/my \$nginx_path = '$ngx_sbin';\n/; ++ $new .= qq/my \$nginx_path = '$real_ngx_sbin';\n/; + + } else { + $new .= $_; +@@ -1354,6 +1383,9 @@ _EOC_ + --with-libpq=DIR specify the libpq (or postgresql) installation prefix + --with-pg_config=PATH specify the path of the pg_config utility + ++ --with-install-prefix=DIR specify the install prefix on target that differs from ++ --prefix that injects hardcoded paths in compiled binary ++ + Options directly inherited from nginx + + --sbin-path=PATH set nginx binary pathname diff --git a/build/openresty/pcre/BUILD.bazel b/build/openresty/pcre/BUILD.bazel new file mode 100644 index 00000000000..78a14d48ef7 --- /dev/null +++ b/build/openresty/pcre/BUILD.bazel @@ -0,0 +1,16 @@ +load("@bazel_skylib//rules:build_test.bzl", "build_test") + +exports_files( + [ + "BUILD.pcre.bazel", + ], + visibility = ["//visibility:public"], +) + +build_test( + name = "build", + targets = [ + "@pcre//:pcre", + ], + visibility = ["//:__pkg__"], +) diff --git a/build/openresty/pcre/BUILD.pcre.bazel b/build/openresty/pcre/BUILD.pcre.bazel new file mode 100644 index 00000000000..229005a870f --- /dev/null +++ b/build/openresty/pcre/BUILD.pcre.bazel @@ -0,0 +1,36 @@ +load("@rules_foreign_cc//foreign_cc:defs.bzl", "cmake") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +filegroup( + name = "all_srcs", + srcs = glob( + include = ["**"], + exclude = ["*.bazel"], + ), +) + +# pcre cmake detects cross compile automatically +cmake( + name = "pcre", + build_args = [ + "--", # <- Pass remaining options to the native tool. 
+ "-j" + KONG_VAR["NPROC"], + ], + cache_entries = { + "CMAKE_C_FLAGS": "${CMAKE_C_FLAGS:-} -fPIC", + "PCRE_BUILD_PCREGREP": "OFF", # we don't need the cli binary + "PCRE_BUILD_TESTS": "OFF", # test doesn't compile on aarch64-linux-gnu (cross) + "CMAKE_INSTALL_LIBDIR": "lib", # force distros that uses lib64 (rhel family) to use lib + }, + lib_source = ":all_srcs", + out_static_libs = ["libpcre.a"], + visibility = ["//visibility:public"], +) + +filegroup( + name = "pcre_dir", + srcs = [ + ":pcre", + ], + output_group = "gen_dir", +) diff --git a/build/openresty/pcre/README.md b/build/openresty/pcre/README.md new file mode 100644 index 00000000000..667545c0bd3 --- /dev/null +++ b/build/openresty/pcre/README.md @@ -0,0 +1,5 @@ +This target is modified from https://github.com/bazelbuild/rules_foreign_cc/tree/main/examples/third_party +with following chnages: + +- Read version from requirements.txt +- Updated `build_file` to new path under //build/openresty \ No newline at end of file diff --git a/build/openresty/pcre/pcre_repositories.bzl b/build/openresty/pcre/pcre_repositories.bzl new file mode 100644 index 00000000000..54448927f56 --- /dev/null +++ b/build/openresty/pcre/pcre_repositories.bzl @@ -0,0 +1,20 @@ +"""A module defining the third party dependency PCRE""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +def pcre_repositories(): + version = KONG_VAR["PCRE"] + + maybe( + http_archive, + name = "pcre", + build_file = "//build/openresty/pcre:BUILD.pcre.bazel", + strip_prefix = "pcre-" + version, + sha256 = "4e6ce03e0336e8b4a3d6c2b70b1c5e18590a5673a98186da90d4f33c23defc09", + urls = [ + "https://mirror.bazel.build/downloads.sourceforge.net/project/pcre/pcre/" + version + "/pcre-" + version + ".tar.gz", + "https://downloads.sourceforge.net/project/pcre/pcre/" + version + "/pcre-" + version + ".tar.gz", + ], + ) diff --git a/build/openresty/repositories.bzl b/build/openresty/repositories.bzl new file mode 100644 index 00000000000..8a6dec3d7d9 --- /dev/null +++ b/build/openresty/repositories.bzl @@ -0,0 +1,66 @@ +"""A module defining the third party dependency OpenResty""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") +load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") +load("@kong_bindings//:variables.bzl", "KONG_VAR") +load("//build/openresty/pcre:pcre_repositories.bzl", "pcre_repositories") +load("//build/openresty/openssl:openssl_repositories.bzl", "openssl_repositories") + +# This is a dummy file to export the module's repository. 
+_NGINX_MODULE_DUMMY_FILE = """ +filegroup( + name = "all_srcs", + srcs = glob(["**"]), + visibility = ["//visibility:public"], +) +""" + +def openresty_repositories(): + pcre_repositories() + openssl_repositories() + + openresty_version = KONG_VAR["OPENRESTY"] + + maybe( + openresty_http_archive_wrapper, + name = "openresty", + build_file = "//build/openresty:BUILD.openresty.bazel", + sha256 = "576ff4e546e3301ce474deef9345522b7ef3a9d172600c62057f182f3a68c1f6", + strip_prefix = "openresty-" + openresty_version, + urls = [ + "https://openresty.org/download/openresty-" + openresty_version + ".tar.gz", + "https://github.com/Kong/openresty-release-mirror/releases/download/" + openresty_version + "/openresty-" + openresty_version + ".tar.gz", + ], + patches = KONG_VAR["OPENRESTY_PATCHES"], + patch_args = ["-p1"], + ) + + maybe( + new_git_repository, + name = "lua-kong-nginx-module", + branch = KONG_VAR["LUA_KONG_NGINX_MODULE"], + remote = "https://github.com/Kong/lua-kong-nginx-module", + build_file_content = _NGINX_MODULE_DUMMY_FILE, + recursive_init_submodules = True, + ) + +def _openresty_binding_impl(ctx): + ctx.file("BUILD.bazel", _NGINX_MODULE_DUMMY_FILE) + ctx.file("WORKSPACE", "workspace(name = \"openresty_patch\")") + + version = "LuaJIT\\\\ 2.1.0-" + for path in ctx.path("../openresty/bundle").readdir(): + if path.basename.startswith("LuaJIT-2.1-"): + version = version + path.basename.replace("LuaJIT-2.1-", "") + break + + ctx.file("variables.bzl", 'LUAJIT_VERSION = "%s"' % version) + +openresty_binding = repository_rule( + implementation = _openresty_binding_impl, +) + +def openresty_http_archive_wrapper(name, **kwargs): + http_archive(name = name, **kwargs) + openresty_binding(name = name + "_binding") diff --git a/build/package/kong.logrotate b/build/package/kong.logrotate new file mode 100644 index 00000000000..e319135ff5b --- /dev/null +++ b/build/package/kong.logrotate @@ -0,0 +1,15 @@ +/usr/local/kong/logs/*.log { + su kong kong + rotate 14 + daily + missingok + compress + delaycompress + notifempty + sharedscripts + postrotate + if [ -f /usr/local/kong/pids/nginx.pid ]; then + kill -USR1 `cat /usr/local/kong/pids/nginx.pid` + fi + endscript +} diff --git a/build/package/kong.service b/build/package/kong.service new file mode 100644 index 00000000000..eeaa6502af9 --- /dev/null +++ b/build/package/kong.service @@ -0,0 +1,25 @@ +[Unit] +Description=Kong +Documentation=https://docs.konghq.com/ +After=syslog.target network.target remote-fs.target nss-lookup.target + +[Service] +ExecStartPre=/usr/local/bin/kong prepare -p /usr/local/kong +ExecStart=/usr/local/openresty/nginx/sbin/nginx -p /usr/local/kong -c nginx.conf +ExecReload=/usr/local/bin/kong prepare -p /usr/local/kong +ExecReload=/usr/local/openresty/nginx/sbin/nginx -p /usr/local/kong -c nginx.conf -s reload +ExecStop=/bin/kill -s QUIT $MAINPID +PrivateTmp=true + +# All environment variables prefixed with `KONG_` and capitalized will override +# the settings specified in the `/etc/kong/kong.conf.default` file. +# +# For example: +# `log_level = debug` in the .conf file -> `KONG_LOG_LEVEL=debug` env var. 
+Environment=KONG_NGINX_DAEMON=off + +# You can control this limit through /etc/security/limits.conf +LimitNOFILE=infinity + +[Install] +WantedBy=multi-user.target diff --git a/build/package/nfpm.yaml b/build/package/nfpm.yaml new file mode 100644 index 00000000000..7c3940903cb --- /dev/null +++ b/build/package/nfpm.yaml @@ -0,0 +1,73 @@ +name: "${KONG_NAME}" +arch: ${ARCH} +platform: "linux" +version: "${KONG_VERSION}" +section: "default" +priority: "extra" +provides: +- kong +- lapis +- luarocks +- luarocks-admin +maintainer: "Kong Inc. " +description: | + Kong is a distributed gateway for APIs and Microservices, focused on high performance and reliability. +vendor: "Kong Inc." +license: "Apache-2.0" +contents: +- src: nfpm-prefix/bin + dst: /usr/local/bin +- src: nfpm-prefix/kong + dst: /usr/local/kong +- src: nfpm-prefix/lib + dst: /usr/local/lib +- src: nfpm-prefix/etc/luarocks + dst: /usr/local/etc/luarocks +- src: nfpm-prefix/openresty + dst: /usr/local/openresty +- src: nfpm-prefix/share + dst: /usr/local/share +- src: nfpm-prefix/etc/kong + dst: /etc/kong +- src: bin/kong + dst: /usr/local/bin/kong +- src: build/package/kong.logrotate + dst: /etc/kong/kong.logrotate +scripts: + postinstall: ./build/package/postinstall.sh +replaces: +- ${KONG_REPLACES_1} +- ${KONG_REPLACES_2} +conflicts: +- ${KONG_CONFLICTS_1} +- ${KONG_CONFLICTS_2} +overrides: + deb: + depends: + - ca-certificates + - libpcre3 + - perl + - libyaml-0-2 + - zlib1g-dev + rpm: + depends: + - ca-certificates + - pcre + - perl + - perl-Time-HiRes + - zlib + - libyaml + # Workaround for https://github.com/goreleaser/nfpm/issues/589 + - ${RPM_EXTRA_DEPS} + - ${RPM_EXTRA_DEPS_2} + - ${RPM_EXTRA_DEPS_3} + apk: + depends: + - ca-certificates + +rpm: + signature: + # PGP secret key (can also be ASCII-armored), the passphrase is taken + # from the environment variable $NFPM_RPM_PASSPHRASE with a fallback + # to $NFPM_PASSPHRASE. 
+ key_file: ${RPM_SIGNING_KEY_FILE} diff --git a/build/package/postinstall.sh b/build/package/postinstall.sh new file mode 100644 index 00000000000..3a1bc9178b3 --- /dev/null +++ b/build/package/postinstall.sh @@ -0,0 +1,30 @@ +create_user() { + groupadd -f kong + useradd -g kong -s /bin/sh -c "Kong default user" kong + + FILES="" + FILES="${FILES} /etc/kong/" + FILES="${FILES} /usr/local/bin/json2lua" + FILES="${FILES} /usr/local/bin/kong" + FILES="${FILES} /usr/local/bin/lapis" + FILES="${FILES} /usr/local/bin/lua2json" + FILES="${FILES} /usr/local/bin/luarocks" + FILES="${FILES} /usr/local/bin/luarocks-admin" + FILES="${FILES} /usr/local/bin/openapi2kong" + FILES="${FILES} /usr/local/etc/luarocks/" + FILES="${FILES} /usr/local/etc/passwdqc/" + FILES="${FILES} /usr/local/kong/" + FILES="${FILES} /usr/local/lib/lua/" + FILES="${FILES} /usr/local/lib/luarocks/" + FILES="${FILES} /usr/local/openresty/" + FILES="${FILES} /usr/local/share/lua/" + + for FILE in ${FILES}; do + chown -R kong:kong ${FILE} + chmod -R g=u ${FILE} + done + + return 0 +} + +create_user > /dev/null 2>&1 diff --git a/build/platforms/distro/BUILD b/build/platforms/distro/BUILD new file mode 100644 index 00000000000..4816ca427a8 --- /dev/null +++ b/build/platforms/distro/BUILD @@ -0,0 +1,37 @@ +constraint_setting(name = "distro") + +constraint_value( + name = "generic", + constraint_setting = ":distro", + visibility = ["//visibility:public"], +) + +constraint_value( + name = "alpine", + constraint_setting = ":distro", + visibility = ["//visibility:public"], +) + +constraint_value( + name = "rhel9", + constraint_setting = ":distro", + visibility = ["//visibility:public"], +) + +constraint_value( + name = "rhel8", + constraint_setting = ":distro", + visibility = ["//visibility:public"], +) + +constraint_value( + name = "aws2023", + constraint_setting = ":distro", + visibility = ["//visibility:public"], +) + +constraint_value( + name = "aws2", + constraint_setting = ":distro", + visibility = ["//visibility:public"], +) diff --git a/build/repositories.bzl b/build/repositories.bzl new file mode 100644 index 00000000000..60550ae15fd --- /dev/null +++ b/build/repositories.bzl @@ -0,0 +1,73 @@ +"""A module defining the third party dependency OpenResty""" + +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") +load("//build/luarocks:luarocks_repositories.bzl", "luarocks_repositories") +load("//build/cross_deps:repositories.bzl", "cross_deps_repositories") +load("//build:build_system.bzl", "github_release") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +_SRCS_BUILD_FILE_CONTENT = """ +filegroup( + name = "all_srcs", + srcs = glob(["**"]), + visibility = ["//visibility:public"], +) +""" + +_DIST_BUILD_FILE_CONTENT = """ +filegroup( + name = "dist_files", + srcs = ["dist"], + visibility = ["//visibility:public"], +) +""" + +def github_cli_repositories(): + """Defines the github cli repositories""" + + gh_matrix = [ + ["linux", "amd64", "tar.gz", "5aee45bd42a27f5be309373c326e45cbcc7f04591b1798581a3094af767225b7"], + ["linux", "arm64", "tar.gz", "3ef741bcc1ae8bb975adb79a78e26ab7f18a246197f193aaa8cb5c3bdc373a3f"], + ["macOS", "amd64", "zip", "6b91c446586935de0e9df82da58309b2d1b83061cfcd4cc173124270f1277ca7"], + ["macOS", "arm64", "zip", "32a71652367f3cf664894456e4c4f655faa95964d71cc3a449fbf64bdce1fff1"], + ] + for name, arch, type, sha in gh_matrix: + http_archive( + name = 
"gh_%s_%s" % (name, arch), + url = "https://github.com/cli/cli/releases/download/v2.30.0/gh_2.30.0_%s_%s.%s" % (name, arch, type), + strip_prefix = "gh_2.30.0_%s_%s" % (name, arch), + sha256 = sha, + build_file_content = _SRCS_BUILD_FILE_CONTENT, + ) + +def protoc_repositories(): + http_archive( + name = "protoc", + url = "https://github.com/protocolbuffers/protobuf/releases/download/v3.19.0/protoc-3.19.0-linux-x86_64.zip", + sha256 = "2994b7256f7416b90ad831dbf76a27c0934386deb514587109f39141f2636f37", + build_file_content = """ +filegroup( + name = "all_srcs", + srcs = ["include"], + visibility = ["//visibility:public"], +)""", + ) + +def kong_resty_websocket_repositories(): + new_git_repository( + name = "lua-resty-websocket", + branch = KONG_VAR["LUA_RESTY_WEBSOCKET"], + remote = "https://github.com/Kong/lua-resty-websocket", + build_file_content = _SRCS_BUILD_FILE_CONTENT, + ) + +def build_repositories(): + luarocks_repositories() + + github_cli_repositories() + + protoc_repositories() + + cross_deps_repositories() diff --git a/build/templates/venv-commons b/build/templates/venv-commons new file mode 100644 index 00000000000..7fcf2b932d4 --- /dev/null +++ b/build/templates/venv-commons @@ -0,0 +1,65 @@ +#!/bin/bash + +# template variables starts +workspace_path="{{workspace_path}}" +# template variables ends + +if [ "$#" -ne 2 ]; then + echo "Usage: $0 KONG_VENV KONG_VENV_ENV_FILE" + exit 1 +fi + +KONG_VENV=$1 +KONG_VENV_ENV_FILE=$2 + +# clear the file +>| $KONG_VENV_ENV_FILE + +# use env vars to let Fish shell happy, we will unset them later +LUAROCKS_CONFIG="$KONG_VENV/rocks_config" +ROCKS_ROOT="$KONG_VENV" + +chmod -R a+rw "$KONG_VENV" + +mkdir -p "$KONG_VENV/venv/bin" + +echo "#!/bin/bash +$KONG_VENV/openresty/bin/resty -I $KONG_VENV/openresty/site/lualib -I $KONG_VENV/openresty/lualib --nginx $KONG_VENV/openresty/nginx/sbin/nginx \"\$@\" +" >| "$KONG_VENV/venv/bin/resty" +chmod +x "$KONG_VENV/venv/bin/resty" + +echo " +rocks_trees = { + { name = [[system]], root = [[$ROCKS_ROOT]] } +} +" >| "$LUAROCKS_CONFIG" + +# duplicate package.[c]path even though we have set in resty-cli, so luajit and kong can consume +LUA_PATH="\ +$ROCKS_ROOT/share/lua/5.1/?.lua;$ROCKS_ROOT/share/lua/5.1/?.ljbc;\ +$ROCKS_ROOT/share/lua/5.1/?/init.lua;$ROCKS_ROOT/share/lua/5.1/?/init.ljbc;\ +$KONG_VENV/openresty/site/lualib/?.lua;$KONG_VENV/openresty/site/lualib/?.ljbc;\ +$KONG_VENV/openresty/site/lualib/?/init.lua;$KONG_VENV/openresty/site/lualib/?/init.ljbc;\ +$KONG_VENV/openresty/lualib/?.lua;$KONG_VENV/openresty/lualib/?.ljbc;\ +$KONG_VENV/openresty/lualib/?/init.lua;$KONG_VENV/openresty/lualib/?/init.ljbc;\ +$KONG_VENV/openresty/luajit/share/luajit-2.1.0-beta3/?.lua" + +# support custom plugin development +if [ -n $KONG_PLUGIN_PATH ] ; then + LUA_PATH="$KONG_PLUGIN_PATH/?.lua;$KONG_PLUGIN_PATH/?/init.lua;$LUA_PATH" +fi +# default; duplicate of 'lua_package_path' in kong.conf and nginx_kong.lua +LUA_PATH="./?.lua;./?/init.lua;$LUA_PATH;;" + +# write envs to env file +cat >> $KONG_VENV_ENV_FILE </dev/null && stop_services + + unset -f deactivate + unset -f start_services +} + +start_services () { + . $workspace_path/scripts/dependency_services/up.sh + # stop_services is defined by the script above +} + +# actually set env vars +KONG_VENV_ENV_FILE=$(mktemp) +export KONG_VENV_ENV_FILE +bash ${KONG_VENV}-venv/lib/venv-commons $KONG_VENV $KONG_VENV_ENV_FILE +. 
$KONG_VENV_ENV_FILE + +# set shell prompt +if [ -z "${KONG_VENV_DISABLE_PROMPT-}" ] ; then + if [ -n "${_OLD_KONG_VENV_PS1}" ]; then + # prepend the old PS1 if this script is called multiple times + PS1="(${build_name}) ${_OLD_KONG_VENV_PS1}" + else + _OLD_KONG_VENV_PS1="${PS1-}" + PS1="(${build_name}) ${PS1-}" + fi + export PS1 +fi + +# check wrapper +test -n "$*" && exec "$@" || true diff --git a/build/tests/01-base.sh b/build/tests/01-base.sh new file mode 100755 index 00000000000..ca5829a957f --- /dev/null +++ b/build/tests/01-base.sh @@ -0,0 +1,129 @@ +#!/usr/bin/env bash + +if [ -n "${VERBOSE:-}" ]; then + set -x +fi + +source .requirements +source build/tests/util.sh + +### +# +# user/group +# +### + +# a missing kong user can indicate that the post-install script on rpm/deb +# platforms failed to run properly +msg_test '"kong" user exists' +assert_exec 0 'root' 'getent passwd kong' + +msg_test '"kong" group exists' +assert_exec 0 'root' 'getent group kong' + +### +# +# files and ownership +# +### + +msg_test "/usr/local/kong exists and is owned by kong:root" +assert_exec 0 'kong' "test -O /usr/local/kong || ( rc=\$?; stat '${path}'; exit \$rc )" +assert_exec 0 'root' "test -G /usr/local/kong || ( rc=\$?; stat '${path}'; exit \$rc )" + +msg_test "/usr/local/bin/kong exists and is owned by kong:root" +assert_exec 0 'kong' "test -O /usr/local/kong || ( rc=\$?; stat '${path}'; exit \$rc )" +assert_exec 0 'root' "test -G /usr/local/kong || ( rc=\$?; stat '${path}'; exit \$rc )" + +if alpine; then + # we have never produced real .apk package files for alpine and thus have + # never depended on the kong user/group chown that happens in the + # postinstall script(s) for other package types + # + # if we ever do the work to support real .apk files (with read postinstall + # scripts), we will need to this test + msg_yellow 'skipping file and ownership tests on alpine' +else + for path in \ + /usr/local/bin/luarocks \ + /usr/local/etc/luarocks/ \ + /usr/local/lib/{lua,luarocks}/ \ + /usr/local/openresty/ \ + /usr/local/share/lua/; do + msg_test "${path} exists and is owned by kong:kong" + assert_exec 0 'kong' "test -O ${path} || ( rc=\$?; stat '${path}'; exit \$rc )" + assert_exec 0 'kong' "test -G ${path} || ( rc=\$?; stat '${path}'; exit \$rc )" + done +fi + +msg_test 'default conf file exists and is not empty' +assert_exec 0 'root' "test -s /etc/kong/kong.conf.default" + +msg_test 'default logrotate file exists and is not empty' +assert_exec 0 'root' "test -s /etc/kong/kong.logrotate" + +msg_test 'plugin proto file exists and is not empty' +assert_exec 0 'root' "test -s /usr/local/kong/include/pluginsocket.proto" + +msg_test 'protobuf files exist and are not empty' +assert_exec 0 'root' "for f in /usr/local/kong/include/google/protobuf/*.proto; do test -s \$f; done" + +msg_test 'ssl header files exist and are not empty' +assert_exec 0 'root' "for f in /usr/local/kong/include/openssl/*.h; do test -s \$f; done" + +### +# +# OpenResty binaries/tools +# +### + +msg_test 'openresty binary is expected version' +assert_exec 0 'root' "/usr/local/openresty/bin/openresty -v 2>&1 | grep '${OPENRESTY}'" + +# linking to a non-kong-provided luajit library can indicate the package was +# created on a dev workstation where luajit/openresty was installed manually +# and probably shouldn't be shipped to customers +msg_test 'openresty binary is linked to kong-provided luajit library' +assert_exec 0 'root' "ldd /usr/local/openresty/bin/openresty | grep -E 'libluajit-.*openresty/luajit/lib'" + +# if libpcre 
appears in the ldd output for the openresty binary, static linking +# of it during the compile of openresty may have failed +msg_test 'openresty binary is NOT linked to external PCRE' +assert_exec 0 'root' "ldd /usr/local/openresty/bin/openresty | grep -ov 'libpcre.so'" + +msg_test 'openresty binary compiled with LuaJIT PCRE support' +assert_exec 0 'root' "/usr/local/openresty/bin/openresty -V 2>&1 | grep '\-\-with-pcre-jit'" + +msg_test 'resty CLI can be run by kong user' +assert_exec 0 'kong' "/usr/local/openresty/bin/resty -e 'print(jit.version)'" + +msg_test 'resty CLI functions and returns valid version of LuaJIT' +assert_exec 0 'root' "/usr/local/openresty/bin/resty -e 'print(jit.version)' | grep -E 'LuaJIT\ ([0-9]\.*){3}\-beta[0-9]+'" + +### +# +# SSL verification +# +### + +# check which ssl openresty is using +if docker_exec root '/usr/local/openresty/bin/openresty -V 2>&1' | grep 'BoringSSL'; then + msg_test 'openresty binary uses expected boringssl version' + assert_exec 0 'root' "/usr/local/openresty/bin/openresty -V 2>&1 | grep '1.1.0'" +else + msg_test 'openresty binary uses expected openssl version' + assert_exec 0 'root' "/usr/local/openresty/bin/openresty -V 2>&1 | grep '${OPENSSL}'" +fi + +msg_test 'openresty binary is linked to kong-provided ssl libraries' +assert_exec 0 'root' "ldd /usr/local/openresty/bin/openresty | grep -E 'libssl.so.*kong/lib'" +assert_exec 0 'root' "ldd /usr/local/openresty/bin/openresty | grep -E 'libcrypto.so.*kong/lib'" + +### +# +# LuaRocks +# +### + +msg_test 'lua-resty-websocket lua files exist and contain a version' +assert_exec 0 'root' 'grep _VERSION /usr/local/openresty/lualib/resty/websocket/*.lua' diff --git a/build/tests/02-admin-api.sh b/build/tests/02-admin-api.sh new file mode 100755 index 00000000000..89d80df7cf3 --- /dev/null +++ b/build/tests/02-admin-api.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +if [ -n "${VERBOSE:-}" ]; then + set -x +fi + +source .requirements +source build/tests/util.sh + +service_name="$(random_string)" +route_name="$(random_string)" + +kong_ready + +msg_test "Check admin API is alive" +assert_response "${KONG_ADMIN_URI}" "200" + +msg_test "Create a service" +assert_response "-d name=${service_name} -d url=http://127.0.0.1:8001 ${KONG_ADMIN_URI}/services" "201" + +msg_test "List services" +assert_response "${KONG_ADMIN_URI}/services" "200" + +msg_test "Create a route" +assert_response "-d name=${route_name} -d paths=/anything ${KONG_ADMIN_URI}/services/${service_name}/routes" "201" + +msg_test "List routes" +assert_response "${KONG_ADMIN_URI}/services/${service_name}/routes" "200" + +msg_test "List services" +assert_response "${KONG_ADMIN_URI}/services" "200" + +msg_test "Proxy a request" +assert_response "${KONG_PROXY_URI}/anything" "200" + +if [[ "$EDITION" == "enterprise" ]]; then + it_runs_free_enterprise +fi diff --git a/build/tests/03-http2-admin-api.sh b/build/tests/03-http2-admin-api.sh new file mode 100755 index 00000000000..c60d63fa333 --- /dev/null +++ b/build/tests/03-http2-admin-api.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +if [ -n "${VERBOSE:-}" ]; then + set -x +fi + +source .requirements +source build/tests/util.sh + +kong_ready + +msg_test "Check if cURL supports HTTP/2" +if ! 
curl --version | grep -i "http2" > /dev/null; then + err_exit " local cURL does not support HTTP/2" +fi + +msg_test "Check HTTP/2 Admin API response is valid" +admin_api_http2_validity diff --git a/build/tests/util.sh b/build/tests/util.sh new file mode 100755 index 00000000000..18c88203347 --- /dev/null +++ b/build/tests/util.sh @@ -0,0 +1,174 @@ +#!/usr/bin/env bash + +KONG_ADMIN_URI=${KONG_ADMIN_URI:-"http://localhost:8001"} +KONG_ADMIN_HTTP2_URI=${KONG_ADMIN_HTTP2_URI:-"https://localhost:8444"} +KONG_PROXY_URI=${KONG_PROXY_URI:-"http://localhost:8000"} + +set_x_flag='' +if [ -n "${VERBOSE:-}" ]; then + set -x + set_x_flag='-x' +fi + +msg_test() { + builtin echo -en "\033[1;34m" >&1 + echo -n "===> " + builtin echo -en "\033[1;36m" >&1 + echo -e "$@" + builtin echo -en "\033[0m" >&1 +} + +msg_red() { + builtin echo -en "\033[1;31m" >&2 + echo -e "$@" + builtin echo -en "\033[0m" >&2 +} + +msg_yellow() { + builtin echo -en "\033[1;33m" >&1 + echo -e "$@" + builtin echo -en "\033[0m" >&1 +} + +err_exit() { + msg_red "$@" + exit 1 +} + +random_string() { + echo "a$(shuf -er -n19 {A..Z} {a..z} {0..9} | tr -d '\n')" +} + +kong_ready() { + local TIMEOUT_SECONDS=$((15)) + while [[ "$(curl -s -o /dev/null -w "%{http_code}" localhost:8000)" != 404 ]]; do + sleep 5; + COUNTER=$((COUNTER + 5)) + + if (( COUNTER >= TIMEOUT_SECONDS )) + then + printf '\xe2\x98\x93 ERROR: Timed out waiting for %s' "$KONG" + exit 1 + fi + done +} + +docker_exec() { + local user="${1:-kong}" + + shift + + test -t 1 && USE_TTY='-t' + + # shellcheck disable=SC2086 + docker exec --user="$user" ${USE_TTY} kong sh ${set_x_flag} -c "$@" +} + +_os() { + local os="$1" + + if docker_exec 'root' 'uname -a' | grep -qsi "$os"; then + return + else + docker_exec 'root' "grep -qsi '${os}' /etc/os-release" + return $? + fi +} + +alpine() { + _os 'alpine' +} + +assert_same() { + local expected=$(echo "$1" | tr -d '[:space:]') + local actual=$(echo "$2" | tr -d '[:space:]') + + if [ "$expected" != "$actual" ]; then + err_exit " expected $expected, got $actual" + fi +} + +assert_contains() { + local expected=$(echo "$1" | tr -d '[:space:]') + local actual="$2" + + if ! echo "$actual" | grep -q "$expected"; then + err_exit " expected $expected in $actual but not found" + fi +} + +assert_response() { + local endpoint=$1 + local expected_codes=$2 + local resp_code + COUNTER=20 + while : ; do + for code in ${expected_codes}; do + # shellcheck disable=SC2086 + resp_code=$(curl -s -o /dev/null -w "%{http_code}" ${endpoint}) + [ "$resp_code" == "$code" ] && break 2 + done + ((COUNTER-=1)) + [ "$COUNTER" -lt 1 ] && break + sleep 0.5 # 10 seconds max + done + for code in ${expected_codes}; do + [ "$resp_code" == "$code" ] && return + done || err_exit " expected $2, got $resp_code" +} + +assert_exec() { + local expected_code="${1:-0}" + local user="${2:-kong}" + + shift 2 + + ( + docker_exec "$user" "$@" + echo "$?" > /tmp/rc + ) | while read -r line; do printf ' %s\n' "$line"; done + + rc="$(cat /tmp/rc)" + + if ! 
[ "$rc" == "$expected_code" ]; then + err_exit " expected ${expected_code}, got ${rc}" + fi +} + +it_runs_free_enterprise() { + info=$(curl "$KONG_ADMIN_URI") + msg_test "it does not have ee-only plugins" + [ "$(echo "$info" | jq -r .plugins.available_on_server.canary)" != "true" ] + msg_test "it does not enable vitals" + [ "$(echo "$info" | jq -r .configuration.vitals)" == "false" ] + msg_test "workspaces are not writable" + assert_response "$KONG_ADMIN_URI/workspaces -d name=$(random_string)" "403" +} + +it_runs_full_enterprise() { + info=$(curl "$KONG_ADMIN_URI") + msg_test "it does have ee-only plugins" + [ "$(echo "$info" | jq -r .plugins.available_on_server | jq -r 'has("canary")')" == "true" ] + msg_test "it does enable vitals" + [ "$(echo "$info" | jq -r .configuration.vitals)" == "true" ] + msg_test "workspaces are writable" + assert_response "$KONG_ADMIN_URI/workspaces -d name=$(random_string)" "201" +} + +admin_api_http2_validity() { + output=$(mktemp) + header_dump=$(mktemp) + status=$(curl -ks -D "$header_dump" -o "$output" -w '%{http_code}' "$KONG_ADMIN_HTTP2_URI") + + msg_test "it returns with response status code 200" + assert_same "200" "$status" + + msg_test "it returns with response header content-type application/json" + assert_contains "application/json" "$(cat "$header_dump" | grep -i content-type | tr -d '[:space:]')" + + msg_test "it returns a response body with correct length" + assert_same "$(wc -c < "$output")" "$(cat "$header_dump" | grep -i content-length | cut -d' ' -f2 | tr -d '[:space:]')" + + msg_test "the response body is valid json and has valid json schema" + jq . "$output" > /dev/null || err_exit " response body is not valid json" +} diff --git a/build/toolchain/.gitignore b/build/toolchain/.gitignore new file mode 100644 index 00000000000..3d057f524bf --- /dev/null +++ b/build/toolchain/.gitignore @@ -0,0 +1 @@ +wrappers-* diff --git a/build/toolchain/BUILD b/build/toolchain/BUILD new file mode 100644 index 00000000000..9b870846acf --- /dev/null +++ b/build/toolchain/BUILD @@ -0,0 +1,75 @@ +load(":cc_toolchain_config.bzl", "cc_toolchain_config") +load(":managed_toolchain.bzl", "aarch64_glibc_distros", "define_managed_toolchain") + +package(default_visibility = ["//visibility:public"]) + +filegroup(name = "empty") + +################### +# aarch64-linux-gnu (installed with system) + +toolchain( + name = "local_aarch64-linux-gnu_toolchain", + exec_compatible_with = [ + "@platforms//os:linux", + "@platforms//cpu:x86_64", + ], + target_compatible_with = [ + "@platforms//os:linux", + "@platforms//cpu:aarch64", + "//build/platforms/distro:generic", + ], + toolchain = ":local_aarch64-linux-gnu_cc_toolchain", + toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", +) + +cc_toolchain_config( + name = "local_aarch64-linux-gnu_cc_toolchain_config", + compiler_configuration = {}, + target_cpu = "aarch64", + toolchain_path_prefix = "/usr/aarch64-linux-gnu/", # is this required? 
+ tools_path_prefix = "/usr/bin/aarch64-linux-gnu-", +) + +cc_toolchain( + name = "local_aarch64-linux-gnu_cc_toolchain", + all_files = ":empty", + compiler_files = ":empty", + dwp_files = ":empty", + linker_files = ":empty", + objcopy_files = ":empty", + strip_files = ":empty", + supports_param_files = 0, + toolchain_config = ":local_aarch64-linux-gnu_cc_toolchain_config", + toolchain_identifier = "local_aarch64-linux-gnu_cc_toolchain", +) + +################### +# managed toolchains (downloaded by Bazel) + +define_managed_toolchain( + arch = "x86_64", + gcc_version = "11", + libc = "musl", + target_compatible_with = ["//build/platforms/distro:alpine"], + vendor = "alpine", +) + +define_managed_toolchain( + arch = "aarch64", + gcc_version = "11", + libc = "musl", + target_compatible_with = ["//build/platforms/distro:alpine"], + vendor = "alpine", +) + +[ + define_managed_toolchain( + arch = "aarch64", + gcc_version = aarch64_glibc_distros[vendor], + libc = "gnu", + target_compatible_with = ["//build/platforms/distro:" + vendor], + vendor = vendor, + ) + for vendor in aarch64_glibc_distros +] diff --git a/build/toolchain/cc_toolchain_config.bzl b/build/toolchain/cc_toolchain_config.bzl new file mode 100644 index 00000000000..38380c4f9f0 --- /dev/null +++ b/build/toolchain/cc_toolchain_config.bzl @@ -0,0 +1,213 @@ +# Copyright 2021 The Bazel Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl", "tool_path") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +# Bazel 4.* doesn't support nested starlark functions, so we cannot simplify +#_fmt_flags() by defining it as a nested function. +def _fmt_flags(flags, toolchain_path_prefix): + return [f.format(toolchain_path_prefix = toolchain_path_prefix) for f in flags] + +# Macro for calling cc_toolchain_config from @bazel_tools with setting the +# right paths and flags for the tools. +def _cc_toolchain_config_impl(ctx): + target_cpu = ctx.attr.target_cpu + toolchain_path_prefix = ctx.attr.toolchain_path_prefix + tools_path_prefix = ctx.attr.tools_path_prefix + compiler_configuration = ctx.attr.compiler_configuration + + # Unfiltered compiler flags; these are placed at the end of the command + # line, so take precendence over any user supplied flags through --copts or + # such. + unfiltered_compile_flags = [ + # Do not resolve our symlinked resource prefixes to real paths. 
+ "-no-canonical-prefixes", + # Reproducibility + "-Wno-builtin-macro-redefined", + "-D__DATE__=\"redacted\"", + "-D__TIMESTAMP__=\"redacted\"", + "-D__TIME__=\"redacted\"", + "-fdebug-prefix-map={}=__bazel_toolchain__/".format(toolchain_path_prefix), + ] + + # Default compiler flags: + compile_flags = [ + # "--target=" + target_system_name, + # Security + "-U_FORTIFY_SOURCE", # https://github.com/google/sanitizers/issues/247 + "-fstack-protector", + "-fno-omit-frame-pointer", + # Diagnostics + "-fcolor-diagnostics", + "-Wall", + "-Wthread-safety", + "-Wself-assign", + ] + + dbg_compile_flags = ["-g", "-fstandalone-debug"] + + opt_compile_flags = [ + "-g0", + "-O2", + "-D_FORTIFY_SOURCE=1", + "-DNDEBUG", + "-ffunction-sections", + "-fdata-sections", + ] + + link_flags = [ + # "--target=" + target_system_name, + "-lm", + "-no-canonical-prefixes", + ] + + # Similar to link_flags, but placed later in the command line such that + # unused symbols are not stripped. + link_libs = [] + + # Note that for xcompiling from darwin to linux, the native ld64 is + # not an option because it is not a cross-linker, so lld is the + # only option. + + link_flags.extend([ + "-fuse-ld=lld", + "-Wl,--build-id=md5", + "-Wl,--hash-style=gnu", + "-Wl,-z,relro,-z,now", + ]) + + opt_link_flags = ["-Wl,--gc-sections"] + + # Coverage flags: + coverage_compile_flags = ["-fprofile-instr-generate", "-fcoverage-mapping"] + coverage_link_flags = ["-fprofile-instr-generate"] + + ## NOTE: framework paths is missing here; unix_cc_toolchain_config + ## doesn't seem to have a feature for this. + + # C++ built-in include directories: + cxx_builtin_include_directories = [ + "/usr/" + target_cpu + "-linux-gnu/include", + "/usr/lib/gcc-cross/" + target_cpu + "-linux-gnu/11/include", + ] + + # sysroot_path = compiler_configuration["sysroot_path"] + # sysroot_prefix = "" + # if sysroot_path: + # sysroot_prefix = "%sysroot%" + + # cxx_builtin_include_directories.extend([ + # sysroot_prefix + "/include", + # sysroot_prefix + "/usr/include", + # sysroot_prefix + "/usr/local/include", + # ]) + + if "additional_include_dirs" in compiler_configuration: + cxx_builtin_include_directories.extend(compiler_configuration["additional_include_dirs"]) + + ## NOTE: make variables are missing here; unix_cc_toolchain_config doesn't + ## pass these to `create_cc_toolchain_config_info`. + + # The tool names come from [here](https://github.com/bazelbuild/bazel/blob/c7e58e6ce0a78fdaff2d716b4864a5ace8917626/src/main/java/com/google/devtools/build/lib/rules/cpp/CppConfiguration.java#L76-L90): + # NOTE: Ensure these are listed in toolchain_tools in toolchain/internal/common.bzl. + tool_paths = [ + tool_path( + name = "ar", + path = tools_path_prefix + "ar", + ), + tool_path( + name = "cpp", + path = tools_path_prefix + "g++", + ), + tool_path( + name = "gcc", + path = tools_path_prefix + "gcc", + ), + tool_path( + name = "gcov", + path = tools_path_prefix + "gcov", + ), + tool_path( + name = "ld", + path = tools_path_prefix + ctx.attr.ld, + ), + tool_path( + name = "nm", + path = tools_path_prefix + "nm", + ), + tool_path( + name = "objcopy", + path = tools_path_prefix + "objcopy", + ), + tool_path( + name = "objdump", + path = tools_path_prefix + "objdump", + ), + tool_path( + name = "strip", + path = tools_path_prefix + "strip", + ), + ] + + cxx_flags = [] + + # Replace flags with any user-provided overrides. 
+ if "compile_flags" in compiler_configuration: + compile_flags = _fmt_flags(compiler_configuration["compile_flags"], toolchain_path_prefix) + if "cxx_flags" in compiler_configuration: + cxx_flags = _fmt_flags(compiler_configuration["cxx_flags"], toolchain_path_prefix) + if "link_flags" in compiler_configuration: + link_flags = _fmt_flags(compiler_configuration["link_flags"], toolchain_path_prefix) + if "link_libs" in compiler_configuration: + link_libs = _fmt_flags(compiler_configuration["link_libs"], toolchain_path_prefix) + if "opt_compile_flags" in compiler_configuration: + opt_compile_flags = _fmt_flags(compiler_configuration["opt_compile_flags"], toolchain_path_prefix) + if "opt_link_flags" in compiler_configuration: + opt_link_flags = _fmt_flags(compiler_configuration["opt_link_flags"], toolchain_path_prefix) + if "dbg_compile_flags" in compiler_configuration: + dbg_compile_flags = _fmt_flags(compiler_configuration["dbg_compile_flags"], toolchain_path_prefix) + if "coverage_compile_flags" in compiler_configuration: + coverage_compile_flags = _fmt_flags(compiler_configuration["coverage_compile_flags"], toolchain_path_prefix) + if "coverage_link_flags" in compiler_configuration: + coverage_link_flags = _fmt_flags(compiler_configuration["coverage_link_flags"], toolchain_path_prefix) + if "unfiltered_compile_flags" in compiler_configuration: + unfiltered_compile_flags = _fmt_flags(compiler_configuration["unfiltered_compile_flags"], toolchain_path_prefix) + + return cc_common.create_cc_toolchain_config_info( + ctx = ctx, + compiler = "gcc", + toolchain_identifier = target_cpu + "-linux-gnu", + host_system_name = "local", + target_cpu = target_cpu, + target_system_name = target_cpu + "-linux-gnu", + target_libc = ctx.attr.target_libc, + # abi_version = "unknown", + # abi_libc_version = "unknown", + cxx_builtin_include_directories = cxx_builtin_include_directories, + tool_paths = tool_paths, + ) + +cc_toolchain_config = rule( + implementation = _cc_toolchain_config_impl, + attrs = { + "target_cpu": attr.string(), + "toolchain_path_prefix": attr.string(), + "tools_path_prefix": attr.string(), + "compiler_configuration": attr.string_list_dict(allow_empty = True, default = {}), + "target_libc": attr.string(default = "gnu"), + "ld": attr.string(default = "gcc"), + }, + provides = [CcToolchainConfigInfo], +) diff --git a/build/toolchain/generate_wrappers.sh b/build/toolchain/generate_wrappers.sh new file mode 100755 index 00000000000..93b2b83d4a7 --- /dev/null +++ b/build/toolchain/generate_wrappers.sh @@ -0,0 +1,31 @@ +#!/bin/bash -e + +name=$1 +wrapper=$2 +prefix=$3 +dummy_file=$4 + +if [[ -z $name || -z $wrapper || -z $prefix ]]; then + echo "Usage: $0 " + exit 1 +fi + +cwd=$(realpath $(dirname $(readlink -f ${BASH_SOURCE[0]}))) +dir=wrappers-$name +mkdir -p $cwd/$dir +cp $wrapper $cwd/$dir/ +chmod 755 $cwd/$dir/wrapper + +pushd $cwd/$dir >/dev/null + +tools="addr2line ar as c++ cc@ c++filt cpp dwp elfedit g++ gcc gcc-ar gcc-nm gcc-ranlib gcov gcov-dump gcov-tool gfortran gprof ld ld.bfd ld.gold lto-dump nm objcopy objdump ranlib readelf size strings strip" +for tool in $tools; do + ln -sf wrapper $prefix$tool +done + +popd >/dev/null + +if [[ -n $dummy_file ]]; then + touch $dummy_file +fi + diff --git a/build/toolchain/managed_toolchain.bzl b/build/toolchain/managed_toolchain.bzl new file mode 100644 index 00000000000..793b335924e --- /dev/null +++ b/build/toolchain/managed_toolchain.bzl @@ -0,0 +1,152 @@ +load(":cc_toolchain_config.bzl", "cc_toolchain_config") + +aarch64_glibc_distros = { 
+ "rhel9": "11", + "rhel8": "8", + "aws2023": "11", + "aws2": "7", +} + +def _generate_wrappers_impl(ctx): + wrapper_file = ctx.actions.declare_file("wrapper") + ctx.actions.expand_template( + template = ctx.file._wrapper_template, + output = wrapper_file, + substitutions = { + "{{TOOLCHAIN_NAME}}": ctx.attr.toolchain_name, + }, + is_executable = True, + ) + + dummy_output = ctx.actions.declare_file(ctx.attr.name + ".wrapper-marker") + + ctx.actions.run_shell( + command = "build/toolchain/generate_wrappers.sh %s %s %s %s" % ( + ctx.attr.toolchain_name, + wrapper_file.path, + ctx.attr.tools_prefix, + dummy_output.path, + ), + progress_message = "Create wrappers for " + ctx.attr.toolchain_name, + inputs = [wrapper_file], + outputs = [dummy_output], + ) + + return [DefaultInfo(files = depset([dummy_output, wrapper_file]))] + +generate_wrappers = rule( + implementation = _generate_wrappers_impl, + attrs = { + "toolchain_name": attr.string(mandatory = True), + "tools_prefix": attr.string(mandatory = True), + "_wrapper_template": attr.label( + default = "//build/toolchain:templates/wrapper", + allow_single_file = True, + ), + }, +) + +def define_managed_toolchain( + name = None, + arch = "x86_64", + vendor = "unknown", + libc = "gnu", + gcc_version = "11", + ld = "gcc", + target_compatible_with = []): + identifier = "{arch}-{vendor}-linux-{libc}-gcc-{gcc_version}".format( + arch = arch, + vendor = vendor, + libc = libc, + gcc_version = gcc_version, + ) + + tools_prefix = "{arch}-{vendor}-linux-{libc}-".format( + arch = arch, + vendor = vendor, + libc = libc, + ) + + native.toolchain( + name = "%s_toolchain" % identifier, + exec_compatible_with = [ + "@platforms//os:linux", + "@platforms//cpu:x86_64", + ], + target_compatible_with = [ + "@platforms//os:linux", + "@platforms//cpu:%s" % arch, + ] + target_compatible_with, + toolchain = ":%s_cc_toolchain" % identifier, + toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", + ) + + cc_toolchain_config( + name = "%s_cc_toolchain_config" % identifier, + ld = ld, + target_cpu = arch, + target_libc = libc, + toolchain_path_prefix = "wrappers-%s/" % identifier, # is this required? 
+ tools_path_prefix = "wrappers-%s/%s" % (identifier, tools_prefix), + ) + + generate_wrappers( + name = "%s_wrappers" % identifier, + toolchain_name = identifier, + tools_prefix = tools_prefix, + ) + + native.filegroup( + name = "%s_files" % identifier, + srcs = [ + ":%s_wrappers" % identifier, + "@%s//:toolchain" % identifier, + ], + ) + + native.cc_toolchain( + name = "%s_cc_toolchain" % identifier, + all_files = ":%s_files" % identifier, + compiler_files = ":%s_files" % identifier, + dwp_files = ":empty", + linker_files = "%s_files" % identifier, + objcopy_files = ":empty", + strip_files = ":empty", + supports_param_files = 0, + toolchain_config = ":%s_cc_toolchain_config" % identifier, + toolchain_identifier = "%s_cc_toolchain" % identifier, + ) + +def register_managed_toolchain(name = None, arch = "x86_64", vendor = "unknown", libc = "gnu", gcc_version = "11"): + identifier = "{arch}-{vendor}-linux-{libc}-gcc-{gcc_version}".format( + arch = arch, + vendor = vendor, + libc = libc, + gcc_version = gcc_version, + ) + native.register_toolchains("//build/toolchain:%s_toolchain" % identifier) + +def register_all_toolchains(name = None): + native.register_toolchains("//build/toolchain:local_aarch64-linux-gnu_toolchain") + + register_managed_toolchain( + arch = "x86_64", + gcc_version = "11", + libc = "musl", + vendor = "alpine", + ) + + register_managed_toolchain( + arch = "aarch64", + gcc_version = "11", + libc = "musl", + vendor = "alpine", + ) + + for vendor in aarch64_glibc_distros: + register_managed_toolchain( + arch = "aarch64", + gcc_version = aarch64_glibc_distros[vendor], + libc = "gnu", + vendor = vendor, + ) diff --git a/build/toolchain/repositories.bzl b/build/toolchain/repositories.bzl new file mode 100644 index 00000000000..19e7e2510ee --- /dev/null +++ b/build/toolchain/repositories.bzl @@ -0,0 +1,70 @@ +"""A module defining the third party dependency OpenResty""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +build_file_content = """ +filegroup( + name = "toolchain", + srcs = glob( + include = [ + "bin/**", + "include/**", + "lib/**", + "libexec/**", + "share/**", + "*-linux-*/**", + ], + exclude = ["usr"], + ), + visibility = ["//visibility:public"], +) +""" + +def toolchain_repositories(): + http_archive( + name = "x86_64-alpine-linux-musl-gcc-11", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.4.0/x86_64-alpine-linux-musl-gcc-11.tar.gz", + sha256 = "4fbc9a48f1f7ace6d2a19a1feeac1f69cf86ce8ece40b101e351d1f703b3560c", + strip_prefix = "x86_64-alpine-linux-musl", + build_file_content = build_file_content, + ) + + http_archive( + name = "aarch64-alpine-linux-musl-gcc-11", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.4.0/aarch64-alpine-linux-musl-gcc-11.tar.gz", + sha256 = "abd7003fc4aa6d533c5aad97a5726040137f580026b1db78d3a8059a69c3d45b", + strip_prefix = "aarch64-alpine-linux-musl", + build_file_content = build_file_content, + ) + + http_archive( + name = "aarch64-rhel9-linux-gnu-gcc-11", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.5.0/aarch64-rhel9-linux-gnu-glibc-2.34-gcc-11.tar.gz", + sha256 = "40fcf85e8315869621573512499aa3e2884283e0054dfefc2bad3bbf21b954c0", + strip_prefix = "aarch64-rhel9-linux-gnu", + build_file_content = build_file_content, + ) + + http_archive( + name = "aarch64-rhel8-linux-gnu-gcc-8", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.5.0/aarch64-rhel8-linux-gnu-glibc-2.28-gcc-8.tar.gz", + sha256 = 
"7a9a28ccab6d3b068ad49b2618276707e0a31b437ad010c8969ba8660ddf63fb", + strip_prefix = "aarch64-rhel8-linux-gnu", + build_file_content = build_file_content, + ) + + http_archive( + name = "aarch64-aws2023-linux-gnu-gcc-11", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.5.0/aarch64-aws2023-linux-gnu-glibc-2.34-gcc-11.tar.gz", + sha256 = "01498b49c20255dd3d5da733fa5d60b5dad4b1cdd55e50552d8f2867f3d82e98", + strip_prefix = "aarch64-aws2023-linux-gnu", + build_file_content = build_file_content, + ) + + http_archive( + name = "aarch64-aws2-linux-gnu-gcc-7", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.5.0/aarch64-aws2-linux-gnu-glibc-2.26-gcc-7.tar.gz", + sha256 = "9a8d0bb84c3eea7b662192bf44aaf33a76c9c68848a68a544a91ab90cd8cba60", + strip_prefix = "aarch64-aws2-linux-gnu", + build_file_content = build_file_content, + ) diff --git a/build/toolchain/templates/wrapper b/build/toolchain/templates/wrapper new file mode 100644 index 00000000000..cb52306cfca --- /dev/null +++ b/build/toolchain/templates/wrapper @@ -0,0 +1,14 @@ +#!/bin/bash + +PREFIX= +if [[ ! -z ${EXT_BUILD_ROOT} ]]; then + PREFIX=${EXT_BUILD_ROOT}/ +elif [[ ! -e external/{{TOOLCHAIN_NAME}}/bin ]]; then + echo "EXT_BUILD_ROOT is not set and wrapper can't find the toolchain, is this script running with the correct environment (foreign_cc rules, cc_* rules)?" + exit 1 +fi + +NAME=$(/usr/bin/basename "$0") +TOOLCHAIN_BINDIR=${PREFIX}external/{{TOOLCHAIN_NAME}}/bin + +exec "${TOOLCHAIN_BINDIR}"/"${NAME}" "$@" \ No newline at end of file diff --git a/changelog/Makefile b/changelog/Makefile new file mode 100644 index 00000000000..68a9b9e9132 --- /dev/null +++ b/changelog/Makefile @@ -0,0 +1,95 @@ +# SHELL := $(shell which bash) +# $(info Use shell $(SHELL)) + +OWNER_REPO := Kong/kong +BASE_BRANCH ?= release/3.6.x +VERSION ?= 3.6.0 +DEBUG ?= false +UNRELEASED_DIR ?= unreleased + +BRANCH_NAME := generate-$(VERSION)-changelog +ORIGIN_BRANCH := origin/$(BASE_BRANCH) + +.PHONY: all check_tools check_version create_branch generate push_changelog create_pr + +all: check_tools check_version create_branch generate push_changelog create_pr +no_pr: check_tools check_version create_branch generate push_changelog + +REQUIRED_TOOLS := git changelog curl jq +check_tools: + $(foreach cmd,$(REQUIRED_TOOLS), \ + $(if $(shell command -v $(cmd) 2>/dev/null), $(info $(cmd) found), \ + $(error command '$(cmd)' command not found) \ + ) \ + ) +ifndef GITHUB_TOKEN + $(error environment variable GITHUB_TOKEN not found) +else + $(info GITHUB_TOKEN found) +endif + +BINARY_VERSION := $(shell changelog -v | awk '{print $$3}') +BAD_VERSION := 0.0.1 +REQUIRED_VERSION := 0.0.2 +check_version: + @if [ $(BINARY_VERSION) = $(BAD_VERSION) ] ; then \ + echo "changelog version is $(BINARY_VERSION). Upgrade to $(REQUIRED_VERSION) at least." ; \ + false ; \ + else \ + echo "all required tools satisfied" ; \ + fi + +create_branch: + @git fetch --prune + @git submodule update --init --recursive + @git checkout -B $(BRANCH_NAME) $(ORIGIN_BRANCH) + +generate: + @rm -f $(VERSION).md + @touch $(VERSION).md + + @if [ -n "$$(shopt -s nullglob; echo $(UNRELEASED_DIR)/kong/*.yml)" ] || \ + [ -n "$$(shopt -s nullglob; echo $(VERSION)/kong/*.yml)" ] ; then \ + changelog --debug=$(DEBUG) generate \ + --repo-path . 
\
+      --changelog-paths $(VERSION)/kong,$(UNRELEASED_DIR)/kong \
+      --title Kong \
+      --github-issue-repo $(OWNER_REPO) \
+      --github-api-repo $(OWNER_REPO) \
+      --with-jiras \
+      >> $(VERSION).md; \
+  fi
+  @if [ -n "$$(shopt -s nullglob; echo $(UNRELEASED_DIR)/kong-manager/*.yml)" ] || \
+    [ -n "$$(shopt -s nullglob; echo $(VERSION)/kong-manager/*.yml)" ] ; then \
+    changelog --debug=$(DEBUG) generate \
+      --repo-path . \
+      --changelog-paths $(VERSION)/kong-manager,$(UNRELEASED_DIR)/kong-manager \
+      --title Kong-Manager \
+      --github-issue-repo Kong/kong-manager \
+      --github-api-repo $(OWNER_REPO) \
+      --with-jiras \
+      >> $(VERSION).md; \
+  fi
+
+  @echo
+  @echo "Please inspect $(VERSION).md"
+
+push_changelog:
+  @mkdir -p $(VERSION)
+  @mv -f $(VERSION).md $(VERSION)/
+  @for i in kong kong-manager ; do \
+    mkdir -p $(UNRELEASED_DIR)/$$i ; \
+    mkdir -p $(VERSION)/$$i ; \
+    git mv -k $(UNRELEASED_DIR)/$$i/*.yml $(VERSION)/$$i/ ; \
+    touch $(UNRELEASED_DIR)/$$i/.gitkeep ; \
+    touch $(VERSION)/$$i/.gitkeep ; \
+  done
+  @git add .
+  @git commit -m "docs(release): generate $(VERSION) changelog"
+  @git push -fu origin HEAD
+
+  @echo
+  @echo "Successfully pushed $(BRANCH_NAME) to GitHub."
+
+create_pr:
+  @bash create_pr $(OWNER_REPO) $(BASE_BRANCH) $(VERSION) $(BRANCH_NAME)
diff --git a/changelog/README.md b/changelog/README.md
new file mode 100644
index 00000000000..5a9aacc2f6d
--- /dev/null
+++ b/changelog/README.md
@@ -0,0 +1,137 @@
+# Setup
+
+Download the `changelog 0.0.2` binary from [Kong/gateway-changelog](https://github.com/Kong/gateway-changelog/releases),
+or [release-helper](https://github.com/outsinre/release-helper/blob/main/changelog),
+and add it to the `PATH` environment variable.
+
+```bash
+~ $ PATH="/path/to/changelog:$PATH"
+
+~ $ changelog
+changelog version 0.0.2
+```
+
+Ensure `GITHUB_TOKEN` is set in your environment.
+
+```bash
+~ $ echo $GITHUB_TOKEN
+```
+
+# Create changelog PR
+
+The command creates a new changelog PR or updates an existing one.
+Re-run the command whenever functional PRs carrying changelog entries are
+merged after the changelog PR has been created or merged.
+
+The command depends on tools such as `curl` and `jq`, and refuses to
+create or update the changelog PR if any of them is missing.
+
+```bash
+~ $ pwd
+/Users/zachary/workspace/kong/changelog
+
+~ $ make BASE_BRANCH="release/3.6.x" VERSION="3.6.0"
+```
+
+The arguments are described below.
+
+1. `BASE_BRANCH`: the origin branch that the changelog PR is created from. It
+   is also the merge base.
+
+   The local repo does not have to be on the base branch.
+2. `VERSION`: the release version number we are creating the changelog PR for.
+
+   It can be an arbitrary string as long as you know what you are doing (e.g.
+   for testing purposes).
+3. `DEBUG`: shows debug output. Defaults to `false`.
+
+# Verify Development PRs
+
+Given two arbitrary revisions, list the commits, the PRs, the PRs without
+changelog entries, and the PRs without CE2EE.
+
+If a CE PR has neither the 'cherry-pick kong-ee' label nor a
+cross-referenced EE PR with 'cherry' in the title,
+it is most likely not synced to EE. This check is only experimental,
+as developers may not follow the CE2EE guideline.
+However, it is a quick shortcut for validating the majority of CE PRs.
+
+Show the usage.
+
+```bash
+~ $ pwd
+/Users/zachary/workspace/kong
+
+~ $ changelog/verify-prs -h
+Version: 0.1
+ Author: Zachary Hu (zhucac AT outlook.com)
+ Script: Compare between two revisions (e.g. 
tags and branches), and output + commits, PRs, PRs without changelog and CE PRs without CE2EE (experimental). + + A PR should have an associated YML file under 'changelog/unreleased', otherwise + it is printed for verification. + + Regarding CE2EE, if a CE PR has any cross-referenced EE PRs, it is regarded synced + to EE. If strict mode is enabled, associated EE PRs must contain keyword 'cherry' + in the title. If a CE PR is labelled with 'cherry-pick kong-ee', it is regarded synced + to EE. If a CE PR is not synced to EE, it is printed for verification. + + Usage: changelog/verify-prs -h + + -v, --verbose Print debug info. + + --strict-filter When checking if a CE PR is synced to EE, + more strict filters are applied. + + --safe-mode When checking if a CE PR is synced to EE, + check one by one. This overrides '--bulk'. + + --bulk N Number of jobs ran concurrency. Default is '5'. + Adjust this value to your CPU cores. + +Example: + changelog/verify-prs --org-repo kong/kong --base-commit 3.4.2 --head-commit 3.4.3 [--strict-filter] [--bulk 5] [--safe-mode] [-v] + + ORG_REPO=kong/kong BASE_COMMIT=3.4.2 HEAD_COMMIT=3.4.3 changelog/verify-prs +``` + +Run the script. Both `--base-commit` and `--head-commit` can be set to branch names. + +```bash +~ $ pwd +/Users/zachary/workspace/kong + +~ $ changelog/verify-prs --org-repo kong/kong --base-commit 3.4.0 --head-commit 3.5.0 +Org Repo: kong/kong +Base Commit: 3.4.0 +Head Commit: 3.5.0 + +comparing between '3.4.0' and '3.5.0' +number of commits: 280 +number of pages: 6 +commits per page: 50 + +PRs: +https://github.com/Kong/kong/pull/7414 +... + +PRs without changelog: +https://github.com/Kong/kong/pull/7413 +... + +PRs without 'cherry-pick kong-ee' label: +https://github.com/Kong/kong/pull/11721 +... + +PRs without cross-referenced EE PRs: +https://github.com/Kong/kong/pull/11304 +... + +Commits: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/commits.txt +PRs: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/prs.txt +PRs without changelog: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/prs_no_changelog.txt +CE PRs without cherry-pick label: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/prs_no_cherrypick_label.txt +CE PRs without referenced EE cherry-pick PRs: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/prs_no_cross_reference.txt + +Remeber to remove /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO +``` diff --git a/changelog/create_pr b/changelog/create_pr new file mode 100644 index 00000000000..e765bf78250 --- /dev/null +++ b/changelog/create_pr @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +echo " +Checking existing changelog PR ..." +response=$( + curl -sSL \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "https://api.github.com/repos/${1}/pulls?state=open&base=${2}&head=${4}" \ + | jq -er '.[] | select(.head.ref == "'"${4}"'") | [.html_url, .head.ref] | @tsv' +) + +if [[ -z "${response:+x}" ]] ; then + echo "Not found. Creating ..." 
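+  # No open changelog PR with this head branch was found: create one via the
+  # GitHub REST API (POST /repos/{owner}/{repo}/pulls) and print its URL and
+  # head branch.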
+ curl -sSL \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "https://api.github.com/repos/${1}/pulls" \ + -d '{"base":"'"${2}"'", "title":"'"Generate ${3} changelog"'","body":"'"Generate ${3} changelog"'","head":"'"${4}"'"}' \ + | jq -r '[.html_url, .head.ref] | @tsv' +else + printf 'Updated existing PR: %s\n' "${response}" +fi diff --git a/changelog/unreleased/kong/add_zlib1g-dev.yml b/changelog/unreleased/kong/add_zlib1g-dev.yml new file mode 100644 index 00000000000..8ca18a69a6c --- /dev/null +++ b/changelog/unreleased/kong/add_zlib1g-dev.yml @@ -0,0 +1,2 @@ +message: Added zlib1g-dev dependency to Ubuntu packages. +type: bugfix diff --git a/changelog/unreleased/kong/fix_hash.yml b/changelog/unreleased/kong/fix_hash.yml new file mode 100644 index 00000000000..6c97221121d --- /dev/null +++ b/changelog/unreleased/kong/fix_hash.yml @@ -0,0 +1,3 @@ +message: Fixed an inefficiency issue in the Luajit hashing algorithm +type: performance +scope: Performance diff --git a/changelog/verify-prs b/changelog/verify-prs new file mode 100755 index 00000000000..1cbe0a51b93 --- /dev/null +++ b/changelog/verify-prs @@ -0,0 +1,464 @@ +#!/usr/bin/env bash + +function warn () { + >&2 printf '%s\n' "$@" +} + +function die () { + local st + st="$?" + case $2 in + (*[^0-9]*|'') : ;; + (*) st=$2 ;; + esac + + if [[ -n "$1" ]] ; then warn "$1" ; fi + + warn "WARNING: $0 is terminated" "output dir $out_dir removed" + rm -rf "$out_dir" + + exit "$st" +} + +function show_help () { + local prg + prg="${BASH_SOURCE[0]}" + cat <<-EOF +Version: 0.1 + Author: Zachary Hu (zhucac AT outlook.com) + Script: Compare between two revisions (e.g. tags and branches), and output + commits, PRs, PRs without changelog and CE PRs without CE2EE (experimental). + + A PR should have an associated YML file under 'changelog/unreleased', otherwise + it is printed for verification. + + Regarding CE2EE, if a CE PR has any cross-referenced EE PRs, it is regarded synced + to EE. If strict mode is enabled, associated EE PRs must contain keyword 'cherry' + in the title. If a CE PR is labelled with 'cherry-pick kong-ee', it is regarded synced + to EE. If a CE PR is not synced to EE, it is printed for verification. + + Usage: ${prg} -h + + -v, --verbose Print debug info. + + --strict-filter When checking if a CE PR is synced to EE, + more strict filters are applied. + + --safe-mode When checking if a CE PR is synced to EE, + check one by one. This overrides '--bulk'. + + --bulk N Number of jobs ran concurrency. Default is '5'. + Adjust this value to your CPU cores. 
+ + ${prg} --org-repo kong/kong --base-commit 3.4.2 --head-commit 3.4.3 [--strict-filter] [--bulk 5] [--safe-mode] [-v] + + ORG_REPO=kong/kong BASE_COMMIT=3.4.2 HEAD_COMMIT=3.4.3 $prg +EOF +} + +function set_globals () { + ORG_REPO="${ORG_REPO:-kong/kong}" + BASE_COMMIT="${BASE_COMMIT:-3.4.2.0}" + HEAD_COMMIT="${HEAD_COMMIT:-3.4.2.1}" + + verbose=0 + STRICT_FILTER=0 + SAFE_MODE=0 + + BULK=5 + USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36" + + out_dir=$(mktemp -dt outputXXX) + commits_file="${out_dir}/commits.txt" ; touch "$commits_file" + prs_file="${out_dir}/prs.txt" ; touch "$prs_file" + prs_no_changelog_file="${out_dir}/prs_no_changelog.txt" ; touch "$prs_no_changelog_file" + prs_no_cherrypick_label_file="${out_dir}/prs_no_cherrypick_label.txt" ; touch "$prs_no_cherrypick_label_file" + prs_no_cross_reference_file="${out_dir}/prs_no_cross_reference.txt" ; touch "$prs_no_cross_reference_file" + + num_of_commits=0 + + per_page=100 + num_of_pages=1 +} + +function parse_args () { + while : ; do + case "$1" in + (-h|--help) + show_help + exit + ;; + (-v|--verbose) + set -x + verbose=$(( verbose + 1 )) + ;; + (--org-repo) + if [[ -n "$2" ]] ; then + ORG_REPO="$2" + else + die 'ERROR: "--org-repo" requires a non-empty option argument.' 2 + fi + shift + ;; + (--org-repo=*) + ORG_REPO="${1#--org-repo=}" + if [[ -z "$ORG_REPO" ]] ; then + die 'ERROR: "--org-repo=" requires a non-empty option argument followed immediately.' 2 + fi + ;; + (--base-commit) + if [[ -n "$2" ]] ; then + BASE_COMMIT="$2" + else + die 'ERROR: "--base-commit" requires a non-empty option argument.' 2 + fi + shift + ;; + (--base-commit=*) + BASE_COMMIT="${1#--base-commit=}" + if [[ -z "$BASE_COMMIT" ]] ; then + die 'ERROR: "--base-commit=" requires a non-empty option argument followed immediately.' 2 + fi + ;; + (--head-commit) + if [[ -n "$2" ]] ; then + HEAD_COMMIT="$2" + else + die 'ERROR: "--head-commit" requires a non-empty option argument.' 2 + fi + shift + ;; + (--head-commit=*) + HEAD_COMMIT="${1#--base-commit=}" + if [[ -z "$HEAD_COMMIT" ]] ; then + die 'ERROR: "--head-commit=" requires a non-empty option argument followed immediately.' 2 + fi + ;; + (--bulk) + if [[ -n "$2" ]] ; then + BULK="$2" + else + die 'ERROR: "--bulk" requires a non-empty option argument.' 2 + fi + shift + ;; + (--bulk=*) + BULK="${1#--bulk=}" + if [[ -z "$BULK" ]] ; then + die 'ERROR: "--bulk=" requires a non-empty option argument followed immediately.' 2 + fi + ;; + (--strict-filter) + STRICT_FILTER=1 + ;; + (--safe-mode) + SAFE_MODE=1 + ;; + (--) + shift + break + ;; + (-?*) + warn "WARNING: unknown option (ignored): $1" + ;; + (*) + break + ;; + esac + + shift + done +} + +function prepare_args () { + parse_args "$@" + + if [[ -z "${ORG_REPO:+x}" ]] ; then + warn "WARNING: ORG_REPO must be provided" + fi + if [[ -z "${BASE_COMMIT:+x}" ]] ; then + warn "WARNING: BASE_COMMIT must be provided" + fi + if [[ -z "${HEAD_COMMIT:+x}" ]] ; then + warn "WARNING: HEAD_COMMIT must be provided" + fi + if [[ -z "${GITHUB_TOKEN:+x}" ]] ; then + warn "WARNING: GITHUB_TOKEN must be provided" + fi + if (( BULK >= 8 )) ; then + warn "WARNING: job concurrency $BULK is too high. May reach the rate limit of GitHub API." + fi + if (( SAFE_MODE )) ; then + warn "WARNING: safe mode enabled. Jobs takes longer time. Take a cup of coffee!" 
+ fi + + printf '%s\n' \ + "Org Repo: ${ORG_REPO}" \ + "Base Commit: ${BASE_COMMIT}" \ + "Head Commit: ${HEAD_COMMIT}" +} + +function get_num_pages_commits () { + local first_paged_response + first_paged_response=$( curl -i -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + "https://api.github.com/repos/${ORG_REPO}/compare/${BASE_COMMIT}...${HEAD_COMMIT}?page=1&per_page=${per_page}" ) + + local status_line + status_line=$( sed -n 1p <<< "$first_paged_response" ) + if ! [[ "$status_line" =~ 200 ]] ; then + die 'ERROR: cannot request GitHub API. Please check arguments or try option "-v"' 2 + fi + + local link_header + link_header=$( awk '/^link:/ { print; exit }' <<< "$first_paged_response" ) + IFS="," read -ra links <<< "$link_header" + + local regex='[^_](page=([0-9]+)).*rel="last"' + for link in "${links[@]}" ; do + if [[ "$link" =~ $regex ]] ; then + num_of_pages="${BASH_REMATCH[2]}" + break + fi + done + + num_of_commits=$( awk 'BEGIN { FS="[[:space:]]+|," } /total_commits/ { print $3; exit }' <<< "$first_paged_response" ) + printf 'number of commits: %s\n' "$num_of_commits" + +} + +function get_commits_prs () { + get_num_pages_commits + printf 'number of pages: %s\n' "$num_of_pages" + printf 'commits per page: %s\n' "$per_page" + + printf '%s\n' "" "PRs:" + for i in $( seq 1 "${num_of_pages}" ) ; do + mapfile -t < <( curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + "https://api.github.com/repos/${ORG_REPO}/compare/${BASE_COMMIT}...${HEAD_COMMIT}?page=${i}&per_page=${per_page}" | \ + jq -r '.commits[].sha' ) + + local max_per_request=17 + local BASE_Q="repo:${ORG_REPO}%20type:pr%20is:merged" + local full_q="$BASE_Q" + local count=0 + for commit in "${MAPFILE[@]}" ; do + printf '%s\n' "${commit:0:9}" >> "$commits_file" + + full_q="${full_q}%20${commit:0:9}" + count=$(( count+1 )) + + if ! (( count % max_per_request )) || test "$count" -eq "$per_page" || test "$count" -eq "$num_of_commits" ; then + curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + "https://api.github.com/search/issues?q=$full_q" | jq -r '.items[].html_url' | tee -a "$prs_file" + + full_q="$BASE_Q" + fi + done + done + + sort -uo "$prs_file" "$prs_file" +} + +function check_pr_changelog () { + if [[ -z "${1:+x}" ]] ; then return ; fi + + local changelog_pattern="changelog/unreleased/kong*/*.yml" + local req_url="https://api.github.com/repos/${ORG_REPO}/pulls/PR_NUMBER/files" + local pr_number="${1##https*/}" + req_url="${req_url/PR_NUMBER/$pr_number}" + mapfile -t < <( curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "$req_url" | jq -r '.[].filename' ) + + local has_changelog=0 + for f in "${MAPFILE[@]}" ; do + if [[ "$f" == ${changelog_pattern} ]] ; then has_changelog=1; break; fi + done + if ! 
(( has_changelog )) ; then echo "$1" | tee -a "$prs_no_changelog_file" ; fi +} + +function check_changelog () { + echo -e "\nPRs without changelog:" + export ORG_REPO="$ORG_REPO" USER_AGENT="$USER_AGENT" prs_no_changelog_file="$prs_no_changelog_file" + export -f check_pr_changelog + if type parallel >/dev/null 2>&1 ; then + parallel -j "$BULK" check_pr_changelog <"$1" + else + warn "WARNING: GNU 'parallel' is not available, fallback to 'xargs'" + <"$1" xargs -P "$BULK" -n1 bash -c 'check_pr_changelog "$@"' _ + fi + sort -uo "$prs_no_changelog_file" "$prs_no_changelog_file" +} + +function check_cherrypick_label () { + if [[ -z "${1:+x}" ]] ; then return ; fi + + local label_pattern="cherry-pick kong-ee" + local req_url="https://api.github.com/repos/${ORG_REPO}/issues/PR_NUMBER/labels" + local pr_number="${1##https://*/}" + req_url="${req_url/PR_NUMBER/$pr_number}" + mapfile -t < <( curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "$req_url" | jq -r '.[].name' ) + + local has_label=0 + for l in "${MAPFILE[@]}" ; do + if [[ "$l" == ${label_pattern} ]] ; then has_label=1; break; fi + done + if ! (( has_label )) ; then echo "$1" | tee -a "$prs_no_cherrypick_label_file" ; fi +} + +function check_cross_reference () { + if [[ -z "${1:+x}" ]] ; then return ; fi + + local req_url="https://api.github.com/repos/${ORG_REPO}/issues/PR_NUMBER/timeline" + local pr_number="${1##https://*/}" + req_url="${req_url/PR_NUMBER/$pr_number}" + + local first_paged_response + first_paged_response=$( curl -i -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "${req_url}?page=1&per_page=${per_page}" ) + + local link_header + link_header=$( awk '/^link:/ { print; exit }' <<< "$first_paged_response" ) + IFS="," read -ra links <<< "$link_header" + + local count=1 + local regex='[^_](page=([0-9]+)).*rel="last"' + for link in "${links[@]}" ; do + if [[ "$link" =~ $regex ]] ; then + count="${BASH_REMATCH[2]}" + break + fi + done + + local jq_filter + if (( STRICT_FILTER )) ; then + jq_filter='.[].source.issue | select( (.pull_request != null) and + (.pull_request.html_url | ascii_downcase | contains("kong/kong-ee")) and + (.pull_request.merged_at != null) and + (.title | ascii_downcase | contains("cherry")) ) + | [.pull_request.html_url, .title] + | @tsv' + else + jq_filter='.[].source.issue | select( (.pull_request != null) and + (.pull_request.html_url | ascii_downcase | contains("kong/kong-ee")) and + (.pull_request.merged_at != null) ) + | [.pull_request.html_url, .title] + | @tsv' + fi + + local has_ref=0 + local json_response + for i in $( seq 1 "${count}" ) ; do + json_response=$( curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "${req_url}?page=${i}&per_page=${per_page}" ) + + if jq -er "$jq_filter" <<< "$json_response" >/dev/null + then + has_ref=1 + break + fi + done + + if ! (( has_ref )) ; then echo "$1" | tee -a "$prs_no_cross_reference_file" ; fi +} + +function check_ce2ee () { + if [[ "$ORG_REPO" != "kong/kong" && "$ORG_REPO" != "Kong/kong" ]] ; then + warn "WARNING: only check CE2EE for CE repo. 
Skip $ORG_REPO" + return + fi + + echo -e "\nPRs without 'cherry-pick kong-ee' label:" + export ORG_REPO="$ORG_REPO" USER_AGENT="$USER_AGENT" prs_no_cherrypick_label_file="$prs_no_cherrypick_label_file" + export -f check_cherrypick_label + if type parallel >/dev/null 2>&1 ; then + parallel -j "$BULK" check_cherrypick_label <"$1" + else + warn "WARNING: GNU 'parallel' is not available, fallback to 'xargs'" + <"$1" xargs -P "$BULK" -n1 bash -c 'check_cherrypick_label "$@"' _ + fi + sort -uo "$prs_no_cherrypick_label_file" "$prs_no_cherrypick_label_file" + + echo -e "\nPRs without cross-referenced EE PRs:" + if (( SAFE_MODE )) ; then + local in_fd + if [[ -f "$1" ]] ; then + : {in_fd}<"$1" + else + : {in_fd}<&0 + warn "WARNING: $1 not a valid file. Read from stdin -" + fi + + while read -r -u "$in_fd" ; do + check_cross_reference "$REPLY" + done + + : ${in_fd}<&- + else + export ORG_REPO="$ORG_REPO" USER_AGENT="$USER_AGENT" STRICT_FILTER="$STRICT_FILTER" prs_no_cross_reference_file="$prs_no_cross_reference_file" + export -f check_cross_reference + if type parallel >/dev/null 2>&1 ; then + parallel -j "$BULK" check_cross_reference <"$1" + else + warn "WARNING: GNU 'parallel' is not available, fallback to 'xargs'" + <"$1" xargs -P "$BULK" -n1 bash -c 'check_cross_reference "$@"' _ + fi + fi + sort -uo "$prs_no_cross_reference_file" "$prs_no_cross_reference_file" +} + +function main () { + set -Eeo pipefail + trap die ERR SIGABRT SIGQUIT SIGHUP SIGINT + + set_globals + prepare_args "$@" + + printf '%s\n' "" "comparing between '${BASE_COMMIT}' and '${HEAD_COMMIT}'" + + get_commits_prs + + check_changelog "$prs_file" + + check_ce2ee "$prs_file" + + printf '%s\n' "" \ + "Commits: $commits_file" \ + "PRs: $prs_file" \ + "PRs without changelog: $prs_no_changelog_file" \ + "CE PRs without cherry-pick label: $prs_no_cherrypick_label_file" \ + "CE PRs without referenced EE cherry-pick PRs: $prs_no_cross_reference_file" \ + "" "Remember to remove $out_dir" + + trap '' EXIT +} + +if (( "$#" )) ; then main "$@" ; else show_help ; fi diff --git a/kong-2.8.4-0.rockspec b/kong-2.8.5-0.rockspec similarity index 99% rename from kong-2.8.4-0.rockspec rename to kong-2.8.5-0.rockspec index c06749548cb..ac6ef017e39 100644 --- a/kong-2.8.4-0.rockspec +++ b/kong-2.8.5-0.rockspec @@ -1,10 +1,10 @@ package = "kong" -version = "2.8.4-0" +version = "2.8.5-0" rockspec_format = "3.0" supported_platforms = {"linux", "macosx"} source = { url = "https://github.com/Kong/kong.git", - tag = "2.8.4" + tag = "2.8.5" } description = { summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.", @@ -13,7 +13,7 @@ description = { } dependencies = { "inspect == 3.1.2", - "luasec == 1.0.2", + "luasec == 1.3.2", "luasocket == 3.0-rc1", "penlight == 1.12.0", "lua-resty-http == 0.16.1", @@ -36,7 +36,7 @@ dependencies = { "lua-resty-healthcheck == 1.5.1", "lua-resty-mlcache == 2.5.0", "lua-messagepack == 0.5.2", - "lua-resty-openssl == 0.8.7", + "lua-resty-openssl == 0.8.22", "lua-resty-counter == 0.2.1", "lua-resty-ipmatcher == 0.6.1", "lua-resty-acme == 0.7.2", diff --git a/kong/meta.lua b/kong/meta.lua index 46ce2d9a657..f6075db17aa 100644 --- a/kong/meta.lua +++ b/kong/meta.lua @@ -1,7 +1,7 @@ local version = setmetatable({ major = 2, minor = 8, - patch = 4, + patch = 5, --suffix = "rc.1" }, { -- our Makefile during certain releases adjusts this line.
Any changes to diff --git a/scripts/autodoc b/scripts/autodoc index 5bc8a8720e3..f6d19abce48 100755 --- a/scripts/autodoc +++ b/scripts/autodoc @@ -1,5 +1,7 @@ #!/bin/bash +set -e + DOCS_REPO=$1 DOCS_VERSION=$2 @@ -75,11 +77,16 @@ function insert_yaml_file_into_nav_file() { echo "Generating docs ..." rm -rf ./autodoc/output -./autodoc/admin-api/generate.lua && \ -./autodoc/cli/generate.lua && \ -./autodoc/upgrading/generate.lua && \ +./autodoc/cli/generate.lua ./autodoc/pdk/generate.lua +exit_code=$? + +if [[ $exit_code -ne 0 ]] +then + exit $exit_code +fi + if [ -z "$DOCS_REPO" ] || [ -z "$DOCS_VERSION" ] then echo @@ -155,9 +162,6 @@ fi copy autodoc/output/admin-api/admin-api.md "$DOCS_APP/admin-api/index.md" copy autodoc/output/cli.md "$DOCS_APP/reference/cli.md" -rm -rf "$DOCS_APP/install-and-run/upgrade-oss.md" -copy autodoc/output/upgrading.md "$DOCS_APP/install-and-run/upgrade-oss.md" - rm -rf "$DOCS_APP/pdk/" mkdir -p "$DOCS_APP/pdk" diff --git a/scripts/check-labeler.pl b/scripts/check-labeler.pl new file mode 100755 index 00000000000..a9f69f258f0 --- /dev/null +++ b/scripts/check-labeler.pl @@ -0,0 +1,40 @@ +#!/usr/bin/env perl + +# Script to verify that the labeler configuration contains entries for +# all plugins. If any plugins are missing, the script errors out and +# prints the missing entries. + +# The pre- and post-function plugins are tracked together under the +# label "plugins/serverless-functions". Special code is present below +# to ensure that the label exists. + +use strict; + +die "usage: $0 <labeler config file>\n" unless ($#ARGV == 0); + +my $labeler_config = $ARGV[0]; + +-f $labeler_config + or die "$0: cannot find labeler config file $labeler_config\n"; + +my %plugins = ( "plugins/serverless-functions", "plugins/serverless-functions:\n- kong/plugins/pre-function\n- kong/plugins/post-function\n\n" ); +for my $path (<kong/plugins/*>) { + my $plugin = $path =~ s,kong/,,r; + $plugins{$plugin} = "$plugin:\n- $path/**/*\n\n" unless ($plugin =~ m,plugins/(pre|post)-function,); +} + +open(LABELER_CONFIG, "<", $labeler_config) or die "$0: can't open labeler config file $labeler_config: $!\n"; +while (<LABELER_CONFIG>) { + delete $plugins{$1} if (m,^(plugins.*):,); +} +close(LABELER_CONFIG); + +exit 0 unless (keys %plugins); + +print STDERR "Missing plugins in labeler configuration $labeler_config.\n"; +print STDERR "Please add the following sections to the file:\n\n"; +for my $plugin (sort keys %plugins) { + print STDERR $plugins{$plugin}; +} + +exit 1; diff --git a/scripts/check_spec_files_spelling.sh b/scripts/check_spec_files_spelling.sh new file mode 100755 index 00000000000..556589c7c93 --- /dev/null +++ b/scripts/check_spec_files_spelling.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +set -e + +function red() { + echo -e "\033[1;31m$*\033[0m" +} + +readarray -t FOUND < \ +<( + git ls-files 'spec/[0-9]**.lua' \ + | grep -vE \ + -e '_spec.lua$' \ + -f spec/on_demand_specs +) + +if (( ${#FOUND[@]} > 0 )); then + echo + red "----------------------------------------------------------------" + echo "Found some files in spec directory that do not have the _spec suffix, please check if you're misspelling them. If there is an exception, please add the corresponding files (or their path regexes) into the whitelist spec/on_demand_specs."
+ echo + echo "Possible misspelling file list:" + echo + printf "%s\n" "${FOUND[@]}" + red "----------------------------------------------------------------" + exit 1 +fi diff --git a/scripts/dependency_services/00-create-pg-db.sh b/scripts/dependency_services/00-create-pg-db.sh new file mode 100755 index 00000000000..7463f58d481 --- /dev/null +++ b/scripts/dependency_services/00-create-pg-db.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +pg_conf_file=/var/lib/postgresql/data/postgresql.conf + +echo "\ +log_statement = 'all' +log_disconnections = off +log_duration = on +log_min_duration_statement = -1 +shared_preload_libraries = 'pg_stat_statements' +track_activity_query_size = 2048 +pg_stat_statements.track = all +pg_stat_statements.max = 10000 +" >>$pg_conf_file + +for database in $(echo $POSTGRES_DBS | tr ',' ' '); do + echo "Creating database $database" + psql -U $POSTGRES_USER <<-EOSQL + SELECT 'CREATE DATABASE $database' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '$database')\gexec + GRANT ALL PRIVILEGES ON DATABASE $database TO $POSTGRES_USER; +EOSQL +done diff --git a/scripts/dependency_services/common.sh b/scripts/dependency_services/common.sh new file mode 100644 index 00000000000..0538816a2ca --- /dev/null +++ b/scripts/dependency_services/common.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +if [ "$#" -ne 2 ]; then + echo "Usage: $0 KONG_SERVICE_ENV_FILE [up|down]" + exit 1 +fi + +if docker compose version >/dev/null 2>&1; then + DOCKER_COMPOSE="docker compose" +elif [[ -z $(which docker-compose) ]]; then + echo "docker-compose or docker compose plugin not installed" + exit 1 +else + DOCKER_COMPOSE="docker-compose" +fi + +if [ "$2" == "down" ]; then + $DOCKER_COMPOSE down -v + exit 0 +fi + +KONG_SERVICE_ENV_FILE=$1 +# clear the file +> $KONG_SERVICE_ENV_FILE + +cwd=$(realpath $(dirname $(readlink -f ${BASH_SOURCE[0]}))) + +export COMPOSE_FILE=$cwd/docker-compose-test-services.yml +export COMPOSE_PROJECT_NAME="$(basename $(realpath $cwd/../../))-$(basename ${KONG_VENV:-kong-dev})" +echo "export COMPOSE_FILE=$COMPOSE_FILE" >> $KONG_SERVICE_ENV_FILE +echo "export COMPOSE_PROJECT_NAME=$COMPOSE_PROJECT_NAME" >> $KONG_SERVICE_ENV_FILE + +$DOCKER_COMPOSE up -d + +if [ $? 
-ne 0 ]; then + echo "Something goes wrong, please check $DOCKER_COMPOSE output" + exit 1 +fi + +# Initialize parallel arrays for service names and port definitions +services=() +port_defs=() + +# Add elements to the parallel arrays +services+=("postgres") +port_defs+=("PG_PORT:5432") + +services+=("redis") +port_defs+=("REDIS_PORT:6379 REDIS_SSL_PORT:6380") + +services+=("grpcbin") +port_defs+=("GRPCBIN_PORT:9000 GRPCBIN_SSL_PORT:9001") + +services+=("zipkin") +port_defs+=("ZIPKIN_PORT:9411") + +_kong_added_envs="" + +# Not all env variables need all three prefixes, but we add all of them for simplicity +env_prefixes="KONG_ KONG_TEST_ KONG_SPEC_TEST_" + +for ((i = 0; i < ${#services[@]}; i++)); do + svc="${services[i]}" + + for port_def in ${port_defs[i]}; do + env_name=$(echo $port_def |cut -d: -f1) + private_port=$(echo $port_def |cut -d: -f2) + exposed_port="$($DOCKER_COMPOSE port "$svc" "$private_port" | cut -d: -f2)" + + if [ -z "$exposed_port" ]; then + echo "Port $env_name for service $svc unknown" + continue + fi + + for prefix in $env_prefixes; do + _kong_added_envs="$_kong_added_envs ${prefix}${env_name}" + echo "export ${prefix}${env_name}=$exposed_port" >> "$KONG_SERVICE_ENV_FILE" + done + done +done diff --git a/scripts/dependency_services/docker-compose-test-services.yml b/scripts/dependency_services/docker-compose-test-services.yml new file mode 100644 index 00000000000..823b0c6e3f9 --- /dev/null +++ b/scripts/dependency_services/docker-compose-test-services.yml @@ -0,0 +1,48 @@ +version: '3.5' +services: + postgres: + image: postgres + ports: + - 127.0.0.1::5432 + volumes: + - postgres-data:/var/lib/posgresql/data + - ./00-create-pg-db.sh:/docker-entrypoint-initdb.d/00-create-pg-db.sh + environment: + POSTGRES_DBS: kong,kong_tests + POSTGRES_USER: kong + POSTGRES_HOST_AUTH_METHOD: trust + healthcheck: + test: ["CMD", "pg_isready", "-U", "kong"] + interval: 5s + timeout: 5s + retries: 8 + restart: on-failure + stdin_open: true + tty: true + redis: + image: redis + ports: + - 127.0.0.1::6379 + - 127.0.0.1::6380 + volumes: + - redis-data:/data + restart: on-failure + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 10s + retries: 10 + grpcbin: + image: kong/grpcbin + ports: + - 127.0.0.1::9000 + - 127.0.0.1::9001 + zipkin: + image: openzipkin/zipkin:2 + ports: + - 127.0.0.1::9411 + command: --logging.level.zipkin2=DEBUG + +volumes: + postgres-data: + redis-data: diff --git a/scripts/dependency_services/up.fish b/scripts/dependency_services/up.fish new file mode 100755 index 00000000000..f96d6076120 --- /dev/null +++ b/scripts/dependency_services/up.fish @@ -0,0 +1,35 @@ +#!/usr/bin/env fish + +set cwd (dirname (status --current-filename)) + +set -xg KONG_SERVICE_ENV_FILE $(mktemp) + +bash "$cwd/common.sh" $KONG_SERVICE_ENV_FILE up + +if test $status -ne 0 + echo "Something goes wrong, please check common.sh output" + exit 1 +end + +source $KONG_SERVICE_ENV_FILE + +function stop_services -d 'Stop dependency services of Kong and clean up environment variables.' + # set this again in child process without need to export env var + set cwd (dirname (status --current-filename)) + + if test -n $COMPOSE_FILE && test -n $COMPOSE_PROJECT_NAME + bash "$cwd/common.sh" $KONG_SERVICE_ENV_FILE down + end + + for i in (cat $KONG_SERVICE_ENV_FILE | cut -d ' ' -f2 | cut -d '=' -f1) + set -e $i + end + + rm -f $KONG_SERVICE_ENV_FILE + set -e KONG_SERVICE_ENV_FILE + + functions -e stop_services +end + +echo 'Services are up! 
Use "stop_services" to stop services and cleanup environment variables, +or use "deactivate" to cleanup the venv.' diff --git a/scripts/dependency_services/up.sh b/scripts/dependency_services/up.sh new file mode 100755 index 00000000000..de4835c010d --- /dev/null +++ b/scripts/dependency_services/up.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +if [ "${BASH_SOURCE-}" = "$0" ]; then + echo "You must source this script: \$ source $0" >&2 + exit 33 +fi + +export KONG_SERVICE_ENV_FILE=$(mktemp) + +if [ -n "$ZSH_VERSION" ]; then + cwd=$(dirname $(readlink -f ${(%):-%N})) +else + cwd=$(dirname $(readlink -f ${BASH_SOURCE[0]})) +fi + +/usr/bin/env bash "$cwd/common.sh" $KONG_SERVICE_ENV_FILE up +if [ $? -ne 0 ]; then + echo "Something goes wrong, please check common.sh output" + exit 1 +fi + +. $KONG_SERVICE_ENV_FILE + +stop_services () { + if test -n "$COMPOSE_FILE" && test -n "$COMPOSE_PROJECT_NAME"; then + bash "$cwd/common.sh" $KONG_SERVICE_ENV_FILE down + fi + + for i in $(cat $KONG_SERVICE_ENV_FILE | cut -f2 | cut -d '=' -f1); do + unset $i + done + + rm -rf $KONG_SERVICE_ENV_FILE + unset KONG_SERVICE_ENV_FILE + + unset -f stop_services +} + +echo 'Services are up! Use "stop_services" to stop services and cleanup environment variables, +or use "deactivate" to cleanup the venv.' diff --git a/scripts/explain_manifest/.gitignore b/scripts/explain_manifest/.gitignore new file mode 100644 index 00000000000..bee8a64b79a --- /dev/null +++ b/scripts/explain_manifest/.gitignore @@ -0,0 +1 @@ +__pycache__ diff --git a/scripts/explain_manifest/config.py b/scripts/explain_manifest/config.py new file mode 100644 index 00000000000..398c9346c96 --- /dev/null +++ b/scripts/explain_manifest/config.py @@ -0,0 +1,203 @@ +from copy import deepcopy + +from globmatch import glob_match + +from main import FileInfo +from expect import ExpectSuite +from suites import common_suites, libc_libcpp_suites, arm64_suites, docker_suites + + +def transform(f: FileInfo): + # XXX: libxslt uses libtool and it injects some extra rpaths + # we only care about the kong library rpath so removing it here + # until we find a way to remove the extra rpaths from it + # It should have no side effect as the extra rpaths are long random + # paths created by bazel. 
+ + if glob_match(f.path, ["**/kong/lib/libxslt.so*", "**/kong/lib/libexslt.so*"]): + expected_rpath = "/usr/local/kong/lib" + if f.rpath and expected_rpath in f.rpath: + f.rpath = expected_rpath + elif f.runpath and expected_rpath in f.runpath: + f.runpath = expected_rpath + # otherwise remain unmodified + + if f.path.endswith("/modules/ngx_wasm_module.so"): + expected_rpath = "/usr/local/openresty/luajit/lib:/usr/local/kong/lib:/usr/local/openresty/lualib" + if f.rpath and expected_rpath in f.rpath: + f.rpath = expected_rpath + elif f.runpath and expected_rpath in f.runpath: + f.runpath = expected_rpath + # otherwise remain unmodified + + +# libc: +# - https://repology.org/project/glibc/versions +# GLIBCXX and CXXABI based on gcc version: +# - https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html +# - https://repology.org/project/gcc/versions +# TODO: libstdc++ verions +targets = { + "alpine-amd64": ExpectSuite( + name="Alpine Linux (amd64)", + manifest="fixtures/alpine-amd64.txt", + use_rpath=True, + tests={ + common_suites: {}, + libc_libcpp_suites: { + # alpine 3.16: gcc 11.2.1 + "libcxx_max_version": "3.4.29", + "cxxabi_max_version": "1.3.13", + }, + } + ), + "amazonlinux-2-amd64": ExpectSuite( + name="Amazon Linux 2 (amd64)", + manifest="fixtures/amazonlinux-2-amd64.txt", + use_rpath=True, + tests={ + common_suites: {}, + libc_libcpp_suites: { + "libc_max_version": "2.26", + # gcc 7.3.1 + "libcxx_max_version": "3.4.24", + "cxxabi_max_version": "1.3.11", + }, + }, + ), + "amazonlinux-2023-amd64": ExpectSuite( + name="Amazon Linux 2023 (amd64)", + manifest="fixtures/amazonlinux-2023-amd64.txt", + tests={ + common_suites: { + "libxcrypt_no_obsolete_api": True, + }, + libc_libcpp_suites: { + "libc_max_version": "2.34", + # gcc 11.2.1 + "libcxx_max_version": "3.4.29", + "cxxabi_max_version": "1.3.13", + }, + }, + ), + "el7-amd64": ExpectSuite( + name="Redhat 7 (amd64)", + manifest="fixtures/el7-amd64.txt", + use_rpath=True, + tests={ + common_suites: {}, + libc_libcpp_suites: { + "libc_max_version": "2.17", + # gcc 4.8.5 + "libcxx_max_version": "3.4.19", + "cxxabi_max_version": "1.3.7", + }, + } + ), + "el8-amd64": ExpectSuite( + name="Redhat 8 (amd64)", + manifest="fixtures/el8-amd64.txt", + use_rpath=True, + tests={ + common_suites: {}, + libc_libcpp_suites: { + "libc_max_version": "2.28", + # gcc 8.5.0 + "libcxx_max_version": "3.4.25", + "cxxabi_max_version": "1.3.11", + }, + }, + ), + "el9-amd64": ExpectSuite( + name="Redhat 8 (amd64)", + manifest="fixtures/el9-amd64.txt", + use_rpath=True, + tests={ + common_suites: { + "libxcrypt_no_obsolete_api": True, + }, + libc_libcpp_suites: { + "libc_max_version": "2.34", + # gcc 11.3.1 + "libcxx_max_version": "3.4.29", + "cxxabi_max_version": "1.3.13", + }, + } + ), + "ubuntu-20.04-amd64": ExpectSuite( + name="Ubuntu 20.04 (amd64)", + manifest="fixtures/ubuntu-20.04-amd64.txt", + tests={ + common_suites: {}, + libc_libcpp_suites: { + "libc_max_version": "2.30", + # gcc 9.3.0 + "libcxx_max_version": "3.4.28", + "cxxabi_max_version": "1.3.12", + }, + } + ), + "ubuntu-22.04-amd64": ExpectSuite( + name="Ubuntu 22.04 (amd64)", + manifest="fixtures/ubuntu-22.04-amd64.txt", + tests={ + common_suites: {}, + libc_libcpp_suites: { + "libc_max_version": "2.35", + # gcc 11.2.0 + "libcxx_max_version": "3.4.29", + "cxxabi_max_version": "1.3.13", + }, + } + ), + "debian-10-amd64": ExpectSuite( + name="Debian 10 (amd64)", + manifest="fixtures/debian-10-amd64.txt", + tests={ + common_suites: {}, + libc_libcpp_suites: { + "libc_max_version": "2.28", + # 
gcc 8.3.0 + "libcxx_max_version": "3.4.25", + "cxxabi_max_version": "1.3.11", + }, + } + ), + "debian-11-amd64": ExpectSuite( + name="Debian 11 (amd64)", + manifest="fixtures/debian-11-amd64.txt", + tests={ + common_suites: {}, + libc_libcpp_suites: { + "libc_max_version": "2.31", + # gcc 10.2.1 + "libcxx_max_version": "3.4.28", + "cxxabi_max_version": "1.3.12", + }, + } + ), + "docker-image": ExpectSuite( + name="Generic Docker Image", + manifest=None, + tests={ + docker_suites: {}, + } + ), +} + +# populate arm64 and fips suites from amd64 suites + +for target in list(targets.keys()): + if target.split("-")[0] in ("alpine", "ubuntu", "debian", "amazonlinux", "el9"): + e = deepcopy(targets[target]) + e.manifest = e.manifest.replace("-amd64.txt", "-arm64.txt") + # Ubuntu 22.04 (arm64) + e.name = e.name.replace("(amd64)", "(arm64)") + e.tests[arm64_suites] = {} + + # TODO: cross compiled aws2023 uses rpath instead of runpath + if target == "amazonlinux-2023-amd64": + e.use_rpath = True + + # ubuntu-22.04-arm64 + targets[target.replace("-amd64", "-arm64")] = e diff --git a/scripts/explain_manifest/docker_image_filelist.txt b/scripts/explain_manifest/docker_image_filelist.txt new file mode 100644 index 00000000000..4ecad80ed00 --- /dev/null +++ b/scripts/explain_manifest/docker_image_filelist.txt @@ -0,0 +1,21 @@ +/etc/passwd +/etc/group +/usr/local/kong/** +/usr/local/bin/kong +/usr/local/bin/luarocks +/usr/local/etc/luarocks/** +/usr/local/lib/lua/** +/usr/local/lib/luarocks/** +/usr/local/openresty/** +/usr/local/share/lua/** +/etc/kong/kong.conf.default +/etc/kong/kong.logrotate +/usr/local/kong/include/kong/pluginsocket.proto +/usr/local/kong/include/google/protobuf/**.proto +/usr/local/kong/include/openssl/**.h +/etc/ssl/certs/ca-certificates.crt +/etc/pki/tls/certs/ca-bundle.crt +/etc/ssl/ca-bundle.pem +/etc/pki/tls/cacert.pem +/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem +/etc/ssl/cert.pem \ No newline at end of file diff --git a/scripts/explain_manifest/expect.py b/scripts/explain_manifest/expect.py new file mode 100644 index 00000000000..7b52dea9717 --- /dev/null +++ b/scripts/explain_manifest/expect.py @@ -0,0 +1,333 @@ +import os +import re +import sys +import time +import atexit +import difflib +import inspect +import datetime +import subprocess +from inspect import getframeinfo + +from globmatch import glob_match + +import suites + + +def glob_match_ignore_slash(path, globs): + if path.startswith("/"): + path = path[1:] + globs = list(globs) + for i, g in enumerate(globs): + if g.startswith("/"): + globs[i] = g[1:] + + return glob_match(path, globs) + + +def write_color(color): + term_colors = { + "red": 31, + "green": 32, + "yellow": 33, + "blue": 34, + "magenta": 35, + "cyan": 36, + "white": 37, + } + + def decorator(fn): + def wrapper(self, *args): + if color not in term_colors: + raise ValueError("unknown color %s" % color) + sys.stdout.write('\033[%dm' % term_colors[color]) + r = fn(self, *args) + sys.stdout.write('\033[0m') + return r + return wrapper + return decorator + + +def write_block_desc(desc_verb): + def decorator(fn): + def wrapper(self, suite: ExpectSuite, *args): + ExpectChain._log("[INFO] start to %s of suite %s" % + (desc_verb, suite.name)) + start_time = time.time() + r = fn(self, suite, *args) + duration = time.time() - start_time + ExpectChain._log("[INFO] finish to %s of suite %s in %.2fms" % ( + desc_verb, suite.name, duration*1000)) + return r + return wrapper + return decorator + + +class ExpectSuite(): + def __init__(self, name, manifest, 
use_rpath=False, tests={}): + self.name = name + self.manifest = manifest + self.use_rpath = use_rpath + self.tests = tests + + +class ExpectChain(): + def __init__(self, infos): + self._infos = infos + self._all_failures = [] + self._reset() + self.verbs = ("does_not", "equal", "match", "contain", + "contain_match", "less_than", "greater_than") + atexit.register(self._print_all_fails) + + def _reset(self): + # clear states + self._logical_reverse = False + self._files = [] + self._msg = "" + self._title_shown = False + self._checks_count = 0 + self._failures_count = 0 + self._last_attribute = None + + def _ctx_info(self): + f = inspect.currentframe().f_back.f_back.f_back.f_back + fn_rel = os.path.relpath(getframeinfo(f).filename, os.getcwd()) + + return "%s:%d" % (fn_rel, f.f_lineno) + + @classmethod + def _log(self, *args): + sys.stdout.write(" %s " % datetime.datetime.now().strftime('%b %d %X')) + print(*args) + + @write_color("white") + def _print_title(self): + if self._title_shown: + return + self._log("[TEST] %s: %s" % (self._ctx_info(), self._msg)) + self._title_shown = True + + @write_color("red") + def _print_fail(self, msg): + self._log("[FAIL] %s" % msg) + self._all_failures.append("%s: %s" % (self._ctx_info(), msg)) + self._failures_count += 1 + + @write_color("green") + def _print_ok(self, msg): + self._log("[OK ] %s" % msg) + + @write_color("yellow") + def _print_error(self, msg): + self._log("[FAIL] %s" % msg) + + def _print_result(self): + if self._checks_count == 0: + return + if self._failures_count == 0: + self._print_ok("%d check(s) passed for %d file(s)" % + (self._checks_count, len(self._files))) + else: + self._print_error("%d/%d check(s) failed for %d file(s)" % ( + self._failures_count, self._checks_count, len(self._files))) + + @write_color("red") + def _print_all_fails(self): + # flush pending result + self._print_result() + + if self._all_failures: + self._print_error( + "Following failure(s) occured:\n" + "\n".join(self._all_failures)) + os._exit(1) + + def _compare(self, attr, fn): + self._checks_count += 1 + for f in self._files: + if not hasattr(f, attr): + continue # accept missing attribute for now + v = getattr(f, attr) + if self._key_name and isinstance(v, dict): + # TODO: explict flag to accept missing key + if self._key_name not in v: + return True + v = v[self._key_name] + (ok, err_template) = fn(v) + if (not not ok) == self._logical_reverse: + _not = "not" + if self._logical_reverse: + _not = "actually" + + self._print_fail("file %s <%s>: %s" % ( + f.relpath, attr, err_template.format(v, NOT=_not) + )) + return False + return True + + def _exist(self): + self._checks_count += 1 + matched_files_count = len(self._files) + if (matched_files_count > 0) == self._logical_reverse: + self._print_fail("found %d files matching %s" % ( + matched_files_count, self._path_glob)) + return self + + # following are verbs + + def _equal(self, attr, expect): + return self._compare(attr, lambda a: (a == expect, "'{}' does {NOT} equal to '%s'" % expect)) + + def _match(self, attr, expect): + return self._compare(attr, lambda a: (re.search(expect, a), "'{}' does {NOT} match '%s'" % expect)) + + def _less_than(self, attr, expect): + def fn(a): + if isinstance(a, list): + ll = sorted(list(a))[-1] + else: + ll = a + return ll < expect, "'{}' is {NOT} less than %s" % expect + return self._compare(attr, fn) + + def _greater_than(self, attr, expect): + def fn(a): + if isinstance(a, list): + ll = sorted(list(a))[0] + else: + ll = a + return ll > expect, "'{}' is {NOT} greater 
than %s" % expect + return self._compare(attr, fn) + + def _contain(self, attr, expect): + def fn(a): + if isinstance(a, list): + ok = expect in a + msg = "'%s' is {NOT} found in the list" % expect + if not ok: + if len(a) == 0: + msg = "'%s' is empty" % attr + else: + closest = difflib.get_close_matches(expect, a, 1) + if len(closest) > 0: + msg += ", did you mean '%s'?" % closest[0] + return ok, msg + else: + return False, "%s is not a list" % attr + # should not reach here + return self._compare(attr, fn) + + def _contain_match(self, attr, expect): + def fn(a): + if isinstance(a, list): + msg = "'%s' is {NOT} found in the list" % expect + for e in a: + if re.search(expect, e): + return True, msg + return False, msg + else: + return False, "'%s' is not a list" % attr + return self._compare(attr, fn) + + # following are public methods (test functions) + def to(self): + # does nothing, but helps to construct English + return self + + def expect(self, path_glob, msg): + # lazy print last test result + self._print_result() + # reset states + self._reset() + + self._msg = msg + self._print_title() + + self._path_glob = path_glob + if isinstance(path_glob, str): + self._path_glob = [path_glob] + for f in self._infos: + if glob_match_ignore_slash(f.relpath, self._path_glob): + self._files.append(f) + return self + + def do_not(self): + self._logical_reverse = True + return self + + def does_not(self): + return self.do_not() + + def is_not(self): + return self.do_not() + + # access the value of the dict of key "key" + def key(self, key): + self._key_name = key + return self + + def exist(self): + return self._exist() + + def exists(self): + return self._exist() + + def __getattr__(self, name): + dummy_call = lambda *x: self + + verb = re.findall("^(.*?)(?:s|es)?$", name)[0] + if verb not in self.verbs: + # XXX: hack to support rpath/runpath + if self._current_suite.use_rpath and name == "runpath": + name = "rpath" + elif not self._current_suite.use_rpath and name == "rpath": + name = "runpath" + + self._last_attribute = name + # reset + self._logical_reverse = False + self._key_name = None + return self + + if not self._last_attribute: + self._print_error("attribute is not set before verb \"%s\"" % name) + return dummy_call + + attr = self._last_attribute + for f in self._files: + if not hasattr(f, attr): + self._print_error( + "\"%s\" expect \"%s\" attribute to be present, but it's absent for %s (a %s)" % ( + name, attr, f.relpath, type(f))) + return dummy_call + + def cls(expect): + getattr(self, "_%s" % verb)(attr, expect) + return self + + return cls + + @write_block_desc("compare manifest") + def compare_manifest(self, suite: ExpectSuite, manifest: str): + self._current_suite = suite + + if not suite.manifest: + return + + diff_result = subprocess.run( + ['diff', "-BbNaur", suite.manifest, '-'], input=manifest, stdout=subprocess.PIPE) + if diff_result.returncode != 0: + self._print_fail("manifest is not up-to-date:") + if diff_result.stdout: + print(diff_result.stdout.decode()) + if diff_result.stderr: + print(diff_result.stderr.decode()) + + @write_block_desc("run test suite") + def run(self, suite: ExpectSuite): + self._current_suite = suite + + for s in suite.tests: + s(self.expect, **suite.tests[s]) + + self._print_result() # cleanup the lazy buffer diff --git a/scripts/explain_manifest/explain.py b/scripts/explain_manifest/explain.py new file mode 100644 index 00000000000..d9f807b2dc2 --- /dev/null +++ b/scripts/explain_manifest/explain.py @@ -0,0 +1,246 @@ + +import os +import re 
+from pathlib import Path + +import lief +from looseversion import LooseVersion +from elftools.elf.elffile import ELFFile + +caches = {} + + +def lazy_evaluate_cache(): + def decorator(fn): + def wrapper(self, name): + key = (self, name) + if key in caches: + return caches[key] + r = fn(self, name) + caches[key] = r + return r + return wrapper + return decorator + + +class ExplainOpts(): + # General + owners = True + mode = True + size = False + # ELF + arch = False + merge_rpaths_runpaths = False + imported_symbols = False + exported_symbols = False + version_requirement = False + + @classmethod + def from_args(this, args): + this.owners = args.owners + this.mode = args.mode + this.size = args.size + this.arch = args.arch + this.merge_rpaths_runpaths = args.merge_rpaths_runpaths + this.imported_symbols = args.imported_symbols + this.exported_symbols = args.exported_symbols + this.version_requirement = args.version_requirement + + return this + + +class FileInfo(): + def __init__(self, path, relpath): + self.path = path + self.relpath = relpath + + self._lazy_evaluate_cache = {} + self._lazy_evaluate_attrs = {} + + if Path(path).is_symlink(): + self.link = os.readlink(path) + elif Path(path).is_dir(): + self.directory = True + + # use lstat to get the mode, uid, gid of the symlink itself + self.mode = os.lstat(path).st_mode + self.uid = os.lstat(path).st_uid + self.gid = os.lstat(path).st_gid + + if not Path(path).is_symlink(): + self.size = os.stat(path).st_size + + self._lazy_evaluate_attrs.update({ + "binary_content": lambda: open(path, "rb").read(), + "text_content": lambda: open(path, "rb").read().decode('utf-8'), + }) + + def __getattr__(self, name): + if name in self._lazy_evaluate_cache: + return self._lazy_evaluate_cache[name] + + ret = None + if name in self._lazy_evaluate_attrs: + ret = self._lazy_evaluate_attrs[name]() + + if ret: + self._lazy_evaluate_cache[name] = ret + return ret + + return self.__getattribute__(name) + + def explain(self, opts: ExplainOpts): + lines = [("Path", self.relpath)] + if hasattr(self, "link"): + lines.append(("Link", self.link)) + lines.append(("Type", "link")) + elif hasattr(self, "directory"): + lines.append(("Type", "directory")) + + if opts.owners: + lines.append(("Uid,Gid", "%s, %s" % (self.uid, self.gid))) + if opts.mode: + lines.append(("Mode", oct(self.mode))) + if opts.size: + lines.append(("Size", self.size)) + + return lines + + +class ElfFileInfo(FileInfo): + def __init__(self, path, relpath): + super().__init__(path, relpath) + + self.arch = None + self.needed_libraries = [] + self.rpath = None + self.runpath = None + self.get_exported_symbols = None + self.get_imported_symbols = None + self.version_requirement = {} + + if not os.path.isfile(path): + return + + with open(path, "rb") as f: + if f.read(4) != b"\x7fELF": + return + + binary = lief.parse(path) + if not binary: # not an ELF file, malformed, etc + return + + self.arch = binary.header.machine_type.name + + for d in binary.dynamic_entries: + if d.tag == lief.ELF.DYNAMIC_TAGS.NEEDED: + self.needed_libraries.append(d.name) + elif d.tag == lief.ELF.DYNAMIC_TAGS.RPATH: + self.rpath = d.name + elif d.tag == lief.ELF.DYNAMIC_TAGS.RUNPATH: + self.runpath = d.name + + # create closures and lazily evaluated + self.get_exported_symbols = lambda: sorted( + [d.name for d in binary.exported_symbols]) + self.get_imported_symbols = lambda: sorted( + [d.name for d in binary.imported_symbols]) + self.get_functions = lambda: sorted( + [d.name for d in binary.functions]) + + for f in 
binary.symbols_version_requirement: + self.version_requirement[f.name] = [LooseVersion( + a.name) for a in f.get_auxiliary_symbols()] + self.version_requirement[f.name].sort() + + self._lazy_evaluate_attrs.update({ + "exported_symbols": self.get_exported_symbols, + "imported_symbols": self.get_imported_symbols, + "functions": self.get_functions, + }) + + def explain(self, opts: ExplainOpts): + pline = super().explain(opts) + + lines = [] + + if opts.arch and self.arch: + lines.append(("Arch", self.arch)) + if self.needed_libraries: + lines.append(("Needed", self.needed_libraries)) + if self.rpath: + lines.append(("Rpath", self.rpath)) + if self.runpath: + lines.append(("Runpath", self.runpath)) + if opts.exported_symbols and self.get_exported_symbols: + lines.append(("Exported", self.get_exported_symbols())) + if opts.imported_symbols and self.get_imported_symbols: + lines.append(("Imported", self.get_imported_symbols())) + if opts.version_requirement and self.version_requirement: + req = [] + for k in sorted(self.version_requirement): + req.append("%s: %s" % + (k, ", ".join(map(str, self.version_requirement[k])))) + lines.append(("Version Requirement", req)) + + return pline + lines + + +class NginxInfo(ElfFileInfo): + def __init__(self, path, relpath): + super().__init__(path, relpath) + + # nginx must be an ELF file + if not self.needed_libraries: + return + + self.nginx_modules = [] + self.nginx_compiled_openssl = None + self.nginx_compile_flags = None + + binary = lief.parse(path) + + for s in binary.strings: + if re.match("\s*--prefix=/", s): + self.nginx_compile_flags = s + for m in re.findall("add(?:-dynamic)?-module=(.*?) ", s): + if m.startswith("../"): # skip bundled modules + continue + pdir = os.path.basename(os.path.dirname(m)) + mname = os.path.basename(m) + if pdir in ("external", "distribution"): + self.nginx_modules.append(mname) + else: + self.nginx_modules.append(os.path.join(pdir, mname)) + self.nginx_modules = sorted(self.nginx_modules) + elif m := re.match("^built with (.+) \(running with", s): + self.nginx_compiled_openssl = m.group(1).strip() + + # Fetch DWARF infos + with open(path, "rb") as f: + elffile = ELFFile(f) + self.has_dwarf_info = elffile.has_dwarf_info() + self.has_ngx_http_request_t_DW = False + dwarf_info = elffile.get_dwarf_info() + for cu in dwarf_info.iter_CUs(): + dies = [die for die in cu.iter_DIEs()] + # Too many DIEs in the binary, we just check those in `ngx_http_request` + if "ngx_http_request" in dies[0].attributes['DW_AT_name'].value.decode('utf-8'): + for die in dies: + value = die.attributes.get('DW_AT_name') and die.attributes.get( + 'DW_AT_name').value.decode('utf-8') + if value and value == "ngx_http_request_t": + self.has_ngx_http_request_t_DW = True + return + + def explain(self, opts: ExplainOpts): + pline = super().explain(opts) + + lines = [] + lines.append(("Modules", self.nginx_modules)) + lines.append(("OpenSSL", self.nginx_compiled_openssl)) + lines.append(("DWARF", self.has_dwarf_info)) + lines.append(("DWARF - ngx_http_request_t related DWARF DIEs", + self.has_ngx_http_request_t_DW)) + + return pline + lines diff --git a/scripts/explain_manifest/filelist.txt b/scripts/explain_manifest/filelist.txt new file mode 100644 index 00000000000..e1dd21756ed --- /dev/null +++ b/scripts/explain_manifest/filelist.txt @@ -0,0 +1,10 @@ +**/*.so +**/kong/lib/**.so* +**/kong/gui +**/kong/portal +**/kong/include/kong +**/kong/include/google +**/openresty/nginx/sbin/nginx +**/share/xml/xsd +/etc/kong/kong.logrotate +/lib/systemd/system/** 
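For orientation: the globs in filelist.txt above select which files scripts/explain_manifest/main.py describes, and the fixtures/*.txt manifests added below are the per-platform expected outputs it is diffed against. A minimal usage sketch, assuming a locally built package at /tmp/kong.amd64.deb (the package path is a placeholder; the flags come from main.py later in this patch and the suite name from config.py above):

    cd scripts/explain_manifest
    # print a fresh manifest for the package to stdout
    ./main.py --path /tmp/kong.amd64.deb --file_list filelist.txt --output -
    # or check the package against the expectations of one suite defined in config.py
    ./main.py --path /tmp/kong.amd64.deb --file_list filelist.txt --suite ubuntu-22.04-amd64

The exact CI invocation may differ; this sketch only shows how filelist.txt, config.py, and the fixture manifests fit together.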
diff --git a/scripts/explain_manifest/fixtures/alpine-amd64.txt b/scripts/explain_manifest/fixtures/alpine-amd64.txt new file mode 100644 index 00000000000..148e352515f --- /dev/null +++ b/scripts/explain_manifest/fixtures/alpine-amd64.txt @@ -0,0 +1,116 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libcrypto.so.1.1 + - libc.so + Rpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libcrypto.so.1.1 + - libc.so + Rpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + - libcrypto.so.1.1 + - libc.so + Rpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libc.so + Rpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libcrypto.so.1.1 + - libc.so + Rpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/lsyslog.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/mime/core.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so + +- Path : /usr/local/openresty/lualib/cjson.so + Needed : + - libc.so + +- Path : /usr/local/openresty/lualib/librestysignal.so + Needed : + - libc.so + +- Path : /usr/local/openresty/lualib/rds/parser.so + Needed : + - libc.so + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libluajit-5.1.so.2 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so + Rpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt new file mode 100644 index 00000000000..cb5d0045ac2 --- /dev/null +++ b/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt @@ -0,0 +1,135 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libstdc++.so.6 + - 
libm.so.6 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lsyslog.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/mime/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so.6 + +- Path : /usr/local/openresty/lualib/cjson.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/librestysignal.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/rds/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libdl.so.2 + - libpthread.so.0 + - libcrypt.so.1 + - libluajit-5.1.so.2 + - libm.so.6 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so.6 + Rpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt new file mode 100644 index 00000000000..01328af3a9a --- /dev/null +++ b/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt @@ -0,0 +1,128 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lsyslog.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/mime/core.so + Needed : + - libc.so.6 + +- 
Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so.6 + +- Path : /usr/local/openresty/lualib/cjson.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/librestysignal.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/rds/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libcrypt.so.2 + - libluajit-5.1.so.2 + - libm.so.6 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so.6 + Runpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/fixtures/debian-10-amd64.txt b/scripts/explain_manifest/fixtures/debian-10-amd64.txt new file mode 100644 index 00000000000..174773be772 --- /dev/null +++ b/scripts/explain_manifest/fixtures/debian-10-amd64.txt @@ -0,0 +1,135 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lsyslog.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/mime/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so.6 + +- Path : /usr/local/openresty/lualib/cjson.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/librestysignal.so + Needed : + - libc.so.6 + +- Path : 
/usr/local/openresty/lualib/rds/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libdl.so.2 + - libpthread.so.0 + - libcrypt.so.1 + - libluajit-5.1.so.2 + - libm.so.6 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so.6 + Runpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/fixtures/debian-11-amd64.txt b/scripts/explain_manifest/fixtures/debian-11-amd64.txt new file mode 100644 index 00000000000..9b20fbb595b --- /dev/null +++ b/scripts/explain_manifest/fixtures/debian-11-amd64.txt @@ -0,0 +1,126 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lsyslog.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + +- Path : /usr/local/lib/lua/5.1/mime/core.so + +- Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so.6 + +- Path : /usr/local/openresty/lualib/cjson.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/librestysignal.so + +- Path : /usr/local/openresty/lualib/rds/parser.so + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libdl.so.2 + - libpthread.so.0 + - libcrypt.so.1 + - libluajit-5.1.so.2 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so.6 + Runpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/fixtures/el7-amd64.txt b/scripts/explain_manifest/fixtures/el7-amd64.txt new file mode 
100644 index 00000000000..cb5d0045ac2 --- /dev/null +++ b/scripts/explain_manifest/fixtures/el7-amd64.txt @@ -0,0 +1,135 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lsyslog.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/mime/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so.6 + +- Path : /usr/local/openresty/lualib/cjson.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/librestysignal.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/rds/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libdl.so.2 + - libpthread.so.0 + - libcrypt.so.1 + - libluajit-5.1.so.2 + - libm.so.6 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so.6 + Rpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/fixtures/el8-amd64.txt b/scripts/explain_manifest/fixtures/el8-amd64.txt new file mode 100644 index 00000000000..cb5d0045ac2 --- /dev/null +++ b/scripts/explain_manifest/fixtures/el8-amd64.txt @@ -0,0 +1,135 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + 
- libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lsyslog.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/mime/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so.6 + +- Path : /usr/local/openresty/lualib/cjson.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/librestysignal.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/rds/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libdl.so.2 + - libpthread.so.0 + - libcrypt.so.1 + - libluajit-5.1.so.2 + - libm.so.6 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so.6 + Rpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt b/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt new file mode 100644 index 00000000000..4c29e30d397 --- /dev/null +++ b/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt @@ -0,0 +1,130 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libdl.so.2 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lsyslog.so 
+ Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + +- Path : /usr/local/lib/lua/5.1/mime/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so.6 + +- Path : /usr/local/openresty/lualib/cjson.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/librestysignal.so + +- Path : /usr/local/openresty/lualib/rds/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libdl.so.2 + - libpthread.so.0 + - libcrypt.so.1 + - libluajit-5.1.so.2 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so.6 + Runpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt b/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt new file mode 100644 index 00000000000..e736498f271 --- /dev/null +++ b/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt @@ -0,0 +1,123 @@ +- Path : /etc/kong/kong.logrotate + +- Path : /usr/local/kong/include/google + Type : directory + +- Path : /usr/local/kong/lib/engines-1.1/afalg.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/capi.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/engines-1.1/padlock.so + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libcrypto.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/kong/lib/libssl.so.1.1 + Needed : + - libstdc++.so.6 + - libm.so.6 + - libcrypto.so.1.1 + - libc.so.6 + Runpath : /usr/local/kong/lib + +- Path : /usr/local/lib/lua/5.1/lfs.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lpeg.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lsyslog.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_pack.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/lua_system_constants.so + +- Path : /usr/local/lib/lua/5.1/mime/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/pb.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/core.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/serial.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/socket/unix.so + Needed : + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/ssl.so + Needed : + - libssl.so.1.1 + - libcrypto.so.1.1 + - libc.so.6 + +- Path : /usr/local/lib/lua/5.1/yaml.so + Needed : + - libyaml-0.so.2 + - libc.so.6 + +- Path : 
/usr/local/openresty/lualib/cjson.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/librestysignal.so + +- Path : /usr/local/openresty/lualib/rds/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/lualib/redis/parser.so + Needed : + - libc.so.6 + +- Path : /usr/local/openresty/nginx/sbin/nginx + Needed : + - libcrypt.so.1 + - libluajit-5.1.so.2 + - libssl.so.1.1 + - libcrypto.so.1.1 + - libz.so.1 + - libc.so.6 + Runpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib + Modules : + - lua-kong-nginx-module + - lua-kong-nginx-module/stream + OpenSSL : OpenSSL 1.1.1o 3 May 2022 + DWARF : True + DWARF - ngx_http_request_t related DWARF DIEs: True + diff --git a/scripts/explain_manifest/main.py b/scripts/explain_manifest/main.py new file mode 100755 index 00000000000..44f9dcc00fc --- /dev/null +++ b/scripts/explain_manifest/main.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python3 + +import os +import sys +import glob +import time +import atexit +import difflib +import pathlib +import argparse +import tempfile +from io import StringIO +from typing import List +from pathlib import Path + +import config + +from explain import ExplainOpts, FileInfo, ElfFileInfo, NginxInfo +from expect import ExpectChain, glob_match_ignore_slash + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--path", "-p", help="Path to the directory, binary package or docker image tag to compare") + parser.add_argument( + "--image", help="Docker image tag to compare") + parser.add_argument( + "--output", "-o", help="Path to output manifest, use - to write to stdout") + parser.add_argument( + "--suite", "-s", help="Expect suite name to test, defined in config.py") + parser.add_argument( + "--file_list", "-f", help="Path to the files list to explain for manifest; " + + "each line in the file should be a glob pattern of full path") + parser.add_argument( + "--owners", help="Display owner and group", action="store_true") + parser.add_argument( + "--mode", help="Display mode", action="store_true") + parser.add_argument( + "--size", help="Display size", action="store_true") + parser.add_argument("--arch", + help="Display ELF architecture", action="store_true") + parser.add_argument("--merge_rpaths_runpaths", + help="Treate RPATH and RUNPATH as same", action="store_true") + parser.add_argument( + "--imported_symbols", help="Display imported symbols", action="store_true") + parser.add_argument( + "--exported_symbols", help="Display exported symbols", action="store_true") + parser.add_argument("--version_requirement", + help="Display exported symbols", + action="store_true") + + return parser.parse_args() + + +def read_glob(path: str): + if not path: + return ["**"] + + with open(path, "r") as f: + return f.read().splitlines() + +def gather_files(path: str, image: str): + if image: + t = tempfile.TemporaryDirectory() + atexit.register(t.cleanup) + + code = os.system("docker pull {img} && docker create --name={name} {img} && docker export {name} | tar xf - -C {tmp} && docker rm -f {name}".format( + img=image, + name="explain_manifest_%d" % time.time(), + tmp=t.name + )) + + if code != 0: + raise Exception("Failed to extract image %s" % image) + return t.name + + ext = os.path.splitext(path)[1] + if ext in (".deb", ".rpm") or path.endswith(".apk.tar.gz"): + t = tempfile.TemporaryDirectory() + atexit.register(t.cleanup) + + if ext == ".deb": + code = os.system( + "ar p %s data.tar.gz | tar -C %s -xz" % (path, t.name)) + elif ext == ".rpm": + # rpm2cpio is needed + # 
rpm2archive ships with rpm2cpio on debians + code = os.system( + """ + rpm2archive %s && tar -C %s -xf %s.tgz + """ % (path, t.name, path)) + elif ext == ".gz": + code = os.system("tar -C %s -xf %s" % (t.name, path)) + + if code != 0: + raise Exception("Failed to extract %s" % path) + + return t.name + elif not Path(path).is_dir(): + raise Exception("Don't know how to process \"%s\"" % path) + + return path + + +def walk_files(path: str, globs: List[str]): + results = [] + # use pathlib instead of glob.glob to avoid recurse into symlink dir + for file in sorted(pathlib.Path(path).rglob("*")): + full_path = str(file) + file = str(file.relative_to(path)) + + if globs and not glob_match_ignore_slash(file, globs): + continue + + if not file.startswith("/") and not file.startswith("./"): + file = '/' + file # prettifier + + if file.endswith("sbin/nginx"): + f = NginxInfo(full_path, file) + elif os.path.splitext(file)[1] == ".so" or os.path.basename(os.path.dirname(file)) in ("bin", "lib", "lib64", "sbin"): + p = Path(full_path) + if p.is_symlink(): + continue + f = ElfFileInfo(full_path, file) + else: + f = FileInfo(full_path, file) + + config.transform(f) + results.append(f) + + return results + + +def write_manifest(title: str, results: List[FileInfo], globs: List[str], opts: ExplainOpts): + f = StringIO() + + for result in results: + if not glob_match_ignore_slash(result.relpath, globs): + continue + + entries = result.explain(opts) + ident = 2 + first = True + for k, v in entries: + if isinstance(v, list): + v = ("\n" + " " * ident + "- ").join([""] + v) + else: + v = " %s" % v + if first: + f.write("-" + (" " * (ident-1))) + first = False + else: + f.write(" " * ident) + f.write("%-10s:%s\n" % (k, v)) + f.write("\n") + + f.flush() + + return f.getvalue().encode("utf-8") + + +if __name__ == "__main__": + args = parse_args() + + if not args.suite and not args.output: + raise Exception("At least one of --suite or --output is required") + + if not args.path and not args.image: + raise Exception("At least one of --path or --image is required") + + if args.image and os.getuid() != 0: + raise Exception("Running as root is required to explain an image") + + if args.path and Path(args.path).is_dir(): + raise Exception( + "suite mode only works with archive files (deb, rpm, apk.tar.gz, etc.") + + directory = gather_files(args.path, args.image) + + globs = read_glob(args.file_list) + + # filter by filelist only when explaining an image to reduce time + infos = walk_files(directory, globs=globs if args.image else None) + + if args.image: + title = "contents in image %s" % args.image + elif Path(args.path).is_file(): + title = "contents in archive %s" % args.path + else: + title = "contents in directory %s" % args.path + + manifest = write_manifest(title, infos, globs, ExplainOpts.from_args(args)) + + if args.suite: + if args.suite not in config.targets: + closest = difflib.get_close_matches( + config.targets.keys(), args.suite, 1) + maybe = "" + if closest: + maybe = ", maybe you meant %s" % closest[0] + raise Exception("Unknown suite %s%s" % (args.suite, maybe)) + E = ExpectChain(infos) + E.compare_manifest(config.targets[args.suite], manifest) + E.run(config.targets[args.suite]) + + if args.output: + if args.output == "-": + f = sys.stdout + manifest = manifest.decode("utf-8") + else: + f = open(args.output, "wb") + f.write(manifest) + if args.output != "-": + f.close() diff --git a/scripts/explain_manifest/requirements.txt b/scripts/explain_manifest/requirements.txt new file mode 100644 index 
00000000000..921dc8b3d14 --- /dev/null +++ b/scripts/explain_manifest/requirements.txt @@ -0,0 +1,4 @@ +lief==0.12.* +globmatch==2.0.* +pyelftools==0.29 +looseversion==1.1.2 diff --git a/scripts/explain_manifest/suites.py b/scripts/explain_manifest/suites.py new file mode 100644 index 00000000000..29fc4e7d141 --- /dev/null +++ b/scripts/explain_manifest/suites.py @@ -0,0 +1,128 @@ + +def read_requirements(path=None): + if not path: + path = os.path.join(os.path.dirname(__file__), "..", "..", ".requirements") + + with open(path, "r") as f: + lines = [re.findall("(.+)=([^# ]+)", d) for d in f.readlines()] + return {l[0][0]: l[0][1].strip() for l in lines if l} + +def common_suites(expect, libxcrypt_no_obsolete_api: bool = False): + # file existence + expect("/usr/local/kong/include/google/protobuf/**.proto", + "includes Google protobuf headers").exists() + + expect("/etc/kong/kong.conf.default", "includes default kong config").exists() + + expect("/etc/kong/kong.logrotate", "includes logrotate config").exists() + + expect("/usr/local/kong/include/openssl/**.h", "includes OpenSSL headers").exists() + + # binary correctness + expect("/usr/local/openresty/nginx/sbin/nginx", "nginx rpath should contain kong lib") \ + .rpath.equals("/usr/local/openresty/luajit/lib:/usr/local/kong/lib") + + expect("/usr/local/openresty/nginx/sbin/nginx", "nginx binary should contain dwarf info for dynatrace") \ + .has_dwarf_info.equals(True) \ + .has_ngx_http_request_t_DW.equals(True) + + expect("/usr/local/openresty/nginx/sbin/nginx", "nginx binary should link pcre statically") \ + .exported_symbols.contain("pcre_free") \ + .needed_libraries.do_not().contain_match("libpcre.so.+") + + expect("/usr/local/openresty/nginx/sbin/nginx", "nginx should not be compiled with debug flag") \ + .nginx_compile_flags.do_not().match("with\-debug") + + expect("/usr/local/openresty/nginx/sbin/nginx", "nginx should include Kong's patches") \ + .functions \ + .contain("ngx_http_lua_kong_ffi_set_grpc_authority") \ + .contain("ngx_http_lua_ffi_balancer_enable_keepalive") \ + .contain("ngx_http_lua_kong_ffi_get_full_client_certificate_chain") \ + .contain("ngx_http_lua_kong_ffi_disable_session_reuse") \ + .contain("ngx_http_lua_kong_ffi_set_upstream_client_cert_and_key") \ + .contain("ngx_http_lua_kong_ffi_set_upstream_ssl_trusted_store") \ + .contain("ngx_http_lua_kong_ffi_set_upstream_ssl_verify") \ + .contain("ngx_http_lua_kong_ffi_set_upstream_ssl_verify_depth") \ + .contain("ngx_http_lua_kong_ffi_var_get_by_index") \ + .contain("ngx_http_lua_kong_ffi_var_set_by_index") \ + .contain("ngx_http_lua_kong_ffi_var_load_indexes") + + expect("/usr/local/openresty/lualib/libatc_router.so", "ATC router so should have ffi module compiled") \ + .functions \ + .contain("router_execute") + + if libxcrypt_no_obsolete_api: + expect("/usr/local/openresty/nginx/sbin/nginx", "nginx linked with libxcrypt.so.2") \ + .needed_libraries.contain("libcrypt.so.2") + else: + expect("/usr/local/openresty/nginx/sbin/nginx", "nginx should link libxcrypt.so.1") \ + .needed_libraries.contain("libcrypto.so.1.1") + + expect("/usr/local/openresty/nginx/sbin/nginx", "nginx compiled with OpenSSL 1.1.x") \ + .nginx_compiled_openssl.matches("OpenSSL 1.1.\d") \ + .version_requirement.key("libssl.so.3").less_than("OPENSSL_3.2.0") \ + .version_requirement.key("libcrypto.so.3").less_than("OPENSSL_3.2.0") \ + + expect("**/*.so", "dynamic libraries are compiled with OpenSSL 3.1.x") \ + .version_requirement.key("libssl.so.3").less_than("OPENSSL_3.2.0") \ + 
.version_requirement.key("libcrypto.so.3").less_than("OPENSSL_3.2.0") \ + + +def libc_libcpp_suites(expect, libc_max_version: str = None, libcxx_max_version: str = None, cxxabi_max_version: str = None): + if libc_max_version: + expect("**/*.so", "libc version is less than %s" % libc_max_version) \ + .version_requirement.key("libc.so.6").is_not().greater_than("GLIBC_%s" % libc_max_version) \ + .version_requirement.key("libdl.so.2").is_not().greater_than("GLIBC_%s" % libc_max_version) \ + .version_requirement.key("libpthread.so.0").is_not().greater_than("GLIBC_%s" % libc_max_version) \ + .version_requirement.key("librt.so.1").is_not().greater_than("GLIBC_%s" % libc_max_version) \ + + if libcxx_max_version: + expect("**/*.so", "glibcxx version is less than %s" % libcxx_max_version) \ + .version_requirement.key("libstdc++.so.6").is_not().greater_than("GLIBCXX_%s" % libcxx_max_version) + + if cxxabi_max_version: + expect("**/*.so", "cxxabi version is less than %s" % cxxabi_max_version) \ + .version_requirement.key("libstdc++.so.6").is_not().greater_than("CXXABI_%s" % cxxabi_max_version) + + +def arm64_suites(expect): + expect("**/*/**.so*", "Dynamic libraries are arm64 arch") \ + .arch.equals("AARCH64") + + expect("/usr/local/openresty/nginx/sbin/nginx", "Nginx is arm64 arch") \ + .arch.equals("AARCH64") + +def docker_suites(expect): + kong_uid = 1000 + kong_gid = 1000 + + expect("/etc/passwd", "kong user exists") \ + .text_content.matches("kong:x:%d" % kong_uid) + + expect("/etc/group", "kong group exists") \ + .text_content.matches("kong:x:%d" % kong_gid) + + for path in ("/usr/local/kong/**", "/usr/local/bin/kong"): + expect(path, "%s owned by kong:root" % path) \ + .uid.equals(kong_uid) \ + .gid.equals(0) + + for path in ("/usr/local/bin/luarocks", + "/usr/local/etc/luarocks/**", + "/usr/local/lib/lua/**", + "/usr/local/lib/luarocks/**", + "/usr/local/openresty/**", + "/usr/local/share/lua/**"): + expect(path, "%s owned by kong:kong" % path) \ + .uid.equals(kong_uid) \ + .gid.equals(kong_gid) + + expect(( + "/etc/ssl/certs/ca-certificates.crt", #Debian/Ubuntu/Gentoo + "/etc/pki/tls/certs/ca-bundle.crt", #Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", #OpenSUSE + "/etc/pki/tls/cacert.pem", #OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", #CentOS/RHEL 7 + "/etc/ssl/cert.pem", #OpenBSD, Alpine + ), "ca-certiticates exists") \ + .exists() diff --git a/scripts/grep-kong-version.sh b/scripts/grep-kong-version.sh new file mode 100755 index 00000000000..ecc7d1c683f --- /dev/null +++ b/scripts/grep-kong-version.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# unofficial strict mode +set -euo pipefail + +kong_version=$(echo kong-*.rockspec | sed 's,.*/,,' | cut -d- -f2) + +if test -f "kong/enterprise_edition/meta.lua"; then + ee_patch=$(grep -o -E 'ee_patch[ \t]+=[ \t]+[0-9]+' kong/enterprise_edition/meta.lua | awk '{print $3}') + kong_version="$kong_version.$ee_patch" +fi + +echo "$kong_version" diff --git a/scripts/release-kong.sh b/scripts/release-kong.sh new file mode 100755 index 00000000000..9c0a4f1cd44 --- /dev/null +++ b/scripts/release-kong.sh @@ -0,0 +1,159 @@ +#!/usr/bin/env bash + +# This script is currently used by .github/workflows/release.yml to release Kong to Pulp. 
+set -eo pipefail + +source .requirements + +KONG_VERSION=$(bash scripts/grep-kong-version.sh) +KONG_RELEASE_LABEL=${KONG_RELEASE_LABEL:-$KONG_VERSION} + +PULP_HOST=${PULP_HOST:-"https://api.download-dev.konghq.com"} +PULP_USERNAME=${PULP_USERNAME:-"admin"} +PULP_PASSWORD=${PULP_PASSWORD:-} + +PULP_DOCKER_IMAGE="kong/release-script" + +# Variables used by the release script +ARCHITECTURE=${ARCHITECTURE:-amd64} +PACKAGE_TYPE=${PACKAGE_TYPE:-deb} +ARTIFACT_TYPE=${ARTIFACT_TYPE:-debian} + +ARTIFACT_PREFIX=${ARTIFACT_PREFIX:-"bazel-bin/pkg"} +ARTIFACT=${ARTIFACT:-"kong.deb"} +ARTIFACT_VERSION=${ARTIFACT_VERSION:-} + +KONG_ARTIFACT=$ARTIFACT_PREFIX/$ARTIFACT + +# Retries a command a configurable number of times with backoff. +# +# The retry count is given by ATTEMPTS (default 5), the initial backoff +# timeout is given by TIMEOUT in seconds (default 1.) +# +# Successive backoffs double the timeout. +function with_backoff { + local max_attempts=${ATTEMPTS-5} + local timeout=${TIMEOUT-5} + local attempt=1 + local exitCode=0 + + while (( $attempt < $max_attempts )) + do + if "$@" + then + return 0 + else + exitCode=$? + fi + + echo "Failure! Retrying in $timeout.." 1>&2 + sleep $timeout + attempt=$(( attempt + 1 )) + timeout=$(( timeout * 2 )) + done + + if [[ $exitCode != 0 ]] + then + echo "You've failed me for the last time! ($*)" 1>&2 + fi + + return $exitCode +} + +# TODO: remove this once we have a better way to determine if we are releasing +case "$ARTIFACT_TYPE" in + debian|ubuntu) + OUTPUT_FILE_SUFFIX=".$ARTIFACT_VERSION.$ARCHITECTURE.deb" + ;; + rhel) + OUTPUT_FILE_SUFFIX=".rhel$ARTIFACT_VERSION.$ARCHITECTURE.rpm" + ;; + alpine) + OUTPUT_FILE_SUFFIX=".$ARCHITECTURE.apk.tar.gz" + ;; + amazonlinux) + OUTPUT_FILE_SUFFIX=".aws.$ARCHITECTURE.rpm" + ;; + src) + OUTPUT_FILE_SUFFIX=".tar.gz" + ;; +esac + + +DIST_FILE="$KONG_PACKAGE_NAME-$KONG_RELEASE_LABEL$OUTPUT_FILE_SUFFIX" + +function push_package () { + + local dist_version="--dist-version $ARTIFACT_VERSION" + + # TODO: CE gateway-src + + if [ "$ARTIFACT_TYPE" == "alpine" ]; then + dist_version= + fi + + if [ "$ARTIFACT_VERSION" == "18.04" ]; then + dist_version="--dist-version bionic" + fi + if [ "$ARTIFACT_VERSION" == "20.04" ]; then + dist_version="--dist-version focal" + fi + if [ "$ARTIFACT_VERSION" == "22.04" ]; then + dist_version="--dist-version jammy" + fi + + # test for sanitized github actions input + if [[ -n "$(echo "$PACKAGE_TAGS" | tr -d 'a-zA-Z0-9._,')" ]]; then + echo 'invalid characters in PACKAGE_TAGS' + echo "passed to script: ${PACKAGE_TAGS}" + tags='' + else + tags="$PACKAGE_TAGS" + fi + + set -x + release_args='' + + if [ -n "${tags:-}" ]; then + release_args="${release_args} --tags ${tags}" + fi + + release_args="${release_args} --package-type gateway" + if [[ "$EDITION" == "enterprise" ]]; then + release_args="${release_args} --enterprise" + fi + + # pre-releases go to `/internal/` + if [[ "$OFFICIAL_RELEASE" == "true" ]]; then + release_args="${release_args} --publish" + else + release_args="${release_args} --internal" + fi + + docker run \ + -e PULP_HOST="$PULP_HOST" \ + -e PULP_USERNAME="$PULP_USERNAME" \ + -e PULP_PASSWORD="$PULP_PASSWORD" \ + -e VERBOSE \ + -e CLOUDSMITH_API_KEY \ + -e CLOUDSMITH_DRY_RUN \ + -e IGNORE_CLOUDSMITH_FAILURES \ + -e USE_CLOUDSMITH \ + -e USE_PULP \ + -v "$(pwd)/$KONG_ARTIFACT:/files/$DIST_FILE" \ + -i $PULP_DOCKER_IMAGE \ + --file "/files/$DIST_FILE" \ + --dist-name "$ARTIFACT_TYPE" $dist_version \ + --major-version "${KONG_VERSION%%.*}.x" \ + $release_args + + if [[ $? 
-ne 0 ]]; then + exit 1 + fi +} + +with_backoff push_package + +echo -e "\nReleasing Kong '$KONG_RELEASE_LABEL' of '$ARTIFACT_TYPE $ARTIFACT_VERSION' done" + +exit 0 diff --git a/scripts/upgrade-tests/docker-compose.yml b/scripts/upgrade-tests/docker-compose.yml new file mode 100644 index 00000000000..a127a91b011 --- /dev/null +++ b/scripts/upgrade-tests/docker-compose.yml @@ -0,0 +1,62 @@ +version: '3.5' +services: + + kong_old: + image: ${OLD_KONG_IMAGE} + command: "tail -f /dev/null" + user: root + depends_on: + - db_postgres + healthcheck: + test: ["CMD", "true"] + interval: 1s + timeout: 1s + retries: 10 + environment: + KONG_PG_HOST: db_postgres + KONG_TEST_PG_HOST: db_postgres + volumes: + - ../../worktree/${OLD_KONG_VERSION}:/kong + restart: on-failure + networks: + upgrade_tests: + + kong_new: + image: ${NEW_KONG_IMAGE} + command: "tail -f /dev/null" + user: root + depends_on: + - db_postgres + healthcheck: + test: ["CMD", "true"] + interval: 1s + timeout: 1s + retries: 10 + environment: + KONG_PG_HOST: db_postgres + KONG_TEST_PG_HOST: db_postgres + volumes: + - ../..:/kong + restart: on-failure + networks: + upgrade_tests: + + db_postgres: + image: postgres:9.5 + environment: + POSTGRES_DBS: kong,kong_tests + POSTGRES_USER: kong + POSTGRES_HOST_AUTH_METHOD: trust + healthcheck: + test: ["CMD", "pg_isready", "-U", "kong"] + interval: 5s + timeout: 10s + retries: 10 + restart: on-failure + stdin_open: true + tty: true + networks: + upgrade_tests: + +networks: + upgrade_tests: diff --git a/scripts/validate-rockspec b/scripts/validate-rockspec new file mode 100755 index 00000000000..71990c36f1f --- /dev/null +++ b/scripts/validate-rockspec @@ -0,0 +1,106 @@ +#!/usr/bin/env bash + +set -euo pipefail + +shopt -s nullglob + +fail() { + echo "Failure: $@" + exit 1 +} + +lint() { + local spec=$1 + + echo "Linting (luarocks)..." + + if ! luarocks lint "$spec"; then + fail "luarocks lint returned error" + fi + + echo "Linting (luacheck)..." + + # luacheck helps to point out some semantic issues (like duplicate + # table keys) + if ! luacheck \ + --quiet \ + --no-global \ + -- - \ + < "$spec"; + then + fail "luacheck returned error" + fi +} + +read_modules() { + local spec="$1" + resty -e ' + local fn = loadstring(io.stdin:read("*a")) + local rock = {} + setfenv(fn, rock) + fn() + + for mod, fname in pairs(rock.build.modules) do + print(fname .. "|" .. mod) + end + ' < "$spec" +} + +check_modules() { + local spec=$1 + local -A files=() + + echo "Checking modules..." + + local failed=0 + + for line in $(read_modules "$spec"); do + fname=${line%|*} + module=${line#*|} + + files[$fname]="$module" + + if [[ ! -f $fname ]]; then + : $(( failed++ )) + echo "Module ($module) file ($fname) is missing" + fi + done + + for fname in $(git ls-files 'kong/*.lua'); do + if [[ -z ${files[$fname]:-} ]]; then + : $(( failed++ )) + echo "File ($fname) not found in rockspec ($spec)" + fi + done + + if (( failed > 0 )); then + fail "rockspec build.modules is invalid" + fi +} + + +main() { + local files=(kong-*.rockspec) + local spec + + if (( ${#files[@]} == 0 )); then + fail "no rockspec file found" + + elif (( ${#files[@]} > 1 )); then + fail "multiple rockspec files found: ${files[*]}" + + else + spec=${files[0]} + fi + + echo "Found rockspec file to validate: $spec" + + lint "$spec" + + check_modules "$spec" + + echo "OK!" 
+} + + +main "$@" diff --git a/spec/02-integration/02-cmd/03-reload_spec.lua b/spec/02-integration/02-cmd/03-reload_spec.lua index 884ed2fddb4..7a2baa91346 100644 --- a/spec/02-integration/02-cmd/03-reload_spec.lua +++ b/spec/02-integration/02-cmd/03-reload_spec.lua @@ -95,7 +95,7 @@ describe("kong reload #" .. strategy, function() helpers.clean_prefix() end) after_each(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("send a 'reload' signal to a running Nginx master process", function() @@ -651,7 +651,7 @@ describe("key-auth plugin invalidation on dbless reload #off", function() finally(function() os.remove(yaml_file) - helpers.stop_kong(helpers.test_conf.prefix, true) + helpers.stop_kong(helpers.test_conf.prefix) if admin_client then admin_client:close() end diff --git a/spec/02-integration/02-cmd/12-hybrid_spec.lua b/spec/02-integration/02-cmd/12-hybrid_spec.lua index 745b16c6d6f..7e0fa61a8e0 100644 --- a/spec/02-integration/02-cmd/12-hybrid_spec.lua +++ b/spec/02-integration/02-cmd/12-hybrid_spec.lua @@ -62,6 +62,7 @@ describe("kong hybrid", function() local cert = helpers.test_conf.prefix .. "/test4.crt" local key = helpers.test_conf.prefix .. "/test4.key" + local time = ngx.time() local ok, _, stdout = helpers.kong_exec("hybrid gen_cert " .. cert .. " " .. key) assert.truthy(ok) assert.matches("Successfully generated certificate/key pairs, they have been written to: ", stdout, nil, true) @@ -69,13 +70,14 @@ describe("kong hybrid", function() local crt = x509.new(pl_file.read(cert)) assert.equals(crt:get_not_after() - crt:get_not_before(), 3 * 365 * 86400) - assert(crt:get_not_before() >= ngx.time()) + assert(crt:get_not_before() >= time) end) it("gen_cert cert days can be overwritten with -d", function() local cert = helpers.test_conf.prefix .. "/test5.crt" local key = helpers.test_conf.prefix .. "/test5.key" + local time = ngx.time() local ok, _, stdout = helpers.kong_exec("hybrid gen_cert -d 1 " .. cert .. " " .. key) assert.truthy(ok) assert.matches("Successfully generated certificate/key pairs, they have been written to: ", stdout, nil, true) @@ -83,13 +85,14 @@ describe("kong hybrid", function() local crt = x509.new(pl_file.read(cert)) assert.equals(crt:get_not_after() - crt:get_not_before(), 86400) - assert(crt:get_not_before() >= ngx.time()) + assert(crt:get_not_before() >= time) end) it("gen_cert cert days can be overwritten with --days", function() local cert = helpers.test_conf.prefix .. "/test6.crt" local key = helpers.test_conf.prefix .. "/test6.key" + local time = ngx.time() local ok, _, stdout = helpers.kong_exec("hybrid gen_cert --days 2 " .. cert .. " " .. key) assert.truthy(ok) assert.matches("Successfully generated certificate/key pairs, they have been written to: ", stdout, nil, true) @@ -97,7 +100,7 @@ describe("kong hybrid", function() local crt = x509.new(pl_file.read(cert)) assert.equals(crt:get_not_after() - crt:get_not_before(), 2 * 86400) - assert(crt:get_not_before() >= ngx.time()) + assert(crt:get_not_before() >= time) end) end) end) diff --git a/spec/02-integration/04-admin_api/03-consumers_routes_spec.lua b/spec/02-integration/04-admin_api/03-consumers_routes_spec.lua index ad683254eda..63b14ecd57a 100644 --- a/spec/02-integration/04-admin_api/03-consumers_routes_spec.lua +++ b/spec/02-integration/04-admin_api/03-consumers_routes_spec.lua @@ -46,7 +46,7 @@ describe("Admin API (#" .. strategy .. 
"): ", function() end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/04-admin_api/04-plugins_routes_spec.lua b/spec/02-integration/04-admin_api/04-plugins_routes_spec.lua index 549e259f8ad..d00d959abcb 100644 --- a/spec/02-integration/04-admin_api/04-plugins_routes_spec.lua +++ b/spec/02-integration/04-admin_api/04-plugins_routes_spec.lua @@ -24,7 +24,7 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/04-admin_api/09-routes_routes_spec.lua b/spec/02-integration/04-admin_api/09-routes_routes_spec.lua index 61bfab0301a..54c0ace061c 100644 --- a/spec/02-integration/04-admin_api/09-routes_routes_spec.lua +++ b/spec/02-integration/04-admin_api/09-routes_routes_spec.lua @@ -35,7 +35,7 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() @@ -1909,7 +1909,7 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/04-admin_api/10-services_routes_spec.lua b/spec/02-integration/04-admin_api/10-services_routes_spec.lua index e55e98004a1..4c4a114596f 100644 --- a/spec/02-integration/04-admin_api/10-services_routes_spec.lua +++ b/spec/02-integration/04-admin_api/10-services_routes_spec.lua @@ -35,7 +35,7 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/04-admin_api/15-off_spec.lua b/spec/02-integration/04-admin_api/15-off_spec.lua index cd4b4fed117..b8fcc5bc4d0 100644 --- a/spec/02-integration/04-admin_api/15-off_spec.lua +++ b/spec/02-integration/04-admin_api/15-off_spec.lua @@ -34,7 +34,7 @@ describe("Admin API #off", function() end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() @@ -870,7 +870,7 @@ describe("Admin API (concurrency tests) #off", function() end) after_each(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() if client then client:close() @@ -991,7 +991,7 @@ describe("Admin API #off with Unique Foreign #unique", function() end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() @@ -1123,7 +1123,7 @@ describe("Admin API #off worker_consistency=eventual", function() end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/04-admin_api/17-foreign-entity_spec.lua b/spec/02-integration/04-admin_api/17-foreign-entity_spec.lua index 928da9c6923..8481033b3bc 100644 --- a/spec/02-integration/04-admin_api/17-foreign-entity_spec.lua +++ b/spec/02-integration/04-admin_api/17-foreign-entity_spec.lua @@ -51,7 +51,7 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/04-admin_api/18-worker-events.lua b/spec/02-integration/04-admin_api/18-worker-events.lua index ce1a2d5e52f..122a644093c 100644 --- a/spec/02-integration/04-admin_api/18-worker-events.lua +++ b/spec/02-integration/04-admin_api/18-worker-events.lua @@ -55,7 +55,7 @@ for _, strategy 
in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/04-admin_api/19-vaults_spec.lua b/spec/02-integration/04-admin_api/19-vaults_spec.lua index bb27fed6d2a..664ca5864ae 100644 --- a/spec/02-integration/04-admin_api/19-vaults_spec.lua +++ b/spec/02-integration/04-admin_api/19-vaults_spec.lua @@ -21,7 +21,7 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/04-admin_api/21-truncated_arguments_spec.lua b/spec/02-integration/04-admin_api/21-truncated_arguments_spec.lua index 03d342edaf3..3a4071642b2 100644 --- a/spec/02-integration/04-admin_api/21-truncated_arguments_spec.lua +++ b/spec/02-integration/04-admin_api/21-truncated_arguments_spec.lua @@ -18,7 +18,7 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/05-proxy/04-plugins_triggering_spec.lua b/spec/02-integration/05-proxy/04-plugins_triggering_spec.lua index e0384f0c2e6..84b082e8916 100644 --- a/spec/02-integration/05-proxy/04-plugins_triggering_spec.lua +++ b/spec/02-integration/05-proxy/04-plugins_triggering_spec.lua @@ -231,7 +231,7 @@ for _, strategy in helpers.each_strategy() do lazy_teardown(function() if proxy_client then proxy_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("checks global configuration without credentials", function() @@ -312,7 +312,7 @@ for _, strategy in helpers.each_strategy() do proxy_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() db:truncate("routes") db:truncate("services") db:truncate("consumers") @@ -405,7 +405,7 @@ for _, strategy in helpers.each_strategy() do os.remove(FILE_LOG_PATH) - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() @@ -516,7 +516,7 @@ for _, strategy in helpers.each_strategy() do proxy_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() db:truncate("routes") db:truncate("services") @@ -563,7 +563,7 @@ for _, strategy in helpers.each_strategy() do proxy_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("runs without causing an internal error", function() @@ -712,7 +712,7 @@ for _, strategy in helpers.each_strategy() do lazy_teardown(function() helpers.stop_kong("servroot2") - helpers.stop_kong(nil, true) + helpers.stop_kong() end) @@ -1073,7 +1073,7 @@ for _, strategy in helpers.each_strategy() do proxy_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("is executed", function() @@ -1147,7 +1147,7 @@ for _, strategy in helpers.each_strategy() do admin_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("is executed", function() @@ -1227,7 +1227,7 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("certificate phase clears context, fix #7054", function() diff --git a/spec/02-integration/05-proxy/09-websockets_spec.lua b/spec/02-integration/05-proxy/09-websockets_spec.lua index b88b6b788f5..a70d8a4c585 100644 --- a/spec/02-integration/05-proxy/09-websockets_spec.lua +++ b/spec/02-integration/05-proxy/09-websockets_spec.lua @@ -42,7 +42,7 @@ for _, strategy in helpers.each_strategy() do end) 
lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) local function open_socket(uri) diff --git a/spec/02-integration/05-proxy/11-handler_spec.lua b/spec/02-integration/05-proxy/11-handler_spec.lua index fbd048b2a5b..ec374a65804 100644 --- a/spec/02-integration/05-proxy/11-handler_spec.lua +++ b/spec/02-integration/05-proxy/11-handler_spec.lua @@ -43,7 +43,7 @@ for _, strategy in helpers.each_strategy() do lazy_teardown(function() if admin_client then admin_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("runs", function() @@ -101,7 +101,7 @@ for _, strategy in helpers.each_strategy() do lazy_teardown(function() if admin_client then admin_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("doesn't run", function() @@ -175,7 +175,7 @@ for _, strategy in helpers.each_strategy() do lazy_teardown(function() if admin_client then admin_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("doesn't run", function() diff --git a/spec/02-integration/05-proxy/13-error_handlers_spec.lua b/spec/02-integration/05-proxy/13-error_handlers_spec.lua index d63f43daf9d..fd78481ee1c 100644 --- a/spec/02-integration/05-proxy/13-error_handlers_spec.lua +++ b/spec/02-integration/05-proxy/13-error_handlers_spec.lua @@ -11,7 +11,7 @@ describe("Proxy error handlers", function() end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() diff --git a/spec/02-integration/05-proxy/25-upstream_keepalive_spec.lua b/spec/02-integration/05-proxy/25-upstream_keepalive_spec.lua index 1848d9d4eb6..eda46e12408 100644 --- a/spec/02-integration/05-proxy/25-upstream_keepalive_spec.lua +++ b/spec/02-integration/05-proxy/25-upstream_keepalive_spec.lua @@ -104,7 +104,7 @@ describe("#postgres upstream keepalive", function() proxy_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) diff --git a/spec/02-integration/06-invalidations/02-core_entities_invalidations_spec.lua b/spec/02-integration/06-invalidations/02-core_entities_invalidations_spec.lua index c570665dc5e..eb670e8f375 100644 --- a/spec/02-integration/06-invalidations/02-core_entities_invalidations_spec.lua +++ b/spec/02-integration/06-invalidations/02-core_entities_invalidations_spec.lua @@ -88,8 +88,8 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong("servroot1", true) - helpers.stop_kong("servroot2", true) + helpers.stop_kong("servroot1") + helpers.stop_kong("servroot2") end) before_each(function() @@ -1110,8 +1110,8 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong("servroot1", true) - helpers.stop_kong("servroot2", true) + helpers.stop_kong("servroot1") + helpers.stop_kong("servroot2") end) before_each(function() @@ -1256,8 +1256,8 @@ for _, strategy in helpers.each_strategy() do end) lazy_teardown(function() - helpers.stop_kong("servroot1", true) - helpers.stop_kong("servroot2", true) + helpers.stop_kong("servroot1") + helpers.stop_kong("servroot2") end) before_each(function() diff --git a/spec/02-integration/11-dbless/01-respawn_spec.lua b/spec/02-integration/11-dbless/01-respawn_spec.lua index 2c2065f31fe..7fdf287e0f3 100644 --- a/spec/02-integration/11-dbless/01-respawn_spec.lua +++ b/spec/02-integration/11-dbless/01-respawn_spec.lua @@ -13,7 +13,7 @@ describe("worker respawn", function() end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) 
before_each(function() diff --git a/spec/02-integration/11-dbless/02-workers_spec.lua b/spec/02-integration/11-dbless/02-workers_spec.lua index f45ba0944de..0b1c77a0951 100644 --- a/spec/02-integration/11-dbless/02-workers_spec.lua +++ b/spec/02-integration/11-dbless/02-workers_spec.lua @@ -29,7 +29,7 @@ describe("Workers initialization #off", function() lazy_teardown(function() admin_client:close() proxy_client:close() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("restarts worker correctly without issues on the init_worker phase when config includes 1000+ plugins", function() diff --git a/spec/03-plugins/09-key-auth/02-access_spec.lua b/spec/03-plugins/09-key-auth/02-access_spec.lua index 4ba12da2306..57e277de487 100644 --- a/spec/03-plugins/09-key-auth/02-access_spec.lua +++ b/spec/03-plugins/09-key-auth/02-access_spec.lua @@ -51,8 +51,8 @@ for _, strategy in helpers.each_strategy() do local service7 = bp.services:insert{ protocol = "http", - port = 80, - host = "mockbin.com", + port = helpers.mock_upstream_port, + host = "localhost", } local route7 = bp.routes:insert { diff --git a/spec/03-plugins/19-hmac-auth/04-invalidations_spec.lua b/spec/03-plugins/19-hmac-auth/04-invalidations_spec.lua index f9eb0f21af1..b552fc96cf6 100644 --- a/spec/03-plugins/19-hmac-auth/04-invalidations_spec.lua +++ b/spec/03-plugins/19-hmac-auth/04-invalidations_spec.lua @@ -58,7 +58,7 @@ for _, strategy in helpers.each_strategy() do admin_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) local function hmac_sha1_binary(secret, data) diff --git a/spec/03-plugins/20-ldap-auth/02-invalidations_spec.lua b/spec/03-plugins/20-ldap-auth/02-invalidations_spec.lua index 27bf8764b54..41ce53dc883 100644 --- a/spec/03-plugins/20-ldap-auth/02-invalidations_spec.lua +++ b/spec/03-plugins/20-ldap-auth/02-invalidations_spec.lua @@ -64,7 +64,7 @@ for _, ldap_strategy in pairs(ldap_strategies) do end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) local function cache_key(conf, username, password) diff --git a/spec/03-plugins/23-rate-limiting/03-api_spec.lua b/spec/03-plugins/23-rate-limiting/03-api_spec.lua index f6bc24ab122..084b735718a 100644 --- a/spec/03-plugins/23-rate-limiting/03-api_spec.lua +++ b/spec/03-plugins/23-rate-limiting/03-api_spec.lua @@ -21,7 +21,7 @@ for _, strategy in helpers.each_strategy() do admin_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) describe("POST", function() diff --git a/spec/03-plugins/24-response-rate-limiting/04-access_spec.lua b/spec/03-plugins/24-response-rate-limiting/04-access_spec.lua index e1e8af62ee4..703a2f0c33e 100644 --- a/spec/03-plugins/24-response-rate-limiting/04-access_spec.lua +++ b/spec/03-plugins/24-response-rate-limiting/04-access_spec.lua @@ -329,7 +329,7 @@ describe(fmt("#flaky Plugin: response-ratelimiting (access) with policy: #%s [#% end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) describe("Without authentication (IP address)", function() @@ -567,7 +567,7 @@ describe(fmt("#flaky Plugin: response-ratelimiting (expirations) with policy: #% end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("expires a counter", function() @@ -638,7 +638,7 @@ describe(fmt("#flaky Plugin: response-ratelimiting (access - global for single c end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("blocks when the consumer exceeds their quota, no matter what 
service/route used", function() @@ -675,7 +675,7 @@ describe(fmt("#flaky Plugin: response-ratelimiting (access - global) with policy end) lazy_teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) before_each(function() @@ -753,7 +753,7 @@ describe(fmt("#flaky Plugin: response-ratelimiting (fault tolerance) with policy end) after_each(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("does not work if an error occurs", function() @@ -848,7 +848,7 @@ describe(fmt("#flaky Plugin: response-ratelimiting (fault tolerance) with policy end) after_each(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("does not work if an error occurs", function() diff --git a/spec/03-plugins/31-proxy-cache/02-access_spec.lua b/spec/03-plugins/31-proxy-cache/02-access_spec.lua index 3c917bdc26a..4ba6f319dc0 100644 --- a/spec/03-plugins/31-proxy-cache/02-access_spec.lua +++ b/spec/03-plugins/31-proxy-cache/02-access_spec.lua @@ -270,7 +270,7 @@ do admin_client:close() end - helpers.stop_kong(nil, true) + helpers.stop_kong() end) it("caches a simple request", function() diff --git a/spec/03-plugins/31-proxy-cache/03-api_spec.lua b/spec/03-plugins/31-proxy-cache/03-api_spec.lua index 684e7733400..afa42372430 100644 --- a/spec/03-plugins/31-proxy-cache/03-api_spec.lua +++ b/spec/03-plugins/31-proxy-cache/03-api_spec.lua @@ -64,7 +64,7 @@ describe("Plugin: proxy-cache", function() end) teardown(function() - helpers.stop_kong(nil, true) + helpers.stop_kong() end) describe("(schema)", function() diff --git a/spec/03-plugins/31-proxy-cache/04-invalidations_spec.lua b/spec/03-plugins/31-proxy-cache/04-invalidations_spec.lua index ed2085b6635..d44f23405a8 100644 --- a/spec/03-plugins/31-proxy-cache/04-invalidations_spec.lua +++ b/spec/03-plugins/31-proxy-cache/04-invalidations_spec.lua @@ -92,8 +92,8 @@ describe("proxy-cache invalidations via: " .. strategy, function() end) teardown(function() - helpers.stop_kong("servroot1", true) - helpers.stop_kong("servroot2", true) + helpers.stop_kong("servroot1") + helpers.stop_kong("servroot2") end) before_each(function() diff --git a/spec/03-plugins/34-zipkin/zipkin_spec.lua b/spec/03-plugins/34-zipkin/zipkin_spec.lua index 151f6702beb..86e31dcd2f3 100644 --- a/spec/03-plugins/34-zipkin/zipkin_spec.lua +++ b/spec/03-plugins/34-zipkin/zipkin_spec.lua @@ -53,6 +53,7 @@ local function wait_for_spans(zipkin_client, number_of_spans, remoteServiceName, local spans = {} helpers.wait_until(function() if trace_id then + ngx.sleep(1) local res = assert(zipkin_client:get("/api/v2/trace/" .. 
trace_id)) spans = cjson.decode(assert.response(res).has.status(200)) return #spans == number_of_spans From 68ea0077b73839fc704dc88bcdf5ae7608432d8a Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sun, 23 Jun 2024 11:38:08 +0800 Subject: [PATCH 2/2] docs(release): genereate 2.8.5 changelog --- changelog/2.8.5/2.8.5.md | 20 +++++++++++++++++++ changelog/2.8.5/kong-manager/.gitkeep | 0 changelog/2.8.5/kong/.gitkeep | 0 .../kong/add_zlib1g-dev.yml | 0 .../{unreleased => 2.8.5}/kong/fix_hash.yml | 0 changelog/unreleased/kong-manager/.gitkeep | 0 changelog/unreleased/kong/.gitkeep | 0 7 files changed, 20 insertions(+) create mode 100644 changelog/2.8.5/2.8.5.md create mode 100644 changelog/2.8.5/kong-manager/.gitkeep create mode 100644 changelog/2.8.5/kong/.gitkeep rename changelog/{unreleased => 2.8.5}/kong/add_zlib1g-dev.yml (100%) rename changelog/{unreleased => 2.8.5}/kong/fix_hash.yml (100%) create mode 100644 changelog/unreleased/kong-manager/.gitkeep create mode 100644 changelog/unreleased/kong/.gitkeep diff --git a/changelog/2.8.5/2.8.5.md b/changelog/2.8.5/2.8.5.md new file mode 100644 index 00000000000..48d4ec4a9e0 --- /dev/null +++ b/changelog/2.8.5/2.8.5.md @@ -0,0 +1,20 @@ +## Kong + + +### Performance +#### Performance + +- Fixed an inefficiency issue in the Luajit hashing algorithm + [#13269](https://github.com/Kong/kong/issues/13269) + [KAG-4786](https://konghq.atlassian.net/browse/KAG-4786) + + + + + +### Fixes +#### Default + +- Added zlib1g-dev dependency to Ubuntu packages. + [#13269](https://github.com/Kong/kong/issues/13269) + [KAG-4786](https://konghq.atlassian.net/browse/KAG-4786) diff --git a/changelog/2.8.5/kong-manager/.gitkeep b/changelog/2.8.5/kong-manager/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/2.8.5/kong/.gitkeep b/changelog/2.8.5/kong/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/unreleased/kong/add_zlib1g-dev.yml b/changelog/2.8.5/kong/add_zlib1g-dev.yml similarity index 100% rename from changelog/unreleased/kong/add_zlib1g-dev.yml rename to changelog/2.8.5/kong/add_zlib1g-dev.yml diff --git a/changelog/unreleased/kong/fix_hash.yml b/changelog/2.8.5/kong/fix_hash.yml similarity index 100% rename from changelog/unreleased/kong/fix_hash.yml rename to changelog/2.8.5/kong/fix_hash.yml diff --git a/changelog/unreleased/kong-manager/.gitkeep b/changelog/unreleased/kong-manager/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/unreleased/kong/.gitkeep b/changelog/unreleased/kong/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d
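
The new scripts/explain_manifest tooling introduced above can be exercised locally against a built package. A minimal sketch, assuming a .deb produced under the default bazel-bin/pkg path and a suite name matching the fixture files added in this patch (both the artifact path and the suite name are assumptions, not part of the patch itself):

    pip install -r scripts/explain_manifest/requirements.txt
    python3 scripts/explain_manifest/main.py \
        -p bazel-bin/pkg/kong.deb \
        -s ubuntu-22.04-amd64 \
        -o -

Passing -o - prints the generated manifest to stdout, while -s runs the expectation suite defined in config.py; main.py requires at least one of the two and, in suite mode, expects an archive file rather than an unpacked directory.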