diff --git a/Makefile.toml b/Makefile.toml
index 0ccc1255c7e..e6561e321b4 100644
--- a/Makefile.toml
+++ b/Makefile.toml
@@ -3,19 +3,38 @@ skip_core_tasks = true
 
 [env]
 BUILDSYS_ROOT_DIR = "${CARGO_MAKE_WORKING_DIRECTORY}"
-# The build tool, Twoliter, needs to be installed to run a Bottlerocket build.
-# If you need to build in an offline environment, first you should run `cargo
-# make fetch` in an online environment. Subsequently in the offline environment
-# you need to set this to true. `cargo make -e TWOLITER_SKIP_INSTALL=true build`
-TWOLITER_SKIP_INSTALL = "false"
-# TODO - replace with crates.io version when published
-TWOLITER_GIT = "https://github.com/bottlerocket-os/twoliter"
-TWOLITER_REV = "v0.0.2"
+
+# Skip installing Twoliter if it is already installed and its version
+# matches the requested version.
+TWOLITER_REUSE_EXISTING_INSTALL = "true"
+
+# Allow Twoliter to be installed from a binary distribution if binaries
+# are expected to exist for the host OS and architecture.
+TWOLITER_ALLOW_BINARY_INSTALL = "true"
+
+# Allow Twoliter to be installed by building from source code.
+TWOLITER_ALLOW_SOURCE_INSTALL = "true"
+
+# Where Twoliter will be installed.
+TWOLITER_INSTALL_DIR = "${BUILDSYS_ROOT_DIR}/tools/twoliter"
+
+# For binary installation, this should be a released version (prefixed with a v,
+# for example v0.1.0). For the git source code installation method, this can be
+# any git rev, e.g. a tag, SHA, or branch name.
+TWOLITER_VERSION = "v0.0.3"
+
+# For the binary installation method, this is the GitHub repository that has
+# binary release artifacts attached to it, for example
+# https://github.com/bottlerocket-os/twoliter. For the git source code
+# installation method, this is any URI that can be used in a git clone command.
+TWOLITER_REPO = "https://github.com/bottlerocket-os/twoliter"
+
+# The logging verbosity for Twoliter: error, warn, info, debug, trace
 TWOLITER_LOG_LEVEL = "info"
-TWOLITER_INSTALL_ROOT = "${BUILDSYS_ROOT_DIR}/tools"
-TWOLITER_BIN_DIR = "${TWOLITER_INSTALL_ROOT}/bin"
-TWOLITER = "${TWOLITER_BIN_DIR}/twoliter"
+
+# The project file that configures Twoliter.
 TWOLITER_PROJECT = "${BUILDSYS_ROOT_DIR}/Twoliter.toml"
+
 BUILDSYS_ARCH = { script = ['echo "${BUILDSYS_ARCH:-$(uname -m)}"'] }
 BUILDSYS_BUILD_DIR = "${BUILDSYS_ROOT_DIR}/build"
 BUILDSYS_PACKAGES_DIR = "${BUILDSYS_BUILD_DIR}/rpms"
@@ -150,6 +169,8 @@ TESTSYS_TEST_CONFIG_PATH = "${BUILDSYS_ROOT_DIR}/Test.toml"
 
 # Certain variables are defined here to allow us to override a component value
 # on the command line.
+TWOLITER = "${TWOLITER_INSTALL_DIR}/twoliter"
+
 # Depends on ${BUILDSYS_ARCH}, ${BUILDSYS_REGISTRY}, ${BUILDSYS_SDK_NAME}, and
 # ${BUILDSYS_SDK_VERSION}.
 BUILDSYS_SDK_IMAGE = { script = [ "echo ${BUILDSYS_REGISTRY}/${BUILDSYS_SDK_NAME}-sdk-${BUILDSYS_ARCH}:${BUILDSYS_SDK_VERSION}" ] }
@@ -250,19 +271,25 @@ fi
 
 script_runner = "bash"
 script = [
 '''
-if [ "${TWOLITER_SKIP_INSTALL}" = "true" ]; then
-  echo "TWOLITER_SKIP_INSTALL=${TWOLITER_SKIP_INSTALL}"
-  echo "Skipping Twoliter installation"
-  exit 0
+declare -a flags
+
+if [ "${TWOLITER_REUSE_EXISTING_INSTALL}" = "true" ]; then
+  flags+=("--reuse-existing-install")
+fi
+
+if [ "${TWOLITER_ALLOW_BINARY_INSTALL}" = "true" ]; then
+  flags+=("--allow-binary-install")
+fi
+
+if [ "${TWOLITER_ALLOW_SOURCE_INSTALL}" = "true" ]; then
+  flags+=("--allow-from-source")
 fi
 
-cargo install \
-  --locked \
-  --root "${TWOLITER_INSTALL_ROOT}" \
-  --quiet \
-  --git "${TWOLITER_GIT}" \
-  --rev "${TWOLITER_REV}" \
-  twoliter
+"${BUILDSYS_TOOLS_DIR}/install-twoliter.sh" \
+  --repo "${TWOLITER_REPO}" \
+  --version "${TWOLITER_VERSION}" \
+  --directory "${TWOLITER_INSTALL_DIR}" \
+  "${flags[@]}"
 '''
 ]
@@ -279,6 +306,17 @@ args = [
 "${@}",
 ]
 
+[tasks.deprecated]
+dependencies = ["install-twoliter"]
+script_runner = "bash"
+script = [
+'''
+echo "The '${CARGO_MAKE_TASK}' task is deprecated."
+echo "All it does is ensure that Twoliter is installed."
+echo "You should do this with 'cargo make install-twoliter' instead."
+'''
+]
+
 [tasks.setup]
 run_task = "run-twoliter"
 
@@ -326,18 +364,15 @@ run_task = "run-twoliter"
 
 [tasks.build-tools]
-run_task = "run-twoliter"
+run_task = "deprecated"
 
 # Note: this is separate from publish-tools because publish-tools takes a while
 # to build and isn't needed to build an image.
 [tasks.publish-setup-tools]
-run_task = "run-twoliter"
-
-[tasks.infra-tools]
-run_task = "run-twoliter"
+run_task = "deprecated"
 
 [tasks.publish-tools]
-run_task = "run-twoliter"
+run_task = "deprecated"
 
 [tasks.build-sbkeys]
 run_task = "run-twoliter"
 
@@ -370,10 +405,7 @@ run_task = "run-twoliter"
 run_task = "run-twoliter"
 
 [tasks.tuftool]
-run_task = "run-twoliter"
-
-[tasks.create-infra]
-run_task = "run-twoliter"
+run_task = "deprecated"
 
 [tasks.publish-setup]
 run_task = "run-twoliter"
@@ -472,7 +504,7 @@ run_task = "run-twoliter"
 run_task = "run-twoliter"
 
 [tasks.test-tools]
-run_task = "run-twoliter"
+run_task = "deprecated"
 
 [tasks.setup-test]
 run_task = "run-twoliter"
diff --git a/tools/.gitignore b/tools/.gitignore
index d3ceb7fc84a..d745034ed23 100644
--- a/tools/.gitignore
+++ b/tools/.gitignore
@@ -1,3 +1,4 @@
 /bin
+/twoliter
 /.crates.toml
 /.crates2.json
diff --git a/tools/Cargo.lock b/tools/Cargo.lock
deleted file mode 100644
index 7ac2c9d2498..00000000000
--- a/tools/Cargo.lock
+++ /dev/null
@@ -1,3987 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
[… 3,985 further deleted lines of auto-generated Cargo.lock content omitted …]
"fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_users" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" -dependencies = [ - "getrandom", - "redox_syscall 0.2.16", - "thiserror", -] - -[[package]] -name = "regex" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" - -[[package]] -name = "reqwest" -version = "0.11.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" -dependencies = [ - "base64 0.21.2", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-rustls 0.24.1", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.6", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-rustls 0.24.1", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots", - "winreg", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "rustix" -version = "0.38.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" -dependencies = [ - "bitflags 2.4.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustls" -version = "0.20.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" -dependencies = [ - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls" -version = "0.21.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" -dependencies = [ - "log", - "ring", - "rustls-webpki", - "sct", -] - -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" -dependencies = [ - "base64 0.21.2", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "ryu" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "schannel" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" -dependencies = [ - "windows-sys 0.48.0", -] - -[[package]] -name = "schemars" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847b767a3d62d95cbf3d8a9f0e421cf57a0d8aa4f411d4b16525afb0284d4ed" -dependencies = [ - "dyn-clone", - "schemars_derive", - "serde", - "serde_json", -] - -[[package]] -name = "schemars_derive" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4d7e1b012cb3d9129567661a63755ea4b8a7386d339dc945ae187e403c6743" -dependencies = [ - "proc-macro2", - "quote", - "serde_derive_internals", - "syn 1.0.109", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "sct" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "secrecy" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" -dependencies = [ - "serde", - "zeroize", -] - -[[package]] -name = "security-framework" -version = "2.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "1.0.18" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" -dependencies = [ - "serde", -] - -[[package]] -name = "serde" -version = "1.0.185" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be9b6f69f1dfd54c3b568ffa45c310d6973a5e5148fd40cf515acaf38cf5bc31" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-value" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" -dependencies = [ - "ordered-float", - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.185" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc59dfdcbad1437773485e0367fea4b090a2e0a16d9ffc46af47764536a298ec" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "serde_derive_internals" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "serde_json" -version = "1.0.97" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" -dependencies = [ - "indexmap", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_plain" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6018081315db179d0ce57b1fe4b62a12a0028c9cf9bbef868c9cf477b3c34ae" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_yaml" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" -dependencies = [ - "indexmap", - "ryu", - "serde", - "yaml-rust", -] - -[[package]] -name = "serde_yaml" -version = "0.9.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" -dependencies = [ - "indexmap", - "itoa", - "ryu", - "serde", - "unsafe-libyaml", -] - -[[package]] -name = "sha1" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - -[[package]] -name = "sha2" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - -[[package]] -name = "shared_child" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d94659ad3c2137fef23ae75b03d5241d633f8acded53d672decfa0e6e0caef" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "shell-words" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - -[[package]] -name = "signal-hook-registry" -version = 
"1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" -dependencies = [ - "libc", -] - -[[package]] -name = "simplelog" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acee08041c5de3d5048c8b3f6f13fafb3026b24ba43c6a695a0c76179b844369" -dependencies = [ - "log", - "termcolor", - "time", -] - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "smallvec" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" - -[[package]] -name = "snafu" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" -dependencies = [ - "backtrace", - "doc-comment", - "snafu-derive", -] - -[[package]] -name = "snafu-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "subtle" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tabled" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c3ee73732ffceaea7b8f6b719ce3bb17f253fa27461ffeaf568ebd0cdb4b85" -dependencies = [ - "papergrid", - "tabled_derive", - "unicode-width", -] - -[[package]] -name = "tabled_derive" -version = "0.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "beca1b4eaceb4f2755df858b88d9b9315b7ccfd1ffd0d7a48a52602301f01a57" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "tempfile" -version = "3.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" -dependencies = [ - "cfg-if", - "fastrand 2.0.0", - "redox_syscall 0.3.5", - "rustix", - "windows-sys 0.48.0", -] - -[[package]] -name = "term_size" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e4129646ca0ed8f45d09b929036bafad5377103edd06e50bf574b353d2b08d9" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "termcolor" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "testsys" -version = "0.1.0" -dependencies = [ - "async-trait", - "aws-config", - "aws-sdk-ec2", - "base64 0.20.0", - "bottlerocket-types", - "bottlerocket-variant", - "clap 4.3.23", - "env_logger", - "fastrand 1.9.0", - "futures", - "handlebars", - "log", - "maplit", - "pubsys-config", - "serde", - "serde_json", - "serde_plain", - "serde_yaml 0.9.21", - "snafu", - "term_size", - "testsys-config", - "testsys-model", - "tokio", - "unescape", - "url", -] - -[[package]] -name = "testsys-config" -version = "0.1.0" -dependencies = [ - "bottlerocket-types", - "bottlerocket-variant", - "handlebars", - "log", - "maplit", - "serde", - "serde_plain", - "serde_yaml 0.9.21", - "snafu", - "testsys-model", - "toml", -] - -[[package]] -name = "testsys-model" -version = "0.0.9" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" -dependencies = [ - "async-recursion", - "async-trait", - "base64 0.20.0", - "bytes", - "chrono", - "futures", - "http", - "json-patch", - "k8s-openapi", - "kube", - "lazy_static", - "log", - "maplit", - "regex", - "schemars", - "serde", - "serde_json", - "serde_plain", - "serde_yaml 0.8.26", - "snafu", - "tabled", - "tokio", - "tokio-util", - "topological-sort", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "time" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" -dependencies = [ - "deranged", - "itoa", - "libc", - "num_threads", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" - -[[package]] -name = "time-macros" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" -dependencies = [ - "time-core", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "1.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" -dependencies = [ - "autocfg", - "backtrace", - "bytes", - "libc", - "mio", - "num_cpus", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "tokio-rustls" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" -dependencies = [ - "rustls 0.20.8", - "tokio", - "webpki", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.6", - "tokio", -] - -[[package]] -name = "tokio-stream" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite", -] - -[[package]] -name = "tokio-util" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - 
-[[package]] -name = "topological-sort" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea68304e134ecd095ac6c3574494fc62b909f416c4fca77e440530221e549d3d" - -[[package]] -name = "tough" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda3efa9005cf9c1966984c3b9a44c3f37b7ed2c95ba338d6ad51bba70e989a0" -dependencies = [ - "chrono", - "dyn-clone", - "globset", - "hex", - "log", - "olpc-cjson", - "path-absolutize", - "pem", - "percent-encoding", - "reqwest", - "ring", - "serde", - "serde_json", - "serde_plain", - "snafu", - "tempfile", - "untrusted", - "url", - "walkdir", -] - -[[package]] -name = "tough-kms" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc49c1a5300e54484604162ec78417fc39306f0c9e2c98166df3ebfa203d6800" -dependencies = [ - "aws-config", - "aws-sdk-kms", - "pem", - "ring", - "snafu", - "tokio", - "tough", -] - -[[package]] -name = "tough-ssm" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf4932265842607b42840e65f3fde9dde2834eaa97209b994d6c1a7ff9f3fd7" -dependencies = [ - "aws-config", - "aws-sdk-ssm", - "snafu", - "tokio", - "tough", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tokio-util", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-http" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" -dependencies = [ - "base64 0.21.2", - "bitflags 2.4.0", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "mime", - "pin-project-lite", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" -dependencies = [ - "cfg-if", - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "tracing-core" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" -dependencies = [ - "once_cell", -] - -[[package]] -name = "treediff" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52984d277bdf2a751072b5df30ec0377febdb02f7696d64c2d7d54630bac4303" -dependencies = [ - "serde_json", -] - -[[package]] -name = 
"try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] -name = "tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand", - "sha1", - "thiserror", - "url", - "utf-8", -] - -[[package]] -name = "typenum" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - -[[package]] -name = "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - -[[package]] -name = "unescape" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" - -[[package]] -name = "unicode-bidi" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" - -[[package]] -name = "unicode-ident" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "unsafe-libyaml" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "update_metadata" -version = "0.1.0" -dependencies = [ - "chrono", - "parse-datetime", - "regex", - "semver", - "serde", - "serde_json", - "serde_plain", - "snafu", - "toml", -] - -[[package]] -name = "url" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", - "serde", -] - -[[package]] -name = "urlencoding" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" - -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - -[[package]] -name = "utf8parse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "vsimd" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" - -[[package]] -name = "walkdir" -version = "2.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.29", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" - -[[package]] -name = "web-sys" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.25.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - -[[package]] -name = "xmlparser" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd" - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "zeroize" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" diff --git a/tools/Cargo.toml b/tools/Cargo.toml deleted file mode 100644 index e6efc568b7e..00000000000 --- a/tools/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[workspace] -resolver = "1" -members = [ - "infrasys", - "buildsys", - "pubsys", - "pubsys-config", - "pubsys-setup", - "testsys", 
- "testsys-config", -] diff --git a/tools/buildsys/.gitignore b/tools/buildsys/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml deleted file mode 100644 index 7f882cad3f9..00000000000 --- a/tools/buildsys/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "buildsys" -version = "0.1.0" -authors = ["Ben Cressey "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false -# Don't rebuild crate just because of changes to README. -exclude = ["README.md"] - -[dependencies] -bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } -duct = "0.13" -hex = "0.4" -lazy_static = "1" -rand = { version = "0.8", default-features = false, features = ["std", "std_rng"] } -regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "blocking"] } -serde = { version = "1", features = ["derive"] } -serde_plain = "1" -sha2 = "0.10" -snafu = "0.7" -toml = "0.5" -url = { version = "2", features = ["serde"] } -walkdir = "2" -nonzero_ext = "0.3" diff --git a/tools/buildsys/src/bin/bottlerocket-variant/main.rs b/tools/buildsys/src/bin/bottlerocket-variant/main.rs deleted file mode 100644 index d994a983ed3..00000000000 --- a/tools/buildsys/src/bin/bottlerocket-variant/main.rs +++ /dev/null @@ -1,70 +0,0 @@ -use bottlerocket_variant::Variant; -use buildsys::manifest::ManifestInfo; -use snafu::ResultExt; -use std::path::PathBuf; -use std::{env, process}; - -// Returning a Result from main makes it print a Debug representation of the error, but with Snafu -// we have nice Display representations of the error, so we wrap "main" (run) and print any error. -// https://github.com/shepmaster/snafu/issues/110 -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -/// Read `BUILDSYS_VARIANT` from the environment, parse into its components, and emit related -/// environment variables to set (or export). Do the same for features defined in the variant -/// manifest. -fn run() -> Result<()> { - let env = getenv("BUILDSYS_VARIANT")?; - let variant = Variant::new(&env).context(error::VariantParseSnafu)?; - println!("BUILDSYS_VARIANT_PLATFORM={}", variant.platform()); - println!("BUILDSYS_VARIANT_RUNTIME={}", variant.runtime()); - println!("BUILDSYS_VARIANT_FAMILY={}", variant.family()); - println!( - "BUILDSYS_VARIANT_FLAVOR={}", - variant.variant_flavor().unwrap_or("''") - ); - let manifest = PathBuf::from(getenv("BUILDSYS_ROOT_DIR")?) - .join("variants") - .join(&env) - .join("Cargo.toml"); - let variant_manifest = ManifestInfo::new(manifest).context(error::ManifestParseSnafu)?; - if let Some(image_features) = variant_manifest.image_features() { - for image_feature in image_features { - println!("export BUILDSYS_VARIANT_IMAGE_FEATURE_{}=1", image_feature); - } - } - Ok(()) -} - -/// Retrieve a variable that we expect to be set in the environment. 
diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs
deleted file mode 100644
index 0949d354571..00000000000
--- a/tools/buildsys/src/builder.rs
+++ /dev/null
@@ -1,650 +0,0 @@
-/*!
-This module handles the calls to Docker needed to execute package and variant
-builds. The actual build steps and the expected parameters are defined in
-the repository's top-level Dockerfile.
-
-*/
-pub(crate) mod error;
-use error::Result;
-
-use duct::cmd;
-use lazy_static::lazy_static;
-use nonzero_ext::nonzero;
-use rand::Rng;
-use regex::Regex;
-use sha2::{Digest, Sha512};
-use snafu::{ensure, OptionExt, ResultExt};
-use std::collections::HashSet;
-use std::env;
-use std::fs::{self, read_dir, File};
-use std::num::NonZeroU16;
-use std::path::{Path, PathBuf};
-use std::process::Output;
-use walkdir::{DirEntry, WalkDir};
-
-use buildsys::manifest::{ImageFeature, ImageFormat, ImageLayout, PartitionPlan, SupportedArch};
-
-/*
-There's a bug in BuildKit that can lead to a build failure during parallel
-`docker build` executions:
-   https://github.com/moby/buildkit/issues/1090
-
-Unfortunately we can't do much to control the concurrency here, and even when
-the bug is fixed there will be many older versions of Docker in the wild.
-
-The failure has an exit code of 1, which is too generic to be helpful. All we
-can do is check the output for the error's signature, and retry if we find it.
-*/
-lazy_static! {
-    static ref DOCKER_BUILD_FRONTEND_ERROR: Regex = Regex::new(concat!(
-        r#"failed to solve with frontend dockerfile.v0: "#,
-        r#"failed to solve with frontend gateway.v0: "#,
-        r#"frontend grpc server closed unexpectedly"#
-    ))
-    .unwrap();
-}
-
-/*
-There's a similar bug that's fixed in new releases of BuildKit but still in the wild in popular
-versions of Docker/BuildKit:
-   https://github.com/moby/buildkit/issues/1468
-*/
-lazy_static! {
-    static ref DOCKER_BUILD_DEAD_RECORD_ERROR: Regex = Regex::new(concat!(
-        r#"failed to solve with frontend dockerfile.v0: "#,
-        r#"failed to solve with frontend gateway.v0: "#,
-        r#"rpc error: code = Unknown desc = failed to build LLB: "#,
-        r#"failed to get dead record"#,
-    ))
-    .unwrap();
-}
-
-/*
-We also see sporadic CI failures with only this error message.
-We use (?m) for multi-line mode so we can match the message on a line of its own without splitting
-the output ourselves; we match the regexes against the whole of stdout.
-*/
-lazy_static! {
-    static ref UNEXPECTED_EOF_ERROR: Regex = Regex::new("(?m)unexpected EOF$").unwrap();
-}
-
-/*
-Sometimes new RPMs are not fully written to the host directory before another build starts, which
-exposes `createrepo_c` to partially-written RPMs that cannot be added to the repo metadata. Retry
-these errors by restarting the build since the alternatives are to ignore the `createrepo_c` exit
-code (masking other problems) or aggressively `sync()` the host directory (hurting performance).
-*/
-lazy_static! {
-    static ref CREATEREPO_C_READ_HEADER_ERROR: Regex = Regex::new(&regex::escape(
-        r#"C_CREATEREPOLIB: Warning: read_header: rpmReadPackageFile() error"#
-    ))
-    .unwrap();
-}
-
-static DOCKER_BUILD_MAX_ATTEMPTS: NonZeroU16 = nonzero!(10u16);
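The four regexes above all feed one mechanism: a failed docker invocation is retried only when its combined output matches a known transient-failure signature, since the exit code alone is too generic. A minimal standalone sketch of that pattern; the function and parameter names here are illustrative, not from this module:

use regex::Regex;

/// Run `command` until it succeeds, retrying only when its combined output
/// matches one of the known transient-failure signatures.
fn run_with_retries<F>(command: F, signatures: &[Regex], max_attempts: u16) -> Result<(), String>
where
    F: Fn() -> (bool, String), // (succeeded, combined stdout/stderr)
{
    let mut attempt = 1;
    loop {
        let (succeeded, output) = command();
        if succeeded {
            return Ok(());
        }
        // A failure is only worth retrying if we recognize its signature.
        let transient = signatures.iter().any(|re| re.is_match(&output));
        if !transient || attempt >= max_attempts {
            return Err(output);
        }
        attempt += 1;
    }
}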
-
-pub(crate) struct PackageBuilder;
-
-impl PackageBuilder {
-    /// Build RPMs for the specified package.
-    pub(crate) fn build(
-        package: &str,
-        image_features: Option<HashSet<ImageFeature>>,
-    ) -> Result<Self> {
-        let output_dir: PathBuf = getenv("BUILDSYS_PACKAGES_DIR")?.into();
-        let arch = getenv("BUILDSYS_ARCH")?;
-        let goarch = serde_plain::from_str::<SupportedArch>(&arch)
-            .context(error::UnsupportedArchSnafu { arch: &arch })?
-            .goarch();
-
-        let mut args = Vec::new();
-        args.push("--network".into());
-        args.push("none".into());
-        args.build_arg("PACKAGE", package);
-        args.build_arg("ARCH", &arch);
-        args.build_arg("GOARCH", goarch);
-
-        // Pass certain environment variables into the build environment. These variables aren't
-        // automatically used to trigger rebuilds when they change, because most packages aren't
-        // affected. Packages that care should "echo cargo:rerun-if-env-changed=VAR" in their
-        // build.rs build script.
-        for (src_env_var, target_env_var) in [
-            ("BUILDSYS_VARIANT", "VARIANT"),
-            ("BUILDSYS_VARIANT_PLATFORM", "VARIANT_PLATFORM"),
-            ("BUILDSYS_VARIANT_RUNTIME", "VARIANT_RUNTIME"),
-            ("BUILDSYS_VARIANT_FAMILY", "VARIANT_FAMILY"),
-            ("BUILDSYS_VARIANT_FLAVOR", "VARIANT_FLAVOR"),
-            ("PUBLISH_REPO", "REPO"),
-        ] {
-            let src_env_val =
-                env::var(src_env_var).context(error::EnvironmentSnafu { var: src_env_var })?;
-            args.build_arg(target_env_var, src_env_val);
-        }
-
-        let tag = format!(
-            "buildsys-pkg-{package}-{arch}",
-            package = package,
-            arch = arch,
-        );
-
-        if let Some(image_features) = image_features {
-            for image_feature in image_features.iter() {
-                args.build_arg(format!("{}", image_feature), "1");
-            }
-        }
-
-        build(BuildType::Package, package, &arch, args, &tag, &output_dir)?;
-
-        Ok(Self)
-    }
-}
-
-pub(crate) struct VariantBuilder;
-
-impl VariantBuilder {
-    /// Build a variant with the specified packages installed.
-    pub(crate) fn build(
-        packages: &[String],
-        image_format: Option<&ImageFormat>,
-        image_layout: Option<&ImageLayout>,
-        kernel_parameters: Option<&Vec<String>>,
-        image_features: Option<HashSet<ImageFeature>>,
-    ) -> Result<Self> {
-        let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into();
-
-        let variant = getenv("BUILDSYS_VARIANT")?;
-        let arch = getenv("BUILDSYS_ARCH")?;
-        let goarch = serde_plain::from_str::<SupportedArch>(&arch)
-            .context(error::UnsupportedArchSnafu { arch: &arch })?
-            .goarch();
-
-        let image_layout = image_layout.cloned().unwrap_or_default();
-        let ImageLayout {
-            os_image_size_gib,
-            data_image_size_gib,
-            partition_plan,
-            ..
-        } = image_layout;
-
-        let (os_image_publish_size_gib, data_image_publish_size_gib) =
-            image_layout.publish_image_sizes_gib();
-
-        let mut args = Vec::new();
-        args.push("--network".into());
-        args.push("host".into());
-        args.build_arg("PACKAGES", packages.join(" "));
-        args.build_arg("ARCH", &arch);
-        args.build_arg("GOARCH", goarch);
-        args.build_arg("VARIANT", &variant);
-        args.build_arg("VERSION_ID", getenv("BUILDSYS_VERSION_IMAGE")?);
-        args.build_arg("BUILD_ID", getenv("BUILDSYS_VERSION_BUILD")?);
-        args.build_arg("PRETTY_NAME", getenv("BUILDSYS_PRETTY_NAME")?);
-        args.build_arg("IMAGE_NAME", getenv("BUILDSYS_NAME")?);
-        args.build_arg(
-            "IMAGE_FORMAT",
-            match image_format {
-                Some(ImageFormat::Raw) | None => "raw",
-                Some(ImageFormat::Qcow2) => "qcow2",
-                Some(ImageFormat::Vmdk) => "vmdk",
-            },
-        );
-        args.build_arg("OS_IMAGE_SIZE_GIB", format!("{}", os_image_size_gib));
-        args.build_arg("DATA_IMAGE_SIZE_GIB", format!("{}", data_image_size_gib));
-        args.build_arg(
-            "OS_IMAGE_PUBLISH_SIZE_GIB",
-            format!("{}", os_image_publish_size_gib),
-        );
-        args.build_arg(
-            "DATA_IMAGE_PUBLISH_SIZE_GIB",
-            format!("{}", data_image_publish_size_gib),
-        );
-        args.build_arg(
-            "PARTITION_PLAN",
-            match partition_plan {
-                PartitionPlan::Split => "split",
-                PartitionPlan::Unified => "unified",
-            },
-        );
-        args.build_arg(
-            "KERNEL_PARAMETERS",
-            kernel_parameters.map(|v| v.join(" ")).unwrap_or_default(),
-        );
-
-        if let Some(image_features) = image_features {
-            for image_feature in image_features.iter() {
-                args.build_arg(format!("{}", image_feature), "1");
-            }
-        }
-
-        // Add known secrets to the build arguments.
-        add_secrets(&mut args)?;
-
-        // Always rebuild variants since they are located in a different workspace,
-        // and don't directly track changes in the underlying packages.
-        getenv("BUILDSYS_TIMESTAMP")?;
-
-        let tag = format!(
-            "buildsys-var-{variant}-{arch}",
-            variant = variant,
-            arch = arch
-        );
-
-        build(BuildType::Variant, &variant, &arch, args, &tag, &output_dir)?;
-
-        Ok(Self)
-    }
-}
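`build_arg` (and `build_secret`, used further below) are extension methods on Vec<String> defined later in this file, outside this excerpt. A plausible minimal shape of the `build_arg` half, shown only for orientation rather than as the verbatim implementation:

trait BuildArgs {
    fn build_arg<S1, S2>(&mut self, key: S1, value: S2)
    where
        S1: AsRef<str>,
        S2: AsRef<str>;
}

impl BuildArgs for Vec<String> {
    fn build_arg<S1, S2>(&mut self, key: S1, value: S2)
    where
        S1: AsRef<str>,
        S2: AsRef<str>,
    {
        // Each call becomes a `--build-arg KEY=VALUE` pair on the docker CLI.
        self.push("--build-arg".to_string());
        self.push(format!("{}={}", key.as_ref(), value.as_ref()));
    }
}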
-
-// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
-
-enum BuildType {
-    Package,
-    Variant,
-}
-
-/// Invoke a series of `docker` commands to drive a package or variant build.
-fn build(
-    kind: BuildType,
-    what: &str,
-    arch: &str,
-    build_args: Vec<String>,
-    tag: &str,
-    output_dir: &PathBuf,
-) -> Result<()> {
-    // Our Dockerfile is in the top-level directory.
-    let root = getenv("BUILDSYS_ROOT_DIR")?;
-    env::set_current_dir(&root).context(error::DirectoryChangeSnafu { path: &root })?;
-
-    // Compute a per-checkout prefix for the tag to avoid collisions.
-    let mut d = Sha512::new();
-    d.update(&root);
-    let digest = hex::encode(d.finalize());
-    let token = &digest[..12];
-    let tag = format!("{}-{}", tag, token);
-
-    // Our SDK and toolchain are picked by the external `cargo make` invocation.
-    let sdk = getenv("BUILDSYS_SDK_IMAGE")?;
-    let toolchain = getenv("BUILDSYS_TOOLCHAIN")?;
-
-    // Avoid using a cached layer from a previous build.
-    let nocache = rand::thread_rng().gen::<u32>();
-
-    // Create a directory for tracking outputs before we move them into position.
-    let build_dir = create_build_dir(&kind, what, arch)?;
-
-    // Clean up any previous outputs we have tracked.
-    clean_build_files(&build_dir, output_dir)?;
-
-    let target = match kind {
-        BuildType::Package => "package",
-        BuildType::Variant => "variant",
-    };
-
-    let mut build = format!(
-        "build . \
-         --target {target} \
-         --tag {tag}",
-        target = target,
-        tag = tag,
-    )
-    .split_string();
-
-    build.extend(build_args);
-    build.build_arg("SDK", sdk);
-    build.build_arg("TOOLCHAIN", toolchain);
-    build.build_arg("NOCACHE", nocache.to_string());
-    // Avoid using a cached layer from a concurrent build in another checkout.
-    build.build_arg("TOKEN", token);
-
-    let create = format!("create --name {} {} true", tag, tag).split_string();
-    let cp = format!("cp {}:/output/. {}", tag, build_dir.display()).split_string();
-    let rm = format!("rm --force {}", tag).split_string();
-    let rmi = format!("rmi --force {}", tag).split_string();
-
-    // Clean up the stopped container if it exists.
-    let _ = docker(&rm, Retry::No);
-
-    // Clean up the previous image if it exists.
-    let _ = docker(&rmi, Retry::No);
-
-    // Build the image, which builds the artifacts we want.
-    // Work around transient, known failure cases with Docker.
-    docker(
-        &build,
-        Retry::Yes {
-            attempts: DOCKER_BUILD_MAX_ATTEMPTS,
-            messages: &[
-                &*DOCKER_BUILD_FRONTEND_ERROR,
-                &*DOCKER_BUILD_DEAD_RECORD_ERROR,
-                &*UNEXPECTED_EOF_ERROR,
-                &*CREATEREPO_C_READ_HEADER_ERROR,
-            ],
-        },
-    )?;
-
-    // Create a stopped container so we can copy artifacts out.
-    docker(&create, Retry::No)?;
-
-    // Copy artifacts into our output directory.
-    docker(&cp, Retry::No)?;
-
-    // Clean up our stopped container after copying artifacts out.
-    docker(&rm, Retry::No)?;
-
-    // Clean up our image now that we're done.
-    docker(&rmi, Retry::No)?;
-
-    // Copy artifacts to the expected directory and write markers to track them.
-    copy_build_files(&build_dir, output_dir)?;
-
-    Ok(())
-}
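`split_string`, likewise defined later in the file and not shown in this excerpt, turns a readable multi-line command template like the one above into an argv-style vector for `duct::cmd`. Conceptually something like the following, under the assumption that it splits on whitespace:

trait SplitString {
    fn split_string(&self) -> Vec<String>;
}

impl<S: AsRef<str>> SplitString for S {
    fn split_string(&self) -> Vec<String> {
        // Whitespace-split the template so each token becomes one argument.
        self.as_ref().split_whitespace().map(String::from).collect()
    }
}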
\ - --target {target} \ - --tag {tag}", - target = target, - tag = tag, - ) - .split_string(); - - build.extend(build_args); - build.build_arg("SDK", sdk); - build.build_arg("TOOLCHAIN", toolchain); - build.build_arg("NOCACHE", nocache.to_string()); - // Avoid using a cached layer from a concurrent build in another checkout. - build.build_arg("TOKEN", token); - - let create = format!("create --name {} {} true", tag, tag).split_string(); - let cp = format!("cp {}:/output/. {}", tag, build_dir.display()).split_string(); - let rm = format!("rm --force {}", tag).split_string(); - let rmi = format!("rmi --force {}", tag).split_string(); - - // Clean up the stopped container if it exists. - let _ = docker(&rm, Retry::No); - - // Clean up the previous image if it exists. - let _ = docker(&rmi, Retry::No); - - // Build the image, which builds the artifacts we want. - // Work around transient, known failure cases with Docker. - docker( - &build, - Retry::Yes { - attempts: DOCKER_BUILD_MAX_ATTEMPTS, - messages: &[ - &*DOCKER_BUILD_FRONTEND_ERROR, - &*DOCKER_BUILD_DEAD_RECORD_ERROR, - &*UNEXPECTED_EOF_ERROR, - &*CREATEREPO_C_READ_HEADER_ERROR, - ], - }, - )?; - - // Create a stopped container so we can copy artifacts out. - docker(&create, Retry::No)?; - - // Copy artifacts into our output directory. - docker(&cp, Retry::No)?; - - // Clean up our stopped container after copying artifacts out. - docker(&rm, Retry::No)?; - - // Clean up our image now that we're done. - docker(&rmi, Retry::No)?; - - // Copy artifacts to the expected directory and write markers to track them. - copy_build_files(&build_dir, output_dir)?; - - Ok(()) -} - -/// Run `docker` with the specified arguments. -fn docker(args: &[String], retry: Retry) -> Result { - let mut max_attempts: u16 = 1; - let mut retry_messages: &[&Regex] = &[]; - if let Retry::Yes { attempts, messages } = retry { - max_attempts = attempts.into(); - retry_messages = messages; - } - - let mut attempt = 1; - loop { - let output = cmd("docker", args) - .stderr_to_stdout() - .stdout_capture() - .unchecked() - .run() - .context(error::CommandStartSnafu)?; - - let stdout = String::from_utf8_lossy(&output.stdout); - println!("{}", &stdout); - if output.status.success() { - return Ok(output); - } - - ensure!( - retry_messages.iter().any(|m| m.is_match(&stdout)) && attempt < max_attempts, - error::DockerExecutionSnafu { - args: &args.join(" ") - } - ); - - attempt += 1; - } -} - -/// Allow the caller to configure retry behavior, since the command may fail -/// for spurious reasons that should not be treated as an error. -enum Retry<'a> { - No, - Yes { - attempts: NonZeroU16, - messages: &'a [&'static Regex], - }, -} - -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - -/// Add secrets that might be needed for builds. Since most builds won't use -/// them, they are not automatically tracked for changes. If necessary, builds -/// can emit the relevant cargo directives for tracking in their build script. 
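Both the variable pass-through in `PackageBuilder::build` and the secrets handling below lean on the same Cargo convention. As an illustration, a package's `build.rs` might opt in to tracking like this; the choice of variables is the package's own, and nothing in this sketch is emitted by buildsys itself:

```rust
// build.rs sketch for a hypothetical variant-sensitive package. Cargo
// re-runs the build script, and therefore the package build, whenever a
// listed environment variable's value changes.
fn main() {
    for var in ["BUILDSYS_VARIANT", "PUBLISH_REPO"] {
        println!("cargo:rerun-if-env-changed={}", var);
    }
}
```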
-/// Add secrets that might be needed for builds. Since most builds won't use
-/// them, they are not automatically tracked for changes. If necessary, builds
-/// can emit the relevant cargo directives for tracking in their build script.
-fn add_secrets(args: &mut Vec<String>) -> Result<()> {
-    let sbkeys_var = "BUILDSYS_SBKEYS_PROFILE_DIR";
-    let sbkeys_dir = env::var(sbkeys_var).context(error::EnvironmentSnafu { var: sbkeys_var })?;
-
-    let sbkeys = read_dir(&sbkeys_dir).context(error::DirectoryReadSnafu { path: &sbkeys_dir })?;
-    for s in sbkeys {
-        let s = s.context(error::DirectoryReadSnafu { path: &sbkeys_dir })?;
-        args.build_secret(
-            "file",
-            &s.file_name().to_string_lossy(),
-            &s.path().to_string_lossy(),
-        );
-    }
-
-    for var in &[
-        "AWS_ACCESS_KEY_ID",
-        "AWS_SECRET_ACCESS_KEY",
-        "AWS_SESSION_TOKEN",
-    ] {
-        let id = format!("{}.env", var.to_lowercase().replace('_', "-"));
-        args.build_secret("env", &id, var);
-    }
-
-    Ok(())
-}
-
-// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
-
-/// Create a directory for build artifacts.
-fn create_build_dir(kind: &BuildType, name: &str, arch: &str) -> Result<PathBuf> {
-    let prefix = match kind {
-        BuildType::Package => "packages",
-        BuildType::Variant => "variants",
-    };
-
-    let path = [&getenv("BUILDSYS_STATE_DIR")?, arch, prefix, name]
-        .iter()
-        .collect();
-
-    fs::create_dir_all(&path).context(error::DirectoryCreateSnafu { path: &path })?;
-
-    Ok(path)
-}
-
-const MARKER_EXTENSION: &str = ".buildsys_marker";
-
-/// Copy build artifacts to the output directory.
-/// Before we copy each file, we create a corresponding marker file to record its existence.
-fn copy_build_files<P>(build_dir: P, output_dir: P) -> Result<()>
-where
-    P: AsRef<Path>,
-{
-    fn has_artifacts(entry: &DirEntry) -> bool {
-        let is_dir = entry.path().is_dir();
-        let is_file = entry.file_type().is_file();
-        let is_not_marker = is_file
-            && entry
-                .file_name()
-                .to_str()
-                .map(|s| !s.ends_with(MARKER_EXTENSION))
-                .unwrap_or(false);
-        let is_symlink = entry.file_type().is_symlink();
-        is_dir || is_not_marker || is_symlink
-    }
-
-    for artifact_file in find_files(&build_dir, has_artifacts) {
-        let mut marker_file = artifact_file.clone().into_os_string();
-        marker_file.push(MARKER_EXTENSION);
-        File::create(&marker_file).context(error::FileCreateSnafu { path: &marker_file })?;
-
-        let mut output_file: PathBuf = output_dir.as_ref().into();
-        output_file.push(artifact_file.strip_prefix(&build_dir).context(
-            error::StripPathPrefixSnafu {
-                path: &marker_file,
-                prefix: build_dir.as_ref(),
-            },
-        )?);
-
-        let parent_dir = output_file
-            .parent()
-            .context(error::BadDirectorySnafu { path: &output_file })?;
-        fs::create_dir_all(parent_dir)
-            .context(error::DirectoryCreateSnafu { path: &parent_dir })?;
-
-        fs::rename(&artifact_file, &output_file).context(error::FileRenameSnafu {
-            old_path: &artifact_file,
-            new_path: &output_file,
-        })?;
-    }
-
-    Ok(())
-}
-
-/// Remove build artifacts from the output directory.
-/// Any marker file we find could have a corresponding file that should be cleaned up.
-/// We also clean up the marker files so they do not accumulate across builds.
-/// For the same reason, if a directory is empty after build artifacts, marker files, and other
-/// empty directories have been removed, then that directory will also be removed.
-fn clean_build_files<P>(build_dir: P, output_dir: P) -> Result<()>
-where
-    P: AsRef<Path>,
-{
-    let build_dir = build_dir.as_ref();
-    let output_dir = output_dir.as_ref();
-
-    fn has_markers(entry: &DirEntry) -> bool {
-        let is_dir = entry.path().is_dir();
-        let is_file = entry.file_type().is_file();
-        let is_marker = is_file
-            && entry
-                .file_name()
-                .to_str()
-                .map(|s| s.ends_with(MARKER_EXTENSION))
-                .unwrap_or(false);
-        is_dir || is_marker
-    }
-
-    fn cleanup(path: &Path, top: &Path, dirs: &mut HashSet<PathBuf>) -> Result<()> {
-        if !path.exists() && !path.is_symlink() {
-            return Ok(());
-        }
-        std::fs::remove_file(path).context(error::FileRemoveSnafu { path })?;
-        let mut parent = path.parent();
-        while let Some(p) = parent {
-            if p == top || dirs.contains(p) {
-                break;
-            }
-            dirs.insert(p.into());
-            parent = p.parent()
-        }
-        Ok(())
-    }
-
-    fn is_empty_dir(path: &Path) -> Result<bool> {
-        Ok(path.is_dir()
-            && path
-                .read_dir()
-                .context(error::DirectoryReadSnafu { path })?
-                .next()
-                .is_none())
-    }
-
-    let mut clean_dirs: HashSet<PathBuf> = HashSet::new();
-
-    for marker_file in find_files(&build_dir, has_markers) {
-        let mut output_file: PathBuf = output_dir.into();
-        output_file.push(marker_file.strip_prefix(build_dir).context(
-            error::StripPathPrefixSnafu {
-                path: &marker_file,
-                prefix: build_dir,
-            },
-        )?);
-        output_file.set_extension("");
-        cleanup(&output_file, output_dir, &mut clean_dirs)?;
-        cleanup(&marker_file, build_dir, &mut clean_dirs)?;
-    }
-
-    // Clean up directories in reverse order, so that empty child directories don't stop an
-    // otherwise empty parent directory from being removed.
-    let mut clean_dirs = clean_dirs.into_iter().collect::<Vec<_>>();
-    clean_dirs.sort_by(|a, b| b.cmp(a));
-
-    for clean_dir in clean_dirs {
-        if is_empty_dir(&clean_dir)? {
-            std::fs::remove_dir(&clean_dir)
-                .context(error::DirectoryRemoveSnafu { path: &clean_dir })?;
-        }
-    }
-
-    Ok(())
-}
-
-/// Create an iterator over files matching the supplied filter.
-fn find_files<P>(
-    dir: P,
-    filter: for<'r> fn(&'r walkdir::DirEntry) -> bool,
-) -> impl Iterator<Item = PathBuf>
-where
-    P: AsRef<Path>,
-{
-    WalkDir::new(&dir)
-        .follow_links(false)
-        .same_file_system(true)
-        .min_depth(1)
-        .into_iter()
-        .filter_entry(filter)
-        .flat_map(|e| e.context(error::DirectoryWalkSnafu))
-        .map(|e| e.into_path())
-        .filter(|e| e.is_file() || e.is_symlink())
-}
-
-/// Retrieve a BUILDSYS_* variable that we expect to be set in the environment,
-/// and ensure that we track it for changes, since it will directly affect the
-/// output.
-fn getenv(var: &str) -> Result<String> {
-    println!("cargo:rerun-if-env-changed={}", var);
-    env::var(var).context(error::EnvironmentSnafu { var })
-}
-
-// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
-
-/// Helper trait for constructing buildkit --build-arg arguments.
-trait BuildArg {
-    fn build_arg<S1, S2>(&mut self, key: S1, value: S2)
-    where
-        S1: AsRef<str>,
-        S2: AsRef<str>;
-}
-
-impl BuildArg for Vec<String> {
-    fn build_arg<S1, S2>(&mut self, key: S1, value: S2)
-    where
-        S1: AsRef<str>,
-        S2: AsRef<str>,
-    {
-        self.push("--build-arg".to_string());
-        self.push(format!("{}={}", key.as_ref(), value.as_ref()));
-    }
-}
-
-/// Helper trait for constructing buildkit --secret arguments.
-trait BuildSecret {
-    fn build_secret<S>(&mut self, typ: S, id: S, src: S)
-    where
-        S: AsRef<str>;
-}
-
-impl BuildSecret for Vec<String> {
-    fn build_secret<S>(&mut self, typ: S, id: S, src: S)
-    where
-        S: AsRef<str>,
-    {
-        self.push("--secret".to_string());
-        self.push(format!(
-            "type={},id={},src={}",
-            typ.as_ref(),
-            id.as_ref(),
-            src.as_ref()
-        ));
-    }
-}
-
-/// Helper trait for splitting a string on spaces into owned Strings.
-///
-/// If you need an element with internal spaces, you should handle that separately, for example
-/// with BuildArg.
-trait SplitString {
-    fn split_string(&self) -> Vec<String>;
-}
-
-impl<S> SplitString for S
-where
-    S: AsRef<str>,
-{
-    fn split_string(&self) -> Vec<String> {
-        self.as_ref().split(' ').map(String::from).collect()
-    }
-}
diff --git a/tools/buildsys/src/builder/error.rs b/tools/buildsys/src/builder/error.rs
deleted file mode 100644
index fe527590b85..00000000000
--- a/tools/buildsys/src/builder/error.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-use snafu::Snafu;
-use std::path::PathBuf;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(super)))]
-pub(crate) enum Error {
-    #[snafu(display("Failed to start command: {}", source))]
-    CommandStart { source: std::io::Error },
-
-    #[snafu(display("Failed to execute command: 'docker {}'", args))]
-    DockerExecution { args: String },
-
-    #[snafu(display("Failed to change directory to '{}': {}", path.display(), source))]
-    DirectoryChange {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to get parent directory for '{}'", path.display()))]
-    BadDirectory { path: PathBuf },
-
-    #[snafu(display("Failed to create directory '{}': {}", path.display(), source))]
-    DirectoryCreate {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to remove directory '{}': {}", path.display(), source))]
-    DirectoryRemove {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to read directory '{}': {}", path.display(), source))]
-    DirectoryRead {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to walk directory to find marker files: {}", source))]
-    DirectoryWalk { source: walkdir::Error },
-
-    #[snafu(display("Failed to create file '{}': {}", path.display(), source))]
-    FileCreate {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to remove file '{}': {}", path.display(), source))]
-    FileRemove {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to rename file '{}' to '{}': {}", old_path.display(), new_path.display(), source))]
-    FileRename {
-        old_path: PathBuf,
-        new_path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Missing environment variable '{}'", var))]
-    Environment {
-        var: String,
-        source: std::env::VarError,
-    },
-
-    #[snafu(display("Failed to strip prefix '{}' from path '{}': {}", prefix.display(), path.display(), source))]
-    StripPathPrefix {
-        path: PathBuf,
-        prefix: PathBuf,
-        source: std::path::StripPrefixError,
-    },
-
-    #[snafu(display("Unsupported architecture '{}'", arch))]
-    UnsupportedArch {
-        arch: String,
-        source: serde_plain::Error,
-    },
-}
-
-pub(super) type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs
deleted file mode 100644
index 6bc4e8ac9ce..00000000000
--- a/tools/buildsys/src/cache.rs
+++ /dev/null
@@ -1,154 +0,0 @@
-/*!
-Many of the inputs to package builds are not source files tracked within the git
-repository, but large binary artifacts such as tar archives that are independently
-distributed by an upstream project.
-
-This module provides the ability to retrieve and validate these external files,
-given the (name, url, hash) data that uniquely identifies each file.
-
-It implements a two-tier approach to retrieval: files are first pulled from the
-"lookaside" cache and only fetched from the upstream site if that access fails.
-
-*/
-pub(crate) mod error;
-use error::Result;
-
-use buildsys::manifest;
-use reqwest::header::{HeaderMap, HeaderValue, USER_AGENT};
-use sha2::{Digest, Sha512};
-use snafu::{ensure, OptionExt, ResultExt};
-use std::env;
-use std::fs::{self, File};
-use std::io::{self, BufWriter};
-use std::path::{Path, PathBuf};
-
-static LOOKASIDE_CACHE: &str = "https://cache.bottlerocket.aws";
-
-pub(crate) struct LookasideCache;
-
-impl LookasideCache {
-    /// Fetch files stored out-of-tree and ensure they match the stored hash.
-    pub(crate) fn fetch(files: &[manifest::ExternalFile]) -> Result<Self> {
-        for f in files {
-            let url_file_name = Self::extract_file_name(&f.url)?;
-            let path = &f.path.as_ref().unwrap_or(&url_file_name);
-            ensure!(
-                path.components().count() == 1,
-                error::ExternalFileNameSnafu { path }
-            );
-
-            let hash = &f.sha512;
-            if path.is_file() {
-                match Self::verify_file(path, hash) {
-                    Ok(_) => continue,
-                    Err(e) => {
-                        eprintln!("{}", e);
-                        fs::remove_file(path).context(error::ExternalFileDeleteSnafu { path })?;
-                    }
-                }
-            }
-
-            let name = path.display();
-            let tmp = PathBuf::from(format!(".{}", name));
-
-            // first check the lookaside cache
-            let url = format!("{}/{}/{}/{}", LOOKASIDE_CACHE, name, hash, name);
-            match Self::fetch_file(&url, &tmp, hash) {
-                Ok(_) => {
-                    fs::rename(&tmp, path)
-                        .context(error::ExternalFileRenameSnafu { path: &tmp })?;
-                    continue;
-                }
-                Err(e) => {
-                    eprintln!("{}", e);
-                }
-            }
-
-            // next check with upstream, if permitted
-            if f.force_upstream.unwrap_or(false)
-                || std::env::var("BUILDSYS_UPSTREAM_SOURCE_FALLBACK") == Ok("true".to_string())
-            {
-                println!("Fetching {:?} from upstream source", url_file_name);
-                Self::fetch_file(&f.url, &tmp, hash)?;
-                fs::rename(&tmp, path).context(error::ExternalFileRenameSnafu { path: &tmp })?;
-            }
-        }
-
-        Ok(Self)
-    }
-    /// Retrieves a file from the specified URL and writes it to the given path,
-    /// then verifies the contents against the SHA-512 hash provided.
-    fn fetch_file<P: AsRef<Path>>(url: &str, path: P, hash: &str) -> Result<()> {
-        let path = path.as_ref();
-
-        let version = Self::getenv("BUILDSYS_VERSION_FULL")?;
-
-        let mut headers = HeaderMap::new();
-        headers.insert(
-            USER_AGENT,
-            HeaderValue::from_str(&format!(
-                "Bottlerocket buildsys {version} (https://github.com/bottlerocket-os/bottlerocket)"
-            ))
-            .unwrap_or(HeaderValue::from_static(
-                "Bottlerocket buildsys (https://github.com/bottlerocket-os/bottlerocket)",
-            )),
-        );
-
-        let client = reqwest::blocking::Client::new();
-        let mut resp = client
-            .get(url)
-            .headers(headers)
-            .send()
-            .context(error::ExternalFileRequestSnafu { url })?;
-        let status = resp.status();
-        ensure!(
-            status.is_success(),
-            error::ExternalFileFetchSnafu { url, status }
-        );
-
-        let f = File::create(path).context(error::ExternalFileOpenSnafu { path })?;
-        let mut f = BufWriter::new(f);
-        resp.copy_to(&mut f)
-            .context(error::ExternalFileSaveSnafu { path })?;
-        drop(f);
-
-        match Self::verify_file(path, hash) {
-            Ok(_) => Ok(()),
-            Err(e) => {
-                fs::remove_file(path).context(error::ExternalFileDeleteSnafu { path })?;
-                Err(e)
-            }
-        }
-    }
-
-    fn getenv(var: &str) -> Result<String> {
-        env::var(var).context(error::EnvironmentSnafu { var })
-    }
-
-    fn extract_file_name(url: &str) -> Result<PathBuf> {
-        let parsed = reqwest::Url::parse(url).context(error::ExternalFileUrlSnafu { url })?;
-        let name = parsed
-            .path_segments()
-            .context(error::ExternalFileNameSnafu { path: url })?
-            .last()
-            .context(error::ExternalFileNameSnafu { path: url })?;
-        Ok(name.into())
-    }
-
-    /// Reads a file from disk and compares it to the expected SHA-512 hash.
-    fn verify_file<P: AsRef<Path>>(path: P, hash: &str) -> Result<()> {
-        let path = path.as_ref();
-        let mut f = File::open(path).context(error::ExternalFileOpenSnafu { path })?;
-        let mut d = Sha512::new();
-
-        io::copy(&mut f, &mut d).context(error::ExternalFileLoadSnafu { path })?;
-        let digest = hex::encode(d.finalize());
-
-        ensure!(
-            digest == hash,
-            error::ExternalFileVerifySnafu { path, hash }
-        );
-        Ok(())
-    }
-}
diff --git a/tools/buildsys/src/cache/error.rs b/tools/buildsys/src/cache/error.rs
deleted file mode 100644
index 7665ba68bdb..00000000000
--- a/tools/buildsys/src/cache/error.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use snafu::Snafu;
-use std::io;
-use std::path::PathBuf;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(super)))]
-#[allow(clippy::enum_variant_names)]
-pub(crate) enum Error {
-    #[snafu(display("Missing environment variable '{}'", var))]
-    Environment {
-        var: String,
-        source: std::env::VarError,
-    },
-
-    #[snafu(display("Bad file name '{}'", path.display()))]
-    ExternalFileName { path: PathBuf },
-
-    #[snafu(display("Bad file url '{}': {}", url, source))]
-    ExternalFileUrl {
-        url: String,
-        source: url::ParseError,
-    },
-
-    #[snafu(display("Failed to request '{}': {}", url, source))]
-    ExternalFileRequest { url: String, source: reqwest::Error },
-
-    #[snafu(display("Failed to fetch '{}': {}", url, status))]
-    ExternalFileFetch {
-        url: String,
-        status: reqwest::StatusCode,
-    },
-
-    #[snafu(display("Failed to open file '{}': {}", path.display(), source))]
-    ExternalFileOpen { path: PathBuf, source: io::Error },
-
-    #[snafu(display("Failed to write file '{}': {}", path.display(), source))]
-    ExternalFileSave {
-        path: PathBuf,
-        source: reqwest::Error,
-    },
-
-    #[snafu(display("Failed to load file '{}': {}", path.display(), source))]
-    ExternalFileLoad { path: PathBuf, source: io::Error },
-
-    #[snafu(display("Failed to verify file '{}' with hash '{}'", path.display(), hash))]
-    ExternalFileVerify { path: PathBuf, hash: String },
-
-    #[snafu(display("Failed to rename file '{}': {}", path.display(), source))]
-    ExternalFileRename { path: PathBuf, source: io::Error },
-
-    #[snafu(display("Failed to delete file '{}': {}", path.display(), source))]
-    ExternalFileDelete { path: PathBuf, source: io::Error },
-}
-
-pub(super) type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs
deleted file mode 100644
index 6a1dabc0a35..00000000000
--- a/tools/buildsys/src/gomod.rs
+++ /dev/null
@@ -1,207 +0,0 @@
-/*!
-Packages using the Go programming language may have upstream tar archives that
-include only the source code of the project, but not the source code of any
-dependencies. The Go programming language promotes the use of "modules" for
-dependencies. Projects adopting modules will provide `go.mod` and `go.sum` files.
-
-This Rust module extends the functionality of `package.metadata.build-package.external-files`
-and provides the ability to retrieve and validate dependencies
-declared using Go modules given a tar archive containing a `go.mod` and `go.sum`.
-
-The locations that dependencies are retrieved from are controlled by the
-standard environment variables employed by the Go tool: `GOPROXY`, `GOSUMDB`, and
-`GOPRIVATE`. These variables are automatically retrieved from the host environment
-when the docker-go script is invoked.
-
- */
-
-pub(crate) mod error;
-use error::Result;
-
-use buildsys::manifest;
-use duct::cmd;
-use snafu::{ensure, OptionExt, ResultExt};
-use std::io::Write;
-use std::os::unix::fs::PermissionsExt;
-use std::path::{Path, PathBuf};
-use std::{env, fs};
-
-pub(crate) struct GoMod;
-
-const GO_MOD_DOCKER_SCRIPT_NAME: &str = "docker-go-script.sh";
-
-// The following bash template script is intended to be run within a container
-// using the docker-go tool found in this codebase under `tools/docker-go`.
-//
-// This script inspects the top level directory found in the package upstream
-// archive and uses that as the default Go module path if no explicit module
-// path was provided. It will then untar the archive, vendor the Go
-// dependencies, create a new archive using the {module-path}/vendor directory
-// and name it the output path provided. If no output path was given, it
-// defaults to "bundled-{package-file-name}". Finally, it cleans up by removing
-// the untar'd source code. The upstream archive remains intact and both tar
-// files can then be used during packaging.
-//
-// This script exists as an in memory template string literal and is populated
-// into a temporary file in the package directory itself to enable buildsys to
-// be as portable as possible and have no dependency on runtime paths. Since
-// buildsys is executed from the context of many different package directories,
-// managing a temporary file via this Rust module prevents having to acquire the
-// path of some static script file on the host system.
-const GO_MOD_SCRIPT_TMPL: &str = r#"#!/bin/bash
-
-set -e
-
-toplevel=$(tar tf __LOCAL_FILE_NAME__ | head -1)
-if [ -z __MOD_DIR__ ] ; then
-  targetdir="${toplevel}"
-else
-  targetdir="__MOD_DIR__"
-fi
-
-tar xf __LOCAL_FILE_NAME__
-
-pushd "${targetdir}"
-  go list -mod=readonly ./... >/dev/null && go mod vendor
-popd
-
-tar czf __OUTPUT__ "${targetdir}"/vendor
-rm -rf "${targetdir}"
-touch -r __LOCAL_FILE_NAME__ __OUTPUT__
-"#;
-
-impl GoMod {
-    pub(crate) fn vendor(
-        root_dir: &Path,
-        package_dir: &Path,
-        external_file: &manifest::ExternalFile,
-    ) -> Result<()> {
-        let url_file_name = extract_file_name(&external_file.url)?;
-        let local_file_name = &external_file.path.as_ref().unwrap_or(&url_file_name);
-        ensure!(
-            local_file_name.components().count() == 1,
-            error::InputFileSnafu
-        );
-
-        let full_path = package_dir.join(local_file_name);
-        ensure!(
-            full_path.is_file(),
-            error::InputFileBadSnafu { path: full_path }
-        );
-
-        // If a module directory was not provided, set as an empty path.
-        // By default, without a provided module directory, tar will be passed
-        // the first directory found in the archives as the top level Go module
-        let default_empty_path = PathBuf::from("");
-        let mod_dir = external_file
-            .bundle_root_path
-            .as_ref()
-            .unwrap_or(&default_empty_path);
-
-        // Use a default "bundle-{name-of-file}" if no output path was provided
-        let default_output_path =
-            PathBuf::from(format!("bundled-{}", local_file_name.to_string_lossy()));
-        let output_path_arg = external_file
-            .bundle_output_path
-            .as_ref()
-            .unwrap_or(&default_output_path);
-        println!(
-            "cargo:rerun-if-changed={}",
-            output_path_arg.to_string_lossy()
-        );
-
-        // Our SDK and toolchain are picked by the external `cargo make` invocation.
-        let sdk = env::var("BUILDSYS_SDK_IMAGE").context(error::EnvironmentSnafu {
-            var: "BUILDSYS_SDK_IMAGE",
-        })?;
-
-        let args = DockerGoArgs {
-            module_path: package_dir,
-            sdk_image: sdk,
-            go_mod_cache: &root_dir.join(".gomodcache"),
-            command: format!("./{}", GO_MOD_DOCKER_SCRIPT_NAME),
-        };
-
-        // Create and/or write the temporary script file to the package directory
-        // using the script template string and placeholder variables
-        let script_contents = GO_MOD_SCRIPT_TMPL
-            .replace("__LOCAL_FILE_NAME__", &local_file_name.to_string_lossy())
-            .replace("__MOD_DIR__", &mod_dir.to_string_lossy())
-            .replace("__OUTPUT__", &output_path_arg.to_string_lossy());
-        let script_path = format!(
-            "{}/{}",
-            package_dir.to_string_lossy(),
-            GO_MOD_DOCKER_SCRIPT_NAME
-        );
-
-        // Drop the reference after writing the file to avoid a "text busy" error
-        // when attempting to execute it.
-        {
-            let mut script_file = fs::File::create(&script_path)
-                .context(error::CreateFileSnafu { path: &script_path })?;
-            fs::set_permissions(&script_path, fs::Permissions::from_mode(0o777))
-                .context(error::SetFilePermissionsSnafu { path: &script_path })?;
-            script_file
-                .write_all(script_contents.as_bytes())
-                .context(error::WriteFileSnafu { path: &script_path })?;
-        }
-
-        let res = docker_go(root_dir, &args);
-        fs::remove_file(&script_path).context(error::RemoveFileSnafu { path: &script_path })?;
-        res
-    }
-}
-
-fn extract_file_name(url: &str) -> Result<PathBuf> {
-    let parsed = reqwest::Url::parse(url).context(error::InputUrlSnafu { url })?;
-    let name = parsed
-        .path_segments()
-        .context(error::InputFileBadSnafu { path: url })?
-        .last()
-        .context(error::InputFileBadSnafu { path: url })?;
-    Ok(name.into())
-}
-
-struct DockerGoArgs<'a> {
-    module_path: &'a Path,
-    sdk_image: String,
-    go_mod_cache: &'a Path,
-    command: String,
-}
-
-/// Run `docker-go` with the specified arguments.
-fn docker_go(root_dir: &Path, dg_args: &DockerGoArgs) -> Result<()> {
-    let args = vec![
-        "--module-path",
-        dg_args
-            .module_path
-            .to_str()
-            .context(error::InputFileSnafu)?,
-        "--sdk-image",
-        &dg_args.sdk_image,
-        "--go-mod-cache",
-        dg_args
-            .go_mod_cache
-            .to_str()
-            .context(error::InputFileSnafu)?,
-        "--command",
-        &dg_args.command,
-    ];
-    let arg_string = args.join(" ");
-    let program = root_dir.join("tools/docker-go");
-    println!("program: {}", program.to_string_lossy());
-    let output = cmd(program, args)
-        .stderr_to_stdout()
-        .stdout_capture()
-        .unchecked()
-        .run()
-        .context(error::CommandStartSnafu)?;
-
-    let stdout = String::from_utf8_lossy(&output.stdout);
-    println!("{}", &stdout);
-    ensure!(
-        output.status.success(),
-        error::DockerExecutionSnafu { args: arg_string }
-    );
-    Ok(())
-}
diff --git a/tools/buildsys/src/gomod/error.rs b/tools/buildsys/src/gomod/error.rs
deleted file mode 100644
index 64d736d3170..00000000000
--- a/tools/buildsys/src/gomod/error.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-use std::path::PathBuf;
-
-use snafu::Snafu;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(super)))]
-pub(crate) enum Error {
-    #[snafu(display("Failed to start command: {}", source))]
-    CommandStart { source: std::io::Error },
-
-    #[snafu(display("Failed to execute docker-go script. 'args: {}'", args))]
-    DockerExecution { args: String },
-
-    #[snafu(display("Input url is required"))]
-    InputFile,
-
-    #[snafu(display("Input file {} must be a file", path.display()))]
-    InputFileBad { path: PathBuf },
-
-    #[snafu(display("Bad file url '{}': {}", url, source))]
-    InputUrl {
-        url: String,
-        source: url::ParseError,
-    },
-
-    #[snafu(display("Missing environment variable '{}'", var))]
-    Environment {
-        var: String,
-        source: std::env::VarError,
-    },
-
-    #[snafu(display("Failed to create '{}': {}", path.display(), source))]
-    CreateFile {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to set permissions on '{}': {}", path.display(), source))]
-    SetFilePermissions {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to write contents to '{}': {}", path.display(), source))]
-    WriteFile {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-
-    #[snafu(display("Failed to remove '{}': {}", path.display(), source))]
-    RemoveFile {
-        path: PathBuf,
-        source: std::io::Error,
-    },
-}
-
-pub(super) type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/buildsys/src/lib.rs b/tools/buildsys/src/lib.rs
deleted file mode 100644
index 640fc648d2b..00000000000
--- a/tools/buildsys/src/lib.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub mod manifest;
diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs
deleted file mode 100644
index 5ea01121f8d..00000000000
--- a/tools/buildsys/src/main.rs
+++ /dev/null
@@ -1,296 +0,0 @@
-/*!
-This tool carries out a package or variant build using Docker.
-
-It is meant to be called by a Cargo build script. To keep those scripts simple,
-all of the configuration is taken from the environment, with the build type
-specified as a command line argument.
-
-The implementation is closely tied to the top-level Dockerfile.
-
-*/
-mod builder;
-mod cache;
-mod gomod;
-mod project;
-mod spec;
-
-use builder::{PackageBuilder, VariantBuilder};
-use buildsys::manifest::{BundleModule, ManifestInfo, SupportedArch};
-use cache::LookasideCache;
-use gomod::GoMod;
-use project::ProjectInfo;
-use serde::Deserialize;
-use snafu::{ensure, ResultExt};
-use spec::SpecInfo;
-use std::env;
-use std::path::PathBuf;
-use std::process;
-
-mod error {
-    use snafu::Snafu;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(super) enum Error {
-        ManifestParse {
-            source: buildsys::manifest::Error,
-        },
-
-        SpecParse {
-            source: super::spec::error::Error,
-        },
-
-        ExternalFileFetch {
-            source: super::cache::error::Error,
-        },
-
-        GoMod {
-            source: super::gomod::error::Error,
-        },
-
-        ProjectCrawl {
-            source: super::project::error::Error,
-        },
-
-        BuildAttempt {
-            source: super::builder::error::Error,
-        },
-
-        #[snafu(display("Missing environment variable '{}'", var))]
-        Environment {
-            var: String,
-            source: std::env::VarError,
-        },
-
-        #[snafu(display("Unknown architecture: '{}'", arch))]
-        UnknownArch {
-            arch: String,
-            source: serde_plain::Error,
-        },
-
-        #[snafu(display(
-            "Unsupported architecture {}, this variant supports {}",
-            arch,
-            supported_arches.join(", ")
-        ))]
-        UnsupportedArch {
-            arch: String,
-            supported_arches: Vec<String>,
-        },
-    }
-}
-
-type Result<T> = std::result::Result<T, error::Error>;
-
-#[derive(Debug, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-enum Command {
-    BuildPackage,
-    BuildVariant,
-}
-
-fn usage() -> ! {
-    eprintln!(
-        "\
-USAGE:
-    buildsys <SUBCOMMAND>
-
-SUBCOMMANDS:
-    build-package           Build RPMs from a spec file and sources.
-    build-variant           Build filesystem and disk images from RPMs."
-    );
-    process::exit(1)
-}
-
-// Returning a Result from main makes it print a Debug representation of the error, but with Snafu
-// we have nice Display representations of the error, so we wrap "main" (run) and print any error.
-// https://github.com/shepmaster/snafu/issues/110
-fn main() {
-    if let Err(e) = run() {
-        eprintln!("{}", e);
-        process::exit(1);
-    }
-}
-
-fn run() -> Result<()> {
-    // Not actually redundant for a diverging function.
-    #[allow(clippy::redundant_closure)]
-    let command_str = std::env::args().nth(1).unwrap_or_else(|| usage());
-    let command = serde_plain::from_str::<Command>(&command_str).unwrap_or_else(|_| usage());
-    match command {
-        Command::BuildPackage => build_package()?,
-        Command::BuildVariant => build_variant()?,
-    }
-    Ok(())
-}
-
-fn build_package() -> Result<()> {
-    let manifest_file = "Cargo.toml";
-    println!("cargo:rerun-if-changed={}", manifest_file);
-
-    let root_dir: PathBuf = getenv("BUILDSYS_ROOT_DIR")?.into();
-    let variant = getenv("BUILDSYS_VARIANT")?;
-    let variant_manifest_path = root_dir.join("variants").join(variant).join(manifest_file);
-    let variant_manifest =
-        ManifestInfo::new(variant_manifest_path).context(error::ManifestParseSnafu)?;
-    supported_arch(&variant_manifest)?;
-    let mut image_features = variant_manifest.image_features();
-
-    let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into();
-    let manifest =
-        ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParseSnafu)?;
-    let package_features = manifest.package_features();
-
-    // For any package feature specified in the package manifest, track the corresponding
-    // environment variable for changes to the ambient set of image features for the current
-    // variant.
-    if let Some(package_features) = &package_features {
-        for package_feature in package_features {
-            println!(
-                "cargo:rerun-if-env-changed=BUILDSYS_VARIANT_IMAGE_FEATURE_{}",
-                package_feature
-            );
-        }
-    }
-
-    // Keep only the image features that the package has indicated that it tracks, if any.
-    if let Some(image_features) = &mut image_features {
-        match package_features {
-            Some(package_features) => image_features.retain(|k| package_features.contains(k)),
-            None => image_features.clear(),
-        }
-    }
-
-    // If manifest has package.metadata.build-package.variant-sensitive set, then track the
-    // appropriate environment variable for changes.
-    if let Some(sensitivity) = manifest.variant_sensitive() {
-        use buildsys::manifest::{SensitivityType::*, VariantSensitivity::*};
-        fn emit_variant_env(suffix: Option<&str>) {
-            if let Some(suffix) = suffix {
-                println!(
-                    "cargo:rerun-if-env-changed=BUILDSYS_VARIANT_{}",
-                    suffix.to_uppercase()
-                );
-            } else {
-                println!("cargo:rerun-if-env-changed=BUILDSYS_VARIANT");
-            }
-        }
-        match sensitivity {
-            Any(false) => (),
-            Any(true) => emit_variant_env(None),
-            Specific(Platform) => emit_variant_env(Some("platform")),
-            Specific(Runtime) => emit_variant_env(Some("runtime")),
-            Specific(Family) => emit_variant_env(Some("family")),
-            Specific(Flavor) => emit_variant_env(Some("flavor")),
-        }
-    }
-
-    if let Some(files) = manifest.external_files() {
-        LookasideCache::fetch(files).context(error::ExternalFileFetchSnafu)?;
-        for f in files {
-            if f.bundle_modules.is_none() {
-                continue;
-            }
-
-            for b in f.bundle_modules.as_ref().unwrap() {
-                match b {
-                    BundleModule::Go => {
-                        GoMod::vendor(&root_dir, &manifest_dir, f).context(error::GoModSnafu)?
-                    }
-                }
-            }
-        }
-    }
-
-    if let Some(groups) = manifest.source_groups() {
-        let var = "BUILDSYS_SOURCES_DIR";
-        let root: PathBuf = getenv(var)?.into();
-        println!("cargo:rerun-if-env-changed={}", var);
-
-        let dirs = groups.iter().map(|d| root.join(d)).collect::<Vec<_>>();
-        let info = ProjectInfo::crawl(&dirs).context(error::ProjectCrawlSnafu)?;
-        for f in info.files {
-            println!("cargo:rerun-if-changed={}", f.display());
-        }
-    }
-
-    // Package developer can override name of package if desired, e.g. to name package with
-    // characters invalid in Cargo crate names
-    let package = if let Some(name_override) = manifest.package_name() {
-        name_override.clone()
-    } else {
-        getenv("CARGO_PKG_NAME")?
-    };
-    let spec = format!("{}.spec", package);
-    println!("cargo:rerun-if-changed={}", spec);
-
-    let info = SpecInfo::new(PathBuf::from(&spec)).context(error::SpecParseSnafu)?;
-
-    for f in info.sources {
-        println!("cargo:rerun-if-changed={}", f.display());
-    }
-
-    for f in info.patches {
-        println!("cargo:rerun-if-changed={}", f.display());
-    }
-
-    PackageBuilder::build(&package, image_features).context(error::BuildAttemptSnafu)?;
-
-    Ok(())
-}
-
-fn build_variant() -> Result<()> {
-    let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into();
-    let manifest_file = "Cargo.toml";
-    println!("cargo:rerun-if-changed={}", manifest_file);
-
-    let manifest =
-        ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParseSnafu)?;
-
-    supported_arch(&manifest)?;
-
-    if let Some(packages) = manifest.included_packages() {
-        let image_format = manifest.image_format();
-        let image_layout = manifest.image_layout();
-        let kernel_parameters = manifest.kernel_parameters();
-        let image_features = manifest.image_features();
-        VariantBuilder::build(
-            packages,
-            image_format,
-            image_layout,
-            kernel_parameters,
-            image_features,
-        )
-        .context(error::BuildAttemptSnafu)?;
-    } else {
-        println!("cargo:warning=No included packages in manifest. Skipping variant build.");
-    }
-
-    Ok(())
-}
-
-/// Ensure that the current arch is supported by the current variant
-fn supported_arch(manifest: &ManifestInfo) -> Result<()> {
-    if let Some(supported_arches) = manifest.supported_arches() {
-        let arch = getenv("BUILDSYS_ARCH")?;
-        let current_arch: SupportedArch =
-            serde_plain::from_str(&arch).context(error::UnknownArchSnafu { arch: &arch })?;
-
-        ensure!(
-            supported_arches.contains(&current_arch),
-            error::UnsupportedArchSnafu {
-                arch: &arch,
-                supported_arches: supported_arches
-                    .iter()
-                    .map(|a| a.to_string())
-                    .collect::<Vec<String>>()
-            }
-        )
-    }
-    Ok(())
-}
-
-/// Retrieve a variable that we expect to be set in the environment.
-fn getenv(var: &str) -> Result<String> {
-    env::var(var).context(error::EnvironmentSnafu { var })
-}
diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs
deleted file mode 100644
index 1df31abe820..00000000000
--- a/tools/buildsys/src/manifest.rs
+++ /dev/null
@@ -1,582 +0,0 @@
-/*!
-# Build system metadata
-
-This module provides deserialization and convenience methods for build system
-metadata located in `Cargo.toml`.
-
-Cargo ignores the `package.metadata` table in its manifest, so it can be used
-to store configuration for other tools. We recognize the following keys.
-
-## Metadata for packages
-
-`source-groups` is a list of directories in the top-level `sources` directory,
-each of which contains a set of related Rust projects. Changes to files in
-these groups should trigger a rebuild.
-```ignore
-[package.metadata.build-package]
-source-groups = ["api"]
-```
-
-`external-files` is a list of out-of-tree files that should be retrieved
-as additional dependencies for the build. If the path for the external
-file name is not provided, it will be taken from the last path component
-of the URL.
-```ignore
-[[package.metadata.build-package.external-files]]
-path = "foo"
-url = "https://foo"
-sha512 = "abcdef"
-
-[[package.metadata.build-package.external-files]]
-path = "bar"
-url = "https://bar"
-sha512 = "123456"
-```
-
-The `bundle-*` keys on `external-files` are a group of optional modifiers
-and are used to untar an upstream external file archive, vendor any dependent
-code, and produce an additional archive with those dependencies.
-Only `bundle-modules` is required when bundling an archive's dependencies.
-
-`bundle-modules` is a list of module "paradigms" the external-file should
-be vendored through. For example, if a project contains a `go.mod` and `go.sum`
-file, adding "go" to the list will vendor the dependencies through go modules.
-Currently, only "go" is supported.
-
-`bundle-root-path` is an optional argument that provides the filepath
-within the archive that contains the module. By default, the first top level
-directory in the archive is used. So, for example, given a Go project that has
-the necessary `go.mod` and `go.sum` files in the archive located at the
-filepath `a/b/c`, this `bundle-root-path` value should be "a/b/c". Or, given an
-archive with a single directory that contains a Go project that has `go.mod`
-and `go.sum` files located in that top level directory, this option may be
-omitted since the single top-level directory will automatically be used.
-
-`bundle-output-path` is an optional argument that provides the desired path of
-the output archive. By default, this will use the name of the existing archive,
-but prepended with "bundled-". For example, if "my-unique-archive-name.tar.gz"
-is entered as the value for `bundle-output-path`, then the output archive
-will be named `my-unique-archive-name.tar.gz`. Or, by default, given the name
-of some upstream archive is "my-package.tar.gz", the output archive would be
-named `bundled-my-package.tar.gz`. This output path may then be referenced
-within an RPM spec or when creating a package in order to access the vendored
-upstream dependencies during build time.
-```ignore
-[[package.metadata.build-package.external-files]]
-path = "foo"
-url = "https://foo"
-sha512 = "abcdef"
-bundle-modules = [ "go" ]
-bundle-root-path = "path/to/module"
-bundle-output-path = "path/to/output.tar.gz"
-```
-
-`package-name` lets you override the package name in Cargo.toml; this is useful
-if you have a package with "." in its name, for example, which Cargo doesn't
-allow. This means the directory name and spec file name can use your preferred
-naming.
-```ignore
-[package.metadata.build-package]
-package-name = "better.name"
-```
-
-`variant-sensitive` lets you specify whether the package should be rebuilt when
-building a new variant, and defaults to false; set it to true if a package is
-using the variant to affect its build process.
-
-```ignore
-[package.metadata.build-package]
-variant-sensitive = true
-```
-
-Some packages might only be sensitive to certain components of the variant
-tuple, such as the platform, runtime, or family. The `variant-sensitive` field
-can also take a string to indicate the source of the sensitivity.
-
-```ignore
-[package.metadata.build-package]
-# sensitive to platform, like "metal" or "aws"
-variant-sensitive = "platform"
-
-# sensitive to runtime, like "k8s" or "ecs"
-variant-sensitive = "runtime"
-
-# sensitive to family, like "metal-k8s" or "aws-ecs"
-variant-sensitive = "family"
-```
-
-`package-features` is a list of image features that the package tracks. This is
-useful when the way the package is built changes based on whether a particular
-image feature is enabled for the current variant, rather than when the variant
-tuple changes.
-
-```ignore
-[package.metadata.build-package]
-package-features = [
-    "grub-set-private-var",
-]
-```
-
-`releases-url` is ignored by buildsys, but can be used by package maintainers
-to indicate a good URL for checking whether the software has had a new release.
-```ignore
-[package.metadata.build-package]
-releases-url = "https://www.example.com/releases"
-```
-
-## Metadata for variants
-
-`included-packages` is a list of packages that should be included in a variant.
-```ignore
-[package.metadata.build-variant]
-included-packages = ["release"]
-```
-
-`image-format` is the desired format for the built images.
-This can be `raw` (the default), `vmdk`, or `qcow2`.
-```ignore
-[package.metadata.build-variant]
-image-format = "vmdk"
-```
-
-`image-layout` is the desired layout for the built images.
-
-`os-image-size-gib` is the desired size of the "os" disk image in GiB.
-The specified size will be automatically divided into two banks, where each
-bank contains the set of partitions needed for in-place upgrades. Roughly 40%
-will be available for each root filesystem partition, with the rest allocated
-to other essential system partitions.
-
-`data-image-size-gib` is the desired size of the "data" disk image in GiB.
-The full size will be used for the single data partition, except for the 2 MiB
-overhead for the GPT labels and partition alignment. The data partition will be
-automatically resized to fill the disk on boot, so it is usually not necessary
-to increase this value.
-
-`publish-image-size-hint-gib` is the desired size of the published image in GiB.
-When the `split` layout is used, the "os" image volume will remain at the built
-size, and any additional space will be allocated to the "data" image volume.
-When the `unified` layout is used, this value will be used directly for the
-single "os" image volume. The hint will be ignored if the combined size of the
-"os" and "data" images exceeds the specified value.
-
-`partition-plan` is the desired strategy for image partitioning.
-This can be `split` (the default) for "os" and "data" images backed by separate
-volumes, or `unified` to have "os" and "data" share the same volume.
-```ignore
-[package.metadata.build-variant.image-layout]
-os-image-size-gib = 2
-data-image-size-gib = 1
-publish-image-size-hint-gib = 22
-partition-plan = "split"
-```
-
-`supported-arches` is the list of architectures the variant is able to run on.
-The values can be `x86_64` and `aarch64`.
-If not specified, the variant can run on any of those architectures.
-```ignore
-[package.metadata.build-variant]
-supported-arches = ["x86_64"]
-```
-
-`kernel-parameters` is a list of extra parameters to be added to the kernel command line.
-The given parameters are inserted at the start of the command line.
-```ignore
-[package.metadata.build-variant]
-kernel-parameters = [
-    "console=ttyS42",
-]
-```
-
-`image-features` is a map of image feature flags, which can be enabled or disabled. This allows us
-to conditionally use or exclude certain firmware-level features in variants.
-
-`grub-set-private-var` means that the grub image for the current variant includes the command to
-find the BOTTLEROCKET_PRIVATE partition and set the appropriate `$private` variable for the grub
-config file to consume. This feature flag is a prerequisite for Boot Config support.
-```ignore
-[package.metadata.build-variant.image-features]
-grub-set-private-var = true
-```
-
-`systemd-networkd` uses the `systemd-networkd` network backend in place of `wicked`. This feature
-flag is meant primarily for development, and will be removed when development has completed.
-```ignore
-[package.metadata.build-variant.image-features]
-systemd-networkd = true
-```
-
-`unified-cgroup-hierarchy` makes systemd set up a unified cgroup hierarchy on
-boot, i.e. the host will use cgroup v2 by default. This feature flag allows
-old variants to continue booting with cgroup v1 and new variants to move to
-cgroup v2, while users will still be able to override the default via command
-line arguments set in the boot configuration.
-```ignore
-[package.metadata.build-variant.image-features]
-unified-cgroup-hierarchy = true
-```
-
-`xfs-data-partition` changes the filesystem for the data partition from ext4 to xfs. The
-default will remain ext4 and xfs is opt-in.
-
-```ignore
-[package.metadata.build-variant.image-features]
-xfs-data-partition = true
-```
-
-`uefi-secure-boot` means that the bootloader and kernel are signed. The grub image for the current
-variant will have a public GPG key baked in, and will expect the grub config file to have a valid
-detached signature. Published artifacts such as AMIs and OVAs will enforce the signature checks
-when the platform supports it.
-
-```ignore
-[package.metadata.build-variant.image-features]
-uefi-secure-boot = true
-```
-
-*/
-
-mod error;
-
-use serde::Deserialize;
-use snafu::{ResultExt, Snafu};
-use std::cmp::max;
-use std::collections::{HashMap, HashSet};
-use std::convert::TryFrom;
-use std::fmt::{self, Display};
-use std::fs;
-use std::path::{Path, PathBuf};
-
-#[derive(Debug, Snafu)]
-pub struct Error(error::Error);
-type Result<T> = std::result::Result<T, Error>;
-
-/// The nested structures here are somewhat complex, but they make it trivial
-/// to deserialize the structure we expect to find in the manifest.
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "kebab-case")]
-pub struct ManifestInfo {
-    package: Package,
-}
-
-impl ManifestInfo {
-    /// Extract the settings we understand from `Cargo.toml`.
-    pub fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
-        let path = path.as_ref();
-        let manifest_data =
-            fs::read_to_string(path).context(error::ManifestFileReadSnafu { path })?;
-        let manifest =
-            toml::from_str(&manifest_data).context(error::ManifestFileLoadSnafu { path })?;
-        Ok(manifest)
-    }
-
-    /// Convenience method to return the list of source groups.
-    pub fn source_groups(&self) -> Option<&Vec<PathBuf>> {
-        self.build_package().and_then(|b| b.source_groups.as_ref())
-    }
-
-    /// Convenience method to return the list of external files.
-    pub fn external_files(&self) -> Option<&Vec<ExternalFile>> {
-        self.build_package().and_then(|b| b.external_files.as_ref())
-    }
-
-    /// Convenience method to return the package name override, if any.
-    pub fn package_name(&self) -> Option<&String> {
-        self.build_package().and_then(|b| b.package_name.as_ref())
-    }
-
-    /// Convenience method to find whether the package is sensitive to variant changes.
-    pub fn variant_sensitive(&self) -> Option<&VariantSensitivity> {
-        self.build_package()
-            .and_then(|b| b.variant_sensitive.as_ref())
-    }
-
-    /// Convenience method to return the image features tracked by this package.
-    pub fn package_features(&self) -> Option<HashSet<&ImageFeature>> {
-        self.build_package()
-            .and_then(|b| b.package_features.as_ref().map(|m| m.iter().collect()))
-    }
-
-    /// Convenience method to return the list of included packages.
-    pub fn included_packages(&self) -> Option<&Vec<String>> {
-        self.build_variant()
-            .and_then(|b| b.included_packages.as_ref())
-    }
-
-    /// Convenience method to return the image format override, if any.
-    pub fn image_format(&self) -> Option<&ImageFormat> {
-        self.build_variant().and_then(|b| b.image_format.as_ref())
-    }
-
-    /// Convenience method to return the image layout, if specified.
-    pub fn image_layout(&self) -> Option<&ImageLayout> {
-        self.build_variant().map(|b| &b.image_layout)
-    }
-
-    /// Convenience method to return the supported architectures for this variant.
-    pub fn supported_arches(&self) -> Option<&HashSet<SupportedArch>> {
-        self.build_variant()
-            .and_then(|b| b.supported_arches.as_ref())
-    }
-
-    /// Convenience method to return the kernel parameters for this variant.
-    pub fn kernel_parameters(&self) -> Option<&Vec<String>> {
-        self.build_variant()
-            .and_then(|b| b.kernel_parameters.as_ref())
-    }
-
-    /// Convenience method to return the enabled image features for this variant.
-    pub fn image_features(&self) -> Option<HashSet<&ImageFeature>> {
-        self.build_variant().and_then(|b| {
-            b.image_features
-                .as_ref()
-                .map(|m| m.iter().filter(|(_k, v)| **v).map(|(k, _v)| k).collect())
-        })
-    }
-
-    /// Helper methods to navigate the series of optional struct fields.
-    fn build_package(&self) -> Option<&BuildPackage> {
-        self.package
-            .metadata
-            .as_ref()
-            .and_then(|m| m.build_package.as_ref())
-    }
-
-    fn build_variant(&self) -> Option<&BuildVariant> {
-        self.package
-            .metadata
-            .as_ref()
-            .and_then(|m| m.build_variant.as_ref())
-    }
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "kebab-case")]
-struct Package {
-    metadata: Option<Metadata>,
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "kebab-case")]
-struct Metadata {
-    build_package: Option<BuildPackage>,
-    build_variant: Option<BuildVariant>,
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "kebab-case")]
-#[allow(dead_code)]
-pub struct BuildPackage {
-    pub external_files: Option<Vec<ExternalFile>>,
-    pub package_name: Option<String>,
-    pub releases_url: Option<String>,
-    pub source_groups: Option<Vec<PathBuf>>,
-    pub variant_sensitive: Option<VariantSensitivity>,
-    pub package_features: Option<HashSet<ImageFeature>>,
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "kebab-case")]
-#[serde(untagged)]
-pub enum VariantSensitivity {
-    Any(bool),
-    Specific(SensitivityType),
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "kebab-case")]
-pub enum SensitivityType {
-    Platform,
-    Runtime,
-    Family,
-    Flavor,
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "kebab-case")]
-pub struct BuildVariant {
-    pub included_packages: Option<Vec<String>>,
-    pub image_format: Option<ImageFormat>,
-    #[serde(default)]
-    pub image_layout: ImageLayout,
-    pub supported_arches: Option<HashSet<SupportedArch>>,
-    pub kernel_parameters: Option<Vec<String>>,
-    pub image_features: Option<HashMap<ImageFeature, bool>>,
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "lowercase")]
-pub enum ImageFormat {
-    Qcow2,
-    Raw,
-    Vmdk,
-}
-
-#[derive(Deserialize, Debug, Copy, Clone)]
-/// Constrain specified image sizes to a plausible range, from 0 - 65535 GiB.
-pub struct ImageSize(u16);
-
-impl Display for ImageSize {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self.0)
-    }
-}
-
-#[derive(Deserialize, Debug, Copy, Clone)]
-#[serde(rename_all = "kebab-case")]
-pub struct ImageLayout {
-    #[serde(default = "ImageLayout::default_os_image_size_gib")]
-    pub os_image_size_gib: ImageSize,
-    #[serde(default = "ImageLayout::default_data_image_size_gib")]
-    pub data_image_size_gib: ImageSize,
-    #[serde(default = "ImageLayout::default_publish_image_size_hint_gib")]
-    publish_image_size_hint_gib: ImageSize,
-    #[serde(default = "ImageLayout::default_partition_plan")]
-    pub partition_plan: PartitionPlan,
-}
-
-/// These are the historical defaults for all variants, before we added support
-/// for customizing these properties.
-static DEFAULT_OS_IMAGE_SIZE_GIB: ImageSize = ImageSize(2);
-static DEFAULT_DATA_IMAGE_SIZE_GIB: ImageSize = ImageSize(1);
-static DEFAULT_PUBLISH_IMAGE_SIZE_HINT_GIB: ImageSize = ImageSize(22);
-static DEFAULT_PARTITION_PLAN: PartitionPlan = PartitionPlan::Split;
-
-impl ImageLayout {
-    fn default_os_image_size_gib() -> ImageSize {
-        DEFAULT_OS_IMAGE_SIZE_GIB
-    }
-
-    fn default_data_image_size_gib() -> ImageSize {
-        DEFAULT_DATA_IMAGE_SIZE_GIB
-    }
-
-    fn default_publish_image_size_hint_gib() -> ImageSize {
-        DEFAULT_PUBLISH_IMAGE_SIZE_HINT_GIB
-    }
-
-    fn default_partition_plan() -> PartitionPlan {
-        DEFAULT_PARTITION_PLAN
-    }
-
-    // At publish time we will need specific sizes for the OS image and the (optional) data image.
-    // The sizes returned by this function depend on the image layout, and whether the publish
-    // image hint is larger than the required minimum size.
-    pub fn publish_image_sizes_gib(&self) -> (i32, i32) {
-        let os_image_base_size_gib = self.os_image_size_gib.0;
-        let data_image_base_size_gib = self.data_image_size_gib.0;
-        let publish_image_size_hint_gib = self.publish_image_size_hint_gib.0;
-
-        let min_publish_image_size_gib = os_image_base_size_gib + data_image_base_size_gib;
-        let publish_image_size_gib = max(publish_image_size_hint_gib, min_publish_image_size_gib);
-
-        match self.partition_plan {
-            PartitionPlan::Split => {
-                let os_image_publish_size_gib = os_image_base_size_gib;
-                let data_image_publish_size_gib = publish_image_size_gib - os_image_base_size_gib;
-                (
-                    os_image_publish_size_gib.into(),
-                    data_image_publish_size_gib.into(),
-                )
-            }
-            PartitionPlan::Unified => (publish_image_size_gib.into(), -1),
-        }
-    }
-}
-
-impl Default for ImageLayout {
-    fn default() -> Self {
-        Self {
-            os_image_size_gib: Self::default_os_image_size_gib(),
-            data_image_size_gib: Self::default_data_image_size_gib(),
-            publish_image_size_hint_gib: Self::default_publish_image_size_hint_gib(),
-            partition_plan: Self::default_partition_plan(),
-        }
-    }
-}
-
-#[derive(Deserialize, Debug, Copy, Clone)]
-#[serde(rename_all = "lowercase")]
-pub enum PartitionPlan {
-    Split,
-    Unified,
-}
-
-#[derive(Deserialize, Debug, PartialEq, Eq, Hash)]
-#[serde(rename_all = "lowercase")]
-pub enum SupportedArch {
-    X86_64,
-    Aarch64,
-}
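A worked example of the size calculation above, assuming the `ImageLayout` defaults and `publish_image_sizes_gib` as defined in this file:

```rust
// With os = 2 GiB, data = 1 GiB, and a 22 GiB publish hint, the required
// minimum (2 + 1 = 3) is below the hint, so the hint wins. Under the split
// plan the "os" volume keeps its built size and "data" gets the remainder.
let layout = ImageLayout::default();
assert_eq!(layout.publish_image_sizes_gib(), (2, 20));
```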
-/// Map a Linux architecture into the corresponding Docker architecture.
-impl SupportedArch {
-    pub fn goarch(&self) -> &'static str {
-        match self {
-            SupportedArch::X86_64 => "amd64",
-            SupportedArch::Aarch64 => "arm64",
-        }
-    }
-}
-
-#[derive(Deserialize, Debug, PartialEq, Eq, Hash)]
-#[serde(try_from = "String")]
-pub enum ImageFeature {
-    GrubSetPrivateVar,
-    SystemdNetworkd,
-    UnifiedCgroupHierarchy,
-    XfsDataPartition,
-    UefiSecureBoot,
-}
-
-impl TryFrom<String> for ImageFeature {
-    type Error = Error;
-    fn try_from(s: String) -> Result<Self> {
-        match s.as_str() {
-            "grub-set-private-var" => Ok(ImageFeature::GrubSetPrivateVar),
-            "systemd-networkd" => Ok(ImageFeature::SystemdNetworkd),
-            "unified-cgroup-hierarchy" => Ok(ImageFeature::UnifiedCgroupHierarchy),
-            "xfs-data-partition" => Ok(ImageFeature::XfsDataPartition),
-            "uefi-secure-boot" => Ok(ImageFeature::UefiSecureBoot),
-            _ => error::ParseImageFeatureSnafu { what: s }.fail()?,
-        }
-    }
-}
-
-impl fmt::Display for ImageFeature {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            ImageFeature::GrubSetPrivateVar => write!(f, "GRUB_SET_PRIVATE_VAR"),
-            ImageFeature::SystemdNetworkd => write!(f, "SYSTEMD_NETWORKD"),
-            ImageFeature::UnifiedCgroupHierarchy => write!(f, "UNIFIED_CGROUP_HIERARCHY"),
-            ImageFeature::XfsDataPartition => write!(f, "XFS_DATA_PARTITION"),
-            ImageFeature::UefiSecureBoot => write!(f, "UEFI_SECURE_BOOT"),
-        }
-    }
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "lowercase")]
-pub enum BundleModule {
-    Go,
-}
-
-#[derive(Deserialize, Debug)]
-#[serde(rename_all = "kebab-case")]
-pub struct ExternalFile {
-    pub path: Option<PathBuf>,
-    pub sha512: String,
-    pub url: String,
-    pub force_upstream: Option<bool>,
-    pub bundle_modules: Option<Vec<BundleModule>>,
-    pub bundle_root_path: Option<PathBuf>,
-    pub bundle_output_path: Option<PathBuf>,
-}
-
-impl fmt::Display for SupportedArch {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            SupportedArch::X86_64 => write!(f, "x86_64"),
-            SupportedArch::Aarch64 => write!(f, "aarch64"),
-        }
-    }
-}
diff --git a/tools/buildsys/src/manifest/error.rs b/tools/buildsys/src/manifest/error.rs
deleted file mode 100644
index 788cbb1a56c..00000000000
--- a/tools/buildsys/src/manifest/error.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use snafu::Snafu;
-use std::io;
-use std::path::PathBuf;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(super)))]
-pub(super) enum Error {
-    #[snafu(display("Failed to read manifest file '{}': {}", path.display(), source))]
-    ManifestFileRead { path: PathBuf, source: io::Error },
-
-    #[snafu(display("Failed to load manifest file '{}': {}", path.display(), source))]
-    ManifestFileLoad {
-        path: PathBuf,
-        source: toml::de::Error,
-    },
-
-    #[snafu(display("Failed to parse image feature '{}'", what))]
-    ParseImageFeature { what: String },
-
-    #[snafu(display("Invalid image size {}; must be between 1 and 1024", value))]
-    InvalidImageSize { value: i32 },
-}
diff --git a/tools/buildsys/src/project.rs b/tools/buildsys/src/project.rs
deleted file mode 100644
index 08b5d4ff46d..00000000000
--- a/tools/buildsys/src/project.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-/*!
-This module handles iterating through project directories to discover source
-files that should be passed to Cargo to watch for changes.
-
-For now, it's a thin wrapper around `walkdir` with a filter applied to ignore
-files that shouldn't trigger rebuilds.
diff --git a/tools/buildsys/src/project.rs
deleted file mode 100644
index 08b5d4ff46d..00000000000
--- a/tools/buildsys/src/project.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-/*!
-This module handles iterating through project directories to discover source
-files that should be passed to Cargo to watch for changes.
-
-For now, it's a thin wrapper around `walkdir` with a filter applied to ignore
-files that shouldn't trigger rebuilds.
-
-*/
-pub(crate) mod error;
-use error::Result;
-
-use snafu::ResultExt;
-use std::path::{Path, PathBuf};
-use walkdir::{DirEntry, WalkDir};
-
-pub(crate) struct ProjectInfo {
-    pub(crate) files: Vec<PathBuf>,
-}
-
-impl ProjectInfo {
-    /// Traverse the list of directories and produce a list of files to track.
-    pub(crate) fn crawl<P: AsRef<Path>>(dirs: &[P]) -> Result<Self> {
-        let mut files = Vec::new();
-
-        for dir in dirs {
-            let walker = WalkDir::new(dir)
-                .follow_links(false)
-                .same_file_system(true)
-                .into_iter();
-
-            files.extend(
-                walker
-                    .filter_entry(|e| !Self::ignored(e))
-                    .flat_map(|e| e.context(error::DirectoryWalkSnafu))
-                    .map(|e| e.into_path())
-                    .filter(|e| e.is_file()),
-            );
-        }
-
-        Ok(ProjectInfo { files })
-    }
-
-    /// Exclude hidden files and build artifacts from the list.
-    fn ignored(entry: &DirEntry) -> bool {
-        entry
-            .file_name()
-            .to_str()
-            .map(|s| s.starts_with('.') || s == "target" || s == "vendor" || s == "README.md")
-            .unwrap_or(false)
-    }
-}
diff --git a/tools/buildsys/src/project/error.rs
deleted file mode 100644
index 03502682426..00000000000
--- a/tools/buildsys/src/project/error.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-use snafu::Snafu;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(super)))]
-pub(crate) enum Error {
-    #[snafu(display("Failed to walk directory to find project files: {}", source))]
-    DirectoryWalk { source: walkdir::Error },
-}
-
-pub(super) type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/buildsys/src/spec.rs
deleted file mode 100644
index 946b97f7a08..00000000000
--- a/tools/buildsys/src/spec.rs
+++ /dev/null
@@ -1,74 +0,0 @@
-/*!
-This module provides a very simple parser for RPM spec files.
-
-It does not attempt to expand macros or perform any meaningful validation. Its
-only purpose is to extract Source and Patch declarations so they can be passed
-to Cargo as files to watch for changes.
-
-*/
-pub(crate) mod error;
-use error::Result;
-
-use snafu::ResultExt;
-use std::collections::VecDeque;
-use std::fs::File;
-use std::io::{BufRead, BufReader};
-use std::path::{Path, PathBuf};
-
-pub(crate) struct SpecInfo {
-    pub(crate) sources: Vec<PathBuf>,
-    pub(crate) patches: Vec<PathBuf>,
-}
-
-impl SpecInfo {
-    /// Returns a list of 'Source' and 'Patch' lines found in a spec file.
-    pub(crate) fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
-        let (sources, patches) = Self::parse(path)?;
-        let sources = Self::filter(&sources);
-        let patches = Self::filter(&patches);
-        Ok(Self { sources, patches })
-    }
-
-    /// "Parse" a spec file, extracting values of potential interest.
-    fn parse<P: AsRef<Path>>(path: P) -> Result<(Vec<String>, Vec<String>)> {
-        let path = path.as_ref();
-        let f = File::open(path).context(error::SpecFileReadSnafu { path })?;
-        let f = BufReader::new(f);
-
-        let mut sources = Vec::new();
-        let mut patches = Vec::new();
-
-        for line in f.lines() {
-            let line = line.context(error::SpecFileReadSnafu { path })?;
-
-            let mut tokens = line.split_whitespace().collect::<VecDeque<&str>>();
-            if let Some(t) = tokens.pop_front() {
-                if t.starts_with("Source") {
-                    if let Some(s) = tokens.pop_front() {
-                        sources.push(s.into());
-                    }
-                } else if t.starts_with("Patch") {
-                    if let Some(p) = tokens.pop_front() {
-                        patches.push(p.into());
-                    }
-                }
-            }
-        }
-
-        Ok((sources, patches))
-    }
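A standalone sketch of the extraction performed by `parse` above, run against a few hypothetical spec lines (the file names are made up for illustration):

```rust
use std::collections::VecDeque;

fn main() {
    let mut sources = Vec::new();
    let mut patches = Vec::new();
    // The first token picks the list; the second token is the recorded value.
    for line in ["Source0: bottlerocket.tar.gz", "Patch1: fix-build.patch", "%build"] {
        let mut tokens = line.split_whitespace().collect::<VecDeque<&str>>();
        if let Some(t) = tokens.pop_front() {
            if t.starts_with("Source") {
                sources.extend(tokens.pop_front());
            } else if t.starts_with("Patch") {
                patches.extend(tokens.pop_front());
            }
        }
    }
    assert_eq!(sources, ["bottlerocket.tar.gz"]);
    assert_eq!(patches, ["fix-build.patch"]);
}
```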
-
-    /// Emitting a non-existent file for `rerun-if-changed` will cause Cargo
-    /// to always repeat the build. Therefore we exclude "files" that do not
-    /// exist or that point outside the package directory. We also exclude
-    /// anything that appears to be an unexpanded macro.
-    fn filter(input: &[String]) -> Vec<PathBuf> {
-        input
-            .iter()
-            .filter(|s| !s.contains("%{"))
-            .map(PathBuf::from)
-            .filter(|p| p.components().count() == 1)
-            .filter(|p| p.file_name().is_some())
-            .collect()
-    }
-}
diff --git a/tools/buildsys/src/spec/error.rs
deleted file mode 100644
index 969ccf32d7e..00000000000
--- a/tools/buildsys/src/spec/error.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-use snafu::Snafu;
-use std::io;
-use std::path::PathBuf;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(super)))]
-pub(crate) enum Error {
-    #[snafu(display("Failed to read spec file '{}': {}", path.display(), source))]
-    SpecFileRead { path: PathBuf, source: io::Error },
-}
-
-pub(super) type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/deny.toml
deleted file mode 100644
index 584d80be901..00000000000
--- a/tools/deny.toml
+++ /dev/null
@@ -1,102 +0,0 @@
-[licenses]
-unlicensed = "deny"
-
-# Deny licenses unless they are specifically listed here
-copyleft = "deny"
-allow-osi-fsf-free = "neither"
-default = "deny"
-
-# We want really high confidence when inferring licenses from text
-confidence-threshold = 0.93
-
-# Commented license types are allowed but not currently used
-allow = [
-    "Apache-2.0",
-    "BSD-2-Clause",
-    "BSD-3-Clause",
-    "BSL-1.0",
-    # "CC0-1.0",
-    "ISC",
-    "MIT",
-    "OpenSSL",
-    "Unlicense",
-    "Zlib",
-]
-
-exceptions = [
-    { name = "webpki-roots", allow = ["MPL-2.0"], version = "*" },
-    { name = "unicode-ident", version = "1.0.4", allow = ["MIT", "Apache-2.0", "Unicode-DFS-2016"] },
-]
-
-# https://github.com/hsivonen/encoding_rs The non-test code that isn't generated from the WHATWG data in this crate is
-# under Apache-2.0 OR MIT. Test code is under CC0.
-[[licenses.clarify]]
-name = "encoding_rs"
-version = "0.8.30"
-expression = "(Apache-2.0 OR MIT) AND BSD-3-Clause"
-license-files = [
-    { path = "COPYRIGHT", hash = 0x39f8ad31 }
-]
-
-[[licenses.clarify]]
-name = "ring"
-expression = "MIT AND ISC AND OpenSSL"
-license-files = [
-    { path = "LICENSE", hash = 0xbd0eed23 },
-]
-
-[[licenses.clarify]]
-name = "webpki"
-expression = "ISC"
-license-files = [
-    { path = "LICENSE", hash = 0x001c7e6c },
-]
-
-[[licenses.clarify]]
-name = "rustls-webpki"
-expression = "ISC"
-license-files = [
-    { path = "LICENSE", hash = 0x001c7e6c },
-]
-
-[bans]
-# Deny multiple versions or wildcard dependencies.
-multiple-versions = "deny"
-wildcards = "deny"
-
-skip = [
-    # several dependencies are using multiple versions of base64
-    { name = "base64" },
-    # several dependencies are using an old version of bitflags
-    { name = "bitflags", version = "=1.3" },
-    # several dependencies are using an old version of serde_yaml
-    { name = "serde_yaml", version = "=0.8" },
-    # governor uses an old version of wasi
-    { name = "wasi", version = "=0.10.2" },
-    # aws-sdk-rust is using an old version of fastrand
-    { name = "fastrand", version = "=1.9" },
-    # aws-sdk-rust is using an old version of rustls, hyper-rustls, and tokio-rustls
-    { name = "rustls", version = "=0.20" },
-    { name = "hyper-rustls", version = "=0.23" },
-    { name = "tokio-rustls", version = "=0.23" },
-    # kube-client uses an old version of redox_syscall
-    { name = "redox_syscall", version = "=0.2" },
-]
-
-skip-tree = [
-    # windows-sys is not a direct dependency. mio and schannel
-    # are using different versions of windows-sys. we skip the
-    # dependency tree because windows-sys has many sub-crates
-    # that differ in major version.
-    { name = "windows-sys" },
-    # generate-readme uses an old version of clap and other dependencies
-    { name = "generate-readme", version = "0.1.0" }
-]
-
-[sources]
-allow-git = [
-    "https://github.com/bottlerocket-os/bottlerocket-test-system",
-]
-# Deny crates from unknown registries or git repositories.
-unknown-registry = "deny"
-unknown-git = "deny"
diff --git a/tools/docker-go
deleted file mode 100755
index 50915d987b4..00000000000
--- a/tools/docker-go
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env bash
-
-# Helper script for running commands in a golang build/runtime environment for testing/vendoring/building a go module
-
-set -e -o pipefail
-
-usage() {
-    cat >&2 <<EOF
-${0##*/} --module-path <path>
-    --sdk-image <image>
-    --go-version <version>
-    --go-mod-cache <path>
-    --command "<command>"
-Runs <command> in a golang build/runtime environment container.
-
-Required:
-   --module-path  The path of the Go module to mount into the container
-   --sdk-image    Name of the SDK image to use
-   --go-mod-cache The Go module cache path to mount into the container
-   --command      The command to run in the SDK container
-EOF
-}
-
-required_arg() {
-    local arg="${1:?}"
-    local value="${2}"
-    if [ -z "${value}" ]; then
-        echo "ERROR: ${arg} is required" >&2
-        exit 2
-    fi
-}
-
-# shellcheck disable=SC2124 # TODO: improve command interface (#2534)
-parse_args() {
-    while [ ${#} -gt 0 ] ; do
-        case "${1}" in
-            --help ) usage; exit 0 ;;
-            --module-path ) shift; GO_MODULE_PATH="${1}" ;;
-            --sdk-image ) shift; SDK_IMAGE="${1}" ;;
-            --go-mod-cache ) shift; GO_MOD_CACHE="${1}" ;;
-            --command ) shift; COMMAND="${@:1}" ;;
-            *) ;;
-        esac
-        shift
-    done
-
-    # Required arguments
-    required_arg "--module-path" "${GO_MODULE_PATH}"
-    required_arg "--sdk-image" "${SDK_IMAGE}"
-    required_arg "--go-mod-cache" "${GO_MOD_CACHE}"
-    required_arg "--command" "${COMMAND}"
-}
-
-parse_args "${@}"
-
-# We need to mount the ../.. parent of GO_MOD_CACHE
-GOPATH=$(cd "${GO_MOD_CACHE}/../.." && pwd)
-
-DOCKER_RUN_ARGS="--network=host"
-
-# Pass through relevant Go variables, from the config or environment.
-go_env=( )
-for i in GOPROXY GONOPROXY GOPRIVATE GOSUMDB ; do
-    if command -v go >/dev/null 2>&1 ; then
-        govar="$(go env ${i})"
-        if [ -n "${govar}" ] ; then
-            go_env[${#go_env[@]}]="--env=${i}=${govar}"
-        fi
-    elif [ -n "${!i}" ] ; then
-        go_env[${#go_env[@]}]="--env=${i}=${!i}"
-    fi
-done
-
-# Go accepts both lower and uppercase proxy variables, pass both through.
-proxy_env=( )
-for i in http_proxy https_proxy no_proxy HTTP_PROXY HTTPS_PROXY NO_PROXY ; do
-    if [ -n "${!i}" ]; then
-        proxy_env[${#proxy_env[@]}]="--env=$i=${!i}"
-    fi
-done
-
-docker run --rm \
-    -e GOCACHE='/tmp/.cache' \
-    -e GOPATH="${GOPATH}" \
-    "${go_env[@]}" \
-    "${proxy_env[@]}" \
-    --user "$(id -u):$(id -g)" \
-    --security-opt="label=disable" \
-    ${DOCKER_RUN_ARGS} \
-    -v "${GOPATH}":"${GOPATH}" \
-    -v "${GO_MODULE_PATH}":"${GO_MODULE_PATH}" \
-    -w "${GO_MODULE_PATH}" \
-    "${SDK_IMAGE}" \
-    bash -c "${COMMAND}"
diff --git a/tools/infrasys/Cargo.toml
deleted file mode 100644
index 8579f62eee9..00000000000
--- a/tools/infrasys/Cargo.toml
+++ /dev/null
@@ -1,29 +0,0 @@
-[package]
-name = "infrasys"
-version = "0.1.0"
-license = "Apache-2.0 OR MIT"
-authors = ["Aashna Sheth "]
-edition = "2021"
-publish = false
-
-[dependencies]
-async-trait = "0.1"
-clap = { version = "4", features = ["derive"] }
-hex = "0.4"
-log = "0.4"
-pubsys-config = { path = "../pubsys-config/", version = "0.1" }
-aws-config = "0.55"
-aws-types = "0.55"
-aws-sdk-cloudformation = "0.28"
-aws-sdk-s3 = "0.28"
-serde_json = "1"
-serde_yaml = "0.9"
-sha2 = "0.10"
-shell-words = "1"
-simplelog = "0.12"
-snafu = "0.7"
-tokio = { version = "1", default-features = false, features = ["macros", "rt-multi-thread"] }
-url = "2"
-
-[dev-dependencies]
-assert-json-diff = "2"
diff --git a/tools/infrasys/cloudformation-templates/kms_key_setup.yml
deleted file mode 100644
index 385174526be..00000000000
--- a/tools/infrasys/cloudformation-templates/kms_key_setup.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-Parameters:
-  Alias:
-    Description: "Required. Alias for KMS key to be created"
-    Type: String
-
-Resources:
-  KMSKey:
-    Type: AWS::KMS::Key
-    Properties:
-      KeySpec: RSA_3072
-      KeyUsage: SIGN_VERIFY
-      KeyPolicy:
-        Statement:
-          - Effect: Allow
-            Principal:
-              AWS: !Sub "arn:aws:iam::${AWS::AccountId}:root"
-            Action: "kms:*"
-            Resource: "*"
-
-  KMSKeyAlias:
-    Type: AWS::KMS::Alias
-    DependsOn:
-      - KMSKey
-    Properties:
-      AliasName: !Sub "alias/${Alias}"
-      TargetKeyId: !Ref KMSKey
-
-Outputs:
-  KeyId:
-    Value: !GetAtt KMSKey.Arn
diff --git a/tools/infrasys/cloudformation-templates/s3_setup.yml
deleted file mode 100644
index 31b4e9fe35d..00000000000
--- a/tools/infrasys/cloudformation-templates/s3_setup.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-Resources:
-  TUFRepoBucket:
-    Type: AWS::S3::Bucket
-    DeletionPolicy: Retain
-    Properties:
-      VersioningConfiguration:
-        Status: Enabled
-      AccessControl: LogDeliveryWrite
-      MetricsConfigurations:
-        - Id: BucketMetrics
-      BucketEncryption:
-        ServerSideEncryptionConfiguration:
-          - ServerSideEncryptionByDefault:
-              SSEAlgorithm: AES256
-      PublicAccessBlockConfiguration:
-        BlockPublicAcls: True
-        BlockPublicPolicy: True
-        IgnorePublicAcls: True
-        RestrictPublicBuckets: True
-
-Outputs:
-  BucketName:
-    Value: !Ref TUFRepoBucket
-  RDN:
-    Value: !GetAtt TUFRepoBucket.RegionalDomainName
diff --git a/tools/infrasys/src/error.rs
deleted file mode 100644
index 1a3b668b103..00000000000
--- a/tools/infrasys/src/error.rs
+++ /dev/null
@@ -1,169 +0,0 @@
-use aws_sdk_s3::error::SdkError;
-use snafu::Snafu;
-use std::io;
-use std::path::PathBuf;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(super)))]
-pub enum Error {
-    #[snafu(display(
-        "Failed to create CFN stack '{}' in '{}': {}",
-        stack_name,
-        region,
-        source
-    ))]
-    CreateStack {
-        stack_name: String,
-        region: String,
-        source: SdkError<aws_sdk_cloudformation::operation::create_stack::CreateStackError>,
-    },
-
-    #[snafu(display(
-        "Received CREATE_FAILED status for CFN stack '{}' in '{}'",
-        stack_name,
-        region
-    ))]
-    CreateStackFailure { stack_name: String, region: String },
-
-    #[snafu(display("Error splitting shell command '{}': {}", command, source))]
-    CommandSplit {
-        command: String,
-        source: shell_words::ParseError,
-    },
-
-    #[snafu(display("Error reading Infra.toml: {}", source))]
-    Config { source: pubsys_config::Error },
-
-    #[snafu(display(
-        "Stuck in indefinite CREATE_IN_PROGRESS loop for CFN stack '{}' in '{}'",
-        stack_name,
-        region
-    ))]
-    CreateStackTimeout { stack_name: String, region: String },
-
-    #[snafu(display("No stack data returned for CFN stack '{}' in {}", stack_name, region))]
-    MissingStack { stack_name: String, region: String },
-
-    #[snafu(display(
-        "Failed to fetch stack details for CFN stack '{}' in '{}': {}",
-        stack_name,
-        region,
-        source
-    ))]
-    DescribeStack {
-        stack_name: String,
-        region: String,
-        source: SdkError<aws_sdk_cloudformation::operation::describe_stacks::DescribeStacksError>,
-    },
-
-    #[snafu(display("Missing environment variable '{}'", var))]
-    Environment {
-        var: String,
-        source: std::env::VarError,
-    },
-
-    #[snafu(display("File already exists at '{}'", path.display()))]
-    FileExists { path: PathBuf },
-
-    #[snafu(display("Failed to open file at '{}': {}", path.display(), source))]
-    FileOpen { path: PathBuf, source: io::Error },
-
-    #[snafu(display("Failed to read file at '{}': {}", path.display(), source))]
-    FileRead { path: PathBuf, source: io::Error },
-
-    #[snafu(display("Failed to write file at '{}': {}", path.display(), source))]
-    FileWrite { path: PathBuf, source: io::Error },
-
-    #[snafu(display("Failed to get bucket policy statement for bucket '{}'", bucket_name))]
-    GetPolicyStatement { bucket_name: String },
-
-    #[snafu(display("Failed to convert '{}' to json: {}", what, source))]
-    InvalidJson {
-        what: String,
-        source: serde_json::Error,
-    },
-
-    #[snafu(display("Invalid path '{}' for '{}'", path.display(), thing))]
-    InvalidPath { path: PathBuf, thing: String },
-
-    #[snafu(display("Publication/Root key threshold must be <= {}, currently {}", num_keys.to_string(), threshold))]
-    InvalidThreshold { threshold: String, num_keys: usize },
-
-    #[snafu(display("Failed to convert updated Infra.toml information to yaml: {}", source))]
-    InvalidYaml { source: serde_yaml::Error },
-
-    #[snafu(display(
-        "Failed to create keys due to invalid key config. Missing '{}'.",
-        missing
-    ))]
-    KeyConfig { missing: String },
-
-    #[snafu(display(
-        "Failed to create new keys or access pre-existing keys in available_keys list."
-    ))]
-    KeyCreation,
-
-    #[snafu(display("Logger setup error: {}", source))]
-    Logger { source: log::SetLoggerError },
-
-    #[snafu(display("Infra.toml is missing '{}'", missing))]
-    MissingConfig { missing: String },
-
-    #[snafu(display("Failed to create directory '{}': {}", path.display(), source))]
-    Mkdir { path: PathBuf, source: io::Error },
-
-    #[snafu(display("Failed to get parent of path '{}'", path.display()))]
-    Parent { path: PathBuf },
-
-    #[snafu(display("Failed to parse '{}' to int: {}", what, source))]
-    ParseInt {
-        what: String,
-        source: std::num::ParseIntError,
-    },
-
-    #[snafu(display("Failed to find default region"))]
-    DefaultRegion,
-
-    #[snafu(display("Unable to parse stack status"))]
-    ParseStatus,
-
-    #[snafu(display(
-        "Failed to find field '{}' after attempting to create resource '{}'",
-        what,
-        resource_name
-    ))]
-    ParseResponse { what: String, resource_name: String },
-
-    #[snafu(display("Failed to convert '{}' to URL: {}", input, source))]
-    ParseUrl {
-        input: String,
-        source: url::ParseError,
-    },
-
-    #[snafu(display("Failed to push object to bucket '{}': {}", bucket_name, source))]
-    PutObject {
-        bucket_name: String,
-        source: SdkError<aws_sdk_s3::operation::put_object::PutObjectError>,
-    },
-
-    #[snafu(display(
-        "Failed to update bucket policy for bucket '{}': {}",
-        bucket_name,
-        source
-    ))]
-    PutPolicy {
-        bucket_name: String,
-        source: SdkError<aws_sdk_s3::operation::put_bucket_policy::PutBucketPolicyError>,
-    },
-
-    #[snafu(display("Failed to create async runtime: {}", source))]
-    Runtime { source: std::io::Error },
-
-    #[snafu(display("'tuftool {}' returned {}", command, code))]
-    TuftoolResult { command: String, code: String },
-
-    #[snafu(display("Failed to start tuftool: {}", source))]
-    TuftoolSpawn { source: io::Error },
-}
-
-pub type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/infrasys/src/keys.rs
deleted file mode 100644
index a00283c2537..00000000000
--- a/tools/infrasys/src/keys.rs
+++ /dev/null
@@ -1,150 +0,0 @@
-use async_trait::async_trait;
-use aws_sdk_cloudformation::Client as CloudFormationClient;
-use aws_types::region::Region;
-use pubsys_config::{KMSKeyConfig, SigningKeyConfig};
-use snafu::{OptionExt, ResultExt};
-use std::fs;
-
-use super::{error, shared, Result};
-
-/// Creates keys using data stored in SigningKeyConfig enum
-/// Output: Edits KMSConfig fields in place after creating new keys
-pub async fn create_keys(signing_key_config: &mut SigningKeyConfig) -> Result<()> {
-    // An extra check even though these parameters are checked earlier in main.rs
-    check_signing_key_config(signing_key_config)?;
-    match signing_key_config {
-        SigningKeyConfig::file { .. } => (),
-        SigningKeyConfig::kms { config, .. } => {
-            config
-                .as_mut()
-                .context(error::MissingConfigSnafu {
-                    missing: "config field for a kms key",
-                })?
-                .create_kms_keys()
-                .await?;
-        }
-        SigningKeyConfig::ssm { .. } => (),
-    }
-    Ok(())
-}
-
-pub fn check_signing_key_config(signing_key_config: &SigningKeyConfig) -> Result<()> {
-    match signing_key_config {
-        SigningKeyConfig::file { .. } => (),
-        SigningKeyConfig::kms { config, .. } => {
-            let config = config.as_ref().context(error::MissingConfigSnafu {
-                missing: "config field for kms keys",
-            })?;
-
-            match (
-                config.available_keys.is_empty(),
-                config.regions.is_empty(),
-                config.key_alias.as_ref(),
-            ) {
-                // everything is unspecified (no way to allocate a key_id)
-                (true, true, None) => error::KeyConfigSnafu {
-                    missing: "an available_key or region/key_alias",
-                }
-                .fail()?,
-                // regions is populated, but no key alias
-                // (it doesn't matter if available keys are listed or not)
-                (_, false, None) => error::KeyConfigSnafu {
-                    missing: "key_alias",
-                }
-                .fail()?,
-                // key alias is populated, but no key regions to create keys in
-                // (it doesn't matter if available keys are listed or not)
-                (_, true, Some(..)) => error::KeyConfigSnafu { missing: "region" }.fail()?,
-                _ => (),
-            };
-        }
-        SigningKeyConfig::ssm { .. } => (),
-    }
-    Ok(())
-}
-
-/// Must create a trait because we can't directly implement a method for a struct in an
-/// external crate like KMSKeyConfig (which lives in pubsys-config/lib.rs)
-#[async_trait]
-trait KMSKeyConfigExt {
-    async fn create_kms_keys(&mut self) -> Result<()>;
-}
-
-/// Creates new KMS keys using cloudformation in regions specified
-/// Input Conditions: Alias+Region or AvailableKeys must be specified
-/// Output: Populates KMSKeyConfig with information about resources created
-/// 'available-keys' starts as a map of pre-existing keyids:regions and will end as a
-/// map of pre-existing and generated keyids:regions,
-/// 'key-stack-arns' starts empty and will end as a
-/// map of keyids:stackarn if new keys are created
-#[async_trait]
-impl KMSKeyConfigExt for KMSKeyConfig {
-    async fn create_kms_keys(&mut self) -> Result<()> {
-        // Generating new keys (if regions is non-empty)
-        for region in self.regions.iter() {
-            let stack_name = format!(
-                "TUF-KMS-{}",
-                self.key_alias.as_ref().context(error::KeyConfigSnafu {
-                    missing: "key_alias",
-                })?
-            );
-
-            let config = aws_config::from_env()
-                .region(Region::new(region.to_owned()))
-                .load()
-                .await;
-            let cfn_client = CloudFormationClient::new(&config);
-
-            let cfn_filepath = format!(
-                "{}/infrasys/cloudformation-templates/kms_key_setup.yml",
-                shared::getenv("BUILDSYS_TOOLS_DIR")?
-            );
-            let cfn_template = fs::read_to_string(&cfn_filepath)
-                .context(error::FileReadSnafu { path: cfn_filepath })?;
-
-            let stack_result = cfn_client
-                .create_stack()
-                .parameters(shared::create_parameter(
-                    "Alias".to_string(),
-                    self.key_alias
-                        .as_ref()
-                        .context(error::KeyConfigSnafu {
-                            missing: "key_alias",
-                        })?
- .to_string(), - )) - .stack_name(stack_name.clone()) - .template_body(cfn_template.clone()) - .send() - .await - .context(error::CreateStackSnafu { - stack_name: &stack_name, - region, - })?; - - let stack_arn = stack_result - .clone() - .stack_id - .context(error::ParseResponseSnafu { - what: "stack_id", - resource_name: &stack_name, - })?; - - let output_array = shared::get_stack_outputs(&cfn_client, &stack_name, region).await?; - let key_id = - output_array[0] - .output_value - .as_ref() - .context(error::ParseResponseSnafu { - what: "outputs[0].output_value (key id)", - resource_name: stack_name, - })?; - self.available_keys - .insert(key_id.to_string(), region.to_string()); - self.key_stack_arns - .insert(key_id.to_string(), stack_arn.to_string()); - } - - Ok(()) - } -} diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs deleted file mode 100644 index 7fa8ce815ae..00000000000 --- a/tools/infrasys/src/main.rs +++ /dev/null @@ -1,361 +0,0 @@ -mod error; -mod keys; -mod root; -mod s3; -mod shared; - -use aws_sdk_cloudformation::config::Region; -use clap::Parser; -use error::Result; -use log::{error, info}; -use pubsys_config::{InfraConfig, RepoConfig, S3Config, SigningKeyConfig}; -use sha2::{Digest, Sha512}; -use shared::KeyRole; -use simplelog::{CombinedLogger, Config as LogConfig, ConfigBuilder, LevelFilter, SimpleLogger}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::HashMap; -use std::num::NonZeroUsize; -use std::path::{Path, PathBuf}; -use std::{fs, process}; -use tokio::runtime::Runtime; -use url::Url; - -// =^..^= =^..^= =^..^= SUB-COMMAND STRUCTS =^..^= =^..^= =^..^= - -#[derive(Debug, Parser)] -struct Args { - #[arg(global = true, long, default_value = "INFO")] - log_level: LevelFilter, - - // Path to Infra.toml (NOTE: must be specified before subcommand) - #[arg(long)] - infra_config_path: PathBuf, - - #[command(subcommand)] - subcommand: SubCommand, -} - -#[derive(Debug, Parser)] -struct CreateInfraArgs { - /// Path to the root.json file. - #[arg(long)] - root_role_path: PathBuf, -} - -#[derive(Debug, Parser)] -enum SubCommand { - /// Creates infrastructure specified in the Infra.toml file. - CreateInfra(CreateInfraArgs), -} - -// =^..^= =^..^= =^..^= MAIN METHODS =^..^= =^..^= =^..^= - -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -fn run() -> Result<()> { - // Parse and store the args passed to the program - let args = Args::parse(); - - match args.log_level { - // Set log level for AWS SDK to error to reduce verbosity. - LevelFilter::Info => { - CombinedLogger::init(vec![ - SimpleLogger::new( - LevelFilter::Info, - ConfigBuilder::new() - .add_filter_ignore_str("aws_config") - .add_filter_ignore_str("aws_smithy") - .add_filter_ignore_str("tracing::span") - .build(), - ), - SimpleLogger::new( - LevelFilter::Warn, - ConfigBuilder::new() - .add_filter_allow_str("aws_config") - .add_filter_allow_str("aws_smithy") - .add_filter_allow_str("tracing::span") - .build(), - ), - ]) - .context(error::LoggerSnafu)?; - } - - // Set the supplied log level across the whole crate. - _ => { - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)? 
- } - } - - match args.subcommand { - SubCommand::CreateInfra(ref run_task_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - create_infra(&args.infra_config_path, &run_task_args.root_role_path).await - }) - } - } -} - -fn check_infra_lock(toml_path: &Path) -> Result<()> { - let lock_path = InfraConfig::compute_lock_path(toml_path).context(error::ConfigSnafu)?; - - ensure!(!lock_path.is_file(), { - error!( - "It looks like you've already created some resources for your custom TUF repository because a lock file exists at '{}'. - \nPlease clean up your TUF resources in AWS, delete Infra.lock, and run again.", - lock_path.display() - ); - error::FileExistsSnafu { path: lock_path } - }); - Ok(()) -} - -/// Automates setting up infrastructure for a custom TUF repo -async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { - check_infra_lock(toml_path)?; - info!("Parsing Infra.toml..."); - let mut infra_config = InfraConfig::from_path(toml_path).context(error::ConfigSnafu)?; - let repos = infra_config - .repo - .as_mut() - .context(error::MissingConfigSnafu { missing: "repo" })?; - let s3_info_map = infra_config - .aws - .as_mut() - .context(error::MissingConfigSnafu { missing: "aws" })? - .s3 - .as_mut() - .context(error::MissingConfigSnafu { missing: "aws.s3" })?; - - for (repo_name, repo_config) in repos.iter_mut() { - // Validate repo_config and unwrap required optional data - let mut repo_info = ValidRepoInfo::new(repo_config, repo_name, s3_info_map)?; - - // Validate the key configurations and root file - keys::check_signing_key_config(repo_info.signing_keys)?; - keys::check_signing_key_config(repo_info.root_keys)?; - root::check_root(root_role_path)?; - - // Create the repo - let (s3_stack_arn, bucket_name, bucket_rdn) = - create_repo_infrastructure(&mut repo_info).await?; - *repo_info.stack_arn = Some(s3_stack_arn); - *repo_info.bucket_name = Some(bucket_name.clone()); - update_root_and_sign_root(&mut repo_info, root_role_path).await?; - - // Upload root.json. - info!("Uploading root.json to S3 bucket..."); - s3::upload_file( - &repo_info.s3_region, - &bucket_name, - &repo_info.prefix, - root_role_path, - ) - .await?; - - // Update infra_config with output parameters if not already set - if repo_info.metadata_base_url.is_none() { - *repo_info.metadata_base_url = Some( - Url::parse(format!("https://{}{}/", &bucket_rdn, &repo_info.prefix).as_str()) - .context(error::ParseUrlSnafu { input: &bucket_rdn })?, - ); - } - if repo_info.targets_url.is_none() { - *repo_info.targets_url = Some( - Url::parse( - format!("https://{}{}/targets/", &bucket_rdn, &repo_info.prefix).as_str(), - ) - .context(error::ParseUrlSnafu { input: &bucket_rdn })?, - ); - } - if repo_info.root_role_url.is_none() { - *repo_info.root_role_url = Some( - Url::parse( - format!("https://{}{}/root.json", &bucket_rdn, &repo_info.prefix).as_str(), - ) - .context(error::ParseUrlSnafu { input: &bucket_rdn })?, - ); - } - let root_role_data = fs::read_to_string(root_role_path).context(error::FileReadSnafu { - path: root_role_path, - })?; - let mut d = Sha512::new(); - d.update(&root_role_data); - let digest = hex::encode(d.finalize()); - repo_config.root_role_sha512 = Some(digest); - } - - // Generate Infra.lock - info!("Writing Infra.lock..."); - let yaml_string = serde_yaml::to_string(&infra_config).context(error::InvalidYamlSnafu)?; - fs::write( - toml_path - .parent() - .context(error::ParentSnafu { path: toml_path })? 
-            .join("Infra.lock"),
-        yaml_string,
-    )
-    .context(error::FileWriteSnafu { path: toml_path })?;
-
-    info!("Complete!");
-    Ok(())
-}
-
-struct ValidRepoInfo<'a> {
-    bucket_name: &'a mut Option<String>,
-    metadata_base_url: &'a mut Option<Url>,
-    prefix: String,
-    pub_key_threshold: &'a NonZeroUsize,
-    root_key_threshold: &'a NonZeroUsize,
-    root_keys: &'a mut SigningKeyConfig,
-    root_role_url: &'a mut Option<Url>,
-    s3_region: Region,
-    s3_stack_name: String,
-    signing_keys: &'a mut SigningKeyConfig,
-    stack_arn: &'a mut Option<String>,
-    targets_url: &'a mut Option<Url>,
-    vpce_id: &'a String,
-}
-
-impl<'a> ValidRepoInfo<'a> {
-    fn new(
-        repo_config: &'a mut RepoConfig,
-        repo_name: &str,
-        s3_info_map: &'a mut HashMap<String, S3Config>,
-    ) -> Result<Self> {
-        let s3_stack_name =
-            repo_config
-                .file_hosting_config_name
-                .to_owned()
-                .context(error::MissingConfigSnafu {
-                    missing: "file_hosting_config_name",
-                })?;
-        let s3_info = s3_info_map
-            .get_mut(&s3_stack_name)
-            .context(error::MissingConfigSnafu {
-                missing: format!("aws.s3 config with name {}", s3_stack_name),
-            })?;
-        Ok(ValidRepoInfo {
-            s3_stack_name: s3_stack_name.to_string(),
-            s3_region: Region::new(s3_info.region.as_ref().cloned().context(
-                error::MissingConfigSnafu {
-                    missing: format!("region for '{}' s3 config", s3_stack_name),
-                },
-            )?),
-            bucket_name: &mut s3_info.bucket_name,
-            stack_arn: &mut s3_info.stack_arn,
-            vpce_id: s3_info
-                .vpc_endpoint_id
-                .as_ref()
-                .context(error::MissingConfigSnafu {
-                    missing: format!("vpc_endpoint_id for '{}' s3 config", s3_stack_name),
-                })?,
-            prefix: s3::format_prefix(&s3_info.s3_prefix),
-            signing_keys: repo_config
-                .signing_keys
-                .as_mut()
-                .context(error::MissingConfigSnafu {
-                    missing: format!("signing_keys for '{}' repo config", repo_name),
-                })?,
-            root_keys: repo_config
-                .root_keys
-                .as_mut()
-                .context(error::MissingConfigSnafu {
-                    missing: format!("root_keys for '{}' repo config", repo_name),
-                })?,
-            root_key_threshold: repo_config.root_key_threshold.as_mut().context(
-                error::MissingConfigSnafu {
-                    missing: format!("root_key_threshold for '{}' repo config", repo_name),
-                },
-            )?,
-            pub_key_threshold: repo_config.pub_key_threshold.as_ref().context(
-                error::MissingConfigSnafu {
-                    missing: format!("pub_key_threshold for '{}' repo config", repo_name),
-                },
-            )?,
-            root_role_url: &mut repo_config.root_role_url,
-            targets_url: &mut repo_config.targets_url,
-            metadata_base_url: &mut repo_config.metadata_base_url,
-        })
-    }
-}
-
-async fn create_repo_infrastructure(
-    repo_info: &'_ mut ValidRepoInfo<'_>,
-) -> Result<(String, String, String)> {
-    // Create S3 bucket
-    info!("Creating S3 bucket...");
-    let (s3_stack_arn, bucket_name, bucket_rdn) =
-        s3::create_s3_bucket(&repo_info.s3_region, &repo_info.s3_stack_name).await?;
-
-    // Add Bucket Policy to newly created bucket
-    s3::add_bucket_policy(
-        &repo_info.s3_region,
-        &bucket_name,
-        &repo_info.prefix,
-        repo_info.vpce_id,
-    )
-    .await?;
-
-    // Create root + publication keys
-    info!("Creating KMS Keys...");
-    keys::create_keys(repo_info.signing_keys).await?;
-    keys::create_keys(repo_info.root_keys).await?;
-    Ok((s3_stack_arn, bucket_name, bucket_rdn))
-}
-
-async fn update_root_and_sign_root(
-    repo_info: &'_ mut ValidRepoInfo<'_>,
-    root_role_path: &Path,
-) -> Result<()> {
-    // Create and populate (add/sign) root.json
-    info!("Creating and signing root.json...");
-    root::create_root(root_role_path)?;
-    // Add keys (for both roles)
-    root::add_keys(
-        repo_info.signing_keys,
-        &KeyRole::Publication,
-        repo_info.pub_key_threshold,
-        &root_role_path.display().to_string(),
-    )?;
-    root::add_keys(
-        repo_info.root_keys,
-        &KeyRole::Root,
-        repo_info.root_key_threshold,
-        &root_role_path.display().to_string(),
-    )?;
-    // Sign root with all root keys
-    root::sign_root(repo_info.root_keys, &root_role_path.display().to_string())?;
-    Ok(())
-}
-
-// =^..^= =^..^= =^..^= TESTS =^..^= =^..^= =^..^=
-
-#[cfg(test)]
-mod tests {
-    use super::{fs, shared, InfraConfig};
-
-    #[test]
-    fn toml_yaml_conversion() {
-        let test_toml_path = format!(
-            "{}/test_tomls/toml_yaml_conversion.toml",
-            shared::getenv("CARGO_MANIFEST_DIR").unwrap()
-        );
-        let toml_struct = InfraConfig::from_path(&test_toml_path).unwrap();
-        let yaml_string = serde_yaml::to_string(&toml_struct).expect("Could not write to file!");
-
-        let test_yaml_path = format!(
-            "{}/test_tomls/toml_yaml_conversion.yml",
-            shared::getenv("CARGO_MANIFEST_DIR").unwrap()
-        );
-        fs::write(&test_yaml_path, &yaml_string).expect("Could not write to file!");
-        let decoded_yaml = InfraConfig::from_lock_path(&test_yaml_path).unwrap();
-
-        assert_eq!(toml_struct, decoded_yaml);
-    }
-}
diff --git a/tools/infrasys/src/root.rs
deleted file mode 100644
index bd0c6108305..00000000000
--- a/tools/infrasys/src/root.rs
+++ /dev/null
@@ -1,206 +0,0 @@
-use super::{error, KeyRole, Result};
-use aws_config::meta::region::RegionProviderChain;
-use log::{trace, warn};
-use pubsys_config::SigningKeyConfig;
-use snafu::{ensure, OptionExt, ResultExt};
-use std::collections::HashMap;
-use std::fs;
-use std::num::NonZeroUsize;
-use std::path::Path;
-use std::process::Command;
-
-/// The tuftool macro wraps Command to simplify calls to tuftool, adding region functionality.
-macro_rules! tuftool {
-    ($region:expr, $format_str:expr, $($format_arg:expr),*) => {
-        let arg_str = format!($format_str, $($format_arg),*);
-        trace!("tuftool arg string: {}", arg_str);
-        let args = shell_words::split(&arg_str).context(error::CommandSplitSnafu { command: &arg_str })?;
-        trace!("tuftool split args: {:#?}", args);
-
-        let status = Command::new("tuftool")
-            .args(args)
-            .env("AWS_REGION", $region)
-            .status()
-            .context(error::TuftoolSpawnSnafu)?;
-
-        ensure!(status.success(), error::TuftoolResultSnafu {
-            command: arg_str,
-            code: status.code().map(|i| i.to_string()).unwrap_or_else(|| "<unknown>".to_string())
-        });
-    }
-}
-
-pub fn check_root(root_role_path: &Path) -> Result<()> {
-    ensure!(!root_role_path.is_file(), {
-        warn!("Cowardly refusing to overwrite the existing root.json at {}. Please manually delete it and run again.", root_role_path.display());
-        error::FileExistsSnafu {
-            path: root_role_path,
-        }
-    });
-    Ok(())
-}
-
-pub fn get_region() -> Result<String> {
-    let rt = tokio::runtime::Runtime::new().context(error::RuntimeSnafu)?;
-    rt.block_on(async { async_get_region().await })
-}
-
-async fn async_get_region() -> Result<String> {
-    let default_region_fallback = "us-east-1";
-    let default_region = RegionProviderChain::default_provider()
-        .or_else(default_region_fallback)
-        .region()
-        .await
-        .context(error::DefaultRegionSnafu)?
-        .to_string();
-    Ok(default_region)
-}
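For orientation, a standalone sketch of what a single `tuftool!` invocation above amounts to before the process is spawned (the path is hypothetical; `shell_words` is the same crate the macro uses):

```rust
fn main() {
    // The macro formats an argument string, splits it shell-style, and then
    // runs `tuftool` with AWS_REGION set for that one call.
    let arg_str = format!("root init '{}'", "/tmp/roles/root.json");
    let args = shell_words::split(&arg_str).expect("splittable command");
    assert_eq!(args, ["root", "init", "/tmp/roles/root.json"]);
    // What follows in the macro is, in effect:
    // Command::new("tuftool").args(args).env("AWS_REGION", region).status()
}
```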
-
-/// Creates the directory where root.json will live and creates root.json itself according to details specified in root-role-path
-pub fn create_root(root_role_path: &Path) -> Result<()> {
-    // Make /roles and /keys directories, if they don't exist, so we can write generated files.
-    let role_dir = root_role_path.parent().context(error::InvalidPathSnafu {
-        path: root_role_path,
-        thing: "root role",
-    })?;
-    fs::create_dir_all(role_dir).context(error::MkdirSnafu { path: role_dir })?;
-    let default_region = get_region()?;
-
-    // Initialize root
-    tuftool!(&default_region, "root init '{}'", root_role_path.display());
-    tuftool!(
-        &default_region,
-        // TODO: expose expiration date as a configurable parameter
-        "root expire '{}' 'in 52 weeks'",
-        root_role_path.display()
-    );
-    Ok(())
-}
-
-/// Adds keys to root.json according to key type
-pub fn add_keys(
-    signing_key_config: &mut SigningKeyConfig,
-    role: &KeyRole,
-    threshold: &NonZeroUsize,
-    filepath: &str,
-) -> Result<()> {
-    match signing_key_config {
-        SigningKeyConfig::file { .. } => (),
-        SigningKeyConfig::kms { key_id, config, .. } => add_keys_kms(
-            &config
-                .as_ref()
-                .context(error::MissingConfigSnafu {
-                    missing: "config field for a kms key",
-                })?
-                .available_keys,
-            role,
-            threshold,
-            filepath,
-            key_id,
-        )?,
-        SigningKeyConfig::ssm { .. } => (),
-    }
-    Ok(())
-}
-
-/// Adds KMSKeys to root.json given root or publication type
-/// Input: available-keys (keys to sign with), role (root or publication), threshold for role, filepath for root.json,
-/// mutable key_id
-/// Output: in-place edit of root.json and key_id with a valid publication key
-/// (If key-id is populated, it will not change. Otherwise, it will be populated with a key-id of an available key)
-fn add_keys_kms(
-    available_keys: &HashMap<String, String>,
-    role: &KeyRole,
-    threshold: &NonZeroUsize,
-    filepath: &str,
-    key_id: &mut Option<String>,
-) -> Result<()> {
-    ensure!(
-        (*available_keys).len() >= (*threshold).get(),
-        error::InvalidThresholdSnafu {
-            threshold: threshold.to_string(),
-            num_keys: (*available_keys).len(),
-        }
-    );
-    let default_region = get_region()?;
-    match role {
-        KeyRole::Root => {
-            tuftool!(
-                &default_region,
-                "root set-threshold '{}' root '{}' ",
-                filepath,
-                threshold.to_string()
-            );
-            for (keyid, region) in available_keys.iter() {
-                tuftool!(
-                    region,
-                    "root add-key '{}' aws-kms:///'{}' --role root",
-                    filepath,
-                    keyid
-                );
-            }
-        }
-        KeyRole::Publication => {
-            tuftool!(
-                &default_region,
-                "root set-threshold '{}' snapshot '{}' ",
-                filepath,
-                threshold.to_string()
-            );
-            tuftool!(
-                &default_region,
-                "root set-threshold '{}' targets '{}' ",
-                filepath,
-                threshold.to_string()
-            );
-            tuftool!(
-                &default_region,
-                "root set-threshold '{}' timestamp '{}' ",
-                filepath,
-                threshold.to_string()
-            );
-            for (keyid, region) in available_keys.iter() {
-                tuftool!(
-                    region,
-                    "root add-key '{}' aws-kms:///'{}' --role snapshot --role targets --role timestamp",
-                    filepath,
-                    keyid
-                );
-            }
-
-            // Set key_id using a publication key (if one is not already provided)
-            if key_id.is_none() {
-                *key_id = Some(
-                    available_keys
-                        .iter()
-                        .next()
-                        .context(error::KeyCreationSnafu)?
-                        .0
-                        .to_string(),
-                );
-            }
-        }
-    }
-
-    Ok(())
-}
-
-/// Signs root with available_keys under root_keys (will have a different tuftool command depending on key type)
-pub fn sign_root(signing_key_config: &SigningKeyConfig, filepath: &str) -> Result<()> {
-    match signing_key_config {
-        SigningKeyConfig::file { .. } => (),
-        SigningKeyConfig::kms { config, .. } => {
-            for (keyid, region) in config
-                .as_ref()
-                .context(error::MissingConfigSnafu {
-                    missing: "KMS key details",
-                })?
-                .available_keys
-                .iter()
-            {
-                tuftool!(region, "root sign '{}' -k aws-kms:///'{}'", filepath, keyid);
-            }
-        }
-        SigningKeyConfig::ssm { .. 
} => (), - } - Ok(()) -} diff --git a/tools/infrasys/src/s3.rs b/tools/infrasys/src/s3.rs deleted file mode 100644 index 6fc9c8047bf..00000000000 --- a/tools/infrasys/src/s3.rs +++ /dev/null @@ -1,369 +0,0 @@ -use aws_sdk_cloudformation::{config::Region, Client as CloudFormationClient}; -use aws_sdk_s3::Client as S3Client; -use snafu::{OptionExt, ResultExt}; -use std::fs; -use std::fs::File; -use std::io::prelude::*; -use std::path::{Path, PathBuf}; - -use super::{error, shared, Result}; - -pub fn format_prefix(prefix: &str) -> String { - if prefix.is_empty() { - return prefix.to_string(); - } - let formatted = { - if prefix.starts_with('/') { - prefix.to_string() - } else { - format!("/{}", prefix) - } - }; - if formatted.ends_with('/') { - formatted[..formatted.len() - 1].to_string() - } else if formatted.ends_with("/*") { - formatted[..formatted.len() - 2].to_string() - } else { - formatted - } -} - -/// Creates a *private* S3 Bucket using a CloudFormation template -/// Input: The region in which the bucket will be created and the name of the bucket -/// Output: The stack_arn of the stack w/ the S3 bucket, the CFN allocated bucket name, -/// and the bucket url (for the url fields in Infra.lock) -pub async fn create_s3_bucket( - region: &Region, - stack_name: &str, -) -> Result<(String, String, String)> { - // TODO: Add support for accommodating pre-existing buckets (skip this creation process) - let config = aws_config::from_env() - .region(region.to_owned()) - .load() - .await; - let cfn_client = CloudFormationClient::new(&config); - - let cfn_filepath: PathBuf = format!( - "{}/infrasys/cloudformation-templates/s3_setup.yml", - shared::getenv("BUILDSYS_TOOLS_DIR")? - ) - .into(); - let cfn_template = - fs::read_to_string(&cfn_filepath).context(error::FileReadSnafu { path: cfn_filepath })?; - - let stack_result = cfn_client - .create_stack() - .stack_name(stack_name.to_string()) - .template_body(cfn_template.clone()) - .send() - .await - .context(error::CreateStackSnafu { - stack_name, - region: region.as_ref(), - })?; - // We don't have to wait for successful stack creation to grab the stack ARN - let stack_arn = stack_result - .clone() - .stack_id - .context(error::ParseResponseSnafu { - what: "stack_id", - resource_name: stack_name, - })?; - - // Grab the StackOutputs to get the Bucketname and BucketURL - let output_array = shared::get_stack_outputs(&cfn_client, stack_name, region.as_ref()).await?; - let bucket_name = output_array[0] - .output_value - .as_ref() - .context(error::ParseResponseSnafu { - what: "outputs[0].output_value (bucket name)", - resource_name: stack_name, - })? - .to_string(); - let bucket_rdn = output_array[1] - .output_value - .as_ref() - .context(error::ParseResponseSnafu { - what: "outputs[1].output_value (bucket url)", - resource_name: stack_name, - })? 
-        .to_string();
-
-    Ok((stack_arn, bucket_name, bucket_rdn))
-}
-
-/// Adds a BucketPolicy allowing GetObject access to a specified VPC
-/// Input: Region, Name of bucket, which prefix root.json should be put under, and vpcid
-/// Note that the prefix parameter must have the format "/<prefix>/*" and the bucket name "<bucket-name>"
-/// Output: Doesn't need to save any metadata from this action
-pub async fn add_bucket_policy(
-    region: &Region,
-    bucket_name: &str,
-    prefix: &str,
-    vpcid: &str,
-) -> Result<()> {
-    // Get old policy
-    let config = aws_config::from_env()
-        .region(region.to_owned())
-        .load()
-        .await;
-    let s3_client = S3Client::new(&config);
-    let mut policy: serde_json::Value = match s3_client
-        .get_bucket_policy()
-        .bucket(bucket_name.to_string())
-        .send()
-        .await
-    {
-        Ok(output) => serde_json::from_str(&output.policy.context(error::ParseResponseSnafu {
-            what: "policy",
-            resource_name: bucket_name,
-        })?)
-        .context(error::InvalidJsonSnafu {
-            what: format!("retrieved bucket policy for {}", &bucket_name),
-        })?,
-
-        Err(..) => serde_json::from_str(
-            r#"{"Version": "2008-10-17",
-                  "Statement": []}"#,
-        )
-        .context(error::InvalidJsonSnafu {
-            what: format!("new bucket policy for {}", &bucket_name),
-        })?,
-    };
-
-    // Create a new policy
-    let new_bucket_policy = serde_json::from_str(&format!(
-        r#"{{
-            "Effect": "Allow",
-            "Principal": "*",
-            "Action": "s3:GetObject",
-            "Resource": "arn:aws:s3:::{}{}/*",
-            "Condition": {{
-                "StringEquals": {{
-                    "aws:sourceVpce": "{}"
-                }}
-            }}
-        }}"#,
-        bucket_name, prefix, vpcid
-    ))
-    .context(error::InvalidJsonSnafu {
-        what: format!("new bucket policy for {}", &bucket_name),
-    })?;
-
-    // Append new policy onto old one
-    policy
-        .get_mut("Statement")
-        .context(error::GetPolicyStatementSnafu { bucket_name })?
-        .as_array_mut()
-        .context(error::GetPolicyStatementSnafu { bucket_name })?
-        .push(new_bucket_policy);
-
-    // Push the new policy as a string
-    s3_client
-        .put_bucket_policy()
-        .bucket(bucket_name.to_string())
-        .policy(
-            serde_json::to_string(&policy).context(error::InvalidJsonSnafu {
-                what: format!("new bucket policy for {}", &bucket_name),
-            })?,
-        )
-        .send()
-        .await
-        .context(error::PutPolicySnafu { bucket_name })?;
-
-    Ok(())
-}
-
-/// Uploads root.json to S3 Bucket (automatically creates the folder that the bucket policy was scoped to or will simply add to it)
-/// Input: Region, Name of bucket, which prefix root.json should be put under, and path to the S3 bucket CFN template
-/// Note that the prefix parameter must have the format "/<prefix>" and the bucket name "<bucket-name>"
-/// Output: Doesn't need to save any metadata from this action
-pub async fn upload_file(
-    region: &Region,
-    bucket_name: &str,
-    prefix: &str,
-    file_path: &Path,
-) -> Result<()> {
-    let config = aws_config::from_env()
-        .region(region.to_owned())
-        .load()
-        .await;
-    let s3_client = S3Client::new(&config);
-
-    // File --> Bytes
-    let mut file = File::open(file_path).context(error::FileOpenSnafu { path: file_path })?;
-    let mut buffer = Vec::new();
-    file.read_to_end(&mut buffer)
-        .context(error::FileReadSnafu { path: file_path })?;
-
-    s3_client
-        .put_object()
-        .bucket(format!("{}{}", bucket_name, prefix))
-        .key("root.json".to_string())
-        .body(aws_sdk_s3::primitives::ByteStream::from(buffer))
-        .send()
-        .await
-        .context(error::PutObjectSnafu { bucket_name })?;
-
-    Ok(())
-}
-
-// =^..^= =^..^= =^..^= TESTS =^..^= =^..^= =^..^=
-
-#[cfg(test)]
-mod tests {
-    use super::format_prefix;
-    use assert_json_diff::assert_json_include;
-
-    #[test]
-    fn format_prefix_test() {
-        let valid = "/prefix";
-        let missing_slash = "prefix";
-        let excess_ending_1 = "/prefix/";
-        let excess_ending_2 = "/prefix/*";
-        let slash_and_excess_ending = "prefix/*";
-        let empty = "";
-        let single_slash = "/";
-
-        assert_eq!("/prefix", format_prefix(valid));
-        assert_eq!("/prefix", format_prefix(missing_slash));
-        assert_eq!("/prefix", format_prefix(excess_ending_1));
-        assert_eq!("/prefix", format_prefix(excess_ending_2));
-        assert_eq!("/prefix", format_prefix(slash_and_excess_ending));
-        assert_eq!("", format_prefix(empty));
-        assert_eq!("", format_prefix(single_slash));
-    }
-
-    #[test]
-    fn empty_bucket_policy() {
-        let mut policy: serde_json::Value = serde_json::from_str(
-            r#"{"Version": "2008-10-17",
-                  "Statement": []}"#,
-        )
-        .unwrap();
-
-        let new_bucket_policy = serde_json::from_str(&format!(
-            r#"{{
-                "Effect": "Allow",
-                "Principal": "*",
-                "Action": "s3:GetObject",
-                "Resource": "arn:aws:s3:::{}{}/*",
-                "Condition": {{
-                    "StringEquals": {{
-                        "aws:sourceVpce": "{}"
-                    }}
-                }}
-            }}"#,
-            "test-bucket-name", "/test-prefix", "testvpc123"
-        ))
-        .unwrap();
-
-        policy
-            .get_mut("Statement")
-            .unwrap()
-            .as_array_mut()
-            .unwrap()
-            .push(new_bucket_policy);
-
-        let expected_policy: serde_json::Value = serde_json::from_str(
-            r#"{
-                "Version": "2008-10-17",
-                "Statement": [
-                    {
-                        "Effect": "Allow",
-                        "Principal": "*",
-                        "Action": "s3:GetObject",
-                        "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*",
-                        "Condition": {
-                            "StringEquals": {
-                                "aws:sourceVpce": "testvpc123"
-                            }
-                        }
-                    }
-                ]
-            }"#,
-        )
-        .unwrap();
-
-        assert_json_include!(expected: expected_policy, actual: &policy);
-    }
-
-    #[test]
-    fn populated_bucket_policy() {
-        let mut policy: serde_json::Value = serde_json::from_str(
-            r#"{
-                "Version": "2008-10-17",
-                "Statement": [
-                    {
-                        "Effect": "Allow",
-                        "Principal": "*",
"s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", - "Condition": { - "StringEquals": { - "aws:sourceVpce": "testvpc123" - } - } - } - ] - }"#, - ) - .unwrap(); - - let new_bucket_policy = serde_json::from_str(&format!( - r#"{{ - "Effect": "Deny", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::{}{}/*", - "Condition": {{ - "StringEquals": {{ - "aws:sourceVpce": "{}" - }} - }} - }}"#, - "test-bucket-name", "/test-prefix", "testvpc123" - )) - .unwrap(); - - policy - .get_mut("Statement") - .unwrap() - .as_array_mut() - .unwrap() - .push(new_bucket_policy); - - let expected_policy: serde_json::Value = serde_json::from_str( - r#"{ - "Version": "2008-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", - "Condition": { - "StringEquals": { - "aws:sourceVpce": "testvpc123" - } - } - }, - { - "Effect": "Deny", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", - "Condition": { - "StringEquals": { - "aws:sourceVpce": "testvpc123" - } - } - } - ] - }"#, - ) - .unwrap(); - - assert_json_include!(expected: expected_policy, actual: &policy); - } -} diff --git a/tools/infrasys/src/shared.rs b/tools/infrasys/src/shared.rs deleted file mode 100644 index a12a677039a..00000000000 --- a/tools/infrasys/src/shared.rs +++ /dev/null @@ -1,99 +0,0 @@ -use aws_sdk_cloudformation::types::{Output, Parameter}; -use aws_sdk_cloudformation::Client as CloudFormationClient; -use clap::Parser; -use log::info; -use snafu::{ensure, OptionExt, ResultExt}; -use std::{env, thread, time}; - -use super::{error, Result}; - -#[derive(Debug, Parser)] -pub enum KeyRole { - Root, - Publication, -} - -/// Retrieve a BUILDSYS_* variable that we expect to be set in the environment -pub fn getenv(var: &str) -> Result { - env::var(var).context(error::EnvironmentSnafu { var }) -} - -/// Generates a parameter type object used to specify parameters in CloudFormation templates -pub fn create_parameter(key: String, val: String) -> Parameter { - Parameter::builder() - .parameter_key(key) - .parameter_value(val) - .build() -} - -/// Polls cfn_client for stack_name in region until it's ready -/// Once stack is created, we can grab the outputs (before this point, outputs are empty) -pub async fn get_stack_outputs( - cfn_client: &CloudFormationClient, - stack_name: &str, - region: &str, -) -> Result> { - let mut stack_outputs = cfn_client - .describe_stacks() - .stack_name(stack_name) - .send() - .await - .context(error::DescribeStackSnafu { stack_name, region })? - .stacks - .context(error::ParseResponseSnafu { - what: "stacks", - resource_name: stack_name, - })? - .first() - .context(error::MissingStackSnafu { stack_name, region })? - .clone(); - - // Checking that keys have been created so we can return updated outputs - let mut status = stack_outputs - .stack_status() - .context(error::ParseStatusSnafu)? 
-        .as_str();
-    // Max wait is 30 mins (90 attempts * 20s = 1800s = 30mins)
-    let mut max_attempts: u32 = 90;
-    while status != "CREATE_COMPLETE" {
-        ensure!(
-            max_attempts > 0,
-            error::CreateStackTimeoutSnafu { stack_name, region }
-        );
-        ensure!(
-            status != "CREATE_FAILED",
-            error::CreateStackFailureSnafu { stack_name, region }
-        );
-        info!(
-            "Waiting for stack resources to be ready, current status is '{}'...",
-            status
-        );
-        thread::sleep(time::Duration::from_secs(20));
-        stack_outputs = cfn_client
-            .describe_stacks()
-            .stack_name(stack_name)
-            .send()
-            .await
-            .context(error::DescribeStackSnafu { stack_name, region })?
-            .stacks
-            .context(error::ParseResponseSnafu {
-                what: "stacks",
-                resource_name: stack_name,
-            })?
-            .first()
-            .context(error::MissingStackSnafu { stack_name, region })?
-            .clone();
-        status = stack_outputs
-            .stack_status()
-            .context(error::ParseStatusSnafu)?
-            .as_str();
-        max_attempts -= 1;
-    }
-
-    let output_array = stack_outputs.outputs.context(error::ParseResponseSnafu {
-        what: "outputs",
-        resource_name: stack_name,
-    })?;
-
-    Ok(output_array)
-}
diff --git a/tools/infrasys/test_tomls/toml_yaml_conversion.toml
deleted file mode 100644
index f2e580133ec..00000000000
--- a/tools/infrasys/test_tomls/toml_yaml_conversion.toml
+++ /dev/null
@@ -1,12 +0,0 @@
-[repo.default]
-    file_hosting_config_name = "TUF-Repo-S3-Buck"
-    signing_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } }
-    root_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } }
-    root_key_threshold = 1
-    pub_key_threshold = 1
-
-[aws]
-    [aws.s3.TUF-Repo-S3-Buck]
-    region = "us-west-2"
-    vpc_endpoint_id = "vpc-12345"
-    s3_prefix = "/my-bottlerocket-remix"
diff --git a/tools/infrasys/test_tomls/toml_yaml_conversion.yml
deleted file mode 100644
index c9482f65251..00000000000
--- a/tools/infrasys/test_tomls/toml_yaml_conversion.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-repo:
-  default:
-    root_role_url: ~
-    root_role_sha512: ~
-    signing_keys:
-      kms:
-        key_id: ~
-        available_keys:
-          e4a8f7fe-2272-4e51-bc3e-3f719c77eb31: us-west-1
-        key_alias: ~
-        regions: []
-        key_stack_arns: {}
-    root_keys:
-      kms:
-        key_id: ~
-        available_keys:
-          e4a8f7fe-2272-4e51-bc3e-3f719c77eb31: us-west-1
-        key_alias: ~
-        regions: []
-        key_stack_arns: {}
-    metadata_base_url: ~
-    targets_url: ~
-    file_hosting_config_name: TUF-Repo-S3-Buck
-    root_key_threshold: 1
-    pub_key_threshold: 1
-aws:
-  regions: []
-  role: ~
-  profile: ~
-  region: {}
-  ssm_prefix: ~
-  s3:
-    TUF-Repo-S3-Buck:
-      region: us-west-2
-      s3_prefix: /my-bottlerocket-remix
-      vpc_endpoint_id: vpc-12345
-      stack_arn: ~
-      bucket_name: ~
-vmware: ~
diff --git a/tools/install-twoliter.sh b/tools/install-twoliter.sh
new file mode 100755
index 00000000000..959643fde08
--- /dev/null
+++ b/tools/install-twoliter.sh
@@ -0,0 +1,168 @@
+#!/usr/bin/env bash
+
+#
+# Common error handling
+#
+
+exit_trap_cmds=()
+
+on_exit() {
+    exit_trap_cmds+=( "$1" )
+}
+
+run_exit_trap_cmds() {
+    for cmd in "${exit_trap_cmds[@]}"; do
+        eval "${cmd}"
+    done
+}
+
+trap run_exit_trap_cmds EXIT
+
+warn() {
+    >&2 echo "Warning: $*"
+}
+
+bail() {
+    if [[ $# -gt 0 ]]; then
+        >&2 echo "Error: $*"
+    fi
+    exit 1
+}
+
+usage() {
+    cat <<EOF
+Usage: ${0##*/} --repo <url> --version <version> --directory <dir>
+                [--reuse-existing-install] [--allow-binary-install]
+                [--allow-from-source]
+
+Installs Twoliter into the given directory.
+EOF
+}
+
+usage_error() {
+    >&2 usage
+    bail "$1"
+}
+
+
+#
+# Parse arguments
+#
+
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        -r|--repo)
+            shift; repo=$1 ;;
+        -v|--version)
+            shift; version=$1 ;;
+        -d|--directory)
+ shift; dir=$1 ;; + -e|--reuse-existing-install) + reuse_existing="true" ;; + -b|--allow-binary-install) + allow_bin="true" ;; + -s|--allow-from-source) + from_source="true" ;; + -h|--help) + usage; exit 0 ;; + *) + usage_error "Invalid option '$1'" ;; + esac + shift +done + +set -e + +workdir="$(mktemp -d)" +on_exit "rm -rf ${workdir}" + +if [ "${reuse_existing}" = "true" ] ; then + if [ -x "${dir}/twoliter" ] ; then + version_output="$("${dir}/twoliter" --version)" + found_version=v$(echo $version_output | awk '{print $2}') + echo "Found twoliter ${found_version} installed." + if [ "${found_version}" = "${version}" ] ; then + echo "Skipping installation." + exit 0 + fi + fi +fi + +if [ "${allow_bin}" = "true" ] ; then + host_arch="$(uname -m)" + host_arch="${host_arch,,}" + host_kernel="$(uname -s)" + host_kernel="${host_kernel,,}" + case "${host_kernel}-${host_arch}" in + linux-x86_64 | linux-aarch64) + echo "Installing twoliter from binary release." + twoliter_release="${repo}/releases/download/${version}" + twoliter_target="${host_arch}-unknown-${host_kernel}-musl" + cd "${workdir}" + curl -sSL "${twoliter_release}/twoliter-${twoliter_target}.tar.xz" -o "twoliter.tar.xz" + tar xf twoliter.tar.xz + mkdir -p "${dir}" + mv "./twoliter-${twoliter_target}/twoliter" "${dir}" + exit 0 + ;; + *) + echo "No pre-built binaries available for twoliter ${version}." + ;; + esac +else + echo "Skipped installing twoliter ${version} from pre-built binaries." +fi + +if [ "${from_source}" = "true" ] ; then + cargo install \ + --locked \ + --root "${workdir}" \ + --git "${repo}" \ + --rev "${version}" \ + --bin twoliter \ + --quiet \ + twoliter + mv "${workdir}/bin/twoliter" "${dir}/twoliter" + echo "Installed twoliter ${version} from source." + exit 0 +else + echo "Skipped installing twoliter ${version} from source." +fi + + +if [ ! -x "${dir}/twoliter" ] ; then + echo "Could not install twoliter ${version}" >&2 + exit 1 +fi \ No newline at end of file diff --git a/tools/partyplanner b/tools/partyplanner deleted file mode 100755 index d638319c87e..00000000000 --- a/tools/partyplanner +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2034 # Variables are used externally by rpm2img - -############################################################################### -# Section 1: partition type GUIDs and partition GUIDs - -# Define partition type GUIDs for all OS-managed partitions. This is required -# for the boot partition, where we set gptprio bits in the GUID-specific use -# field, but we might as well do it for all of them. -BOTTLEROCKET_BOOT_TYPECODE="6b636168-7420-6568-2070-6c616e657421" -BOTTLEROCKET_ROOT_TYPECODE="5526016a-1a97-4ea4-b39a-b7c8c6ca4502" -BOTTLEROCKET_HASH_TYPECODE="598f10af-c955-4456-6a99-7720068a6cea" -BOTTLEROCKET_RESERVED_TYPECODE="0c5d99a5-d331-4147-baef-08e2b855bdc9" -BOTTLEROCKET_PRIVATE_TYPECODE="440408bb-eb0b-4328-a6e5-a29038fad706" -BOTTLEROCKET_DATA_TYPECODE="626f7474-6c65-6474-6861-726d61726b73" - -# Under BIOS, the firmware will transfer control to the MBR on the boot device, -# which will pass control to the GRUB stage 2 binary written to the BIOS boot -# partition. The BIOS does not attach any significance to this partition type, -# but GRUB knows to install itself there when we run `grub-bios-setup`. -BIOS_BOOT_TYPECODE="ef02" - -# Under EFI, the firmware will find the EFI system partition and execute the -# program at a platform-defined path like `bootx64.efi`. The partition type -# must match what the firmware expects. 
-EFI_SYSTEM_TYPECODE="C12A7328-F81F-11D2-BA4B-00A0C93EC93B" - -# Whichever entry point is used for booting the system, it's important to note -# that only one build of GRUB is involved - the one that's installed during the -# image build. - -# GRUB understands the GPT priorities scheme we use to find the active boot -# partition; EFI and BIOS firmware does not. This is why we do not update GRUB -# during our system updates; we would have no way to revert to an earlier copy -# of the bootloader if it failed to boot. -# -# We may eventually want to have an active/passive scheme for EFI partitions, -# to allow for potential GRUB and shim updates on EFI platforms in cases where -# we need to deliver security fixes. For now, add a placeholder partition type -# for an alternate bank. -EFI_BACKUP_TYPECODE="B39CE39C-0A00-B4AB-2D11-F18F8237A21C" - -# Define partition GUIDs for the data partitions. We use the GUID for determining -# which data partition to label and use at boot. -BOTTLEROCKET_DATA_PREFERRED_PARTGUID="5b94e8df-28b8-485c-9d19-362263b5944c" -BOTTLEROCKET_DATA_FALLBACK_PARTGUID="69040874-417d-4e26-a764-7885f22007ea" - -############################################################################### -# Section 2: fixed size partitions and reservations - -# The GPT header and footer each take up 32 sectors, but we reserve a full MiB -# so that partitions can all be aligned on MiB boundaries. -GPT_MIB="1" # two per disk - -# The BIOS partition is only used on x86 platforms, and only needs to be large -# enough for the GRUB stage 2. Increasing its size will reduce the size of the -# "private" and "reserved" partitions. This should be relatively safe since we -# don't apply image updates to those partitions. -BIOS_MIB="4" # one per disk - -# The GPT and BIOS reservations are fixed overhead that will be deducted from -# the space nominally given to the private partition used to persist settings. -OVERHEAD_MIB="$((GPT_MIB * 2 + BIOS_MIB))" - -# The 'recommended' size for the EFI partition is 100MB but our EFI images are -# under 2MB, so this will suffice for now. It would be possible to increase the -# EFI partition size by taking space from the "reserved" area below. -EFI_MIB="5" # one per bank - -# Allocate 1 MiB for the initial data partition A. -DATA_A_MIB="1" # one per disk - -############################################################################### -# Section 3: variable sized partitions - -# These partitions scale based on image size. The scaling factors are chosen so -# that we end up with the same partition sizes for the banks on a 2 GiB image, -# which was the only image size we historically supported. -# -# !!! WARNING !!! -# -# Increasing any of these constants is very likely to break systems on update, -# since the corresponding partitions are adjacent on disk and have no room to -# grow. -BOOT_SCALE_FACTOR="20" -ROOT_SCALE_FACTOR="460" -HASH_SCALE_FACTOR="5" -RESERVE_SCALE_FACTOR="15" -PRIVATE_SCALE_FACTOR="24" - -############################################################################### -# Section 4: ASCII art gallery - -# Layout for a 1 GiB OS image. Sizes marked with (*) scale with overall image -# size, based on the constant factors above. - -# +---------------------------------+ -# Prelude | GPT header 1 MiB | 5 MiB -# | BIOS boot partition 4 MiB | Fixed size. 
-# +---------------------------------+ -# | EFI system partition 5 MiB | -# | Boot partition A 20 MiB* | (image size - prelude - postlude) / 2 -# Bank A | Root partition A 460 MiB* | Example: (1 GiB - 5 MiB - 19 MiB) / 2 -# | Hash partition A 5 MiB* | 500 MiB -# | Reserved partition A 10 MiB* | -# +---------------------------------+ -# | EFI backup partition 5 MiB | -# | Boot partition B 20 MiB* | (image size - prelude - postlude) / 2 -# Bank B | Root partition B 460 MiB* | Example: (1 GiB - 5 MiB - 19 MiB) / 2 -# | Hash partition B 5 MiB* | 500 MiB -# | Reserved partition B 10 MiB* | -# +---------------------------------+ -# | Private partition 17 MiB* | (image size * 24 as MiB) - prelude - DATA-A size -# | Data partition A 1 MiB | Data partition A -# Postlude | GPT footer 1 MiB | GPT is fixed, private partition grows. -# +---------------------------------+ - -############################################################################## -# Section 5: library functions - -# Populate the caller's tables with sizes and offsets for known partitions. -set_partition_sizes() { - local os_image_gib data_image_gib partition_plan - local -n pp_size pp_offset - os_image_gib="${1:?}" - data_image_gib="${2:?}" - - # Whether we're building a layout for a "split" image, where OS and data - # volumes are on separate disks, or a "unified" image, where they share the - # same disk. - partition_plan="${3:?}" - - # Table for partition sizes, in MiB. - pp_size="${4:?}" - - # Table for partition offsets from start of disk, in MiB. - pp_offset="${5:?}" - - # Most of the partitions on the main image scale with the overall size. - local boot_mib root_mib hash_mib reserved_mib private_mib - boot_mib="$((os_image_gib * BOOT_SCALE_FACTOR))" - root_mib="$((os_image_gib * ROOT_SCALE_FACTOR))" - hash_mib="$((os_image_gib * HASH_SCALE_FACTOR))" - - # Reserved space is everything left in the bank after the other partitions - # are scaled, minus the fixed 5 MiB EFI partition in that bank. - reserved_mib=$((os_image_gib * RESERVE_SCALE_FACTOR - EFI_MIB)) - - # Private space scales per GiB, minus the BIOS and GPT partition overhead. - private_mib=$((os_image_gib * PRIVATE_SCALE_FACTOR - OVERHEAD_MIB)) - # We need 1 MiB of space for data partition A. - private_mib=$((private_mib - DATA_A_MIB)) - - # Skip the GPT label at start of disk. - local offset - ((offset = 1)) - - pp_offset["BIOS"]="${offset}" - pp_size["BIOS"]="${BIOS_MIB}" - ((offset += BIOS_MIB)) - - for bank in A B ; do - pp_offset["EFI-${bank}"]="${offset}" - pp_size["EFI-${bank}"]="${EFI_MIB}" - ((offset += EFI_MIB)) - - pp_offset["BOOT-${bank}"]="${offset}" - pp_size["BOOT-${bank}"]="${boot_mib}" - ((offset += boot_mib)) - - pp_offset["ROOT-${bank}"]="${offset}" - pp_size["ROOT-${bank}"]="${root_mib}" - ((offset += root_mib)) - - pp_offset["HASH-${bank}"]="${offset}" - pp_size["HASH-${bank}"]="${hash_mib}" - ((offset += hash_mib)) - - pp_offset["RESERVED-${bank}"]="${offset}" - pp_size["RESERVED-${bank}"]="${reserved_mib}" - ((offset += reserved_mib)) - done - - pp_offset["PRIVATE"]="${offset}" - pp_size["PRIVATE"]="${private_mib}" - ((offset += private_mib)) - - case "${partition_plan}" in - split) - # For data partition A that lives on the OS image - pp_offset["DATA-A"]="${offset}" - pp_size["DATA-A"]="${DATA_A_MIB}" - ((offset += DATA_A_MIB)) - - # For a split data image, the first and last MiB are reserved for the GPT - # labels, and the rest is for data partition B. 
- pp_size["DATA-B"]="$((data_image_gib * 1024 - GPT_MIB * 2))" - pp_offset["DATA-B"]="1" - ;; - unified) - # For a unified image, we've already accounted for the GPT label space in - # the earlier calculations, so all the space is for the data partition. - pp_size["DATA-A"]="$((data_image_gib * 1024))" - pp_offset["DATA-A"]="${offset}" - ((offset += data_image_gib * 1024)) - ;; - *) - echo "unknown partition plan '${partition_plan}'" >&2 - exit 1 - ;; - esac -} - -# Populate the caller's table with labels for known partitions. -set_partition_labels() { - local -n pp_label - pp_label="${1:?}" - pp_label["BIOS"]="BIOS-BOOT" - pp_label["EFI-A"]="EFI-SYSTEM" - pp_label["EFI-B"]="EFI-BACKUP" - # Empty label for the data partitions. We're labeling the data partition - # during boot. - pp_label["DATA-A"]="" - pp_label["DATA-B"]="" - pp_label["PRIVATE"]="BOTTLEROCKET-PRIVATE" - for part in BOOT ROOT HASH RESERVED ; do - for bank in A B ; do - pp_label["${part}-${bank}"]="BOTTLEROCKET-${part}-${bank}" - done - done -} - -# Populate the caller's table with GPT type codes for known partitions. -set_partition_types() { - local -n pp_type - pp_type="${1:?}" - pp_type["BIOS"]="${BIOS_BOOT_TYPECODE}" - pp_type["DATA-A"]="${BOTTLEROCKET_DATA_TYPECODE}" - pp_type["DATA-B"]="${BOTTLEROCKET_DATA_TYPECODE}" - pp_type["EFI-A"]="${EFI_SYSTEM_TYPECODE}" - pp_type["EFI-B"]="${EFI_BACKUP_TYPECODE}" - pp_type["PRIVATE"]="${BOTTLEROCKET_PRIVATE_TYPECODE}" - local typecode - for part in BOOT ROOT HASH RESERVED ; do - for bank in A B ; do - typecode="BOTTLEROCKET_${part}_TYPECODE" - typecode="${!typecode}" - pp_type["${part}-${bank}"]="${typecode}" - done - done -} - -# Populate the caller's table with GPT partition UUIDs for DATA-A and -# DATA-B partitions. -set_partition_uuids() { - local -n pp_uuid - pp_uuid="${1:?}" - # Whether we're building a layout for a "split" image, where OS and data - # volumes are on separate disks, or a "unified" image, where they share the - # same disk. - partition_plan="${2:?}" - case "${partition_plan}" in - split) - pp_uuid["DATA-A"]="${BOTTLEROCKET_DATA_FALLBACK_PARTGUID}" - pp_uuid["DATA-B"]="${BOTTLEROCKET_DATA_PREFERRED_PARTGUID}" - ;; - unified) - pp_uuid["DATA-A"]="${BOTTLEROCKET_DATA_PREFERRED_PARTGUID}" - pp_uuid["DATA-B"]="${BOTTLEROCKET_DATA_FALLBACK_PARTGUID}" - ;; - *) - echo "unknown partition plan '${partition_plan}'" >&2 - exit 1 - ;; - esac -} diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml deleted file mode 100644 index ba060eebdf9..00000000000 --- a/tools/pubsys-config/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "pubsys-config" -version = "0.1.0" -authors = ["Zac Mrowicki ", "Tom Kirchner "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false - -[dependencies] -chrono = { version = "0.4", default-features = false, features = ["std", "clock"] } -home = "0.5" -lazy_static = "1" -log = "0.4" -parse-datetime = { path = "../../sources/parse-datetime", version = "0.1" } -serde = { version = "1", features = ["derive"] } -serde_yaml = "0.9" -snafu = "0.7" -toml = "0.5" -url = { version = "2", features = ["serde"] } diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs deleted file mode 100644 index 8b244977e3e..00000000000 --- a/tools/pubsys-config/src/lib.rs +++ /dev/null @@ -1,279 +0,0 @@ -//! The config module owns the definition and loading process for our configuration sources. 
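Stepping back to the partition-layout helpers deleted above: the scale factors are easiest to sanity-check by reproducing the historical 2 GiB layout. A standalone sketch (not part of the deleted script; the constants are inlined from Sections 2 and 3):

```bash
# Recompute the 2 GiB layout from the scale factors; expect 2048 MiB total.
gib=2
boot=$((gib * 20))               # BOOT_SCALE_FACTOR
root=$((gib * 460))              # ROOT_SCALE_FACTOR
hash=$((gib * 5))                # HASH_SCALE_FACTOR
reserved=$((gib * 15 - 5))       # RESERVE_SCALE_FACTOR, minus the 5 MiB EFI partition
private=$((gib * 24 - 6 - 1))    # PRIVATE_SCALE_FACTOR, minus GPT/BIOS overhead and data partition A
bank=$((5 + boot + root + hash + reserved))
echo "$((1 + 4 + 2 * bank + private + 1 + 1)) MiB"   # prelude + two banks + postlude = 2048 MiB
```

Each bank lands at exactly 1000 MiB, which is why the warning above says the factors can't be raised without breaking updates: the banks are adjacent on disk with no room to grow.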
-pub mod vmware; - -use crate::vmware::VmwareConfig; -use chrono::Duration; -use log::info; -use parse_datetime::parse_offset; -use serde::{Deserialize, Deserializer, Serialize}; -use snafu::{OptionExt, ResultExt}; -use std::collections::{HashMap, VecDeque}; -use std::convert::TryFrom; -use std::fs; -use std::num::NonZeroUsize; -use std::path::{Path, PathBuf}; -use url::Url; - -/// Configuration needed to load and create repos -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct InfraConfig { - // Repo subcommand config - pub repo: Option>, - - // Config for AWS specific subcommands - pub aws: Option, - - // Config for VMware specific subcommands - pub vmware: Option, -} - -impl InfraConfig { - /// Deserializes an InfraConfig from a given path - pub fn from_path
<P>
(path: P) -> Result<Self>
-    where
-        P: AsRef<Path>,
-    {
-        let path = path.as_ref();
-        let infra_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?;
-        toml::from_str(&infra_config_str).context(error::InvalidTomlSnafu { path })
-    }
-
-    /// Deserializes an InfraConfig from an Infra.lock file at a given path
-    pub fn from_lock_path
<P>
(path: P) -> Result<Self>
-    where
-        P: AsRef<Path>,
-    {
-        let path = path.as_ref();
-        let infra_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?;
-        serde_yaml::from_str(&infra_config_str).context(error::InvalidLockSnafu { path })
-    }
-
-    /// Deserializes an InfraConfig from a given path, if it exists, otherwise builds a default
-    /// config
-    pub fn from_path_or_default
<P>
(path: P) -> Result<Self>
-    where
-        P: AsRef<Path>,
-    {
-        if path.as_ref().exists() {
-            Self::from_path(path)
-        } else {
-            Ok(Self::default())
-        }
-    }
-
-    /// Deserializes an InfraConfig from Infra.lock, if it exists, otherwise uses Infra.toml
-    /// If the default flag is true, will create a default config if Infra.toml doesn't exist
-    pub fn from_path_or_lock(path: &Path, default: bool) -> Result<Self> {
-        let lock_path = Self::compute_lock_path(path)?;
-        if lock_path.exists() {
-            info!("Found infra config at path: {}", lock_path.display());
-            Self::from_lock_path(lock_path)
-        } else if default {
-            Self::from_path_or_default(path)
-        } else {
-            info!("Found infra config at path: {}", path.display());
-            Self::from_path(path)
-        }
-    }
-
-    /// Looks for a file named `Infra.lock` in the same directory as the file named by
-    /// `infra_config_path`. Returns true if the `Infra.lock` file exists, or if `infra_config_path`
-    /// exists. Returns an error if the directory of `infra_config_path` cannot be found.
-    pub fn lock_or_infra_config_exists
<P>
(infra_config_path: P) -> Result<bool>
-    where
-        P: AsRef<Path>,
-    {
-        let lock_path = Self::compute_lock_path(&infra_config_path)?;
-        Ok(lock_path.exists() || infra_config_path.as_ref().exists())
-    }
-
-    /// Returns the file path to a file named `Infra.lock` in the same directory as the file named
-    /// by `infra_config_path`.
-    pub fn compute_lock_path
<P>
(infra_config_path: P) -> Result - where - P: AsRef, - { - Ok(infra_config_path - .as_ref() - .parent() - .context(error::ParentSnafu { - path: infra_config_path.as_ref(), - })? - .join("Infra.lock")) - } -} - -/// S3-specific TUF infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -pub struct S3Config { - pub region: Option, - #[serde(default)] - pub s3_prefix: String, - pub vpc_endpoint_id: Option, - pub stack_arn: Option, - pub bucket_name: Option, -} - -/// AWS-specific infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields)] -pub struct AwsConfig { - #[serde(default)] - pub regions: VecDeque, - pub role: Option, - pub profile: Option, - #[serde(default)] - pub region: HashMap, - pub ssm_prefix: Option, - pub s3: Option>, -} - -/// AWS region-specific configuration -#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields)] -pub struct AwsRegionConfig { - pub role: Option, -} - -/// Location of signing keys -// These variant names are lowercase because they have to match the text in Infra.toml, and it's -// more common for TOML config to be lowercase. -#[allow(non_camel_case_types)] -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub enum SigningKeyConfig { - file { - path: PathBuf, - }, - kms { - key_id: Option, - #[serde(flatten)] - config: Option, - }, - ssm { - parameter: String, - }, -} - -/// AWS region-specific configuration -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] -//#[serde(deny_unknown_fields)] -pub struct KMSKeyConfig { - #[serde(default)] - pub available_keys: HashMap, - pub key_alias: Option, - #[serde(default)] - pub regions: VecDeque, - #[serde(default)] - pub key_stack_arns: HashMap, -} - -impl TryFrom for Url { - type Error = (); - fn try_from(key: SigningKeyConfig) -> std::result::Result { - match key { - SigningKeyConfig::file { path } => Url::from_file_path(path), - // We don't support passing profiles to tough in the name of the key/parameter, so for - // KMS and SSM we prepend a slash if there isn't one present. - SigningKeyConfig::kms { key_id, .. 
} => { - let mut key_id = key_id.unwrap_or_default(); - key_id = if key_id.starts_with('/') { - key_id.to_string() - } else { - format!("/{}", key_id) - }; - Url::parse(&format!("aws-kms://{}", key_id)).map_err(|_| ()) - } - SigningKeyConfig::ssm { parameter } => { - let parameter = if parameter.starts_with('/') { - parameter - } else { - format!("/{}", parameter) - }; - Url::parse(&format!("aws-ssm://{}", parameter)).map_err(|_| ()) - } - } - } -} - -/// Represents a Bottlerocket repo's location and the metadata needed to update the repo -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct RepoConfig { - pub root_role_url: Option, - pub root_role_sha512: Option, - pub signing_keys: Option, - pub root_keys: Option, - pub metadata_base_url: Option, - pub targets_url: Option, - pub file_hosting_config_name: Option, - pub root_key_threshold: Option, - pub pub_key_threshold: Option, -} - -/// How long it takes for each metadata type to expire -#[derive(Debug, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct RepoExpirationPolicy { - #[serde(deserialize_with = "deserialize_offset")] - pub snapshot_expiration: Duration, - #[serde(deserialize_with = "deserialize_offset")] - pub targets_expiration: Duration, - #[serde(deserialize_with = "deserialize_offset")] - pub timestamp_expiration: Duration, -} - -impl RepoExpirationPolicy { - /// Deserializes a RepoExpirationPolicy from a given path - pub fn from_path
<P>
(path: P) -> Result - where - P: AsRef, - { - let path = path.as_ref(); - let expiration_str = fs::read_to_string(path).context(error::FileSnafu { path })?; - toml::from_str(&expiration_str).context(error::InvalidTomlSnafu { path }) - } -} - -/// Deserializes a Duration in the form of "in X hours/days/weeks" -fn deserialize_offset<'de, D>(deserializer: D) -> std::result::Result -where - D: Deserializer<'de>, -{ - let s: &str = Deserialize::deserialize(deserializer)?; - parse_offset(s).map_err(serde::de::Error::custom) -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub enum Error { - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - File { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] - InvalidToml { - path: PathBuf, - source: toml::de::Error, - }, - - #[snafu(display("Invalid lock file at '{}': {}", path.display(), source))] - InvalidLock { - path: PathBuf, - source: serde_yaml::Error, - }, - - #[snafu(display("Missing config: {}", what))] - MissingConfig { what: String }, - - #[snafu(display("Failed to get parent of path: {}", path.display()))] - Parent { path: PathBuf }, - } -} -pub use error::Error; -pub type Result = std::result::Result; diff --git a/tools/pubsys-config/src/vmware.rs b/tools/pubsys-config/src/vmware.rs deleted file mode 100644 index a50460961b5..00000000000 --- a/tools/pubsys-config/src/vmware.rs +++ /dev/null @@ -1,221 +0,0 @@ -//! The vmware module owns the definition and loading process for our VMware configuration sources. -use lazy_static::lazy_static; -use log::debug; -use serde::{Deserialize, Serialize}; -use snafu::{OptionExt, ResultExt}; -use std::collections::HashMap; -use std::path::{Path, PathBuf}; -use std::{env, fs}; - -lazy_static! { - /// Determine the full path to the Vsphere credentials at runtime. This is an Option because it is - /// possible (however unlikely) that `home_dir()` is unable to find the home directory of the - /// current user - pub static ref VMWARE_CREDS_PATH: Option = home::home_dir().map(|home| home - .join(".config") - .join("pubsys") - .join("vsphere-credentials.toml")); -} - -const GOVC_USERNAME: &str = "GOVC_USERNAME"; -const GOVC_PASSWORD: &str = "GOVC_PASSWORD"; -const GOVC_URL: &str = "GOVC_URL"; -const GOVC_DATACENTER: &str = "GOVC_DATACENTER"; -const GOVC_DATASTORE: &str = "GOVC_DATASTORE"; -const GOVC_NETWORK: &str = "GOVC_NETWORK"; -const GOVC_RESOURCE_POOL: &str = "GOVC_RESOURCE_POOL"; -const GOVC_FOLDER: &str = "GOVC_FOLDER"; - -/// VMware-specific infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct VmwareConfig { - #[serde(default)] - pub datacenters: Vec, - #[serde(default)] - pub datacenter: HashMap, - pub common: Option, -} - -/// VMware datacenter-specific configuration. -/// -/// Fields are optional here because this struct is used to gather environment variables, common -/// config, and datacenter-specific configuration, each of which may not have the complete set of -/// fields. It is used to build a complete datacenter configuration (hence the "Builder" name). 
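To make the environment-variable path concrete: `DatacenterBuilder::from_env` (below) reads the standard govc variables named by the constants above, and `DatacenterCredsBuilder::from_env` further below reads the credential pair. An illustrative shell setup; every value here is a placeholder, and any subset may be set with Infra.toml filling in the rest:

```bash
export GOVC_URL="vcenter.example.com"
export GOVC_DATACENTER="example-datacenter"
export GOVC_DATASTORE="example-datastore"
export GOVC_NETWORK="example-network"
export GOVC_RESOURCE_POOL="/example-datacenter/host/cluster/Resources"
export GOVC_FOLDER="/example-datacenter/vm"
# Read by DatacenterCredsBuilder rather than DatacenterBuilder:
export GOVC_USERNAME="user@vsphere.local"
export GOVC_PASSWORD="example-password"
```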
-#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct DatacenterBuilder { - pub vsphere_url: Option, - pub datacenter: Option, - pub datastore: Option, - pub network: Option, - pub folder: Option, - pub resource_pool: Option, -} - -/// Helper macro for retrieving a field from another struct if the field in `self` is `None` -macro_rules! field_or { - ($self:expr, $field:ident, $other:expr) => { - $self - .$field - .as_ref() - .or($other.and_then(|o| o.$field.as_ref())) - .cloned() - }; -} - -impl DatacenterBuilder { - /// Create a DatacenterBuilder from environment variables - pub fn from_env() -> Self { - Self { - vsphere_url: get_env(GOVC_URL), - datacenter: get_env(GOVC_DATACENTER), - datastore: get_env(GOVC_DATASTORE), - network: get_env(GOVC_NETWORK), - folder: get_env(GOVC_FOLDER), - resource_pool: get_env(GOVC_RESOURCE_POOL), - } - } - - /// Creates a new DatacenterBuilder, merging fields from another (Optional) - /// DatacenterBuilder if the field in `self` is None - pub fn take_missing_from(&self, other: Option<&Self>) -> Self { - Self { - vsphere_url: field_or!(self, vsphere_url, other), - datacenter: field_or!(self, datacenter, other), - datastore: field_or!(self, datastore, other), - network: field_or!(self, network, other), - folder: field_or!(self, folder, other), - resource_pool: field_or!(self, resource_pool, other), - } - } - - /// Attempts to create a `Datacenter`, consuming `self` and ensuring that each field contains a - /// value. - pub fn build(self) -> Result { - let get_or_err = - |opt: Option, what: &str| opt.context(error::MissingConfigSnafu { what }); - - Ok(Datacenter { - vsphere_url: get_or_err(self.vsphere_url, "vSphere URL")?, - datacenter: get_or_err(self.datacenter, "vSphere datacenter")?, - datastore: get_or_err(self.datastore, "vSphere datastore")?, - network: get_or_err(self.network, "vSphere network")?, - folder: get_or_err(self.folder, "vSphere folder")?, - resource_pool: get_or_err(self.resource_pool, "vSphere resource pool")?, - }) - } -} - -/// A fully configured VMware datacenter, i.e. no optional fields -#[derive(Debug)] -pub struct Datacenter { - pub vsphere_url: String, - pub datacenter: String, - pub datastore: String, - pub network: String, - pub folder: String, - pub resource_pool: String, -} - -/// VMware infrastructure credentials for all datacenters -#[derive(Debug, Default, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct DatacenterCredsConfig { - #[serde(default)] - pub datacenter: HashMap, -} - -impl DatacenterCredsConfig { - /// Deserializes a DatacenterCredsConfig from a given path - pub fn from_path
<P>
(path: P) -> Result - where - P: AsRef, - { - let path = path.as_ref(); - let creds_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?; - toml::from_str(&creds_config_str).context(error::InvalidTomlSnafu { path }) - } -} - -/// VMware datacenter-specific credentials. Fields are optional here since this struct is used to -/// gather environment variables as well as fields from file, either of which may or may not exist. -/// It is used to build a complete credentials configuration (hence the "Builder" name). -#[derive(Debug, Default, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct DatacenterCredsBuilder { - pub username: Option, - pub password: Option, -} - -impl DatacenterCredsBuilder { - /// Create a DatacenterCredsBuilder from environment variables - pub fn from_env() -> Self { - Self { - username: get_env(GOVC_USERNAME), - password: get_env(GOVC_PASSWORD), - } - } - - /// Creates a new DatacenterCredsBuilder, merging fields from another (Optional) - /// DatacenterCredsBuilder if the field in `self` is None - pub fn take_missing_from(&self, other: Option<&Self>) -> Self { - Self { - username: field_or!(self, username, other), - password: field_or!(self, password, other), - } - } - /// Attempts to create a `DatacenterCreds`, consuming `self` and ensuring that each field - /// contains a value - pub fn build(self) -> Result { - let get_or_err = - |opt: Option, what: &str| opt.context(error::MissingConfigSnafu { what }); - - Ok(DatacenterCreds { - username: get_or_err(self.username, "vSphere username")?, - password: get_or_err(self.password, "vSphere password")?, - }) - } -} - -/// Fully configured datacenter credentials, i.e. no optional fields -#[derive(Debug)] -pub struct DatacenterCreds { - pub username: String, - pub password: String, -} - -/// Attempt to retrieve an environment variable, returning None if it doesn't exist -fn get_env(var: &str) -> Option { - match env::var(var) { - Ok(v) => Some(v), - Err(e) => { - debug!("Unable to read environment variable '{}': {}", var, e); - None - } - } -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub enum Error { - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - File { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] - InvalidToml { - path: PathBuf, - source: toml::de::Error, - }, - - #[snafu(display("Missing config: {}", what))] - MissingConfig { what: String }, - } -} -pub use error::Error; -pub type Result = std::result::Result; diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml deleted file mode 100644 index f16852bc800..00000000000 --- a/tools/pubsys-setup/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "pubsys-setup" -version = "0.1.0" -authors = ["Zac Mrowicki ", "Tom Kirchner "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false - -[dependencies] -clap = { version = "4", features = ["derive"] } -hex = "0.4" -log = "0.4" -pubsys-config = { path = "../pubsys-config/", version = "0.1" } -reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "blocking"] } -sha2 = "0.10" -shell-words = "1" -simplelog = "0.12" -snafu = "0.7" -tempfile = "3" -url = { version = "2", features = ["serde"] } diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs deleted file mode 100644 index d07cb7a2a1a..00000000000 --- 
a/tools/pubsys-setup/src/main.rs +++ /dev/null @@ -1,388 +0,0 @@ -/*! -`pubsys setup` helps you get started with the credentials you need to make Bottlerocket images and -the repos you use to update them. Specifically, it can create a new key and role, or download an -existing role. -*/ - -use clap::Parser; -use log::{debug, info, trace, warn}; -use pubsys_config::InfraConfig; -use sha2::{Digest, Sha512}; -use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::convert::TryFrom; -use std::fs; -use std::os::unix::fs::PermissionsExt; -use std::path::PathBuf; -use std::process::{self, Command}; -use tempfile::NamedTempFile; -use url::Url; - -/// Helps you get started with credentials to make Bottlerocket images and repos. -#[derive(Debug, Parser)] -struct Args { - #[arg(global = true, long, default_value = "INFO")] - /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE - log_level: LevelFilter, - - #[arg(long)] - /// Path to Infra.toml - infra_config_path: PathBuf, - - #[arg(long)] - /// Use this named repo infrastructure from Infra.toml - repo: String, - - #[arg(long)] - /// Path to root.json - root_role_path: PathBuf, - - #[arg(long)] - /// If we have to generate a local key, store it here - default_key_path: PathBuf, - - #[arg(long)] - /// Allow setup to continue if we have a root role but no key for it - allow_missing_key: bool, -} - -/// The tuftool macro wraps Command to simplify calls to tuftool. -macro_rules! tuftool { - // We use variadic arguments to wrap a format! call so the user doesn't need to call format! - // each time. `tuftool root` always requires the path to root.json so there's always at least - // one. - ($format_str:expr, $($format_arg:expr),*) => { - let arg_str = format!($format_str, $($format_arg),*); - trace!("tuftool arg string: {}", arg_str); - let args = shell_words::split(&arg_str).context(error::CommandSplitSnafu { command: &arg_str })?; - trace!("tuftool split args: {:#?}", args); - - let status = Command::new("tuftool") - .args(args) - .status() - .context(error::TuftoolSpawnSnafu)?; - - ensure!(status.success(), error::TuftoolResultSnafu { - command: arg_str, - code: status.code().map(|i| i.to_string()).unwrap_or_else(|| "".to_string()) - }); - } -} - -/// Main entry point for tuftool setup. -fn run() -> Result<()> { - // Parse and store the args passed to the program - let args = Args::parse(); - - // SimpleLogger will send errors to stderr and anything less to stdout. - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; - - // Make /roles and /keys directories, if they don't exist, so we can write generated files. - let role_dir = args.root_role_path.parent().context(error::PathSnafu { - path: &args.root_role_path, - thing: "root role", - })?; - let key_dir = args.default_key_path.parent().context(error::PathSnafu { - path: &args.default_key_path, - thing: "key", - })?; - fs::create_dir_all(role_dir).context(error::MkdirSnafu { path: role_dir })?; - fs::create_dir_all(key_dir).context(error::MkdirSnafu { path: key_dir })?; - - // Main branching logic for deciding whether to create role/key, use what we have, or error. - match find_root_role_and_key(&args)? { - (Some(_root_role_path), Some(_key_url)) => Ok(()), - (Some(_root_role_path), None) => { - ensure!( - args.allow_missing_key, - error::MissingKeySnafu { repo: args.repo } - ); - Ok(()) - } - // User is missing something, so we generate at least a root.json and maybe a key. 
- (None, maybe_key_url) => { - if maybe_key_url.is_some() { - info!("Didn't find root role in Infra.toml, generating..."); - } else { - info!("Didn't find root role or signing key in Infra.toml, generating..."); - } - - let temp_root_role = - NamedTempFile::new_in(role_dir).context(error::TempFileCreateSnafu { - purpose: "root role", - })?; - let temp_root_role_path = temp_root_role.path().display(); - - // Make tuftool calls to create an initial root.json with basic parameters. - tuftool!("root init '{}'", temp_root_role_path); - - tuftool!("root expire '{}' 'in 52 weeks'", temp_root_role_path); - - tuftool!("root set-threshold '{}' root 1", temp_root_role_path); - tuftool!("root set-threshold '{}' snapshot 1", temp_root_role_path); - tuftool!("root set-threshold '{}' targets 1", temp_root_role_path); - tuftool!("root set-threshold '{}' timestamp 1", temp_root_role_path); - - let key_url = if let Some(key_url) = maybe_key_url { - // If the user has a key, add it to each role. - tuftool!("root add-key '{}' '{}' --role root --role snapshot --role targets --role timestamp", - temp_root_role_path, key_url); - key_url - } else { - // If the user has no key, build one and add it to each role. - tuftool!("root gen-rsa-key '{}' '{}' --role root --role snapshot --role targets --role timestamp", - temp_root_role_path, args.default_key_path.display()); - warn!( - "Created a key at {} - note that for production use, you should \ - use a key stored in a trusted service like KMS or SSM", - args.default_key_path.display() - ); - - Url::from_file_path(&args.default_key_path) - .ok() - .context(error::FileToUrlSnafu { - path: args.default_key_path, - })? - }; - - // Sign the role with the given key. - tuftool!("root sign '{}' -k '{}'", temp_root_role_path, key_url); - - temp_root_role - .persist_noclobber(&args.root_role_path) - .context(error::TempFilePersistSnafu { - path: &args.root_role_path, - })?; - - warn!( - "Created a root role at {} - note that for production use, you should create \ - a role with a shorter expiration and higher thresholds", - args.root_role_path.display() - ); - - // Root role files don't need to be secret. - fs::set_permissions(&args.root_role_path, fs::Permissions::from_mode(0o644)).context( - error::SetModeSnafu { - path: &args.root_role_path, - }, - )?; - - Ok(()) - } - } -} - -/// Searches Infra.toml and expected local paths for a root role and key for the requested repo. -fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option)> { - let (mut root_role_path, mut key_url) = (None, None); - - if InfraConfig::lock_or_infra_config_exists(&args.infra_config_path) - .context(error::ConfigSnafu)? - { - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(error::ConfigSnafu)?; - trace!("Parsed infra config: {:?}", infra_config); - - // Check whether the user has the relevant repo defined in their Infra.toml. - if let Some(repo_config) = infra_config - .repo - .as_ref() - .and_then(|repo_section| repo_section.get(&args.repo)) - { - // If they have a root role URL and checksum defined, we can download it. - if let (Some(url), Some(sha512)) = - (&repo_config.root_role_url, &repo_config.root_role_sha512) - { - // If it's already been downloaded, just confirm the checksum. 
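Condensed into a shell session, the generation arm above drives tuftool roughly as follows. The subcommands and flags are exactly the ones the `tuftool!` invocations pass; the paths are placeholders, and the four threshold calls are collapsed into a loop here:

```bash
tuftool root init ./roles/root.json
tuftool root expire ./roles/root.json 'in 52 weeks'
for role in root snapshot targets timestamp ; do
  tuftool root set-threshold ./roles/root.json "${role}" 1
done
# With no existing key, one is generated and added to every role.
tuftool root gen-rsa-key ./roles/root.json ./keys/local.pem \
  --role root --role snapshot --role targets --role timestamp
tuftool root sign ./roles/root.json -k "file://${PWD}/keys/local.pem"
```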
- if args.root_role_path.exists() { - let root_role_data = - fs::read_to_string(&args.root_role_path).context(error::ReadFileSnafu { - path: &args.root_role_path, - })?; - let mut d = Sha512::new(); - d.update(&root_role_data); - let digest = hex::encode(d.finalize()); - - ensure!( - &digest == sha512, - error::HashSnafu { - expected: sha512, - got: digest, - thing: args.root_role_path.to_string_lossy() - } - ); - debug!( - "Using existing downloaded root role at {}", - args.root_role_path.display() - ); - } else { - // Download the root role by URL and verify its checksum before writing it. - let root_role_data = if url.scheme() == "file" { - // reqwest won't fetch a file URL, so just read the file. - let path = url - .to_file_path() - .ok() - .with_context(|| error::UrlToFileSnafu { url: url.clone() })?; - fs::read_to_string(&path).context(error::ReadFileSnafu { path: &path })? - } else { - reqwest::blocking::get(url.clone()) - .with_context(|_| error::GetUrlSnafu { url: url.clone() })? - .text() - .with_context(|_| error::GetUrlSnafu { url: url.clone() })? - }; - - let mut d = Sha512::new(); - d.update(&root_role_data); - let digest = hex::encode(d.finalize()); - - ensure!( - &digest == sha512, - error::HashSnafu { - expected: sha512, - got: digest, - thing: url.to_string() - } - ); - - // Write root role to expected path on disk. - fs::write(&args.root_role_path, &root_role_data).context( - error::WriteFileSnafu { - path: &args.root_role_path, - }, - )?; - debug!("Downloaded root role to {}", args.root_role_path.display()); - } - - root_role_path = Some(&args.root_role_path); - } else if repo_config.root_role_url.is_some() || repo_config.root_role_sha512.is_some() - { - // Must specify both URL and checksum. - error::RootRoleConfigSnafu.fail()?; - } - - if let Some(key_config) = &repo_config.signing_keys { - key_url = Some( - Url::try_from(key_config.clone()) - .ok() - .context(error::SigningKeyUrlSnafu { repo: &args.repo })?, - ); - } - } else { - info!( - "No repo config in '{}' - using local roles/keys", - args.infra_config_path.display() - ); - } - } else { - info!( - "No infra config at '{}' - using local roles/keys", - args.infra_config_path.display() - ); - } - - // If they don't have an Infra.toml or didn't define a root role / key there, check for them in - // expected local paths. - if root_role_path.is_none() && args.root_role_path.exists() { - root_role_path = Some(&args.root_role_path); - } - if key_url.is_none() && args.default_key_path.exists() { - key_url = Some(Url::from_file_path(&args.default_key_path).ok().context( - error::FileToUrlSnafu { - path: &args.default_key_path, - }, - )?); - } - - Ok((root_role_path, key_url)) -} - -// Returning a Result from main makes it print a Debug representation of the error, but with Snafu -// we have nice Display representations of the error, so we wrap "main" (run) and print any error. 
-// https://github.com/shepmaster/snafu/issues/110 -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - use url::Url; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(super) enum Error { - #[snafu(display("Error splitting shell command - {} - input: {}", source, command))] - CommandSplit { - command: String, - source: shell_words::ParseError, - }, - - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display("Path not valid as a URL: {}", path.display()))] - FileToUrl { path: PathBuf }, - - #[snafu(display("Failed to fetch URL '{}': {}", url, source))] - GetUrl { url: Url, source: reqwest::Error }, - - #[snafu(display("Hash mismatch for '{}', got {} but expected {}", thing, got, expected))] - Hash { - expected: String, - got: String, - thing: String, - }, - - #[snafu(display("Logger setup error: {}", source))] - Logger { source: log::SetLoggerError }, - - #[snafu(display("'{}' repo has root role but no key. You wouldn't be able to update a repo without the matching key. To continue, pass '-e ALLOW_MISSING_KEY=true'", repo))] - MissingKey { repo: String }, - - #[snafu(display("Failed to create '{}': {}", path.display(), source))] - Mkdir { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid path '{}' for {}", path.display(), thing))] - Path { path: PathBuf, thing: String }, - - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - ReadFile { path: PathBuf, source: io::Error }, - - #[snafu(display( - "Must specify both URL and SHA512 of root role in Infra.toml, found only one" - ))] - RootRoleConfig, - - #[snafu(display("Failed to set permissions on {}: {}", path.display(), source))] - SetMode { path: PathBuf, source: io::Error }, - - #[snafu(display("Unable to build URL from signing key for repo '{}'", repo))] - SigningKeyUrl { repo: String }, - - #[snafu(display("Failed to create temp file for {}: {}", purpose, source))] - TempFileCreate { purpose: String, source: io::Error }, - - #[snafu(display("Failed to move temp file to {}: {}", path.display(), source))] - TempFilePersist { - path: PathBuf, - source: tempfile::PersistError, - }, - - #[snafu(display("Returned {}: tuftool {}", code, command))] - TuftoolResult { code: String, command: String }, - - #[snafu(display("Failed to start tuftool: {}", source))] - TuftoolSpawn { source: io::Error }, - - #[snafu(display("URL not valid as a path: {}", url))] - UrlToFile { url: Url }, - - #[snafu(display("Failed to write '{}': {}", path.display(), source))] - WriteFile { path: PathBuf, source: io::Error }, - } -} -type Result = std::result::Result; diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml deleted file mode 100644 index c306d8087aa..00000000000 --- a/tools/pubsys/Cargo.toml +++ /dev/null @@ -1,52 +0,0 @@ -[package] -name = "pubsys" -version = "0.1.0" -authors = ["Zac Mrowicki ", "Tom Kirchner "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false - -[dependencies] -aws-config = "0.55" -aws-credential-types = "0.55" -aws-sdk-ebs = "0.28" -aws-sdk-ec2 = "0.28" -aws-sdk-kms = "0.28" -aws-sdk-ssm = "0.28" -aws-sdk-sts = "0.28" -aws-smithy-types = "0.55" -aws-types = "0.55" -buildsys = { path = "../buildsys", version = "0.1" } -chrono = { version = "0.4", default-features = false, features = ["std", "clock"] } -clap = { version = "4", features = ["derive"] } -coldsnap = { version = "0.6", 
default-features = false, features = ["aws-sdk-rust-rustls"] } -duct = "0.13" -futures = "0.3" -governor = "0.5" -indicatif = "0.17" -lazy_static = "1" -log = "0.4" -nonzero_ext = "0.3" -num_cpus = "1" -parse-datetime = { path = "../../sources/parse-datetime", version = "0.1" } -pubsys-config = { path = "../pubsys-config/", version = "0.1" } -rayon = "1" -# Need to bring in reqwest with a TLS feature so tough can support TLS repos. -reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "blocking"] } -semver = "1" -serde = { version = "1", features = ["derive"] } -serde_json = "1" -serde_plain = "1" -simplelog = "0.12" -snafu = "0.7" -tabled = "0.10" -tempfile = "3" -tinytemplate = "1" -tokio = { version = "1", features = ["full"] } # LTS -tokio-stream = { version = "0.1", features = ["time"] } -toml = "0.5" -tough = { version = "0.14", features = ["http"] } -tough-kms = "0.6" -tough-ssm = "0.9" -update_metadata = { path = "../../sources/updater/update_metadata/", version = "0.1" } -url = { version = "2", features = ["serde"] } diff --git a/tools/pubsys/src/aws/ami/launch_permissions.rs b/tools/pubsys/src/aws/ami/launch_permissions.rs deleted file mode 100644 index f8f58447412..00000000000 --- a/tools/pubsys/src/aws/ami/launch_permissions.rs +++ /dev/null @@ -1,101 +0,0 @@ -use aws_sdk_ec2::{ - types::{ImageAttributeName, LaunchPermission}, - Client as Ec2Client, -}; -use serde::{Deserialize, Serialize}; -use snafu::ResultExt; - -/// Returns the launch permissions for the given AMI -pub(crate) async fn get_launch_permissions( - ec2_client: &Ec2Client, - region: &str, - ami_id: &str, -) -> Result> { - let ec2_response = ec2_client - .describe_image_attribute() - .image_id(ami_id) - .attribute(ImageAttributeName::LaunchPermission) - .send() - .await - .context(error::DescribeImageAttributeSnafu { - ami_id, - region: region.to_string(), - })?; - - let mut launch_permissions = vec![]; - - let responses: Vec = - ec2_response.launch_permissions().unwrap_or(&[]).to_vec(); - for permission in responses { - launch_permissions.push(LaunchPermissionDef::try_from(permission)?) - } - Ok(launch_permissions) -} - -#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq, Hash)] -#[serde(rename_all = "lowercase")] -pub(crate) enum LaunchPermissionDef { - /// The name of the group - Group(String), - - /// The Amazon Web Services account ID - UserId(String), - - /// The ARN of an organization - OrganizationArn(String), - - /// The ARN of an organizational unit - OrganizationalUnitArn(String), -} - -impl TryFrom for LaunchPermissionDef { - type Error = crate::aws::ami::launch_permissions::Error; - - fn try_from(launch_permission: LaunchPermission) -> std::result::Result { - let LaunchPermission { - group, - user_id, - organization_arn, - organizational_unit_arn, - .. 
- } = launch_permission.clone(); - match (group, user_id, organization_arn, organizational_unit_arn) { - (Some(group), None, None, None) => { - Ok(LaunchPermissionDef::Group(group.as_str().to_string())) - } - (None, Some(user_id), None, None) => Ok(LaunchPermissionDef::UserId(user_id)), - (None, None, Some(organization_arn), None) => { - Ok(LaunchPermissionDef::OrganizationArn(organization_arn)) - } - (None, None, None, Some(organizational_unit_arn)) => Ok( - LaunchPermissionDef::OrganizationalUnitArn(organizational_unit_arn), - ), - _ => Err(Error::InvalidLaunchPermission { launch_permission }), - } - } -} - -mod error { - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::describe_image_attribute::DescribeImageAttributeError; - use aws_sdk_ec2::types::LaunchPermission; - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error describing AMI {} in {}: {}", ami_id, region, source))] - DescribeImageAttribute { - ami_id: String, - region: String, - #[snafu(source(from(SdkError, Box::new)))] - source: Box>, - }, - - #[snafu(display("Invalid launch permission: {:?}", launch_permission))] - InvalidLaunchPermission { launch_permission: LaunchPermission }, - } -} -pub(crate) use error::Error; - -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs deleted file mode 100644 index 825f23dc923..00000000000 --- a/tools/pubsys/src/aws/ami/mod.rs +++ /dev/null @@ -1,627 +0,0 @@ -//! The ami module owns the 'ami' subcommand and controls the process of registering and copying -//! EC2 AMIs. - -pub(crate) mod launch_permissions; -pub(crate) mod public; -mod register; -mod snapshot; -pub(crate) mod wait; - -use crate::aws::ami::launch_permissions::get_launch_permissions; -use crate::aws::ami::public::ami_is_public; -use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots, ModifyOptions}; -use crate::aws::{client::build_client_config, parse_arch, region_from_string}; -use crate::Args; -use aws_sdk_ebs::Client as EbsClient; -use aws_sdk_ec2::error::{ProvideErrorMetadata, SdkError}; -use aws_sdk_ec2::operation::copy_image::{CopyImageError, CopyImageOutput}; -use aws_sdk_ec2::types::{ArchitectureValues, OperationType}; -use aws_sdk_ec2::{config::Region, Client as Ec2Client}; -use aws_sdk_sts::operation::get_caller_identity::{ - GetCallerIdentityError, GetCallerIdentityOutput, -}; -use aws_sdk_sts::Client as StsClient; -use clap::Parser; -use futures::future::{join, lazy, ready, FutureExt}; -use futures::stream::{self, StreamExt}; -use log::{error, info, trace, warn}; -use pubsys_config::{AwsConfig as PubsysAwsConfig, InfraConfig}; -use register::{get_ami_id, register_image, RegisteredIds}; -use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::{HashMap, HashSet}; -use std::path::PathBuf; -use wait::wait_for_ami; - -const WARN_SEPARATOR: &str = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; - -/// Builds Bottlerocket AMIs using latest build artifacts -#[derive(Debug, Parser)] -pub(crate) struct AmiArgs { - /// Path to the image containing the os volume - #[arg(short = 'o', long)] - os_image: PathBuf, - - /// Path to the image containing the data volume - #[arg(short = 'd', long)] - data_image: Option, - - /// Path to the variant manifest - #[arg(short = 'v', long)] - variant_manifest: PathBuf, - - /// Path to the UEFI data - #[arg(short = 'e', long)] - uefi_data: PathBuf, - - /// 
The architecture of the machine image - #[arg(short = 'a', long, value_parser = parse_arch)] - arch: ArchitectureValues, - - /// The desired AMI name - #[arg(short = 'n', long)] - name: String, - - /// The desired AMI description - #[arg(long)] - description: Option, - - /// Don't display progress bars - #[arg(long)] - no_progress: bool, - - /// Regions where you want the AMI, the first will be used as the base for copying - #[arg(long, value_delimiter = ',')] - regions: Vec, - - /// If specified, save created regional AMI IDs in JSON at this path. - #[arg(long)] - ami_output: Option, -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { - match _run(args, ami_args).await { - Ok(amis) => { - // Write the AMI IDs to file if requested - if let Some(ref path) = ami_args.ami_output { - write_amis(path, &amis).context(error::WriteAmisSnafu { path })?; - } - Ok(()) - } - Err(e) => Err(e), - } -} - -async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> { - let mut amis = HashMap::new(); - - // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) - .context(error::ConfigSnafu)?; - trace!("Using infra config: {:?}", infra_config); - - let aws = infra_config.aws.unwrap_or_default(); - - // If the user gave an override list of regions, use that, otherwise use what's in the config. - let mut regions = if !ami_args.regions.is_empty() { - ami_args.regions.clone() - } else { - aws.regions.clone().into() - } - .into_iter() - .map(|name| region_from_string(&name)) - .collect::>(); - - ensure!( - !regions.is_empty(), - error::MissingConfigSnafu { - missing: "aws.regions" - } - ); - - // We register in this base region first, then copy from there to any other regions. - let base_region = regions.remove(0); - - // Build EBS client for snapshot management, and EC2 client for registration - let client_config = build_client_config(&base_region, &base_region, &aws).await; - - let base_ebs_client = EbsClient::new(&client_config); - - let base_ec2_client = Ec2Client::new(&client_config); - - // Check if the AMI already exists, in which case we can use the existing ID, otherwise we - // register a new one. 
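For context, an invocation of the `ami` subcommand shaped by the AmiArgs parser above might look like this. Every path, name, and region is a placeholder, and pubsys's global arguments (such as the Infra.toml path) are omitted:

```bash
pubsys ami \
  --os-image ./build/images/x86_64/bottlerocket-os.img \
  --variant-manifest ./variants/aws-dev/Cargo.toml \
  --uefi-data ./build/images/x86_64/uefi-data.bin \
  --arch x86_64 \
  --name "bottlerocket-example-v1.0.0" \
  --regions us-west-2,us-east-1 \
  --ami-output ./build/amis.json
```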
- let maybe_id = get_ami_id( - &ami_args.name, - &ami_args.arch, - &base_region, - &base_ec2_client, - ) - .await - .context(error::GetAmiIdSnafu { - name: &ami_args.name, - arch: ami_args.arch.as_ref(), - region: base_region.as_ref(), - })?; - - // If the AMI does not exist yet, `public` should be false and `launch_permissions` empty - let mut public = false; - let mut launch_permissions = vec![]; - - let (ids_of_image, already_registered) = if let Some(found_id) = maybe_id { - warn!( - "\n{}\n\nFound '{}' already registered in {}: {}\n\n{0}", - WARN_SEPARATOR, ami_args.name, base_region, found_id - ); - let snapshot_ids = get_snapshots(&found_id, &base_region, &base_ec2_client) - .await - .context(error::GetSnapshotsSnafu { - image_id: &found_id, - region: base_region.as_ref(), - })?; - let found_ids = RegisteredIds { - image_id: found_id.clone(), - snapshot_ids, - }; - - public = ami_is_public(&base_ec2_client, base_region.as_ref(), &found_id) - .await - .context(error::IsAmiPublicSnafu { - image_id: found_id.clone(), - region: base_region.to_string(), - })?; - - launch_permissions = - get_launch_permissions(&base_ec2_client, base_region.as_ref(), &found_id) - .await - .context(error::DescribeImageAttributeSnafu { - image_id: found_id, - region: base_region.to_string(), - })?; - - (found_ids, true) - } else { - let new_ids = register_image(ami_args, &base_region, base_ebs_client, &base_ec2_client) - .await - .context(error::RegisterImageSnafu { - name: &ami_args.name, - arch: ami_args.arch.as_ref(), - region: base_region.as_ref(), - })?; - info!( - "Registered AMI '{}' in {}: {}", - ami_args.name, base_region, new_ids.image_id - ); - (new_ids, false) - }; - - amis.insert( - base_region.as_ref().to_string(), - Image::new( - &ids_of_image.image_id, - &ami_args.name, - Some(public), - Some(launch_permissions), - ), - ); - - // If we don't need to copy AMIs, we're done. - if regions.is_empty() { - return Ok(amis); - } - - // Wait for AMI to be available so it can be copied - let successes_required = if already_registered { 1 } else { 3 }; - wait_for_ami( - &ids_of_image.image_id, - &base_region, - &base_region, - "available", - successes_required, - &aws, - ) - .await - .context(error::WaitAmiSnafu { - id: &ids_of_image.image_id, - region: base_region.as_ref(), - })?; - - // For every other region, initiate copy-image calls. - - // First we need to find the account IDs for any given roles, so we can grant access to those - // accounts to copy the AMI and snapshots. - info!("Getting account IDs for target regions so we can grant access to copy source AMI"); - let mut account_ids = get_account_ids(®ions, &base_region, &aws).await?; - - // Get the account ID used in the base region; we don't need to grant to it so we can remove it - // from the list. - let client_config = build_client_config(&base_region, &base_region, &aws).await; - let base_sts_client = StsClient::new(&client_config); - - let response = base_sts_client.get_caller_identity().send().await.context( - error::GetCallerIdentitySnafu { - region: base_region.as_ref(), - }, - )?; - let base_account_id = response.account.context(error::MissingInResponseSnafu { - request_type: "GetCallerIdentity", - missing: "account", - })?; - account_ids.remove(&base_account_id); - - // If we have any accounts other than the base account, grant them access. 
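In AWS CLI terms the wait and account lookup above amount to roughly the following; this is only a rough equivalent, since `wait_for_ami` implements its own polling with a configurable success count, and the region and AMI ID are placeholders:

```bash
aws ec2 wait image-available --region us-west-2 --image-ids ami-0123456789abcdef0
aws sts get-caller-identity --query Account --output text
```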
- if !account_ids.is_empty() { - info!("Granting access to target accounts so we can copy the AMI"); - let account_id_vec: Vec<_> = account_ids.into_iter().collect(); - - let modify_options = ModifyOptions { - user_ids: account_id_vec, - group_names: Vec::new(), - organization_arns: Vec::new(), - organizational_unit_arns: Vec::new(), - }; - - modify_snapshots( - &modify_options, - &OperationType::Add, - &ids_of_image.snapshot_ids, - &base_ec2_client, - &base_region, - ) - .await - .context(error::GrantAccessSnafu { - thing: "snapshots", - region: base_region.as_ref(), - })?; - - modify_image( - &modify_options, - &OperationType::Add, - &ids_of_image.image_id, - &base_ec2_client, - ) - .await - .context(error::GrantImageAccessSnafu { - thing: "image", - region: base_region.as_ref(), - })?; - } - - // Next, make EC2 clients so we can fetch and copy AMIs. We make a map storing our regional - // clients because they're used in a future and need to live until the future is resolved. - let mut ec2_clients = HashMap::with_capacity(regions.len()); - for region in regions.iter() { - let client_config = build_client_config(region, &base_region, &aws).await; - let ec2_client = Ec2Client::new(&client_config); - ec2_clients.insert(region.clone(), ec2_client); - } - - // First, we check if the AMI already exists in each region. - info!("Checking whether AMIs already exist in target regions"); - let mut get_requests = Vec::with_capacity(regions.len()); - for region in regions.iter() { - let ec2_client = &ec2_clients[region]; - let get_request = get_ami_id(&ami_args.name, &ami_args.arch, region, ec2_client); - let info_future = ready(region.clone()); - get_requests.push(join(info_future, get_request)); - } - let request_stream = stream::iter(get_requests).buffer_unordered(4); - let get_responses: Vec<(Region, std::result::Result, register::Error>)> = - request_stream.collect().await; - - // If an AMI already existed, just add it to our list, otherwise prepare a copy request. 
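The grants above have familiar AWS CLI counterparts, which can help when auditing them by hand; the code issues the same attribute modifications through the SDK, and the IDs below are placeholders:

```bash
# Let another account copy the backing snapshot and launch the image.
aws ec2 modify-snapshot-attribute \
  --snapshot-id snap-0123456789abcdef0 \
  --attribute createVolumePermission \
  --operation-type add \
  --user-ids 111122223333
aws ec2 modify-image-attribute \
  --image-id ami-0123456789abcdef0 \
  --launch-permission "Add=[{UserId=111122223333}]"
```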
- let mut copy_requests = Vec::with_capacity(regions.len()); - for (region, get_response) in get_responses { - let get_response = get_response.context(error::GetAmiIdSnafu { - name: &ami_args.name, - arch: ami_args.arch.as_ref(), - region: region.as_ref(), - })?; - if let Some(id) = get_response { - info!( - "Found '{}' already registered in {}: {}", - ami_args.name, region, id - ); - let public = ami_is_public(&ec2_clients[®ion], region.as_ref(), &id) - .await - .context(error::IsAmiPublicSnafu { - image_id: id.clone(), - region: base_region.to_string(), - })?; - - let launch_permissions = - get_launch_permissions(&ec2_clients[®ion], region.as_ref(), &id) - .await - .context(error::DescribeImageAttributeSnafu { - region: region.as_ref(), - image_id: id.clone(), - })?; - - amis.insert( - region.as_ref().to_string(), - Image::new(&id, &ami_args.name, Some(public), Some(launch_permissions)), - ); - continue; - } - - let ec2_client = &ec2_clients[®ion]; - let base_region = base_region.to_owned(); - let copy_future = ec2_client - .copy_image() - .set_description(ami_args.description.clone()) - .set_name(Some(ami_args.name.clone())) - .set_source_image_id(Some(ids_of_image.image_id.clone())) - .set_source_region(Some(base_region.as_ref().to_string())) - .send(); - - // Store the region so we can output it to the user - let region_future = ready(region.clone()); - // Let the user know the copy is starting, when this future goes to run - let message_future = - lazy(move |_| info!("Starting copy from {} to {}", base_region, region)); - copy_requests.push(message_future.then(|_| join(region_future, copy_future))); - } - - // If all target regions already have the AMI, we're done. - if copy_requests.is_empty() { - return Ok(amis); - } - - // Start requests; they return almost immediately and the copying work is done by the service - // afterward. You should wait for the AMI status to be "available" before launching it. - // (We still use buffer_unordered, rather than something like join_all, to retain some control - // over the number of requests going out in case we need it later, but this will effectively - // spin through all regions quickly because the requests return before any copying is done.) - let request_stream = stream::iter(copy_requests).buffer_unordered(4); - // Run through the stream and collect results into a list. - let copy_responses: Vec<( - Region, - std::result::Result>, - )> = request_stream.collect().await; - - // Report on successes and errors; don't fail immediately if we see an error so we can report - // all successful IDs. - let mut saw_error = false; - for (region, copy_response) in copy_responses { - match copy_response { - Ok(success) => { - if let Some(image_id) = success.image_id { - info!( - "Registered AMI '{}' in {}: {}", - ami_args.name, region, image_id, - ); - amis.insert( - region.as_ref().to_string(), - Image::new(&image_id, &ami_args.name, Some(false), Some(vec![])), - ); - } else { - saw_error = true; - error!( - "Registered AMI '{}' in {} but didn't receive an AMI ID!", - ami_args.name, region, - ); - } - } - Err(e) => { - saw_error = true; - error!( - "Copy to {} failed: {}", - region, - e.into_service_error().code().unwrap_or("unknown") - ); - } - } - } - - ensure!(!saw_error, error::AmiCopySnafu); - - Ok(amis) -} - -/// If JSON output was requested, we serialize out a mapping of region to AMI information; this -/// struct holds the information we save about each AMI. 
The `ssm` subcommand uses this -/// information to populate templates representing SSM parameter names and values. -#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq, Hash)] -pub(crate) struct Image { - pub(crate) id: String, - pub(crate) name: String, - pub(crate) public: Option, - pub(crate) launch_permissions: Option>, -} - -impl Image { - fn new( - id: &str, - name: &str, - public: Option, - launch_permissions: Option>, - ) -> Self { - Self { - id: id.to_string(), - name: name.to_string(), - public, - launch_permissions, - } - } -} - -/// Returns the set of account IDs associated with the roles configured for the given regions. -async fn get_account_ids( - regions: &[Region], - base_region: &Region, - pubsys_aws_config: &PubsysAwsConfig, -) -> Result> { - let mut grant_accounts = HashSet::new(); - - // We make a map storing our regional clients because they're used in a future and need to - // live until the future is resolved. - let mut sts_clients = HashMap::with_capacity(regions.len()); - for region in regions.iter() { - let client_config = build_client_config(region, base_region, pubsys_aws_config).await; - let sts_client = StsClient::new(&client_config); - sts_clients.insert(region.clone(), sts_client); - } - - let mut requests = Vec::with_capacity(regions.len()); - for region in regions.iter() { - let sts_client = &sts_clients[region]; - let response_future = sts_client.get_caller_identity().send(); - - // Store the region so we can include it in any errors - let region_future = ready(region.clone()); - requests.push(join(region_future, response_future)); - } - - let request_stream = stream::iter(requests).buffer_unordered(4); - // Run through the stream and collect results into a list. - let responses: Vec<( - Region, - std::result::Result>, - )> = request_stream.collect().await; - - for (region, response) in responses { - let response = response.context(error::GetCallerIdentitySnafu { - region: region.as_ref(), - })?; - let account_id = response.account.context(error::MissingInResponseSnafu { - request_type: "GetCallerIdentity", - missing: "account", - })?; - grant_accounts.insert(account_id); - } - trace!("Found account IDs {:?}", grant_accounts); - - Ok(grant_accounts) -} - -mod error { - use crate::aws::{ami, publish_ami}; - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::modify_image_attribute::ModifyImageAttributeError; - use aws_sdk_ec2::types::LaunchPermission; - use aws_sdk_sts::operation::get_caller_identity::GetCallerIdentityError; - use snafu::Snafu; - use std::path::PathBuf; - - use super::public; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Some AMIs failed to copy, see above"))] - AmiCopy, - - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display( - "Failed to describe image attributes for image {} in region {}: {}", - image_id, - region, - source - ))] - DescribeImageAttribute { - image_id: String, - region: String, - source: super::launch_permissions::Error, - }, - - #[snafu(display("Failed to create file '{}': {}", path.display(), source))] - FileCreate { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Error getting AMI ID for {} {} in {}: {}", arch, name, region, source))] - GetAmiId { - name: String, - arch: String, - region: String, - source: ami::register::Error, - }, - - #[snafu(display("Error getting account ID in {}: {}", region, source))] - GetCallerIdentity { - region: String, - 
-
-mod error {
-    use crate::aws::{ami, publish_ami};
-    use aws_sdk_ec2::error::SdkError;
-    use aws_sdk_ec2::operation::modify_image_attribute::ModifyImageAttributeError;
-    use aws_sdk_ec2::types::LaunchPermission;
-    use aws_sdk_sts::operation::get_caller_identity::GetCallerIdentityError;
-    use snafu::Snafu;
-    use std::path::PathBuf;
-
-    use super::public;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Some AMIs failed to copy, see above"))]
-        AmiCopy,
-
-        #[snafu(display("Error reading config: {}", source))]
-        Config { source: pubsys_config::Error },
-
-        #[snafu(display(
-            "Failed to describe image attributes for image {} in region {}: {}",
-            image_id,
-            region,
-            source
-        ))]
-        DescribeImageAttribute {
-            image_id: String,
-            region: String,
-            source: super::launch_permissions::Error,
-        },
-
-        #[snafu(display("Failed to create file '{}': {}", path.display(), source))]
-        FileCreate {
-            path: PathBuf,
-            source: std::io::Error,
-        },
-
-        #[snafu(display("Error getting AMI ID for {} {} in {}: {}", arch, name, region, source))]
-        GetAmiId {
-            name: String,
-            arch: String,
-            region: String,
-            source: ami::register::Error,
-        },
-
-        #[snafu(display("Error getting account ID in {}: {}", region, source))]
-        GetCallerIdentity {
-            region: String,
-            source: SdkError<GetCallerIdentityError>,
-        },
-
-        #[snafu(display(
-            "Failed to get snapshot IDs associated with {} in {}: {}",
-            image_id,
-            region,
-            source
-        ))]
-        GetSnapshots {
-            image_id: String,
-            region: String,
-            source: publish_ami::Error,
-        },
-
-        #[snafu(display("Failed to grant access to {} in {}: {}", thing, region, source))]
-        GrantAccess {
-            thing: String,
-            region: String,
-            source: publish_ami::Error,
-        },
-
-        #[snafu(display("Failed to grant access to {} in {}: {}", thing, region, source))]
-        GrantImageAccess {
-            thing: String,
-            region: String,
-            source: SdkError<ModifyImageAttributeError>,
-        },
-
-        #[snafu(display(
-            "Failed to check if AMI with id {} is public in {}: {}",
-            image_id,
-            region,
-            source
-        ))]
-        IsAmiPublic {
-            image_id: String,
-            region: String,
-            source: public::Error,
-        },
-
-        #[snafu(display("Invalid launch permission: {:?}", launch_permission))]
-        InvalidLaunchPermission { launch_permission: LaunchPermission },
-
-        #[snafu(display("Infra.toml is missing {}", missing))]
-        MissingConfig { missing: String },
-
-        #[snafu(display("Response to {} was missing {}", request_type, missing))]
-        MissingInResponse {
-            request_type: String,
-            missing: String,
-        },
-
-        #[snafu(display("Error registering {} {} in {}: {}", arch, name, region, source))]
-        RegisterImage {
-            name: String,
-            arch: String,
-            region: String,
-            source: ami::register::Error,
-        },
-
-        #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))]
-        WaitAmi {
-            id: String,
-            region: String,
-            source: ami::wait::Error,
-        },
-
-        #[snafu(display("Failed to write AMIs to '{}': {}", path.display(), source))]
-        WriteAmis {
-            path: PathBuf,
-            source: publish_ami::Error,
-        },
-    }
-}
-pub(crate) use error::Error;
-
-use self::launch_permissions::LaunchPermissionDef;
-
-use super::publish_ami::write_amis;
-type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/pubsys/src/aws/ami/public.rs b/tools/pubsys/src/aws/ami/public.rs
deleted file mode 100644
index 6404abda122..00000000000
--- a/tools/pubsys/src/aws/ami/public.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-use aws_sdk_ec2::Client as Ec2Client;
-use snafu::{ensure, OptionExt, ResultExt};
-
-/// Returns whether or not the given AMI ID refers to a public AMI.
-pub(crate) async fn ami_is_public(
-    ec2_client: &Ec2Client,
-    region: &str,
-    ami_id: &str,
-) -> Result<bool> {
-    let ec2_response = ec2_client
-        .describe_images()
-        .image_ids(ami_id.to_string())
-        .send()
-        .await
-        .context(error::DescribeImagesSnafu {
-            ami_id: ami_id.to_string(),
-            region: region.to_string(),
-        })?;
-
-    let returned_images = ec2_response.images().unwrap_or_default();
-
-    ensure!(
-        returned_images.len() <= 1,
-        error::TooManyImagesSnafu {
-            ami_id: ami_id.to_string(),
-            region: region.to_string(),
-        }
-    );
-
-    Ok(returned_images
-        .first()
-        .context(error::NoSuchImageSnafu {
-            ami_id: ami_id.to_string(),
-            region: region.to_string(),
-        })?
-        .public()
-        .unwrap_or(false))
-}
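Each of these deleted modules uses the same snafu layout: a private `mod error` with an `Error` enum whose derived `*Snafu` context selectors are applied through `context` and `ensure!`. A freestanding sketch of the layout (the `ReadConfig`/`EmptyConfig` variants and the `Infra.toml` path are illustrative):

    use snafu::{ensure, ResultExt, Snafu};

    #[derive(Debug, Snafu)]
    enum Error {
        // `ResultExt::context` wraps the source error in this variant.
        #[snafu(display("Failed to read config '{}': {}", path, source))]
        ReadConfig { path: String, source: std::io::Error },

        // `ensure!` raises this variant when its condition fails.
        #[snafu(display("Config '{}' is empty", path))]
        EmptyConfig { path: String },
    }

    fn load(path: &str) -> Result<String, Error> {
        let text = std::fs::read_to_string(path).context(ReadConfigSnafu { path })?;
        ensure!(!text.is_empty(), EmptyConfigSnafu { path });
        Ok(text)
    }

    fn main() {
        if let Err(e) = load("Infra.toml") {
            eprintln!("{}", e);
        }
    }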
-
-mod error {
-    use aws_sdk_ec2::error::SdkError;
-    use aws_sdk_ec2::operation::describe_images::DescribeImagesError;
-    use snafu::Snafu;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Error describing AMI {} in {}: {}", ami_id, region, source))]
-        DescribeImages {
-            ami_id: String,
-            region: String,
-            #[snafu(source(from(SdkError<DescribeImagesError>, Box::new)))]
-            source: Box<SdkError<DescribeImagesError>>,
-        },
-
-        #[snafu(display("AMI {} not found in {}", ami_id, region))]
-        NoSuchImage { ami_id: String, region: String },
-
-        #[snafu(display("Multiple AMIs with ID {} found in {}", ami_id, region))]
-        TooManyImages { ami_id: String, region: String },
-    }
-}
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs
deleted file mode 100644
index aed614aec15..00000000000
--- a/tools/pubsys/src/aws/ami/register.rs
+++ /dev/null
@@ -1,331 +0,0 @@
-use super::{snapshot::snapshot_from_image, AmiArgs};
-use aws_sdk_ebs::Client as EbsClient;
-use aws_sdk_ec2::types::{
-    ArchitectureValues, BlockDeviceMapping, EbsBlockDevice, Filter, VolumeType,
-};
-use aws_sdk_ec2::{config::Region, Client as Ec2Client};
-use buildsys::manifest::{self, ImageFeature};
-use coldsnap::{SnapshotUploader, SnapshotWaiter};
-use log::{debug, info, warn};
-use snafu::{ensure, OptionExt, ResultExt};
-
-const ROOT_DEVICE_NAME: &str = "/dev/xvda";
-const DATA_DEVICE_NAME: &str = "/dev/xvdb";
-
-// Features we assume/enable for the images.
-const VIRT_TYPE: &str = "hvm";
-const VOLUME_TYPE: &str = "gp2";
-const SRIOV: &str = "simple";
-const ENA: bool = true;
-
-#[derive(Debug)]
-pub(crate) struct RegisteredIds {
-    pub(crate) image_id: String,
-    pub(crate) snapshot_ids: Vec<String>,
-}
-
-/// Helper for `register_image`.  Inserts registered snapshot IDs into `cleanup_snapshot_ids` so
-/// they can be cleaned up on failure if desired.
-async fn _register_image( - ami_args: &AmiArgs, - region: &Region, - ebs_client: EbsClient, - ec2_client: &Ec2Client, - cleanup_snapshot_ids: &mut Vec, -) -> Result { - let variant_manifest = manifest::ManifestInfo::new(&ami_args.variant_manifest).context( - error::LoadVariantManifestSnafu { - path: &ami_args.variant_manifest, - }, - )?; - - let image_layout = variant_manifest - .image_layout() - .context(error::MissingImageLayoutSnafu { - path: &ami_args.variant_manifest, - })?; - - let (os_volume_size, data_volume_size) = image_layout.publish_image_sizes_gib(); - - let uefi_data = - std::fs::read_to_string(&ami_args.uefi_data).context(error::LoadUefiDataSnafu { - path: &ami_args.uefi_data, - })?; - - debug!("Uploading images into EBS snapshots in {}", region); - let uploader = SnapshotUploader::new(ebs_client); - let os_snapshot = - snapshot_from_image(&ami_args.os_image, &uploader, None, ami_args.no_progress) - .await - .context(error::SnapshotSnafu { - path: &ami_args.os_image, - region: region.as_ref(), - })?; - cleanup_snapshot_ids.push(os_snapshot.clone()); - - let mut data_snapshot = None; - if let Some(data_image) = &ami_args.data_image { - let snapshot = snapshot_from_image(data_image, &uploader, None, ami_args.no_progress) - .await - .context(error::SnapshotSnafu { - path: &ami_args.os_image, - region: region.as_ref(), - })?; - cleanup_snapshot_ids.push(snapshot.clone()); - data_snapshot = Some(snapshot); - } - - info!("Waiting for snapshots to become available in {}", region); - let waiter = SnapshotWaiter::new(ec2_client.clone()); - waiter - .wait(&os_snapshot, Default::default()) - .await - .context(error::WaitSnapshotSnafu { - snapshot_type: "root", - })?; - - if let Some(ref data_snapshot) = data_snapshot { - waiter - .wait(&data_snapshot, Default::default()) - .await - .context(error::WaitSnapshotSnafu { - snapshot_type: "data", - })?; - } - - // Prepare parameters for AMI registration request - let os_bdm = BlockDeviceMapping::builder() - .set_device_name(Some(ROOT_DEVICE_NAME.to_string())) - .set_ebs(Some( - EbsBlockDevice::builder() - .set_delete_on_termination(Some(true)) - .set_snapshot_id(Some(os_snapshot.clone())) - .set_volume_type(Some(VolumeType::from(VOLUME_TYPE))) - .set_volume_size(Some(os_volume_size)) - .build(), - )) - .build(); - - let mut data_bdm = None; - if let Some(ref data_snapshot) = data_snapshot { - let mut bdm = os_bdm.clone(); - bdm.device_name = Some(DATA_DEVICE_NAME.to_string()); - if let Some(ebs) = bdm.ebs.as_mut() { - ebs.snapshot_id = Some(data_snapshot.clone()); - ebs.volume_size = Some(data_volume_size); - } - data_bdm = Some(bdm); - } - - let mut block_device_mappings = vec![os_bdm]; - if let Some(data_bdm) = data_bdm { - block_device_mappings.push(data_bdm); - } - - let uefi_secure_boot_enabled = variant_manifest - .image_features() - .iter() - .flatten() - .any(|f| **f == ImageFeature::UefiSecureBoot); - - let (boot_mode, uefi_data) = if uefi_secure_boot_enabled { - (Some("uefi-preferred".into()), Some(uefi_data)) - } else { - (None, None) - }; - - info!("Making register image call in {}", region); - let register_response = ec2_client - .register_image() - .set_architecture(Some(ami_args.arch.clone())) - .set_block_device_mappings(Some(block_device_mappings)) - .set_boot_mode(boot_mode) - .set_uefi_data(uefi_data) - .set_description(ami_args.description.clone()) - .set_ena_support(Some(ENA)) - .set_name(Some(ami_args.name.clone())) - .set_root_device_name(Some(ROOT_DEVICE_NAME.to_string())) - 
.set_sriov_net_support(Some(SRIOV.to_string())) - .set_virtualization_type(Some(VIRT_TYPE.to_string())) - .send() - .await - .context(error::RegisterImageSnafu { - region: region.as_ref(), - })?; - - let image_id = register_response - .image_id - .context(error::MissingImageIdSnafu { - region: region.as_ref(), - })?; - - let mut snapshot_ids = vec![os_snapshot]; - if let Some(data_snapshot) = data_snapshot { - snapshot_ids.push(data_snapshot); - } - - Ok(RegisteredIds { - image_id, - snapshot_ids, - }) -} - -/// Uploads the given images into snapshots and registers an AMI using them as its block device -/// mapping. Deletes snapshots on failure. -pub(crate) async fn register_image( - ami_args: &AmiArgs, - region: &Region, - ebs_client: EbsClient, - ec2_client: &Ec2Client, -) -> Result { - info!("Registering '{}' in {}", ami_args.name, region); - let mut cleanup_snapshot_ids = Vec::new(); - let register_result = _register_image( - ami_args, - region, - ebs_client, - ec2_client, - &mut cleanup_snapshot_ids, - ) - .await; - - if register_result.is_err() { - for snapshot_id in cleanup_snapshot_ids { - if let Err(e) = ec2_client - .delete_snapshot() - .set_snapshot_id(Some(snapshot_id.clone())) - .send() - .await - { - warn!( - "While cleaning up, failed to delete snapshot {}: {}", - snapshot_id, e - ); - } - } - } - register_result -} - -/// Queries EC2 for the given AMI name. If found, returns Ok(Some(id)), if not returns Ok(None). -pub(crate) async fn get_ami_id( - name: S, - arch: &ArchitectureValues, - region: &Region, - ec2_client: &Ec2Client, -) -> Result> -where - S: Into, -{ - let describe_response = ec2_client - .describe_images() - .set_owners(Some(vec!["self".to_string()])) - .set_filters(Some(vec![ - Filter::builder() - .set_name(Some("name".to_string())) - .set_values(Some(vec![name.into()])) - .build(), - Filter::builder() - .set_name(Some("architecture".to_string())) - .set_values(Some(vec![arch.as_ref().to_string()])) - .build(), - Filter::builder() - .set_name(Some("image-type".to_string())) - .set_values(Some(vec!["machine".to_string()])) - .build(), - Filter::builder() - .set_name(Some("virtualization-type".to_string())) - .set_values(Some(vec![VIRT_TYPE.to_string()])) - .build(), - ])) - .send() - .await - .context(error::DescribeImagesSnafu { - region: region.as_ref(), - })?; - if let Some(mut images) = describe_response.images { - if images.is_empty() { - return Ok(None); - } - ensure!( - images.len() == 1, - error::MultipleImagesSnafu { - images: images - .into_iter() - .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) - .collect::>() - } - ); - let image = images.remove(0); - // If there is an image but we couldn't find the ID of it, fail rather than returning None, - // which would indicate no image. 
-        let id = image.image_id.context(error::MissingImageIdSnafu {
-            region: region.as_ref(),
-        })?;
-        Ok(Some(id))
-    } else {
-        Ok(None)
-    }
-}
-
-mod error {
-    use crate::aws::ami;
-    use aws_sdk_ec2::error::SdkError;
-    use aws_sdk_ec2::operation::{
-        describe_images::DescribeImagesError, register_image::RegisterImageError,
-    };
-    use snafu::Snafu;
-    use std::path::PathBuf;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Failed to describe images in {}: {}", region, source))]
-        DescribeImages {
-            region: String,
-            source: SdkError<DescribeImagesError>,
-        },
-
-        #[snafu(display("Failed to load variant manifest from {}: {}", path.display(), source))]
-        LoadVariantManifest {
-            path: PathBuf,
-            source: buildsys::manifest::Error,
-        },
-
-        #[snafu(display("Failed to load UEFI data from {}: {}", path.display(), source))]
-        LoadUefiData {
-            path: PathBuf,
-            source: std::io::Error,
-        },
-
-        #[snafu(display("Could not find image layout for {}", path.display()))]
-        MissingImageLayout { path: PathBuf },
-
-        #[snafu(display("Image response in {} did not include image ID", region))]
-        MissingImageId { region: String },
-
-        #[snafu(display("DescribeImages with unique filters returned multiple results: {}", images.join(", ")))]
-        MultipleImages { images: Vec<String> },
-
-        #[snafu(display("Failed to register image in {}: {}", region, source))]
-        RegisterImage {
-            region: String,
-            source: SdkError<RegisterImageError>,
-        },
-
-        #[snafu(display("Failed to upload snapshot from {} in {}: {}", path.display(), region, source))]
-        Snapshot {
-            path: PathBuf,
-            region: String,
-            source: ami::snapshot::Error,
-        },
-
-        #[snafu(display("{} snapshot did not become available: {}", snapshot_type, source))]
-        WaitSnapshot {
-            snapshot_type: String,
-            source: coldsnap::WaitError,
-        },
-    }
-}
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
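register.rs drives its uploads through the coldsnap crate: `SnapshotUploader` streams an image file into an EBS snapshot and `SnapshotWaiter` polls EC2 until the snapshot is usable. A rough sketch of that flow (the file name is a placeholder, and the clients are assumed to come from the usual aws-config setup):

    use aws_sdk_ebs::Client as EbsClient;
    use aws_sdk_ec2::Client as Ec2Client;
    use coldsnap::{SnapshotUploader, SnapshotWaiter};
    use std::path::Path;

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        let config = aws_config::load_from_env().await;
        let uploader = SnapshotUploader::new(EbsClient::new(&config));

        // Upload the image; pubsys passes the file name as the snapshot description
        // and leaves the volume size to be derived from the file.
        let snapshot_id = uploader
            .upload_from_file(Path::new("bottlerocket.img"), None, Some("bottlerocket.img"), None)
            .await?;

        // Block until EC2 reports the snapshot as completed.
        SnapshotWaiter::new(Ec2Client::new(&config))
            .wait(&snapshot_id, Default::default())
            .await?;
        println!("snapshot ready: {}", snapshot_id);
        Ok(())
    }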
diff --git a/tools/pubsys/src/aws/ami/snapshot.rs b/tools/pubsys/src/aws/ami/snapshot.rs
deleted file mode 100644
index 15b25611910..00000000000
--- a/tools/pubsys/src/aws/ami/snapshot.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-use coldsnap::SnapshotUploader;
-use indicatif::{ProgressBar, ProgressStyle};
-use snafu::{OptionExt, ResultExt};
-use std::path::Path;
-
-/// Create a progress bar to show status of snapshot blocks, if wanted.
-fn build_progress_bar(no_progress: bool, verb: &str) -> Result<Option<ProgressBar>> {
-    if no_progress {
-        return Ok(None);
-    }
-    let progress_bar = ProgressBar::new(0);
-    progress_bar.set_style(
-        ProgressStyle::default_bar()
-            .template(&[" ", verb, " [{bar:50.white/black}] {pos}/{len} ({eta})"].concat())
-            .context(error::ProgressBarTemplateSnafu)?
-            .progress_chars("=> "),
-    );
-    Ok(Some(progress_bar))
-}
-
-/// Uploads the given path into a snapshot.
-pub(crate) async fn snapshot_from_image<P>(
-    path: P,
-    uploader: &SnapshotUploader,
-    desired_size: Option<i64>,
-    no_progress: bool,
-) -> Result<String>
-where
-    P: AsRef<Path>,
-{
-    let path = path.as_ref();
-    let progress_bar = build_progress_bar(no_progress, "Uploading snapshot");
-    let filename = path
-        .file_name()
-        .context(error::InvalidImagePathSnafu { path })?
-        .to_string_lossy();
-
-    uploader
-        .upload_from_file(path, desired_size, Some(&filename), progress_bar?)
-        .await
-        .context(error::UploadSnapshotSnafu)
-}
-
-mod error {
-    use snafu::Snafu;
-    use std::path::PathBuf;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    #[allow(clippy::large_enum_variant)]
-    pub(crate) enum Error {
-        #[snafu(display("Invalid image path '{}'", path.display()))]
-        InvalidImagePath { path: PathBuf },
-
-        #[snafu(display("Failed to parse progress style template: {}", source))]
-        ProgressBarTemplate {
-            source: indicatif::style::TemplateError,
-        },
-
-        #[snafu(display("Failed to upload snapshot: {}", source))]
-        UploadSnapshot { source: coldsnap::UploadError },
-    }
-}
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs
deleted file mode 100644
index 9a2c7cd5360..00000000000
--- a/tools/pubsys/src/aws/ami/wait.rs
+++ /dev/null
@@ -1,139 +0,0 @@
-use crate::aws::client::build_client_config;
-use aws_sdk_ec2::{config::Region, types::ImageState, Client as Ec2Client};
-use log::info;
-use pubsys_config::AwsConfig as PubsysAwsConfig;
-use snafu::{ensure, ResultExt};
-use std::thread::sleep;
-use std::time::Duration;
-
-/// Waits for the given AMI ID to reach the given state, requiring it be in that state for
-/// `successes_required` checks in a row.
-pub(crate) async fn wait_for_ami(
-    id: &str,
-    region: &Region,
-    sts_region: &Region,
-    state: &str,
-    successes_required: u8,
-    pubsys_aws_config: &PubsysAwsConfig,
-) -> Result<()> {
-    let mut successes = 0;
-    let max_attempts = 90;
-    let mut attempts = 0;
-    let seconds_between_attempts = 2;
-
-    loop {
-        attempts += 1;
-        // Stop if we're over max, unless we're on a success streak, then give it some wiggle room.
-        ensure!(
-            (attempts - successes) <= max_attempts,
-            error::MaxAttemptsSnafu {
-                id,
-                max_attempts,
-                region: region.as_ref(),
-            }
-        );
-
-        // Use a new client each time so we have more confidence that different endpoints can see
-        // the new AMI.
-        let client_config = build_client_config(region, sts_region, pubsys_aws_config).await;
-        let ec2_client = Ec2Client::new(&client_config);
-        let describe_response = ec2_client
-            .describe_images()
-            .set_image_ids(Some(vec![id.to_string()]))
-            .send()
-            .await
-            .context(error::DescribeImagesSnafu {
-                region: region.as_ref(),
-            })?;
-
-        // The response contains an Option<Vec<Image>>, so we have to check that we got a
-        // list at all, and then that the list contains the ID in question.
-        if let Some(images) = describe_response.images {
-            let mut saw_it = false;
-            for image in images {
-                if let Some(found_id) = image.image_id {
-                    if let Some(found_state) = image.state {
-                        if id == found_id && ImageState::from(state) == found_state {
-                            // Success; check if we have enough to declare victory.
-                            saw_it = true;
-                            successes += 1;
-                            if successes >= successes_required {
-                                info!("Found {} {} in {}", id, state, region);
-                                return Ok(());
-                            }
-                            break;
-                        }
-                        // If the state shows us the AMI failed, we know we'll never hit the
-                        // desired state.  (Unless they desired "error", which will be caught
-                        // above.)
- match &found_state { - ImageState::Invalid - | ImageState::Deregistered - | ImageState::Failed - | ImageState::Error => error::StateSnafu { - id, - state: found_state.as_ref(), - region: region.as_ref(), - } - .fail(), - _ => Ok(()), - }?; - } - } - } - if !saw_it { - // Did not find image in list; reset success count and try again (if we have spare attempts) - successes = 0; - } - } else { - // Did not receive list; reset success count and try again (if we have spare attempts) - successes = 0; - }; - - if attempts % 5 == 1 { - info!( - "Waiting for {} in {} to be {}... (attempt {} of {})", - id, region, state, attempts, max_attempts - ); - } - sleep(Duration::from_secs(seconds_between_attempts)); - } -} - -mod error { - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::describe_images::DescribeImagesError; - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - #[allow(clippy::large_enum_variant)] - pub(crate) enum Error { - #[snafu(display("Failed to describe images in {}: {}", region, source))] - DescribeImages { - region: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to reach desired state within {} attempts for {} in {}", - max_attempts, - id, - region - ))] - MaxAttempts { - max_attempts: u8, - id: String, - region: String, - }, - - #[snafu(display("Image '{}' went to '{}' state in {}", id, state, region))] - State { - id: String, - state: String, - region: String, - }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs deleted file mode 100644 index 770504ac916..00000000000 --- a/tools/pubsys/src/aws/client.rs +++ /dev/null @@ -1,71 +0,0 @@ -use aws_config::default_provider::credentials::default_provider; -use aws_config::profile::ProfileFileCredentialsProvider; -use aws_config::sts::AssumeRoleProvider; -use aws_config::SdkConfig; -use aws_credential_types::provider::SharedCredentialsProvider; -use aws_types::region::Region; -use pubsys_config::AwsConfig as PubsysAwsConfig; - -/// Create an AWS client config using the given regions and pubsys config. -pub(crate) async fn build_client_config( - region: &Region, - sts_region: &Region, - pubsys_aws_config: &PubsysAwsConfig, -) -> SdkConfig { - let maybe_profile = pubsys_aws_config.profile.clone(); - let maybe_role = pubsys_aws_config.role.clone(); - let maybe_regional_role = pubsys_aws_config - .region - .get(region.as_ref()) - .and_then(|r| r.role.clone()); - let base_provider = base_provider(&maybe_profile).await; - - let config = match (&maybe_role, &maybe_regional_role) { - (None, None) => aws_config::from_env().credentials_provider(base_provider), - _ => { - let assume_roles = maybe_role.iter().chain(maybe_regional_role.iter()).cloned(); - let provider = - build_provider(sts_region, assume_roles.clone(), base_provider.clone()).await; - aws_config::from_env().credentials_provider(provider) - } - }; - - config.region(region.clone()).load().await -} - -/// Chains credentials providers to assume the given roles in order. -/// The region given should be the one in which you want to talk to STS to get temporary -/// credentials, not the region in which you want to talk to a service endpoint like EC2. 
This is -/// needed because you may be assuming a role in an opt-in region from an account that has not -/// opted-in to that region, and you need to get session credentials from an STS endpoint in a -/// region to which you have access in the base account -async fn build_provider( - sts_region: &Region, - assume_roles: impl Iterator, - base_provider: SharedCredentialsProvider, -) -> SharedCredentialsProvider { - let mut provider = base_provider; - for assume_role in assume_roles { - provider = SharedCredentialsProvider::new( - AssumeRoleProvider::builder(assume_role) - .region(sts_region.clone()) - .session_name("pubsys") - .build(provider.clone()), - ) - } - provider -} - -/// If the user specified a profile, use that, otherwise use the default -/// credentials mechanisms. -async fn base_provider(maybe_profile: &Option) -> SharedCredentialsProvider { - if let Some(profile) = maybe_profile { - SharedCredentialsProvider::new( - ProfileFileCredentialsProvider::builder() - .profile_name(profile) - .build(), - ) - } else { - SharedCredentialsProvider::new(default_provider().await) - } -} diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs deleted file mode 100644 index 80e06de51a0..00000000000 --- a/tools/pubsys/src/aws/mod.rs +++ /dev/null @@ -1,42 +0,0 @@ -use aws_sdk_ec2::config::Region; -use aws_sdk_ec2::types::ArchitectureValues; - -#[macro_use] -pub(crate) mod client; - -pub(crate) mod ami; -pub(crate) mod promote_ssm; -pub(crate) mod publish_ami; -pub(crate) mod ssm; -pub(crate) mod validate_ami; -pub(crate) mod validate_ssm; - -/// Builds a Region from the given region name. -fn region_from_string(name: &str) -> Region { - Region::new(name.to_owned()) -} - -/// Parses the given string as an architecture, mapping values to the ones used in EC2. -pub(crate) fn parse_arch(input: &str) -> Result { - match input { - "x86_64" | "amd64" => Ok(ArchitectureValues::X8664), - "arm64" | "aarch64" => Ok(ArchitectureValues::Arm64), - _ => error::ParseArchSnafu { - input, - msg: "unknown architecture", - } - .fail(), - } -} - -mod error { - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Failed to parse arch '{}': {}", input, msg))] - ParseArch { input: String, msg: String }, - } -} -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs deleted file mode 100644 index 21f4ca1d413..00000000000 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ /dev/null @@ -1,550 +0,0 @@ -//! The promote_ssm module owns the 'promote-ssm' subcommand and controls the process of copying -//! 
SSM parameters from one version to another - -use crate::aws::client::build_client_config; -use crate::aws::ssm::template::RenderedParametersMap; -use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; -use crate::aws::validate_ssm::parse_parameters; -use crate::aws::{parse_arch, region_from_string}; -use crate::Args; -use aws_sdk_ec2::types::ArchitectureValues; -use aws_sdk_ssm::{config::Region, Client as SsmClient}; -use clap::Parser; -use log::{info, trace}; -use pubsys_config::InfraConfig; -use snafu::{ensure, ResultExt}; -use std::collections::HashMap; -use std::path::PathBuf; - -/// Copies sets of SSM parameters -#[derive(Debug, Parser)] -pub(crate) struct PromoteArgs { - /// The architecture of the machine image - #[arg(long, value_parser = parse_arch)] - arch: ArchitectureValues, - - /// The variant name for the current build - #[arg(long)] - variant: String, - - /// Version number (or string) to copy from - #[arg(long)] - source: String, - - /// Version number (or string) to copy to - #[arg(long)] - target: String, - - /// Comma-separated list of regions to promote in, overriding Infra.toml - #[arg(long, value_delimiter = ',')] - regions: Vec, - - /// File holding the parameter templates - #[arg(long)] - template_path: PathBuf, - - /// If set, contains the path to the file holding the original SSM parameters - /// and where the newly promoted parameters will be written - #[arg(long)] - ssm_parameter_output: Option, -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { - info!( - "Promoting SSM parameters from {} to {}", - promote_args.source, promote_args.target - ); - - // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(error::ConfigSnafu)?; - - trace!("Parsed infra config: {:#?}", infra_config); - let aws = infra_config.aws.unwrap_or_default(); - let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or(""); - - // If the user gave an override list of regions, use that, otherwise use what's in the config. 
- let regions = if !promote_args.regions.is_empty() { - promote_args.regions.clone() - } else { - aws.regions.clone().into() - } - .into_iter() - .map(|name| region_from_string(&name)) - .collect::>(); - - ensure!( - !regions.is_empty(), - error::MissingConfigSnafu { - missing: "aws.regions" - } - ); - let base_region = ®ions[0]; - - let mut ssm_clients = HashMap::with_capacity(regions.len()); - for region in ®ions { - let client_config = build_client_config(region, base_region, &aws).await; - let ssm_client = SsmClient::new(&client_config); - ssm_clients.insert(region.clone(), ssm_client); - } - - // Template setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - // Non-image-specific context for building and rendering templates - let source_build_context = BuildContext { - variant: &promote_args.variant, - arch: promote_args.arch.as_str(), - image_version: &promote_args.source, - }; - - let target_build_context = BuildContext { - variant: &promote_args.variant, - arch: promote_args.arch.as_str(), - image_version: &promote_args.target, - }; - - info!( - "Parsing SSM parameter templates from {}", - promote_args.template_path.display() - ); - // Doesn't matter which build context we use to find template files because version isn't used - // in their naming - let template_parameters = - template::get_parameters(&promote_args.template_path, &source_build_context) - .context(error::FindTemplatesSnafu)?; - - if template_parameters.parameters.is_empty() { - info!( - "No parameters for this arch/variant in {}", - promote_args.template_path.display() - ); - return Ok(()); - } - - // Render parameter names into maps of {template string => rendered value}. We need the - // template strings so we can associate source parameters with target parameters that came - // from the same template, so we know what to copy. - let source_parameter_map = - template::render_parameter_names(&template_parameters, ssm_prefix, &source_build_context) - .context(error::RenderTemplatesSnafu)?; - let target_parameter_map = - template::render_parameter_names(&template_parameters, ssm_prefix, &target_build_context) - .context(error::RenderTemplatesSnafu)?; - - // Parameters are the same in each region, so we need to associate each region with each of - // the parameter names so we can fetch them. - let source_keys: Vec = regions - .iter() - .flat_map(|region| { - source_parameter_map - .values() - .map(move |name| SsmKey::new(region.clone(), name.clone())) - }) - .collect(); - let target_keys: Vec = regions - .iter() - .flat_map(|region| { - target_parameter_map - .values() - .map(move |name| SsmKey::new(region.clone(), name.clone())) - }) - .collect(); - - // SSM get/compare =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - info!("Getting current SSM parameters for source and target names"); - let current_source_parameters = ssm::get_parameters(&source_keys, &ssm_clients) - .await - .context(error::FetchSsmSnafu)?; - trace!( - "Current source SSM parameters: {:#?}", - current_source_parameters - ); - ensure!( - !current_source_parameters.is_empty(), - error::EmptySourceSnafu { - version: &promote_args.source - } - ); - - let current_target_parameters = ssm::get_parameters(&target_keys, &ssm_clients) - .await - .context(error::FetchSsmSnafu)?; - trace!( - "Current target SSM parameters: {:#?}", - current_target_parameters - ); - - // Build a map of rendered source parameter names to rendered target parameter names. 
This - // will let us find which target parameters to set based on the source parameter names we get - // back from SSM. - let source_target_map: HashMap<&String, &String> = source_parameter_map - .iter() - .map(|(k, v)| (v, &target_parameter_map[k])) - .collect(); - - // Show the difference between source and target parameters in SSM. We use the - // source_target_map we built above to map source keys to target keys (generated from the same - // template) so that the diff code has common keys to compare. - let set_parameters = key_difference( - ¤t_source_parameters - .into_iter() - .map(|(key, value)| { - ( - SsmKey::new(key.region, source_target_map[&key.name].to_string()), - value, - ) - }) - .collect(), - ¤t_target_parameters, - ); - if set_parameters.is_empty() { - info!("No changes necessary."); - return Ok(()); - } - - // If an output file path was given, read the existing parameters in `ssm_parameter_output` and - // write the newly promoted parameters to `ssm_parameter_output` along with the original - // parameters - if let Some(ssm_parameter_output) = &promote_args.ssm_parameter_output { - append_rendered_parameters(ssm_parameter_output, &set_parameters).await?; - } - - // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - info!("Setting updated SSM parameters."); - ssm::set_parameters(&set_parameters, &ssm_clients) - .await - .context(error::SetSsmSnafu)?; - - info!("Validating whether live parameters in SSM reflect changes."); - ssm::validate_parameters(&set_parameters, &ssm_clients) - .await - .context(error::ValidateSsmSnafu)?; - - info!("All parameters match requested values."); - Ok(()) -} - -/// Read parameters in given file, add newly promoted parameters, and write combined parameters to -/// the given file -async fn append_rendered_parameters( - ssm_parameters_output: &PathBuf, - set_parameters: &HashMap, -) -> Result<()> { - // If the file doesn't exist, assume that there are no existing parameters - let parsed_parameters = parse_parameters(&ssm_parameters_output.to_owned()) - .await - .or_else({ - |e| match e { - crate::aws::validate_ssm::Error::ReadExpectedParameterFile { .. } => { - Ok(HashMap::new()) - } - _ => Err(e), - } - }) - .context(error::ParseExistingSsmParametersSnafu { - path: ssm_parameters_output, - })? - // SsmKey contains region information, so we can lose the top-level region. - .into_values() - .fold(HashMap::new(), |mut acc, params| { - acc.extend(params); - acc - }); - - let combined_parameters = merge_parameters(parsed_parameters, set_parameters); - - write_rendered_parameters( - ssm_parameters_output, - &RenderedParametersMap::from(combined_parameters).rendered_parameters, - ) - .context(error::WriteRenderedSsmParametersSnafu { - path: ssm_parameters_output, - })?; - - Ok(()) -} - -/// Return a HashMap of Region mapped to a HashMap of SsmKey, String pairs, representing the newly -/// promoted parameters as well as the original parameters. In case of a parameter collision, -/// the parameter takes the promoted value. -fn merge_parameters( - source_parameters: HashMap, - set_parameters: &HashMap, -) -> HashMap> { - let mut combined_parameters = HashMap::new(); - - source_parameters - .into_iter() - // Process the `set_parameters` second so that they overwrite existing values. 
- .chain(set_parameters.clone()) - .for_each(|(ssm_key, ssm_value)| { - combined_parameters - // The `entry()` API demands that we clone - .entry(ssm_key.region.clone()) - .or_insert(HashMap::new()) - .insert(ssm_key, ssm_value); - }); - - combined_parameters -} - -mod error { - use std::path::PathBuf; - - use crate::aws::{ - ssm::{ssm, template}, - validate_ssm, - }; - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error reading config: {}", source))] - Config { - source: pubsys_config::Error, - }, - - #[snafu(display("Found no parameters in source version {}", version))] - EmptySource { - version: String, - }, - - #[snafu(display("Failed to fetch parameters from SSM: {}", source))] - FetchSsm { - source: ssm::Error, - }, - - #[snafu(display("Failed to find templates: {}", source))] - FindTemplates { - source: template::Error, - }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { - missing: String, - }, - - #[snafu(display("Failed to render templates: {}", source))] - RenderTemplates { - source: template::Error, - }, - - #[snafu(display("Failed to set SSM parameters: {}", source))] - SetSsm { - source: ssm::Error, - }, - - ValidateSsm { - source: ssm::Error, - }, - - #[snafu(display( - "Failed to parse existing SSM parameters at path {:?}: {}", - path, - source, - ))] - ParseExistingSsmParameters { - source: validate_ssm::error::Error, - path: PathBuf, - }, - - #[snafu(display("Failed to parse rendered SSM parameters to JSON: {}", source))] - ParseRenderedSsmParameters { - source: serde_json::Error, - }, - - #[snafu(display("Failed to write rendered SSM parameters to {}: {}", path.display(), source))] - WriteRenderedSsmParameters { - path: PathBuf, - source: crate::aws::ssm::Error, - }, - } -} -pub(crate) use error::Error; - -use super::ssm::write_rendered_parameters; -type Result = std::result::Result; - -#[cfg(test)] -mod test { - use std::collections::HashMap; - - use crate::aws::{promote_ssm::merge_parameters, ssm::SsmKey}; - use aws_sdk_ssm::config::Region; - - #[test] - fn combined_parameters() { - let existing_parameters = HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test4-unpromoted-parameter-name".to_string(), - ), - "test4-unpromoted-parameter-value".to_string(), - ), - ]); - let set_parameters = HashMap::from([ - ( - SsmKey::new( - Region::new("us-west-2"), - "test1-parameter-name-promoted".to_string(), - ), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-west-2"), - "test2-parameter-name-promoted".to_string(), - ), - "test2-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test3-parameter-name-promoted".to_string(), - ), - "test3-parameter-value".to_string(), - ), - ]); - let map = merge_parameters(existing_parameters, &set_parameters); - let expected_map = HashMap::from([ - ( - Region::new("us-west-2"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), 
"test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-west-2"), - "test1-parameter-name-promoted".to_string(), - ), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-west-2"), - "test2-parameter-name-promoted".to_string(), - ), - "test2-parameter-value".to_string(), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test3-parameter-name-promoted".to_string(), - ), - "test3-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test4-unpromoted-parameter-name".to_string(), - ), - "test4-unpromoted-parameter-value".to_string(), - ), - ]), - ), - ]); - assert_eq!(map, expected_map); - } - - #[test] - fn combined_parameters_overwrite() { - let existing_parameters = HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - ), - ]); - let set_parameters = HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value-new".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value-new".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test3-parameter-name-promoted".to_string(), - ), - "test3-parameter-value".to_string(), - ), - ]); - let map = merge_parameters(existing_parameters, &set_parameters); - let expected_map = HashMap::from([ - ( - Region::new("us-west-2"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value-new".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value-new".to_string(), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test3-parameter-name-promoted".to_string(), - ), - "test3-parameter-value".to_string(), - ), - ]), - ), - ]); - assert_eq!(map, expected_map); - } -} diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs deleted file mode 100644 index 578bdee4898..00000000000 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ /dev/null @@ -1,731 +0,0 @@ -//! The publish_ami module owns the 'publish-ami' subcommand and controls the process of granting -//! and revoking access to EC2 AMIs. 
- -use crate::aws::ami::launch_permissions::{get_launch_permissions, LaunchPermissionDef}; -use crate::aws::ami::wait::{self, wait_for_ami}; -use crate::aws::ami::Image; -use crate::aws::client::build_client_config; -use crate::aws::region_from_string; -use crate::Args; -use aws_sdk_ec2::error::{ProvideErrorMetadata, SdkError}; -use aws_sdk_ec2::operation::{ - modify_image_attribute::{ModifyImageAttributeError, ModifyImageAttributeOutput}, - modify_snapshot_attribute::{ModifySnapshotAttributeError, ModifySnapshotAttributeOutput}, -}; -use aws_sdk_ec2::types::{ - ImageAttributeName, OperationType, PermissionGroup, SnapshotAttributeName, -}; -use aws_sdk_ec2::{config::Region, Client as Ec2Client}; -use clap::{Args as ClapArgs, Parser}; -use futures::future::{join, ready}; -use futures::stream::{self, StreamExt}; -use log::{debug, error, info, trace}; -use pubsys_config::InfraConfig; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::{HashMap, HashSet}; -use std::fs::File; -use std::iter::FromIterator; -use std::path::PathBuf; - -#[derive(Debug, Parser)] -#[group(id = "who", required = true, multiple = true)] -pub(crate) struct ModifyOptions { - /// User IDs to give/remove access - #[arg(long, value_delimiter = ',', group = "who")] - pub(crate) user_ids: Vec, - /// Group names to give/remove access - #[arg(long, value_delimiter = ',', group = "who")] - pub(crate) group_names: Vec, - /// Organization arns to give/remove access - #[arg(long, value_delimiter = ',', group = "who")] - pub(crate) organization_arns: Vec, - /// Organizational unit arns to give/remove access - #[arg(long, value_delimiter = ',', group = "who")] - pub(crate) organizational_unit_arns: Vec, -} - -/// Grants or revokes permissions to Bottlerocket AMIs -#[derive(Debug, ClapArgs)] -#[group(id = "mode", required = true, multiple = false)] -pub(crate) struct Who { - /// Path to the JSON file containing regional AMI IDs to modify - #[arg(long)] - ami_input: PathBuf, - - /// Comma-separated list of regions to publish in, overriding Infra.toml; given regions must be - /// in the --ami-input file - #[arg(long, value_delimiter = ',')] - regions: Vec, - - /// Grant access to the given users/groups - #[arg(long, group = "mode")] - grant: bool, - /// Revoke access from the given users/groups - #[arg(long, group = "mode")] - revoke: bool, - - #[command(flatten)] - modify_opts: ModifyOptions, -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, publish_args: &Who) -> Result<()> { - let (operation, description) = if publish_args.grant { - (OperationType::Add, "granting access") - } else if publish_args.revoke { - (OperationType::Remove, "revoking access") - } else { - unreachable!("developer error: --grant and --revoke not required/exclusive"); - }; - - info!( - "Using AMI data from path: {}", - publish_args.ami_input.display() - ); - let file = File::open(&publish_args.ami_input).context(error::FileSnafu { - op: "open", - path: &publish_args.ami_input, - })?; - let mut ami_input: HashMap = - serde_json::from_reader(file).context(error::DeserializeSnafu { - path: &publish_args.ami_input, - })?; - trace!("Parsed AMI input: {:?}", ami_input); - - // pubsys will not create a file if it did not create AMIs, so we should only have an empty - // file if a user created one manually, and they shouldn't be creating an empty file. 
- ensure!( - !ami_input.is_empty(), - error::InputSnafu { - path: &publish_args.ami_input - } - ); - - // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) - .context(error::ConfigSnafu)?; - trace!("Using infra config: {:?}", infra_config); - - let aws = infra_config.aws.unwrap_or_default(); - - // If the user gave an override list of regions, use that, otherwise use what's in the config. - let regions = if !publish_args.regions.is_empty() { - publish_args.regions.clone() - } else { - aws.regions.clone().into() - }; - ensure!( - !regions.is_empty(), - error::MissingConfigSnafu { - missing: "aws.regions" - } - ); - let base_region = region_from_string(®ions[0]); - - // Check that the requested regions are a subset of the regions we *could* publish from the AMI - // input JSON. - let requested_regions = HashSet::from_iter(regions.iter()); - let known_regions = HashSet::<&String>::from_iter(ami_input.keys()); - ensure!( - requested_regions.is_subset(&known_regions), - error::UnknownRegionsSnafu { - regions: requested_regions - .difference(&known_regions) - .map(|s| s.to_string()) - .collect::>(), - } - ); - - // Parse region names - let mut amis = HashMap::with_capacity(regions.len()); - for name in regions { - let image = ami_input - .remove(&name) - // This could only happen if someone removes the check above... - .with_context(|| error::UnknownRegionsSnafu { - regions: vec![name.clone()], - })?; - let region = region_from_string(&name); - amis.insert(region, image); - } - - // We make a map storing our regional clients because they're used in a future and need to - // live until the future is resolved. - let mut ec2_clients = HashMap::with_capacity(amis.len()); - for region in amis.keys() { - let client_config = build_client_config(region, &base_region, &aws).await; - let ec2_client = Ec2Client::new(&client_config); - ec2_clients.insert(region.clone(), ec2_client); - } - - // If AMIs aren't in "available" state, we can get a DescribeImages response that includes - // most of the data we need, but not snapshot IDs. - if amis.len() == 1 { - info!("Waiting for AMI to be available before changing its permissions") - } else { - info!( - "Waiting for all {} AMIs to be available before changing any of their permissions", - amis.len(), - ); - } - let mut wait_requests = Vec::with_capacity(amis.len()); - for (region, image) in &amis { - let wait_future = wait_for_ami(&image.id, region, &base_region, "available", 1, &aws); - // Store the region and ID so we can include it in errors - let info_future = ready((region.clone(), image.id.clone())); - wait_requests.push(join(info_future, wait_future)); - } - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(wait_requests).buffer_unordered(4); - let wait_responses: Vec<((Region, String), std::result::Result<(), wait::Error>)> = - request_stream.collect().await; - - // Make sure waits succeeded and AMIs are available. 
- for ((region, image_id), wait_response) in wait_responses { - wait_response.context(error::WaitAmiSnafu { - id: &image_id, - region: region.as_ref(), - })?; - } - - let snapshots = get_regional_snapshots(&amis, &ec2_clients).await?; - trace!("Found snapshots: {:?}", snapshots); - - info!( - "Updating all snapshot permissions before changing any AMI permissions - {}", - description - ); - modify_regional_snapshots( - &publish_args.modify_opts, - &operation, - &snapshots, - &ec2_clients, - ) - .await?; - - info!("Updating AMI permissions - {}", description); - modify_regional_images( - &publish_args.modify_opts, - &operation, - &mut amis, - &ec2_clients, - ) - .await?; - - write_amis( - &publish_args.ami_input, - &amis - .into_iter() - .map(|(region, image)| (region.to_string(), image)) - .collect::>(), - )?; - - Ok(()) -} - -pub(crate) fn write_amis(path: &PathBuf, amis: &HashMap) -> Result<()> { - let file = File::create(path).context(error::FileSnafu { - op: "write AMIs to file", - path, - })?; - serde_json::to_writer_pretty(file, &amis).context(error::SerializeSnafu { path })?; - info!("Wrote AMI data to {}", path.display()); - - Ok(()) -} - -/// Returns the snapshot IDs associated with the given AMI. -pub(crate) async fn get_snapshots( - image_id: &str, - region: &Region, - ec2_client: &Ec2Client, -) -> Result> { - let describe_response = ec2_client - .describe_images() - .set_image_ids(Some(vec![image_id.to_string()])) - .send() - .await - .context(error::DescribeImagesSnafu { - region: region.as_ref(), - })?; - - // Get the image description, ensuring we only have one. - let mut images = describe_response - .images - .context(error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "images", - })?; - ensure!( - !images.is_empty(), - error::MissingImageSnafu { - region: region.as_ref(), - image_id: image_id.to_string(), - } - ); - ensure!( - images.len() == 1, - error::MultipleImagesSnafu { - region: region.as_ref(), - images: images - .into_iter() - .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) - .collect::>() - } - ); - let image = images.remove(0); - - // Look into the block device mappings for snapshots. - let bdms = image - .block_device_mappings - .context(error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "block_device_mappings", - })?; - ensure!( - !bdms.is_empty(), - error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "non-empty block_device_mappings" - } - ); - let mut snapshot_ids = Vec::with_capacity(bdms.len()); - for bdm in bdms { - let ebs = bdm.ebs.context(error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "ebs in block_device_mappings", - })?; - let snapshot_id = ebs.snapshot_id.context(error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "snapshot_id in block_device_mappings.ebs", - })?; - snapshot_ids.push(snapshot_id); - } - - Ok(snapshot_ids) -} - -/// Returns a regional mapping of snapshot IDs associated with the given AMIs. -async fn get_regional_snapshots( - amis: &HashMap, - clients: &HashMap, -) -> Result>> { - // Build requests for image information. 
- let mut snapshots_requests = Vec::with_capacity(amis.len()); - for (region, image) in amis { - let ec2_client = &clients[region]; - - let snapshots_future = get_snapshots(&image.id, region, ec2_client); - - // Store the region so we can include it in errors - let info_future = ready(region.clone()); - snapshots_requests.push(join(info_future, snapshots_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(snapshots_requests).buffer_unordered(4); - let snapshots_responses: Vec<(Region, Result>)> = request_stream.collect().await; - - // For each described image, get the snapshot IDs from the block device mappings. - let mut snapshots = HashMap::with_capacity(amis.len()); - for (region, snapshot_ids) in snapshots_responses { - let snapshot_ids = snapshot_ids?; - snapshots.insert(region, snapshot_ids); - } - - Ok(snapshots) -} - -/// Modify createVolumePermission for the given users/groups on the given snapshots. The -/// `operation` should be "add" or "remove" to allow/deny permission. -pub(crate) async fn modify_snapshots( - modify_opts: &ModifyOptions, - operation: &OperationType, - snapshot_ids: &[String], - ec2_client: &Ec2Client, - region: &Region, -) -> Result<()> { - let mut requests = Vec::new(); - for snapshot_id in snapshot_ids { - let response_future = ec2_client - .modify_snapshot_attribute() - .set_attribute(Some(SnapshotAttributeName::CreateVolumePermission)) - .set_user_ids( - (!modify_opts.user_ids.is_empty()).then_some(modify_opts.user_ids.clone()), - ) - .set_group_names( - (!modify_opts.group_names.is_empty()).then_some(modify_opts.group_names.clone()), - ) - .set_operation_type(Some(operation.clone())) - .set_snapshot_id(Some(snapshot_id.clone())) - .send(); - // Store the snapshot_id so we can include it in any errors - let info_future = ready(snapshot_id.to_string()); - requests.push(join(info_future, response_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(requests).buffer_unordered(4); - let responses: Vec<( - String, - std::result::Result>, - )> = request_stream.collect().await; - - for (snapshot_id, response) in responses { - response.context(error::ModifyImageAttributeSnafu { - snapshot_id, - region: region.as_ref(), - })?; - } - - Ok(()) -} - -/// Modify createVolumePermission for the given users/groups, across all of the snapshots in the -/// given regional mapping. The `operation` should be "add" or "remove" to allow/deny permission. -pub(crate) async fn modify_regional_snapshots( - modify_opts: &ModifyOptions, - operation: &OperationType, - snapshots: &HashMap>, - clients: &HashMap, -) -> Result<()> { - // Build requests to modify snapshot attributes. - let mut requests = Vec::new(); - for (region, snapshot_ids) in snapshots { - let ec2_client = &clients[region]; - let modify_snapshot_future = - modify_snapshots(modify_opts, operation, snapshot_ids, ec2_client, region); - - // Store the region and snapshot ID so we can include it in errors - let info_future = ready((region.clone(), snapshot_ids.clone())); - requests.push(join(info_future, modify_snapshot_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. 
- let request_stream = stream::iter(requests).buffer_unordered(4); - - #[allow(clippy::type_complexity)] - let responses: Vec<((Region, Vec), Result<()>)> = request_stream.collect().await; - - // Count up successes and failures so we can give a clear total in the final error message. - let mut error_count = 0u16; - let mut success_count = 0u16; - for ((region, snapshot_ids), response) in responses { - match response { - Ok(()) => { - success_count += 1; - debug!( - "Modified permissions in {} for snapshots [{}]", - region.as_ref(), - snapshot_ids.join(", "), - ); - } - Err(e) => { - error_count += 1; - if let Error::ModifyImageAttribute { source: err, .. } = e { - error!( - "Failed to modify permissions in {} for snapshots [{}]: {:?}", - region.as_ref(), - snapshot_ids.join(", "), - err.into_service_error().code().unwrap_or("unknown"), - ); - } - } - } - } - - ensure!( - error_count == 0, - error::ModifySnapshotAttributesSnafu { - error_count, - success_count, - } - ); - - Ok(()) -} - -/// Modify launchPermission for the given users/groups on the given images. The `operation` -/// should be "add" or "remove" to allow/deny permission. -pub(crate) async fn modify_image( - modify_opts: &ModifyOptions, - operation: &OperationType, - image_id: &str, - ec2_client: &Ec2Client, -) -> std::result::Result> { - ec2_client - .modify_image_attribute() - .set_attribute(Some( - ImageAttributeName::LaunchPermission.as_ref().to_string(), - )) - .set_user_ids((!modify_opts.user_ids.is_empty()).then_some(modify_opts.user_ids.clone())) - .set_user_groups( - (!modify_opts.group_names.is_empty()).then_some(modify_opts.group_names.clone()), - ) - .set_organization_arns( - (!modify_opts.organization_arns.is_empty()) - .then_some(modify_opts.organization_arns.clone()), - ) - .set_organizational_unit_arns( - (!modify_opts.organizational_unit_arns.is_empty()) - .then_some(modify_opts.organizational_unit_arns.clone()), - ) - .set_operation_type(Some(operation.clone())) - .set_image_id(Some(image_id.to_string())) - .send() - .await -} - -/// Modify launchPermission for the given users/groups, across all of the images in the given -/// regional mapping. The `operation` should be "add" or "remove" to allow/deny permission. -pub(crate) async fn modify_regional_images( - modify_opts: &ModifyOptions, - operation: &OperationType, - images: &mut HashMap, - clients: &HashMap, -) -> Result<()> { - let mut requests = Vec::new(); - for (region, image) in &mut *images { - let image_id = &image.id; - let ec2_client = &clients[region]; - - let modify_image_future = modify_image(modify_opts, operation, image_id, ec2_client); - - // Store the region and image ID so we can include it in errors - let info_future = ready((region.as_ref().to_string(), image_id.clone())); - requests.push(join(info_future, modify_image_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(requests).buffer_unordered(4); - #[allow(clippy::type_complexity)] - let responses: Vec<( - (String, String), - std::result::Result>, - )> = request_stream.collect().await; - - // Count up successes and failures so we can give a clear total in the final error message. 
- let mut error_count = 0u16; - let mut success_count = 0u16; - for ((region, image_id), modify_image_response) in responses { - match modify_image_response { - Ok(_) => { - success_count += 1; - info!("Modified permissions of image {} in {}", image_id, region); - - // Set the `public` and `launch_permissions` fields for the Image object - let image = images.get_mut(&Region::new(region.clone())).ok_or( - error::Error::MissingRegion { - region: region.clone(), - }, - )?; - let launch_permissions: Vec = get_launch_permissions( - &clients[&Region::new(region.clone())], - region.as_ref(), - &image_id, - ) - .await - .context(error::DescribeImageAttributeSnafu { - image_id: image_id.clone(), - region: region.to_string(), - })?; - - // If the launch permissions contain the group `all` after the modification, - // the image is public - image.public = Some(launch_permissions.iter().any(|launch_permission| { - launch_permission - == &LaunchPermissionDef::Group(PermissionGroup::All.as_str().to_string()) - })); - image.launch_permissions = Some(launch_permissions); - } - Err(e) => { - error_count += 1; - error!( - "Modifying permissions of {} in {} failed: {}", - image_id, - region, - e.into_service_error().code().unwrap_or("unknown"), - ); - } - } - } - - ensure!( - error_count == 0, - error::ModifyImagesAttributesSnafu { - error_count, - success_count, - } - ); - - Ok(()) -} - -mod error { - use crate::aws::ami; - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::{ - describe_images::DescribeImagesError, modify_image_attribute::ModifyImageAttributeError, - modify_snapshot_attribute::ModifySnapshotAttributeError, - }; - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display( - "Failed to describe image attributes for image {} in region {}: {}", - image_id, - region, - source - ))] - DescribeImageAttribute { - image_id: String, - region: String, - source: crate::aws::ami::launch_permissions::Error, - }, - - #[snafu(display("Failed to describe images in {}: {}", region, source))] - DescribeImages { - region: String, - source: SdkError, - }, - - #[snafu(display("Failed to deserialize input from '{}': {}", path.display(), source))] - Deserialize { - path: PathBuf, - source: serde_json::Error, - }, - - #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))] - File { - op: String, - path: PathBuf, - source: io::Error, - }, - - #[snafu(display("Input '{}' is empty", path.display()))] - Input { path: PathBuf }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, - - #[snafu(display("Failed to find given AMI ID {} in {}", image_id, region))] - MissingImage { region: String, image_id: String }, - - #[snafu(display("Response to {} was missing {}", request_type, missing))] - MissingInResponse { - request_type: String, - missing: String, - }, - - #[snafu(display("Failed to find region {} in AMI map", region))] - MissingRegion { region: String }, - - #[snafu(display( - "Failed to modify permissions of {} in {}: {}", - snapshot_id, - region, - source - ))] - ModifyImageAttribute { - snapshot_id: String, - region: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to modify permissions of {} of {} images", - error_count, error_count + success_count, - ))] - ModifyImagesAttributes { - error_count: u16, - success_count: u16, 
-        },
-
-        #[snafu(display(
-            "Failed to modify permissions of {} in {}: {}",
-            image_id,
-            region,
-            source
-        ))]
-        ModifyImageAttributes {
-            image_id: String,
-            region: String,
-            source: SdkError<ModifyImageAttributeError>,
-        },
-
-        #[snafu(display(
-            "Failed to modify permissions of {} of {} snapshots",
-            error_count, error_count + success_count,
-        ))]
-        ModifySnapshotAttributes {
-            error_count: u16,
-            success_count: u16,
-        },
-
-        #[snafu(display("DescribeImages in {} with unique filters returned multiple results: {}", region, images.join(", ")))]
-        MultipleImages { region: String, images: Vec<String> },
-
-        #[snafu(display("Failed to serialize output to '{}': {}", path.display(), source))]
-        Serialize {
-            path: PathBuf,
-            source: serde_json::Error,
-        },
-
-        #[snafu(display(
-            "Given region(s) in Infra.toml / regions argument that are not in --ami-input file: {}",
-            regions.join(", ")
-        ))]
-        UnknownRegions { regions: Vec<String> },
-
-        #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))]
-        WaitAmi {
-            id: String,
-            region: String,
-            source: ami::wait::Error,
-        },
-    }
-
-    impl Error {
-        /// The number of AMIs that have had their permissions successfully changed.
-        pub(crate) fn amis_affected(&self) -> u16 {
-            match self {
-                // We list all of these variants so that future editors of the code will have to
-                // look at this and decide whether or not their new error variant might have
-                // modified any AMI permissions.
-                Error::Config { .. }
-                | Error::DescribeImageAttribute { .. }
-                | Error::DescribeImages { .. }
-                | Error::Deserialize { .. }
-                | Error::File { .. }
-                | Error::Input { .. }
-                | Error::MissingConfig { .. }
-                | Error::MissingImage { .. }
-                | Error::MissingInResponse { .. }
-                | Error::MissingRegion { .. }
-                | Error::ModifyImageAttribute { .. }
-                | Error::ModifyImageAttributes { .. }
-                | Error::ModifySnapshotAttributes { .. }
-                | Error::MultipleImages { .. }
-                | Error::Serialize { .. }
-                | Error::UnknownRegions { .. }
-                | Error::WaitAmi { .. } => 0u16,
-
-                // If an error occurs during the modify AMI permissions loop, then some AMIs may
-                // have been affected.
-                Error::ModifyImagesAttributes {
-                    error_count: _,
-                    success_count,
-                } => *success_count,
-            }
-        }
-    }
-}
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
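A note on the `amis_affected()` pattern above: because the match is exhaustive (no `_` arm), adding a new error variant forces the author to decide whether it can leave AMI permissions half-modified. The sketch below shows how a caller might act on that; the `report` helper is hypothetical, and only `Error::amis_affected()` comes from the deleted module.

```rust
// Hypothetical reporting helper; only `Error::amis_affected()` is from the
// deleted module above. `Result<T>` is that module's alias over `Error`.
fn report(outcome: Result<()>) {
    if let Err(e) = outcome {
        match e.amis_affected() {
            // Nothing was changed, so the whole operation can simply be retried.
            0 => eprintln!("failed cleanly, safe to retry: {}", e),
            // Some permissions were already modified and may need reconciling.
            n => eprintln!("failed after changing permissions on {} AMIs: {}", n, e),
        }
    }
}
```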
diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs
deleted file mode 100644
index 82d3685b0e7..00000000000
--- a/tools/pubsys/src/aws/ssm/mod.rs
+++ /dev/null
@@ -1,540 +0,0 @@
-//! The ssm module owns the 'ssm' subcommand and controls the process of setting SSM parameters
-//! based on current build information
-
-#[allow(clippy::module_inception)]
-pub(crate) mod ssm;
-pub(crate) mod template;
-
-use self::template::RenderedParameter;
-use crate::aws::ssm::template::RenderedParametersMap;
-use crate::aws::{
-    ami::public::ami_is_public, ami::Image, client::build_client_config, parse_arch,
-    region_from_string,
-};
-use crate::Args;
-use aws_config::SdkConfig;
-use aws_sdk_ec2::{types::ArchitectureValues, Client as Ec2Client};
-use aws_sdk_ssm::{config::Region, Client as SsmClient};
-use clap::Parser;
-use futures::stream::{StreamExt, TryStreamExt};
-use governor::{prelude::*, Quota, RateLimiter};
-use log::{error, info, trace};
-use nonzero_ext::nonzero;
-use pubsys_config::InfraConfig;
-use serde::Serialize;
-use snafu::{ensure, OptionExt, ResultExt};
-use std::iter::FromIterator;
-use std::path::PathBuf;
-use std::{
-    collections::{HashMap, HashSet},
-    fs::File,
-};
-
-/// Sets SSM parameters based on current build information
-#[derive(Debug, Parser)]
-pub(crate) struct SsmArgs {
-    // This is JSON output from `pubsys ami` like `{"us-west-2": "ami-123"}`
-    /// Path to the JSON file containing regional AMI IDs to modify
-    #[arg(long)]
-    ami_input: PathBuf,
-
-    /// The architecture of the machine image
-    #[arg(long, value_parser = parse_arch)]
-    arch: ArchitectureValues,
-
-    /// The variant name for the current build
-    #[arg(long)]
-    variant: String,
-
-    /// The version of the current build
-    #[arg(long)]
-    version: String,
-
-    /// Regions where you want parameters published
-    #[arg(long, value_delimiter = ',')]
-    regions: Vec<String>,
-
-    /// File holding the parameter templates
-    #[arg(long)]
-    template_path: PathBuf,
-
-    /// Allows overwrite of existing parameters
-    #[arg(long)]
-    allow_clobber: bool,
-
-    /// Allows publishing non-public images to the `/aws/` namespace
-    #[arg(long)]
-    allow_private_images: bool,
-
-    /// If set, writes the generated SSM parameters to this path
-    #[arg(long)]
-    ssm_parameter_output: Option<PathBuf>,
-}
-
-/// Wrapper struct over parameter update and AWS clients needed to execute on it.
-#[derive(Debug, Clone)]
-struct SsmParamUpdateOp {
-    parameter: RenderedParameter,
-    ec2_client: Ec2Client,
-}
-
-/// Common entrypoint from main()
-pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> {
-    // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
-
-    // If a lock file exists, use that, otherwise use Infra.toml
-    let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false)
-        .context(error::ConfigSnafu)?;
-    trace!("Parsed infra config: {:#?}", infra_config);
-    let aws = infra_config.aws.unwrap_or_default();
-    let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or("");
-
-    // If the user gave an override list of regions, use that, otherwise use what's in the config.
-    let regions = if !ssm_args.regions.is_empty() {
-        ssm_args.regions.clone()
-    } else {
-        aws.regions.clone().into()
-    };
-    ensure!(
-        !regions.is_empty(),
-        error::MissingConfigSnafu {
-            missing: "aws.regions"
-        }
-    );
-    let base_region = region_from_string(&regions[0]);
-
-    let amis = parse_ami_input(&regions, ssm_args)?;
-
-    // Template setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
-
-    // Non-image-specific context for building and rendering templates
-    let build_context = BuildContext {
-        variant: &ssm_args.variant,
-        arch: ssm_args.arch.as_ref(),
-        image_version: &ssm_args.version,
-    };
-
-    info!(
-        "Parsing SSM parameter templates from {}",
-        ssm_args.template_path.display()
-    );
-    let template_parameters = template::get_parameters(&ssm_args.template_path, &build_context)
-        .context(error::FindTemplatesSnafu)?;
-
-    if template_parameters.parameters.is_empty() {
-        info!(
-            "No parameters for this arch/variant in {}",
-            ssm_args.template_path.display()
-        );
-        return Ok(());
-    }
-
-    let new_parameters =
-        template::render_parameters(template_parameters, &amis, ssm_prefix, &build_context)
-            .context(error::RenderTemplatesSnafu)?;
-    trace!("Generated templated parameters: {:#?}", new_parameters);
-
-    // If the path to an output file was given, write the rendered parameters to this file
-    if let Some(ssm_parameter_output) = &ssm_args.ssm_parameter_output {
-        write_rendered_parameters(
-            ssm_parameter_output,
-            &RenderedParametersMap::from(&new_parameters).rendered_parameters,
-        )?;
-    }
-
-    // Generate AWS Clients to use for the updates.
-    let mut param_update_ops: Vec<SsmParamUpdateOp> = Vec::with_capacity(new_parameters.len());
-    let mut aws_sdk_configs: HashMap<Region, SdkConfig> = HashMap::with_capacity(regions.len());
-    let mut ssm_clients = HashMap::with_capacity(amis.len());
-
-    for parameter in new_parameters.iter() {
-        let region = &parameter.ssm_key.region;
-        // Store client configs so that we only have to create them once.
-        // The HashMap `entry` API doesn't play well with `async`, so we use a match here instead.
-        let client_config = match aws_sdk_configs.get(region) {
-            Some(client_config) => client_config.clone(),
-            None => {
-                let client_config = build_client_config(region, &base_region, &aws).await;
-                aws_sdk_configs.insert(region.clone(), client_config.clone());
-                client_config
-            }
-        };
-
-        let ssm_client = SsmClient::new(&client_config);
-        if ssm_clients.get(region).is_none() {
-            ssm_clients.insert(region.clone(), ssm_client);
-        }
-
-        let ec2_client = Ec2Client::new(&client_config);
-        param_update_ops.push(SsmParamUpdateOp {
-            parameter: parameter.clone(),
-            ec2_client,
-        });
-    }
-
-    // Unless overridden, only allow public images to be published to public parameters.
-    if !ssm_args.allow_private_images {
-        info!("Ensuring that only public images are published to public parameters.");
-        ensure!(
-            check_public_namespace_amis_are_public(param_update_ops.iter()).await?,
-            error::NoPrivateImagesSnafu
-        );
-    }
-
-    // SSM get/compare =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
-
-    info!("Getting current SSM parameters");
-    let new_parameter_names: Vec<&SsmKey> =
-        new_parameters.iter().map(|param| &param.ssm_key).collect();
-    let current_parameters = ssm::get_parameters(&new_parameter_names, &ssm_clients)
-        .await
-        .context(error::FetchSsmSnafu)?;
-    trace!("Current SSM parameters: {:#?}", current_parameters);
-
-    // Show the difference between source and target parameters in SSM.
-    let parameters_to_set = key_difference(
-        &RenderedParameter::as_ssm_parameters(&new_parameters),
-        &current_parameters,
-    );
-    if parameters_to_set.is_empty() {
-        info!("No changes necessary.");
-        return Ok(());
-    }
-
-    // Unless the user wants to allow it, make sure we're not going to overwrite any existing
-    // keys.
-    if !ssm_args.allow_clobber {
-        let current_keys: HashSet<&SsmKey> = current_parameters.keys().collect();
-        let new_keys: HashSet<&SsmKey> = parameters_to_set.keys().collect();
-        ensure!(current_keys.is_disjoint(&new_keys), error::NoClobberSnafu);
-    }
-
-    // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
-
-    info!("Setting updated SSM parameters.");
-    ssm::set_parameters(&parameters_to_set, &ssm_clients)
-        .await
-        .context(error::SetSsmSnafu)?;
-
-    info!("Validating whether live parameters in SSM reflect changes.");
-    ssm::validate_parameters(&parameters_to_set, &ssm_clients)
-        .await
-        .context(error::ValidateSsmSnafu)?;
-
-    info!("All parameters match requested values.");
-    Ok(())
-}
-
-/// Write rendered parameters to the file at `ssm_parameters_output`
-pub(crate) fn write_rendered_parameters(
-    ssm_parameters_output: &PathBuf,
-    parameters: &HashMap<String, HashMap<String, String>>,
-) -> Result<()> {
-    info!(
-        "Writing rendered SSM parameters to {:#?}",
-        ssm_parameters_output
-    );
-
-    serde_json::to_writer_pretty(
-        &File::create(ssm_parameters_output).context(error::WriteRenderedSsmParametersSnafu {
-            path: ssm_parameters_output,
-        })?,
-        &parameters,
-    )
-    .context(error::ParseRenderedSsmParametersSnafu)?;
-
-    info!(
-        "Wrote rendered SSM parameters to {:#?}",
-        ssm_parameters_output
-    );
-    Ok(())
-}
-
-// Rate limits on the EC2 side use the TokenBucket method, and buckets refill at a rate of 20 tokens per second.
-// See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-rate-based for more details.
-const DESCRIBE_IMAGES_RATE_LIMIT: Quota = Quota::per_second(nonzero!(20u32));
-const MAX_CONCURRENT_AMI_CHECKS: usize = 8;
-
-/// Given a set of SSM parameter updates, ensures all parameters in the public namespace refer to public AMIs.
-async fn check_public_namespace_amis_are_public(
-    parameter_updates: impl Iterator<Item = &SsmParamUpdateOp>,
-) -> Result<bool> {
-    let public_namespace_updates = parameter_updates
-        .filter(|update| update.parameter.ssm_key.is_in_public_namespace())
-        .cloned();
-
-    // Wrap `crate::aws::ami::public::ami_is_public()` in a future that returns the correct error type.
-    let check_ami_public = |update: SsmParamUpdateOp| async move {
-        let region = &update.parameter.ssm_key.region;
-        let ami_id = &update.parameter.ami.id;
-        let is_public = ami_is_public(&update.ec2_client, region.as_ref(), ami_id)
-            .await
-            .context(error::CheckAmiPublicSnafu {
-                ami_id: ami_id.to_string(),
-                region: region.to_string(),
-            });
-
-        if let Ok(false) = is_public {
-            error!(
-                "Attempted to set parameter '{}' in {} to '{}', based on AMI {}. That AMI is not marked public!",
-                update.parameter.ssm_key.name, region, update.parameter.value, ami_id
-            );
-        }
-
-        is_public
-    };
-
-    // Concurrently check our input parameter updates...
-    let rate_limiter = RateLimiter::direct(DESCRIBE_IMAGES_RATE_LIMIT);
-    let results: Vec<Result<bool>> = futures::stream::iter(public_namespace_updates)
-        .ratelimit_stream(&rate_limiter)
-        .then(|update| async move { Ok(check_ami_public(update)) })
-        .try_buffer_unordered(usize::min(num_cpus::get(), MAX_CONCURRENT_AMI_CHECKS))
-        .collect()
-        .await;
-
-    // `collect()` on `TryStreams` doesn't seem to happily invert a `Vec<Result<T>>` to a
-    // `Result<Vec<T>>`, so we use the usual `Iterator` methods to do it here.
-    Ok(results
-        .into_iter()
-        .collect::<Result<Vec<bool>>>()?
-        .into_iter()
-        .all(|is_public| is_public))
-}
-
-/// The key to a unique SSM parameter
-#[derive(Debug, Eq, Hash, PartialEq, Clone)]
-pub(crate) struct SsmKey {
-    pub(crate) region: Region,
-    pub(crate) name: String,
-}
-
-impl SsmKey {
-    pub(crate) fn new(region: Region, name: String) -> Self {
-        Self { region, name }
-    }
-
-    pub(crate) fn is_in_public_namespace(&self) -> bool {
-        self.name.starts_with("/aws/")
-    }
-}
-
-impl AsRef<SsmKey> for SsmKey {
-    fn as_ref(&self) -> &Self {
-        self
-    }
-}
-
-/// Non-image-specific context for building and rendering templates
-#[derive(Debug, Serialize)]
-pub(crate) struct BuildContext<'a> {
-    pub(crate) variant: &'a str,
-    pub(crate) arch: &'a str,
-    pub(crate) image_version: &'a str,
-}
-
-/// A map of SsmKey to its value
-pub(crate) type SsmParameters = HashMap<SsmKey, String>;
-
-/// Parse the AMI input file
-fn parse_ami_input(regions: &[String], ssm_args: &SsmArgs) -> Result<HashMap<Region, Image>> {
-    info!("Using AMI data from path: {}", ssm_args.ami_input.display());
-    let file = File::open(&ssm_args.ami_input).context(error::FileSnafu {
-        op: "open",
-        path: &ssm_args.ami_input,
-    })?;
-    let mut ami_input: HashMap<String, Image> =
-        serde_json::from_reader(file).context(error::DeserializeSnafu {
-            path: &ssm_args.ami_input,
-        })?;
-    trace!("Parsed AMI input: {:#?}", ami_input);
-
-    // pubsys will not create a file if it did not create AMIs, so we should only have an empty
-    // file if a user created one manually, and they shouldn't be creating an empty file.
-    ensure!(
-        !ami_input.is_empty(),
-        error::InputSnafu {
-            path: &ssm_args.ami_input
-        }
-    );
-
-    // Check that the requested regions are a subset of the regions we *could* publish from the AMI
-    // input JSON.
-    let requested_regions = HashSet::from_iter(regions.iter());
-    let known_regions = HashSet::<&String>::from_iter(ami_input.keys());
-    ensure!(
-        requested_regions.is_subset(&known_regions),
-        error::UnknownRegionsSnafu {
-            regions: requested_regions
-                .difference(&known_regions)
-                .map(|s| s.to_string())
-                .collect::<Vec<String>>(),
-        }
-    );
-
-    // Parse region names
-    let mut amis = HashMap::with_capacity(regions.len());
-    for name in regions {
-        let image = ami_input
-            .remove(name)
-            // This could only happen if someone removes the check above...
-            .with_context(|| error::UnknownRegionsSnafu {
-                regions: vec![name.clone()],
-            })?;
-        let region = region_from_string(name);
-        amis.insert(region.clone(), image);
-    }
-
-    Ok(amis)
-}
-
-/// Shows the user the difference between two sets of parameters. We look for parameters in
-/// `wanted` that are either missing or changed in `current`. We print these differences for the
-/// user, then return the `wanted` values.
-pub(crate) fn key_difference(wanted: &SsmParameters, current: &SsmParameters) -> SsmParameters {
-    let mut parameters_to_set = HashMap::new();
-
-    let wanted_keys: HashSet<&SsmKey> = wanted.keys().collect();
-    let current_keys: HashSet<&SsmKey> = current.keys().collect();
-
-    for key in wanted_keys.difference(&current_keys) {
-        let new_value = &wanted[key];
-        println!(
-            "{} - {} - new parameter:\n  new value: {}",
-            key.name, key.region, new_value,
-        );
-        parameters_to_set.insert(
-            SsmKey::new(key.region.clone(), key.name.clone()),
-            new_value.clone(),
-        );
-    }
-
-    for key in wanted_keys.intersection(&current_keys) {
-        let current_value = &current[key];
-        let new_value = &wanted[key];
-
-        if current_value == new_value {
-            println!("{} - {} - no change", key.name, key.region);
-        } else {
-            println!(
-                "{} - {} - changing value:\n  old value: {}\n  new value: {}",
-                key.name, key.region, current_value, new_value
-            );
-            parameters_to_set.insert(
-                SsmKey::new(key.region.clone(), key.name.clone()),
-                new_value.clone(),
-            );
-        }
-    }
-    // Note: don't care about items that are in current but not wanted; that could happen if you
-    // remove a parameter from your templates, for example.
-
-    parameters_to_set
-}
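The contract of `key_difference` is easiest to see with a toy input. This test-style sketch is illustrative only (it is not part of the deleted file) and assumes the `SsmKey`/`SsmParameters` types defined above:

```rust
// Illustrative only: new keys and changed values are returned; identical
// values and keys that exist only in `current` are skipped.
#[test]
fn key_difference_picks_new_and_changed() {
    let region = Region::new("us-west-2");
    let key = |name: &str| SsmKey::new(region.clone(), name.to_string());

    let wanted: SsmParameters = [
        (key("/a"), "1".to_string()), // unchanged -> skipped
        (key("/b"), "2".to_string()), // changed   -> returned
        (key("/c"), "3".to_string()), // new       -> returned
    ]
    .into_iter()
    .collect();
    let current: SsmParameters = [
        (key("/a"), "1".to_string()),
        (key("/b"), "9".to_string()),
        (key("/d"), "4".to_string()), // only in current -> ignored
    ]
    .into_iter()
    .collect();

    let to_set = key_difference(&wanted, &current);
    assert_eq!(to_set.len(), 2);
    assert_eq!(to_set[&key("/b")], "2");
    assert_eq!(to_set[&key("/c")], "3");
}
```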
-
-mod error {
-    use crate::aws::ssm::{ssm, template};
-    use snafu::Snafu;
-    use std::io;
-    use std::path::PathBuf;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Error reading config: {}", source))]
-        Config {
-            source: pubsys_config::Error,
-        },
-
-        #[snafu(display(
-            "Failed to check whether AMI {} in {} was public: {}",
-            ami_id,
-            region,
-            source
-        ))]
-        CheckAmiPublic {
-            ami_id: String,
-            region: String,
-            source: crate::aws::ami::public::Error,
-        },
-
-        #[snafu(display("Failed to create EC2 client for region {}", region))]
-        CreateEc2Client {
-            region: String,
-        },
-
-        #[snafu(display("Failed to deserialize input from '{}': {}", path.display(), source))]
-        Deserialize {
-            path: PathBuf,
-            source: serde_json::Error,
-        },
-
-        #[snafu(display("Failed to fetch parameters from SSM: {}", source))]
-        FetchSsm {
-            source: ssm::Error,
-        },
-
-        #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))]
-        File {
-            op: String,
-            path: PathBuf,
-            source: io::Error,
-        },
-
-        #[snafu(display("Failed to find templates: {}", source))]
-        FindTemplates {
-            source: template::Error,
-        },
-
-        #[snafu(display("Input '{}' is empty", path.display()))]
-        Input {
-            path: PathBuf,
-        },
-
-        #[snafu(display("Infra.toml is missing {}", missing))]
-        MissingConfig {
-            missing: String,
-        },
-
-        #[snafu(display("Cowardly refusing to overwrite parameters without ALLOW_CLOBBER"))]
-        NoClobber,
-
-        #[snafu(display("Cowardly refusing to publish private image to public namespace without ALLOW_PRIVATE_IMAGES"))]
-        NoPrivateImages,
-
-        #[snafu(display("Failed to render templates: {}", source))]
-        RenderTemplates {
-            source: template::Error,
-        },
-
-        #[snafu(display("Failed to set SSM parameters: {}", source))]
-        SetSsm {
-            source: ssm::Error,
-        },
-
-        #[snafu(display(
-            "Given region(s) in Infra.toml / regions argument that are not in --ami-input file: {}",
-            regions.join(", ")
-        ))]
-        UnknownRegions {
-            regions: Vec<String>,
-        },
-
-        #[snafu(display("Failed to validate SSM parameters: {}", source))]
-        ValidateSsm {
-            source: ssm::Error,
-        },
-
-        #[snafu(display("Failed to parse rendered SSM parameters to JSON: {}", source))]
-        ParseRenderedSsmParameters {
-            source: serde_json::Error,
-        },
-
-        #[snafu(display("Failed to write rendered SSM parameters to {:#?}: {}", path, source))]
-        WriteRenderedSsmParameters {
-            path: PathBuf,
-            source: std::io::Error,
-        },
-    }
-}
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs
deleted file mode 100644
index f92666fda2f..00000000000
--- a/tools/pubsys/src/aws/ssm/ssm.rs
+++ /dev/null
@@ -1,472 +0,0 @@
-//! The ssm module owns the getting and setting of parameters in SSM.
-
-use super::{SsmKey, SsmParameters};
-use aws_sdk_ssm::error::{ProvideErrorMetadata, SdkError};
-use aws_sdk_ssm::operation::{
-    get_parameters::{GetParametersError, GetParametersOutput},
-    put_parameter::{PutParameterError, PutParameterOutput},
-};
-use aws_sdk_ssm::{config::Region, types::ParameterType, Client as SsmClient};
-use futures::future::{join, ready};
-use futures::stream::{self, FuturesUnordered, StreamExt};
-use log::{debug, error, info, trace, warn};
-use snafu::{ensure, OptionExt, ResultExt};
-use std::collections::{HashMap, HashSet};
-use std::time::Duration;
-
-/// Fetches the values of the given SSM keys using the given clients
-// TODO: We can batch GET requests so throttling is less likely here, but if we need to handle
-// hundreds of parameters for a given build, we could use the throttling logic from
-// `set_parameters`
-pub(crate) async fn get_parameters<K>(
-    requested: &[K],
-    clients: &HashMap<Region, SsmClient>,
-) -> Result<SsmParameters>
-where
-    K: AsRef<SsmKey>,
-{
-    // Build requests for parameters; we have to request with a regional client so we split them by
-    // region
-    let mut requests = Vec::with_capacity(requested.len());
-    let mut regional_names: HashMap<Region, Vec<String>> = HashMap::new();
-    for key in requested {
-        let SsmKey { region, name } = key.as_ref();
-        regional_names
-            .entry(region.clone())
-            .or_default()
-            .push(name.clone());
-    }
-    for (region, names) in regional_names {
-        // At most 10 parameters can be requested at a time.
-        for names_chunk in names.chunks(10) {
-            trace!("Requesting {:?} in {}", names_chunk, region);
-            let ssm_client = &clients[&region];
-            let len = names_chunk.len();
-            let get_future = ssm_client
-                .get_parameters()
-                .set_names((!names_chunk.is_empty()).then_some(names_chunk.to_vec().clone()))
-                .send();
-
-            // Store the region so we can include it in errors and the output map
-            let info_future = ready((region.clone(), len));
-            requests.push(join(info_future, get_future));
-        }
-    }
-
-    // Send requests in parallel and wait for responses, collecting results into a list.
-    let request_stream = stream::iter(requests).buffer_unordered(4);
-    #[allow(clippy::type_complexity)]
-    let responses: Vec<(
-        (Region, usize),
-        std::result::Result<GetParametersOutput, SdkError<GetParametersError>>,
-    )> = request_stream.collect().await;
-
-    // If you're checking parameters in a region you haven't pushed to before, you can get an
-    // error here about the parameter's namespace being new.  We want to treat these as new
-    // parameters rather than failing.  Unfortunately, we don't know which parameter in the region
-    // was considered new, but we expect that most people are publishing all of their parameters
-    // under the same namespace, so treating the whole region as new is OK.  We use this just to
-    // warn the user.
-    let mut new_regions = HashSet::new();
-
-    // For each existing parameter in the response, get the name and value for our output map.
-    let mut parameters = HashMap::with_capacity(requested.len());
-    for ((region, expected_len), response) in responses {
-        // Get the image description, ensuring we only have one.
-        let response = match response {
-            Ok(response) => response,
-            Err(e) => {
-                // Note: there's no structured error type for this so we have to string match.
-                if e.to_string().contains("is not a valid namespace") {
-                    new_regions.insert(region.clone());
-                    continue;
-                } else {
-                    return Err(e).context(error::GetParametersSnafu {
-                        region: region.as_ref(),
-                    });
-                }
-            }
-        };
-
-        // Check that we received a response including every parameter
-        // Note: response.invalid_parameters includes both new parameters and ill-formatted
-        // parameter names...
-        let valid_count = response.parameters.as_ref().map(|v| v.len()).unwrap_or(0);
-        let invalid_count = response.invalid_parameters.map(|v| v.len()).unwrap_or(0);
-        let total_count = valid_count + invalid_count;
-        ensure!(
-            total_count == expected_len,
-            error::MissingInResponseSnafu {
-                region: region.as_ref(),
-                request_type: "GetParameters",
-                missing: format!(
-                    "parameters - got {}, expected {}",
-                    total_count, expected_len
-                ),
-            }
-        );
-
-        // Save the successful parameters
-        if let Some(valid_parameters) = response.parameters {
-            if !valid_parameters.is_empty() {
-                for parameter in valid_parameters {
-                    let name = parameter.name.context(error::MissingInResponseSnafu {
-                        region: region.as_ref(),
-                        request_type: "GetParameters",
-                        missing: "parameter name",
-                    })?;
-                    let value = parameter.value.context(error::MissingInResponseSnafu {
-                        region: region.as_ref(),
-                        request_type: "GetParameters",
-                        missing: format!("value for parameter {}", name),
-                    })?;
-                    parameters.insert(SsmKey::new(region.clone(), name), value);
-                }
-            }
-        }
-    }
-
-    for region in new_regions {
-        warn!(
-            "Invalid namespace in {}, this is OK for the first publish in a region",
-            region
-        );
-    }
-
-    Ok(parameters)
-}
-
-/// Fetches all SSM parameters under a given prefix using the given clients
-pub(crate) async fn get_parameters_by_prefix<'a>(
-    clients: &'a HashMap<Region, SsmClient>,
-    ssm_prefix: &str,
-) -> HashMap<&'a Region, Result<SsmParameters>> {
-    // Build requests for parameters; we have to request with a regional client so we split them by
-    // region
-    let mut requests = Vec::with_capacity(clients.len());
-    for region in clients.keys() {
-        trace!("Requesting parameters in {}", region);
-        let ssm_client: &SsmClient = &clients[region];
-        let get_future = get_parameters_by_prefix_in_region(region, ssm_client, ssm_prefix);
-
-        requests.push(join(ready(region), get_future));
-    }
-
-    // Send requests in parallel and wait for responses, collecting results into a list.
-    requests
-        .into_iter()
-        .collect::<FuturesUnordered<_>>()
-        .collect()
-        .await
-}
-
-/// Fetches all SSM parameters under a given prefix in a single region
-pub(crate) async fn get_parameters_by_prefix_in_region(
-    region: &Region,
-    client: &SsmClient,
-    ssm_prefix: &str,
-) -> Result<SsmParameters> {
-    info!("Retrieving SSM parameters in {}", region.to_string());
-    let mut parameters = HashMap::new();
-
-    // Send the request
-    let mut get_future = client
-        .get_parameters_by_path()
-        .path(ssm_prefix)
-        .recursive(true)
-        .into_paginator()
-        .send();
-
-    // Iterate over the retrieved parameters
-    while let Some(page) = get_future.next().await {
-        let retrieved_parameters = page
-            .context(error::GetParametersByPathSnafu {
-                path: ssm_prefix,
-                region: region.to_string(),
-            })?
-            .parameters()
-            .unwrap_or_default()
-            .to_owned();
-        for parameter in retrieved_parameters {
-            // Insert a new key-value pair into the map, with the key containing region and parameter name
-            // and the value containing the parameter value
-            parameters.insert(
-                SsmKey::new(
-                    region.to_owned(),
-                    parameter
-                        .name()
-                        .ok_or(error::Error::MissingField {
-                            region: region.to_string(),
-                            field: "name".to_string(),
-                        })?
-                        .to_owned(),
-                ),
-                parameter
-                    .value()
-                    .ok_or(error::Error::MissingField {
-                        region: region.to_string(),
-                        field: "value".to_string(),
-                    })?
-                    .to_owned(),
-            );
-        }
-    }
-    info!(
-        "SSM parameters in {} have been retrieved",
-        region.to_string()
-    );
-    Ok(parameters)
-}
-
-/// Sets the values of the given SSM keys using the given clients
-pub(crate) async fn set_parameters(
-    parameters_to_set: &SsmParameters,
-    ssm_clients: &HashMap<Region, SsmClient>,
-) -> Result<()> {
-    // Start with a small delay between requests, and increase if we get throttled.
-    let mut request_interval = Duration::from_millis(100);
-    let max_interval = Duration::from_millis(1600);
-    let interval_factor = 2;
-    let mut should_increase_interval = false;
-
-    // We run all requests in a batch, and any failed requests are added to the next batch for
-    // retry
-    let mut failed_parameters: HashMap<Region, Vec<(String, String)>> = HashMap::new();
-    let max_failures = 5;
-
-    /// Stores the values we need to be able to retry requests
-    struct RequestContext<'a> {
-        region: &'a Region,
-        name: &'a str,
-        value: &'a str,
-        failures: u8,
-    }
-
-    // Create the initial request contexts
-    let mut contexts = Vec::new();
-    for (SsmKey { region, name }, value) in parameters_to_set {
-        contexts.push(RequestContext {
-            region,
-            name,
-            value,
-            failures: 0,
-        });
-    }
-    let total_count = contexts.len();
-
-    // We drain requests out of the contexts list and put them back if we need to retry; we do this
-    // until all requests have succeeded or we've hit the max failures
-    while !contexts.is_empty() {
-        debug!("Starting {} SSM put requests", contexts.len());
-
-        if should_increase_interval {
-            request_interval *= interval_factor;
-            warn!(
-                "Requests were throttled, increasing interval to {:?}",
-                request_interval
-            );
-        }
-        should_increase_interval = false;
-
-        ensure!(
-            request_interval <= max_interval,
-            error::ThrottledSnafu { max_interval }
-        );
-
-        // Build requests for parameters.  We need to group them by region so we can run each
-        // region in parallel.  Each region's stream will be throttled to run one request per
-        // request_interval.
-        let mut regional_requests = HashMap::new();
-        // Remove contexts from the list with drain; they get added back in if we retry the
-        // request.
-        for context in contexts.drain(..) {
-            let ssm_client = &ssm_clients[context.region];
-
-            let put_future = ssm_client
-                .put_parameter()
-                .set_name(Some(context.name.to_string()))
-                .set_value(Some(context.value.to_string()))
-                .set_overwrite(Some(true))
-                .set_type(Some(ParameterType::String))
-                .send();
-
-            let regional_list = regional_requests
-                .entry(context.region)
-                .or_insert_with(Vec::new);
-            // Store the context so we can retry as needed
-            regional_list.push(join(ready(context), put_future));
-        }
-
-        // Create a throttled stream per region; throttling applies per region.  (Request futures
-        // are already regional, by virtue of being created with a regional client, so we don't
-        // need the region again here.)
-        let mut throttled_streams = Vec::new();
-        for (_region, request_list) in regional_requests {
-            throttled_streams.push(Box::pin(tokio_stream::StreamExt::throttle(
-                stream::iter(request_list),
-                request_interval,
-            )));
-        }
-
-        // Run all regions in parallel and wait for responses.
-        let parallel_requests = stream::select_all(throttled_streams).buffer_unordered(4);
-        let responses: Vec<(
-            RequestContext<'_>,
-            std::result::Result<PutParameterOutput, SdkError<PutParameterError>>,
-        )> = parallel_requests.collect().await;
-
-        // For each error response, check if we should retry or bail.
-        for (context, response) in responses {
-            if let Err(e) = response {
-                // Throttling errors are not currently surfaced in the AWS Rust SDK, so a string
-                // match is the best we can do.
-                let error_type = e
-                    .into_service_error()
-                    .code()
-                    .unwrap_or("unknown")
-                    .to_owned();
-                if error_type.contains("ThrottlingException") {
-                    // We only want to increase the interval once per loop, not once per error,
-                    // because when you get throttled you're likely to get a bunch of throttling
-                    // errors at once.
-                    should_increase_interval = true;
-                    // Retry the request without increasing the failure counter; the request didn't
-                    // fail, a throttle means we couldn't even make the request.
-                    contexts.push(context);
-                // -1 so we don't try again next loop; this keeps failure checking in one place
-                } else if context.failures >= max_failures - 1 {
-                    // Past max failures, store the failure for reporting, don't retry.
-                    failed_parameters
-                        .entry(context.region.clone())
-                        .or_default()
-                        .push((context.name.to_string(), error_type));
-                } else {
-                    // Increase failure counter and try again.
-                    let context = RequestContext {
-                        failures: context.failures + 1,
-                        ..context
-                    };
-                    debug!(
-                        "Request attempt {} of {} failed in {}: {}",
-                        context.failures, max_failures, context.region, error_type
-                    );
-                    contexts.push(context);
-                }
-            }
-        }
-    }
-
-    if !failed_parameters.is_empty() {
-        for (region, failures) in &failed_parameters {
-            for (parameter, error) in failures {
-                error!("Failed to set {} in {}: {}", parameter, region, error);
-            }
-        }
-        return error::SetParametersSnafu {
-            failure_count: failed_parameters.len(),
-            total_count,
-        }
-        .fail();
-    }
-
-    Ok(())
-}
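The throttling strategy in `set_parameters` above is separable from SSM: double the per-region request interval whenever any request in a batch is throttled, and bail out once the interval would exceed a cap. A minimal standalone sketch of that policy, with hypothetical names (`Backoff`, `next_interval`) that do not appear in the deleted file:

```rust
use std::time::Duration;

// Standalone sketch of the same backoff policy: start small, double on
// throttle, give up once the interval exceeds the cap.
struct Backoff {
    interval: Duration,
    max: Duration,
}

impl Backoff {
    fn new() -> Self {
        Self {
            interval: Duration::from_millis(100),
            max: Duration::from_millis(1600),
        }
    }

    // Returns the interval to wait before the next batch, or None once the
    // policy is exhausted (the caller should surface a Throttled-style error).
    fn next_interval(&mut self, was_throttled: bool) -> Option<Duration> {
        if was_throttled {
            self.interval *= 2;
        }
        (self.interval <= self.max).then_some(self.interval)
    }
}
```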
-
-/// Fetch the given parameters, and ensure the live values match the given values
-pub(crate) async fn validate_parameters(
-    expected_parameters: &SsmParameters,
-    ssm_clients: &HashMap<Region, SsmClient>,
-) -> Result<()> {
-    // Fetch the given parameter names
-    let expected_parameter_names: Vec<&SsmKey> = expected_parameters.keys().collect();
-    let updated_parameters = get_parameters(&expected_parameter_names, ssm_clients).await?;
-
-    // Walk through and check each value
-    let mut success = true;
-    for (expected_key, expected_value) in expected_parameters {
-        let SsmKey {
-            region: expected_region,
-            name: expected_name,
-        } = expected_key;
-        // All parameters should have a value, and it should match the given value, otherwise the
-        // parameter wasn't updated / created.
-        if let Some(updated_value) = updated_parameters.get(expected_key) {
-            if updated_value != expected_value {
-                error!("Failed to set {} in {}", expected_name, expected_region);
-                success = false;
-            }
-        } else {
-            error!(
-                "{} in {} still doesn't exist",
-                expected_name, expected_region
-            );
-            success = false;
-        }
-    }
-    ensure!(success, error::ValidateParametersSnafu);
-
-    Ok(())
-}
-
-pub(crate) mod error {
-    use aws_sdk_ssm::error::SdkError;
-    use aws_sdk_ssm::operation::{
-        get_parameters::GetParametersError, get_parameters_by_path::GetParametersByPathError,
-    };
-    use snafu::Snafu;
-    use std::error::Error as _;
-    use std::time::Duration;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    #[allow(clippy::large_enum_variant)]
-    pub enum Error {
-        #[snafu(display("Failed to fetch SSM parameters in {}: {}", region, source.source().map(|x| x.to_string()).unwrap_or("unknown".to_string())))]
-        GetParameters {
-            region: String,
-            source: SdkError<GetParametersError>,
-        },
-
-        #[snafu(display(
-            "Failed to fetch SSM parameters by path {} in {}: {}",
-            path,
-            region,
-            source
-        ))]
-        GetParametersByPath {
-            path: String,
-            region: String,
-            source: SdkError<GetParametersByPathError>,
-        },
-
-        #[snafu(display("Missing field in parameter in {}: {}", region, field))]
-        MissingField { region: String, field: String },
-
-        #[snafu(display("Response to {} was missing {}", request_type, missing))]
-        MissingInResponse {
-            region: String,
-            request_type: String,
-            missing: String,
-        },
-
-        #[snafu(display(
-            "Failed to set {} of {} parameters; see above",
-            failure_count,
-            total_count
-        ))]
-        SetParameters {
-            failure_count: usize,
-            total_count: usize,
-        },
-
-        #[snafu(display(
-            "SSM requests throttled too many times, went beyond our max interval {:?}",
-            max_interval
-        ))]
-        Throttled { max_interval: Duration },
-
-        #[snafu(display("Failed to validate all changes; see above."))]
-        ValidateParameters,
-    }
-}
-pub(crate) use error::Error;
-pub(crate) type Result<T> = std::result::Result<T, Error>;
diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs
deleted file mode 100644
index ac60583e3cf..00000000000
--- a/tools/pubsys/src/aws/ssm/template.rs
+++ /dev/null
@@ -1,415 +0,0 @@
-//! The template module owns the finding and rendering of parameter templates that are used to
-//! generate SSM parameter names and values.
-
-use super::{BuildContext, SsmKey, SsmParameters};
-use crate::aws::ami::Image;
-use aws_sdk_ssm::config::Region;
-use log::trace;
-use serde::{Deserialize, Serialize};
-use snafu::{ensure, ResultExt};
-use std::collections::HashMap;
-use std::fs;
-use std::path::Path;
-use tinytemplate::TinyTemplate;
-
-/// Represents a single SSM parameter
-#[derive(Debug, Deserialize)]
-pub(crate) struct TemplateParameter {
-    pub(crate) name: String,
-    pub(crate) value: String,
-
-    // User can say parameters only apply to these variants/arches
-    #[serde(default, rename = "variant")]
-    pub(crate) variants: Vec<String>,
-    #[serde(default, rename = "arch")]
-    pub(crate) arches: Vec<String>,
-}
-
-/// Represents a set of SSM parameters, in a format that allows for clear definition of
-/// parameters in TOML files
-#[derive(Debug, Deserialize)]
-pub(crate) struct TemplateParameters {
-    // In a TOML table, it's clearer to define a single entry as a "parameter".
-    #[serde(default, rename = "parameter")]
-    pub(crate) parameters: Vec<TemplateParameter>,
-}
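The `rename` attributes above are what make the TOML read naturally as repeated `[[parameter]]` tables. A small parse check, illustrative only (the template content is made up, and the structs are assumed to be in scope):

```rust
// `[[parameter]]` tables deserialize into `parameters` thanks to the serde
// rename; `variant`/`arch` arrays are optional conditional filters.
fn main() -> Result<(), toml::de::Error> {
    let toml_str = r#"
        [[parameter]]
        name = "{variant}/{arch}/latest/image_id"
        value = "{image_id}"
        variant = ["aws-k8s-1.28"]
    "#;
    let parsed: TemplateParameters = toml::from_str(toml_str)?;
    assert_eq!(parsed.parameters.len(), 1);
    assert_eq!(parsed.parameters[0].variants, vec!["aws-k8s-1.28"]);
    Ok(())
}
```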
-
-/// Deserializes template parameters from the template file, taking into account conditional
-/// parameters that may or may not apply based on our build context.
-pub(crate) fn get_parameters(
-    template_path: &Path,
-    build_context: &BuildContext<'_>,
-) -> Result<TemplateParameters> {
-    let templates_str = fs::read_to_string(template_path).context(error::FileSnafu {
-        op: "read",
-        path: &template_path,
-    })?;
-    let mut template_parameters: TemplateParameters =
-        toml::from_str(&templates_str).context(error::InvalidTomlSnafu {
-            path: &template_path,
-        })?;
-    trace!("Parsed templates: {:#?}", template_parameters);
-
-    // You shouldn't point to an empty file, but if all the entries are removed by
-    // conditionals below, we allow that and just don't set any parameters.
-    ensure!(
-        !template_parameters.parameters.is_empty(),
-        error::NoTemplatesSnafu {
-            path: template_path
-        }
-    );
-
-    let variant = build_context.variant.to_string();
-    let arch = build_context.arch.to_string();
-    template_parameters.parameters.retain(|p| {
-        (p.variants.is_empty() || p.variants.contains(&variant))
-            && (p.arches.is_empty() || p.arches.contains(&arch))
-    });
-    trace!("Templates after conditionals: {:#?}", template_parameters);
-
-    Ok(template_parameters)
-}
-
-/// A value which stores rendered SSM parameters alongside metadata used to render their templates
-#[derive(Debug, Eq, PartialEq, Hash, Clone)]
-pub(crate) struct RenderedParameter {
-    pub(crate) ami: Image,
-    pub(crate) ssm_key: SsmKey,
-    pub(crate) value: String,
-}
-
-impl RenderedParameter {
-    /// Creates an `SsmParameters` HashMap from a list of `RenderedParameter`
-    pub(crate) fn as_ssm_parameters(rendered_parameters: &[RenderedParameter]) -> SsmParameters {
-        rendered_parameters
-            .iter()
-            .map(|param| (param.ssm_key.clone(), param.value.clone()))
-            .collect()
-    }
-}
-
-/// Render the given template parameters using the data from the given AMIs
-pub(crate) fn render_parameters(
-    template_parameters: TemplateParameters,
-    amis: &HashMap<Region, Image>,
-    ssm_prefix: &str,
-    build_context: &BuildContext<'_>,
-) -> Result<Vec<RenderedParameter>> {
-    /// Values that we allow as template variables
-    #[derive(Debug, Serialize)]
-    struct TemplateContext<'a> {
-        variant: &'a str,
-        arch: &'a str,
-        image_id: &'a str,
-        image_name: &'a str,
-        image_version: &'a str,
-        region: &'a str,
-    }
-    let mut new_parameters = Vec::new();
-    for (region, image) in amis {
-        let context = TemplateContext {
-            variant: build_context.variant,
-            arch: build_context.arch,
-            image_id: &image.id,
-            image_name: &image.name,
-            image_version: build_context.image_version,
-            region: region.as_ref(),
-        };
-
-        for tp in &template_parameters.parameters {
-            let mut tt = TinyTemplate::new();
-            tt.add_template("name", &tp.name)
-                .context(error::AddTemplateSnafu { template: &tp.name })?;
-            tt.add_template("value", &tp.value)
-                .context(error::AddTemplateSnafu {
-                    template: &tp.value,
-                })?;
-            let name_suffix = tt
-                .render("name", &context)
-                .context(error::RenderTemplateSnafu { template: &tp.name })?;
-            let value = tt
-                .render("value", &context)
-                .context(error::RenderTemplateSnafu {
-                    template: &tp.value,
-                })?;
-
-            new_parameters.push(RenderedParameter {
-                ami: image.clone(),
-                ssm_key: SsmKey::new(region.clone(), join_name(ssm_prefix, &name_suffix)),
-                value,
-            });
-        }
-    }
-
-    Ok(new_parameters)
-}
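How a template entry becomes a concrete parameter name is clearest with a tiny end-to-end render. This sketch drives TinyTemplate directly with a context shaped like `TemplateContext` above; the literal template string and values are made up:

```rust
use serde::Serialize;
use tinytemplate::TinyTemplate;

// A cut-down context with made-up values, mirroring render_parameters() above.
#[derive(Serialize)]
struct Ctx<'a> {
    variant: &'a str,
    arch: &'a str,
    image_id: &'a str,
}

fn main() -> Result<(), tinytemplate::error::Error> {
    let mut tt = TinyTemplate::new();
    // A name template like those in the TOML files; `{variant}` and `{arch}`
    // are replaced from the serialized context.
    tt.add_template("name", "/bottlerocket/{variant}/{arch}/latest/image_id")?;
    let ctx = Ctx {
        variant: "aws-k8s-1.28",
        arch: "x86_64",
        image_id: "ami-123",
    };
    assert_eq!(
        tt.render("name", &ctx)?,
        "/bottlerocket/aws-k8s-1.28/x86_64/latest/image_id"
    );
    Ok(())
}
```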
-
-/// Render the names of the given template parameters using the fixed data about the current build.
-/// Returns a mapping of templated name to rendered name, so we can associate rendered names to a
-/// common source name
-pub(crate) fn render_parameter_names(
-    template_parameters: &TemplateParameters,
-    ssm_prefix: &str,
-    build_context: &BuildContext<'_>,
-) -> Result<HashMap<String, String>> {
-    let mut new_parameters = HashMap::new();
-    for tp in &template_parameters.parameters {
-        let mut tt = TinyTemplate::new();
-        tt.add_template("name", &tp.name)
-            .context(error::AddTemplateSnafu { template: &tp.name })?;
-        let name_suffix = tt
-            .render("name", &build_context)
-            .context(error::RenderTemplateSnafu { template: &tp.name })?;
-        new_parameters.insert(tp.name.clone(), join_name(ssm_prefix, &name_suffix));
-    }
-
-    Ok(new_parameters)
-}
-
-/// Make sure prefix and parameter name are separated by one slash
-fn join_name(ssm_prefix: &str, name_suffix: &str) -> String {
-    if ssm_prefix.ends_with('/') && name_suffix.starts_with('/') {
-        format!("{}{}", ssm_prefix, &name_suffix[1..])
-    } else if ssm_prefix.ends_with('/') || name_suffix.starts_with('/') {
-        format!("{}{}", ssm_prefix, name_suffix)
-    } else {
-        format!("{}/{}", ssm_prefix, name_suffix)
-    }
-}
-
-type RegionName = String;
-type SsmParameterName = String;
-type SsmParameterValue = String;
-
-/// Struct containing a HashMap of RegionName, mapped to a HashMap
-/// of SsmParameterName, SsmParameterValue pairs
-#[derive(Deserialize, PartialEq, Serialize)]
-pub(crate) struct RenderedParametersMap {
-    pub(crate) rendered_parameters:
-        HashMap<RegionName, HashMap<SsmParameterName, SsmParameterValue>>,
-}
-
-impl From<&Vec<RenderedParameter>> for RenderedParametersMap {
-    fn from(parameters: &Vec<RenderedParameter>) -> Self {
-        let mut parameter_map: HashMap<RegionName, HashMap<SsmParameterName, SsmParameterValue>> =
-            HashMap::new();
-        for parameter in parameters.iter() {
-            parameter_map
-                .entry(parameter.ssm_key.region.to_string())
-                .or_default()
-                .insert(
-                    parameter.ssm_key.name.to_owned(),
-                    parameter.value.to_owned(),
-                );
-        }
-        RenderedParametersMap {
-            rendered_parameters: parameter_map,
-        }
-    }
-}
-
-impl From<HashMap<Region, SsmParameters>> for RenderedParametersMap {
-    fn from(parameters: HashMap<Region, SsmParameters>) -> Self {
-        let mut parameter_map: HashMap<RegionName, HashMap<SsmParameterName, SsmParameterValue>> =
-            HashMap::new();
-        parameters
-            .into_iter()
-            .for_each(|(region, region_parameters)| {
-                parameter_map.insert(
-                    region.to_string(),
-                    region_parameters
-                        .into_iter()
-                        .map(|(ssm_key, ssm_value)| (ssm_key.name, ssm_value))
-                        .collect::<HashMap<SsmParameterName, SsmParameterValue>>(),
-                );
-            });
-        RenderedParametersMap {
-            rendered_parameters: parameter_map,
-        }
-    }
-}
-
-mod error {
-    use snafu::Snafu;
-    use std::io;
-    use std::path::PathBuf;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Error building template from '{}': {}", template, source))]
-        AddTemplate {
-            template: String,
-            source: tinytemplate::error::Error,
-        },
-
-        #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))]
-        File {
-            op: String,
-            path: PathBuf,
-            source: io::Error,
-        },
-
-        #[snafu(display("Invalid config file at '{}': {}", path.display(), source))]
-        InvalidToml {
-            path: PathBuf,
-            source: toml::de::Error,
-        },
-
-        #[snafu(display("Found no parameter templates in {}", path.display()))]
-        NoTemplates { path: PathBuf },
-
-        #[snafu(display("Error rendering template from '{}': {}", template, source))]
-        RenderTemplate {
-            template: String,
-            source: tinytemplate::error::Error,
-        },
-    }
-}
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
-
-#[cfg(test)]
-mod test {
-    use std::collections::HashMap;
-
-    use super::{RenderedParameter, RenderedParametersMap};
-    use crate::aws::{ami::Image, ssm::SsmKey};
-    use aws_sdk_ssm::config::Region;
-
-    // These tests
assert that the RenderedParametersMap can be created correctly. - #[test] - fn rendered_parameters_map_from_vec() { - let rendered_parameters = vec![ - RenderedParameter { - ami: Image { - id: "test1-image-id".to_string(), - name: "test1-image-name".to_string(), - public: Some(true), - launch_permissions: Some(vec![]), - }, - ssm_key: SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - value: "test1-parameter-value".to_string(), - }, - RenderedParameter { - ami: Image { - id: "test2-image-id".to_string(), - name: "test2-image-name".to_string(), - public: Some(true), - launch_permissions: Some(vec![]), - }, - ssm_key: SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - value: "test2-parameter-value".to_string(), - }, - RenderedParameter { - ami: Image { - id: "test3-image-id".to_string(), - name: "test3-image-name".to_string(), - public: Some(true), - launch_permissions: Some(vec![]), - }, - ssm_key: SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - value: "test3-parameter-value".to_string(), - }, - ]; - let map = &RenderedParametersMap::from(&rendered_parameters).rendered_parameters; - let expected_map = &HashMap::from([ - ( - "us-east-1".to_string(), - HashMap::from([( - "test3-parameter-name".to_string(), - "test3-parameter-value".to_string(), - )]), - ), - ( - "us-west-2".to_string(), - HashMap::from([ - ( - "test1-parameter-name".to_string(), - "test1-parameter-value".to_string(), - ), - ( - "test2-parameter-name".to_string(), - "test2-parameter-value".to_string(), - ), - ]), - ), - ]); - assert_eq!(map, expected_map); - } - - #[test] - fn rendered_parameters_map_from_empty_vec() { - let rendered_parameters = vec![]; - let map = &RenderedParametersMap::from(&rendered_parameters).rendered_parameters; - let expected_map = &HashMap::new(); - assert_eq!(map, expected_map); - } - - #[test] - fn rendered_parameters_map_from_map() { - let existing_parameters = HashMap::from([ - ( - Region::new("us-west-2"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashMap::from([( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - )]), - ), - ]); - let map = &RenderedParametersMap::from(existing_parameters).rendered_parameters; - let expected_map = &HashMap::from([ - ( - "us-east-1".to_string(), - HashMap::from([( - "test3-parameter-name".to_string(), - "test3-parameter-value".to_string(), - )]), - ), - ( - "us-west-2".to_string(), - HashMap::from([ - ( - "test1-parameter-name".to_string(), - "test1-parameter-value".to_string(), - ), - ( - "test2-parameter-name".to_string(), - "test2-parameter-value".to_string(), - ), - ]), - ), - ]); - assert_eq!(map, expected_map); - } - - #[test] - fn rendered_parameters_map_from_empty_map() { - let existing_parameters = HashMap::new(); - let map = &RenderedParametersMap::from(existing_parameters).rendered_parameters; - let expected_map = &HashMap::new(); - assert_eq!(map, expected_map); - } -} diff --git a/tools/pubsys/src/aws/validate_ami/ami.rs b/tools/pubsys/src/aws/validate_ami/ami.rs deleted file mode 100644 index 4ee85fb4dc8..00000000000 --- a/tools/pubsys/src/aws/validate_ami/ami.rs +++ /dev/null @@ 
-1,223 +0,0 @@
-//! The ami module owns the describing of images in EC2.
-
-use aws_sdk_ec2::{config::Region, types::Image, Client as Ec2Client};
-use futures::future::{join, ready};
-use futures::stream::{FuturesUnordered, StreamExt};
-use log::{info, trace};
-use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
-use std::collections::HashMap;
-
-use crate::aws::ami::launch_permissions::{get_launch_permissions, LaunchPermissionDef};
-
-/// Wrapper structure for the `ImageDef` struct, used during deserialization
-#[derive(Deserialize)]
-#[serde(untagged)]
-pub(crate) enum ImageData {
-    Image(ImageDef),
-    ImageList(Vec<ImageDef>),
-}
-
-impl ImageData {
-    pub(crate) fn images(&self) -> Vec<ImageDef> {
-        match self {
-            ImageData::Image(image) => vec![image.to_owned()],
-            ImageData::ImageList(images) => images.to_owned(),
-        }
-    }
-}
-
-/// Structure of the EC2 image fields that should be validated
-#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Clone)]
-pub(crate) struct ImageDef {
-    /// The ID of the EC2 image
-    pub(crate) id: String,
-
-    /// The name of the EC2 image
-    pub(crate) name: String,
-
-    /// Whether or not the EC2 image is public
-    #[serde(default)]
-    pub(crate) public: bool,
-
-    /// The launch permissions for the EC2 image.
-    pub(crate) launch_permissions: Option<Vec<LaunchPermissionDef>>,
-
-    /// Whether or not the EC2 image supports Elastic Network Adapter
-    #[serde(default = "default_ena_support")]
-    pub(crate) ena_support: bool,
-
-    /// The level of the EC2 image's Single Root I/O Virtualization support
-    #[serde(default = "default_sriov_net_support")]
-    pub(crate) sriov_net_support: String,
-}
-
-fn default_ena_support() -> bool {
-    true
-}
-
-fn default_sriov_net_support() -> String {
-    "simple".to_string()
-}
-
-impl From<(Image, Option<Vec<LaunchPermissionDef>>)> for ImageDef {
-    fn from(args: (Image, Option<Vec<LaunchPermissionDef>>)) -> Self {
-        Self {
-            id: args.0.image_id().unwrap_or_default().to_string(),
-            name: args.0.name().unwrap_or_default().to_string(),
-            public: args.0.public().unwrap_or_default(),
-            launch_permissions: args.1,
-            ena_support: args.0.ena_support().unwrap_or_default(),
-            sriov_net_support: args.0.sriov_net_support().unwrap_or_default().to_string(),
-        }
-    }
-}
-
-/// Fetches all images whose IDs are keys in `expected_images`. The map `expected_image_public` is
-/// used to determine if the launch permissions for the image should be fetched (only if the image
-/// is not public). The return value is a HashMap of Region to a Result, which is `Ok` if the
-/// request for that region was successful and `Err` if not. The Result contains a HashMap of
-/// `image_id` to `ImageDef`.
-pub(crate) async fn describe_images<'a>(
-    clients: &'a HashMap<Region, Ec2Client>,
-    expected_images: &HashMap<Region, Vec<ImageDef>>,
-) -> HashMap<&'a Region, Result<HashMap<String, ImageDef>>> {
-    // Build requests for images; we have to request with a regional client so we split them by
-    // region
-    let mut requests = Vec::with_capacity(clients.len());
-    clients.iter().for_each(|(region, ec2_client)| {
-        trace!("Requesting images in {}", region);
-        let get_future = describe_images_in_region(
-            region,
-            ec2_client,
-            expected_images
-                .get(region)
-                .map(|i| i.to_owned())
-                .unwrap_or_default()
-                .into_iter()
-                .map(|i| (i.id.clone(), i))
-                .collect::<HashMap<String, ImageDef>>(),
-        );
-
-        requests.push(join(ready(region), get_future));
-    });
-
-    // Send requests in parallel and wait for responses, collecting results into a list.
-    requests
-        .into_iter()
-        .collect::<FuturesUnordered<_>>()
-        .collect()
-        .await
-}
-
-/// Fetches the images whose IDs are keys in `expected_images`
-pub(crate) async fn describe_images_in_region(
-    region: &Region,
-    client: &Ec2Client,
-    expected_images: HashMap<String, ImageDef>,
-) -> Result<HashMap<String, ImageDef>> {
-    info!("Retrieving images in {}", region.to_string());
-    let mut images = HashMap::new();
-
-    // Send the request
-    let mut get_future = client
-        .describe_images()
-        .include_deprecated(true)
-        .set_image_ids(Some(Vec::from_iter(
-            expected_images.keys().map(|k| k.to_owned()),
-        )))
-        .into_paginator()
-        .send();
-
-    // Iterate over the retrieved images
-    while let Some(page) = get_future.next().await {
-        let retrieved_images = page
-            .context(error::DescribeImagesSnafu {
-                region: region.to_string(),
-            })?
-            .images()
-            .unwrap_or_default()
-            .to_owned();
-        for image in retrieved_images {
-            // Insert a new key-value pair into the map, with the key containing image ID
-            // and the value containing the ImageDef object created from the image
-            let image_id = image
-                .image_id()
-                .ok_or(error::Error::MissingField {
-                    missing: "image_id".to_string(),
-                })?
-                .to_string();
-            let expected_public = expected_images
-                .get(&image_id)
-                .ok_or(error::Error::MissingExpectedPublic {
-                    missing: image_id.clone(),
-                })?
-                .public;
-            // If the image is not expected to be public, retrieve the launch permissions
-            trace!(
-                "Retrieving launch permissions for {} in {}",
-                image_id,
-                region.as_ref()
-            );
-            let launch_permissions = if !expected_public {
-                Some(
-                    get_launch_permissions(client, region.as_ref(), &image_id)
-                        .await
-                        .context(error::GetLaunchPermissionsSnafu {
-                            region: region.as_ref(),
-                            image_id: image_id.clone(),
-                        })?,
-                )
-            } else {
-                None
-            };
-            let image_def = ImageDef::from((image.to_owned(), launch_permissions));
-            images.insert(image_id, image_def);
-        }
-    }
-
-    info!("Images in {} have been retrieved", region.to_string());
-    Ok(images)
-}
-
-pub(crate) mod error {
-    use aws_sdk_ec2::operation::describe_images::DescribeImagesError;
-    use aws_sdk_ssm::error::SdkError;
-    use aws_smithy_types::error::display::DisplayErrorContext;
-    use snafu::Snafu;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    #[allow(clippy::large_enum_variant)]
-    pub(crate) enum Error {
-        #[snafu(display(
-            "Failed to describe images in {}: {}",
-            region,
-            DisplayErrorContext(source)
-        ))]
-        DescribeImages {
-            region: String,
-            source: SdkError<DescribeImagesError>,
-        },
-
-        #[snafu(display(
-            "Failed to retrieve launch permissions for image {} in region {}: {}",
-            image_id,
-            region,
-            source
-        ))]
-        GetLaunchPermissions {
-            region: String,
-            image_id: String,
-            source: crate::aws::ami::launch_permissions::Error,
-        },
-
-        #[snafu(display("Missing field in image: {}", missing))]
-        MissingField { missing: String },
-
-        #[snafu(display("Missing image ID in expected image publicity map: {}", missing))]
-        MissingExpectedPublic { missing: String },
-    }
-}
-
-pub(crate) type Result<T> = std::result::Result<T, error::Error>;
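The untagged `ImageData` enum above lets the expected-AMIs JSON hold either a single object or a list of objects per region. A small check of that behavior, illustrative only (the field values are made up, and the types are assumed to be in scope):

```rust
// Both shapes deserialize via the untagged enum: a single ImageDef object,
// or a list of them. `images()` normalizes both to a Vec.
fn main() -> serde_json::Result<()> {
    let single: ImageData = serde_json::from_str(
        r#"{"id": "ami-111", "name": "one", "public": true}"#,
    )?;
    let list: ImageData = serde_json::from_str(
        r#"[{"id": "ami-111", "name": "one"}, {"id": "ami-222", "name": "two"}]"#,
    )?;
    assert_eq!(single.images().len(), 1);
    assert_eq!(list.images().len(), 2);
    Ok(())
}
```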
diff --git a/tools/pubsys/src/aws/validate_ami/mod.rs b/tools/pubsys/src/aws/validate_ami/mod.rs
deleted file mode 100644
index e827059c6c0..00000000000
--- a/tools/pubsys/src/aws/validate_ami/mod.rs
+++ /dev/null
@@ -1,850 +0,0 @@
-//! The validate_ami module owns the 'validate-ami' subcommand and controls the process of
-//! validating EC2 images
-
-pub(crate) mod ami;
-pub(crate) mod results;
-
-use self::ami::{ImageData, ImageDef};
-use self::results::{AmiValidationResult, AmiValidationResultStatus, AmiValidationResults};
-use crate::aws::client::build_client_config;
-use crate::aws::validate_ami::ami::describe_images;
-use crate::Args;
-use aws_sdk_ec2::{config::Region, Client as AmiClient};
-use clap::Parser;
-use log::{error, info, trace};
-use pubsys_config::InfraConfig;
-use snafu::ResultExt;
-use std::collections::{HashMap, HashSet};
-use std::fs::File;
-use std::path::PathBuf;
-
-/// Validates EC2 images by calling `describe-images` on all images in the file given by
-/// `expected-amis-path` and ensuring that the returned `public`, `ena-support`,
-/// `sriov-net-support`, and `launch-permissions` fields have the expected values.
-#[derive(Debug, Parser)]
-pub(crate) struct ValidateAmiArgs {
-    /// File holding the expected amis
-    #[arg(long)]
-    expected_amis_path: PathBuf,
-
-    /// Optional path where the validation results should be written
-    #[arg(long)]
-    write_results_path: Option<PathBuf>,
-
-    #[arg(long, requires = "write_results_path")]
-    /// Optional filter to only write validation results with these statuses to the above path
-    /// The available statuses are: `Correct`, `Incorrect`, `Missing`.
-    write_results_filter: Option<Vec<AmiValidationResultStatus>>,
-
-    #[arg(long)]
-    /// If this argument is given, print the validation results summary as a JSON object instead
-    /// of a plaintext table
-    json: bool,
-}
-
-/// Performs EC2 image validation and returns the `AmiValidationResults` object
-pub(crate) async fn validate(
-    args: &Args,
-    validate_ami_args: &ValidateAmiArgs,
-) -> Result<AmiValidationResults> {
-    info!("Parsing Infra.toml file");
-
-    // If a lock file exists, use that, otherwise use Infra.toml
-    let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false)
-        .context(error::ConfigSnafu)?;
-
-    trace!("Parsed infra config: {:#?}", infra_config);
-
-    let aws = infra_config.aws.unwrap_or_default();
-
-    // Parse the expected ami file
-    info!("Parsing expected ami file");
-    let expected_images = parse_expected_amis(&validate_ami_args.expected_amis_path).await?;
-
-    info!("Parsed expected ami file");
-
-    // Create a `HashMap` of `AmiClient`s, one for each region where validation should happen
-    let base_region = &Region::new(
-        aws.regions
-            .get(0)
-            .ok_or(error::Error::EmptyInfraRegions {
-                path: args.infra_config_path.clone(),
-            })?
-            .clone(),
-    );
-    let mut ami_clients = HashMap::with_capacity(expected_images.len());
-
-    for region in expected_images.keys() {
-        let client_config = build_client_config(region, base_region, &aws).await;
-        let ami_client = AmiClient::new(&client_config);
-        ami_clients.insert(region.clone(), ami_client);
-    }
-
-    // Retrieve the EC2 images using the `AmiClient`s
-    info!("Retrieving EC2 images");
-    let images = describe_images(&ami_clients, &expected_images)
-        .await
-        .into_iter()
-        .map(|(region, result)| {
-            (
-                region,
-                result.map_err(|e| {
-                    error!(
-                        "Failed to retrieve images in region {}: {}",
-                        region.to_string(),
-                        e
-                    );
-                    error::Error::UnreachableRegion {
-                        region: region.to_string(),
-                    }
-                }),
-            )
-        })
-        .collect::<HashMap<&Region, Result<HashMap<AmiId, ImageDef>>>>();
-
-    // Validate the retrieved EC2 images per region
-    info!("Validating EC2 images");
-    let results: HashMap<Region, HashSet<AmiValidationResult>> = images
-        .into_iter()
-        .map(|(region, region_result)| {
-            (
-                region.clone(),
-                validate_images_in_region(
-                    &expected_images
-                        .get(region)
-                        .map(|e| e.to_owned())
-                        .unwrap_or_default(),
-                    &region_result,
-                    region,
-                ),
-            )
-        })
-        .collect();
-
-    let validation_results = AmiValidationResults::from_result_map(results);
-
-    // If a path was given, write the results
-    if let Some(write_results_path) = &validate_ami_args.write_results_path {
-        // Filter the results by given status, and if no statuses were given, get all results
-        info!("Writing results to file");
-        let results = if let Some(filter) = &validate_ami_args.write_results_filter {
-            validation_results.get_results_for_status(filter)
-        } else {
-            validation_results.get_all_results()
-        };
-
-        // Write the results as JSON
-        serde_json::to_writer_pretty(
-            &File::create(write_results_path).context(error::WriteValidationResultsSnafu {
-                path: write_results_path,
-            })?,
-            &results,
-        )
-        .context(error::SerializeValidationResultsSnafu)?;
-    }
-
-    Ok(validation_results)
-}
-
-/// Validates EC2 images in a single region, based on a `Vec` of expected images
-/// and a `HashMap` of actual retrieved images. Returns a
-/// `HashSet` containing the result objects.
-pub(crate) fn validate_images_in_region(
-    expected_images: &[ImageDef],
-    actual_images: &Result<HashMap<AmiId, ImageDef>>,
-    region: &Region,
-) -> HashSet<AmiValidationResult> {
-    match actual_images {
-        Ok(actual_images) => expected_images
-            .iter()
-            .map(|image| {
-                let new_image = if image.public {
-                    ImageDef {
-                        launch_permissions: None,
-                        ..image.clone()
-                    }
-                } else {
-                    image.clone()
-                };
-                AmiValidationResult::new(
-                    image.id.clone(),
-                    new_image,
-                    Ok(actual_images.get(&image.id).map(|v| v.to_owned())),
-                    region.clone(),
-                )
-            })
-            .collect(),
-        Err(_) => expected_images
-            .iter()
-            .map(|image| {
-                AmiValidationResult::new(
-                    image.id.clone(),
-                    image.clone(),
-                    Err(error::Error::UnreachableRegion {
-                        region: region.to_string(),
-                    }),
-                    region.clone(),
-                )
-            })
-            .collect(),
-    }
-}
-
-type RegionName = String;
-type AmiId = String;
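One subtlety in `validate_images_in_region` above: for a public image, launch permissions are never fetched, so the expected side must also drop them before comparison. A compressed illustration of that normalization step (the `normalize_expected` helper is hypothetical; only `ImageDef` comes from the deleted files):

```rust
// Hypothetical distillation of the expected-side normalization above: a
// public image's launch permissions are not retrieved, so the expected value
// must also be None for a `Correct` comparison to line up.
fn normalize_expected(image: &ImageDef) -> ImageDef {
    if image.public {
        ImageDef {
            launch_permissions: None,
            ..image.clone()
        }
    } else {
        image.clone()
    }
}
```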
-pub(crate) async fn parse_expected_amis(
-    expected_amis_path: &PathBuf,
-) -> Result<HashMap<Region, Vec<ImageDef>>> {
-    // Parse the JSON file as a `HashMap` of region_name, mapped to an `ImageData` struct
-    let expected_amis: HashMap<RegionName, ImageData> = serde_json::from_reader(
-        &File::open(expected_amis_path.clone()).context(error::ReadExpectedImagesFileSnafu {
-            path: expected_amis_path,
-        })?,
-    )
-    .context(error::ParseExpectedImagesFileSnafu)?;
-
-    // Extract the `Vec<ImageDef>` from the `ImageData` structs
-    let vectored_images = expected_amis
-        .into_iter()
-        .map(|(region, value)| (Region::new(region), value.images()))
-        .collect::<HashMap<Region, Vec<ImageDef>>>();
-
-    Ok(vectored_images)
-}
-
-/// Common entrypoint from main()
-pub(crate) async fn run(args: &Args, validate_ami_args: &ValidateAmiArgs) -> Result<()> {
-    let results = validate(args, validate_ami_args).await?;
-
-    if validate_ami_args.json {
-        println!(
-            "{}",
-            serde_json::to_string_pretty(&results.get_json_summary())
-                .context(error::SerializeResultsSummarySnafu)?
-        )
-    } else {
-        println!("{}", results);
-    }
-    Ok(())
-}
-
-mod error {
-    use snafu::Snafu;
-    use std::path::PathBuf;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Error reading config: {}", source))]
-        Config { source: pubsys_config::Error },
-
-        #[snafu(display("Empty regions array in Infra.toml at path {}", path.display()))]
-        EmptyInfraRegions { path: PathBuf },
-
-        #[snafu(display("Failed to parse image file: {}", source))]
-        ParseExpectedImagesFile { source: serde_json::Error },
-
-        #[snafu(display("Failed to read image file: {:?}", path))]
-        ReadExpectedImagesFile {
-            source: std::io::Error,
-            path: PathBuf,
-        },
-
-        #[snafu(display("Failed to serialize validation results to json: {}", source))]
-        SerializeValidationResults { source: serde_json::Error },
-
-        #[snafu(display("Failed to retrieve images from region {}", region))]
-        UnreachableRegion { region: String },
-
-        #[snafu(display("Failed to write validation results to {:?}: {}", path, source))]
-        WriteValidationResults {
-            path: PathBuf,
-            source: std::io::Error,
-        },
-
-        #[snafu(display("Failed to serialize results summary to JSON: {}", source))]
-        SerializeResultsSummary { source: serde_json::Error },
-    }
-}
-
-pub(crate) use error::Error;
-
-type Result<T> = std::result::Result<T, Error>;
-
-#[cfg(test)]
-mod test {
-    use super::ami::ImageDef;
-    use super::validate_images_in_region;
-    use crate::aws::{
-        ami::launch_permissions::LaunchPermissionDef,
-        validate_ami::results::{AmiValidationResult, AmiValidationResultStatus},
-    };
-    use aws_sdk_ec2::config::Region;
-    use std::collections::{HashMap, HashSet};
-
-    // These tests assert that the images can be validated correctly.
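For orientation before the full tests, here is a minimal sketch of how `validate_images_in_region` classifies a result. The `example-*` names are placeholders; the construction mirrors the tests that follow, using only types and functions defined in this module:

```rust
// One expected image whose actual counterpart differs in `ena_support`,
// so its validation status comes back as `Incorrect`.
let expected = vec![ImageDef {
    id: "example-image-id".to_string(),
    name: "example-image".to_string(),
    public: true,
    launch_permissions: None,
    ena_support: true,
    sriov_net_support: "simple".to_string(),
}];
let actual = HashMap::from([(
    "example-image-id".to_string(),
    ImageDef {
        ena_support: false, // differs from the expected value
        ..expected[0].clone()
    },
)]);
let results = validate_images_in_region(&expected, &Ok(actual), &Region::new("us-west-2"));
assert!(results
    .iter()
    .all(|r| r.status == AmiValidationResultStatus::Incorrect));
```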
- - // Tests validation of images where the expected value is equal to the actual value - #[test] - fn validate_images_all_correct() { - let expected_parameters: Vec = vec![ - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let actual_parameters: HashMap = HashMap::from([ - ( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ]); - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Ok(actual_parameters), - &Region::new("us-west-2"), - ); - - for result in &results { - assert_eq!(result.status, AmiValidationResultStatus::Correct); - } - assert_eq!(results, expected_results); - } - - // Tests validation of images where the expected value is different from the actual value - #[test] - fn validate_images_all_incorrect() { - let expected_parameters: Vec = vec![ - ImageDef { - 
id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let actual_parameters: HashMap = HashMap::from([ - ( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: false, - launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - }, - ), - ]); - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: false, - launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Ok(actual_parameters), - &Region::new("us-west-2"), - ); - for result in &results { - assert_eq!(result.status, AmiValidationResultStatus::Incorrect); - } - assert_eq!(results, expected_results); - } - - // Tests validation of images where the actual value is missing - #[test] - fn validate_images_all_missing() { - let expected_parameters: Vec = vec![ - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - 
launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let actual_parameters = HashMap::new(); - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Ok(actual_parameters), - &Region::new("us-west-2"), - ); - for result in &results { - assert_eq!(result.status, AmiValidationResultStatus::Missing); - } - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where each reachable status (Correct, Incorrect, Missing) happens once - #[test] - fn validate_images_mixed() { - let expected_parameters: Vec = vec![ - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let actual_parameters: HashMap = HashMap::from([ - ( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: false, - launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ]); - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - 
launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: false, - launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Ok(actual_parameters), - &Region::new("us-west-2"), - ); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the region is unreachable - #[test] - fn validate_images_unreachable() { - let expected_parameters: Vec = vec![ - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Err(crate::aws::validate_ami::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - &Region::new("us-west-2"), - ); - - assert_eq!(results, expected_results); - } -} diff --git a/tools/pubsys/src/aws/validate_ami/results.rs b/tools/pubsys/src/aws/validate_ami/results.rs deleted file mode 100644 index 698fbe01ae1..00000000000 
--- a/tools/pubsys/src/aws/validate_ami/results.rs
+++ /dev/null
@@ -1,1034 +0,0 @@
-//! The results module owns the reporting of EC2 image validation results.
-
-use super::ami::ImageDef;
-use super::Result;
-use aws_sdk_ec2::config::Region;
-use serde::{Deserialize, Serialize};
-use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize};
-use std::collections::{HashMap, HashSet};
-use std::fmt::{self, Display};
-use tabled::{Table, Tabled};
-
-/// Represents the possible status of an EC2 image validation
-#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize, Clone)]
-pub(crate) enum AmiValidationResultStatus {
-    /// The image was found and its monitored fields have the expected values
-    Correct,
-
-    /// The image was found but some of the monitored fields do not have the expected values
-    Incorrect,
-
-    /// The image was expected but not included in the actual images
-    Missing,
-
-    /// The region containing the image was not reachable
-    Unreachable,
-}
-
-derive_display_from_serialize!(AmiValidationResultStatus);
-derive_fromstr_from_deserialize!(AmiValidationResultStatus);
-
-/// Represents a single EC2 image validation result
-#[derive(Debug, Eq, Hash, PartialEq, Serialize)]
-pub(crate) struct AmiValidationResult {
-    /// The ID of the image
-    pub(crate) id: String,
-
-    /// `ImageDef` containing expected values for the image
-    pub(crate) expected_image_def: ImageDef,
-
-    /// `ImageDef` containing actual values for the image
-    pub(crate) actual_image_def: Option<ImageDef>,
-
-    /// The region the image resides in
-    #[serde(serialize_with = "serialize_region")]
-    pub(crate) region: Region,
-
-    /// The validation status of the image
-    pub(crate) status: AmiValidationResultStatus,
-}
-
-fn serialize_region<S>(region: &Region, serializer: S) -> std::result::Result<S::Ok, S::Error>
-where
-    S: serde::Serializer,
-{
-    serializer.serialize_str(region.to_string().as_str())
-}
-
-impl AmiValidationResult {
-    pub(crate) fn new(
-        id: String,
-        expected_image_def: ImageDef,
-        actual_image_def: Result<Option<ImageDef>>,
-        region: Region,
-    ) -> Self {
-        // Determine the validation status based on equality, presence, and absence of expected
-        // and actual image values
-        let status = match (&expected_image_def, &actual_image_def) {
-            (expected_image_def, Ok(Some(actual_image_def)))
-                if actual_image_def == expected_image_def =>
-            {
-                AmiValidationResultStatus::Correct
-            }
-            (_, Ok(Some(_))) => AmiValidationResultStatus::Incorrect,
-            (_, Ok(None)) => AmiValidationResultStatus::Missing,
-            (_, Err(_)) => AmiValidationResultStatus::Unreachable,
-        };
-        AmiValidationResult {
-            id,
-            expected_image_def,
-            actual_image_def: actual_image_def.unwrap_or_default(),
-            region,
-            status,
-        }
-    }
-}
-
-#[derive(Tabled, Serialize)]
-struct AmiValidationRegionSummary {
-    correct: u64,
-    incorrect: u64,
-    missing: u64,
-    unreachable: u64,
-}
-
-impl From<&HashSet<AmiValidationResult>> for AmiValidationRegionSummary {
-    fn from(results: &HashSet<AmiValidationResult>) -> Self {
-        let mut region_validation = AmiValidationRegionSummary {
-            correct: 0,
-            incorrect: 0,
-            missing: 0,
-            unreachable: 0,
-        };
-        for validation_result in results {
-            match validation_result.status {
-                AmiValidationResultStatus::Correct => region_validation.correct += 1,
-                AmiValidationResultStatus::Incorrect => region_validation.incorrect += 1,
-                AmiValidationResultStatus::Missing => region_validation.missing += 1,
-                AmiValidationResultStatus::Unreachable => region_validation.unreachable += 1,
-            }
-        }
-        region_validation
-    }
-}
-
-/// Represents all EC2 image validation results
-#[derive(Debug)]
-pub(crate) struct AmiValidationResults {
-    pub(crate) results: HashMap<Region, HashSet<AmiValidationResult>>,
-}
-
-impl Default for AmiValidationResults {
-    fn default() -> Self {
-        Self::from_result_map(HashMap::new())
-    }
-}
-
-impl Display for AmiValidationResults {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // Create a summary for each region, counting the number of parameters per status
-        let region_validations: HashMap<Region, AmiValidationRegionSummary> =
-            self.get_results_summary();
-
-        // Represent the `HashMap` of summaries as a `Table`
-        let table = Table::new(
-            region_validations
-                .iter()
-                .map(|(region, results)| (region.to_string(), results))
-                .collect::<Vec<(String, &AmiValidationRegionSummary)>>(),
-        )
-        .to_string();
-        write!(f, "{}", table)
-    }
-}
-
-impl AmiValidationResults {
-    pub(crate) fn from_result_map(results: HashMap<Region, HashSet<AmiValidationResult>>) -> Self {
-        AmiValidationResults { results }
-    }
-
-    /// Returns a `HashSet` containing all validation results whose status is present in
-    /// `requested_status`
-    pub(crate) fn get_results_for_status(
-        &self,
-        requested_status: &[AmiValidationResultStatus],
-    ) -> HashSet<&AmiValidationResult> {
-        let mut results = HashSet::new();
-        for region_results in self.results.values() {
-            results.extend(
-                region_results
-                    .iter()
-                    .filter(|result| requested_status.contains(&result.status))
-                    .collect::<HashSet<&AmiValidationResult>>(),
-            )
-        }
-        results
-    }
-
-    /// Returns a `HashSet` containing all validation results
-    pub(crate) fn get_all_results(&self) -> HashSet<&AmiValidationResult> {
-        let mut results = HashSet::new();
-        for region_results in self.results.values() {
-            results.extend(region_results)
-        }
-        results
-    }
-
-    fn get_results_summary(&self) -> HashMap<Region, AmiValidationRegionSummary> {
-        self.results
-            .iter()
-            .map(|(region, region_result)| {
-                (
-                    region.clone(),
-                    AmiValidationRegionSummary::from(region_result),
-                )
-            })
-            .collect()
-    }
-
-    pub(crate) fn get_json_summary(&self) -> serde_json::Value {
-        serde_json::json!(self
-            .get_results_summary()
-            .into_iter()
-            .map(|(region, results)| (region.to_string(), results))
-            .collect::<HashMap<String, AmiValidationRegionSummary>>())
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::{AmiValidationResult, AmiValidationResultStatus, AmiValidationResults};
-    use crate::aws::validate_ami::ami::ImageDef;
-    use aws_sdk_ssm::config::Region;
-    use std::collections::{HashMap, HashSet};
-
-    // These tests assert that the `get_results_for_status` function returns the correct values.
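As a quick illustration of the filtering API above before the full tests, here is a minimal sketch; the region and the `example-*` identifiers are placeholders, and the construction mirrors the tests that follow:

```rust
// Build a results collection for one region, then keep only the `Missing` results.
let expected = ImageDef {
    id: "example-image-id".to_string(),
    name: "example-image".to_string(),
    public: true,
    launch_permissions: None,
    ena_support: true,
    sriov_net_support: "simple".to_string(),
};
let results = AmiValidationResults::from_result_map(HashMap::from([(
    Region::new("us-west-2"),
    HashSet::from([AmiValidationResult::new(
        expected.id.clone(),
        expected,
        Ok(None), // no actual image was found, so the status is `Missing`
        Region::new("us-west-2"),
    )]),
)]));
let missing = results.get_results_for_status(&[AmiValidationResultStatus::Missing]);
assert_eq!(missing.len(), 1);
```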
- - // Tests empty `AmiValidationResults` - #[test] - fn get_results_for_status_empty() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - (Region::new("us-west-2"), HashSet::from([])), - (Region::new("us-east-1"), HashSet::from([])), - ])); - let results_filtered = results.get_results_for_status(&vec![ - AmiValidationResultStatus::Correct, - AmiValidationResultStatus::Incorrect, - AmiValidationResultStatus::Missing, - ]); - - assert_eq!(results_filtered, HashSet::new()); - } - - // Tests the `Correct` status - #[test] - fn get_results_for_status_correct() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: 
"test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = - results.get_results_for_status(&vec![AmiValidationResultStatus::Correct]); - - assert_eq!( - results_filtered, - HashSet::from([ - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ) - ]) - ); - } - - // Tests a filter containing the `Correct` and `Incorrect` statuses - #[test] - fn get_results_for_status_correct_incorrect() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - 
"test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = results.get_results_for_status(&vec![ - AmiValidationResultStatus::Correct, - AmiValidationResultStatus::Incorrect, - ]); - - assert_eq!( - results_filtered, - HashSet::from([ - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ) - ]) - ); - } - - // Tests a filter containing all statuses - #[test] - fn get_results_for_status_all() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: 
"test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-east-1"), - ), - ]), - ), - ( - Region::new("us-east-2"), - HashSet::from([AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::error::Error::UnreachableRegion { - region: "us-east-2".to_string(), - }), - Region::new("us-east-2"), - )]), - ), - ])); - let results_filtered = results.get_results_for_status(&vec![ - AmiValidationResultStatus::Correct, - AmiValidationResultStatus::Incorrect, - AmiValidationResultStatus::Missing, - AmiValidationResultStatus::Unreachable, - ]); - - assert_eq!( - results_filtered, - HashSet::from([ - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - 
Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - &AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-east-1"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::error::Error::UnreachableRegion { - region: "us-east-2".to_string(), - }), - Region::new("us-east-2"), - ), - ]) - ); - } - - // Tests the `Missing` filter when none of the AmiValidationResults have this status - #[test] - fn get_results_for_status_missing_none() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: 
"simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = - results.get_results_for_status(&vec![AmiValidationResultStatus::Missing]); - - assert_eq!(results_filtered, HashSet::new()); - } -} diff --git a/tools/pubsys/src/aws/validate_ssm/mod.rs b/tools/pubsys/src/aws/validate_ssm/mod.rs deleted file mode 100644 index 3dc5f4ee848..00000000000 --- a/tools/pubsys/src/aws/validate_ssm/mod.rs +++ /dev/null @@ -1,797 +0,0 @@ -//! The validate_ssm module owns the 'validate-ssm' subcommand and controls the process of -//! 
validating SSM parameters and AMIs
-
-pub mod results;
-
-use self::results::{SsmValidationResult, SsmValidationResultStatus, SsmValidationResults};
-use super::ssm::ssm::get_parameters_by_prefix;
-use super::ssm::{SsmKey, SsmParameters};
-use crate::aws::client::build_client_config;
-use crate::Args;
-use aws_sdk_ssm::{config::Region, Client as SsmClient};
-use clap::Parser;
-use log::{error, info, trace};
-use pubsys_config::InfraConfig;
-use snafu::ResultExt;
-use std::collections::{HashMap, HashSet};
-use std::fs::File;
-use std::path::PathBuf;
-
-/// Validates SSM parameters and AMIs
-#[derive(Debug, Parser)]
-pub struct ValidateSsmArgs {
-    /// File holding the expected parameters
-    #[arg(long)]
-    expected_parameters_path: PathBuf,
-
-    /// If this flag is set, check for unexpected parameters in the validation regions. If not,
-    /// only the parameters present in the expected parameters file will be validated.
-    #[arg(long)]
-    check_unexpected: bool,
-
-    /// Optional path where the validation results should be written
-    #[arg(long)]
-    write_results_path: Option<PathBuf>,
-
-    /// Optional filter to only write validation results with these statuses to the above path.
-    /// Available statuses are: `Correct`, `Incorrect`, `Missing`, `Unexpected`
-    #[arg(long, requires = "write_results_path")]
-    write_results_filter: Option<Vec<SsmValidationResultStatus>>,
-
-    /// If this flag is added, print the results summary table as JSON instead of a
-    /// plaintext table
-    #[arg(long)]
-    json: bool,
-}
-
-/// Performs SSM parameter validation and returns the `SsmValidationResults` object
-pub async fn validate(
-    args: &Args,
-    validate_ssm_args: &ValidateSsmArgs,
-) -> Result<SsmValidationResults> {
-    info!("Parsing Infra.toml file");
-
-    // If a lock file exists, use that, otherwise use Infra.toml
-    let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false)
-        .context(error::ConfigSnafu)?;
-
-    let aws = infra_config.aws.clone().unwrap_or_default();
-
-    trace!("Parsed infra config: {:#?}", infra_config);
-
-    let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or("");
-
-    // Parse the file holding expected parameters
-    info!("Parsing expected parameters file");
-    let expected_parameters = parse_parameters(&validate_ssm_args.expected_parameters_path).await?;
-
-    info!("Parsed expected parameters file");
-
-    // Create a `HashMap` of `SsmClient`s, one for each region where validation should happen
-    let base_region = Region::new(aws.regions[0].clone());
-    let mut ssm_clients = HashMap::with_capacity(expected_parameters.len());
-
-    for region in expected_parameters.keys() {
-        let client_config = build_client_config(region, &base_region, &aws).await;
-        let ssm_client = SsmClient::new(&client_config);
-        ssm_clients.insert(region.clone(), ssm_client);
-    }
-
-    // Retrieve the SSM parameters using the `SsmClient`s
-    info!("Retrieving SSM parameters");
-    let parameters = get_parameters_by_prefix(&ssm_clients, ssm_prefix)
-        .await
-        .into_iter()
-        .map(|(region, result)| {
-            (
-                region,
-                result.map_err(|e| {
-                    error!(
-                        "Failed to retrieve parameters in region {}: {}",
-                        region.to_string(),
-                        e
-                    );
-                    error::Error::UnreachableRegion {
-                        region: region.to_string(),
-                    }
-                }),
-            )
-        })
-        .collect::<HashMap<&Region, Result<SsmParameters>>>();
-
-    // Validate the retrieved SSM parameters per region
-    info!("Validating SSM parameters");
-    let results: HashMap<Region, HashSet<SsmValidationResult>> = parameters
-        .into_iter()
-        .map(|(region, region_result)| {
-            (
-                region.clone(),
-                validate_parameters_in_region(
-                    expected_parameters.get(region).unwrap_or(&HashMap::new()),
-                    &region_result,
-                    validate_ssm_args.check_unexpected,
-                ),
-            )
-        })
-        .collect::<HashMap<Region, HashSet<SsmValidationResult>>>();
-
-    let validation_results = SsmValidationResults::new(results);
-
-    // If a path was given to write the results to, write the results
-    if let Some(write_results_path) = &validate_ssm_args.write_results_path {
-        // Filter the results by the given statuses; if no statuses were given, get all results
-        info!("Writing results to file");
-        let results = if let Some(filter) = &validate_ssm_args.write_results_filter {
-            validation_results.get_results_for_status(filter)
-        } else {
-            validation_results.get_all_results()
-        };
-
-        // Write the results as JSON
-        serde_json::to_writer_pretty(
-            &File::create(write_results_path).context(error::WriteValidationResultsSnafu {
-                path: write_results_path,
-            })?,
-            &results,
-        )
-        .context(error::SerializeValidationResultsSnafu)?;
-    }
-
-    Ok(validation_results)
-}
-
-/// Validates SSM parameters in a single region, based on a `HashMap<SsmKey, String>` of expected
-/// parameters and a `HashMap<SsmKey, String>` of actual retrieved parameters. Returns a `HashSet`
-/// of `SsmValidationResult` objects.
-pub(crate) fn validate_parameters_in_region(
-    expected_parameters: &HashMap<SsmKey, String>,
-    actual_parameters: &Result<SsmParameters>,
-    check_unexpected: bool,
-) -> HashSet<SsmValidationResult> {
-    match actual_parameters {
-        Ok(actual_parameters) => {
-            // Clone the `HashMap` of actual parameters so items can be removed
-            let mut actual_parameters = actual_parameters.clone();
-            let mut results = HashSet::new();
-
-            // Validate all expected parameters, creating an `SsmValidationResult` object and
-            // removing the corresponding parameter from `actual_parameters` if found
-            for (ssm_key, ssm_value) in expected_parameters {
-                results.insert(SsmValidationResult::new(
-                    ssm_key.name.to_owned(),
-                    Some(ssm_value.clone()),
-                    Ok(actual_parameters.get(ssm_key).map(|v| v.to_owned())),
-                    ssm_key.region.clone(),
-                ));
-                actual_parameters.remove(ssm_key);
-            }
-
-            if check_unexpected {
-                // Any remaining parameters in `actual_parameters` were not present in
-                // `expected_parameters` and therefore get the `Unexpected` status
-                for (ssm_key, ssm_value) in actual_parameters {
-                    results.insert(SsmValidationResult::new(
-                        ssm_key.name.to_owned(),
-                        None,
-                        Ok(Some(ssm_value)),
-                        ssm_key.region.clone(),
-                    ));
-                }
-            }
-            results
-        }
-        Err(_) => expected_parameters
-            .iter()
-            .map(|(ssm_key, ssm_value)| {
-                SsmValidationResult::new(
-                    ssm_key.name.to_owned(),
-                    Some(ssm_value.to_owned()),
-                    Err(error::Error::UnreachableRegion {
-                        region: ssm_key.region.to_string(),
-                    }),
-                    ssm_key.region.clone(),
-                )
-            })
-            .collect(),
-    }
-}
-
-type RegionName = String;
-type ParameterName = String;
-type ParameterValue = String;
-
-/// Parse the file holding expected parameters. Return a `HashMap` of `Region` mapped to a
-/// `HashMap` of the parameters in that region, with each parameter being a mapping of `SsmKey`
-/// to its value as `String`.
-pub(crate) async fn parse_parameters(
-    expected_parameters_file: &PathBuf,
-) -> Result<HashMap<Region, HashMap<SsmKey, String>>> {
-    // Parse the JSON file as a `HashMap` of region_name, mapped to a `HashMap` of parameter_name
-    // and parameter_value
-    let expected_parameters: HashMap<RegionName, HashMap<ParameterName, ParameterValue>> =
-        serde_json::from_reader(&File::open(expected_parameters_file.clone()).context(
-            error::ReadExpectedParameterFileSnafu {
-                path: expected_parameters_file,
-            },
-        )?)
-        .context(error::ParseExpectedParameterFileSnafu)?;
-
-    // Iterate over the parsed `HashMap`, converting the nested `HashMap` into a `HashMap` of
-    // `Region` mapped to a `HashMap` of `SsmKey`, `String`
-    let parameter_map = expected_parameters
-        .into_iter()
-        .map(|(region, parameters)| {
-            (
-                Region::new(region.clone()),
-                parameters
-                    .into_iter()
-                    .map(|(parameter_name, parameter_value)| {
-                        (
-                            SsmKey::new(Region::new(region.clone()), parameter_name),
-                            parameter_value,
-                        )
-                    })
-                    .collect::<HashMap<SsmKey, String>>(),
-            )
-        })
-        .collect();
-
-    Ok(parameter_map)
-}
-
-/// Common entrypoint from main()
-pub(crate) async fn run(args: &Args, validate_ssm_args: &ValidateSsmArgs) -> Result<()> {
-    let results = validate(args, validate_ssm_args).await?;
-
-    if validate_ssm_args.json {
-        println!(
-            "{}",
-            serde_json::to_string_pretty(&results.get_json_summary())
-                .context(error::SerializeResultsSummarySnafu)?
-        )
-    } else {
-        println!("{}", results)
-    }
-    Ok(())
-}
-
-pub(crate) mod error {
-    use crate::aws::ssm::ssm;
-    use snafu::Snafu;
-    use std::path::PathBuf;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub enum Error {
-        #[snafu(display("Error reading config: {}", source))]
-        Config { source: pubsys_config::Error },
-
-        #[snafu(display("Failed to fetch parameters from SSM: {}", source))]
-        FetchSsm { source: ssm::error::Error },
-
-        #[snafu(display("Infra.toml is missing {}", missing))]
-        MissingConfig { missing: String },
-
-        #[snafu(display("Failed to validate SSM parameters: {}", missing))]
-        ValidateSsm { missing: String },
-
-        #[snafu(display("Failed to parse expected parameters file: {}", source))]
-        ParseExpectedParameterFile { source: serde_json::Error },
-
-        #[snafu(display("Failed to read expected parameters file: {}", path.display()))]
-        ReadExpectedParameterFile {
-            source: std::io::Error,
-            path: PathBuf,
-        },
-
-        #[snafu(display("Invalid validation status filter: {}", filter))]
-        InvalidStatusFilter { filter: String },
-
-        #[snafu(display("Failed to serialize validation results to json: {}", source))]
-        SerializeValidationResults { source: serde_json::Error },
-
-        #[snafu(display("Failed to retrieve SSM parameters from region {}", region))]
-        UnreachableRegion { region: String },
-
-        #[snafu(display("Failed to write validation results to {}: {}", path.display(), source))]
-        WriteValidationResults {
-            path: PathBuf,
-            source: std::io::Error,
-        },
-
-        #[snafu(display("Failed to serialize results summary into JSON: {}", source))]
-        SerializeResultsSummary { source: serde_json::Error },
-    }
-}
-
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
-
-#[cfg(test)]
-mod test {
-    use crate::aws::{
-        ssm::{SsmKey, SsmParameters},
-        validate_ssm::{results::SsmValidationResult, validate_parameters_in_region},
-    };
-    use aws_sdk_ssm::config::Region;
-    use std::collections::{HashMap, HashSet};
-
-    // These tests assert that the parameters can be validated correctly.
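For orientation before the full tests, here is a minimal sketch of how the `check_unexpected` flag changes the outcome of `validate_parameters_in_region`. The parameter names are placeholders; the construction mirrors the tests that follow:

```rust
// One expected parameter that matches, plus one actual parameter that was never
// expected; with `check_unexpected` set to true the extra parameter is reported.
let expected = HashMap::from([(
    SsmKey {
        region: Region::new("us-west-2"),
        name: "example-parameter-name".to_string(),
    },
    "example-parameter-value".to_string(),
)]);
let mut actual: SsmParameters = expected.clone();
actual.insert(
    SsmKey {
        region: Region::new("us-west-2"),
        name: "surprise-parameter-name".to_string(),
    },
    "surprise-parameter-value".to_string(),
);
let results = validate_parameters_in_region(&expected, &Ok(actual), true);
// Two results: one `Correct` (the match) and one `Unexpected` (the extra parameter).
// With `check_unexpected` set to false, only the `Correct` result would be returned.
assert_eq!(results.len(), 2);
```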
- - // Tests validation of parameters where the expected value is equal to the actual value - #[test] - fn validate_parameters_all_correct() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(Some("test3-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the expected value is different from the actual value - #[test] - fn validate_parameters_all_incorrect() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value-wrong".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value-wrong".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value-wrong".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(Some("test3-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - 
"test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the actual value is missing - #[test] - fn validate_parameters_all_missing() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::new(); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the expected value is missing - #[test] - fn validate_parameters_all_unexpected() { - let expected_parameters: HashMap = HashMap::new(); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - None, - Ok(Some("test3-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - None, - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - None, - Ok(Some("test2-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where each status (Correct, Incorrect, Missing, Unexpected) - // happens once - #[test] - fn validate_parameters_mixed() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: 
Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value-wrong".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test4-parameter-name".to_string(), - }, - "test4-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where each reachable status (Correct, Incorrect, Missing, Unexpected) - // happens once and `--check-unexpected` is false - #[test] - fn validate_parameters_mixed_unexpected_false() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value-wrong".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test4-parameter-name".to_string(), - }, - "test4-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), false); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the 
status is Unreachable - #[test] - fn validate_parameters_unreachable() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - ]); - let results = validate_parameters_in_region( - &expected_parameters, - &Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - false, - ); - - assert_eq!(results, expected_results); - } -} diff --git a/tools/pubsys/src/aws/validate_ssm/results.rs b/tools/pubsys/src/aws/validate_ssm/results.rs deleted file mode 100644 index eadd4290562..00000000000 --- a/tools/pubsys/src/aws/validate_ssm/results.rs +++ /dev/null @@ -1,615 +0,0 @@ -//! The results module owns the reporting of SSM validation results. 
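The heart of the deleted module is a five-way classification: each parameter's (expected, actual) pair maps to exactly one validation status. A minimal, dependency-free sketch of that mapping, useful for following the full listing below; `Status`, `classify`, and the unit error type are illustrative stand-ins, not this module's API:

    // Condensed sketch of the status classification used by SsmValidationResult::new.
    #[derive(Debug, PartialEq)]
    enum Status {
        Correct,
        Incorrect,
        Missing,
        Unexpected,
        Unreachable,
    }

    // `actual` is Err when the region could not be queried at all.
    fn classify(expected: Option<&str>, actual: Result<Option<&str>, ()>) -> Status {
        match (expected, actual) {
            (Some(e), Ok(Some(a))) if e == a => Status::Correct,
            (Some(_), Ok(Some(_))) => Status::Incorrect,
            (_, Ok(None)) => Status::Missing,    // absent from the actual parameters
            (None, Ok(_)) => Status::Unexpected, // present, but nobody asked for it
            (_, Err(_)) => Status::Unreachable,
        }
    }

    fn main() {
        assert_eq!(classify(Some("v1"), Ok(Some("v1"))), Status::Correct);
        assert_eq!(classify(Some("v1"), Ok(Some("v2"))), Status::Incorrect);
        assert_eq!(classify(Some("v1"), Ok(None)), Status::Missing);
        assert_eq!(classify(None, Ok(Some("v2"))), Status::Unexpected);
        assert_eq!(classify(Some("v1"), Err(())), Status::Unreachable);
    }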
-
-use crate::aws::validate_ssm::Result;
-use aws_sdk_ssm::config::Region;
-use serde::{Deserialize, Serialize};
-use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize};
-use std::collections::{HashMap, HashSet};
-use std::fmt::{self, Display};
-use tabled::{Table, Tabled};
-
-/// Represent the possible status of an SSM validation
-#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize, Clone)]
-pub enum SsmValidationResultStatus {
-    /// The expected value was equal to the actual value
-    Correct,
-
-    /// The expected value was different from the actual value
-    Incorrect,
-
-    /// The parameter was expected but not included in the actual parameters
-    Missing,
-
-    /// The parameter was present in the actual parameters but not expected
-    Unexpected,
-
-    /// The region containing the parameter is not reachable
-    Unreachable,
-}
-
-derive_display_from_serialize!(SsmValidationResultStatus);
-derive_fromstr_from_deserialize!(SsmValidationResultStatus);
-
-/// Represents a single SSM validation result
-#[derive(Debug, Eq, Hash, PartialEq, Serialize)]
-pub struct SsmValidationResult {
-    /// The name of the parameter
-    pub(crate) name: String,
-
-    /// The expected value of the parameter
-    pub(crate) expected_value: Option<String>,
-
-    /// The actual retrieved value of the parameter
-    pub(crate) actual_value: Option<String>,
-
-    /// The region the parameter resides in
-    #[serde(serialize_with = "serialize_region")]
-    pub(crate) region: Region,
-
-    /// The validation status of the parameter
-    pub(crate) status: SsmValidationResultStatus,
-}
-
-fn serialize_region<S>(region: &Region, serializer: S) -> std::result::Result<S::Ok, S::Error>
-where
-    S: serde::Serializer,
-{
-    serializer.serialize_str(region.to_string().as_str())
-}
-
-impl SsmValidationResult {
-    pub(crate) fn new(
-        name: String,
-        expected_value: Option<String>,
-        actual_value: Result<Option<String>>,
-        region: Region,
-    ) -> SsmValidationResult {
-        // Determine the validation status based on equality, presence, and absence of expected and
-        // actual parameter values
-        let status = match (&expected_value, &actual_value) {
-            (Some(expected_value), Ok(Some(actual_value))) if actual_value.eq(expected_value) => {
-                SsmValidationResultStatus::Correct
-            }
-            (Some(_), Ok(Some(_))) => SsmValidationResultStatus::Incorrect,
-            (_, Ok(None)) => SsmValidationResultStatus::Missing,
-            (None, Ok(_)) => SsmValidationResultStatus::Unexpected,
-            (_, Err(_)) => SsmValidationResultStatus::Unreachable,
-        };
-        SsmValidationResult {
-            name,
-            expected_value,
-            actual_value: actual_value.unwrap_or_default(),
-            region,
-            status,
-        }
-    }
-}
-
-#[derive(Tabled, Serialize)]
-struct SsmValidationRegionSummary {
-    correct: u64,
-    incorrect: u64,
-    missing: u64,
-    unexpected: u64,
-    unreachable: u64,
-}
-
-impl From<&HashSet<SsmValidationResult>> for SsmValidationRegionSummary {
-    fn from(results: &HashSet<SsmValidationResult>) -> Self {
-        let mut region_validation = SsmValidationRegionSummary {
-            correct: 0,
-            incorrect: 0,
-            missing: 0,
-            unexpected: 0,
-            unreachable: 0,
-        };
-        for validation_result in results {
-            match validation_result.status {
-                SsmValidationResultStatus::Correct => region_validation.correct += 1,
-                SsmValidationResultStatus::Incorrect => region_validation.incorrect += 1,
-                SsmValidationResultStatus::Missing => region_validation.missing += 1,
-                SsmValidationResultStatus::Unexpected => region_validation.unexpected += 1,
-                SsmValidationResultStatus::Unreachable => region_validation.unreachable += 1,
-            }
-        }
-        region_validation
-    }
-}
-
-/// Represents all SSM validation results
-#[derive(Debug)]
-pub struct SsmValidationResults {
-    pub(crate) results: HashMap<Region, HashSet<SsmValidationResult>>,
-}
-
-impl Default for SsmValidationResults {
-    fn default() -> Self {
-        Self::new(HashMap::new())
-    }
-}
-
-impl Display for SsmValidationResults {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // Create a summary for each region, counting the number of parameters per status
-        let region_validations: HashMap<Region, SsmValidationRegionSummary> =
-            self.get_results_summary();
-
-        // Represent the HashMap of summaries as a `Table`
-        let table = Table::new(
-            region_validations
-                .iter()
-                .map(|(region, results)| (region.to_string(), results))
-                .collect::<Vec<_>>(),
-        )
-        .to_string();
-        write!(f, "{}", table)
-    }
-}
-
-impl SsmValidationResults {
-    pub fn new(results: HashMap<Region, HashSet<SsmValidationResult>>) -> Self {
-        SsmValidationResults { results }
-    }
-
-    /// Returns a HashSet containing all validation results whose status is present in
-    /// `requested_status`
-    pub fn get_results_for_status(
-        &self,
-        requested_status: &[SsmValidationResultStatus],
-    ) -> HashSet<&SsmValidationResult> {
-        let mut results = HashSet::new();
-        for region_results in self.results.values() {
-            results.extend(
-                region_results
-                    .iter()
-                    .filter(|result| requested_status.contains(&result.status))
-                    .collect::<Vec<_>>(),
-            )
-        }
-        results
-    }
-
-    /// Returns a `HashSet` containing all validation results
-    pub(crate) fn get_all_results(&self) -> HashSet<&SsmValidationResult> {
-        let mut results = HashSet::new();
-        for region_results in self.results.values() {
-            results.extend(region_results)
-        }
-        results
-    }
-
-    fn get_results_summary(&self) -> HashMap<Region, SsmValidationRegionSummary> {
-        self.results
-            .iter()
-            .map(|(region, region_result)| {
-                (
-                    region.clone(),
-                    SsmValidationRegionSummary::from(region_result),
-                )
-            })
-            .collect()
-    }
-
-    pub(crate) fn get_json_summary(&self) -> serde_json::Value {
-        serde_json::json!(self
-            .get_results_summary()
-            .into_iter()
-            .map(|(region, results)| (region.to_string(), results))
-            .collect::<HashMap<String, SsmValidationRegionSummary>>())
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use std::collections::{HashMap, HashSet};
-
-    use crate::aws::validate_ssm::results::{
-        SsmValidationResult, SsmValidationResultStatus, SsmValidationResults,
-    };
-    use aws_sdk_ssm::config::Region;
-
-    // These tests assert that the `get_results_for_status` function returns the correct values.
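With the API above, pulling every problem result out of a multi-region run is a single call. A hedged usage sketch; `report_problems` is an invented helper, and because the result fields are pub(crate) it would have to live inside pubsys itself:

    // Invented helper, not part of pubsys: print everything except `Correct`.
    fn report_problems(results: &SsmValidationResults) {
        let problems = results.get_results_for_status(&[
            SsmValidationResultStatus::Incorrect,
            SsmValidationResultStatus::Missing,
            SsmValidationResultStatus::Unexpected,
            SsmValidationResultStatus::Unreachable,
        ]);
        for result in problems {
            // `status` and the region both implement Display.
            eprintln!("{} ({}): {}", result.name, result.region, result.status);
        }
    }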
- - // Tests empty SsmValidationResults - #[test] - fn get_results_for_status_empty() { - let results = SsmValidationResults::new(HashMap::from([ - (Region::new("us-west-2"), HashSet::from([])), - (Region::new("us-east-1"), HashSet::from([])), - ])); - let results_filtered = results.get_results_for_status(&[ - SsmValidationResultStatus::Correct, - SsmValidationResultStatus::Incorrect, - SsmValidationResultStatus::Missing, - SsmValidationResultStatus::Unexpected, - ]); - - assert_eq!(results_filtered, HashSet::new()); - } - - // Tests the `Correct` status - #[test] - fn get_results_for_status_correct() { - let results = SsmValidationResults::new(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = - results.get_results_for_status(&[SsmValidationResultStatus::Correct]); - - assert_eq!( - results_filtered, - HashSet::from([ - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ) - ]) - ); - } - - // Tests a filter containing the `Correct` and `Incorrect` statuses - #[test] - fn get_results_for_status_correct_incorrect() { - let results = SsmValidationResults::new(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - 
SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = results.get_results_for_status(&[ - SsmValidationResultStatus::Correct, - SsmValidationResultStatus::Incorrect, - ]); - - assert_eq!( - results_filtered, - HashSet::from([ - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ) - ]) - ); - } - - // Tests a filter containing all statuses - #[test] - fn get_results_for_status_all() { - let results = SsmValidationResults::new(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - 
Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]), - ), - ( - Region::new("us-east-2"), - HashSet::from([SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-east-2".to_string(), - }), - Region::new("us-east-2"), - )]), - ), - ])); - let results_filtered = results.get_results_for_status(&[ - SsmValidationResultStatus::Correct, - SsmValidationResultStatus::Incorrect, - SsmValidationResultStatus::Missing, - SsmValidationResultStatus::Unexpected, - SsmValidationResultStatus::Unreachable, - ]); - - assert_eq!( - results_filtered, - HashSet::from([ - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-east-2".to_string() - }), - Region::new("us-east-2"), - ), - ]) - ); - } - - // Tests the `Missing` filter when none of the SsmValidationResults have this status - #[test] - fn get_results_for_status_missing_none() { - let results = SsmValidationResults::new(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - 
Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = - results.get_results_for_status(&[SsmValidationResultStatus::Missing]); - - assert_eq!(results_filtered, HashSet::new()); - } -} diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs deleted file mode 100644 index 520ef4e5a58..00000000000 --- a/tools/pubsys/src/main.rs +++ /dev/null @@ -1,265 +0,0 @@ -/*! -`pubsys` simplifies the process of publishing Bottlerocket updates. - -Currently implemented: -* building repos, whether starting from an existing repo or from scratch -* validating repos by loading them and retrieving their targets -* checking for repository metadata expirations within specified number of days -* refreshing and re-signing repos' non-root metadata files -* registering and copying EC2 AMIs -* Marking EC2 AMIs public (or private again) -* setting SSM parameters based on built AMIs -* promoting SSM parameters from versioned entries to named (e.g. 'latest') -* validating SSM parameters by comparing the returned parameters in a region to a given list of parameters - -To be implemented: -* high-level document describing pubsys usage with examples - -Configuration comes from: -* command-line parameters, to specify basic options and paths to the below files -* Infra.toml, for repo and AMI configuration -* Release.toml, for migrations -* Policy files for repo metadata expiration and update wave timing -*/ - -mod aws; -mod repo; -mod vmware; - -use clap::Parser; -use semver::Version; -use simplelog::{CombinedLogger, Config as LogConfig, ConfigBuilder, LevelFilter, SimpleLogger}; -use snafu::ResultExt; -use std::path::PathBuf; -use std::process; -use tokio::runtime::Runtime; - -fn run() -> Result<()> { - // Parse and store the args passed to the program - let args = Args::parse(); - - // SimpleLogger will send errors to stderr and anything less to stdout. - // To reduce verbosity of messages related to the AWS SDK for Rust we need - // to spin up two loggers, setting different levels for each. This allows - // us to retain the mixed logging of stdout/stderr in simplelog. - match args.log_level { - LevelFilter::Info => { - CombinedLogger::init(vec![ - SimpleLogger::new( - LevelFilter::Info, - ConfigBuilder::new() - .add_filter_ignore_str("aws_config") - .add_filter_ignore_str("aws_credential_types") - .add_filter_ignore_str("aws_smithy") - .add_filter_ignore_str("tracing::span") - .build(), - ), - SimpleLogger::new( - LevelFilter::Warn, - ConfigBuilder::new() - .add_filter_allow_str("aws_config") - .add_filter_allow_str("aws_credential_types") - .add_filter_allow_str("aws_smithy") - .add_filter_allow_str("tracing::span") - .build(), - ), - ]) - .context(error::LoggerSnafu)?; - } - _ => { - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)? 
- } - } - - match args.subcommand { - SubCommands::Repo(ref repo_args) => repo::run(&args, repo_args).context(error::RepoSnafu), - SubCommands::ValidateRepo(ref validate_repo_args) => { - repo::validate_repo::run(&args, validate_repo_args).context(error::ValidateRepoSnafu) - } - SubCommands::CheckRepoExpirations(ref check_expirations_args) => { - repo::check_expirations::run(&args, check_expirations_args) - .context(error::CheckExpirationsSnafu) - } - SubCommands::RefreshRepo(ref refresh_repo_args) => { - repo::refresh_repo::run(&args, refresh_repo_args).context(error::RefreshRepoSnafu) - } - SubCommands::Ami(ref ami_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::ami::run(&args, ami_args) - .await - .context(error::AmiSnafu) - }) - } - SubCommands::PublishAmi(ref publish_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::publish_ami::run(&args, publish_args) - .await - .context(error::PublishAmiSnafu) - }) - } - SubCommands::Ssm(ref ssm_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::ssm::run(&args, ssm_args) - .await - .context(error::SsmSnafu) - }) - } - SubCommands::PromoteSsm(ref promote_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::promote_ssm::run(&args, promote_args) - .await - .context(error::PromoteSsmSnafu) - }) - } - SubCommands::ValidateSsm(ref validate_ssm_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::validate_ssm::run(&args, validate_ssm_args) - .await - .context(error::ValidateSsmSnafu) - }) - } - SubCommands::ValidateAmi(ref validate_ami_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::validate_ami::run(&args, validate_ami_args) - .await - .context(error::ValidateAmiSnafu) - }) - } - SubCommands::UploadOva(ref upload_args) => { - vmware::upload_ova::run(&args, upload_args).context(error::UploadOvaSnafu) - } - } -} - -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -/// Automates publishing of Bottlerocket updates -#[derive(Debug, Parser)] -pub struct Args { - #[arg(global = true, long, default_value = "INFO")] - /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE - log_level: LevelFilter, - - #[arg(long)] - /// Path to Infra.toml (NOTE: must be specified before subcommand) - infra_config_path: PathBuf, - - #[command(subcommand)] - subcommand: SubCommands, -} - -#[derive(Debug, Parser)] -enum SubCommands { - Repo(repo::RepoArgs), - ValidateRepo(repo::validate_repo::ValidateRepoArgs), - CheckRepoExpirations(repo::check_expirations::CheckExpirationsArgs), - RefreshRepo(repo::refresh_repo::RefreshRepoArgs), - - Ami(aws::ami::AmiArgs), - PublishAmi(aws::publish_ami::Who), - ValidateAmi(aws::validate_ami::ValidateAmiArgs), - - Ssm(aws::ssm::SsmArgs), - PromoteSsm(aws::promote_ssm::PromoteArgs), - ValidateSsm(aws::validate_ssm::ValidateSsmArgs), - - UploadOva(vmware::upload_ova::UploadArgs), -} - -/// Parses a SemVer, stripping a leading 'v' if present -pub(crate) fn friendly_version( - mut version_str: &str, -) -> std::result::Result { - if version_str.starts_with('v') { - version_str = &version_str[1..]; - }; - - Version::parse(version_str) -} - -mod error { - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(super) enum Error { - #[snafu(display("Failed to build AMI: {}", source))] - 
Ami { source: crate::aws::ami::Error }, - - #[snafu(display("Logger setup error: {}", source))] - Logger { source: log::SetLoggerError }, - - #[snafu(display( - "Error during publish-ami command: {}: {}", - publish_ami_message(source), - source - ))] - PublishAmi { - source: crate::aws::publish_ami::Error, - }, - - #[snafu(display("Failed to promote SSM: {}", source))] - PromoteSsm { - source: crate::aws::promote_ssm::Error, - }, - - #[snafu(display("Failed to build repo: {}", source))] - Repo { source: crate::repo::Error }, - - #[snafu(display("Failed to validate repository: {}", source))] - ValidateRepo { - source: crate::repo::validate_repo::Error, - }, - - #[snafu(display("Check expirations error: {}", source))] - CheckExpirations { - source: crate::repo::check_expirations::Error, - }, - - #[snafu(display("Failed to refresh repository metadata: {}", source))] - RefreshRepo { - source: crate::repo::refresh_repo::Error, - }, - - #[snafu(display("Failed to create async runtime: {}", source))] - Runtime { source: std::io::Error }, - - #[snafu(display("Failed to update SSM: {}", source))] - Ssm { source: crate::aws::ssm::Error }, - - #[snafu(display("Failed to upload OVA: {}", source))] - UploadOva { - source: crate::vmware::upload_ova::Error, - }, - - #[snafu(display("Failed to validate SSM parameters: {}", source))] - ValidateSsm { - source: crate::aws::validate_ssm::Error, - }, - - #[snafu(display("Failed to validate EC2 images: {}", source))] - ValidateAmi { - source: crate::aws::validate_ami::Error, - }, - } - - fn publish_ami_message(error: &crate::aws::publish_ami::Error) -> String { - match error.amis_affected() { - 0 => String::from("No AMI permissions were updated"), - 1 => String::from("Permissions for 1 AMI were updated, the rest failed"), - n => format!("Permissions for {} AMIs were updated, the rest failed", n), - } - } -} -type Result = std::result::Result; diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs deleted file mode 100644 index 97b37d841dd..00000000000 --- a/tools/pubsys/src/repo.rs +++ /dev/null @@ -1,808 +0,0 @@ -//! The repo module owns the 'repo' subcommand and controls the process of building a repository. - -pub(crate) mod check_expirations; -pub(crate) mod refresh_repo; -pub(crate) mod validate_repo; - -use crate::{friendly_version, Args}; -use aws_sdk_kms::{config::Region, Client as KmsClient}; -use chrono::{DateTime, Utc}; -use clap::Parser; -use lazy_static::lazy_static; -use log::{debug, info, trace, warn}; -use parse_datetime::parse_datetime; -use pubsys_config::{ - InfraConfig, KMSKeyConfig, RepoConfig, RepoExpirationPolicy, SigningKeyConfig, -}; -use semver::Version; -use snafu::{ensure, OptionExt, ResultExt}; -use std::convert::TryInto; -use std::fs::{self, File}; -use std::num::NonZeroU64; -use std::path::{Path, PathBuf}; -use tempfile::NamedTempFile; -use tokio::runtime::Runtime; -use tough::{ - editor::signed::PathExists, - editor::RepositoryEditor, - key_source::{KeySource, LocalKeySource}, - schema::Target, - RepositoryLoader, TransportErrorKind, -}; -use tough_kms::{KmsKeySource, KmsSigningAlgorithm}; -use tough_ssm::SsmKeySource; -use update_metadata::{Images, Manifest, Release, UpdateWaves}; -use url::Url; - -lazy_static! 
{
-    static ref DEFAULT_START_TIME: DateTime<Utc> = Utc::now();
-}
-
-/// Builds Bottlerocket repos using latest build artifacts
-#[derive(Debug, Parser)]
-pub(crate) struct RepoArgs {
-    // Metadata about the update
-    #[arg(long)]
-    /// Use this named repo infrastructure from Infra.toml
-    repo: String,
-    #[arg(long)]
-    /// The architecture of the repo and the update being added
-    arch: String,
-    #[arg(long, value_parser = friendly_version)]
-    /// The version of the update being added
-    version: Version,
-    #[arg(long)]
-    /// The variant of the update being added
-    variant: String,
-
-    // The images to add in this update
-    #[arg(long)]
-    /// Path to the image containing the boot partition
-    boot_image: PathBuf,
-    #[arg(long)]
-    /// Path to the image containing the root partition
-    root_image: PathBuf,
-    #[arg(long)]
-    /// Path to the image containing the verity hashes
-    hash_image: PathBuf,
-
-    // Optionally add other files to the repo
-    #[arg(long = "link-target")]
-    /// Optional paths to add as targets and symlink into repo
-    link_targets: Vec<PathBuf>,
-    #[arg(long = "copy-target")]
-    /// Optional paths to add as targets and copy into repo
-    copy_targets: Vec<PathBuf>,
-
-    // Policies that pubsys interprets to set repo parameters
-    #[arg(long)]
-    /// Path to file that defines when repo metadata should expire
-    repo_expiration_policy_path: PathBuf,
-
-    // Configuration that pubsys passes on to other tools
-    #[arg(long)]
-    /// Path to Release.toml
-    release_config_path: PathBuf,
-    #[arg(long)]
-    /// Path to file that defines when this update will become available
-    wave_policy_path: PathBuf,
-    #[arg(long)]
-    /// Path to root.json for this repo
-    root_role_path: PathBuf,
-    #[arg(long)]
-    /// If we generated a local key, we'll find it here; used if Infra.toml has no key defined
-    default_key_path: PathBuf,
-
-    #[arg(long, value_parser = parse_datetime)]
-    /// When the waves and expiration timer will start; RFC3339 date or "in X hours/days/weeks"
-    release_start_time: Option<DateTime<Utc>>,
-
-    #[arg(long)]
-    /// Where to store the created repo
-    outdir: PathBuf,
-}
-
-/// Adds update, migrations, and waves to the Manifest
-fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> {
-    // Add update =^..^= =^..^= =^..^= =^..^=
-
-    let filename = |path: &PathBuf| -> Result<String> {
-        Ok(path
-            .file_name()
-            .context(error::InvalidImagePathSnafu { path })?
-            .to_str()
-            .context(error::NonUtf8PathSnafu { path })?
-            .to_string())
-    };
-
-    let images = Images {
-        boot: filename(&repo_args.boot_image)?,
-        root: filename(&repo_args.root_image)?,
-        hash: filename(&repo_args.hash_image)?,
-    };
-
-    info!(
-        "Adding update to manifest for version: {}, arch: {}, variant: {}",
-        repo_args.version, repo_args.arch, repo_args.variant
-    );
-    manifest
-        .add_update(
-            repo_args.version.clone(),
-            None,
-            repo_args.arch.clone(),
-            repo_args.variant.clone(),
-            images,
-        )
-        .context(error::AddUpdateSnafu)?;
-
-    // Add migrations =^..^= =^..^= =^..^= =^..^=
-
-    info!(
-        "Using release config from path: {}",
-        repo_args.release_config_path.display()
-    );
-    let release = Release::from_path(&repo_args.release_config_path).context(
-        error::UpdateMetadataReadSnafu {
-            path: &repo_args.release_config_path,
-        },
-    )?;
-    trace!(
-        "Adding migrations to manifest for versions: {:#?}",
-        release
-            .migrations
-            .keys()
-            .map(|(from, to)| format!("({}, {})", from, to))
-            .collect::<Vec<_>>()
-    );
-    // Replace the manifest 'migrations' section with the new data
-    manifest.migrations = release.migrations;
-
-    // Add update waves =^..^= =^..^= =^..^= =^..^=
-
-    let wave_start_time = repo_args.release_start_time.unwrap_or(*DEFAULT_START_TIME);
-    info!(
-        "Using wave policy from path: {}",
-        repo_args.wave_policy_path.display()
-    );
-    info!(
-        "Offsets from that file will be added to the release start time of: {}",
-        wave_start_time
-    );
-    let waves = UpdateWaves::from_path(&repo_args.wave_policy_path).context(
-        error::UpdateMetadataReadSnafu {
-            path: &repo_args.wave_policy_path,
-        },
-    )?;
-    manifest
-        .set_waves(
-            repo_args.variant.clone(),
-            repo_args.arch.clone(),
-            repo_args.version.clone(),
-            wave_start_time,
-            &waves,
-        )
-        .context(error::SetWavesSnafu {
-            wave_policy_path: &repo_args.wave_policy_path,
-        })?;
-
-    Ok(())
-}
-
-/// Set expirations of all non-root role metadata based on a given `RepoExpirationPolicy` and an
-/// expiration start time
-fn set_expirations(
-    editor: &mut RepositoryEditor,
-    expiration_policy: &RepoExpirationPolicy,
-    expiration_start_time: DateTime<Utc>,
-) -> Result<()> {
-    let snapshot_expiration = expiration_start_time + expiration_policy.snapshot_expiration;
-    let targets_expiration = expiration_start_time + expiration_policy.targets_expiration;
-    let timestamp_expiration = expiration_start_time + expiration_policy.timestamp_expiration;
-    info!(
-        "Setting non-root metadata expiration times:\n\tsnapshot: {}\n\ttargets: {}\n\ttimestamp: {}",
-        snapshot_expiration, targets_expiration, timestamp_expiration
-    );
-    editor
-        .snapshot_expires(snapshot_expiration)
-        .targets_expires(targets_expiration)
-        .context(error::SetTargetsExpirationSnafu {
-            expiration: targets_expiration,
-        })?
-        .timestamp_expires(timestamp_expiration);
-
-    Ok(())
-}
-
-/// Set versions of all role metadata; the version will be the UNIX timestamp of the current time.
-fn set_versions(editor: &mut RepositoryEditor) -> Result<()> {
-    let seconds = Utc::now().timestamp();
-    let unsigned_seconds = seconds.try_into().expect("System clock before 1970??");
-    let version = NonZeroU64::new(unsigned_seconds).expect("System clock exactly 1970??");
-    debug!("Repo version: {}", version);
-    editor
-        .snapshot_version(version)
-        .targets_version(version)
-        .context(error::SetTargetsVersionSnafu { version })?
-        .timestamp_version(version);
-
-    Ok(())
-}
-
-/// Adds targets, expirations, and version to the RepositoryEditor
-fn update_editor<'a, P>(
-    repo_args: &'a RepoArgs,
-    editor: &mut RepositoryEditor,
-    targets: impl Iterator<Item = &'a PathBuf>,
-    manifest_path: P,
-) -> Result<()>
-where
-    P: AsRef<Path>,
-{
-    // Add targets =^..^= =^..^= =^..^= =^..^=
-
-    for target_path in targets {
-        debug!("Adding target from path: {}", target_path.display());
-        editor
-            .add_target_path(target_path)
-            .context(error::AddTargetSnafu { path: &target_path })?;
-    }
-
-    let manifest_target = Target::from_path(&manifest_path).context(error::BuildTargetSnafu {
-        path: manifest_path.as_ref(),
-    })?;
-    debug!("Adding target for manifest.json");
-    editor
-        .add_target("manifest.json", manifest_target)
-        .context(error::AddTargetSnafu {
-            path: "manifest.json",
-        })?;
-
-    // Add expirations =^..^= =^..^= =^..^= =^..^=
-
-    info!(
-        "Using repo expiration policy from path: {}",
-        repo_args.repo_expiration_policy_path.display()
-    );
-    let expiration = RepoExpirationPolicy::from_path(&repo_args.repo_expiration_policy_path)
-        .context(error::ConfigSnafu)?;
-
-    let expiration_start_time = repo_args.release_start_time.unwrap_or(*DEFAULT_START_TIME);
-    let snapshot_expiration = expiration_start_time + expiration.snapshot_expiration;
-    let targets_expiration = expiration_start_time + expiration.targets_expiration;
-    let timestamp_expiration = expiration_start_time + expiration.timestamp_expiration;
-    info!(
-        "Repo expiration times:\n\tsnapshot: {}\n\ttargets: {}\n\ttimestamp: {}",
-        snapshot_expiration, targets_expiration, timestamp_expiration
-    );
-    editor
-        .snapshot_expires(snapshot_expiration)
-        .targets_expires(targets_expiration)
-        .context(error::SetTargetsExpirationSnafu {
-            expiration: targets_expiration,
-        })?
-        .timestamp_expires(timestamp_expiration);
-
-    // Add version =^..^= =^..^= =^..^= =^..^=
-
-    let seconds = Utc::now().timestamp();
-    let unsigned_seconds = seconds.try_into().expect("System clock before 1970??");
-    let version = NonZeroU64::new(unsigned_seconds).expect("System clock exactly 1970??");
-    debug!("Repo version: {}", version);
-    editor
-        .snapshot_version(version)
-        .targets_version(version)
-        .context(error::SetTargetsVersionSnafu { version })?
-        .timestamp_version(version);
-
-    Ok(())
-}
-
-/// If the infra config has a repo section defined for the given repo, and it has metadata base and
-/// targets URLs defined, returns those URLs, otherwise None.
-fn repo_urls<'a>(
-    repo_config: &'a RepoConfig,
-    variant: &str,
-    arch: &str,
-) -> Result<Option<(Url, &'a Url)>> {
-    // Check if both URLs are set
-    if let Some(metadata_base_url) = repo_config.metadata_base_url.as_ref() {
-        if let Some(targets_url) = repo_config.targets_url.as_ref() {
-            let base_slash = if metadata_base_url.as_str().ends_with('/') {
-                ""
-            } else {
-                "/"
-            };
-            let metadata_url_str =
-                format!("{}{}{}/{}", metadata_base_url, base_slash, variant, arch);
-            let metadata_url = Url::parse(&metadata_url_str).context(error::ParseUrlSnafu {
-                input: &metadata_url_str,
-            })?;
-
-            debug!("Using metadata url: {}", metadata_url);
-            return Ok(Some((metadata_url, targets_url)));
-        }
-    }
-
-    Ok(None)
-}
-
-/// Builds an editor and manifest; will start from an existing repo if one is specified in the
-/// configuration. Returns Err if we fail to read from the repo. Returns Ok(None) if we detect
-/// that the repo does not exist.
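The contract spelled out in that comment is tri-state: Err for a real read failure, Ok(None) for a repo that simply does not exist yet, and Ok(Some(..)) for a loaded repo. A dependency-free sketch of the calling pattern, with `load_repo` standing in for the tough-backed loader defined next:

    // `load_repo` stands in for the real loader: `?` propagates hard failures,
    // while Ok(None) lets the caller fall back to a fresh repository.
    fn load_repo(exists: bool) -> Result<Option<&'static str>, String> {
        if exists {
            Ok(Some("existing repo"))
        } else {
            Ok(None) // absent is not an error
        }
    }

    fn main() -> Result<(), String> {
        let repo = match load_repo(false)? {
            Some(repo) => repo,
            None => "fresh, empty repo", // mirrors the RepositoryEditor::new branch
        };
        println!("editing {repo}");
        Ok(())
    }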
-fn load_editor_and_manifest<'a, P>(
-    root_role_path: P,
-    metadata_url: &'a Url,
-    targets_url: &'a Url,
-) -> Result<Option<(RepositoryEditor, Manifest)>>
-where
-    P: AsRef<Path>,
-{
-    let root_role_path = root_role_path.as_ref();
-
-    // Try to load the repo...
-    let repo_load_result = RepositoryLoader::new(
-        File::open(root_role_path).context(error::FileSnafu {
-            path: root_role_path,
-        })?,
-        metadata_url.clone(),
-        targets_url.clone(),
-    )
-    .load();
-
-    match repo_load_result {
-        // If we load it successfully, build an editor and manifest from it.
-        Ok(repo) => {
-            let target = "manifest.json";
-            let target = target
-                .try_into()
-                .context(error::ParseTargetNameSnafu { target })?;
-            let reader = repo
-                .read_target(&target)
-                .context(error::ReadTargetSnafu {
-                    target: target.raw(),
-                })?
-                .with_context(|| error::NoManifestSnafu {
-                    metadata_url: metadata_url.clone(),
-                })?;
-            let manifest = serde_json::from_reader(reader).context(error::InvalidJsonSnafu {
-                path: "manifest.json",
-            })?;
-
-            let editor = RepositoryEditor::from_repo(root_role_path, repo)
-                .context(error::EditorFromRepoSnafu)?;
-
-            Ok(Some((editor, manifest)))
-        }
-        // If we fail to load, but we only failed because the repo doesn't exist yet, then start
-        // fresh by signalling that there is no known repo. Otherwise, fail hard.
-        Err(e) => {
-            if is_file_not_found_error(&e) {
-                Ok(None)
-            } else {
-                Err(e).with_context(|_| error::RepoLoadSnafu {
-                    metadata_base_url: metadata_url.clone(),
-                })
-            }
-        }
-    }
-}
-
-/// Inspects the `tough` error to see if it is a `Transport` error, and if so, is it `FileNotFound`.
-fn is_file_not_found_error(e: &tough::error::Error) -> bool {
-    if let tough::error::Error::Transport { source, .. } = e {
-        matches!(source.kind(), TransportErrorKind::FileNotFound)
-    } else {
-        false
-    }
-}
-
-/// Gets the corresponding `KeySource` according to the signing key config from Infra.toml
-fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result<Box<dyn KeySource>> {
-    match signing_key_config {
-        SigningKeyConfig::file { path } => Ok(Box::new(LocalKeySource { path: path.clone() })),
-        SigningKeyConfig::kms { key_id, config, ..
} => Ok(Box::new(KmsKeySource { - profile: None, - key_id: key_id - .clone() - .context(error::MissingConfigSnafu { missing: "key_id" })?, - client: { - let key_id_val = key_id - .clone() - .context(error::MissingConfigSnafu { missing: "key_id" })?; - match config.as_ref() { - Some(config_val) => get_client(config_val, &key_id_val)?, - None => None, - } - }, - signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, - })), - SigningKeyConfig::ssm { parameter } => Ok(Box::new(SsmKeySource { - profile: None, - parameter_name: parameter.clone(), - key_id: None, - })), - } -} - -/// Helper function that generates a KmsClient or None given config containing available keys -fn get_client(kmskey_config: &KMSKeyConfig, key_id: &str) -> Result> { - if let Some(region) = kmskey_config.available_keys.get(key_id) { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - Ok(Some(rt.block_on(async { async_get_client(region).await }))) - } else { - Ok(None) - } -} - -/// Helper function that generates a KmsClient given region -async fn async_get_client(region: &str) -> KmsClient { - let client_config = aws_config::from_env() - .region(Region::new(region.to_string())) - .load() - .await; - KmsClient::new(&client_config) -} - -/// Common entrypoint from main() -pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { - let metadata_out_dir = repo_args - .outdir - .join(&repo_args.variant) - .join(&repo_args.arch); - let targets_out_dir = repo_args.outdir.join("targets"); - - // If the given metadata directory exists, throw an error. We don't want to overwrite a user's - // existing repository. (The targets directory is shared, so it's fine if that exists.) - ensure!( - !Path::exists(&metadata_out_dir), - error::RepoExistsSnafu { - path: metadata_out_dir - } - ); - - // Build repo =^..^= =^..^= =^..^= =^..^= - - // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) - .context(error::ConfigSnafu)?; - trace!("Using infra config: {:?}", infra_config); - - // If the user has the requested (or "default") repo defined in their Infra.toml, use it, - // otherwise use a default config. - let default_repo_config = RepoConfig::default(); - let repo_config = if let Some(repo_config) = infra_config - .repo - .as_ref() - .and_then(|repo_section| repo_section.get(&repo_args.repo)) - .map(|repo| { - info!("Using repo '{}' from Infra.toml", repo_args.repo); - repo - }) { - repo_config - } else { - info!( - "Didn't find repo '{}' in Infra.toml, using default configuration", - repo_args.repo - ); - &default_repo_config - }; - - // Build a repo editor and manifest, from an existing repo if available, otherwise fresh - let maybe_urls = repo_urls(repo_config, &repo_args.variant, &repo_args.arch)?; - let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls.as_ref() - { - info!("Found metadata and target URLs, loading existing repository"); - match load_editor_and_manifest(&repo_args.root_role_path, metadata_url, targets_url)? 
{ - Some((editor, manifest)) => (editor, manifest), - None => { - warn!( - "Did not find repo at '{}', starting a new one", - metadata_url - ); - ( - RepositoryEditor::new(&repo_args.root_role_path) - .context(error::NewEditorSnafu)?, - Manifest::default(), - ) - } - } - } else { - info!("Did not find metadata and target URLs in infra config, creating a new repository"); - ( - RepositoryEditor::new(&repo_args.root_role_path).context(error::NewEditorSnafu)?, - Manifest::default(), - ) - }; - - // Add update information to manifest - update_manifest(repo_args, &mut manifest)?; - // Write manifest to tempfile so it can be copied in as target later - let manifest_path = NamedTempFile::new() - .context(error::TempFileSnafu)? - .into_temp_path(); - update_metadata::write_file(&manifest_path, &manifest).context(error::ManifestWriteSnafu { - path: &manifest_path, - })?; - - // Add manifest and targets to editor - let copy_targets = &repo_args.copy_targets; - let link_targets = repo_args.link_targets.iter().chain(vec![ - &repo_args.boot_image, - &repo_args.root_image, - &repo_args.hash_image, - ]); - let all_targets = copy_targets.iter().chain(link_targets.clone()); - - update_editor(repo_args, &mut editor, all_targets, &manifest_path)?; - - // Sign repo =^..^= =^..^= =^..^= =^..^= - - // Check if we have a signing key defined in Infra.toml; if not, we'll fall back to the - // generated local key. - let signing_key_config = repo_config.signing_keys.as_ref(); - - let key_source = if let Some(signing_key_config) = signing_key_config { - get_signing_key_source(signing_key_config)? - } else { - ensure!( - repo_args.default_key_path.exists(), - error::MissingConfigSnafu { - missing: "signing_keys in repo config, and we found no local key", - } - ); - Box::new(LocalKeySource { - path: repo_args.default_key_path.clone(), - }) - }; - - let signed_repo = editor.sign(&[key_source]).context(error::RepoSignSnafu)?; - - // Write repo =^..^= =^..^= =^..^= =^..^= - - // Write targets first so we don't have invalid metadata if targets fail - info!("Writing repo targets to: {}", targets_out_dir.display()); - fs::create_dir_all(&targets_out_dir).context(error::CreateDirSnafu { - path: &targets_out_dir, - })?; - - // Copy manifest with proper name instead of tempfile name - debug!("Copying manifest.json into {}", targets_out_dir.display()); - let target = "manifest.json"; - let target = target - .try_into() - .context(error::ParseTargetNameSnafu { target })?; - signed_repo - .copy_target( - &manifest_path, - &targets_out_dir, - // We should never have matching manifests from different repos - PathExists::Fail, - Some(&target), - ) - .context(error::CopyTargetSnafu { - target: &manifest_path, - path: &targets_out_dir, - })?; - - // Copy / link any other user requested targets - for copy_target in copy_targets { - debug!( - "Copying target '{}' into {}", - copy_target.display(), - targets_out_dir.display() - ); - signed_repo - .copy_target(copy_target, &targets_out_dir, PathExists::Skip, None) - .context(error::CopyTargetSnafu { - target: copy_target, - path: &targets_out_dir, - })?; - } - for link_target in link_targets { - debug!( - "Linking target '{}' into {}", - link_target.display(), - targets_out_dir.display() - ); - signed_repo - .link_target(link_target, &targets_out_dir, PathExists::Skip, None) - .context(error::LinkTargetSnafu { - target: link_target, - path: &targets_out_dir, - })?; - } - - info!("Writing repo metadata to: {}", metadata_out_dir.display()); - 
fs::create_dir_all(&metadata_out_dir).context(error::CreateDirSnafu { - path: &metadata_out_dir, - })?; - signed_repo - .write(&metadata_out_dir) - .context(error::RepoWriteSnafu { - path: &repo_args.outdir, - })?; - - Ok(()) -} - -mod error { - use chrono::{DateTime, Utc}; - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - use url::Url; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Failed to add new update to manifest: {}", source))] - AddUpdate { - source: update_metadata::error::Error, - }, - - #[snafu(display("Failed to add new target '{}' to repo: {}", path.display(), source))] - AddTarget { - path: PathBuf, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to build target metadata from path '{}': {}", path.display(), source))] - BuildTarget { - path: PathBuf, - #[snafu(source(from(tough::schema::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to copy target '{}' to '{}': {}", target.display(), path.display(), source))] - CopyTarget { - target: PathBuf, - path: PathBuf, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] - CreateDir { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to create repo editor from given repo: {}", source))] - EditorFromRepo { - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - File { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid path given for image file: '{}'", path.display()))] - InvalidImagePath { path: PathBuf }, - - #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] - InvalidJson { - path: PathBuf, - source: serde_json::Error, - }, - - #[snafu(display("Failed to symlink target '{}' to '{}': {}", target.display(), path.display(), source))] - LinkTarget { - target: PathBuf, - path: PathBuf, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to write Manifest to '{}': {}", path.display(), source))] - ManifestWrite { - path: PathBuf, - source: update_metadata::error::Error, - }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, - - #[snafu(display("Repo URLs not specified for repo '{}'", repo))] - MissingRepoUrls { repo: String }, - - #[snafu(display("Failed to create new repo editor: {}", source))] - NewEditor { - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Repo does not have a manifest.json: {}", metadata_url))] - NoManifest { metadata_url: Url }, - - #[snafu(display("Non-UTF8 path '{}' not supported", path.display()))] - NonUtf8Path { path: PathBuf }, - - #[snafu(display("Invalid URL '{}': {}", input, source))] - ParseUrl { - input: String, - source: url::ParseError, - }, - - #[snafu(display("Failed to read target '{}' from repo: {}", target, source))] - ReadTarget { - target: String, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to create async runtime: {}", source))] - Runtime { source: std::io::Error }, - - #[snafu(display("Failed to parse target name from string '{}': {}", target, source))] - ParseTargetName { - target: String, - 
#[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Repo exists at '{}' - remove it and try again", path.display()))] - RepoExists { path: PathBuf }, - - #[snafu(display("Could not fetch repo at '{}': {}", url, msg))] - RepoFetch { url: Url, msg: String }, - - #[snafu(display( - "Failed to load repo from metadata URL '{}': {}", - metadata_base_url, - source - ))] - RepoLoad { - metadata_base_url: Url, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Requested repository does not exist: '{}'", url))] - RepoNotFound { url: Url }, - - #[snafu(display("Failed to sign repository: {}", source))] - RepoSign { - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to write repository to {}: {}", path.display(), source))] - RepoWrite { - path: PathBuf, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to set targets expiration to {}: {}", expiration, source))] - SetTargetsExpiration { - expiration: DateTime, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to set targets version to {}: {}", version, source))] - SetTargetsVersion { - version: u64, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to set waves from '{}': {}", wave_policy_path.display(), source))] - SetWaves { - wave_policy_path: PathBuf, - source: update_metadata::error::Error, - }, - - #[snafu(display("Failed to create temporary file: {}", source))] - TempFile { source: io::Error }, - - #[snafu(display("Failed to read update metadata '{}': {}", path.display(), source))] - UpdateMetadataRead { - path: PathBuf, - source: update_metadata::error::Error, - }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs deleted file mode 100644 index bebbcd25752..00000000000 --- a/tools/pubsys/src/repo/check_expirations/mod.rs +++ /dev/null @@ -1,184 +0,0 @@ -//! The check_expirations module owns the 'check-repo-expirations' subcommand and provide methods for -//! checking the metadata expirations of a given TUF repository. - -use crate::repo::{error as repo_error, repo_urls}; -use crate::Args; -use chrono::{DateTime, Utc}; -use clap::Parser; -use log::{error, info, trace, warn}; -use parse_datetime::parse_datetime; -use pubsys_config::InfraConfig; -use snafu::{OptionExt, ResultExt}; -use std::collections::HashMap; -use std::fs::File; -use std::path::PathBuf; -use tough::{ExpirationEnforcement, Repository, RepositoryLoader}; -use url::Url; - -/// Checks for metadata expirations for a set of TUF repositories -#[derive(Debug, Parser)] -pub(crate) struct CheckExpirationsArgs { - #[arg(long)] - /// Use this named repo infrastructure from Infra.toml - repo: String, - - #[arg(long)] - /// The architecture of the repo being checked for expirations - arch: String, - #[arg(long)] - /// The variant of the repo being checked for expirations - variant: String, - - #[arg(long)] - /// Path to root.json for this repo - root_role_path: PathBuf, - - #[arg(long, value_parser = parse_datetime)] - /// Finds metadata files expiring between now and a specified time; RFC3339 date or "in X hours/days/weeks" - expiration_limit: DateTime, -} - -/// Checks for upcoming role expirations, gathering them in a map of role to expiration datetime. 
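The sweep described there is a plain filter over role expiry times. A minimal sketch assuming only the `chrono` crate, with string names standing in for `tough::schema::RoleType`:

    use chrono::{DateTime, Duration, Utc};
    use std::collections::HashMap;

    // Collect every role whose expiry falls at or before the cutoff.
    fn upcoming(
        roles: &[(&'static str, DateTime<Utc>)],
        cutoff: DateTime<Utc>,
    ) -> HashMap<&'static str, DateTime<Utc>> {
        roles
            .iter()
            .filter(|(_, expires)| *expires <= cutoff)
            .cloned()
            .collect()
    }

    fn main() {
        let now = Utc::now();
        let roles = [
            ("snapshot", now + Duration::days(3)),
            ("targets", now + Duration::days(30)),
        ];
        // With a one-week cutoff, only "snapshot" is flagged.
        let flagged = upcoming(&roles, now + Duration::weeks(1));
        assert!(flagged.contains_key("snapshot"));
        assert!(!flagged.contains_key("targets"));
    }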
-fn find_upcoming_metadata_expiration(
-    repo: &Repository,
-    end_date: DateTime<Utc>,
-) -> HashMap<tough::schema::RoleType, DateTime<Utc>> {
-    let mut expirations = HashMap::new();
-    info!(
-        "Looking for metadata expirations happening from now to {}",
-        end_date
-    );
-    if repo.root().signed.expires <= end_date {
-        expirations.insert(tough::schema::RoleType::Root, repo.root().signed.expires);
-    }
-    if repo.snapshot().signed.expires <= end_date {
-        expirations.insert(
-            tough::schema::RoleType::Snapshot,
-            repo.snapshot().signed.expires,
-        );
-    }
-    if repo.targets().signed.expires <= end_date {
-        expirations.insert(
-            tough::schema::RoleType::Targets,
-            repo.targets().signed.expires,
-        );
-    }
-    if repo.timestamp().signed.expires <= end_date {
-        expirations.insert(
-            tough::schema::RoleType::Timestamp,
-            repo.timestamp().signed.expires,
-        );
-    }
-
-    expirations
-}
-
-fn check_expirations(
-    root_role_path: &PathBuf,
-    metadata_url: &Url,
-    targets_url: &Url,
-    expiration_limit: DateTime<Utc>,
-) -> Result<()> {
-    // Load the repository
-    let repo = RepositoryLoader::new(
-        File::open(root_role_path).context(repo_error::FileSnafu {
-            path: root_role_path,
-        })?,
-        metadata_url.clone(),
-        targets_url.clone(),
-    )
-    // We're gonna check the expiration ourselves
-    .expiration_enforcement(ExpirationEnforcement::Unsafe)
-    .load()
-    .context(repo_error::RepoLoadSnafu {
-        metadata_base_url: metadata_url.clone(),
-    })?;
-    info!("Loaded TUF repo:\t{}", metadata_url);
-
-    info!("Root expiration:\t{}", repo.root().signed.expires);
-    info!("Snapshot expiration:\t{}", repo.snapshot().signed.expires);
-    info!("Targets expiration:\t{}", repo.targets().signed.expires);
-    info!("Timestamp expiration:\t{}", repo.timestamp().signed.expires);
-    // Check for upcoming metadata expirations if a timeframe is specified
-    let upcoming_expirations = find_upcoming_metadata_expiration(&repo, expiration_limit);
-    if !upcoming_expirations.is_empty() {
-        let now = Utc::now();
-        for (role, expiration_date) in upcoming_expirations {
-            if expiration_date < now {
-                error!(
-                    "Repo '{}': '{}' expired on {}",
-                    metadata_url, role, expiration_date
-                )
-            } else {
-                warn!(
-                    "Repo '{}': '{}' expiring in {} at {}",
-                    metadata_url,
-                    role,
-                    expiration_date - now,
-                    expiration_date
-                )
-            }
-        }
-        return Err(Error::RepoExpirations {
-            metadata_url: metadata_url.clone(),
-        });
-    }
-
-    Ok(())
-}
-
-/// Common entrypoint from main()
-pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> Result<()> {
-    // If a lock file exists, use that, otherwise use Infra.toml
-    let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false)
-        .context(repo_error::ConfigSnafu)?;
-    trace!("Parsed infra config: {:?}", infra_config);
-    let repo_config = infra_config
-        .repo
-        .as_ref()
-        .context(repo_error::MissingConfigSnafu {
-            missing: "repo section",
-        })?
-        .get(&check_expirations_args.repo)
-        .with_context(|| repo_error::MissingConfigSnafu {
-            missing: format!("definition for repo {}", &check_expirations_args.repo),
-        })?;
-
-    let repo_urls = repo_urls(
-        repo_config,
-        &check_expirations_args.variant,
-        &check_expirations_args.arch,
-    )?
-    .context(repo_error::MissingRepoUrlsSnafu {
-        repo: &check_expirations_args.repo,
-    })?;
-    check_expirations(
-        &check_expirations_args.root_role_path,
-        &repo_urls.0,
-        repo_urls.1,
-        check_expirations_args.expiration_limit,
-    )?;
-
-    Ok(())
-}
-
-mod error {
-    use snafu::Snafu;
-    use url::Url;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(context(false), display("{}", source))]
-        Repo {
-            #[snafu(source(from(crate::repo::Error, Box::new)))]
-            source: Box<crate::repo::Error>,
-        },
-
-        #[snafu(display("Found expiring/expired metadata in '{}'", metadata_url))]
-        RepoExpirations { metadata_url: Url },
-    }
-}
-pub(crate) use error::Error;
-
-type Result<T> = std::result::Result<T, Error>;
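The heart of the removed `check-repo-expirations` subcommand is a simple cutoff scan over role expiration dates. A minimal standalone sketch of that pattern, assuming the `chrono` crate and plain role-name strings in place of the module's `tough::schema::RoleType`:

// Illustrative sketch of the expiration-scan pattern: collect the roles whose
// expiration falls at or before a cutoff date.
use std::collections::HashMap;

use chrono::{DateTime, Duration, Utc};

fn upcoming_expirations(
    roles: &HashMap<String, DateTime<Utc>>,
    end_date: DateTime<Utc>,
) -> HashMap<String, DateTime<Utc>> {
    roles
        .iter()
        .filter(|(_, expires)| **expires <= end_date)
        .map(|(role, expires)| (role.clone(), *expires))
        .collect()
}

fn main() {
    let mut roles = HashMap::new();
    // Hypothetical expiry dates, standing in for real TUF metadata.
    roles.insert("timestamp".to_string(), Utc::now() + Duration::days(3));
    roles.insert("targets".to_string(), Utc::now() + Duration::days(90));

    // A 7-day window flags only the timestamp role.
    let soon = upcoming_expirations(&roles, Utc::now() + Duration::days(7));
    assert!(soon.contains_key("timestamp"));
    assert!(!soon.contains_key("targets"));
}

Whatever the scan returns is then split into already-expired roles (logged as errors) and soon-to-expire roles (logged as warnings), as in the deleted `check_expirations` above.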
diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs
deleted file mode 100644
index be70787617c..00000000000
--- a/tools/pubsys/src/repo/refresh_repo/mod.rs
+++ /dev/null
@@ -1,214 +0,0 @@
-//! The refresh_repo module owns the 'refresh-repo' subcommand and provides methods for
-//! refreshing and re-signing the metadata files of a given TUF repository.
-
-use crate::repo::{
-    error as repo_error, get_signing_key_source, repo_urls, set_expirations, set_versions,
-};
-use crate::Args;
-use chrono::{DateTime, Utc};
-use clap::Parser;
-use lazy_static::lazy_static;
-use log::{info, trace};
-use pubsys_config::{InfraConfig, RepoExpirationPolicy};
-use snafu::{ensure, OptionExt, ResultExt};
-use std::fs;
-use std::fs::File;
-use std::path::{Path, PathBuf};
-use tough::editor::RepositoryEditor;
-use tough::key_source::{KeySource, LocalKeySource};
-use tough::{ExpirationEnforcement, RepositoryLoader};
-use url::Url;
-
-lazy_static! {
-    static ref EXPIRATION_START_TIME: DateTime<Utc> = Utc::now();
-}
-
-/// Refreshes and re-signs TUF repositories' non-root metadata files with new expiration dates
-#[derive(Debug, Parser)]
-pub(crate) struct RefreshRepoArgs {
-    #[arg(long)]
-    /// Use this named repo infrastructure from Infra.toml
-    repo: String,
-
-    #[arg(long)]
-    /// The architecture of the repo being refreshed and re-signed
-    arch: String,
-
-    #[arg(long)]
-    /// The variant of the repo being refreshed and re-signed
-    variant: String,
-
-    #[arg(long)]
-    /// Path to root.json for this repo
-    root_role_path: PathBuf,
-
-    #[arg(long)]
-    /// If we generated a local key, we'll find it here; used if Infra.toml has no key defined
-    default_key_path: PathBuf,
-
-    #[arg(long)]
-    /// Path to file that defines when repo non-root metadata should expire
-    repo_expiration_policy_path: PathBuf,
-
-    #[arg(long)]
-    /// Where to store the refreshed/re-signed repository (just the metadata files)
-    outdir: PathBuf,
-
-    #[arg(long)]
-    /// If this flag is set, repositories will succeed in loading and be refreshed even if they
-    /// have expired metadata files.
-    unsafe_refresh: bool,
-}
-
-fn refresh_repo(
-    root_role_path: &PathBuf,
-    metadata_out_dir: &PathBuf,
-    metadata_url: &Url,
-    targets_url: &Url,
-    key_source: Box<dyn KeySource>,
-    expiration: &RepoExpirationPolicy,
-    unsafe_refresh: bool,
-) -> Result<(), Error> {
-    // If the given metadata directory exists, throw an error. We don't want to overwrite a
-    // user's existing repository.
-    ensure!(
-        !Path::exists(metadata_out_dir),
-        repo_error::RepoExistsSnafu {
-            path: metadata_out_dir
-        }
-    );
-
-    let expiration_enforcement = if unsafe_refresh {
-        ExpirationEnforcement::Unsafe
-    } else {
-        ExpirationEnforcement::Safe
-    };
-
-    // Load the repository and get the repo editor for it
-    let repo = RepositoryLoader::new(
-        File::open(root_role_path).context(repo_error::FileSnafu {
-            path: root_role_path,
-        })?,
-        metadata_url.clone(),
-        targets_url.clone(),
-    )
-    .expiration_enforcement(expiration_enforcement)
-    .load()
-    .context(repo_error::RepoLoadSnafu {
-        metadata_base_url: metadata_url.clone(),
-    })?;
-    let mut repo_editor = RepositoryEditor::from_repo(root_role_path, repo)
-        .context(repo_error::EditorFromRepoSnafu)?;
-    info!("Loaded TUF repo: {}", metadata_url);
-
-    // Refresh the expiration dates of all non-root metadata files
-    set_expirations(&mut repo_editor, expiration, *EXPIRATION_START_TIME)?;
-
-    // Refresh the versions of all non-root metadata files
-    set_versions(&mut repo_editor)?;
-
-    // Sign the repository
-    let signed_repo = repo_editor
-        .sign(&[key_source])
-        .context(repo_error::RepoSignSnafu)?;
-
-    // Write out the metadata files for the repository
-    info!("Writing repo metadata to: {}", metadata_out_dir.display());
-    fs::create_dir_all(metadata_out_dir).context(repo_error::CreateDirSnafu {
-        path: &metadata_out_dir,
-    })?;
-    signed_repo
-        .write(metadata_out_dir)
-        .context(repo_error::RepoWriteSnafu {
-            path: &metadata_out_dir,
-        })?;
-
-    Ok(())
-}
-
-/// Common entrypoint from main()
-pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<(), Error> {
-    // If a lock file exists, use that, otherwise use Infra.toml
-    let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false)
-        .context(repo_error::ConfigSnafu)?;
-    trace!("Parsed infra config: {:?}", infra_config);
-
-    let repo_config = infra_config
-        .repo
-        .as_ref()
-        .context(repo_error::MissingConfigSnafu {
-            missing: "repo section",
-        })?
-        .get(&refresh_repo_args.repo)
-        .context(repo_error::MissingConfigSnafu {
-            missing: format!("definition for repo {}", &refresh_repo_args.repo),
-        })?;
-
-    // Check if we have a signing key defined in Infra.toml; if not, we'll fall back to the
-    // generated local key.
-    let signing_key_config = repo_config.signing_keys.as_ref();
-
-    let key_source = if let Some(signing_key_config) = signing_key_config {
-        get_signing_key_source(signing_key_config)?
-    } else {
-        ensure!(
-            refresh_repo_args.default_key_path.exists(),
-            repo_error::MissingConfigSnafu {
-                missing: "signing_keys in repo config, and we found no local key",
-            }
-        );
-        Box::new(LocalKeySource {
-            path: refresh_repo_args.default_key_path.clone(),
-        })
-    };
-
-    // Get the expiration policy
-    info!(
-        "Using repo expiration policy from path: {}",
-        refresh_repo_args.repo_expiration_policy_path.display()
-    );
-    let expiration =
-        RepoExpirationPolicy::from_path(&refresh_repo_args.repo_expiration_policy_path)
-            .context(repo_error::ConfigSnafu)?;
-
-    let repo_urls = repo_urls(
-        repo_config,
-        &refresh_repo_args.variant,
-        &refresh_repo_args.arch,
-    )?
-    .context(repo_error::MissingRepoUrlsSnafu {
-        repo: &refresh_repo_args.repo,
-    })?;
-    refresh_repo(
-        &refresh_repo_args.root_role_path,
-        &refresh_repo_args
-            .outdir
-            .join(&refresh_repo_args.variant)
-            .join(&refresh_repo_args.arch),
-        &repo_urls.0,
-        repo_urls.1,
-        key_source,
-        &expiration,
-        refresh_repo_args.unsafe_refresh,
-    )?;
-
-    Ok(())
-}
-
-mod error {
-    use snafu::Snafu;
-    use url::Url;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(context(false), display("{}", source))]
-        Repo {
-            #[snafu(source(from(crate::repo::Error, Box::new)))]
-            source: Box<crate::repo::Error>,
-        },
-
-        #[snafu(display("Failed to refresh & re-sign metadata for: {:#?}", list_of_urls))]
-        RepoRefresh { list_of_urls: Vec<Url> },
-    }
-}
-pub(crate) use error::Error;
diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs
deleted file mode 100644
index 1734f6bb053..00000000000
--- a/tools/pubsys/src/repo/validate_repo/mod.rs
+++ /dev/null
@@ -1,198 +0,0 @@
-//! The validate_repo module owns the 'validate-repo' subcommand and provides methods for
-//! validating a given TUF repository by attempting to load the repository and download its
-//! targets.
-
-use crate::repo::{error as repo_error, repo_urls};
-use crate::Args;
-use clap::Parser;
-use log::{info, trace};
-use pubsys_config::InfraConfig;
-use snafu::{OptionExt, ResultExt};
-use std::cmp::min;
-use std::fs::File;
-use std::io;
-use std::path::PathBuf;
-use std::sync::mpsc;
-use tough::{Repository, RepositoryLoader, TargetName};
-use url::Url;
-
-/// Validates a set of TUF repositories
-#[derive(Debug, Parser)]
-pub(crate) struct ValidateRepoArgs {
-    #[arg(long)]
-    /// Use this named repo infrastructure from Infra.toml
-    repo: String,
-
-    #[arg(long)]
-    /// The architecture of the repo being validated
-    arch: String,
-
-    #[arg(long)]
-    /// The variant of the repo being validated
-    variant: String,
-
-    #[arg(long)]
-    /// Path to root.json for this repo
-    root_role_path: PathBuf,
-
-    #[arg(long)]
-    /// Specifies whether to validate all listed targets by attempting to download them
-    validate_targets: bool,
-}
-
-/// If we are on a machine with a large number of cores, then we limit the number of simultaneous
-/// downloads to this arbitrarily chosen maximum.
-const MAX_DOWNLOAD_THREADS: usize = 16;
-
-/// Retrieves listed targets and attempts to download them for validation purposes. We use a Rayon
-/// thread pool instead of tokio for async execution because `reqwest::blocking` creates a tokio
-/// runtime (and multiple tokio runtimes are not supported).
-fn retrieve_targets(repo: &Repository) -> Result<(), Error> {
-    let targets = &repo.targets().signed.targets;
-    let thread_pool = rayon::ThreadPoolBuilder::new()
-        .num_threads(min(num_cpus::get(), MAX_DOWNLOAD_THREADS))
-        .build()
-        .context(error::ThreadPoolSnafu)?;
-
-    // create the channels through which our download results will be passed
-    let (tx, rx) = mpsc::channel();
-
-    for target in targets.keys() {
-        let repo = repo.clone();
-        let tx = tx.clone();
-        info!("Downloading target: {}", target.raw());
-        let target = target.clone();
-        thread_pool.spawn(move || {
-            tx.send(download_targets(&repo, target))
-                // inability to send on this channel is unrecoverable
-                .unwrap();
-        });
-    }
-    // close all senders
-    drop(tx);
-
-    // block and await all downloads
-    let results: Vec<Result<u64, Error>> = rx.into_iter().collect();
-
-    // check all results and return the first error we see
-    for result in results {
-        result?;
-    }
-
-    // no errors were found, the targets are validated
-    Ok(())
-}
-
-fn download_targets(repo: &Repository, target: TargetName) -> Result<u64, Error> {
-    let mut reader = match repo.read_target(&target) {
-        Ok(Some(reader)) => reader,
-        Ok(None) => {
-            return error::TargetMissingSnafu {
-                target: target.raw(),
-            }
-            .fail()
-        }
-        Err(e) => {
-            return Err(e).context(error::TargetReadSnafu {
-                target: target.raw(),
-            })
-        }
-    };
-    // tough's `Read` implementation validates the target as it's being downloaded
-    io::copy(&mut reader, &mut io::sink()).context(error::TargetDownloadSnafu {
-        target: target.raw(),
-    })
-}
-
-fn validate_repo(
-    root_role_path: &PathBuf,
-    metadata_url: Url,
-    targets_url: &Url,
-    validate_targets: bool,
-) -> Result<(), Error> {
-    // Load the repository
-    let repo = RepositoryLoader::new(
-        File::open(root_role_path).context(repo_error::FileSnafu {
-            path: root_role_path,
-        })?,
-        metadata_url.clone(),
-        targets_url.clone(),
-    )
-    .load()
-    .context(repo_error::RepoLoadSnafu {
-        metadata_base_url: metadata_url.clone(),
-    })?;
-    info!("Loaded TUF repo: {}", metadata_url);
-    if validate_targets {
-        // Try retrieving listed targets
-        retrieve_targets(&repo)?;
-    }
-
-    Ok(())
-}
-
-/// Common entrypoint from main()
-pub(crate) fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result<(), Error> {
-    // If a lock file exists, use that, otherwise use Infra.toml
-    let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false)
-        .context(repo_error::ConfigSnafu)?;
-    trace!("Parsed infra config: {:?}", infra_config);
-    let repo_config = infra_config
-        .repo
-        .as_ref()
-        .context(repo_error::MissingConfigSnafu {
-            missing: "repo section",
-        })?
-        .get(&validate_repo_args.repo)
-        .context(repo_error::MissingConfigSnafu {
-            missing: format!("definition for repo {}", &validate_repo_args.repo),
-        })?;
-
-    let repo_urls = repo_urls(
-        repo_config,
-        &validate_repo_args.variant,
-        &validate_repo_args.arch,
-    )?
-    .context(repo_error::MissingRepoUrlsSnafu {
-        repo: &validate_repo_args.repo,
-    })?;
-    validate_repo(
-        &validate_repo_args.root_role_path,
-        repo_urls.0,
-        repo_urls.1,
-        validate_repo_args.validate_targets,
-    )
-}
-
-mod error {
-    use snafu::Snafu;
-    use std::io;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Invalid percentage specified: {} is greater than 100", percentage))]
-        InvalidPercentage { percentage: u8 },
-
-        #[snafu(context(false), display("{}", source))]
-        Repo {
-            #[snafu(source(from(crate::repo::Error, Box::new)))]
-            source: Box<crate::repo::Error>,
-        },
-
-        #[snafu(display("Failed to download and write target '{}': {}", target, source))]
-        TargetDownload { target: String, source: io::Error },
-
-        #[snafu(display("Missing target: {}", target))]
-        TargetMissing { target: String },
-
-        #[snafu(display("Failed to read target '{}' from repo: {}", target, source))]
-        TargetRead {
-            target: String,
-            #[snafu(source(from(tough::error::Error, Box::new)))]
-            source: Box<tough::error::Error>,
-        },
-
-        #[snafu(display("Unable to create thread pool: {}", source))]
-        ThreadPool { source: rayon::ThreadPoolBuildError },
-    }
-}
-pub(crate) use error::Error;
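The removed download fan-out relies on a pattern worth keeping in mind: spawn blocking work on a rayon pool and collect results over an `mpsc` channel, since `reqwest::blocking` cannot run inside another tokio runtime. A standalone sketch under those assumptions (the `rayon` dependency and the stand-in result payload are illustrative, not the deleted code itself):

// Minimal sketch of the fan-out/fan-in pattern: spawn work on a rayon thread
// pool, send each result through an mpsc channel, collect once senders drop.
use std::sync::mpsc;

fn main() {
    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(4)
        .build()
        .expect("failed to build thread pool");

    let (tx, rx) = mpsc::channel();
    for item in 0..16u64 {
        let tx = tx.clone();
        pool.spawn(move || {
            // Stand-in for a target download; returns a Result like the original.
            let result: Result<u64, String> = Ok(item * 2);
            tx.send(result).expect("receiver hung up");
        });
    }
    // Drop the original sender so the receiver sees the channel close.
    drop(tx);

    let results: Vec<Result<u64, String>> = rx.into_iter().collect();
    assert_eq!(results.len(), 16);
}

Dropping the last sender is what lets `rx.into_iter()` terminate; the deleted code depends on the same detail when it drops `tx` after the spawn loop.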
diff --git a/tools/pubsys/src/vmware/govc.rs b/tools/pubsys/src/vmware/govc.rs
deleted file mode 100644
index b44d67008ea..00000000000
--- a/tools/pubsys/src/vmware/govc.rs
+++ /dev/null
@@ -1,177 +0,0 @@
-//! The govc module handles the process of building and executing the calls to Docker in order to
-//! run specific `govc` commands.
-use duct::cmd;
-use log::trace;
-use pubsys_config::vmware::{Datacenter, DatacenterCreds};
-use snafu::ResultExt;
-use std::env;
-use std::path::Path;
-use std::process::Output;
-
-pub(crate) struct Govc {
-    env_config: Vec<String>,
-}
-
-impl Govc {
-    const GOVC: &'static str = "govc";
-
-    /// Make a new instance of `Govc`, creating all of the environment variables required to run
-    /// `govc` as Docker `--env` arguments
-    pub(crate) fn new(dc: Datacenter, creds: DatacenterCreds) -> Self {
-        let mut env_config = Vec::new();
-        env_config.env_arg("GOVC_USERNAME", creds.username);
-        env_config.env_arg("GOVC_PASSWORD", creds.password);
-        env_config.env_arg("GOVC_URL", dc.vsphere_url);
-        env_config.env_arg("GOVC_DATACENTER", dc.datacenter);
-        env_config.env_arg("GOVC_DATASTORE", dc.datastore);
-        env_config.env_arg("GOVC_NETWORK", dc.network);
-        env_config.env_arg("GOVC_RESOURCE_POOL", dc.resource_pool);
-        env_config.env_arg("GOVC_FOLDER", dc.folder);
-
-        Self { env_config }
-    }
-
-    /// Run `govc import.ova` using Docker.
-    ///
-    /// Using the given name, OVA path, and import spec path, this function builds the `govc
-    /// import.ova` command as it will be used in the container. It also builds the necessary bind
-    /// mount arguments to mount the import spec and OVA into the container. Finally, it calls
-    /// `govc` via `docker run` invocation using these arguments.
-    pub(crate) fn upload_ova<S, P1, P2>(
-        self,
-        name: S,
-        ova_path: P1,
-        import_spec_path: P2,
-    ) -> Result<Output>
-    where
-        S: AsRef<str>,
-        P1: AsRef<Path>,
-        P2: AsRef<Path>,
-    {
-        let name = name.as_ref();
-        let ova_host_path = ova_path.as_ref();
-        let import_spec_host_path = import_spec_path.as_ref();
-
-        // Define the paths to the OVA and import spec we will use for the bind mounts into the
-        // container
-        let ova_container_path = "/tmp/bottlerocket.ova";
-        let import_spec_container_path = "/tmp/import.spec";
-
-        // --mount type=bind,source="path/to/thing",target=/tmp/thing,readonly
-        let mount_config = &[
-            // Mount the import spec file
-            "--mount",
-            &format!(
-                "type=bind,source={},target={},readonly",
-                import_spec_host_path.display(),
-                import_spec_container_path
-            ),
-            // Mount the OVA
-            "--mount",
-            &format!(
-                "type=bind,source={},target={},readonly",
-                ova_host_path.display(),
-                ova_container_path
-            ),
-        ];
-
-        // govc import.ova -options=/path/to/spec -name bottlerocket_vm_name /path/to/ova
-        let govc_cmd = &[
-            Self::GOVC,
-            "import.ova",
-            &format!("-options={}", import_spec_container_path),
-            "-name",
-            name,
-            ova_container_path,
-        ];
-
-        let env_config: Vec<&str> = self.env_config.iter().map(|s| s.as_ref()).collect();
-
-        docker_run(&env_config, Some(mount_config), govc_cmd)
-    }
-}
-
-/// Execute `docker run` using the SDK container with the specified environment, mount, and command
-/// arguments.
-///
-/// This builds the entire `docker run` command string using a list of Docker `--env FOO=BAR`
-/// strings, an optional list of `--mount` strings, and a list of strings meant to be the command
-/// to run in the container.
-// The arguments are `&[&str]` in an attempt to be as flexible as possible for the caller
-fn docker_run(docker_env: &[&str], mount: Option<&[&str]>, command: &[&str]) -> Result<Output> {
-    let sdk = env::var("BUILDSYS_SDK_IMAGE").context(error::EnvironmentSnafu {
-        var: "BUILDSYS_SDK_IMAGE",
-    })?;
-    trace!("SDK image: {}", sdk);
-
-    let mut args = vec!["run"];
-    args.push("--net=host");
-    args.extend(docker_env);
-
-    if let Some(mount_cfg) = mount {
-        args.extend(mount_cfg)
-    }
-
-    args.push(&sdk);
-    args.extend(command);
-
-    let output = cmd("docker", args)
-        .stderr_to_stdout()
-        .stdout_capture()
-        .unchecked()
-        .run()
-        .context(error::CommandStartSnafu)?;
-
-    let stdout = String::from_utf8_lossy(&output.stdout);
-    trace!("{}", stdout);
-    if output.status.success() {
-        Ok(output)
-    } else {
-        error::DockerSnafu { output: stdout }.fail()
-    }
-}
-
-// =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=
-
-/// Helper trait for constructing Docker `--env` arguments
-trait EnvArg {
-    fn env_arg<S1, S2>(&mut self, key: S1, value: S2)
-    where
-        S1: AsRef<str>,
-        S2: AsRef<str>;
-}
-
-impl EnvArg for Vec<String> {
-    fn env_arg<S1, S2>(&mut self, key: S1, value: S2)
-    where
-        S1: AsRef<str>,
-        S2: AsRef<str>,
-    {
-        self.push("--env".to_string());
-        self.push(format!("{}={}", key.as_ref(), value.as_ref()))
-    }
-}
-
-// =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=   =^..^=
-
-pub(crate) mod error {
-    use snafu::Snafu;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Failed to start command: {}", source))]
-        CommandStart { source: std::io::Error },
-
-        #[snafu(display("Docker invocation failed: {}", output))]
-        Docker { output: String },
-
-        #[snafu(display("Missing environment variable '{}'", var))]
-        Environment {
-            var: String,
-            source: std::env::VarError,
-        },
-    }
-}
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
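The `EnvArg` helper in the deleted file captures a small, reusable idiom: expanding key/value pairs into repeated `--env KEY=VALUE` arguments for `docker run`. A free-standing sketch of the same idiom, with hypothetical values:

// Sketch of the `--env` argument-building pattern: each pair becomes a
// "--env" flag followed by "KEY=VALUE", ready to splice into `docker run`.
fn env_arg(args: &mut Vec<String>, key: &str, value: &str) {
    args.push("--env".to_string());
    args.push(format!("{}={}", key, value));
}

fn main() {
    let mut args = vec!["run".to_string()];
    env_arg(&mut args, "GOVC_USERNAME", "admin");
    env_arg(&mut args, "GOVC_DATACENTER", "dc1");
    assert_eq!(
        args,
        ["run", "--env", "GOVC_USERNAME=admin", "--env", "GOVC_DATACENTER=dc1"]
    );
}

This is the same shape `Govc::new` produces before the pairs are spliced into the `docker_run` invocation above.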
diff --git a/tools/pubsys/src/vmware/mod.rs b/tools/pubsys/src/vmware/mod.rs
deleted file mode 100644
index 3eabc7edce5..00000000000
--- a/tools/pubsys/src/vmware/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub(crate) mod govc;
-pub(crate) mod upload_ova;
diff --git a/tools/pubsys/src/vmware/upload_ova/mod.rs b/tools/pubsys/src/vmware/upload_ova/mod.rs
deleted file mode 100644
index 3df0454dfc2..00000000000
--- a/tools/pubsys/src/vmware/upload_ova/mod.rs
+++ /dev/null
@@ -1,239 +0,0 @@
-//! The upload_ova module owns the 'upload_ova' subcommand and is responsible for collating all of
-//! the config necessary to upload an OVA bundle to VMware datacenters.
-use crate::vmware::govc::Govc;
-use crate::Args;
-use clap::Parser;
-use log::{debug, info, trace};
-use pubsys_config::vmware::{
-    Datacenter, DatacenterBuilder, DatacenterCreds, DatacenterCredsBuilder, DatacenterCredsConfig,
-    VMWARE_CREDS_PATH,
-};
-use pubsys_config::InfraConfig;
-use serde::Serialize;
-use snafu::{ensure, OptionExt, ResultExt};
-use std::fs;
-use std::path::PathBuf;
-use tempfile::NamedTempFile;
-use tinytemplate::TinyTemplate;
-
-const SPEC_TEMPLATE_NAME: &str = "spec_template";
-
-/// Uploads a Bottlerocket OVA to VMware datacenters
-#[derive(Debug, Parser)]
-pub(crate) struct UploadArgs {
-    /// Path to the OVA image
-    #[arg(short = 'o', long)]
-    ova: PathBuf,
-
-    /// Path to the import spec
-    #[arg(short = 's', long)]
-    spec: PathBuf,
-
-    /// The desired VM name
-    #[arg(short = 'n', long)]
-    name: String,
-
-    /// Make the uploaded OVA a VM template
-    #[arg(long)]
-    mark_as_template: bool,
-
-    /// Datacenters to which you want to upload the OVA
-    #[arg(long, value_delimiter = ',')]
-    datacenters: Vec<String>,
-}
-
-/// Common entrypoint from main()
-pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> {
-    // If a lock file exists, use that, otherwise use Infra.toml or default
-    let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true)
-        .context(error::InfraConfigSnafu)?;
-    trace!("Using infra config: {:?}", infra_config);
-
-    let vmware = infra_config
-        .vmware
-        .context(error::MissingConfigSnafu { missing: "vmware" })?;
-
-    // If the user gave an override list of datacenters, use it, otherwise use what's in the config
-    let upload_datacenters = if !upload_args.datacenters.is_empty() {
-        &upload_args.datacenters
-    } else {
-        &vmware.datacenters
-    };
-    ensure!(
-        !upload_datacenters.is_empty(),
-        error::MissingConfigSnafu {
-            missing: "vmware.datacenters"
-        }
-    );
-
-    // Retrieve credentials from GOVC_ environment variables
-    let creds_env = DatacenterCredsBuilder::from_env();
-    // Retrieve credentials from file. The `home` crate is used to construct the
-    // VMWARE_CREDS_PATH, and it's possible (however unlikely) that it is unable to determine the
-    // user's home folder.
-    let creds_file = if let Some(ref creds_file) = *VMWARE_CREDS_PATH {
-        if creds_file.exists() {
-            info!("Using vSphere credentials file at {}", creds_file.display());
-            DatacenterCredsConfig::from_path(creds_file).context(error::VmwareConfigSnafu)?
- } else { - info!("vSphere credentials file not found, will attempt to use environment"); - DatacenterCredsConfig::default() - } - } else { - info!("Unable to determine vSphere credentials file location, will attempt to use environment"); - DatacenterCredsConfig::default() - }; - - // Retrieve datacenter-related GOVC_ environment variables and any common configuration given - // via Infra.toml - let dc_env = DatacenterBuilder::from_env(); - let dc_common = vmware.common.as_ref(); - - // Read the import spec as a template - let import_spec_str = fs::read_to_string(&upload_args.spec).context(error::FileSnafu { - action: "read", - path: &upload_args.spec, - })?; - let mut tt = TinyTemplate::new(); - tt.add_template(SPEC_TEMPLATE_NAME, &import_spec_str) - .context(error::AddTemplateSnafu { - path: &upload_args.spec, - })?; - - info!( - "Uploading to datacenters: {}", - &upload_datacenters.join(", ") - ); - for dc in upload_datacenters { - debug!("Building config for {}", &dc); - // If any specific configuration exists for this datacenter, retrieve it from VMware - // config. Then build out a complete datacenter config with all values necessary to - // interact with VMware. Environment variables trump all others, so start with those, then - // fill in any missing items with datacenter-specific configuration and any common - // configuration. - let dc_config = vmware.datacenter.get(dc); - trace!("{} config: {:?}", &dc, &dc_config); - let datacenter: Datacenter = dc_env - .take_missing_from(dc_config) - .take_missing_from(dc_common) - .build() - .context(error::DatacenterBuildSnafu)?; - - // Use a similar pattern here for credentials; start with environment variables and fill in - // any missing items with the datacenter-specific credentials from file. - let dc_creds = creds_file.datacenter.get(dc); - let creds: DatacenterCreds = creds_env - .take_missing_from(dc_creds) - .build() - .context(error::CredsBuildSnafu)?; - - // Render the import spec with this datacenter's details and write to temp file - let rendered_spec = render_spec(&tt, &datacenter.network, upload_args.mark_as_template)?; - let import_spec = NamedTempFile::new().context(error::TempFileSnafu)?; - fs::write(import_spec.path(), &rendered_spec).context(error::FileSnafu { - action: "write", - path: import_spec.path(), - })?; - trace!("Import spec: {}", &rendered_spec); - - if upload_args.mark_as_template { - info!( - "Uploading OVA to datacenter '{}' as template with name: '{}'", - &dc, &upload_args.name - ); - } else { - info!( - "Uploading OVA to datacenter '{}' with name '{}'", - &dc, &upload_args.name - ); - } - - Govc::new(datacenter, creds) - .upload_ova(&upload_args.name, &upload_args.ova, import_spec) - .context(error::UploadOvaSnafu)?; - } - - Ok(()) -} - -/// Render the import spec template given the current network and template setting. -// This exists primarily to abstract the creation of the Context struct that is required by -// TinyTemplate; it's pretty ugly to do inline with the rest of the code. 
-fn render_spec<S>(tt: &TinyTemplate<'_>, network: S, mark_as_template: bool) -> Result<String>
-where
-    S: AsRef<str>,
-{
-    #[derive(Debug, Serialize)]
-    struct Context {
-        network: String,
-        mark_as_template: bool,
-    }
-
-    let context = Context {
-        network: network.as_ref().to_string(),
-        mark_as_template,
-    };
-
-    tt.render(SPEC_TEMPLATE_NAME, &context)
-        .context(error::RenderTemplateSnafu)
-}
-
-mod error {
-    use snafu::Snafu;
-    use std::io;
-    use std::path::PathBuf;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub(crate) enum Error {
-        #[snafu(display("Error building template from '{}': {}", path.display(), source))]
-        AddTemplate {
-            path: PathBuf,
-            source: tinytemplate::error::Error,
-        },
-
-        #[snafu(display("Unable to build datacenter credentials: {}", source))]
-        CredsBuild {
-            source: pubsys_config::vmware::Error,
-        },
-
-        #[snafu(display("Unable to build datacenter config: {}", source))]
-        DatacenterBuild {
-            source: pubsys_config::vmware::Error,
-        },
-
-        #[snafu(display("Missing environment variable '{}'", var))]
-        Environment {
-            var: String,
-            source: std::env::VarError,
-        },
-
-        #[snafu(display("Failed to {} '{}': {}", action, path.display(), source))]
-        File {
-            action: String,
-            path: PathBuf,
-            source: io::Error,
-        },
-
-        #[snafu(display("Error reading config: {}", source))]
-        InfraConfig { source: pubsys_config::Error },
-
-        #[snafu(display("Infra.toml is missing {}", missing))]
-        MissingConfig { missing: String },
-
-        #[snafu(display("Error rendering template: {}", source))]
-        RenderTemplate { source: tinytemplate::error::Error },
-
-        #[snafu(display("Failed to create temporary file: {}", source))]
-        TempFile { source: io::Error },
-
-        #[snafu(display("Error reading config: {}", source))]
-        VmwareConfig {
-            source: pubsys_config::vmware::Error,
-        },
-
-        #[snafu(display("Failed to upload OVA: {}", source))]
-        UploadOva { source: crate::vmware::govc::Error },
-    }
-}
-pub(crate) use error::Error;
-type Result<T> = std::result::Result<T, Error>;
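The removed `render_spec` centers on TinyTemplate's register-then-render flow with a `Serialize` context. A minimal sketch of that flow, assuming the `tinytemplate` and `serde` crates and a hypothetical inline template rather than the real OVF import spec:

// Sketch of the TinyTemplate flow: register a template by name, then render
// it with a serializable context carrying the per-datacenter values.
use serde::Serialize;
use tinytemplate::TinyTemplate;

#[derive(Serialize)]
struct Context {
    network: String,
    mark_as_template: bool,
}

fn main() {
    let mut tt = TinyTemplate::new();
    tt.add_template("spec", "network={network}, mark_as_template={mark_as_template}")
        .expect("invalid template");

    let rendered = tt
        .render(
            "spec",
            &Context {
                network: "VM Network".to_string(),
                mark_as_template: false,
            },
        )
        .expect("render failed");
    assert!(rendered.contains("VM Network"));
}

The deleted code follows the same flow, with the template text read from the `--spec` file and the rendered result written to a temp file for `govc`.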
"${0%/*}/partyplanner" - -OUTPUT_FMT="raw" -BUILDER_ARCH="$(uname -m)" -OVF_TEMPLATE="" - -GRUB_SET_PRIVATE_VAR="no" -XFS_DATA_PARTITION="no" -UEFI_SECURE_BOOT="no" - -for opt in "$@"; do - optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" - case "${opt}" in - --package-dir=*) PACKAGE_DIR="${optarg}" ;; - --output-dir=*) OUTPUT_DIR="${optarg}" ;; - --output-fmt=*) OUTPUT_FMT="${optarg}" ;; - --os-image-size-gib=*) OS_IMAGE_SIZE_GIB="${optarg}" ;; - --data-image-size-gib=*) DATA_IMAGE_SIZE_GIB="${optarg}" ;; - --os-image-publish-size-gib=*) OS_IMAGE_PUBLISH_SIZE_GIB="${optarg}" ;; - --data-image-publish-size-gib=*) DATA_IMAGE_PUBLISH_SIZE_GIB="${optarg}" ;; - --partition-plan=*) PARTITION_PLAN="${optarg}" ;; - --ovf-template=*) OVF_TEMPLATE="${optarg}" ;; - --with-grub-set-private-var=*) GRUB_SET_PRIVATE_VAR="${optarg}" ;; - --xfs-data-partition=*) XFS_DATA_PARTITION="${optarg}" ;; - --with-uefi-secure-boot=*) UEFI_SECURE_BOOT="${optarg}" ;; - esac -done - -case "${OUTPUT_FMT}" in - raw|qcow2|vmdk) ;; - *) - echo "unexpected image output format '${OUTPUT_FMT}'" >&2 - exit 1 - ;; -esac - -case "${PARTITION_PLAN}" in - split|unified) ;; - *) - echo "unexpected partition plan '${PARTITION_PLAN}'" >&2 - exit 1 - ;; -esac - -# Fail fast if the OVF template doesn't exist, or doesn't match the layout. -if [ "${OUTPUT_FMT}" == "vmdk" ] ; then - if [ ! -s "${OVF_TEMPLATE}" ] ; then - echo "required OVF template not found: ${OVF_TEMPLATE}" >&2 - exit 1 - fi - - if [ "${PARTITION_PLAN}" == "split" ] ; then - if ! grep -Fq '{{DATA_DISK}}' "${OVF_TEMPLATE}" ; then - echo "Missing data disk in OVF template, which is required for 'split' layout." >&2 - exit 1 - fi - fi - - if [ "${PARTITION_PLAN}" == "unified" ] ; then - if grep -Fq '{{DATA_DISK}}' "${OVF_TEMPLATE}" ; then - echo "Incorrect data disk in OVF template, which is not supported for 'unified' layout." >&2 - exit 1 - fi - fi - - if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - if ! grep -Fq '{{DB_CERT_DER_HEX}}' "${OVF_TEMPLATE}" ; then - echo "Missing CA certificate field in OVF template, which is required for Secure Boot support." >&2 - exit 1 - fi - fi -fi - -# Store output artifacts in a versioned directory. 
-OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" -mkdir -p "${OUTPUT_DIR}" - -FILENAME_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" -SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}" -VERSIONED_SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}" -FRIENDLY_VERSIONED_SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-v${VERSION_ID}" - -OS_IMAGE_NAME="${FILENAME_PREFIX}" -OS_IMAGE_SYMLINK="${SYMLINK_PREFIX}" -OS_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}" -OS_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}" - -DATA_IMAGE_NAME="${FILENAME_PREFIX}-data" -DATA_IMAGE_SYMLINK="${SYMLINK_PREFIX}-data" -DATA_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-data" -DATA_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-data" - -BOOT_IMAGE_NAME="${FILENAME_PREFIX}-boot.ext4.lz4" -BOOT_IMAGE_SYMLINK="${SYMLINK_PREFIX}-boot.ext4.lz4" -BOOT_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-boot.ext4.lz4" -BOOT_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-boot.ext4.lz4" - -VERITY_IMAGE_NAME="${FILENAME_PREFIX}-root.verity.lz4" -VERITY_IMAGE_SYMLINK="${SYMLINK_PREFIX}-root.verity.lz4" -VERITY_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-root.verity.lz4" -VERITY_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-root.verity.lz4" - -ROOT_IMAGE_NAME="${FILENAME_PREFIX}-root.ext4.lz4" -ROOT_IMAGE_SYMLINK="${SYMLINK_PREFIX}-root.ext4.lz4" -ROOT_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-root.ext4.lz4" -ROOT_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-root.ext4.lz4" - -OS_IMAGE="$(mktemp)" -BOOT_IMAGE="$(mktemp)" -VERITY_IMAGE="$(mktemp)" -ROOT_IMAGE="$(mktemp)" -DATA_IMAGE="$(mktemp)" -EFI_IMAGE="$(mktemp)" -PRIVATE_IMAGE="$(mktemp)" -BOTTLEROCKET_DATA="$(mktemp)" - -ROOT_MOUNT="$(mktemp -d)" -BOOT_MOUNT="$(mktemp -d)" -DATA_MOUNT="$(mktemp -d)" -EFI_MOUNT="$(mktemp -d)" -PRIVATE_MOUNT="$(mktemp -d)" - -SBKEYS="${HOME}/sbkeys" - -SELINUX_ROOT="/etc/selinux" -SELINUX_POLICY="fortified" -SELINUX_FILE_CONTEXTS="${ROOT_MOUNT}/${SELINUX_ROOT}/${SELINUX_POLICY}/contexts/files/file_contexts" - -VERITY_VERSION=1 -VERITY_HASH_ALGORITHM=sha256 -VERITY_DATA_BLOCK_SIZE=4096 -VERITY_HASH_BLOCK_SIZE=4096 - -# Bottlerocket has been experimentally shown to boot faster on EBS volumes when striping the root filesystem into 4MiB stripes. -# We use 4kb ext4 blocks. The stride and stripe should both be $STRIPE_SIZE / $EXT4_BLOCK_SIZE -ROOT_STRIDE=1024 -ROOT_STRIPE_WIDTH=1024 - -case "${PARTITION_PLAN}" in - split) - truncate -s "${OS_IMAGE_SIZE_GIB}G" "${OS_IMAGE}" - truncate -s "${DATA_IMAGE_SIZE_GIB}G" "${DATA_IMAGE}" - ;; - unified) - truncate -s "$((OS_IMAGE_SIZE_GIB + DATA_IMAGE_SIZE_GIB))G" "${OS_IMAGE}" - ;; -esac - -declare -A partlabel parttype partguid partsize partoff -set_partition_sizes \ - "${OS_IMAGE_SIZE_GIB}" "${DATA_IMAGE_SIZE_GIB}" "${PARTITION_PLAN}" \ - partsize partoff -set_partition_labels partlabel -set_partition_types parttype -set_partition_uuids partguid "${PARTITION_PLAN}" - -declare -a partargs -for part in \ - BIOS \ - EFI-A BOOT-A ROOT-A HASH-A RESERVED-A \ - EFI-B BOOT-B ROOT-B HASH-B RESERVED-B \ - PRIVATE DATA-A DATA-B ; -do - # We create the DATA-B partition separately if we're using the split layout - if [ "${part}" == "DATA-B" ] ; then - continue - fi - - # Each partition is aligned to a 1 MiB boundary, and extends to the sector - # before the next partition starts. 
Specify the end point in sectors so we - # can subtract a sector to fix the off-by-one error that comes from adding - # start and size together. (1 MiB contains 2048 512-byte sectors.) - part_start="${partoff[${part}]}" - part_end="$((part_start + partsize[${part}]))" - part_end="$((part_end * 2048 - 1))" - - partargs+=(-n "0:${part_start}M:${part_end}") - partargs+=(-c "0:${partlabel[${part}]}") - partargs+=(-t "0:${parttype[${part}]}") - partargs+=(-u "0:${partguid[${part}]:-R}") - - # Boot partition attributes: - # 48 = gptprio priority bit - # 56 = gptprio successful bit - case "${part}" in - BOOT-A) partargs+=(-A 0:"set":48 -A 0:"set":56) ;; - BOOT-B) partargs+=(-A 0:"clear":48 -A 0:"clear":56) ;; - esac -done - -sgdisk --clear "${partargs[@]}" --sort --print "${OS_IMAGE}" - -# Partition the separate data disk, if we're using the split layout. -if [ "${PARTITION_PLAN}" == "split" ] ; then - data_start="${partoff[DATA-B]}" - data_end=$((data_start + partsize[DATA-B])) - data_end=$((data_end * 2048 - 1)) - sgdisk --clear \ - -n "0:${data_start}M:${data_end}" \ - -c "0:${partlabel[DATA-B]}" \ - -t "0:${parttype[DATA-B]}" \ - -u "0:${partguid[DATA-B]}" \ - --sort --print "${DATA_IMAGE}" -fi - -INSTALL_TIME="$(date -u +%Y-%m-%dT%H:%M:%SZ)" -rpm -iv --ignorearch --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm - -# inventory installed packages -INVENTORY_QUERY="\{\"Name\":\"%{NAME}\"\ -,\"Publisher\":\"Bottlerocket\"\ -,\"Version\":\"${VERSION_ID}\"\ -,\"Release\":\"${BUILD_ID}\"\ -,\"InstalledTime\":\"${INSTALL_TIME}\"\ -,\"ApplicationType\":\"%{GROUP}\"\ -,\"Architecture\":\"%{ARCH}\"\ -,\"Url\":\"%{URL}\"\ -,\"Summary\":\"%{Summary}\"\}\n" - -mapfile -t installed_rpms <<< "$(rpm -qa --root "${ROOT_MOUNT}" \ - --queryformat "${INVENTORY_QUERY}")" - -# wrap installed_rpms mapfile into json -INVENTORY_DATA="$(jq --raw-output . <<< "${installed_rpms[@]}")" -# remove the 'bottlerocket-' prefix from package names -INVENTORY_DATA="$(jq --arg PKG_PREFIX "bottlerocket-" \ - '(.Name) |= sub($PKG_PREFIX; "")' <<< "${INVENTORY_DATA}")" -# sort by package name and add 'Content' as top-level -INVENTORY_DATA="$(jq --slurp 'sort_by(.Name)' <<< "${INVENTORY_DATA}" | jq '{"Content": .}')" -printf "%s\n" "${INVENTORY_DATA}" > "${ROOT_MOUNT}/usr/share/bottlerocket/application-inventory.json" - -# install licenses -install -p -m 0644 /host/{COPYRIGHT,LICENSE-APACHE,LICENSE-MIT} "${ROOT_MOUNT}"/usr/share/licenses/ -mksquashfs \ - "${ROOT_MOUNT}"/usr/share/licenses \ - "${ROOT_MOUNT}"/usr/share/bottlerocket/licenses.squashfs \ - -no-exports -all-root -comp zstd -rm -rf "${ROOT_MOUNT}"/var/lib "${ROOT_MOUNT}"/usr/share/licenses/* - -if [[ "${ARCH}" == "x86_64" ]]; then - # MBR and BIOS-BOOT - echo "(hd0) ${OS_IMAGE}" > "${ROOT_MOUNT}/boot/grub/device.map" - "${ROOT_MOUNT}/sbin/grub-bios-setup" \ - --directory="${ROOT_MOUNT}/boot/grub" \ - --device-map="${ROOT_MOUNT}/boot/grub/device.map" \ - --root="hd0" \ - --skip-fs-probe \ - "${OS_IMAGE}" - - rm -vf "${ROOT_MOUNT}"/boot/grub/* "${ROOT_MOUNT}"/sbin/grub* -fi - -# We also need an EFI partition, formatted FAT32 with the -# EFI binary at the correct path, e.g. /efi/boot. The grub -# package has placed the image in /boot/efi/EFI/BOOT. -mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}" - -# Do the setup required for `pesign` and `gpg` signing and -# verification to "just work" later on, regardless of which -# type of signing profile we have. 
-if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - declare -a SHIM_SIGN_KEY - declare -a CODE_SIGN_KEY - - # For an AWS profile, we expect a config file for the PKCS11 - # helper. Otherwise, there should be a local key and cert. - if [ -s "${HOME}/.config/aws-kms-pkcs11/config.json" ] ; then - # Set AWS environment variables from build secrets, if present. - for var in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN ; do - val="${var,,}" - val="${HOME}/.aws/${val//_/-}.env" - [ -s "${val}" ] || continue - declare -x "${var}=$(cat "${val}")" - done - # Verify that AWS credentials are functional. - aws sts get-caller-identity - # Log all PKCS11 helper activity, to simplify debugging. - export AWS_KMS_PKCS11_DEBUG=1 - SB_KEY_SOURCE="aws" - SHIM_SIGN_KEY=(-c shim-sign-key -t shim-sign-key) - CODE_SIGN_KEY=(-c code-sign-key -t code-sign-key) - else - # Disable the PKCS11 helper. - rm /etc/pkcs11/modules/aws-kms-pkcs11.module - - # Generate the PKCS12 archives for import. - openssl pkcs12 \ - -export \ - -passout pass: \ - -inkey "${SBKEYS}/shim-sign.key" \ - -in "${SBKEYS}/shim-sign.crt" \ - -certfile "${SBKEYS}/db.crt" \ - -out "${SBKEYS}/shim-sign.p12" - - openssl pkcs12 \ - -export \ - -passout pass: \ - -inkey "${SBKEYS}/code-sign.key" \ - -in "${SBKEYS}/code-sign.crt" \ - -certfile "${SBKEYS}/vendor.crt" \ - -out "${SBKEYS}/code-sign.p12" - - # Import certificates and private key archive. - PEDB="/etc/pki/pesign" - - certutil -d "${PEDB}" -A -n db -i "${SBKEYS}/db.crt" -t ",,C" - certutil -d "${PEDB}" -A -n shim-sign-key -i "${SBKEYS}/shim-sign.crt" -t ",,P" - pk12util -d "${PEDB}" -i "${SBKEYS}/shim-sign.p12" -W "" - - certutil -d "${PEDB}" -A -n vendor -i "${SBKEYS}/vendor.crt" -t ",,C" - certutil -d "${PEDB}" -A -n code-sign-key -i "${SBKEYS}/code-sign.crt" -t ",,P" - pk12util -d "${PEDB}" -i "${SBKEYS}/code-sign.p12" -W "" - - certutil -d "${PEDB}" -L - SB_KEY_SOURCE="local" - SHIM_SIGN_KEY=(-c shim-sign-key) - CODE_SIGN_KEY=(-c code-sign-key) - fi - - # Convert certificates from PEM format (ASCII) to DER (binary). This could be - # done when the certificates are created, but the resulting binary files are - # not as nice to store in source control. - for cert in PK KEK db vendor ; do - openssl x509 \ - -inform PEM -in "${SBKEYS}/${cert}.crt" \ - -outform DER -out "${SBKEYS}/${cert}.cer" - done - - # For signing the grub config, we need to embed the GPG public key in binary - # form, which is similarly awkward to store in source control. - gpg --import "${SBKEYS}/config-sign.key" - if [ "${SB_KEY_SOURCE}" == "aws" ] ; then - gpg --card-status - fi - gpg --export > "${SBKEYS}/config-sign.pubkey" - gpg --list-keys -fi - -# shim expects the following data structure in `.vendor_cert`: -# -# struct { -# uint32_t vendor_authorized_size; -# uint32_t vendor_deauthorized_size; -# uint32_t vendor_authorized_offset; -# uint32_t vendor_deauthorized_offset; -# } cert_table; -# -cert_table() { - local input output size offset uint32_t - input="${1:?}" - output="${2:?}" - size="$(stat -c %s "${input}")" - rm -f "${output}" - # The cert payload is offset by four 4-byte uint32_t values in the header. - offset="$((4 * 4))" - for n in "${size}" 0 "${offset}" "$(( size + offset ))" ; do - printf -v uint32_t '\\x%02x\\x%02x\\x%02x\\x%02x' \ - $((n & 255)) $((n >> 8 & 255)) $((n >> 16 & 255)) $((n >> 24 & 255)) - printf "${uint32_t}" >> "${output}" - done - cat "${input}" >> "${output}" - # Zero-pad the output to the expected section size. 
Otherwise a subsequent - # `objcopy` operation on the same section might fail to replace it, if the - # new vendor certificate is larger than this one. - truncate -s 4096 "${output}" -} - -# Helper function to log the object layout before and after changes. -objdumpcopy() { - local obj objdump objcopy - obj="${1:?}" - shift - objdump="${ARCH}-bottlerocket-linux-gnu-objdump" - objcopy="${ARCH}-bottlerocket-linux-gnu-objcopy" - "${objdump}" -h "${obj}" - "${objcopy}" "${@}" "${obj}" - "${objdump}" -h "${obj}" -} - -pushd "${EFI_MOUNT}/EFI/BOOT" >/dev/null -shims=(boot*.efi) -shim="${shims[0]}" -grubs=(grub*.efi) -grub="${grubs[0]}" -mokms=(mm*.efi) -mokm="${mokms[0]}" -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - # Convert the vendor certificate to the expected format. - cert_table "${SBKEYS}/vendor.cer" "${SBKEYS}/vendor.obj" - - # Replace the embedded vendor certificate, then sign shim with the db key. - objdumpcopy "${shim}" \ - --update-section ".vendor_cert=${SBKEYS}/vendor.obj" - pesign -i "${shim}" -o "${shim}.signed" -s "${SHIM_SIGN_KEY[@]}" - mv "${shim}.signed" "${shim}" - pesigcheck -i "${shim}" -n 0 -c "${SBKEYS}/db.cer" - - # Sign the MOK manager as well. - pesign -i "${mokm}" -o "${mokm}.signed" -s "${CODE_SIGN_KEY[@]}" - mv "${mokm}.signed" "${mokm}" - pesigcheck -i "${mokm}" -n 0 -c "${SBKEYS}/vendor.cer" - - # Replace the embedded gpg public key, then sign grub with the vendor key. - objdumpcopy "${grub}" \ - --file-alignment 4096 \ - --update-section ".pubkey=${SBKEYS}/config-sign.pubkey" - pesign -i "${grub}" -o "${grub}.signed" -s "${CODE_SIGN_KEY[@]}" - mv "${grub}.signed" "${grub}" - pesigcheck -i "${grub}" -n 0 -c "${SBKEYS}/vendor.cer" -else - # Generate a zero-sized certificate in the expected format. - cert_table /dev/null "${SBKEYS}/vendor.obj" - - # Replace the embedded vendor certificate with the zero-sized one, which shim - # will ignore when Secure Boot is disabled. - objdumpcopy "${shim}" \ - --update-section ".vendor_cert=${SBKEYS}/vendor.obj" - - # Remove the embedded gpg public key to disable GRUB's signature checks. - objdumpcopy "${grub}" \ - --file-alignment 4096 \ - --remove-section ".pubkey" -fi -popd >/dev/null - -dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count="${partsize[EFI-A]}" -mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((partsize[EFI-A] * 1024)) -mmd -i "${EFI_IMAGE}" ::/EFI -mmd -i "${EFI_IMAGE}" ::/EFI/BOOT -mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT"/*.efi ::/EFI/BOOT -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - # Make the signing certificate available on the EFI system partition so it - # can be imported through the firmware setup UI on bare metal systems. - mcopy -i "${EFI_IMAGE}" "${SBKEYS}"/db.{crt,cer} ::/EFI/BOOT -fi -dd if="${EFI_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[EFI-A]}" - -# Ensure that the grub directory exists. 
-mkdir -p "${ROOT_MOUNT}/boot/grub" - -# Now that we're done messing with /, move /boot out of it -mv "${ROOT_MOUNT}/boot"/* "${BOOT_MOUNT}" - -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - pushd "${BOOT_MOUNT}" >/dev/null - vmlinuz="vmlinuz" - pesign -i "${vmlinuz}" -o "${vmlinuz}.signed" -s "${CODE_SIGN_KEY[@]}" - mv "${vmlinuz}.signed" "${vmlinuz}" - pesigcheck -i "${vmlinuz}" -n 0 -c "${SBKEYS}/vendor.cer" - popd >/dev/null -fi - -# Set the Bottlerocket variant, version, and build-id -SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" -VERSION="${VERSION_ID} (${VARIANT})" -cat <> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" -VERSION="${VERSION}" -PRETTY_NAME="${PRETTY_NAME} ${VERSION}" -VARIANT_ID=${VARIANT} -VERSION_ID=${VERSION_ID} -BUILD_ID=${BUILD_ID} -HOME_URL="https://github.com/bottlerocket-os/bottlerocket" -SUPPORT_URL="https://github.com/bottlerocket-os/bottlerocket/discussions" -BUG_REPORT_URL="https://github.com/bottlerocket-os/bottlerocket/issues" -DOCUMENTATION_URL="https://bottlerocket.dev" -EOF - -# Set the BOTTLEROCKET-DATA Filesystem for creating/mounting -if [ "${XFS_DATA_PARTITION}" == "yes" ] ; then - printf "%s\n" "DATA_PARTITION_FILESYSTEM=xfs" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/share/bottlerocket/image-features.env" -else - printf "%s\n" "DATA_PARTITION_FILESYSTEM=ext4" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/share/bottlerocket/image-features.env" -fi - -# BOTTLEROCKET-ROOT-A -mkdir -p "${ROOT_MOUNT}/lost+found" -ROOT_LABELS=$(setfiles -n -d -F -m -r "${ROOT_MOUNT}" \ - "${SELINUX_FILE_CONTEXTS}" "${ROOT_MOUNT}" \ - | awk -v root="${ROOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') -mkfs.ext4 -E "lazy_itable_init=0,stride=${ROOT_STRIDE},stripe_width=${ROOT_STRIPE_WIDTH}" \ - -O ^has_journal -b "${VERITY_DATA_BLOCK_SIZE}" -d "${ROOT_MOUNT}" "${ROOT_IMAGE}" "${partsize[ROOT-A]}M" -echo "${ROOT_LABELS}" | debugfs -w -f - "${ROOT_IMAGE}" -resize2fs -M "${ROOT_IMAGE}" -dd if="${ROOT_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[ROOT-A]}" - -# BOTTLEROCKET-VERITY-A -veritypart_mib="${partsize[HASH-A]}" -truncate -s "${veritypart_mib}M" "${VERITY_IMAGE}" -veritysetup_output="$(veritysetup format \ - --format "$VERITY_VERSION" \ - --hash "$VERITY_HASH_ALGORITHM" \ - --data-block-size "$VERITY_DATA_BLOCK_SIZE" \ - --hash-block-size "$VERITY_HASH_BLOCK_SIZE" \ - "${ROOT_IMAGE}" "${VERITY_IMAGE}" \ - | tee /dev/stderr)" -verityimage_size="$(stat -c %s "${VERITY_IMAGE}")" -veritypart_bytes="$((veritypart_mib * 1024 * 1024))" -if [ "${verityimage_size}" -gt "${veritypart_bytes}" ] ; then - echo "verity content is larger than partition (${veritypart_mib}M)" - exit 1 -fi -VERITY_DATA_4K_BLOCKS="$(grep '^Data blocks:' <<<"${veritysetup_output}" | awk '{ print $NF }')" -VERITY_DATA_512B_BLOCKS="$((VERITY_DATA_4K_BLOCKS * 8))" -VERITY_ROOT_HASH="$(grep '^Root hash:' <<<"${veritysetup_output}" | awk '{ print $NF }')" -VERITY_SALT="$(grep '^Salt:' <<<"${veritysetup_output}" | awk '{ print $NF }')" -veritysetup verify "${ROOT_IMAGE}" "${VERITY_IMAGE}" "${VERITY_ROOT_HASH}" -dd if="${VERITY_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[HASH-A]}" - -declare -a DM_VERITY_ROOT -DM_VERITY_ROOT=( - "root,,,ro,0" - "${VERITY_DATA_512B_BLOCKS}" - "verity" - "${VERITY_VERSION}" - "PARTUUID=\$boot_uuid/PARTNROFF=1" - "PARTUUID=\$boot_uuid/PARTNROFF=2" - "${VERITY_DATA_BLOCK_SIZE}" - "${VERITY_HASH_BLOCK_SIZE}" - "${VERITY_DATA_4K_BLOCKS}" - "1" - "${VERITY_HASH_ALGORITHM}" - "${VERITY_ROOT_HASH}" - "${VERITY_SALT}" - "2" - 
"restart_on_corruption" - "ignore_zero_blocks" -) - -# write GRUB config -# If GRUB_SET_PRIVATE_VAR is set, include the parameters that support Boot Config -if [ "${GRUB_SET_PRIVATE_VAR}" == "yes" ] ; then - BOOTCONFIG='bootconfig' - INITRD="initrd (\$private)/bootconfig.data" -else - BOOTCONFIG="" - INITRD="" -fi - -# If UEFI_SECURE_BOOT is set, disable interactive edits. Otherwise the intended -# kernel command line parameters could be changed if the boot fails. Disable -# signature checking as well, since grub.cfg will have already been verified -# before we reach this point. bootconfig.data is generated at runtime and can't -# be signed with a trusted key, so continuing to check signatures would prevent -# it from being read. If boot fails, trigger an automatic reboot, since nothing -# can be changed for troubleshooting purposes. -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - echo 'set superusers=""' > "${BOOT_MOUNT}/grub/grub.cfg" - echo 'set check_signatures="no"' >> "${BOOT_MOUNT}/grub/grub.cfg" - FALLBACK=$' echo "rebooting in 30 seconds..."\n' - FALLBACK+=$' sleep 30\n' - FALLBACK+=$' reboot\n' -else - FALLBACK="" -fi - -cat <> "${BOOT_MOUNT}/grub/grub.cfg" -set default="0" -set timeout="0" -set dm_verity_root="${DM_VERITY_ROOT[@]}" - -menuentry "${PRETTY_NAME} ${VERSION_ID}" --unrestricted { - linux (\$root)/vmlinuz \\ - ${KERNEL_PARAMETERS} \\ - ${BOOTCONFIG} \\ - root=/dev/dm-0 rootwait ro \\ - raid=noautodetect \\ - random.trust_cpu=on \\ - selinux=1 enforcing=1 \\ - dm-mod.create="\$dm_verity_root" \\ - -- \\ - systemd.log_target=journal-or-kmsg \\ - systemd.log_color=0 \\ - systemd.show_status=true - ${INITRD} - boot - ${FALLBACK} -} -EOF - -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - gpg --detach-sign "${BOOT_MOUNT}/grub/grub.cfg" - gpg --verify "${BOOT_MOUNT}/grub/grub.cfg.sig" -fi - -# BOTTLEROCKET-BOOT-A -mkdir -p "${BOOT_MOUNT}/lost+found" -chmod -R go-rwx "${BOOT_MOUNT}" -BOOT_LABELS=$(setfiles -n -d -F -m -r "${BOOT_MOUNT}" \ - "${SELINUX_FILE_CONTEXTS}" "${BOOT_MOUNT}" \ - | awk -v root="${BOOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') -mkfs.ext4 -O ^has_journal -d "${BOOT_MOUNT}" "${BOOT_IMAGE}" "${partsize[BOOT-A]}M" -echo "${BOOT_LABELS}" | debugfs -w -f - "${BOOT_IMAGE}" -resize2fs -M "${BOOT_IMAGE}" -dd if="${BOOT_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[BOOT-A]}" - -# BOTTLEROCKET-PRIVATE - -# Generate an empty bootconfig file for the image, so grub doesn't pause and -# print an error that the file doesn't exist. -cat < "${PRIVATE_MOUNT}/bootconfig.in" -kernel {} -init {} -EOF -touch "${PRIVATE_MOUNT}/bootconfig.data" -bootconfig -a "${PRIVATE_MOUNT}/bootconfig.in" "${PRIVATE_MOUNT}/bootconfig.data" -rm "${PRIVATE_MOUNT}/bootconfig.in" - -# Targeted toward the current API server implementation. -# Relative to the ext4 defaults, we: -# - adjust the inode ratio since we expect lots of small files -# - retain the inode size to allow most settings to be stored inline -# - retain the block size to handle worse-case alignment for hardware -mkfs.ext4 -b 4096 -i 4096 -I 256 -d "${PRIVATE_MOUNT}" "${PRIVATE_IMAGE}" "${partsize[PRIVATE]}M" -dd if="${PRIVATE_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[PRIVATE]}" - -# BOTTLEROCKET-DATA-A and BOTTLEROCKET-DATA-B - -# If we build on a host with SELinux enabled, we could end up with labels that -# do not match our policy. 
Since we allow replacing the data volume at runtime, -# we can't count on these labels being correct in any case, and it's better to -# remove them all. -UNLABELED=$(find "${DATA_MOUNT}" \ - | awk -v root="${DATA_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_rm", $1, "security.selinux"}') - -mkfs_data() { - local target size offset - target="${1:?}" - size="${2:?}" - offset="${3:?}" - # Create an XFS filesystem if requested - if [ "${XFS_DATA_PARTITION}" == "yes" ] ; then - echo "writing blank partition for DATA" - # Create a file to write the filesystem to first - dd if=/dev/zero of="${BOTTLEROCKET_DATA}" bs=1M count=${size%?} - else - # default to ext4 - echo "writing ext4 filesystem for DATA" - mkfs.ext4 -m 0 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" "${size}" - echo "${UNLABELED}" | debugfs -w -f - "${BOTTLEROCKET_DATA}" - fi - dd if="${BOTTLEROCKET_DATA}" of="${target}" conv=notrunc bs=1M seek="${offset}" -} - -# Decide which data filesystem to create at build time based on layout. -# -# The DATA-A partition will always exist, but for the "split" layout, it will be -# too small to provide the desired filesystem parameters (inode count, etc) when -# it is grown later on. Hence this filesystem is only created for "unified". -# -# The DATA-B partition does not exist on the "unified" layout, which anticipates -# a single storage device. Hence this filesystem is only created for "split". -# -# If the other partition is available at runtime, the filesystem will be created -# during first boot instead, providing flexibility at the cost of a minor delay. -case "${PARTITION_PLAN}" in - unified) - mkfs_data "${OS_IMAGE}" "${partsize["DATA-A"]}M" "${partoff["DATA-A"]}" - ;; - split) - mkfs_data "${DATA_IMAGE}" "${partsize["DATA-B"]}M" "${partoff["DATA-B"]}" - ;; -esac - -sgdisk -v "${OS_IMAGE}" -[ -s "${DATA_IMAGE}" ] && sgdisk -v "${DATA_IMAGE}" - -symlink_image() { - local ext what - ext="${1}" - what="${2}" - ext="${ext:+.$ext}" - target="${what^^}_NAME" - for link in symlink versioned_symlink friendly_versioned_symlink ; do - link="${what^^}_${link^^}" - ln -s "${!target}${ext}" "${OUTPUT_DIR}/${!link}${ext}" - done -} - -if [[ ${OUTPUT_FMT} == "raw" ]]; then - lz4 -vc "${OS_IMAGE}" >"${OUTPUT_DIR}/${OS_IMAGE_NAME}.img.lz4" - symlink_image "img.lz4" "os_image" - if [ -s "${DATA_IMAGE}" ] ; then - lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_NAME}.img.lz4" - symlink_image "img.lz4" "data_image" - fi -elif [[ ${OUTPUT_FMT} == "qcow2" ]]; then - qemu-img convert -f raw -O qcow2 "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_NAME}.qcow2" - symlink_image "qcow2" "os_image" - if [ -s "${DATA_IMAGE}" ] ; then - qemu-img convert -f raw -O qcow2 "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_NAME}.qcow2" - symlink_image "qcow2" "data_image" - fi -elif [[ ${OUTPUT_FMT} == "vmdk" ]]; then - # Stream optimization is required for creating an Open Virtual Appliance (OVA) - qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_NAME}.vmdk" - symlink_image "vmdk" "os_image" - if [ -s "${DATA_IMAGE}" ] ; then - qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_NAME}.vmdk" - symlink_image "vmdk" "data_image" - fi -fi - -# Now create the OVA if needed. -if [ "${OUTPUT_FMT}" == "vmdk" ] ; then - os_vmdk="${OS_IMAGE_NAME}.vmdk" - data_vmdk="${DATA_IMAGE_NAME}.vmdk" - ovf="${OS_IMAGE_NAME}.ovf" - ova_dir="$(mktemp -d)" - - # The manifest expects disk sizes in bytes. 
- bytes_in_gib="$((1024 * 1024 * 1024))" - os_disk_bytes="$((OS_IMAGE_PUBLISH_SIZE_GIB * bytes_in_gib))" - data_disk_bytes="$((DATA_IMAGE_PUBLISH_SIZE_GIB * bytes_in_gib))" - sed "${OVF_TEMPLATE}" \ - -e "s/{{OS_DISK}}/${os_vmdk}/g" \ - -e "s/{{DATA_DISK}}/${data_vmdk}/g" \ - -e "s/{{OS_DISK_BYTES}}/${os_disk_bytes}/g" \ - -e "s/{{DATA_DISK_BYTES}}/${data_disk_bytes}/g" \ - > "${ova_dir}/${ovf}" - - # The manifest templates for Secure Boot expect the cert data for - # PK, KEK, db, and dbx. - if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - pk_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/PK.cer")" - kek_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/KEK.cer")" - db_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/db.cer")" - dbx_empty_hash_hex="$(sha256sum /dev/null | awk '{ print $1 }')" - sed -i \ - -e "s/{{PK_CERT_DER_HEX}}/${pk_cert_der_hex}/g" \ - -e "s/{{KEK_CERT_DER_HEX}}/${kek_cert_der_hex}/g" \ - -e "s/{{DB_CERT_DER_HEX}}/${db_cert_der_hex}/g" \ - -e "s/{{DBX_EMPTY_HASH_HEX}}/${dbx_empty_hash_hex}/g" \ - "${ova_dir}/${ovf}" - fi - - # Make sure we replaced all the '{{...}}' fields with real values. - if grep -F -e '{{' -e '}}' "${ova_dir}/${ovf}" ; then - echo "Failed to fully render the OVF template" >&2 - exit 1 - fi - - # Create the manifest file with the hashes of the VMDKs and the OVF. - manifest="${OS_IMAGE_NAME}.mf" - pushd "${OUTPUT_DIR}" >/dev/null - os_sha256="$(sha256sum ${os_vmdk} | awk '{print $1}')" - echo "SHA256(${os_vmdk})= ${os_sha256}" > "${ova_dir}/${manifest}" - if [ -s "${DATA_IMAGE}" ] ; then - data_sha256="$(sha256sum ${data_vmdk} | awk '{print $1}')" - echo "SHA256(${data_vmdk})= ${data_sha256}" >> "${ova_dir}/${manifest}" - fi - popd >/dev/null - pushd "${ova_dir}" >/dev/null - ovf_sha256="$(sha256sum ${ovf} | awk '{print $1}')" - echo "SHA256(${ovf})= ${ovf_sha256}" >> "${manifest}" - popd >/dev/null - - # According to the OVF spec: - # https://www.dmtf.org/sites/default/files/standards/documents/DSP0243_2.1.1.pdf, - # the OVF must be first in the tar bundle. Manifest is next, and then the - # files must fall in the same order as listed in the References section of the - # OVF file - ova="${OS_IMAGE_NAME}.ova" - tar -cf "${OUTPUT_DIR}/${ova}" -C "${ova_dir}" "${ovf}" "${manifest}" - tar -rf "${OUTPUT_DIR}/${ova}" -C "${OUTPUT_DIR}" "${os_vmdk}" - if [ -s "${DATA_IMAGE}" ] ; then - tar -rf "${OUTPUT_DIR}/${ova}" -C "${OUTPUT_DIR}" "${data_vmdk}" - fi - - symlink_image "ova" "os_image" -fi - -lz4 -9vc "${BOOT_IMAGE}" >"${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" -lz4 -9vc "${VERITY_IMAGE}" >"${OUTPUT_DIR}/${VERITY_IMAGE_NAME}" -lz4 -9vc "${ROOT_IMAGE}" >"${OUTPUT_DIR}/${ROOT_IMAGE_NAME}" - -symlink_image "" "boot_image" -symlink_image "" "verity_image" -symlink_image "" "root_image" - -find "${OUTPUT_DIR}" -type f -print -exec chown 1000:1000 {} \; - -# Clean up temporary files to reduce size of layer. -rm -f "${PACKAGE_DIR}"/*.rpm -rm -rf /tmp/* diff --git a/tools/rpm2kmodkit b/tools/rpm2kmodkit deleted file mode 100755 index 079d98cee7c..00000000000 --- a/tools/rpm2kmodkit +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash -# -# Create an archive of kernel development sources and toolchain. -set -eu -o pipefail - -for opt in "$@"; do - optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" - case "${opt}" in - --archive-dir=*) ARCHIVE_DIR="${optarg}" ;; - --toolchain-dir=*) TOOLCHAIN_DIR="${optarg}" ;; - --output-dir=*) OUTPUT_DIR="${optarg}" ;; - esac -done - -# Store output artifacts in a versioned directory. 
-OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" - -# Use a friendly name for the top-level directory inside the archive. -KMOD_KIT="${VARIANT}-${ARCH}-kmod-kit-v${VERSION_ID}" - -# Use the build ID within the filename, to align with our build's expectations. -KMOD_KIT_FULL="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}-kmod-kit" -KMOD_KIT_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-kmod-kit" -KMOD_KIT_FRIENDLY_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-v${VERSION_ID}-kmod-kit" -KMOD_KIT_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-kmod-kit" - -EXTRACT_DIR="$(mktemp -d)" -KIT_DIR="$(mktemp -d)" -mkdir -p "${OUTPUT_DIR}" "${KIT_DIR}/${KMOD_KIT}" - -# Extract any RPMs and find the kernel development archive. -pushd "${EXTRACT_DIR}" >/dev/null -find "${ARCHIVE_DIR}" -type f -name '*.rpm' \ - -exec rpm2cpio {} \; | cpio -idm --quiet -find -name 'kernel-devel.tar.xz' \ - -exec mv {} "${KIT_DIR}/${KMOD_KIT}" \; -popd >/dev/null - -# Extract it and copy in the toolchain. -pushd "${KIT_DIR}/${KMOD_KIT}" >/dev/null -tar xf kernel-devel.tar.xz -rm kernel-devel.tar.xz -cp -a "${TOOLCHAIN_DIR}" toolchain -popd >/dev/null - -# Merge them together into a unified archive. -pushd "${KIT_DIR}" >/dev/null -tar cf "${OUTPUT_DIR}/${KMOD_KIT_FULL}.tar" "${KMOD_KIT}" -xz -T0 "${OUTPUT_DIR}/${KMOD_KIT_FULL}.tar" -popd >/dev/null - -# Create friendly symlinks. -ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_FRIENDLY_VERSIONED_SYMLINK}.tar.xz" -ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_VERSIONED_SYMLINK}.tar.xz" -ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_SYMLINK}.tar.xz" -ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT}.tar.xz" - -rm -rf "${EXTRACT_DIR}" "${KIT_DIR}" diff --git a/tools/rpm2migrations b/tools/rpm2migrations deleted file mode 100755 index 87d3d87ec0d..00000000000 --- a/tools/rpm2migrations +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash -# -# Retrieve migrations from the RPM and output an appropriately named tarball -set -eu -o pipefail - -for opt in "$@"; do - optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" - case "${opt}" in - --package-dir=*) PACKAGE_DIR="${optarg}" ;; - --output-dir=*) OUTPUT_DIR="${optarg}" ;; - esac -done - -# Store output artifacts in a versioned directory. -OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" -mkdir -p "${OUTPUT_DIR}" - -MIGRATIONS_ARCHIVE="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}-migrations.tar" -MIGRATIONS_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-migrations.tar" -MIGRATIONS_FRIENDLY_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-v${VERSION_ID}-migrations.tar" -MIGRATIONS_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-migrations.tar" -ROOT_TEMP="$(mktemp -d)" -SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" -MIGRATIONS_DIR="${ROOT_TEMP}/${SYS_ROOT}/usr/share/migrations" - -# "Install" the migrations (just puts them in $MIGRATIONS_DIR) -rpm -iv --ignorearch --root "${ROOT_TEMP}" "${PACKAGE_DIR}"/*.rpm - -if [ ! -d "${MIGRATIONS_DIR}" ]; then - echo "Migrations directory does not exist: ${MIGRATIONS_DIR}" - rm -rf "${ROOT_TEMP}" - exit 1 -fi - -# lz4 compress each migration -for migration in "${MIGRATIONS_DIR}"/*; do - [ -e "${migration}" ] || continue - lz4 -v "${migration}" "${migration}.lz4" -done - -# Tar up migrations with a .lz4 extension if they exist. 
-# Otherwise create an empty archive
-pushd "${MIGRATIONS_DIR}"
-if ls *.lz4 &> /dev/null; then
-  tar -cvf "${OUTPUT_DIR}/${MIGRATIONS_ARCHIVE}" *.lz4
-else
-  tar -cvf "${OUTPUT_DIR}/${MIGRATIONS_ARCHIVE}" --files-from /dev/null
-fi
-popd
-
-# Create friendly symlinks.
-ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_FRIENDLY_VERSIONED_SYMLINK}"
-ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_VERSIONED_SYMLINK}"
-ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_SYMLINK}"
-
-rm -rf "${ROOT_TEMP}"
diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml
deleted file mode 100644
index 064097ff321..00000000000
--- a/tools/testsys-config/Cargo.toml
+++ /dev/null
@@ -1,20 +0,0 @@
-[package]
-name = "testsys-config"
-version = "0.1.0"
-authors = ["Ethan Pullen "]
-license = "Apache-2.0 OR MIT"
-edition = "2021"
-publish = false
-
-[dependencies]
-bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" }
-bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" }
-handlebars = "4"
-log = "0.4"
-maplit = "1"
-testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" }
-serde = { version = "1", features = ["derive"] }
-serde_plain = "1"
-serde_yaml = "0.9"
-snafu = "0.7"
-toml = "0.5"
diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs
deleted file mode 100644
index b43c06a66ef..00000000000
--- a/tools/testsys-config/src/lib.rs
+++ /dev/null
@@ -1,554 +0,0 @@
-use bottlerocket_types::agent_config::KarpenterDeviceMapping;
-use bottlerocket_variant::Variant;
-pub use error::Error;
-use handlebars::Handlebars;
-use log::{debug, trace, warn};
-use maplit::btreemap;
-use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
-use std::collections::{BTreeMap, HashMap};
-use std::fs;
-use std::path::Path;
-use testsys_model::constants::TESTSYS_VERSION;
-use testsys_model::{DestructionPolicy, SecretName};
-pub type Result<T> = std::result::Result<T, Error>;
-use serde_plain::derive_fromstr_from_deserialize;
-
-/// Configuration needed to run tests
-#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
-#[serde(rename_all = "kebab-case")]
-pub struct TestConfig {
-    /// High level configuration for TestSys
-    pub test: Option<Test>,
-
-    #[serde(flatten, serialize_with = "toml::ser::tables_last")]
-    /// Configuration for testing variants
-    pub configs: HashMap<String, GenericConfig>,
-}
-
-impl TestConfig {
-    /// Deserializes a TestConfig from a given path
-    pub fn from_path<P>(path: P) -> Result<Self>
-    where
-        P: AsRef<Path>,
-    {
-        let path = path.as_ref();
-        let test_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?;
-        let mut config: Self =
-            toml::from_str(&test_config_str).context(error::InvalidTomlSnafu { path })?;
-        // Copy the GenericConfig from `test` to `configs`.
-        config.test.as_ref().and_then(|test| {
-            config
-                .configs
-                .insert("test".to_string(), test.config.clone())
-        });
-
-        Ok(config)
-    }
-
-    /// Deserializes a TestConfig from a given path, if it exists, otherwise builds a default
-    /// config
-    pub fn from_path_or_default<P>(path: P) -> Result<Self>
-    where
-        P: AsRef<Path>,
-    {
-        if path.as_ref().exists() {
-            Self::from_path(path)
-        } else {
-            warn!(
-                "No test config was found at '{}'. Using the default config.",
-                path.as_ref().display()
-            );
-            Ok(Self::default())
-        }
-    }
-
-    /// Create a single config for the `variant` and `arch` from this test configuration by
-    /// determining a list of tables that contain information relevant to the arch, variant
-    /// combination. Then, the tables are reduced to a single config by selecting values from the
-    /// table based on the order of precedence. If `starting_config` is provided it will be used as
-    /// the config with the highest precedence.
-    pub fn reduced_config<S>(
-        &self,
-        variant: &Variant,
-        arch: S,
-        starting_config: Option<GenericVariantConfig>,
-        test_type: &str,
-    ) -> (GenericVariantConfig, String)
-    where
-        S: Into<String>,
-    {
-        let arch = arch.into();
-        // Starting with a list of keys ordered by precedence, return a single config with values
-        // selected by the order of the list.
-        let (test_type, configs) = config_keys(variant)
-            // Convert the vec of keys into an iterator of keys.
-            .into_iter()
-            // Convert the iterator of keys to an iterator of Configs. If the key does not have a
-            // configuration in the config file, remove it from the iterator.
-            .filter_map(|key| self.configs.get(&key).cloned())
-            // Reverse the iterator
-            .rev()
-            .fold(
-                (test_type.to_string(), Vec::new()),
-                |(test_type, mut configs), config| {
-                    let (ordered_configs, test_type) = config.test_configs(test_type);
-                    configs.push(ordered_configs);
-                    (test_type, configs)
-                },
-            );
-        debug!("Resolved test-type '{}'", test_type);
-        (
-            configs
-                .into_iter()
-                .rev()
-                .flatten()
-                // Take the iterator of configurations and extract the arch specific config and the
-                // non-arch specific config for each config. Then, convert them into a single iterator.
-                .flat_map(|config| vec![config.for_arch(&arch), config.config])
-                // Take the iterator of configurations and merge them into a single config by populating
-                // each field with the first value that is not `None` while following the list of
-                // precedence.
-                .fold(
-                    starting_config.unwrap_or_default(),
-                    GenericVariantConfig::merge,
-                ),
-            test_type,
-        )
-    }
-}
-
-/// High level configurations for a test
-#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)]
-#[serde(deny_unknown_fields, rename_all = "kebab-case")]
-pub struct Test {
-    /// The name of the repo in `Infra.toml` that should be used for testing
-    pub repo: Option<String>,
-
-    /// The name of the vSphere data center in `Infra.toml` that should be used for testing
-    /// If no data center is provided, the first one in `vmware.datacenters` will be used
-    pub datacenter: Option<String>,
-
-    #[serde(flatten)]
-    /// The URI of TestSys images
-    pub testsys_images: TestsysImages,
-
-    /// A registry containing all TestSys images
-    pub testsys_image_registry: Option<String>,
-
-    /// The tag that should be used for TestSys images
-    pub testsys_image_tag: Option<String>,
-
-    #[serde(flatten)]
-    /// Configuration values for all Bottlerocket variants
-    pub config: GenericConfig,
-}
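// A usage sketch (added for illustration; not part of the original module) of
// resolving the effective configuration for one variant/arch pair. The file
// name, variant, and test type are illustrative; it relies on the module's
// imports and assumes `bottlerocket_variant::Variant::new`, which returns a
// `Result`.
fn resolve_example() -> std::result::Result<(), Box<dyn std::error::Error>> {
    let config = TestConfig::from_path_or_default("Test.toml")?;
    let variant = Variant::new("aws-k8s-1.28")?;
    // Precedence walks 'aws-k8s-1.28' -> 'aws-k8s' -> 'aws' -> 'test'.
    let (variant_config, test_type) =
        config.reduced_config(&variant, "x86_64", None, "quick");
    println!("clusters: {:?}", variant_config.cluster_names);
    println!("resolved test type: {test_type}");
    Ok(())
}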
-/// Create a vec of relevant keys for this variant ordered from most specific to least specific.
-fn config_keys(variant: &Variant) -> Vec<String> {
-    let (family_flavor, platform_flavor) = variant
-        .variant_flavor()
-        .map(|flavor| {
-            (
-                format!("{}-{}", variant.family(), flavor),
-                format!("{}-{}", variant.platform(), flavor),
-            )
-        })
-        .unwrap_or_default();
-
-    // The keys used to describe configuration (most specific -> least specific)
-    vec![
-        variant.to_string(),
-        family_flavor,
-        variant.family().to_string(),
-        platform_flavor,
-        variant.platform().to_string(),
-        "test".to_string(),
-    ]
-}
-
-/// All configurations for a specific config level, i.e. `<family>-<flavor>`
-#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)]
-#[serde(deny_unknown_fields)]
-pub struct GenericConfig {
-    #[serde(default)]
-    aarch64: GenericVariantConfig,
-    #[serde(default)]
-    x86_64: GenericVariantConfig,
-    #[serde(default, flatten)]
-    config: GenericVariantConfig,
-    #[serde(default)]
-    configuration: HashMap<String, GenericConfig>,
-    #[serde(rename = "test-type")]
-    test_type: Option<String>,
-}
-
-impl GenericConfig {
-    /// Get the configuration for a specific arch.
-    pub fn for_arch<S>(&self, arch: S) -> GenericVariantConfig
-    where
-        S: Into<String>,
-    {
-        match arch.into().as_str() {
-            "x86_64" => self.x86_64.clone(),
-            "aarch64" => self.aarch64.clone(),
-            _ => Default::default(),
-        }
-    }
-
-    /// Get the configuration for a specific test type.
-    pub fn test<S>(&self, test_type: S) -> GenericConfig
-    where
-        S: AsRef<str>,
-    {
-        self.configuration
-            .get(test_type.as_ref())
-            .cloned()
-            .unwrap_or_default()
-    }
-
-    /// Get a set of `GenericConfig`s following test types (test_type -> generic config).
-    fn test_configs<S>(&self, test_type: S) -> (Vec<GenericConfig>, String)
-    where
-        S: AsRef<str>,
-    {
-        // A vec containing all relevant test configs for this `GenericConfig` starting with
-        // `test_type` and ending with the `GenericConfig` itself.
-        let mut configs = Vec::new();
-        // Track the last test_type that we added to `configs`
-        let mut cur_test_type = test_type.as_ref().to_string();
-        loop {
-            // Add the config for the current test type (if the config doesn't exist, an empty
-            // config is added)
-            let test_config = self.test(&cur_test_type);
-            configs.push(test_config.clone());
-            // If the current test config specifies another test type, that test type needs to be
-            // added to the configurations.
-            if let Some(test_type) = test_config.test_type.to_owned() {
-                trace!("Test-type '{}' resolves to '{}'", cur_test_type, test_type);
-                cur_test_type = test_type;
-            } else {
-                break;
-            }
-        }
-
-        // Add the `self` config
-        configs.push(self.clone());
-        (configs, cur_test_type)
-    }
-}
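// A self-contained sketch (added for illustration) of the `test-type`
// chaining that `test_configs` performs above: a test type may alias another
// test type, and the chain is followed until a config without a `test-type`
// key is reached. Like the loop above, this does not guard against cycles.
fn follow_test_type_chain(
    start: &str,
    aliases: &std::collections::HashMap<String, String>,
) -> String {
    let mut current = start.to_string();
    while let Some(next) = aliases.get(&current) {
        current = next.clone();
    }
    current
}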
-/// The configuration for a specific config level (`<family>-<flavor>`). This may or may not be
-/// arch specific depending on its location in `GenericConfig`.
-#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)]
-#[serde(deny_unknown_fields)]
-#[serde(rename_all = "kebab-case")]
-pub struct GenericVariantConfig {
-    /// The names of all clusters this variant should be tested over. This is particularly useful
-    /// for testing Bottlerocket on ipv4 and ipv6 clusters.
-    #[serde(default)]
-    pub cluster_names: Vec<String>,
-    /// The instance type that instances should be launched with
-    pub instance_type: Option<String>,
-    /// Specify how Bottlerocket instances should be launched (ec2, karpenter)
-    pub resource_agent_type: Option<ResourceAgentType>,
-    /// Launch instances with the following Block Device Mapping
-    #[serde(default)]
-    pub block_device_mapping: Vec<KarpenterDeviceMapping>,
-    /// The secrets needed by the agents
-    #[serde(default)]
-    pub secrets: BTreeMap<String, SecretName>,
-    /// The role that should be assumed for this particular variant
-    pub agent_role: Option<String>,
-    /// The location of the sonobuoy testing image
-    pub sonobuoy_image: Option<String>,
-    /// The custom images used for conformance testing
-    pub conformance_image: Option<String>,
-    /// The custom registry used for conformance testing
-    pub conformance_registry: Option<String>,
-    /// The endpoint IP to reserve for the vSphere control plane VMs when creating a K8s cluster
-    pub control_plane_endpoint: Option<String>,
-    /// The path to userdata that should be used for Bottlerocket launch
-    pub userdata: Option<String>,
-    /// The directory containing Bottlerocket images. For metal, this is the directory containing
-    /// gzipped images.
-    pub os_image_dir: Option<String>,
-    /// The hardware that should be used for provisioning Bottlerocket. For metal, this is the
-    /// hardware csv that is passed to EKS Anywhere.
-    pub hardware_csv: Option<String>,
-    /// The workload tests that should be run
-    #[serde(default)]
-    pub workloads: BTreeMap<String, String>,
-    #[serde(default)]
-    pub dev: DeveloperConfig,
-}
-
-impl GenericVariantConfig {
-    /// Overwrite the unset values of `self` with the set values of `other`
-    fn merge(self, other: Self) -> Self {
-        let cluster_names = if self.cluster_names.is_empty() {
-            other.cluster_names
-        } else {
-            self.cluster_names
-        };
-
-        let secrets = if self.secrets.is_empty() {
-            other.secrets
-        } else {
-            self.secrets
-        };
-
-        let workloads = if self.workloads.is_empty() {
-            other.workloads
-        } else {
-            self.workloads
-        };
-
-        let block_device_mapping = if self.block_device_mapping.is_empty() {
-            other.block_device_mapping
-        } else {
-            self.block_device_mapping
-        };
-
-        Self {
-            cluster_names,
-            instance_type: self.instance_type.or(other.instance_type),
-            resource_agent_type: self.resource_agent_type.or(other.resource_agent_type),
-            block_device_mapping,
-            secrets,
-            agent_role: self.agent_role.or(other.agent_role),
-            sonobuoy_image: self.sonobuoy_image.or(other.sonobuoy_image),
-            conformance_image: self.conformance_image.or(other.conformance_image),
-            conformance_registry: self.conformance_registry.or(other.conformance_registry),
-            control_plane_endpoint: self.control_plane_endpoint.or(other.control_plane_endpoint),
-            userdata: self.userdata.or(other.userdata),
-            os_image_dir: self.os_image_dir.or(other.os_image_dir),
-            hardware_csv: self.hardware_csv.or(other.hardware_csv),
-            workloads,
-            dev: self.dev.merge(other.dev),
-        }
-    }
-}
-
-#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)]
-#[serde(rename_all = "kebab-case")]
-pub enum ResourceAgentType {
-    Karpenter,
-    Ec2,
-}
-
-impl Default for ResourceAgentType {
-    fn default() -> Self {
-        Self::Ec2
-    }
-}
-
-derive_fromstr_from_deserialize!(ResourceAgentType);
-
-/// The configuration for a specific config level (`<family>-<flavor>`). This may or may not be
-/// arch specific depending on its location in `GenericConfig`.
-/// The configurable fields here add refined control to TestSys objects.
-#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)]
-#[serde(deny_unknown_fields)]
-#[serde(rename_all = "kebab-case")]
-pub struct DeveloperConfig {
-    /// Control the destruction behavior of cluster CRDs
-    pub cluster_destruction_policy: Option<DestructionPolicy>,
-    /// Control the destruction behavior of Bottlerocket CRDs
-    pub bottlerocket_destruction_policy: Option<DestructionPolicy>,
-    /// Keep test pods running on completion
-    pub keep_tests_running: Option<bool>,
-    /// Use an alternate account for image lookup
-    pub image_account_id: Option<String>,
-    /// Overrides the EKS service endpoint for TestSys agents gathering EKS cluster metadata
-    /// (only for pre-existing EKS clusters, does not apply to new EKS cluster creation)
-    pub eks_service_endpoint: Option<String>,
-    /// A manifest containing the EKS Anywhere binary that should be used for cluster provisioning
-    pub eks_a_release_manifest_url: Option<String>,
-}
-
-impl DeveloperConfig {
-    /// Overwrite the unset values of `self` with the set values of `other`
-    fn merge(self, other: Self) -> Self {
-        Self {
-            cluster_destruction_policy: self
-                .cluster_destruction_policy
-                .or(other.cluster_destruction_policy),
-            bottlerocket_destruction_policy: self
-                .bottlerocket_destruction_policy
-                .or(other.bottlerocket_destruction_policy),
-            keep_tests_running: self.keep_tests_running.or(other.keep_tests_running),
-            image_account_id: self.image_account_id.or(other.image_account_id),
-            eks_service_endpoint: self.eks_service_endpoint.or(other.eks_service_endpoint),
-            eks_a_release_manifest_url: self
-                .eks_a_release_manifest_url
-                .or(other.eks_a_release_manifest_url),
-        }
-    }
-}
-
-/// Fill in the templated cluster name with `arch` and `variant`.
-pub fn rendered_cluster_name<S1, S2>(cluster_name: String, arch: S1, variant: S2) -> Result<String>
-where
-    S1: Into<String>,
-    S2: Into<String>,
-{
-    let mut cluster_template = Handlebars::new();
-    cluster_template.register_template_string("cluster_name", cluster_name)?;
-    Ok(cluster_template.render(
-        "cluster_name",
-        &btreemap! {"arch".to_string() => arch.into(), "variant".to_string() => variant.into()},
-    )?)
-}
-
-#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)]
-#[serde(deny_unknown_fields)]
-#[serde(rename_all = "kebab-case")]
-pub struct TestsysImages {
-    pub eks_resource_agent_image: Option<String>,
-    pub ecs_resource_agent_image: Option<String>,
-    pub vsphere_k8s_cluster_resource_agent_image: Option<String>,
-    pub metal_k8s_cluster_resource_agent_image: Option<String>,
-    pub ec2_resource_agent_image: Option<String>,
-    pub ec2_karpenter_resource_agent_image: Option<String>,
-    pub vsphere_vm_resource_agent_image: Option<String>,
-    pub sonobuoy_test_agent_image: Option<String>,
-    pub ecs_test_agent_image: Option<String>,
-    pub migration_test_agent_image: Option<String>,
-    pub k8s_workload_agent_image: Option<String>,
-    pub ecs_workload_agent_image: Option<String>,
-    pub controller_image: Option<String>,
-    pub testsys_agent_pull_secret: Option<String>,
-}
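// A runnable sketch (added for illustration) of the templating that
// `rendered_cluster_name` above performs with handlebars 4.x. The template
// and values are illustrative; callers pass k8s-safe names such as "x86-64"
// and "aws-k8s-128".
fn render_cluster_name_example() -> std::result::Result<(), Box<dyn std::error::Error>> {
    let mut hb = Handlebars::new();
    hb.register_template_string("cluster_name", "{{arch}}-{{variant}}-ipv6")?;
    let data = btreemap! {
        "arch".to_string() => "x86-64".to_string(),
        "variant".to_string() => "aws-k8s-128".to_string(),
    };
    // Prints "x86-64-aws-k8s-128-ipv6".
    println!("{}", hb.render("cluster_name", &data)?);
    Ok(())
}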
-impl TestsysImages {
-    /// Create an images config for a specific registry.
-    pub fn new<S>(registry: S, tag: Option<String>) -> Self
-    where
-        S: Into<String>,
-    {
-        let registry = registry.into();
-        let tag = tag.unwrap_or_else(|| format!("v{}", TESTSYS_VERSION));
-        Self {
-            eks_resource_agent_image: Some(format!("{}/eks-resource-agent:{tag}", registry)),
-            ecs_resource_agent_image: Some(format!("{}/ecs-resource-agent:{tag}", registry)),
-            vsphere_k8s_cluster_resource_agent_image: Some(format!(
-                "{}/vsphere-k8s-cluster-resource-agent:{tag}",
-                registry
-            )),
-            metal_k8s_cluster_resource_agent_image: Some(format!(
-                "{}/metal-k8s-cluster-resource-agent:{tag}",
-                registry
-            )),
-            ec2_resource_agent_image: Some(format!("{}/ec2-resource-agent:{tag}", registry)),
-            ec2_karpenter_resource_agent_image: Some(format!(
-                "{}/ec2-karpenter-resource-agent:{tag}",
-                registry
-            )),
-            vsphere_vm_resource_agent_image: Some(format!(
-                "{}/vsphere-vm-resource-agent:{tag}",
-                registry
-            )),
-            sonobuoy_test_agent_image: Some(format!("{}/sonobuoy-test-agent:{tag}", registry)),
-            ecs_test_agent_image: Some(format!("{}/ecs-test-agent:{tag}", registry)),
-            migration_test_agent_image: Some(format!("{}/migration-test-agent:{tag}", registry)),
-            k8s_workload_agent_image: Some(format!("{}/k8s-workload-agent:{tag}", registry)),
-            ecs_workload_agent_image: Some(format!("{}/ecs-workload-agent:{tag}", registry)),
-            controller_image: Some(format!("{}/controller:{tag}", registry)),
-            testsys_agent_pull_secret: None,
-        }
-    }
-
-    pub fn merge(self, other: Self) -> Self {
-        Self {
-            eks_resource_agent_image: self
-                .eks_resource_agent_image
-                .or(other.eks_resource_agent_image),
-            ecs_resource_agent_image: self
-                .ecs_resource_agent_image
-                .or(other.ecs_resource_agent_image),
-            vsphere_k8s_cluster_resource_agent_image: self
-                .vsphere_k8s_cluster_resource_agent_image
-                .or(other.vsphere_k8s_cluster_resource_agent_image),
-            metal_k8s_cluster_resource_agent_image: self
-                .metal_k8s_cluster_resource_agent_image
-                .or(other.metal_k8s_cluster_resource_agent_image),
-            vsphere_vm_resource_agent_image: self
-                .vsphere_vm_resource_agent_image
-                .or(other.vsphere_vm_resource_agent_image),
-            ec2_resource_agent_image: self
-                .ec2_resource_agent_image
-                .or(other.ec2_resource_agent_image),
-            ec2_karpenter_resource_agent_image: self
-                .ec2_karpenter_resource_agent_image
-                .or(other.ec2_karpenter_resource_agent_image),
-            sonobuoy_test_agent_image: self
-                .sonobuoy_test_agent_image
-                .or(other.sonobuoy_test_agent_image),
-            ecs_test_agent_image: self.ecs_test_agent_image.or(other.ecs_test_agent_image),
-            migration_test_agent_image: self
-                .migration_test_agent_image
-                .or(other.migration_test_agent_image),
-            k8s_workload_agent_image: self
-                .k8s_workload_agent_image
-                .or(other.k8s_workload_agent_image),
-            ecs_workload_agent_image: self
-                .ecs_workload_agent_image
-                .or(other.ecs_workload_agent_image),
-            controller_image: self.controller_image.or(other.controller_image),
-            testsys_agent_pull_secret: self
-                .testsys_agent_pull_secret
-                .or(other.testsys_agent_pull_secret),
-        }
-    }
-
-    pub fn public_images() -> Self {
-        Self::new("public.ecr.aws/bottlerocket-test-system", None)
-    }
-}
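// Usage sketch (added for illustration): point every agent at a private
// registry, then fall back to the public images for anything left unset
// (`merge` keeps the first non-None value). The registry URI is hypothetical.
fn images_example() {
    let custom = TestsysImages::new("example.com/testsys", Some("v0.0.9".to_string()));
    let images = custom.merge(TestsysImages::public_images());
    // Some("example.com/testsys/controller:v0.0.9")
    println!("{:?}", images.controller_image);
}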
-mod error {
-    use snafu::Snafu;
-    use std::io;
-    use std::path::PathBuf;
-
-    #[derive(Debug, Snafu)]
-    #[snafu(visibility(pub(super)))]
-    pub enum Error {
-        #[snafu(display("Failed to read '{}': {}", path.display(), source))]
-        File { path: PathBuf, source: io::Error },
-
-        #[snafu(display("Invalid config file at '{}': {}", path.display(), source))]
-        InvalidToml {
-            path: PathBuf,
-            source: toml::de::Error,
-        },
-
-        #[snafu(display("Invalid lock file at '{}': {}", path.display(), source))]
-        InvalidLock {
-            path: PathBuf,
-            source: serde_yaml::Error,
-        },
-
-        #[snafu(display("Missing config: {}", what))]
-        MissingConfig { what: String },
-
-        #[snafu(display("Failed to get parent of path: {}", path.display()))]
-        Parent { path: PathBuf },
-
-        #[snafu(
-            context(false),
-            display("Failed to create template for cluster name: {}", source)
-        )]
-        TemplateError {
-            #[snafu(source(from(handlebars::TemplateError, Box::new)))]
-            source: Box<handlebars::TemplateError>,
-        },
-
-        #[snafu(
-            context(false),
-            display("Failed to render templated cluster name: {}", source)
-        )]
-        RenderError { source: handlebars::RenderError },
-    }
-}
diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml
deleted file mode 100644
index ff7ccce26db..00000000000
--- a/tools/testsys/Cargo.toml
+++ /dev/null
@@ -1,37 +0,0 @@
-[package]
-name = "testsys"
-version = "0.1.0"
-authors = [
-    "Ethan Pullen ",
-    "Matt Briggs ",
-]
-license = "Apache-2.0 OR MIT"
-edition = "2021"
-publish = false
-
-[dependencies]
-async-trait = "0.1"
-aws-config = "0.55"
-aws-sdk-ec2 = "0.28"
-base64 = "0.20"
-bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" }
-bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" }
-clap = { version = "4", features = ["derive", "env"] }
-env_logger = "0.10"
-futures = "0.3"
-handlebars = "4"
-log = "0.4"
-maplit = "1"
-testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" }
-pubsys-config = { path = "../pubsys-config/", version = "0.1.0" }
-fastrand = "1"
-serde = { version = "1", features = ["derive"] }
-serde_json = "1"
-serde_plain = "1"
-serde_yaml = "0.9"
-snafu = "0.7"
-term_size = "0.3"
-testsys-config = { path = "../testsys-config/", version = "0.1" }
-tokio = { version = "1", features = ["macros", "rt-multi-thread", "fs"] }
-unescape = "0.1"
-url = "2"
diff --git a/tools/testsys/Test.toml.example b/tools/testsys/Test.toml.example
deleted file mode 100644
index 415df1ee361..00000000000
--- a/tools/testsys/Test.toml.example
+++ /dev/null
@@ -1,125 +0,0 @@
-# This is an example testing configuration for TestSys, the tool that is used to validate
-# Bottlerocket builds.
-
-# This section contains configuration details for all testing
-[test]
-
-# The repo from `Infra.toml` that should be used for Bottlerocket update images. It may be useful to
-# create a repo in `Infra.toml` that contains the infrastructure needed for testing
-repo = "default"
-
-# The registry containing alternate TestSys agent images
-testsys-image-registry = "public.ecr.aws/bottlerocket-test-system"
-
-# The tag that should be used with `testsys-image-registry` for image pulls
-testsys-image-tag = "latest"
-
-# The URI for the EKS resource agent that should be used. An individual agent's provided URI will be
-# used even if `testsys-image-registry` is present.
-eks-resource-agent-image = "public.ecr.aws/bottlerocket-test-system/eks_resource_agent:v0.0.2"
-
-# Test Configurations
-#
-# Testing requirements tend to differ by variant and architecture. This configuration file provides
-# the ability to set values that apply generally to a broad group of similar variants, and to
-# override those values at a more granular level. For example, you can set a value for all `aws-k8s`
-# variants, then override that value for `aws-k8s-nvidia` variants, and further override the value
-# for `aws-k8s-nvidia`.aarch64 builds.
-#
-# The mechanism for resolving configuration values has the following order of precedence:
-#
-# '<variant>'.ARCH
-# '<variant>'
-# '<family>-<flavor>'.ARCH
-# '<family>-<flavor>'
-# '<family>'.ARCH
-# '<family>'
-# '<platform>-<flavor>'.ARCH
-# '<platform>-<flavor>'
-# '<platform>'.ARCH
-# '<platform>'
-#
-# For a concrete example, given a variant such as `aws-k8s-1.23-nvidia` with the architecture
-# `x86_64`, configurations will have the following order of precedence:
-# ['aws-k8s-1.23-nvidia'.x86_64]
-# ['aws-k8s-1.23-nvidia']
-# ['aws-k8s-nvidia'.x86_64]
-# ['aws-k8s-nvidia']
-# ['aws-k8s'.x86_64]
-# ['aws-k8s']
-# ['aws-nvidia'.x86_64]
-# ['aws-nvidia']
-# ['aws'.x86_64]
-# ['aws']
-#
-# Configurable values:
-#
-# cluster-names:
-#     All clusters the variant should be tested over. Cluster naming supports templated strings, and
-#     both `arch` and `variant` are provided as variables (`{{arch}}-{{variant}}`).
-#
-# instance-type:
-#     The instance type that should be used for testing.
-#
-# secrets:
-#     A map containing the names of all kubernetes secrets needed for resource creation and testing.
-#
-# agent-role:
-#     The role that should be assumed by each test and resource agent.
-#
-# conformance-image: (K8s only)
-#     Specify a custom image for conformance testing. For `aws-k8s` variants this will be used as a
-#     custom Kubernetes conformance image for Sonobuoy.
-#
-# conformance-registry: (K8s only)
-#     Specify a custom registry for conformance testing images.
-#     For `aws-k8s` variants this will be used as the Sonobuoy e2e registry.
-#
-# Note: values passed by command line argument will take precedence over those passed by environment
-# variable, and both take precedence over values set by `Test.toml`.
-
-# Additional fields are configurable with the `dev` table.
-# See `DeveloperConfig` for individual fields.
-
-# Example Configurations
-
-# Configuration for all variants with the `aws` platform.
-[aws]
-agent-role = "<role-arn>"
-
-# Configuration for all nvidia AWS variants on x86_64 (platform-flavor level configuration)
-[aws-nvidia.x86_64]
-instance-type = "p3.2xlarge"
-
-# Configuration for all nvidia AWS variants on aarch64 (platform-flavor level configuration)
-[aws-nvidia.aarch64]
-instance-type = "g5g.2xlarge"
-
-# Configuration for all `aws-k8s` variants testing (family level configuration).
-[aws-k8s]
-# A single role can be assumed by agents to test all `aws-k8s` variants in a separate
-# testing account.
-agent-role = "arn:aws:iam::<account-id>:role/<role-name>"
-
-# The cluster name templating can be defined for all `aws-k8s` variants. To test on ipv4 and ipv6
-# clusters, the following templates could be used. Note: TestSys does not currently support creating
-# ipv6 clusters, so the ipv6 cluster must already exist.
-cluster-names = ["{{arch}}-{{variant}}", "{{arch}}-{{variant}}-ipv6"]
-
-# A custom conformance registry may be needed for testing if image pull reliability is a concern.
-conformance-registry = "<account-id>.dkr.ecr.cn-north-1.amazonaws.com.cn"
-
-# If testing using a kind cluster, AWS credentials need to be passed as a K8s secret.
-secrets = {"awsCreds" = "myAwsCredentials"}
-
-# Configuration for all nvidia AWS ECS variants on x86_64 (family-flavor level configuration)
-[aws-ecs-nvidia.x86_64]
-instance-type = "p3.2xlarge"
-
-# Configuration for all nvidia AWS ECS variants on aarch64 (family-flavor level configuration)
-[aws-ecs-nvidia.aarch64]
-instance-type = "g5g.2xlarge"
-
-# Configuration for only the `aws-k8s-1.24` variant (variant level configuration).
-["aws-k8s-1.24".aarch64] -conformance-image = "" diff --git a/tools/testsys/src/aws_ecs.rs b/tools/testsys/src/aws_ecs.rs deleted file mode 100644 index f021528d923..00000000000 --- a/tools/testsys/src/aws_ecs.rs +++ /dev/null @@ -1,281 +0,0 @@ -use crate::aws_resources::{ami, ami_name, ec2_crd, get_ami_id}; -use crate::crds::{ - BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, - TestInput, -}; -use crate::error::{self, Result}; -use crate::migration::migration_crd; -use bottlerocket_types::agent_config::{ - ClusterType, EcsClusterConfig, EcsTestConfig, EcsWorkloadTestConfig, WorkloadTest, -}; -use log::debug; -use maplit::btreemap; -use snafu::{OptionExt, ResultExt}; -use std::collections::BTreeMap; -use testsys_model::{Crd, DestructionPolicy, Test}; - -/// A `CrdCreator` responsible for creating crd related to `aws-ecs` variants. -pub(crate) struct AwsEcsCreator { - pub(crate) region: String, - pub(crate) ami_input: String, - pub(crate) migrate_starting_commit: Option, -} - -#[async_trait::async_trait] -impl CrdCreator for AwsEcsCreator { - /// Determine the AMI from `amis.json`. - async fn image_id(&self, _: &CrdInput) -> Result { - ami(&self.ami_input, &self.region) - } - - /// Determine the starting image from EC2 using standard Bottlerocket naming conventions. - async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { - get_ami_id(ami_name(&crd_input.arch,&crd_input.variant,crd_input.starting_version - .as_ref() - .context(error::InvalidSnafu{ - what: "The starting version must be provided for migration testing" - })?, self.migrate_starting_commit - .as_ref() - .context(error::InvalidSnafu{ - what: "The commit for the starting version must be provided if the starting image id is not" - })?) - , &crd_input.arch, - & self.region, - crd_input.config.dev.image_account_id.as_deref(), - ) - .await - } - - /// Create an ECS cluster CRD with the `cluster_name` in `cluster_input`. - async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { - debug!("Creating ECS cluster CRD"); - // Create labels that will be used for identifying existing CRDs for an ECS cluster. - let labels = cluster_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "cluster".to_string(), - "testsys/cluster".to_string() => cluster_input.cluster_name.to_string(), - "testsys/region".to_string() => self.region.clone() - }); - - // Check if the cluster already has a CRD in the TestSys cluster. - if let Some(cluster_crd) = cluster_input - .crd_input - .existing_crds( - &labels, - &["testsys/cluster", "testsys/type", "testsys/region"], - ) - .await? - .pop() - { - // Return the name of the existing CRD for the cluster. - debug!("ECS cluster CRD already exists with name '{}'", cluster_crd); - return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); - } - - // Create the CRD for ECS cluster creation. 
- let ecs_crd = EcsClusterConfig::builder() - .cluster_name(cluster_input.cluster_name) - .region(Some(self.region.to_owned())) - .assume_role(cluster_input.crd_input.config.agent_role.clone()) - .destruction_policy( - cluster_input - .crd_input - .config - .dev - .cluster_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ) - .image( - cluster_input - .crd_input - .images - .ecs_resource_agent_image - .as_ref() - .expect("The default ecs resource provider image uri is missing."), - ) - .set_image_pull_secret( - cluster_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .set_labels(Some(labels)) - .set_secrets(Some(cluster_input.crd_input.config.secrets.clone())) - .build(cluster_input.cluster_name) - .context(error::BuildSnafu { - what: "ECS cluster CRD", - })?; - - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(ecs_crd)))) - } - - /// Create an EC2 provider CRD to launch Bottlerocket instances on the cluster created by - /// `cluster_crd`. - async fn bottlerocket_crd<'a>( - &self, - bottlerocket_input: BottlerocketInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( - ec2_crd(bottlerocket_input, ClusterType::Ecs, &self.region).await?, - )))) - } - - async fn migration_crd<'a>( - &self, - migration_input: MigrationInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( - migration_input, - None, - "ids", - )?)))) - } - - async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - let cluster_resource_name = test_input - .cluster_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); - let bottlerocket_resource_name = test_input - .bottlerocket_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); - - // Create labels that are used to help filter status. - let labels = test_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => test_input.test_type.to_string(), - "testsys/cluster".to_string() => cluster_resource_name.to_string(), - }); - - let test_crd = EcsTestConfig::builder() - .cluster_name_template(cluster_resource_name, "clusterName") - .region(Some(self.region.to_owned())) - .task_count(1) - .assume_role(test_input.crd_input.config.agent_role.to_owned()) - .resources(bottlerocket_resource_name) - .resources(cluster_resource_name) - .set_depends_on(Some(test_input.prev_tests)) - .set_retries(Some(5)) - .image( - test_input - .crd_input - .images - .ecs_test_agent_image - .to_owned() - .expect("The default ECS testing image is missing"), - ) - .set_image_pull_secret( - test_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .keep_running( - test_input - .crd_input - .config - .dev - .keep_tests_running - .unwrap_or(false), - ) - .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) - .set_labels(Some(labels)) - .build(format!( - "{}-{}", - cluster_resource_name, - test_input - .name_suffix - .unwrap_or(test_input.crd_input.test_flavor.as_str()) - )) - .context(error::BuildSnafu { - what: "ECS test CRD", - })?; - - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(test_crd)))) - } - - async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd( - &self.region, - test_input, - )?)))) - } - - fn additional_fields(&self, _test_type: &str) -> BTreeMap { - btreemap! {"region".to_string() => self.region.clone()} - } -} - -/// Create a workload CRD for K8s testing. 
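/// (Despite the K8s wording, this builds the ECS workload test CRD; the plugin
/// list comes from the `workloads` map in the resolved test configuration.)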
-pub(crate) fn workload_crd(region: &str, test_input: TestInput) -> Result { - let cluster_resource_name = test_input - .cluster_crd_name - .as_ref() - .expect("A cluster name is required for ECS workload tests"); - let bottlerocket_resource_name = test_input - .bottlerocket_crd_name - .as_ref() - .expect("A bottlerocket resource name is required for ECS workload tests"); - - let labels = test_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => test_input.test_type.to_string(), - "testsys/cluster".to_string() => cluster_resource_name.to_string(), - }); - let gpu = test_input.crd_input.variant.variant_flavor() == Some("nvidia"); - let plugins: Vec<_> = test_input - .crd_input - .config - .workloads - .iter() - .map(|(name, image)| WorkloadTest { - name: name.to_string(), - image: image.to_string(), - gpu, - }) - .collect(); - if plugins.is_empty() { - return Err(error::Error::Invalid { - what: "There were no plugins specified in the workload test. - Workloads can be specified in `Test.toml` or via the command line." - .to_string(), - }); - } - - EcsWorkloadTestConfig::builder() - .resources(bottlerocket_resource_name) - .resources(cluster_resource_name) - .set_depends_on(Some(test_input.prev_tests)) - .set_retries(Some(5)) - .image( - test_input - .crd_input - .images - .ecs_workload_agent_image - .to_owned() - .expect("The default K8s workload testing image is missing"), - ) - .set_image_pull_secret( - test_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .keep_running(true) - .region(region.to_string()) - .cluster_name_template(cluster_resource_name, "clusterName") - .assume_role(test_input.crd_input.config.agent_role.to_owned()) - .tests(plugins) - .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) - .set_labels(Some(labels)) - .build(format!( - "{}{}", - cluster_resource_name, - test_input.name_suffix.unwrap_or("-test") - )) - .context(error::BuildSnafu { - what: "Workload CRD", - }) -} diff --git a/tools/testsys/src/aws_k8s.rs b/tools/testsys/src/aws_k8s.rs deleted file mode 100644 index 2fc063fabcd..00000000000 --- a/tools/testsys/src/aws_k8s.rs +++ /dev/null @@ -1,238 +0,0 @@ -use crate::aws_resources::{ami, ami_name, ec2_crd, ec2_karpenter_crd, get_ami_id}; -use crate::crds::{ - BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, - TestInput, -}; -use crate::error::{self, Result}; -use crate::migration::migration_crd; -use crate::sonobuoy::{sonobuoy_crd, workload_crd}; -use bottlerocket_types::agent_config::{ - ClusterType, CreationPolicy, EksClusterConfig, EksctlConfig, K8sVersion, -}; -use maplit::btreemap; -use serde_yaml::Value; -use snafu::{OptionExt, ResultExt}; -use std::collections::BTreeMap; -use std::str::FromStr; -use testsys_config::ResourceAgentType; -use testsys_model::{Crd, DestructionPolicy}; - -/// A `CrdCreator` responsible for creating crd related to `aws-k8s` variants. -pub(crate) struct AwsK8sCreator { - pub(crate) region: String, - pub(crate) ami_input: String, - pub(crate) migrate_starting_commit: Option, -} - -#[async_trait::async_trait] -impl CrdCreator for AwsK8sCreator { - /// Determine the AMI from `amis.json`. - async fn image_id(&self, _: &CrdInput) -> Result { - ami(&self.ami_input, &self.region) - } - - /// Determine the starting image from EC2 using standard Bottlerocket naming conventions. 
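/// (The expected AMI name is "bottlerocket-<variant>-<arch>-<version>-<commit>",
/// as produced by `ami_name` in aws_resources.rs.)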
- async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { - get_ami_id(ami_name(&crd_input.arch,&crd_input.variant,crd_input.starting_version - .as_ref() - .context(error::InvalidSnafu{ - what: "The starting version must be provided for migration testing" - })?, self.migrate_starting_commit - .as_ref() - .context(error::InvalidSnafu{ - what: "The commit for the starting version must be provided if the starting image id is not" - })?) - , &crd_input.arch, - & self.region, - crd_input.config.dev.image_account_id.as_deref(), - ) - .await - } - - /// Create an EKS cluster CRD with the `cluster_name` in `cluster_input`. - async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { - let cluster_version = - K8sVersion::from_str(cluster_input.crd_input.variant.version().context( - error::MissingSnafu { - item: "K8s version".to_string(), - what: "aws-k8s variant".to_string(), - }, - )?) - .map_err(|_| error::Error::K8sVersion { - version: cluster_input.crd_input.variant.to_string(), - })?; - - let (cluster_name, region, config) = match cluster_input.cluster_config { - Some(config) => { - let (cluster_name, region) = cluster_config_data(config)?; - ( - cluster_name, - region, - EksctlConfig::File { - encoded_config: base64::encode(config), - }, - ) - } - None => ( - cluster_input.cluster_name.to_string(), - self.region.clone(), - EksctlConfig::Args { - cluster_name: cluster_input.cluster_name.to_string(), - region: Some(self.region.clone()), - zones: None, - version: Some(cluster_version), - }, - ), - }; - - let labels = cluster_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "cluster".to_string(), - "testsys/cluster".to_string() => cluster_name.to_string(), - "testsys/region".to_string() => region.clone() - }); - - // Check if the cluster already has a crd - if let Some(cluster_crd) = cluster_input - .crd_input - .existing_crds( - &labels, - &["testsys/cluster", "testsys/type", "testsys/region"], - ) - .await? - .pop() - { - return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); - } - - let eks_crd = EksClusterConfig::builder() - .creation_policy(CreationPolicy::IfNotExists) - .eks_service_endpoint( - cluster_input - .crd_input - .config - .dev - .eks_service_endpoint - .clone(), - ) - .assume_role(cluster_input.crd_input.config.agent_role.clone()) - .config(config) - .image( - cluster_input - .crd_input - .images - .eks_resource_agent_image - .to_owned() - .expect("Missing default image for EKS resource agent"), - ) - .set_image_pull_secret( - cluster_input - .crd_input - .images - .testsys_agent_pull_secret - .clone(), - ) - .set_labels(Some(labels)) - .set_secrets(Some(cluster_input.crd_input.config.secrets.clone())) - .destruction_policy( - cluster_input - .crd_input - .config - .dev - .cluster_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::Never), - ) - .build(cluster_name) - .context(error::BuildSnafu { - what: "EKS cluster CRD", - })?; - - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(eks_crd)))) - } - - /// Create an EC2 provider CRD to launch Bottlerocket instances on the cluster created by - /// `cluster_crd`. - async fn bottlerocket_crd<'a>( - &self, - bottlerocket_input: BottlerocketInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( - match bottlerocket_input - .crd_input - .config - .resource_agent_type - .to_owned() - .unwrap_or_default() - { - ResourceAgentType::Ec2 => { - ec2_crd(bottlerocket_input, ClusterType::Eks, &self.region).await? 
- } - ResourceAgentType::Karpenter => { - ec2_karpenter_crd(bottlerocket_input, &self.region).await? - } - }, - )))) - } - - async fn migration_crd<'a>( - &self, - migration_input: MigrationInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( - migration_input, - None, - "ids", - )?)))) - } - - async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(sonobuoy_crd( - test_input, - )?)))) - } - - async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd( - test_input, - )?)))) - } - - fn additional_fields(&self, _test_type: &str) -> BTreeMap { - btreemap! {"region".to_string() => self.region.clone()} - } -} - -/// Converts a eksctl cluster config to a `serde_yaml::Value` and extracts the cluster name and -/// region from it. -fn cluster_config_data(cluster_config: &str) -> Result<(String, String)> { - let config: Value = serde_yaml::from_str(cluster_config).context(error::SerdeYamlSnafu { - what: "Unable to deserialize cluster config", - })?; - - let (cluster_name, region) = config - .get("metadata") - .map(|metadata| { - ( - metadata.get("name").and_then(|name| name.as_str()), - metadata.get("region").and_then(|region| region.as_str()), - ) - }) - .context(error::MissingSnafu { - item: "metadata", - what: "eksctl config", - })?; - Ok(( - cluster_name - .context(error::MissingSnafu { - item: "name", - what: "eksctl config metadata", - })? - .to_string(), - region - .context(error::MissingSnafu { - item: "region", - what: "eksctl config metadata", - })? - .to_string(), - )) -} diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs deleted file mode 100644 index aa97a3fb1d3..00000000000 --- a/tools/testsys/src/aws_resources.rs +++ /dev/null @@ -1,348 +0,0 @@ -use crate::crds::BottlerocketInput; -use crate::error::{self, Result}; -use aws_sdk_ec2::config::Region; -use aws_sdk_ec2::types::{Filter, Image}; -use bottlerocket_types::agent_config::{ - ClusterType, CustomUserData, Ec2Config, Ec2KarpenterConfig, KarpenterDeviceMapping, -}; -use maplit::btreemap; -use serde::Deserialize; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::HashMap; -use std::fs::File; -use std::iter::repeat_with; -use testsys_model::{DestructionPolicy, Resource}; - -/// Get the AMI for the given `region` from the `ami_input` file. -pub(crate) fn ami(ami_input: &str, region: &str) -> Result { - let file = File::open(ami_input).context(error::IOSnafu { - what: "Unable to open amis.json", - })?; - // Convert the `ami_input` file to a `HashMap` that maps regions to AMI id. - let amis: HashMap = - serde_json::from_reader(file).context(error::SerdeJsonSnafu { - what: format!("Unable to deserialize '{}'", ami_input), - })?; - // Make sure there are some AMIs present in the `ami_input` file. - ensure!( - !amis.is_empty(), - error::InvalidSnafu { - what: format!("{} is empty", ami_input) - } - ); - Ok(amis - .get(region) - .context(error::InvalidSnafu { - what: format!("AMI not found for region '{}'", region), - })? - .id - .clone()) -} - -/// Queries EC2 for the given AMI name. If found, returns Ok(Some(id)), if not returns Ok(None). -pub(crate) async fn get_ami_id( - name: S1, - arch: S2, - region: S3, - account: Option<&str>, -) -> Result -where - S1: Into, - S2: Into, - S3: Into, -{ - // Create the `aws_config` that will be used to search EC2 for AMIs. 
- // TODO: Follow chain of assumed roles for creating config like pubsys uses. - let config = aws_config::from_env() - .region(Region::new(region.into())) - .load() - .await; - let ec2_client = aws_sdk_ec2::Client::new(&config); - // Find all images named `name` on `arch` in the `region`. - let describe_images = ec2_client - .describe_images() - .owners(account.unwrap_or("self")) - .filters(Filter::builder().name("name").values(name).build()) - .filters( - Filter::builder() - .name("image-type") - .values("machine") - .build(), - ) - .filters(Filter::builder().name("architecture").values(arch).build()) - .filters( - Filter::builder() - .name("virtualization-type") - .values("hvm") - .build(), - ) - .send() - .await? - .images; - let images: Vec<&Image> = describe_images.iter().flatten().collect(); - // Make sure there is exactly 1 image that matches the parameters. - if images.len() > 1 { - return Err(error::Error::Invalid { - what: "Unable to determine AMI. Multiple images were found".to_string(), - }); - }; - if let Some(image) = images.last().as_ref() { - Ok(image - .image_id() - .context(error::InvalidSnafu { - what: "No image id for AMI", - })? - .to_string()) - } else { - Err(error::Error::Invalid { - what: "Unable to determine AMI. No images were found".to_string(), - }) - } -} - -/// Get the standard Bottlerocket AMI name. -pub(crate) fn ami_name(arch: &str, variant: &str, version: &str, commit_id: &str) -> String { - format!( - "bottlerocket-{}-{}-{}-{}", - variant, arch, version, commit_id - ) -} - -#[derive(Clone, Debug, Deserialize)] -pub(crate) struct AmiImage { - pub(crate) id: String, -} - -/// Create a CRD to launch Bottlerocket instances on an EKS or ECS cluster. -pub(crate) async fn ec2_crd<'a>( - bottlerocket_input: BottlerocketInput<'a>, - cluster_type: ClusterType, - region: &str, -) -> Result { - if !bottlerocket_input - .crd_input - .config - .block_device_mapping - .is_empty() - { - return Err(error::Error::Invalid { - what: "Custom block mappings are not supported for ec2 instance launch".to_string(), - }); - } - - let cluster_name = bottlerocket_input - .cluster_crd_name - .as_ref() - .expect("A cluster provider is required"); - - // Create the labels for this EC2 provider. - let labels = bottlerocket_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "instances".to_string(), - "testsys/cluster".to_string() => cluster_name.to_string(), - "testsys/region".to_string() => region.to_string() - }); - - // Find all resources using the same cluster. - let conflicting_resources = bottlerocket_input - .crd_input - .existing_crds( - &labels, - &["testsys/cluster", "testsys/type", "testsys/region"], - ) - .await?; - - let mut ec2_builder = Ec2Config::builder(); - ec2_builder - .node_ami(bottlerocket_input.image_id) - .instance_count(2) - .instance_types::>( - bottlerocket_input - .crd_input - .config - .instance_type - .iter() - .cloned() - .collect(), - ) - .custom_user_data( - bottlerocket_input - .crd_input - .encoded_userdata()? 
- .map(|encoded_userdata| CustomUserData::Merge { encoded_userdata }), - ) - .cluster_name_template(cluster_name, "clusterName") - .region_template(cluster_name, "region") - .instance_profile_arn_template(cluster_name, "iamInstanceProfileArn") - .assume_role(bottlerocket_input.crd_input.config.agent_role.clone()) - .cluster_type(cluster_type.clone()) - .depends_on(cluster_name) - .image( - bottlerocket_input - .crd_input - .images - .ec2_resource_agent_image - .as_ref() - .expect("Missing default image for EC2 resource agent"), - ) - .set_image_pull_secret( - bottlerocket_input - .crd_input - .images - .testsys_agent_pull_secret - .clone(), - ) - .set_labels(Some(labels)) - .set_conflicts_with(conflicting_resources.into()) - .set_secrets(Some(bottlerocket_input.crd_input.config.secrets.clone())) - .destruction_policy( - bottlerocket_input - .crd_input - .config - .dev - .bottlerocket_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ); - - // Add in the EKS specific configuration. - if cluster_type == ClusterType::Eks { - ec2_builder - .subnet_ids_template(cluster_name, "publicSubnetIds") - .endpoint_template(cluster_name, "endpoint") - .certificate_template(cluster_name, "certificate") - .cluster_dns_ip_template(cluster_name, "clusterDnsIp") - .security_groups_template(cluster_name, "securityGroups"); - } else { - // The default VPC doesn't attach private subnets to an ECS cluster, so public subnet ids - // are used instead. - ec2_builder - .subnet_ids_template(cluster_name, "publicSubnetIds") - // TODO If this is not set, the crd cannot be serialized since it is a `Vec` not - // `Option`. - .security_groups(Vec::new()); - } - - let suffix: String = repeat_with(fastrand::lowercase).take(4).collect(); - ec2_builder - .build(format!("{}-instances-{}", cluster_name, suffix)) - .context(error::BuildSnafu { - what: "EC2 instance provider CRD", - }) -} - -/// Create a CRD to launch Bottlerocket instances on an EKS or ECS cluster. -pub(crate) async fn ec2_karpenter_crd<'a>( - bottlerocket_input: BottlerocketInput<'a>, - region: &str, -) -> Result { - let cluster_name = bottlerocket_input - .cluster_crd_name - .as_ref() - .expect("A cluster provider is required"); - - // Create the labels for this EC2 provider. - let labels = bottlerocket_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "instances".to_string(), - "testsys/cluster".to_string() => cluster_name.to_string(), - "testsys/region".to_string() => region.to_string() - }); - - // Find all resources using the same cluster. 
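// (The matches are recorded via `set_conflicts_with` below so TestSys can
// avoid running overlapping instance providers against the same cluster.)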
- let conflicting_resources = bottlerocket_input - .crd_input - .existing_crds( - &labels, - &["testsys/cluster", "testsys/type", "testsys/region"], - ) - .await?; - - // If no mappings were provided use a standard mapping as a default - let device_mappings = if bottlerocket_input - .crd_input - .config - .block_device_mapping - .is_empty() - { - vec![ - KarpenterDeviceMapping { - name: "/dev/xvda".to_string(), - volume_type: "gp3".to_string(), - volume_size: 4, - delete_on_termination: true, - }, - KarpenterDeviceMapping { - name: "/dev/xvdb".to_string(), - volume_type: "gp3".to_string(), - volume_size: 20, - delete_on_termination: true, - }, - ] - } else { - bottlerocket_input - .crd_input - .config - .block_device_mapping - .clone() - }; - - let mut ec2_builder = Ec2KarpenterConfig::builder(); - ec2_builder - .node_ami(bottlerocket_input.image_id) - .instance_types::>( - bottlerocket_input - .crd_input - .config - .instance_type - .iter() - .cloned() - .collect(), - ) - .custom_user_data( - bottlerocket_input - .crd_input - .encoded_userdata()? - .map(|encoded_userdata| CustomUserData::Merge { encoded_userdata }), - ) - .cluster_name_template(cluster_name, "clusterName") - .region_template(cluster_name, "region") - .subnet_ids_template(cluster_name, "publicSubnetIds") - .endpoint_template(cluster_name, "endpoint") - .cluster_sg_template(cluster_name, "clustersharedSg") - .device_mappings(device_mappings) - .assume_role(bottlerocket_input.crd_input.config.agent_role.clone()) - .depends_on(cluster_name) - .image( - bottlerocket_input - .crd_input - .images - .ec2_karpenter_resource_agent_image - .as_ref() - .expect("Missing default image for EC2 resource agent"), - ) - .set_image_pull_secret( - bottlerocket_input - .crd_input - .images - .testsys_agent_pull_secret - .clone(), - ) - .set_labels(Some(labels)) - .set_conflicts_with(conflicting_resources.into()) - .set_secrets(Some(bottlerocket_input.crd_input.config.secrets.clone())) - .destruction_policy( - bottlerocket_input - .crd_input - .config - .dev - .bottlerocket_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ); - - let suffix: String = repeat_with(fastrand::lowercase).take(4).collect(); - ec2_builder - .build(format!("{}-karpenter-{}", cluster_name, suffix)) - .context(error::BuildSnafu { - what: "EC2 instance provider CRD", - }) -} diff --git a/tools/testsys/src/crds.rs b/tools/testsys/src/crds.rs deleted file mode 100644 index 43875d9368f..00000000000 --- a/tools/testsys/src/crds.rs +++ /dev/null @@ -1,787 +0,0 @@ -use crate::error::{self, Result}; -use crate::run::{KnownTestType, TestType}; -use bottlerocket_types::agent_config::TufRepoConfig; -use bottlerocket_variant::Variant; -use handlebars::Handlebars; -use log::{debug, info, warn}; -use maplit::btreemap; -use pubsys_config::RepoConfig; -use serde::Deserialize; -use snafu::{OptionExt, ResultExt}; -use std::collections::BTreeMap; -use std::fs; -use std::path::PathBuf; -use testsys_config::{rendered_cluster_name, GenericVariantConfig, TestsysImages}; -use testsys_model::constants::{API_VERSION, NAMESPACE}; -use testsys_model::test_manager::{SelectionParams, TestManager}; -use testsys_model::Crd; - -/// A type that is used for the creation of all CRDs. 
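/// (It bundles the variant and arch under test, the resolved `Test.toml`
/// configuration, TUF repo settings, and the TestSys agent images.)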
-pub struct CrdInput<'a> { - pub client: &'a TestManager, - pub arch: String, - pub variant: Variant, - pub config: GenericVariantConfig, - pub repo_config: RepoConfig, - pub test_flavor: String, - pub starting_version: Option, - pub migrate_to_version: Option, - pub build_id: Option, - /// `CrdCreator::starting_image_id` function should be used instead of using this field, so - /// it is not externally visible. - pub(crate) starting_image_id: Option, - pub(crate) test_type: TestType, - pub(crate) tests_directory: PathBuf, - pub images: TestsysImages, -} - -impl<'a> CrdInput<'a> { - /// Retrieve the TUF repo information from `Infra.toml` - pub fn tuf_repo_config(&self) -> Option { - if let (Some(metadata_base_url), Some(targets_url)) = ( - &self.repo_config.metadata_base_url, - &self.repo_config.targets_url, - ) { - debug!( - "Using TUF metadata from Infra.toml, metadata: '{}', targets: '{}'", - metadata_base_url, targets_url - ); - Some(TufRepoConfig { - metadata_url: format!("{}{}/{}/", metadata_base_url, &self.variant, &self.arch), - targets_url: targets_url.to_string(), - }) - } else { - warn!("No TUF metadata was found in Infra.toml using the default TUF repos"); - None - } - } - - /// Create a set of labels for the CRD by adding `additional_labels` to the standard labels. - pub fn labels(&self, additional_labels: BTreeMap) -> BTreeMap { - let mut labels = btreemap! { - "testsys/arch".to_string() => self.arch.to_string(), - "testsys/variant".to_string() => self.variant.to_string(), - "testsys/build-id".to_string() => self.build_id.to_owned().unwrap_or_default(), - "testsys/test-type".to_string() => self.test_type.to_string(), - }; - let mut add_labels = additional_labels; - labels.append(&mut add_labels); - labels - } - - /// Determine all CRDs that have the same value for each `id_labels` as `labels`. - pub async fn existing_crds( - &self, - labels: &BTreeMap, - id_labels: &[&str], - ) -> Result> { - // Create a single string containing all `label=value` pairs. - let checks = id_labels - .iter() - .map(|label| { - labels - .get(&label.to_string()) - .map(|value| format!("{}={}", label, value)) - .context(error::InvalidSnafu { - what: format!("The label '{}' was missing", label), - }) - }) - .collect::>>()? - .join(","); - - // Create a list of all CRD names that match all of the specified labels. - Ok(self - .client - .list(&SelectionParams { - labels: Some(checks), - ..Default::default() - }) - .await? - .iter() - .filter_map(Crd::name) - .collect()) - } - - /// Use the provided userdata path to create the encoded userdata. - pub fn encoded_userdata(&self) -> Result> { - let userdata_path = match self.config.userdata.as_ref() { - Some(userdata) => self.custom_userdata_file_path(userdata)?, - None => return Ok(None), - }; - - info!("Using userdata at '{}'", userdata_path.display()); - - let userdata = std::fs::read_to_string(&userdata_path).context(error::FileSnafu { - path: userdata_path, - })?; - - Ok(Some(base64::encode(userdata))) - } - - /// Find the userdata file for the test type - fn custom_userdata_file_path(&self, userdata: &str) -> Result { - let test_type = &self.test_type.to_string(); - - // List all acceptable paths to the custom crd to allow users some freedom in the way - // `tests` is organized. 
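// (The candidates below are tried in order; the first path that exists wins.)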
- let acceptable_paths = vec![ - // Check the absolute path - userdata.into(), - // Check for // - self.tests_directory.join(test_type).join(userdata), - // Check for //.toml - self.tests_directory - .join(test_type) - .join(userdata) - .with_extension("toml"), - // Check for /shared/ - self.tests_directory.join("shared").join(userdata), - // Check for /shared/.toml - self.tests_directory - .join("shared") - .join(userdata) - .with_extension("toml"), - // Check for /shared/userdata/ - self.tests_directory - .join("shared") - .join("userdata") - .join(userdata), - // Check for /shared/userdata/.toml - self.tests_directory - .join("shared") - .join("userdata") - .join(userdata) - .with_extension("toml"), - // Check for the path in the top level directory - PathBuf::new().join(userdata), - ]; - - // Find the first acceptable path that exists and return that. - acceptable_paths - .into_iter() - .find(|path| path.exists()) - .context(error::InvalidSnafu { - what: format!( - "Could not find userdata '{}' for test type '{}'", - userdata, test_type - ), - }) - } - - /// Fill in the templated cluster name with `arch` and `variant`. - fn rendered_cluster_name(&self, raw_cluster_name: String) -> Result { - Ok(rendered_cluster_name( - raw_cluster_name, - self.kube_arch(), - self.kube_variant(), - )?) - } - - /// Get the k8s safe architecture name - fn kube_arch(&self) -> String { - self.arch.replace('_', "-") - } - - /// Get the k8s safe variant name - fn kube_variant(&self) -> String { - self.variant.to_string().replace('.', "") - } - - /// Bottlerocket cluster naming convention. - fn default_cluster_name(&self) -> String { - format!("{}-{}", self.kube_arch(), self.kube_variant()) - } - - /// Get a list of cluster_names for this variant. If there are no cluster names, the default - /// cluster name will be used. - fn cluster_names(&self) -> Result> { - Ok(if self.config.cluster_names.is_empty() { - vec![self.default_cluster_name()] - } else { - self.config - .cluster_names - .iter() - .map(String::to_string) - // Fill the template fields in the clusters name before using it. - .map(|cluster_name| self.rendered_cluster_name(cluster_name)) - .collect::>>()? - }) - } - - /// Creates a `BTreeMap` of all configurable fields from this input - fn config_fields(&self, cluster_name: &str) -> BTreeMap { - btreemap! { - "arch".to_string() => self.arch.clone(), - "variant".to_string() => self.variant.to_string(), - "kube-arch".to_string() => self.kube_arch(), - "kube-variant".to_string() => self.kube_variant(), - "flavor".to_string() => some_or_null(&self.variant.variant_flavor().map(str::to_string)), - "version".to_string() => some_or_null(&self.variant.version().map(str::to_string)), - "cluster-name".to_string() => cluster_name.to_string(), - "instance-type".to_string() => some_or_null(&self.config.instance_type), - "agent-role".to_string() => some_or_null(&self.config.agent_role), - "conformance-image".to_string() => some_or_null(&self.config.conformance_image), - "conformance-registry".to_string() => some_or_null(&self.config.conformance_registry), - "control-plane-endpoint".to_string() => some_or_null(&self.config.control_plane_endpoint), - } - } - - /// Find the crd template file for the given test type - fn custom_crd_template_file_path(&self) -> Option { - let test_type = &self.test_type.to_string(); - // List all acceptable paths to the custom crd to allow users some freedom in the way - // `tests` is organized. 
-    /// Find the crd template file for the given test type
-    fn custom_crd_template_file_path(&self) -> Option<PathBuf> {
-        let test_type = &self.test_type.to_string();
-        // List all acceptable paths to the custom crd to allow users some freedom in the way
-        // `tests` is organized.
-        let acceptable_paths = vec![
-            // Check for <test-type>.yaml in the top level directory
-            PathBuf::new().join(test_type).with_extension("yaml"),
-            // Check for <tests-directory>/<test-type>/<test-type>.yaml
-            self.tests_directory
-                .join(test_type)
-                .join(test_type)
-                .with_extension("yaml"),
-            // Check for <tests-directory>/<test-type>/crd.yaml
-            self.tests_directory.join(test_type).join("crd.yaml"),
-            // Check for <tests-directory>/shared/<test-type>.yaml
-            self.tests_directory
-                .join("shared")
-                .join(test_type)
-                .with_extension("yaml"),
-            // Check for <tests-directory>/shared/tests/<test-type>.yaml
-            self.tests_directory
-                .join("shared")
-                .join("tests")
-                .join(test_type)
-                .with_extension("yaml"),
-        ];
-
-        // Find the first acceptable path that exists and return it.
-        acceptable_paths.into_iter().find(|path| path.exists())
-    }
-
-    /// Find the cluster config file for the given cluster name and test type.
-    fn cluster_config_file_path(&self, cluster_name: &str) -> Option<PathBuf> {
-        let test_type = &self.test_type.to_string();
-        // List all acceptable paths to the cluster config to allow users some freedom in the way
-        // `tests` is organized.
-        let acceptable_paths = vec![
-            // Check for <tests-directory>/<test-type>/<cluster-name>.yaml
-            self.tests_directory
-                .join(test_type)
-                .join(cluster_name)
-                .with_extension("yaml"),
-            // Check for <tests-directory>/shared/<cluster-name>.yaml
-            self.tests_directory
-                .join("shared")
-                .join(cluster_name)
-                .with_extension("yaml"),
-            // Check for <tests-directory>/shared/cluster-config/<cluster-name>.yaml
-            self.tests_directory
-                .join("shared")
-                .join("cluster-config")
-                .join(cluster_name)
-                .with_extension("yaml"),
-            // Check for <tests-directory>/shared/clusters/<cluster-name>.yaml
-            self.tests_directory
-                .join("shared")
-                .join("clusters")
-                .join(cluster_name)
-                .with_extension("yaml"),
-            // Check for <tests-directory>/shared/clusters/<cluster-name>/cluster.yaml
-            self.tests_directory
-                .join("shared")
-                .join("clusters")
-                .join(cluster_name)
-                .join("cluster")
-                .with_extension("yaml"),
-        ];
-
-        // Find the first acceptable path that exists and return it.
-        acceptable_paths.into_iter().find(|path| path.exists())
-    }
-
-    /// Find the resolved cluster config file for the given cluster name and test type if it exists.
-    fn resolved_cluster_config(
-        &self,
-        cluster_name: &str,
-        additional_fields: &mut BTreeMap<String, String>,
-    ) -> Result<Option<String>> {
-        let path = match self.cluster_config_file_path(cluster_name) {
-            None => return Ok(None),
-            Some(path) => path,
-        };
-        info!("Using cluster config at {}", path.display());
-        let config = fs::read_to_string(&path).context(error::FileSnafu { path })?;
-
-        let mut fields = self.config_fields(cluster_name);
-        fields.insert("api-version".to_string(), API_VERSION.to_string());
-        fields.insert("namespace".to_string(), NAMESPACE.to_string());
-        fields.append(additional_fields);
-
-        let mut handlebars = Handlebars::new();
-        handlebars.set_strict_mode(true);
-        let rendered_config = handlebars.render_template(&config, &fields)?;
-
-        Ok(Some(rendered_config))
-    }
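
All of these lookup functions follow the same pattern: build an ordered list of candidate paths and take the first one that exists, so earlier locations shadow later ones. A generic version of that pattern, for illustration only (no such shared helper exists in this file):

    use std::path::PathBuf;

    /// Return the first candidate path that exists on disk, if any.
    fn first_existing(candidates: Vec<PathBuf>) -> Option<PathBuf> {
        // Order matters: a file in the test-type directory wins over `shared/`.
        candidates.into_iter().find(|path| path.exists())
    }
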
-    /// Find the hardware csv file for the given hardware csv name and test type.
-    fn hardware_csv_file_path(&self, hardware_csv: &str) -> Option<PathBuf> {
-        let test_type = &self.test_type.to_string();
-        // List all acceptable paths to the hardware csv to allow users some freedom in the way
-        // `tests` is organized.
-        let acceptable_paths = vec![
-            // Check for <tests-directory>/<test-type>/<hardware-csv>.csv
-            self.tests_directory
-                .join(test_type)
-                .join(hardware_csv)
-                .with_extension("csv"),
-            // Check for <tests-directory>/shared/<hardware-csv>.csv
-            self.tests_directory
-                .join("shared")
-                .join(hardware_csv)
-                .with_extension("csv"),
-            // Check for <tests-directory>/shared/cluster-config/<hardware-csv>.csv
-            self.tests_directory
-                .join("shared")
-                .join("cluster-config")
-                .join(hardware_csv)
-                .with_extension("csv"),
-            // Check for <tests-directory>/shared/clusters/<hardware-csv>.csv
-            self.tests_directory
-                .join("shared")
-                .join("clusters")
-                .join(hardware_csv)
-                .with_extension("csv"),
-        ];
-
-        // Find the first acceptable path that exists and return it.
-        acceptable_paths.into_iter().find(|path| path.exists())
-    }
-
-    /// Find the resolved hardware csv for this test if it exists.
-    fn resolved_hardware_csv(&self) -> Result<Option<String>> {
-        let hardware_csv = match &self.config.hardware_csv {
-            Some(hardware_csv) => hardware_csv,
-            None => return Ok(None),
-        };
-
-        // If the value contains a comma, treat it as inline csv content; otherwise treat it as
-        // the name of a hardware csv file.
-        if hardware_csv.contains(',') {
-            return Ok(Some(hardware_csv.to_string()));
-        }
-
-        let path = match self.hardware_csv_file_path(hardware_csv) {
-            None => return Ok(None),
-            Some(path) => path,
-        };
-
-        info!("Using hardware csv at {}", path.display());
-
-        let config = fs::read_to_string(&path).context(error::FileSnafu { path })?;
-        Ok(Some(config))
-    }
-
-    fn hardware_for_cluster(&self, cluster_name: &str) -> Result<Option<String>> {
-        // Check for <tests-directory>/shared/clusters/<cluster-name>/hardware.csv
-        let path = self
-            .tests_directory
-            .join("shared")
-            .join("clusters")
-            .join(cluster_name)
-            .join("hardware")
-            .with_extension("csv");
-
-        if !path.exists() {
-            return Ok(None);
-        }
-
-        info!("Using hardware csv at {}", path.display());
-
-        let config = fs::read_to_string(&path).context(error::FileSnafu { path })?;
-        Ok(Some(config))
-    }
-}
-
-/// Take the value of the `Option<String>` or `"null"` if the `Option` was `None`
-fn some_or_null(field: &Option<String>) -> String {
-    field.to_owned().unwrap_or_else(|| "null".to_string())
-}
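
`resolved_hardware_csv` distinguishes inline CSV content from a file name with a simple heuristic: any comma means the configured value is the CSV itself. A sketch of that decision with hypothetical inputs:

    /// True when the configured value should be treated as inline CSV content
    /// rather than the name of a hardware csv file (mirrors the check above).
    fn looks_like_inline_csv(value: &str) -> bool {
        value.contains(',')
    }

    // "machine-1,10.0.0.1" -> inline CSV content; "my-hardware" -> file lookup.
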
-/// The `CrdCreator` trait is used to create CRDs. Each variant family should have a `CrdCreator`
-/// that is responsible for creating the CRDs needed for testing.
-#[async_trait::async_trait]
-pub(crate) trait CrdCreator: Sync {
-    /// Return the image id that should be used for normal testing.
-    async fn image_id(&self, crd_input: &CrdInput) -> Result<String>;
-
-    /// Return the image id that should be used as the starting point for migration testing.
-    async fn starting_image_id(&self, crd_input: &CrdInput) -> Result<String>;
-
-    /// Create a CRD for the cluster needed to launch Bottlerocket. If no cluster CRD is
-    /// needed, `CreateCrdOutput::None` can be returned.
-    async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result<CreateCrdOutput>;
-
-    /// Create a CRD to launch Bottlerocket. `CreateCrdOutput::None` can be returned if this CRD is
-    /// not needed.
-    async fn bottlerocket_crd<'a>(
-        &self,
-        bottlerocket_input: BottlerocketInput<'a>,
-    ) -> Result<CreateCrdOutput>;
-
-    /// Create a CRD that migrates Bottlerocket from one version to another.
-    async fn migration_crd<'a>(
-        &self,
-        migration_input: MigrationInput<'a>,
-    ) -> Result<CreateCrdOutput>;
-
-    /// Create a testing CRD for this variant of Bottlerocket.
-    async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result<CreateCrdOutput>;
-
-    /// Create a workload testing CRD for this variant of Bottlerocket.
-    async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result<CreateCrdOutput>;
-
-    /// Create a set of additional fields that may be used by an externally defined agent on top of
-    /// the ones in `CrdInput`
-    fn additional_fields(&self, _test_type: &str) -> BTreeMap<String, String> {
-        Default::default()
-    }
-
-    /// Creates a set of CRDs for the specified variant and test type that can be added to a
-    /// TestSys cluster.
-    async fn create_crds(
-        &self,
-        test_type: &KnownTestType,
-        crd_input: &CrdInput,
-    ) -> Result<Vec<Crd>> {
-        let mut crds = Vec::new();
-        let image_id = match &test_type {
-            KnownTestType::Migration => {
-                if let Some(image_id) = &crd_input.starting_image_id {
-                    debug!(
-                        "Using the provided starting image id for migration testing '{}'",
-                        image_id
-                    );
-                    image_id.to_string()
-                } else {
-                    let image_id = self.starting_image_id(crd_input).await?;
-                    debug!(
-                        "A starting image id was not provided, '{}' will be used instead.",
-                        image_id
-                    );
-                    image_id
-                }
-            }
-            _ => self.image_id(crd_input).await?,
-        };
-        for cluster_name in &crd_input.cluster_names()? {
-            let cluster_output = self
-                .cluster_crd(ClusterInput {
-                    cluster_name,
-                    image_id: &image_id,
-                    crd_input,
-                    cluster_config: &crd_input.resolved_cluster_config(
-                        cluster_name,
-                        &mut self
-                            .additional_fields(&test_type.to_string())
-                            .into_iter()
-                            // Add the image id in case it is needed for cluster creation
-                            .chain(Some(("image-id".to_string(), image_id.clone())))
-                            .collect::<BTreeMap<String, String>>(),
-                    )?,
-                    hardware_csv: &crd_input
-                        .resolved_hardware_csv()
-                        .transpose()
-                        .or_else(|| crd_input.hardware_for_cluster(cluster_name).transpose())
-                        .transpose()?,
-                })
-                .await?;
-            let cluster_crd_name = cluster_output.crd_name();
-            if let Some(crd) = cluster_output.crd() {
-                debug!("Cluster crd was created for '{}'", cluster_name);
-                crds.push(crd)
-            }
-            let bottlerocket_output = self
-                .bottlerocket_crd(BottlerocketInput {
-                    cluster_crd_name: &cluster_crd_name,
-                    image_id: image_id.clone(),
-                    test_type,
-                    crd_input,
-                })
-                .await?;
-            let bottlerocket_crd_name = bottlerocket_output.crd_name();
-            match &test_type {
-                KnownTestType::Conformance | KnownTestType::Quick => {
-                    if let Some(crd) = bottlerocket_output.crd() {
-                        debug!("Bottlerocket crd was created for '{}'", cluster_name);
-                        crds.push(crd)
-                    }
-                    let test_output = self
-                        .test_crd(TestInput {
-                            cluster_crd_name: &cluster_crd_name,
-                            bottlerocket_crd_name: &bottlerocket_crd_name,
-                            test_type,
-                            crd_input,
-                            prev_tests: Default::default(),
-                            name_suffix: None,
-                        })
-                        .await?;
-                    if let Some(crd) = test_output.crd() {
-                        crds.push(crd)
-                    }
-                }
-                KnownTestType::Workload => {
-                    if let Some(crd) = bottlerocket_output.crd() {
-                        debug!("Bottlerocket crd was created for '{}'", cluster_name);
-                        crds.push(crd)
-                    }
-                    let test_output = self
-                        .workload_crd(TestInput {
-                            cluster_crd_name: &cluster_crd_name,
-                            bottlerocket_crd_name: &bottlerocket_crd_name,
-                            test_type,
-                            crd_input,
-                            prev_tests: Default::default(),
-                            name_suffix: None,
-                        })
-                        .await?;
-                    if let Some(crd) = test_output.crd() {
-                        crds.push(crd)
-                    }
-                }
-                KnownTestType::Migration => {
-                    if let Some(crd) = bottlerocket_output.crd() {
-                        debug!("Bottlerocket crd was created for '{}'", cluster_name);
-                        crds.push(crd)
-                    }
-                    let mut tests = Vec::new();
-                    let test_output = self
-                        .test_crd(TestInput {
-                            cluster_crd_name: &cluster_crd_name,
-                            bottlerocket_crd_name: &bottlerocket_crd_name,
-                            test_type,
-                            crd_input,
-                            prev_tests: tests.clone(),
-                            name_suffix: "1-initial".into(),
-                        })
-                        .await?;
-                    if let Some(name) = test_output.crd_name() {
-                        tests.push(name)
-                    }
-                    if let Some(crd) = test_output.crd() {
-                        crds.push(crd)
-                    }
-                    let migration_output = self
-                        .migration_crd(MigrationInput {
-                            cluster_crd_name: &cluster_crd_name,
-                            bottlerocket_crd_name: &bottlerocket_crd_name,
-                            crd_input,
-                            prev_tests: tests.clone(),
-                            name_suffix: "2-migrate".into(),
-                            migration_direction: MigrationDirection::Upgrade,
-                        })
-                        .await?;
-                    if let Some(name) = migration_output.crd_name() {
-                        tests.push(name)
-                    }
-                    if let Some(crd) = migration_output.crd() {
-                        crds.push(crd)
-                    }
-                    let test_output = self
-                        .test_crd(TestInput {
-                            cluster_crd_name: &cluster_crd_name,
-                            bottlerocket_crd_name: &bottlerocket_crd_name,
-                            test_type,
-                            crd_input,
-                            prev_tests: tests.clone(),
-                            name_suffix: "3-migrated".into(),
-                        })
-                        .await?;
-                    if let Some(name) = test_output.crd_name() {
-                        tests.push(name)
-                    }
-                    if let Some(crd) = test_output.crd() {
-                        crds.push(crd)
-                    }
-                    let migration_output = self
-                        .migration_crd(MigrationInput {
-                            cluster_crd_name: &cluster_crd_name,
-                            bottlerocket_crd_name: &bottlerocket_crd_name,
-                            crd_input,
-                            prev_tests: tests.clone(),
-                            name_suffix: "4-migrate".into(),
-                            migration_direction: MigrationDirection::Downgrade,
-                        })
-                        .await?;
-                    if let Some(name) = migration_output.crd_name() {
-                        tests.push(name)
-                    }
-                    if let Some(crd) = migration_output.crd() {
-                        crds.push(crd)
-                    }
-                    let test_output = self
-                        .test_crd(TestInput {
-                            cluster_crd_name: &cluster_crd_name,
-                            bottlerocket_crd_name: &bottlerocket_crd_name,
-                            test_type,
-                            crd_input,
-                            prev_tests: tests,
-                            name_suffix: "5-final".into(),
-                        })
-                        .await?;
-                    if let Some(crd) = test_output.crd() {
-                        crds.push(crd)
-                    }
-                }
-            }
-        }
-
-        Ok(crds)
-    }
-
-    /// Creates a set of custom CRDs for the specified variant and test type that can be added to
-    /// a TestSys cluster.
-    async fn create_custom_crds(
-        &self,
-        test_type: &str,
-        crd_input: &CrdInput,
-        override_crd_template: Option<PathBuf>,
-    ) -> Result<Vec<Crd>> {
-        debug!("Creating custom CRDs for '{}' test", test_type);
-        let crd_template_file_path = &override_crd_template
-            .or_else(|| crd_input.custom_crd_template_file_path())
-            .context(error::InvalidSnafu {
-                what: format!(
-                    "A custom yaml file could not be found for test type '{}'",
-                    test_type
-                ),
-            })?;
-        info!(
-            "Creating custom crd from '{}'",
-            crd_template_file_path.display()
-        );
-        let mut crds = Vec::new();
-        for cluster_name in &crd_input.cluster_names()? {
-            let mut fields = crd_input.config_fields(cluster_name);
-            fields.insert("api-version".to_string(), API_VERSION.to_string());
-            fields.insert("namespace".to_string(), NAMESPACE.to_string());
-            fields.insert("image-id".to_string(), self.image_id(crd_input).await?);
-            fields.append(&mut self.additional_fields(test_type));
-
-            let mut handlebars = Handlebars::new();
-            handlebars.set_strict_mode(true);
-            let rendered_manifest = handlebars.render_template(
-                &std::fs::read_to_string(crd_template_file_path).context(error::FileSnafu {
-                    path: crd_template_file_path,
-                })?,
-                &fields,
-            )?;
-
-            for crd_doc in serde_yaml::Deserializer::from_str(&rendered_manifest) {
-                let value =
-                    serde_yaml::Value::deserialize(crd_doc).context(error::SerdeYamlSnafu {
-                        what: "Unable to deserialize rendered manifest",
-                    })?;
-                let mut crd: Crd =
-                    serde_yaml::from_value(value).context(error::SerdeYamlSnafu {
-                        what: "The manifest did not match a `CRD`",
-                    })?;
-                // Add in the secrets from the config manually.
-                match &mut crd {
-                    Crd::Test(test) => {
-                        test.spec.agent.secrets = Some(crd_input.config.secrets.clone())
-                    }
-                    Crd::Resource(resource) => {
-                        resource.spec.agent.secrets = Some(crd_input.config.secrets.clone())
-                    }
-                }
-                crds.push(crd);
-            }
-        }
-        Ok(crds)
-    }
-}
-
-/// The input used for cluster crd creation
-pub struct ClusterInput<'a> {
-    pub cluster_name: &'a String,
-    pub image_id: &'a String,
-    pub crd_input: &'a CrdInput<'a>,
-    pub cluster_config: &'a Option<String>,
-    pub hardware_csv: &'a Option<String>,
-}
-
-/// The input used for bottlerocket crd creation
-pub struct BottlerocketInput<'a> {
-    pub cluster_crd_name: &'a Option<String>,
-    /// The image id that should be used by this CRD
-    pub image_id: String,
-    pub test_type: &'a KnownTestType,
-    pub crd_input: &'a CrdInput<'a>,
-}
-
-/// The input used for test crd creation
-pub struct TestInput<'a> {
-    pub cluster_crd_name: &'a Option<String>,
-    pub bottlerocket_crd_name: &'a Option<String>,
-    pub test_type: &'a KnownTestType,
-    pub crd_input: &'a CrdInput<'a>,
-    /// The set of tests that have already been created that are related to this test
-    pub prev_tests: Vec<String>,
-    /// The suffix that should be appended to the end of the test name to prevent naming conflicts
-    pub name_suffix: Option<&'a str>,
-}
-
-/// The input used for migration crd creation
-pub struct MigrationInput<'a> {
-    pub cluster_crd_name: &'a Option<String>,
-    pub bottlerocket_crd_name: &'a Option<String>,
-    pub crd_input: &'a CrdInput<'a>,
-    /// The set of tests that have already been created that are related to this test
-    pub prev_tests: Vec<String>,
-    /// The suffix that should be appended to the end of the test name to prevent naming conflicts
-    pub name_suffix: Option<&'a str>,
-    pub migration_direction: MigrationDirection,
-}
-
-pub enum MigrationDirection {
-    Upgrade,
-    Downgrade,
-}
-
-pub enum CreateCrdOutput {
-    /// A new CRD was created and needs to be applied to the cluster.
-    NewCrd(Box<Crd>),
-    /// An existing CRD is already representing this object.
-    ExistingCrd(String),
-    /// There is no CRD to create for this step of this family.
-    None,
-}
-
-impl Default for CreateCrdOutput {
-    fn default() -> Self {
-        Self::None
-    }
-}
-
-impl CreateCrdOutput {
-    /// Get the name of the CRD that was created or already existed
-    pub(crate) fn crd_name(&self) -> Option<String> {
-        match self {
-            CreateCrdOutput::NewCrd(crd) => {
-                Some(crd.name().expect("A CRD is missing the name field."))
-            }
-            CreateCrdOutput::ExistingCrd(name) => Some(name.to_string()),
-            CreateCrdOutput::None => None,
-        }
-    }
-
-    /// Get the CRD if it was created
-    pub(crate) fn crd(self) -> Option<Crd> {
-        match self {
-            CreateCrdOutput::NewCrd(crd) => Some(*crd),
-            CreateCrdOutput::ExistingCrd(_) => None,
-            CreateCrdOutput::None => None,
-        }
-    }
-}
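
A `CrdCreator` implementation signals one of three outcomes per step. A sketch of how a caller might unpack `CreateCrdOutput`, mirroring the pattern in `create_crds` above (the helper name is hypothetical):

    fn record(output: CreateCrdOutput, crds: &mut Vec<Crd>) -> Option<String> {
        // Capture the name first; `crd()` consumes the output.
        let name = output.crd_name();
        if let Some(crd) = output.crd() {
            // Only `NewCrd` yields a CRD that still needs to be applied.
            crds.push(crd);
        }
        name
    }
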
diff --git a/tools/testsys/src/delete.rs b/tools/testsys/src/delete.rs
deleted file mode 100644
index a08ec9f71f0..00000000000
--- a/tools/testsys/src/delete.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-use crate::error::Result;
-use clap::Parser;
-use futures::TryStreamExt;
-use log::info;
-use testsys_model::test_manager::{CrdState, CrdType, DeleteEvent, SelectionParams, TestManager};
-
-/// Delete all tests and resources from a testsys cluster.
-#[derive(Debug, Parser)]
-pub(crate) struct Delete {
-    /// Only delete tests
-    #[clap(long)]
-    test: bool,
-
-    /// Focus status on a particular arch
-    #[clap(long)]
-    arch: Option<String>,
-
-    /// Focus status on a particular variant
-    #[clap(long)]
-    variant: Option<String>,
-
-    /// Only delete passed tests
-    #[clap(long, conflicts_with_all=&["failed", "running"])]
-    passed: bool,
-
-    /// Only delete failed tests
-    #[clap(long, conflicts_with_all=&["passed", "running"])]
-    failed: bool,
-
-    /// Only delete CRDs that haven't finished
-    #[clap(long, conflicts_with_all=&["passed", "failed"])]
-    running: bool,
-}
-
-impl Delete {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        let state = if self.running {
-            info!("Deleting all running tests and resources");
-            Some(CrdState::NotFinished)
-        } else if self.passed {
-            info!("Deleting all passed tests");
-            Some(CrdState::Passed)
-        } else if self.failed {
-            info!("Deleting all failed tests");
-            Some(CrdState::Failed)
-        } else {
-            info!("Deleting all tests and resources");
-            None
-        };
-        let crd_type = self.test.then_some(CrdType::Test);
-        let mut labels = Vec::new();
-        if let Some(arch) = self.arch {
-            labels.push(format!("testsys/arch={}", arch))
-        };
-        if let Some(variant) = self.variant {
-            labels.push(format!("testsys/variant={}", variant))
-        };
-        let mut stream = client
-            .delete(
-                &SelectionParams {
-                    labels: Some(labels.join(",")),
-                    state,
-                    crd_type,
-                    ..Default::default()
-                },
-                false,
-            )
-            .await?;
-
-        while let Some(delete) = stream.try_next().await? {
-            match delete {
-                DeleteEvent::Starting(crd) => println!("Starting delete for {}", crd.name()),
-                DeleteEvent::Deleted(crd) => println!("Delete finished for {}", crd.name()),
-                DeleteEvent::Failed(crd) => println!("Delete failed for {}", crd.name()),
-            }
-        }
-        info!("Delete finished");
-        Ok(())
-    }
-}
- #[snafu(display("Unable to build '{}': {}", what, source))] - Build { - what: String, - source: Box, - }, - - #[snafu(display("Unable to build datacenter credentials: {}", source))] - CredsBuild { - source: pubsys_config::vmware::Error, - }, - - #[snafu(display("Unable to build data center config: {}", source))] - DatacenterBuild { - source: pubsys_config::vmware::Error, - }, - - #[snafu(context(false), display("{}", source))] - DescribeImages { - source: SdkError, - }, - - #[snafu(display("Unable to read file '{}': {}", path.display(), source))] - File { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(context(false), display("Unable render templated yaml: {}", source))] - HandlebarsRender { source: handlebars::RenderError }, - - #[snafu( - context(false), - display("Unable create template from yaml: {}", source) - )] - HandlebarsTemplate { - #[snafu(source(from(handlebars::TemplateError, Box::new)))] - source: Box, - }, - - #[snafu(display("Unable to create map from {}: {}", what, source))] - IntoMap { - what: String, - source: testsys_model::Error, - }, - - #[snafu(display("{}", what))] - Invalid { what: String }, - - #[snafu(display("{}: {}", what, source))] - IO { - what: String, - source: std::io::Error, - }, - - #[snafu(display("Unable to parse K8s version '{}'", version))] - K8sVersion { version: String }, - - #[snafu(display("{} was missing from {}", item, what))] - Missing { item: String, what: String }, - - #[snafu(context(false), display("{}", source))] - PubsysConfig { source: pubsys_config::Error }, - - #[snafu(display("Unable to create secret name for '{}': {}", secret_name, source))] - SecretName { - secret_name: String, - source: testsys_model::Error, - }, - - #[snafu(display("{}: {}", what, source))] - SerdeJson { - what: String, - source: serde_json::Error, - }, - - #[snafu(display("{}: {}", what, source))] - SerdeYaml { - what: String, - source: serde_yaml::Error, - }, - - #[snafu(context(false), display("{}", source))] - TestManager { - source: testsys_model::test_manager::Error, - }, - - #[snafu(context(false), display("{}", source))] - TestsysConfig { source: testsys_config::Error }, - - #[snafu(display("{} is not supported.", what))] - Unsupported { what: String }, - - #[snafu(display("Unable to parse url from '{}': {}", url, source))] - UrlParse { - url: String, - source: url::ParseError, - }, - - #[snafu(display("Unable to create `Variant` from `{}`: {}", variant, source))] - Variant { - variant: String, - source: bottlerocket_variant::error::Error, - }, - - #[snafu(display("Error reading config: {}", source))] - VmwareConfig { - source: pubsys_config::vmware::Error, - }, -} diff --git a/tools/testsys/src/install.rs b/tools/testsys/src/install.rs deleted file mode 100644 index 179918696e7..00000000000 --- a/tools/testsys/src/install.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::error::Result; -use crate::run::TestsysImages; -use clap::Parser; -use log::{info, trace}; -use std::path::PathBuf; -use testsys_config::TestConfig; -use testsys_model::test_manager::{ImageConfig, TestManager}; - -/// The install subcommand is responsible for putting all of the necessary components for testsys in -/// a k8s cluster. 
diff --git a/tools/testsys/src/install.rs b/tools/testsys/src/install.rs
deleted file mode 100644
index 179918696e7..00000000000
--- a/tools/testsys/src/install.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-use crate::error::Result;
-use crate::run::TestsysImages;
-use clap::Parser;
-use log::{info, trace};
-use std::path::PathBuf;
-use testsys_config::TestConfig;
-use testsys_model::test_manager::{ImageConfig, TestManager};
-
-/// The install subcommand is responsible for putting all of the necessary components for testsys
-/// into a k8s cluster.
-#[derive(Debug, Parser)]
-pub(crate) struct Install {
-    /// The path to `Test.toml`
-    #[arg(long, env = "TESTSYS_TEST_CONFIG_PATH")]
-    test_config_path: PathBuf,
-
-    #[command(flatten)]
-    agent_images: TestsysImages,
-}
-
-impl Install {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        // Use Test.toml or default
-        let test_config = TestConfig::from_path_or_default(&self.test_config_path)?;
-
-        let test_opts = test_config.test.to_owned().unwrap_or_default();
-
-        let images = vec![
-            Some(self.agent_images.into()),
-            Some(test_opts.testsys_images),
-            test_opts.testsys_image_registry.map(|registry| {
-                testsys_config::TestsysImages::new(registry, test_opts.testsys_image_tag)
-            }),
-            Some(testsys_config::TestsysImages::public_images()),
-        ]
-        .into_iter()
-        .flatten()
-        .fold(Default::default(), testsys_config::TestsysImages::merge);
-
-        let controller_uri = images
-            .controller_image
-            .expect("The default controller image is missing.");
-
-        trace!(
-            "Installing testsys using controller image '{}'",
-            controller_uri
-        );
-
-        let controller_image = match images.testsys_agent_pull_secret {
-            Some(secret) => ImageConfig::WithCreds {
-                secret,
-                image: controller_uri,
-            },
-            None => ImageConfig::Image(controller_uri),
-        };
-        client.install(controller_image).await?;
-
-        info!("testsys components were successfully installed.");
-
-        Ok(())
-    }
-}
diff --git a/tools/testsys/src/logs.rs b/tools/testsys/src/logs.rs
deleted file mode 100644
index d63dd81bf65..00000000000
--- a/tools/testsys/src/logs.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use crate::error::{self, Result};
-use clap::Parser;
-use futures::TryStreamExt;
-use snafu::OptionExt;
-use testsys_model::test_manager::{ResourceState, TestManager};
-use unescape::unescape;
-
-/// Stream the logs of an object from a testsys cluster.
-#[derive(Debug, Parser)]
-pub(crate) struct Logs {
-    /// The name of the test we want logs from.
-    #[clap(long, conflicts_with = "resource")]
-    test: Option<String>,
-
-    /// The name of the resource we want logs from.
-    #[clap(long, conflicts_with = "test", requires = "state")]
-    resource: Option<String>,
-
-    /// The resource state we want logs for (Creation, Destruction).
-    #[clap(long = "state", conflicts_with = "test")]
-    resource_state: Option<ResourceState>,
-
-    /// Follow logs
-    #[clap(long, short)]
-    follow: bool,
-}
-
-impl Logs {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        match (self.test, self.resource, self.resource_state) {
-            (Some(test), None, None) => {
-                let mut logs = client.test_logs(test, self.follow).await?;
-                while let Some(line) = logs.try_next().await? {
-                    println!(
-                        "{}",
-                        unescape(&String::from_utf8_lossy(&line)).context(error::InvalidSnafu {
-                            what: "Unable to unescape log string"
-                        })?
-                    );
-                }
-            }
-            (None, Some(resource), Some(state)) => {
-                let mut logs = client.resource_logs(resource, state, self.follow).await?;
-                while let Some(line) = logs.try_next().await? {
-                    println!(
-                        "{}",
-                        unescape(&String::from_utf8_lossy(&line)).context(error::InvalidSnafu {
-                            what: "Unable to unescape log string"
-                        })?
-                    );
-                }
-            }
-            _ => {
-                return Err(error::Error::Invalid {
-                    what: "Invalid arguments were provided. Exactly one of `--test` or `--resource` must be given."
-                        .to_string(),
-                })
-            }
-        };
-        Ok(())
-    }
-}
diff --git a/tools/testsys/src/main.rs b/tools/testsys/src/main.rs
deleted file mode 100644
index 26a97d4b1db..00000000000
--- a/tools/testsys/src/main.rs
+++ /dev/null
@@ -1,112 +0,0 @@
-use clap::{Parser, Subcommand};
-use delete::Delete;
-use env_logger::Builder;
-use error::Result;
-use install::Install;
-use log::{debug, error, LevelFilter};
-use logs::Logs;
-use restart_test::RestartTest;
-use run::Run;
-use secret::Add;
-use status::Status;
-use std::path::PathBuf;
-use testsys_model::test_manager::TestManager;
-use uninstall::Uninstall;
-
-mod aws_ecs;
-mod aws_k8s;
-mod aws_resources;
-mod crds;
-mod delete;
-mod error;
-mod install;
-mod logs;
-mod metal_k8s;
-mod migration;
-mod restart_test;
-mod run;
-mod secret;
-mod sonobuoy;
-mod status;
-mod uninstall;
-mod vmware_k8s;
-
-/// A program for running and controlling Bottlerocket tests in a Kubernetes cluster using
-/// bottlerocket-test-system
-#[derive(Parser, Debug)]
-#[clap(about, long_about = None)]
-struct TestsysArgs {
-    #[arg(global = true, long, default_value = "INFO")]
-    /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE
-    log_level: LevelFilter,
-
-    /// Path to the kubeconfig file for the testsys cluster. Can also be passed with the KUBECONFIG
-    /// environment variable.
-    #[arg(long)]
-    kubeconfig: Option<PathBuf>,
-
-    #[command(subcommand)]
-    command: Command,
-}
-
-impl TestsysArgs {
-    async fn run(self) -> Result<()> {
-        let client = match self.kubeconfig {
-            Some(path) => TestManager::new_from_kubeconfig_path(&path).await?,
-            None => TestManager::new().await?,
-        };
-        match self.command {
-            Command::Run(run) => run.run(client).await?,
-            Command::Install(install) => install.run(client).await?,
-            Command::Delete(delete) => delete.run(client).await?,
-            Command::Status(status) => status.run(client).await?,
-            Command::Logs(logs) => logs.run(client).await?,
-            Command::RestartTest(restart_test) => restart_test.run(client).await?,
-            Command::Add(add) => add.run(client).await?,
-            Command::Uninstall(uninstall) => uninstall.run(client).await?,
-        };
-        Ok(())
-    }
-}
-
-#[derive(Subcommand, Debug)]
-enum Command {
-    // We need to box some commands because they require significantly more arguments than the
-    // other commands.
-    Install(Box<Install>),
-    Run(Box<Run>),
-    Delete(Delete),
-    Status(Status),
-    Logs(Logs),
-    RestartTest(RestartTest),
-    Add(Add),
-    Uninstall(Uninstall),
-}
-
-#[tokio::main]
-async fn main() {
-    let args = TestsysArgs::parse();
-    init_logger(args.log_level);
-    debug!("{:?}", args);
-    if let Err(e) = args.run().await {
-        error!("{}", e);
-        std::process::exit(1);
-    }
-}
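
`init_logger` (below) gives the `RUST_LOG` environment variable precedence over the `--log-level` flag, and the flag-based fallback is scoped to this crate only. A rough sketch of which source wins, using a hypothetical helper that is not part of this file:

    fn effective_filter(cli: log::LevelFilter) -> String {
        match std::env::var("RUST_LOG") {
            // The env var wins and keeps env_logger's full filter-spec semantics.
            Ok(spec) => format!("RUST_LOG={}", spec),
            // Otherwise the CLI level applies to this crate's logs only.
            Err(_) => format!("crate-only filter at {}", cli),
        }
    }
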
-
-/// Initialize the logger with the value passed by `--log-level` (or its default) when the
-/// `RUST_LOG` environment variable is not present. If present, the `RUST_LOG` environment variable
-/// overrides `--log-level`/`level`.
-fn init_logger(level: LevelFilter) {
-    match std::env::var(env_logger::DEFAULT_FILTER_ENV).ok() {
-        Some(_) => {
-            // RUST_LOG exists; env_logger will use it.
-            Builder::from_default_env().init();
-        }
-        None => {
-            // RUST_LOG does not exist; use default log level for this crate only.
-            Builder::new()
-                .filter(Some(env!("CARGO_CRATE_NAME")), level)
-                .init();
-        }
-    }
-}
diff --git a/tools/testsys/src/metal_k8s.rs b/tools/testsys/src/metal_k8s.rs
deleted file mode 100644
index 3cef00a1541..00000000000
--- a/tools/testsys/src/metal_k8s.rs
+++ /dev/null
@@ -1,261 +0,0 @@
-use crate::crds::{
-    BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput,
-    TestInput,
-};
-use crate::error::{self, Result};
-use crate::migration::migration_crd;
-use crate::sonobuoy::{sonobuoy_crd, workload_crd};
-use bottlerocket_types::agent_config::MetalK8sClusterConfig;
-use maplit::btreemap;
-use serde::Deserialize;
-use snafu::{OptionExt, ResultExt};
-use std::collections::BTreeMap;
-use testsys_model::{Crd, DestructionPolicy};
-use url::Url;
-
-/// A `CrdCreator` responsible for creating crds related to `metal-k8s` variants.
-pub(crate) struct MetalK8sCreator {
-    pub(crate) region: String,
-    pub(crate) encoded_mgmt_cluster_kubeconfig: String,
-    pub(crate) image_name: String,
-}
-
-#[async_trait::async_trait]
-impl CrdCreator for MetalK8sCreator {
-    /// Use the provided image name with the `os_image_dir` from `Test.toml` for the image id.
-    async fn image_id(&self, crd_input: &CrdInput) -> Result<String> {
-        image_url(
-            crd_input
-                .config
-                .os_image_dir
-                .as_ref()
-                .context(error::InvalidSnafu {
-                    what: "An os image directory is required for metal testing",
-                })?,
-            &self.image_name,
-        )
-    }
-
-    /// Use standard naming conventions to predict the starting image name.
-    async fn starting_image_id(&self, crd_input: &CrdInput) -> Result<String> {
-        let filename = format!(
-            "bottlerocket-{}-{}-{}.img.gz",
-            crd_input.variant,
-            crd_input.arch,
-            crd_input
-                .starting_version
-                .as_ref()
-                .context(error::InvalidSnafu {
-                    what: "The starting version must be provided for migration testing"
-                })?
-        );
-        image_url(crd_input.config.os_image_dir.as_ref().context(error::InvalidSnafu {
-            what: "An os image directory is required for metal testing if a starting image id is not used",
-        })?, &filename)
-    }
-
-    /// Creates a metal K8s cluster CRD with the `cluster_name` in `cluster_input`.
-    async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result<CreateCrdOutput> {
-        let (cluster_name, control_plane_endpoint_ip, k8s_version) = cluster_data(
-            cluster_input
-                .cluster_config
-                .as_ref()
-                .context(error::InvalidSnafu {
-                    what: "A cluster config is required for Bare Metal cluster provisioning.",
-                })?,
-        )?;
-
-        let labels = cluster_input.crd_input.labels(btreemap! {
-            "testsys/type".to_string() => "cluster".to_string(),
-            "testsys/cluster".to_string() => cluster_name.clone(),
-            "testsys/controlPlaneEndpoint".to_string() => control_plane_endpoint_ip,
-            "testsys/k8sVersion".to_string() => k8s_version
-        });
-
-        // Check if the cluster already has a CRD
-        if let Some(cluster_crd) = cluster_input
-            .crd_input
-            .existing_crds(
-                &labels,
-                &[
-                    "testsys/cluster",
-                    "testsys/type",
-                    "testsys/controlPlaneEndpoint",
-                    "testsys/k8sVersion",
-                ],
-            )
-            .await?
-            .pop()
-        {
-            return Ok(CreateCrdOutput::ExistingCrd(cluster_crd));
-        }
-
-        // Check if an existing cluster is using this endpoint
-        let existing_clusters = cluster_input
-            .crd_input
-            .existing_crds(&labels, &["testsys/type", "testsys/controlPlaneEndpoint"])
-            .await?;
-
-        let metal_k8s_crd = MetalK8sClusterConfig::builder()
-            .set_labels(Some(labels))
-            .mgmt_cluster_kubeconfig_base64(&self.encoded_mgmt_cluster_kubeconfig)
-            .hardware_csv_base64(base64::encode(
-                cluster_input
-                    .hardware_csv
-                    .as_ref()
-                    .context(error::InvalidSnafu {
-                        what: "A hardware CSV is required for Bare Metal testing",
-                    })?,
-            ))
-            .cluster_config_base64(base64::encode(
-                cluster_input
-                    .cluster_config
-                    .as_ref()
-                    .context(error::InvalidSnafu {
-                        what: "A cluster config is required for Bare Metal testing",
-                    })?,
-            ))
-            .eks_a_release_manifest_url(
-                cluster_input
-                    .crd_input
-                    .config
-                    .dev
-                    .eks_a_release_manifest_url
-                    .clone(),
-            )
-            .set_conflicts_with(Some(existing_clusters))
-            .destruction_policy(
-                cluster_input
-                    .crd_input
-                    .config
-                    .dev
-                    .cluster_destruction_policy
-                    .to_owned()
-                    .unwrap_or(DestructionPolicy::OnTestSuccess),
-            )
-            .image(
-                cluster_input
-                    .crd_input
-                    .images
-                    .metal_k8s_cluster_resource_agent_image
-                    .as_ref()
-                    .expect(
-                        "The default metal K8s cluster resource provider image URI is missing.",
-                    ),
-            )
-            .set_image_pull_secret(
-                cluster_input
-                    .crd_input
-                    .images
-                    .testsys_agent_pull_secret
-                    .to_owned(),
-            )
-            .privileged(true)
-            .build(cluster_name)
-            .context(error::BuildSnafu {
-                what: "metal K8s cluster CRD",
-            })?;
-
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(
-            metal_k8s_crd,
-        ))))
-    }
-
-    /// Machines are provisioned during cluster creation, so there is nothing to do here.
-    async fn bottlerocket_crd<'a>(
-        &self,
-        _bottlerocket_input: BottlerocketInput<'a>,
-    ) -> Result<CreateCrdOutput> {
-        Ok(CreateCrdOutput::None)
-    }
-
-    async fn migration_crd<'a>(
-        &self,
-        migration_input: MigrationInput<'a>,
-    ) -> Result<CreateCrdOutput> {
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd(
-            migration_input,
-            Some("us-west-2".to_string()),
-            "instanceIds",
-        )?))))
-    }
-
-    async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result<CreateCrdOutput> {
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(sonobuoy_crd(
-            test_input,
-        )?))))
-    }
-
-    async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result<CreateCrdOutput> {
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd(
-            test_input,
-        )?))))
-    }
-
-    fn additional_fields(&self, _test_type: &str) -> BTreeMap<String, String> {
-        btreemap! {"region".to_string() => self.region.clone()}
-    }
-}
-
-/// Determine the (cluster name, control plane endpoint ip, K8s version) from an EKS Anywhere
-/// cluster manifest
-fn cluster_data(config: &str) -> Result<(String, String, String)> {
-    let cluster_manifest = serde_yaml::Deserializer::from_str(config)
-        .map(|config| {
-            serde_yaml::Value::deserialize(config).context(error::SerdeYamlSnafu {
-                what: "Unable to deserialize cluster config",
-            })
-        })
-        // Make sure all of the configs were deserializable
-        .collect::<Result<Vec<serde_yaml::Value>>>()?
-        .into_iter()
-        // Find the `Cluster` config
-        .find(|config| {
-            config.get("kind") == Some(&serde_yaml::Value::String("Cluster".to_string()))
-        });
-    let cluster_name = cluster_manifest
-        .as_ref()
-        // Get the name from the metadata field in the `Cluster` config
-        .and_then(|config| config.get("metadata"))
-        .and_then(|config| config.get("name"))
-        .and_then(|name| name.as_str())
-        .context(error::MissingSnafu {
-            item: "name",
-            what: "EKS Anywhere config metadata",
-        })?
-        .to_string();
-
-    let control_plane_endpoint_ip = cluster_manifest
-        .as_ref()
-        // Get the control plane endpoint host from the spec field in the `Cluster` config
-        .and_then(|config| config.get("spec"))
-        .and_then(|config| config.get("controlPlaneConfiguration"))
-        .and_then(|config| config.get("endpoint"))
-        .and_then(|config| config.get("host"))
-        .and_then(|name| name.as_str())
-        .context(error::MissingSnafu {
-            item: "control plane endpoint",
-            what: "EKS Anywhere config metadata",
-        })?
-        .to_string();
-
-    let k8s_version = cluster_manifest
-        .as_ref()
-        // Get the kubernetes version from the spec field in the `Cluster` config
-        .and_then(|config| config.get("spec"))
-        .and_then(|config| config.get("kubernetesVersion"))
-        .and_then(|name| name.as_str())
-        .context(error::MissingSnafu {
-            item: "kubernetes version",
-            what: "EKS Anywhere config metadata",
-        })?
-        .to_string();
-
-    Ok((cluster_name, control_plane_endpoint_ip, k8s_version))
-}
-
-fn image_url(image_dir: &str, filename: &str) -> Result<String> {
-    let image_url = Url::parse(image_dir)
-        .and_then(|base_url| base_url.join(filename))
-        .context(error::UrlParseSnafu { url: image_dir })?;
-    Ok(image_url.to_string())
-}
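
`image_url` relies on `Url::join`, which resolves the file name against the base the way a browser resolves a relative link, so the configured image directory needs a trailing slash to keep its last path segment. A quick sketch with a made-up URL:

    use url::Url;

    fn join_example() -> Result<String, url::ParseError> {
        let base = Url::parse("https://example.com/images/")?;
        // Without the trailing slash above, "images" would be replaced by the
        // file name instead of becoming its parent directory.
        Ok(base
            .join("bottlerocket-metal-k8s-1.28-x86_64.img.gz")?
            .to_string())
    }
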
diff --git a/tools/testsys/src/migration.rs b/tools/testsys/src/migration.rs
deleted file mode 100644
index aeb3f7a1bc1..00000000000
--- a/tools/testsys/src/migration.rs
+++ /dev/null
@@ -1,109 +0,0 @@
-use crate::crds::{MigrationDirection, MigrationInput};
-use crate::error::{self, Result};
-use bottlerocket_types::agent_config::MigrationConfig;
-use maplit::btreemap;
-use snafu::{OptionExt, ResultExt};
-use testsys_model::Test;
-
-/// Create a CRD for migrating Bottlerocket instances using SSM commands.
-/// `aws_region_override` allows the region that's normally derived from the cluster resource CRD
-/// to be overridden.
-/// `instance_id_field_name` specifies the VM/Instance resource CRD field name for retrieving the
-/// instance IDs of the created instances.
-pub(crate) fn migration_crd(
-    migration_input: MigrationInput,
-    aws_region_override: Option<String>,
-    instance_id_field_name: &str,
-) -> Result<Test> {
-    let cluster_resource_name = migration_input
-        .cluster_crd_name
-        .as_ref()
-        .expect("A cluster name is required for migrations");
-
-    let labels = migration_input.crd_input.labels(btreemap! {
-        "testsys/type".to_string() => "migration".to_string(),
-        "testsys/cluster".to_string() => cluster_resource_name.to_string(),
-    });
-
-    // Determine which version should be migrated to from `migration_input`.
-    let migration_version = match migration_input.migration_direction {
-        MigrationDirection::Upgrade => migration_input
-            .crd_input
-            .migrate_to_version
-            .as_ref()
-            .context(error::InvalidSnafu {
-                what: "The target migration version is required",
-            }),
-        MigrationDirection::Downgrade => migration_input
-            .crd_input
-            .starting_version
-            .as_ref()
-            .context(error::InvalidSnafu {
-                what: "The starting migration version is required",
-            }),
-    }?;
-
-    // Construct the migration CRD.
-    let mut migration_config = MigrationConfig::builder();
-
-    // Use the specified AWS region for the migration test.
-    // If no region is specified, derive the appropriate region based on the region of the
-    // cluster resource CRD (assuming it's an ECS or EKS cluster).
-    if let Some(aws_region) = aws_region_override {
-        migration_config.aws_region(aws_region)
-    } else {
-        migration_config.aws_region_template(cluster_resource_name, "region")
-    };
-
-    migration_config
-        .instance_ids_template(
-            migration_input
-                .bottlerocket_crd_name
-                .as_ref()
-                .unwrap_or(cluster_resource_name),
-            instance_id_field_name,
-        )
-        .migrate_to_version(migration_version)
-        .tuf_repo(migration_input.crd_input.tuf_repo_config())
-        .assume_role(migration_input.crd_input.config.agent_role.clone())
-        .set_resources(Some(
-            vec![cluster_resource_name.to_owned()]
-                .into_iter()
-                .chain(migration_input.bottlerocket_crd_name.iter().cloned())
-                .collect(),
-        ))
-        .set_depends_on(Some(migration_input.prev_tests))
-        .image(
-            migration_input
-                .crd_input
-                .images
-                .migration_test_agent_image
-                .as_ref()
-                .expect("Missing default image for migration test agent"),
-        )
-        .set_image_pull_secret(
-            migration_input
-                .crd_input
-                .images
-                .testsys_agent_pull_secret
-                .to_owned(),
-        )
-        .keep_running(
-            migration_input
-                .crd_input
-                .config
-                .dev
-                .keep_tests_running
-                .unwrap_or(false),
-        )
-        .set_secrets(Some(migration_input.crd_input.config.secrets.to_owned()))
-        .set_labels(Some(labels))
-        .build(format!(
-            "{}-{}",
-            cluster_resource_name,
-            migration_input
-                .name_suffix
-                .unwrap_or(migration_input.crd_input.test_flavor.as_str())
-        ))
-        .context(error::BuildSnafu {
-            what: "migration CRD",
-        })
-}
diff --git a/tools/testsys/src/restart_test.rs b/tools/testsys/src/restart_test.rs
deleted file mode 100644
index 85f4fbac28a..00000000000
--- a/tools/testsys/src/restart_test.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use crate::error::Result;
-use clap::Parser;
-use testsys_model::test_manager::TestManager;
-
-/// Restart a test. This will delete the test object from the testsys cluster and replace it with
-/// a new, identical test object with a clean state.
-#[derive(Debug, Parser)]
-pub(crate) struct RestartTest {
-    /// The name of the test to be restarted.
-    #[clap()]
-    test_name: String,
-}
-
-impl RestartTest {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        Ok(client.restart_test(&self.test_name).await?)
-    }
-}
diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs
deleted file mode 100644
index eb03de0a813..00000000000
--- a/tools/testsys/src/run.rs
+++ /dev/null
@@ -1,619 +0,0 @@
-use crate::aws_ecs::AwsEcsCreator;
-use crate::aws_k8s::AwsK8sCreator;
-use crate::crds::{CrdCreator, CrdInput};
-use crate::error;
-use crate::error::Result;
-use crate::metal_k8s::MetalK8sCreator;
-use crate::vmware_k8s::VmwareK8sCreator;
-use bottlerocket_variant::Variant;
-use clap::Parser;
-use log::{debug, info};
-use pubsys_config::vmware::{
-    Datacenter, DatacenterBuilder, DatacenterCreds, DatacenterCredsBuilder, DatacenterCredsConfig,
-    VMWARE_CREDS_PATH,
-};
-use pubsys_config::InfraConfig;
-use serde::{Deserialize, Serialize};
-use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize};
-use snafu::{OptionExt, ResultExt};
-use std::fs::read_to_string;
-use std::path::PathBuf;
-use std::str::FromStr;
-use testsys_config::{GenericVariantConfig, ResourceAgentType, TestConfig};
-use testsys_model::test_manager::TestManager;
-use testsys_model::SecretName;
-
-/// Run a set of tests for a given arch and variant
-#[derive(Debug, Parser)]
-pub(crate) struct Run {
-    /// The type of test to run. Options are `quick` and `conformance`.
-    test_flavor: TestType,
-
-    /// The architecture to test. Either x86_64 or aarch64.
- #[arg(long, env = "BUILDSYS_ARCH")] - arch: String, - - /// The variant to test - #[arg(long, env = "BUILDSYS_VARIANT")] - variant: String, - - /// The path to `Infra.toml` - #[arg(long, env = "PUBLISH_INFRA_CONFIG_PATH")] - infra_config_path: PathBuf, - - /// The path to `Test.toml` - #[arg(long, env = "TESTSYS_TEST_CONFIG_PATH")] - test_config_path: PathBuf, - - /// The path to the `tests` directory - #[arg(long, env = "TESTSYS_TESTS_DIR")] - tests_directory: PathBuf, - - /// The path to the EKS-A management cluster kubeconfig for vSphere or metal K8s cluster creation - #[arg(long, env = "TESTSYS_MGMT_CLUSTER_KUBECONFIG")] - mgmt_cluster_kubeconfig: Option, - - /// Use this named repo infrastructure from Infra.toml for upgrade/downgrade testing. - #[arg(long, env = "PUBLISH_REPO")] - repo: Option, - - /// The name of the vSphere data center in `Infra.toml` that should be used for testing - /// If no data center is provided, the first one in `vmware.datacenters` will be used - #[arg(long, env = "TESTSYS_DATACENTER")] - datacenter: Option, - - /// The name of the VMware OVA that should be used for testing - #[arg(long, env = "BUILDSYS_OVA")] - ova_name: Option, - - /// The name of the image that should be used for Bare Metal testing - #[arg(long, env = "BUILDSYS_NAME_FULL")] - image_name: Option, - - /// The path to `amis.json` - #[arg(long, env = "AMI_INPUT")] - ami_input: Option, - - /// Override for the region the tests should be run in. If none is provided the first region in - /// Infra.toml will be used. This is the region that the aws client is created with for testing - /// and resource agents. - #[arg(long, env = "TESTSYS_TARGET_REGION")] - target_region: Option, - - #[arg(long, env = "BUILDSYS_VERSION_BUILD")] - build_id: Option, - - #[command(flatten)] - agent_images: TestsysImages, - - #[command(flatten)] - config: CliConfig, - - // Migrations - /// Override the starting image used for migrations. The image will be pulled from available - /// amis in the users account if no override is provided. - #[arg(long, env = "TESTSYS_STARTING_IMAGE_ID")] - starting_image_id: Option, - - /// The starting version for migrations. This is required for all migrations tests. - /// This is the version that will be created and migrated to `migration-target-version`. - #[arg(long, env = "TESTSYS_STARTING_VERSION")] - migration_starting_version: Option, - - /// The commit id of the starting version for migrations. This is required for all migrations - /// tests unless `starting-image-id` is provided. This is the version that will be created and - /// migrated to `migration-target-version`. - #[arg(long, env = "TESTSYS_STARTING_COMMIT")] - migration_starting_commit: Option, - - /// The target version for migrations. This is required for all migration tests. This is the - /// version that will be migrated to. - #[arg(long, env = "BUILDSYS_VERSION_IMAGE")] - migration_target_version: Option, - - /// The template file that should be used for custom testing. - #[arg(long = "template-file", short = 'f')] - custom_crd_template: Option, -} - -/// This is a CLI parsable version of `testsys_config::GenericVariantConfig`. -#[derive(Debug, Parser)] -struct CliConfig { - /// The repo containing images necessary for conformance testing. It may be omitted to use the - /// default conformance image registry. - #[arg(long, env = "TESTSYS_CONFORMANCE_REGISTRY")] - conformance_registry: Option, - - /// The name of the cluster for resource agents (EKS resource agent, ECS resource agent). 
-    #[arg(long, env = "TESTSYS_TARGET_CLUSTER_NAME")]
-    target_cluster_name: Option<String>,
-
-    /// The sonobuoy image that should be used for conformance testing. It may be omitted to use
-    /// the default sonobuoy image.
-    #[arg(long, env = "TESTSYS_SONOBUOY_IMAGE")]
-    sonobuoy_image: Option<String>,
-
-    /// The image that should be used for conformance testing. It may be omitted to use the default
-    /// testing image.
-    #[arg(long, env = "TESTSYS_CONFORMANCE_IMAGE")]
-    conformance_image: Option<String>,
-
-    /// The role that should be assumed by the agents
-    #[arg(long, env = "TESTSYS_ASSUME_ROLE")]
-    assume_role: Option<String>,
-
-    /// Specify the instance type that should be used. This is only applicable for aws-* variants.
-    /// It can be omitted for non-aws variants and can be omitted to use default instance types.
-    #[arg(long, env = "TESTSYS_INSTANCE_TYPE")]
-    instance_type: Option<String>,
-
-    /// Add secrets to the testsys agents (`--secret awsCredentials=my-secret`)
-    #[arg(long, short, value_parser = parse_key_val, number_of_values = 1)]
-    secret: Vec<(String, SecretName)>,
-
-    /// The endpoint IP to reserve for the vSphere control plane VMs when creating a K8s cluster
-    #[arg(long, env = "TESTSYS_CONTROL_PLANE_ENDPOINT")]
-    pub control_plane_endpoint: Option<String>,
-
-    /// Specify the path to the userdata that should be added for Bottlerocket launch
-    #[arg(long, env = "TESTSYS_USERDATA")]
-    pub userdata: Option<String>,
-
-    /// Specify the method that should be used to launch instances
-    #[arg(long, env = "TESTSYS_RESOURCE_AGENT")]
-    pub resource_agent_type: Option<ResourceAgentType>,
-
-    /// A set of workloads that should be run for a workload test (`--workload my-workload=<image-uri>`)
-    #[arg(long = "workload", value_parser = parse_workloads, number_of_values = 1)]
-    pub workloads: Vec<(String, String)>,
-
-    /// The directory containing Bottlerocket images. For metal, this is the directory containing
-    /// gzipped images.
-    #[arg(long)]
-    pub os_image_dir: Option<String>,
-
-    /// The hardware that should be used for provisioning Bottlerocket. For metal, this is the
-    /// hardware csv that is passed to EKS Anywhere.
-    #[arg(long)]
-    pub hardware_csv: Option<String>,
-}
-
-impl From<CliConfig> for GenericVariantConfig {
-    fn from(val: CliConfig) -> Self {
-        GenericVariantConfig {
-            cluster_names: val.target_cluster_name.into_iter().collect(),
-            instance_type: val.instance_type,
-            resource_agent_type: val.resource_agent_type,
-            block_device_mapping: Default::default(),
-            secrets: val.secret.into_iter().collect(),
-            agent_role: val.assume_role,
-            sonobuoy_image: val.sonobuoy_image,
-            conformance_image: val.conformance_image,
-            conformance_registry: val.conformance_registry,
-            control_plane_endpoint: val.control_plane_endpoint,
-            userdata: val.userdata,
-            os_image_dir: val.os_image_dir,
-            hardware_csv: val.hardware_csv,
-            dev: Default::default(),
-            workloads: val.workloads.into_iter().collect(),
-        }
-    }
-}
-
-impl Run {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        // agent config (eventually with configuration)
-        let variant = Variant::new(&self.variant).context(error::VariantSnafu {
-            variant: self.variant,
-        })?;
-        debug!("Using variant '{}'", variant);
-
-        // Use Test.toml or default
-        let test_config = TestConfig::from_path_or_default(&self.test_config_path)?;
-
-        let test_opts = test_config.test.to_owned().unwrap_or_default();
-
-        let (variant_config, test_type) = test_config.reduced_config(
-            &variant,
-            &self.arch,
-            Some(self.config.into()),
-            &self.test_flavor.to_string(),
-        );
-        let resolved_test_type = TestType::from_str(&test_type)
-            .expect("All unrecognized test types become `TestType::Custom`");
-
-        // If a lock file exists, use that, otherwise use Infra.toml or default
-        let infra_config = InfraConfig::from_path_or_lock(&self.infra_config_path, true)?;
-
-        let repo_config = infra_config
-            .repo
-            .unwrap_or_default()
-            .remove(
-                &self
-                    .repo
-                    .or(test_opts.repo)
-                    .unwrap_or_else(|| "default".to_string()),
-            )
-            .unwrap_or_default();
-
-        let images = vec![
-            Some(self.agent_images.into()),
-            Some(test_opts.testsys_images),
-            test_opts.testsys_image_registry.map(|registry| {
-                testsys_config::TestsysImages::new(registry, test_opts.testsys_image_tag)
-            }),
-            Some(testsys_config::TestsysImages::public_images()),
-        ]
-        .into_iter()
-        .flatten()
-        .fold(Default::default(), testsys_config::TestsysImages::merge);
-
-        // The `CrdCreator` is responsible for creating crds for the given architecture and
-        // variant.
-        let crd_creator: Box<dyn CrdCreator> = match variant.family() {
-            "aws-k8s" => {
-                debug!("Using family 'aws-k8s'");
-                let aws_config = infra_config.aws.unwrap_or_default();
-                let region = aws_config
-                    .regions
-                    .front()
-                    .map(String::to_string)
-                    .unwrap_or_else(|| "us-west-2".to_string());
-                Box::new(AwsK8sCreator {
-                    region,
-                    ami_input: self.ami_input.context(error::InvalidSnafu {
-                        what: "amis.json is required. You may need to run `cargo make ami`",
-                    })?,
-                    migrate_starting_commit: self.migration_starting_commit,
-                })
-            }
-            "aws-ecs" => {
-                debug!("Using family 'aws-ecs'");
-                let aws_config = infra_config.aws.unwrap_or_default();
-                let region = aws_config
-                    .regions
-                    .front()
-                    .map(String::to_string)
-                    .unwrap_or_else(|| "us-west-2".to_string());
-                Box::new(AwsEcsCreator {
-                    region,
-                    ami_input: self.ami_input.context(error::InvalidSnafu {
-                        what: "amis.json is required. You may need to run `cargo make ami`",
-                    })?,
-                    migrate_starting_commit: self.migration_starting_commit,
-                })
-            }
-            "vmware-k8s" => {
-                debug!("Using family 'vmware-k8s'");
-                let aws_config = infra_config.aws.unwrap_or_default();
-                let region = aws_config
-                    .regions
-                    .front()
-                    .map(String::to_string)
-                    .unwrap_or_else(|| "us-west-2".to_string());
-                let vmware_config = infra_config.vmware.unwrap_or_default();
-                let dc_env = DatacenterBuilder::from_env();
-                let dc_common = vmware_config.common.as_ref();
-                let dc_config = self
-                    .datacenter
-                    .as_ref()
-                    .or_else(|| vmware_config.datacenters.first())
-                    .and_then(|datacenter| vmware_config.datacenter.get(datacenter));
-
-                let datacenter: Datacenter = dc_env
-                    .take_missing_from(dc_config)
-                    .take_missing_from(dc_common)
-                    .build()
-                    .context(error::DatacenterBuildSnafu)?;
-
-                let vsphere_secret = if !variant_config.secrets.contains_key("vsphereCredentials") {
-                    info!("Creating vSphere secret, 'vspherecreds'");
-                    let creds_env = DatacenterCredsBuilder::from_env();
-                    let creds_file = if let Some(ref creds_file) = *VMWARE_CREDS_PATH {
-                        if creds_file.exists() {
-                            info!("Using vSphere credentials file at {}", creds_file.display());
-                            DatacenterCredsConfig::from_path(creds_file)
-                                .context(error::VmwareConfigSnafu)?
-                        } else {
-                            info!(
-                                "vSphere credentials file not found, will attempt to use environment"
-                            );
-                            DatacenterCredsConfig::default()
-                        }
-                    } else {
-                        info!("Unable to determine vSphere credentials file location, will attempt to use environment");
-                        DatacenterCredsConfig::default()
-                    };
-                    let dc_creds = creds_file.datacenter.get(&datacenter.datacenter);
-                    let creds: DatacenterCreds = creds_env
-                        .take_missing_from(dc_creds)
-                        .build()
-                        .context(error::CredsBuildSnafu)?;
-
-                    let secret_name =
-                        SecretName::new("vspherecreds").context(error::SecretNameSnafu {
-                            secret_name: "vspherecreds",
-                        })?;
-                    client
-                        .create_secret(
-                            &secret_name,
-                            vec![
-                                ("username".to_string(), creds.username),
-                                ("password".to_string(), creds.password),
-                            ],
-                        )
-                        .await?;
-                    Some(("vsphereCredentials".to_string(), secret_name))
-                } else {
-                    None
-                };
-
-                let mgmt_cluster_kubeconfig =
-                    self.mgmt_cluster_kubeconfig.context(error::InvalidSnafu {
-                        what: "A management cluster kubeconfig is required for VMware testing",
-                    })?;
-                let encoded_kubeconfig = base64::encode(
-                    read_to_string(&mgmt_cluster_kubeconfig).context(error::FileSnafu {
-                        path: mgmt_cluster_kubeconfig,
-                    })?,
-                );
-
-                Box::new(VmwareK8sCreator {
-                    region,
-                    ova_name: self.ova_name.context(error::InvalidSnafu {
-                        what: "An OVA name is required for VMware testing.",
-                    })?,
-                    datacenter,
-                    encoded_mgmt_cluster_kubeconfig: encoded_kubeconfig,
-                    creds: vsphere_secret,
-                })
-            }
-            "metal-k8s" => {
-                debug!("Using family 'metal-k8s'");
-                let aws_config = infra_config.aws.unwrap_or_default();
-                let region = aws_config
-                    .regions
-                    .front()
-                    .map(String::to_string)
-                    .unwrap_or_else(|| "us-west-2".to_string());
-
-                let mgmt_cluster_kubeconfig =
-                    self.mgmt_cluster_kubeconfig.context(error::InvalidSnafu {
-                        what: "A management cluster kubeconfig is required for metal testing",
-                    })?;
-                let encoded_kubeconfig = base64::encode(
-                    read_to_string(&mgmt_cluster_kubeconfig).context(error::FileSnafu {
-                        path: mgmt_cluster_kubeconfig,
-                    })?,
-                );
-                Box::new(MetalK8sCreator {
-                    region,
-                    encoded_mgmt_cluster_kubeconfig: encoded_kubeconfig,
-                    image_name: self.image_name.context(error::InvalidSnafu {
-                        what: "The image name is required for Bare Metal testing. This can be set with `BUILDSYS_NAME_FULL`.",
-                    })?,
-                })
-            }
-            unsupported => {
-                return Err(error::Error::Unsupported {
-                    what: unsupported.to_string(),
-                })
-            }
-        };
-
-        let crd_input = CrdInput {
-            client: &client,
-            arch: self.arch,
-            variant,
-            build_id: self.build_id,
-            config: variant_config,
-            repo_config,
-            starting_version: self.migration_starting_version,
-            migrate_to_version: self.migration_target_version,
-            starting_image_id: self.starting_image_id,
-            test_type: resolved_test_type.clone(),
-            test_flavor: self.test_flavor.to_string(),
-            images,
-            tests_directory: self.tests_directory,
-        };
-
-        let crds = match &resolved_test_type {
-            TestType::Known(resolved_test_type) => {
-                crd_creator
-                    .create_crds(resolved_test_type, &crd_input)
-                    .await?
-            }
-            TestType::Custom(resolved_test_type) => {
-                crd_creator
-                    .create_custom_crds(
-                        resolved_test_type,
-                        &crd_input,
-                        self.custom_crd_template.to_owned(),
-                    )
-                    .await?
-            }
-        };
-
-        debug!("Adding crds to testsys cluster");
-        for crd in crds {
-            let crd = client.create_object(crd).await?;
-            info!("Successfully added '{}'", crd.name().unwrap());
-        }
-
-        Ok(())
-    }
-}
-
-fn parse_key_val(s: &str) -> Result<(String, SecretName)> {
-    let mut iter = s.splitn(2, '=');
-    let key = iter.next().context(error::InvalidSnafu {
-        what: "Key is missing",
-    })?;
-    let value = iter.next().context(error::InvalidSnafu {
-        what: "Value is missing",
-    })?;
-    Ok((
-        key.to_string(),
-        SecretName::new(value).context(error::SecretNameSnafu { secret_name: value })?,
-    ))
-}
-
-fn parse_workloads(s: &str) -> Result<(String, String)> {
-    let mut iter = s.splitn(2, '=');
-    let key = iter.next().context(error::InvalidSnafu {
-        what: "Key is missing",
-    })?;
-    let value = iter.next().context(error::InvalidSnafu {
-        what: "Value is missing",
-    })?;
-    Ok((key.to_string(), value.to_string()))
-}
-
-#[derive(Debug, Serialize, Deserialize, Clone)]
-#[serde(rename_all = "lowercase")]
-pub enum KnownTestType {
-    /// Conformance testing is a full integration test that asserts that Bottlerocket is working
-    /// for customer workloads. For k8s variants, for example, this will run the full suite of
-    /// sonobuoy conformance tests.
-    Conformance,
-    /// Run a quick test that ensures a basic workload can run on Bottlerocket. For example, on k8s
-    /// variants this will run sonobuoy in "quick" mode. For ECS variants, this will run a simple
-    /// ECS task.
-    Quick,
-    /// Migration testing ensures that all Bottlerocket migrations work as expected. Instances will
-    /// be created at the starting version, migrated to the target version and back to the starting
-    /// version with validation testing.
-    Migration,
-    /// Workload testing is used to test specific workloads on a set of Bottlerocket nodes.
-    Workload,
-}
-
-/// If a test type is one that is supported by TestSys it will be created as `Known(KnownTestType)`.
-/// All other test types will be stored as `Custom(<test-type>)`.
-#[derive(Debug, Serialize, Deserialize, Clone)]
-#[serde(untagged)]
-pub(crate) enum TestType {
-    Known(KnownTestType),
-    Custom(String),
-}
-
-derive_fromstr_from_deserialize!(TestType);
-derive_display_from_serialize!(TestType);
-derive_display_from_serialize!(KnownTestType);
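
Because `TestType` is `#[serde(untagged)]` and `KnownTestType` is lowercase-renamed, any string that is not one of the four known names falls through to `TestType::Custom`, which is why the `expect` on `TestType::from_str` above can never fire. A sketch of that behavior via the derived `FromStr`:

    use std::str::FromStr;

    fn classify() {
        assert!(matches!(
            TestType::from_str("quick"),
            Ok(TestType::Known(KnownTestType::Quick))
        ));
        // Unrecognized names are captured rather than rejected.
        assert!(matches!(
            TestType::from_str("my-special-test"),
            Ok(TestType::Custom(_))
        ));
    }
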
-    /// ECS resource agent URI. If not provided the latest released resource agent will be used.
-    #[arg(
-        long = "ecs-resource-agent-image",
-        env = "TESTSYS_ECS_RESOURCE_AGENT_IMAGE"
-    )]
-    pub(crate) ecs_resource: Option<String>,
-
-    /// vSphere cluster resource agent URI. If not provided the latest released resource agent will be used.
-    #[arg(
-        long = "vsphere-k8s-cluster-resource-agent-image",
-        env = "TESTSYS_VSPHERE_K8S_CLUSTER_RESOURCE_AGENT_IMAGE"
-    )]
-    pub(crate) vsphere_k8s_cluster_resource: Option<String>,
-
-    /// Bare Metal cluster resource agent URI. If not provided the latest released resource agent will be used.
-    #[arg(
-        long = "metal-k8s-cluster-resource-agent-image",
-        env = "TESTSYS_METAL_K8S_CLUSTER_RESOURCE_AGENT_IMAGE"
-    )]
-    pub(crate) metal_k8s_cluster_resource: Option<String>,
-
-    /// EC2 resource agent URI. If not provided the latest released resource agent will be used.
-    #[arg(
-        long = "ec2-resource-agent-image",
-        env = "TESTSYS_EC2_RESOURCE_AGENT_IMAGE"
-    )]
-    pub(crate) ec2_resource: Option<String>,
-
-    /// EC2 Karpenter resource agent URI. If not provided the latest released resource agent will be used.
-    #[arg(
-        long = "ec2-karpenter-resource-agent-image",
-        env = "TESTSYS_EC2_KARPENTER_RESOURCE_AGENT_IMAGE"
-    )]
-    pub(crate) ec2_karpenter_resource: Option<String>,
-
-    /// vSphere VM resource agent URI. If not provided the latest released resource agent will be used.
-    #[arg(
-        long = "vsphere-vm-resource-agent-image",
-        env = "TESTSYS_VSPHERE_VM_RESOURCE_AGENT_IMAGE"
-    )]
-    pub(crate) vsphere_vm_resource: Option<String>,
-
-    /// Sonobuoy test agent URI. If not provided the latest released test agent will be used.
-    #[arg(
-        long = "sonobuoy-test-agent-image",
-        env = "TESTSYS_SONOBUOY_TEST_AGENT_IMAGE"
-    )]
-    pub(crate) sonobuoy_test: Option<String>,
-
-    /// ECS test agent URI. If not provided the latest released test agent will be used.
-    #[arg(long = "ecs-test-agent-image", env = "TESTSYS_ECS_TEST_AGENT_IMAGE")]
-    pub(crate) ecs_test: Option<String>,
-
-    /// Migration test agent URI. If not provided the latest released test agent will be used.
-    #[arg(
-        long = "migration-test-agent-image",
-        env = "TESTSYS_MIGRATION_TEST_AGENT_IMAGE"
-    )]
-    pub(crate) migration_test: Option<String>,
-
-    /// K8s workload agent URI. If not provided the latest released test agent will be used.
-    #[arg(
-        long = "k8s-workload-agent-image",
-        env = "TESTSYS_K8S_WORKLOAD_AGENT_IMAGE"
-    )]
-    pub(crate) k8s_workload: Option<String>,
-
-    /// ECS workload agent URI. If not provided the latest released test agent will be used.
-    #[arg(
-        long = "ecs-workload-agent-image",
-        env = "TESTSYS_ECS_WORKLOAD_AGENT_IMAGE"
-    )]
-    pub(crate) ecs_workload: Option<String>,
-
-    /// TestSys controller URI. If not provided the latest released controller will be used.
-    #[arg(long = "controller-image", env = "TESTSYS_CONTROLLER_IMAGE")]
-    pub(crate) controller_uri: Option<String>,
-
-    /// Images pull secret. This is the name of a Kubernetes secret that will be used to
-    /// pull the container image from a private registry. For example, if you created a pull secret
-    /// with `kubectl create secret docker-registry regcred` then you would pass
-    /// `--images-pull-secret regcred`.
-    #[arg(long = "images-pull-secret", env = "TESTSYS_IMAGES_PULL_SECRET")]
-    pub(crate) secret: Option<String>,
-}
-
-impl From<TestsysImages> for testsys_config::TestsysImages {
-    fn from(val: TestsysImages) -> Self {
-        testsys_config::TestsysImages {
-            eks_resource_agent_image: val.eks_resource,
-            ecs_resource_agent_image: val.ecs_resource,
-            vsphere_k8s_cluster_resource_agent_image: val.vsphere_k8s_cluster_resource,
-            metal_k8s_cluster_resource_agent_image: val.metal_k8s_cluster_resource,
-            ec2_resource_agent_image: val.ec2_resource,
-            ec2_karpenter_resource_agent_image: val.ec2_karpenter_resource,
-            vsphere_vm_resource_agent_image: val.vsphere_vm_resource,
-            sonobuoy_test_agent_image: val.sonobuoy_test,
-            ecs_test_agent_image: val.ecs_test,
-            migration_test_agent_image: val.migration_test,
-            k8s_workload_agent_image: val.k8s_workload,
-            ecs_workload_agent_image: val.ecs_workload,
-            controller_image: val.controller_uri,
-            testsys_agent_pull_secret: val.secret,
-        }
-    }
-}
diff --git a/tools/testsys/src/secret.rs b/tools/testsys/src/secret.rs
deleted file mode 100644
index 6343c163dbb..00000000000
--- a/tools/testsys/src/secret.rs
+++ /dev/null
@@ -1,118 +0,0 @@
-use crate::error::{self, Result};
-use clap::Parser;
-use snafu::OptionExt;
-use testsys_model::test_manager::TestManager;
-use testsys_model::SecretName;
-
-/// Add a testsys object to the testsys cluster.
-#[derive(Debug, Parser)]
-pub(crate) struct Add {
-    #[command(subcommand)]
-    command: AddCommand,
-}
-
-#[derive(Debug, Parser)]
-enum AddCommand {
-    /// Add a secret to the testsys cluster.
-    Secret(AddSecret),
-}
-
-impl Add {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        match self.command {
-            AddCommand::Secret(add_secret) => add_secret.run(client).await,
-        }
-    }
-}
-
-/// Add a secret to the cluster.
-#[derive(Debug, Parser)]
-pub(crate) struct AddSecret {
-    #[command(subcommand)]
-    command: Command,
-}
-
-#[derive(Debug, Parser)]
-enum Command {
-    /// Create a secret for image pulls.
-    Image(AddSecretImage),
-    /// Create a secret from key value pairs.
-    Map(AddSecretMap),
-}
-
-impl AddSecret {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        match self.command {
-            Command::Image(add_secret_image) => add_secret_image.run(client).await,
-            Command::Map(add_secret_map) => add_secret_map.run(client).await,
-        }
-    }
-}
-
-/// Add a `Secret` with key value pairs.
-#[derive(Debug, Parser)]
-pub(crate) struct AddSecretMap {
-    /// Name of the secret
-    #[arg(short, long)]
-    name: SecretName,
-
-    /// Key value pairs for secrets. (Key=value)
-    #[arg(value_parser = parse_key_val)]
-    args: Vec<(String, String)>,
-}
-
-impl AddSecretMap {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        client.create_secret(&self.name, self.args).await?;
-        println!("Successfully added '{}' to secrets.", self.name);
-        Ok(())
-    }
-}
-
-fn parse_key_val(s: &str) -> Result<(String, String)> {
-    let mut iter = s.splitn(2, '=');
-    let key = iter.next().context(error::InvalidSnafu {
-        what: "Key is missing",
-    })?;
-    let value = iter.next().context(error::InvalidSnafu {
-        what: "Value is missing",
-    })?;
-    Ok((key.to_string(), value.to_string()))
-}
-
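Reviewer note: `run.rs` and `secret.rs` each carry a copy of this `Key=value` parsing, and its one subtlety is easy to miss: `splitn(2, '=')` splits on the first `=` only. A minimal standalone sketch of the behavior, with the snafu error plumbing elided (`split_key_val` is a hypothetical stand-in for the deleted `parse_key_val`/`parse_workloads` helpers):

```rust
// Stand-in for the deleted `parse_key_val`: split a `Key=value` argument
// on the FIRST '=' only, so values may themselves contain '='.
fn split_key_val(s: &str) -> Option<(String, String)> {
    let mut iter = s.splitn(2, '=');
    let key = iter.next()?; // absent -> "Key is missing" in the real code
    let value = iter.next()?; // absent -> "Value is missing" in the real code
    Some((key.to_string(), value.to_string()))
}

fn main() {
    // A value containing '=' survives intact.
    assert_eq!(
        split_key_val("token=abc=123"),
        Some(("token".to_string(), "abc=123".to_string()))
    );
    // No '=' at all: the value side is missing.
    assert_eq!(split_key_val("just-a-key"), None);
}
```

This first-`=` split is what lets an invocation along the lines of `add secret map --name mysecret token=abc=123` pass an `=`-bearing value without extra quoting.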
-/// Add a secret to the testsys cluster for image pulls.
-#[derive(Debug, Parser)]
-pub(crate) struct AddSecretImage {
-    /// Controller image pull username
-    #[arg(long, short = 'u')]
-    pull_username: String,
-
-    /// Controller image pull password
-    #[arg(long, short = 'p')]
-    pull_password: String,
-
-    /// Image uri
-    #[arg(long = "image-uri", short)]
-    image_uri: String,
-
-    /// Name of the secret
-    #[arg(long, short = 'n')]
-    secret_name: String,
-}
-
-impl AddSecretImage {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        client
-            .create_image_pull_secret(
-                &self.secret_name,
-                &self.pull_username,
-                &self.pull_password,
-                &self.image_uri,
-            )
-            .await?;
-
-        println!("The secret was added.");
-
-        Ok(())
-    }
-}
diff --git a/tools/testsys/src/sonobuoy.rs b/tools/testsys/src/sonobuoy.rs
deleted file mode 100644
index d3288442cc7..00000000000
--- a/tools/testsys/src/sonobuoy.rs
+++ /dev/null
@@ -1,172 +0,0 @@
-use crate::crds::TestInput;
-use crate::error::{self, Result};
-use crate::run::KnownTestType;
-use bottlerocket_types::agent_config::{
-    SonobuoyConfig, SonobuoyMode, WorkloadConfig, WorkloadTest,
-};
-use maplit::btreemap;
-use snafu::ResultExt;
-use std::fmt::Display;
-use testsys_model::Test;
-
-/// Create a Sonobuoy CRD for K8s conformance and quick testing.
-pub(crate) fn sonobuoy_crd(test_input: TestInput) -> Result<Test> {
-    let cluster_resource_name = test_input
-        .cluster_crd_name
-        .as_ref()
-        .expect("A cluster name is required for sonobuoy testing");
-    let bottlerocket_resource_name = test_input.bottlerocket_crd_name;
-    let sonobuoy_mode = match test_input.test_type {
-        KnownTestType::Conformance => SonobuoyMode::CertifiedConformance,
-        KnownTestType::Quick | KnownTestType::Migration | KnownTestType::Workload => {
-            SonobuoyMode::Quick
-        }
-    };
-
-    let labels = test_input.crd_input.labels(btreemap! {
-        "testsys/type".to_string() => test_input.test_type.to_string(),
-        "testsys/flavor".to_string() => test_input.crd_input.test_flavor.clone(),
-        "testsys/cluster".to_string() => cluster_resource_name.to_string(),
-    });
-
-    SonobuoyConfig::builder()
-        .set_resources(Some(bottlerocket_resource_name.iter().cloned().collect()))
-        .resources(cluster_resource_name)
-        .set_depends_on(Some(test_input.prev_tests))
-        .set_retries(Some(5))
-        .image(
-            test_input
-                .crd_input
-                .images
-                .sonobuoy_test_agent_image
-                .to_owned()
-                .expect("The default Sonobuoy testing image is missing"),
-        )
-        .set_image_pull_secret(
-            test_input
-                .crd_input
-                .images
-                .testsys_agent_pull_secret
-                .to_owned(),
-        )
-        .keep_running(
-            test_input
-                .crd_input
-                .config
-                .dev
-                .keep_tests_running
-                .unwrap_or(false),
-        )
-        .kubeconfig_base64_template(cluster_resource_name, "encodedKubeconfig")
-        .plugin("e2e")
-        .mode(sonobuoy_mode)
-        .e2e_repo_config_base64(
-            test_input
-                .crd_input
-                .config
-                .conformance_registry
-                .to_owned()
-                .map(e2e_repo_config_base64),
-        )
-        .sonobuoy_image(test_input.crd_input.config.sonobuoy_image.to_owned())
-        .kube_conformance_image(test_input.crd_input.config.conformance_image.to_owned())
-        .assume_role(test_input.crd_input.config.agent_role.to_owned())
-        .set_secrets(Some(test_input.crd_input.config.secrets.to_owned()))
-        .set_labels(Some(labels))
-        .build(format!(
-            "{}-{}",
-            cluster_resource_name,
-            test_input
-                .name_suffix
-                .unwrap_or(test_input.crd_input.test_flavor.as_str())
-        ))
-        .context(error::BuildSnafu {
-            what: "Sonobuoy CRD",
-        })
-}
-
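One behavior in `sonobuoy_crd` worth calling out: migration and workload runs also create a Sonobuoy test, but always in quick mode. A sketch of that mode selection using stand-in enums (the real `SonobuoyMode` and `KnownTestType` live in the crates imported above):

```rust
// Stand-ins for SonobuoyMode and KnownTestType, to show the mapping only:
// just the `Conformance` test type requests the certified conformance
// suite; every other known type falls back to Sonobuoy's lightweight
// "quick" smoke test of the cluster.
#[derive(Debug, PartialEq)]
enum Mode {
    CertifiedConformance,
    Quick,
}

enum TestType {
    Conformance,
    Quick,
    Migration,
    Workload,
}

fn mode_for(test_type: &TestType) -> Mode {
    match test_type {
        TestType::Conformance => Mode::CertifiedConformance,
        TestType::Quick | TestType::Migration | TestType::Workload => Mode::Quick,
    }
}

fn main() {
    // Migration testing validates the cluster with a quick run,
    // not a full conformance pass.
    assert_eq!(mode_for(&TestType::Migration), Mode::Quick);
}
```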
-/// Create a workload CRD for K8s testing.
-pub(crate) fn workload_crd(test_input: TestInput) -> Result<Test> {
-    let cluster_resource_name = test_input
-        .cluster_crd_name
-        .as_ref()
-        .expect("A cluster name is required for migrations");
-    let bottlerocket_resource_name = test_input
-        .bottlerocket_crd_name
-        .as_ref()
-        .expect("A cluster name is required for migrations");
-
-    let labels = test_input.crd_input.labels(btreemap! {
-        "testsys/type".to_string() => test_input.test_type.to_string(),
-        "testsys/cluster".to_string() => cluster_resource_name.to_string(),
-    });
-    let plugins: Vec<_> = test_input
-        .crd_input
-        .config
-        .workloads
-        .iter()
-        .map(|(name, image)| WorkloadTest {
-            name: name.to_string(),
-            image: image.to_string(),
-            ..Default::default()
-        })
-        .collect();
-    if plugins.is_empty() {
-        return Err(error::Error::Invalid {
-            what: "There were no plugins specified in the workload test.
-            Workloads can be specified in `Test.toml` or via the command line."
-                .to_string(),
-        });
-    }
-
-    WorkloadConfig::builder()
-        .resources(bottlerocket_resource_name)
-        .resources(cluster_resource_name)
-        .set_depends_on(Some(test_input.prev_tests))
-        .set_retries(Some(5))
-        .image(
-            test_input
-                .crd_input
-                .images
-                .k8s_workload_agent_image
-                .to_owned()
-                .expect("The default K8s workload testing image is missing"),
-        )
-        .set_image_pull_secret(
-            test_input
-                .crd_input
-                .images
-                .testsys_agent_pull_secret
-                .to_owned(),
-        )
-        .keep_running(true)
-        .kubeconfig_base64_template(cluster_resource_name, "encodedKubeconfig")
-        .tests(plugins)
-        .set_secrets(Some(test_input.crd_input.config.secrets.to_owned()))
-        .set_labels(Some(labels))
-        .build(format!(
-            "{}{}",
-            cluster_resource_name,
-            test_input.name_suffix.unwrap_or("-test")
-        ))
-        .context(error::BuildSnafu {
-            what: "Workload CRD",
-        })
-}
-
-fn e2e_repo_config_base64<S>(e2e_registry: S) -> String
-where
-    S: Display,
-{
-    base64::encode(format!(
-        r#"buildImageRegistry: {e2e_registry}
-dockerGluster: {e2e_registry}
-dockerLibraryRegistry: {e2e_registry}
-e2eRegistry: {e2e_registry}
-e2eVolumeRegistry: {e2e_registry}
-gcRegistry: {e2e_registry}
-gcEtcdRegistry: {e2e_registry}
-promoterE2eRegistry: {e2e_registry}
-sigStorageRegistry: {e2e_registry}"#
-    ))
-}
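For anyone skimming past `e2e_repo_config_base64`: it redirects every Kubernetes e2e image source to one registry (e.g. a private mirror configured as `conformance_registry`) and base64-encodes the YAML so it can ride along in a CRD string field. A hypothetical illustration of the round trip, assuming the same pre-1.0 `base64` crate API the deleted file already uses:

```rust
// Sketch of the output for a hypothetical mirror registry (abridged key
// list; the real function emits nine keys, all pointing at the same
// registry).
fn main() {
    let e2e_registry = "registry.example.com"; // hypothetical mirror
    let yaml = format!(
        "buildImageRegistry: {e2e_registry}\n\
         dockerLibraryRegistry: {e2e_registry}\n\
         e2eRegistry: {e2e_registry}\n\
         gcRegistry: {e2e_registry}"
    );
    // `base64::encode` is the pre-1.0 free function used by the code above.
    println!("{}", base64::encode(yaml));
}
```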
diff --git a/tools/testsys/src/status.rs b/tools/testsys/src/status.rs
deleted file mode 100644
index 2aadcd99a49..00000000000
--- a/tools/testsys/src/status.rs
+++ /dev/null
@@ -1,128 +0,0 @@
-use crate::error::{self, Result};
-use clap::Parser;
-use log::{debug, info};
-use serde::Deserialize;
-use serde_plain::derive_fromstr_from_deserialize;
-use snafu::ResultExt;
-use testsys_model::test_manager::{CrdState, CrdType, SelectionParams, StatusColumn, TestManager};
-
-/// Check the status of testsys objects.
-#[derive(Debug, Parser)]
-pub(crate) struct Status {
-    /// Configure the output of the command (json, narrow, wide).
-    #[arg(long, short = 'o')]
-    output: Option<StatusOutput>,
-
-    /// Focus status on a particular arch
-    #[arg(long)]
-    arch: Option<String>,
-
-    /// Focus status on a particular variant
-    #[arg(long)]
-    variant: Option<String>,
-
-    /// Only show tests
-    #[arg(long)]
-    test: bool,
-
-    /// Only show passed tests
-    #[arg(long, conflicts_with_all=&["failed", "running"])]
-    passed: bool,
-
-    /// Only show failed tests
-    #[arg(long, conflicts_with_all=&["passed", "running"])]
-    failed: bool,
-
-    /// Only show CRDs that haven't finished
-    #[arg(long, conflicts_with_all=&["passed", "failed"])]
-    running: bool,
-}
-
-impl Status {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        let state = if self.running {
-            Some(CrdState::NotFinished)
-        } else if self.passed {
-            Some(CrdState::Passed)
-        } else if self.failed {
-            Some(CrdState::Failed)
-        } else {
-            None
-        };
-        let crd_type = self.test.then_some(CrdType::Test);
-        let mut labels = Vec::new();
-        if let Some(arch) = self.arch {
-            labels.push(format!("testsys/arch={}", arch))
-        };
-        if let Some(variant) = self.variant {
-            labels.push(format!("testsys/variant={}", variant))
-        };
-        let mut status = client
-            .status(&SelectionParams {
-                labels: Some(labels.join(",")),
-                state,
-                crd_type,
-                ..Default::default()
-            })
-            .await?;
-
-        status.add_column(StatusColumn::name());
-        status.add_column(StatusColumn::crd_type());
-        status.add_column(StatusColumn::state());
-        status.add_column(StatusColumn::passed());
-        status.add_column(StatusColumn::failed());
-        status.add_column(StatusColumn::skipped());
-
-        match self.output {
-            Some(StatusOutput::Json) => {
-                info!(
-                    "{}",
-                    serde_json::to_string_pretty(&status).context(error::SerdeJsonSnafu {
-                        what: "Could not create string from status."
-                    })?
-                );
-                return Ok(());
-            }
-            Some(StatusOutput::Narrow) => (),
-            None => {
-                status.new_column("BUILD ID", |crd| {
-                    crd.labels()
-                        .get("testsys/build-id")
-                        .cloned()
-                        .into_iter()
-                        .collect()
-                });
-                status.add_column(StatusColumn::last_update());
-            }
-            Some(StatusOutput::Wide) => {
-                status.new_column("BUILD ID", |crd| {
-                    crd.labels()
-                        .get("testsys/build-id")
-                        .cloned()
-                        .into_iter()
-                        .collect()
-                });
-                status.add_column(StatusColumn::last_update());
-            }
-        };
-
-        let (width, _) = term_size::dimensions().unwrap_or((80, 0));
-        debug!("Window width '{}'", width);
-        println!("{:width$}", status);
-
-        Ok(())
-    }
-}
-
-#[derive(Debug, Deserialize, Clone)]
-#[serde(rename_all = "kebab-case")]
-enum StatusOutput {
-    /// Output the status in json
-    Json,
-    /// Show minimal columns in the status table
-    Narrow,
-    /// Show all columns in the status table
-    Wide,
-}
-
-derive_fromstr_from_deserialize!(StatusOutput);
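The custom "BUILD ID" column above relies on a small idiom: each cell is a collection, and running an `Option` through `into_iter().collect()` turns a missing label into an empty cell instead of an error. A standalone sketch with a plain `BTreeMap` standing in for the CRD's label map:

```rust
use std::collections::BTreeMap;

// Sketch of the "BUILD ID" cell logic: Option -> iterator -> collect means
// a CRD without a `testsys/build-id` label renders a blank cell.
fn main() {
    let labels = BTreeMap::from([(
        "testsys/build-id".to_string(),
        "abc123".to_string(),
    )]);
    let cell: Vec<String> = labels
        .get("testsys/build-id")
        .cloned()
        .into_iter()
        .collect();
    assert_eq!(cell, vec!["abc123".to_string()]);

    let no_labels: BTreeMap<String, String> = BTreeMap::new();
    let cell: Vec<String> = no_labels
        .get("testsys/build-id")
        .cloned()
        .into_iter()
        .collect();
    assert!(cell.is_empty()); // missing label -> blank cell in the table
}
```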
diff --git a/tools/testsys/src/uninstall.rs b/tools/testsys/src/uninstall.rs
deleted file mode 100644
index 5a55f0fcb4c..00000000000
--- a/tools/testsys/src/uninstall.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use crate::error::Result;
-use clap::Parser;
-use log::{info, trace};
-use testsys_model::test_manager::TestManager;
-
-/// The uninstall subcommand is responsible for removing all of the components for testsys in
-/// a k8s cluster. This is completed by removing the `testsys-bottlerocket-aws` namespace.
-#[derive(Debug, Parser)]
-pub(crate) struct Uninstall {}
-
-impl Uninstall {
-    pub(crate) async fn run(self, client: TestManager) -> Result<()> {
-        trace!("Uninstalling testsys");
-
-        client.uninstall().await?;
-
-        info!("testsys components were successfully uninstalled.");
-
-        Ok(())
-    }
-}
diff --git a/tools/testsys/src/vmware_k8s.rs b/tools/testsys/src/vmware_k8s.rs
deleted file mode 100644
index 43d26f77f19..00000000000
--- a/tools/testsys/src/vmware_k8s.rs
+++ /dev/null
@@ -1,299 +0,0 @@
-use crate::crds::{
-    BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput,
-    TestInput,
-};
-use crate::error::{self, Result};
-use crate::migration::migration_crd;
-use crate::sonobuoy::{sonobuoy_crd, workload_crd};
-use bottlerocket_types::agent_config::{
-    CreationPolicy, CustomUserData, K8sVersion, VSphereK8sClusterConfig, VSphereK8sClusterInfo,
-    VSphereVmConfig,
-};
-use maplit::btreemap;
-use pubsys_config::vmware::Datacenter;
-use snafu::{OptionExt, ResultExt};
-use std::collections::BTreeMap;
-use std::iter::repeat_with;
-use std::str::FromStr;
-use testsys_model::{Crd, DestructionPolicy, SecretName};
-
-/// A `CrdCreator` responsible for creating crds related to `vmware-k8s` variants.
-pub(crate) struct VmwareK8sCreator {
-    pub(crate) region: String,
-    pub(crate) datacenter: Datacenter,
-    pub(crate) creds: Option<(String, SecretName)>,
-    pub(crate) ova_name: String,
-    pub(crate) encoded_mgmt_cluster_kubeconfig: String,
-}
-
-#[async_trait::async_trait]
-impl CrdCreator for VmwareK8sCreator {
-    /// Use the provided OVA name for the image id.
-    async fn image_id(&self, _: &CrdInput) -> Result<String> {
-        Ok(self.ova_name.to_string())
-    }
-
-    /// Use standard naming conventions to predict the starting OVA.
-    async fn starting_image_id(&self, crd_input: &CrdInput) -> Result<String> {
-        Ok(format!(
-            "bottlerocket-{}-{}-{}.ova",
-            crd_input.variant,
-            crd_input.arch,
-            crd_input
-                .starting_version
-                .as_ref()
-                .context(error::InvalidSnafu {
-                    what: "The starting version must be provided for migration testing"
-                })?
-        ))
-    }
-
-    /// Creates a vSphere K8s cluster CRD with the `cluster_name` in `cluster_input`.
-    async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result<CreateCrdOutput> {
-        let control_plane_endpoint = cluster_input
-            .crd_input
-            .config
-            .control_plane_endpoint
-            .as_ref()
-            .context(error::InvalidSnafu {
-                what: "The control plane endpoint is required for VMware cluster creation.",
-            })?;
-        let labels = cluster_input.crd_input.labels(btreemap! {
-            "testsys/type".to_string() => "cluster".to_string(),
-            "testsys/cluster".to_string() => cluster_input.cluster_name.to_string(),
-            "testsys/controlPlaneEndpoint".to_string() => control_plane_endpoint.to_string(),
-        });
-
-        // Check if the cluster already has a CRD
-        if let Some(cluster_crd) = cluster_input
-            .crd_input
-            .existing_crds(
-                &labels,
-                &[
-                    "testsys/cluster",
-                    "testsys/type",
-                    "testsys/controlPlaneEndpoint",
-                ],
-            )
-            .await?
-            .pop()
-        {
-            return Ok(CreateCrdOutput::ExistingCrd(cluster_crd));
-        }
-
-        // Check if an existing cluster is using this endpoint
-        let existing_clusters = cluster_input
-            .crd_input
-            .existing_crds(&labels, &["testsys/type", "testsys/controlPlaneEndpoint"])
-            .await?;
-
-        let cluster_version =
-            K8sVersion::from_str(cluster_input.crd_input.variant.version().context(
-                error::MissingSnafu {
-                    item: "K8s version".to_string(),
-                    what: "aws-k8s variant".to_string(),
-                },
-            )?)
-            .map_err(|_| error::Error::K8sVersion {
-                version: cluster_input.crd_input.variant.to_string(),
-            })?;
-
-        let vsphere_k8s_crd = VSphereK8sClusterConfig::builder()
-            .name(cluster_input.cluster_name)
-            .set_labels(Some(labels))
-            .control_plane_endpoint_ip(control_plane_endpoint)
-            .creation_policy(CreationPolicy::IfNotExists)
-            .version(cluster_version)
-            .ova_name(self.image_id(cluster_input.crd_input).await?)
-            .tuf_repo(
-                cluster_input
-                    .crd_input
-                    .tuf_repo_config()
-                    .context(error::InvalidSnafu {
-                        what: "TUF repo information is required for VMware cluster creation.",
-                    })?,
-            )
-            .vcenter_host_url(&self.datacenter.vsphere_url)
-            .vcenter_datacenter(&self.datacenter.datacenter)
-            .vcenter_datastore(&self.datacenter.datastore)
-            .vcenter_network(&self.datacenter.network)
-            .vcenter_resource_pool(&self.datacenter.resource_pool)
-            .vcenter_workload_folder(&self.datacenter.folder)
-            .mgmt_cluster_kubeconfig_base64(&self.encoded_mgmt_cluster_kubeconfig)
-            .eks_a_release_manifest_url(
-                cluster_input
-                    .crd_input
-                    .config
-                    .dev
-                    .eks_a_release_manifest_url
-                    .clone(),
-            )
-            .set_conflicts_with(Some(existing_clusters))
-            .destruction_policy(
-                cluster_input
-                    .crd_input
-                    .config
-                    .dev
-                    .cluster_destruction_policy
-                    .to_owned()
-                    .unwrap_or(DestructionPolicy::OnTestSuccess),
-            )
-            .image(
-                cluster_input
-                    .crd_input
-                    .images
-                    .vsphere_k8s_cluster_resource_agent_image
-                    .as_ref()
-                    .expect(
-                        "The default vSphere K8s cluster resource provider image URI is missing.",
-                    ),
-            )
-            .set_image_pull_secret(
-                cluster_input
-                    .crd_input
-                    .images
-                    .testsys_agent_pull_secret
-                    .to_owned(),
-            )
-            .set_secrets(Some(
-                cluster_input
-                    .crd_input
-                    .config
-                    .secrets
-                    .clone()
-                    .into_iter()
-                    .chain(self.creds.clone())
-                    .collect(),
-            ))
-            .privileged(true)
-            .build(cluster_input.cluster_name)
-            .context(error::BuildSnafu {
-                what: "vSphere K8s cluster CRD",
-            })?;
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(
-            vsphere_k8s_crd,
-        ))))
-    }
-
-    /// Create a vSphere VM provider CRD to launch Bottlerocket VMs on the cluster created by
-    /// `cluster_crd`.
-    async fn bottlerocket_crd<'a>(
-        &self,
-        bottlerocket_input: BottlerocketInput<'a>,
-    ) -> Result<CreateCrdOutput> {
-        let cluster_name = bottlerocket_input
-            .cluster_crd_name
-            .as_ref()
-            .expect("A vSphere K8s cluster provider is required");
-        let labels = bottlerocket_input.crd_input.labels(btreemap! {
-            "testsys/type".to_string() => "vms".to_string(),
-            "testsys/cluster".to_string() => cluster_name.to_string(),
-        });
-
-        // Check if other VMs are using this cluster
-        let existing_clusters = bottlerocket_input
-            .crd_input
-            .existing_crds(&labels, &["testsys/type", "testsys/cluster"])
-            .await?;
-
-        let suffix: String = repeat_with(fastrand::lowercase).take(4).collect();
-        let vsphere_vm_crd = VSphereVmConfig::builder()
-            .ova_name(bottlerocket_input.image_id)
-            .tuf_repo(bottlerocket_input.crd_input.tuf_repo_config().context(
-                error::InvalidSnafu {
-                    what: "TUF repo information is required for Bottlerocket vSphere VM creation.",
-                },
-            )?)
-            .vcenter_host_url(&self.datacenter.vsphere_url)
-            .vcenter_datacenter(&self.datacenter.datacenter)
-            .vcenter_datastore(&self.datacenter.datastore)
-            .vcenter_network(&self.datacenter.network)
-            .vcenter_resource_pool(&self.datacenter.resource_pool)
-            .vcenter_workload_folder(&self.datacenter.folder)
-            .cluster(VSphereK8sClusterInfo {
-                name: format!("${{{}.clusterName}}", cluster_name),
-                control_plane_endpoint_ip: format!("${{{}.endpoint}}", cluster_name),
-                kubeconfig_base64: format!("${{{}.encodedKubeconfig}}", cluster_name),
-            })
-            .custom_user_data(
-                bottlerocket_input
-                    .crd_input
-                    .encoded_userdata()?
-                    .map(|encoded_userdata| CustomUserData::Merge { encoded_userdata }),
-            )
-            .assume_role(bottlerocket_input.crd_input.config.agent_role.clone())
-            .set_labels(Some(labels))
-            .set_conflicts_with(Some(existing_clusters))
-            .destruction_policy(
-                bottlerocket_input
-                    .crd_input
-                    .config
-                    .dev
-                    .bottlerocket_destruction_policy
-                    .to_owned()
-                    .unwrap_or(DestructionPolicy::OnTestSuccess),
-            )
-            .image(
-                bottlerocket_input
-                    .crd_input
-                    .images
-                    .vsphere_vm_resource_agent_image
-                    .as_ref()
-                    .expect("The default vSphere VM resource provider image URI is missing."),
-            )
-            .set_image_pull_secret(
-                bottlerocket_input
-                    .crd_input
-                    .images
-                    .testsys_agent_pull_secret
-                    .to_owned(),
-            )
-            .set_secrets(Some(
-                bottlerocket_input
-                    .crd_input
-                    .config
-                    .secrets
-                    .clone()
-                    .into_iter()
-                    .chain(self.creds.clone())
-                    .collect(),
-            ))
-            .depends_on(cluster_name)
-            .build(format!("{}-vms-{}", cluster_name, suffix))
-            .context(error::BuildSnafu {
-                what: "vSphere VM CRD",
-            })?;
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(
-            vsphere_vm_crd,
-        ))))
-    }
-
-    async fn migration_crd<'a>(
-        &self,
-        migration_input: MigrationInput<'a>,
-    ) -> Result<CreateCrdOutput> {
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd(
-            migration_input,
-            // Let the migration test's SSM RunDocuments and RunCommand invocations happen in 'us-west-2'
-            // FIXME: Do we need to allow this to be configurable?
-            Some("us-west-2".to_string()),
-            "instanceIds",
-        )?))))
-    }
-
-    async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result<CreateCrdOutput> {
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(sonobuoy_crd(
-            test_input,
-        )?))))
-    }
-
-    async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result<CreateCrdOutput> {
-        Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd(
-            test_input,
-        )?))))
-    }
-
-    fn additional_fields(&self, _test_type: &str) -> BTreeMap<String, String> {
-        btreemap! {"region".to_string() => self.region.clone()}
-    }
-}
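A note on the `VSphereK8sClusterInfo` fields in `bottlerocket_crd`: the values are not literals but `${resource.field}` placeholders which, as I read the TestSys resource model, the controller resolves from the named cluster resource once it exists (hence the `.depends_on(cluster_name)` ordering). The triple-brace `format!` calls are just brace escaping:

```rust
// Sketch of the brace escaping in the templated fields above: `{{`/`}}`
// emit literal braces and the inner `{}` interpolates the resource name,
// producing a `${<name>.<field>}` placeholder. (That the controller
// substitutes these at runtime is my reading, not shown in this diff.)
fn main() {
    let cluster_name = "my-vsphere-cluster"; // hypothetical CRD name
    let kubeconfig_ref = format!("${{{}.encodedKubeconfig}}", cluster_name);
    assert_eq!(kubeconfig_ref, "${my-vsphere-cluster.encodedKubeconfig}");
}
```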