From 6a3020e25f687460c4937ce529d4390bdd645f2e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 15 Oct 2024 12:34:12 -0700 Subject: [PATCH 01/16] pin qorb's revision --- Cargo.lock | 31 +++++++++++++++++++++++++++---- Cargo.toml | 2 +- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 29f5a982d0..7113211c91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4434,7 +4434,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "progenitor", - "qorb", + "qorb 0.0.2 (git+https://github.com/oxidecomputer/qorb?rev=88452176c7a9a573cf300d42a7c8147328224ce1)", "reqwest 0.12.8", "serde", "serde_json", @@ -5516,7 +5516,7 @@ dependencies = [ "pq-sys", "predicates", "pretty_assertions", - "qorb", + "qorb 0.0.2 (git+https://github.com/oxidecomputer/qorb?rev=88452176c7a9a573cf300d42a7c8147328224ce1)", "rand", "rcgen", "ref-cast", @@ -6620,7 +6620,7 @@ dependencies = [ "pretty_assertions", "progenitor-client", "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927)", - "qorb", + "qorb 0.0.2 (git+https://github.com/oxidecomputer/qorb?rev=88452176c7a9a573cf300d42a7c8147328224ce1)", "rand", "rcgen", "ref-cast", @@ -7055,7 +7055,7 @@ dependencies = [ "postgres-types", "predicates", "proc-macro2", - "qorb", + "qorb 0.0.2 (git+https://github.com/oxidecomputer/qorb?branch=master)", "quote", "regex", "regex-automata", @@ -8678,6 +8678,29 @@ dependencies = [ "tracing", ] +[[package]] +name = "qorb" +version = "0.0.2" +source = "git+https://github.com/oxidecomputer/qorb?rev=88452176c7a9a573cf300d42a7c8147328224ce1#88452176c7a9a573cf300d42a7c8147328224ce1" +dependencies = [ + "anyhow", + "async-trait", + "debug-ignore", + "derive-where", + "dropshot", + "futures", + "hickory-resolver", + "rand", + "schemars", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tokio-tungstenite 0.23.1", + "tracing", +] + [[package]] name = "quick-error" version = "1.2.3" 
diff --git a/Cargo.toml b/Cargo.toml index afa970aa36..07b321b12f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -518,7 +518,7 @@ propolis_api_types = { git = "https://github.com/oxidecomputer/propolis", rev = propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } proptest = "1.5.0" -qorb = { git = "https://github.com/oxidecomputer/qorb", branch = "master" } +qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1" } quote = "1.0" rand = "0.8.5" rand_core = "0.6.4" From b9bf692a0c56458512cced4b3414b297e61e7d8f Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 15 Oct 2024 12:41:30 -0700 Subject: [PATCH 02/16] hakariiiiiii --- Cargo.lock | 31 ++++--------------------------- workspace-hack/Cargo.toml | 4 ++-- 2 files changed, 6 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7113211c91..8ee177ce54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4434,7 +4434,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "progenitor", - "qorb 0.0.2 (git+https://github.com/oxidecomputer/qorb?rev=88452176c7a9a573cf300d42a7c8147328224ce1)", + "qorb", "reqwest 0.12.8", "serde", "serde_json", @@ -5516,7 +5516,7 @@ dependencies = [ "pq-sys", "predicates", "pretty_assertions", - "qorb 0.0.2 (git+https://github.com/oxidecomputer/qorb?rev=88452176c7a9a573cf300d42a7c8147328224ce1)", + "qorb", "rand", "rcgen", "ref-cast", @@ -6620,7 +6620,7 @@ dependencies = [ "pretty_assertions", "progenitor-client", "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927)", - "qorb 0.0.2 (git+https://github.com/oxidecomputer/qorb?rev=88452176c7a9a573cf300d42a7c8147328224ce1)", + "qorb", "rand", "rcgen", "ref-cast", @@ -7055,7 +7055,7 @@ dependencies = [ "postgres-types", 
"predicates", "proc-macro2", - "qorb 0.0.2 (git+https://github.com/oxidecomputer/qorb?branch=master)", + "qorb", "quote", "regex", "regex-automata", @@ -8655,29 +8655,6 @@ dependencies = [ "psl-types", ] -[[package]] -name = "qorb" -version = "0.0.2" -source = "git+https://github.com/oxidecomputer/qorb?branch=master#de6f7784790c813931042dcc98c84413ecf11826" -dependencies = [ - "anyhow", - "async-trait", - "debug-ignore", - "derive-where", - "dropshot", - "futures", - "hickory-resolver", - "rand", - "schemars", - "serde", - "serde_json", - "thiserror", - "tokio", - "tokio-stream", - "tokio-tungstenite 0.23.1", - "tracing", -] - [[package]] name = "qorb" version = "0.0.2" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index eee0bb6172..1c94facf33 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -87,7 +87,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { git = "https://github.com/oxidecomputer/qorb", branch = "master", features = ["qtop"] } +qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1", features = ["qtop"] } quote = { version = "1.0.37" } regex = { version = "1.11.0" } regex-automata = { version = "0.4.8", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } @@ -199,7 +199,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { git = "https://github.com/oxidecomputer/qorb", branch = "master", features = ["qtop"] } +qorb = { git = 
"https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1", features = ["qtop"] } quote = { version = "1.0.37" } regex = { version = "1.11.0" } regex-automata = { version = "0.4.8", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } From d338e9276a3cf7c7c4d13b1bba9a0572ea944cd3 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 15 Oct 2024 12:54:13 -0700 Subject: [PATCH 03/16] wip --- Cargo.lock | 727 ++++++++++++----------- Cargo.toml | 4 +- nexus/db-queries/src/db/datastore/mod.rs | 5 + nexus/db-queries/src/db/pool.rs | 7 + oximeter/db/Cargo.toml | 3 + oximeter/db/src/bin/oxdb/main.rs | 4 +- oximeter/db/src/native/block.rs | 295 +++++++-- oximeter/db/src/native/connection.rs | 18 +- oximeter/db/src/native/io/block.rs | 2 +- oximeter/db/src/native/io/column.rs | 178 +++++- oximeter/db/src/native/mod.rs | 12 + oximeter/db/src/shells/native.rs | 12 +- workspace-hack/Cargo.toml | 60 +- 13 files changed, 870 insertions(+), 457 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8ee177ce54..df2af29066 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,19 +4,13 @@ version = 3 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -68,7 +62,7 @@ dependencies = [ "getrandom", "once_cell", "version_check", - "zerocopy 0.7.34", + "zerocopy 0.7.35", ] [[package]] @@ -109,9 +103,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = 
"0.6.14" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -130,27 +124,27 @@ checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -213,15 +207,15 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = 
"7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "ascii-canvas" @@ -380,9 +374,9 @@ dependencies = [ [[package]] name = "async-process" -version = "2.2.4" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a07789659a4d385b79b18b9127fc27e1a59e1e89117c78c5ea3b806f016374" +checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ "async-channel 2.3.1", "async-io", @@ -395,7 +389,6 @@ dependencies = [ "futures-lite", "rustix", "tracing", - "windows-sys 0.59.0", ] [[package]] @@ -457,9 +450,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -468,9 +461,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", @@ -564,9 +557,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backoff" @@ -584,17 +577,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = 
"8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.2", - "object 0.32.2", + "miniz_oxide", + "object 0.36.5", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -635,7 +628,7 @@ checksum = "b10cf871f3ff2ce56432fddc2615ac7acc3aa22ca321f8fea800846fbb32f188" dependencies = [ "async-trait", "futures-util", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "tokio", ] @@ -681,9 +674,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.69.4" +version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags 2.6.0", "cexpr", @@ -725,9 +718,9 @@ checksum = "2d7e60934ceec538daadb9d8432424ed043a904d8e0243f3c6446bce549a46ac" [[package]] name = "bitfield-struct" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1657dce144574f921af10a92876a96f0ca05dd830900598d21d91c8e4cf78f74" +checksum = "adc0846593a56638b74e136a45610f9934c052e14761bebca6b092d5522599e3" dependencies = [ "proc-macro2", "quote", @@ -783,9 +776,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.1" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" dependencies = [ "arrayref", "arrayvec", @@ -793,7 +786,7 @@ dependencies = [ "cfg-if", "constant_time_eq", "memmap2", - "rayon", + "rayon-core", ] [[package]] @@ -907,9 +900,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" +checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" dependencies = [ "memchr", "regex-automata", @@ -1071,13 +1064,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.97" +version = "1.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" +checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" dependencies = [ "jobserver", "libc", - "once_cell", + "shlex", ] [[package]] @@ -1191,6 +1184,27 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "chrono-tz" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd6dd8046d00723a59a2f8c5f295c515b9bb9a331ee4f8f3d4dd49e428acd3b6" +dependencies = [ + "chrono", + "chrono-tz-build", + "phf", +] + +[[package]] +name = "chrono-tz-build" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94fea34d77a245229e7746bd2beb786cd2a896f306ff491fb8cecb3074b10a7" +dependencies = [ + "parse-zoneinfo", + "phf_codegen", +] + [[package]] name = "ciborium" version = "0.2.2" @@ -1231,9 +1245,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -1277,9 +1291,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "clickhouse-admin-api" @@ -1362,9 +1376,9 @@ 
dependencies = [ [[package]] name = "clipboard-win" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79f4473f5144e20d9aceaf2972478f06ddf687831eafeeb434fbaf0acc4144ad" +checksum = "15efe7a882b08f34e38556b14f2fb3daa98769d06c7f0c1b076dfd0d983bc892" dependencies = [ "error-code", ] @@ -1418,9 +1432,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "colored" @@ -1496,9 +1510,9 @@ dependencies = [ [[package]] name = "constant_time_eq" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" @@ -1546,15 +1560,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1593,9 +1607,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] @@ -1652,15 +1666,15 @@ dependencies = [ [[package]] name = "critical-section" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" +checksum = "f64009896348fc5af4222e9cf7d7d82a95a256c634ebcf61c53e4ea461422242" [[package]] name = "crossbeam-channel" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ "crossbeam-utils", ] @@ -1686,9 +1700,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crossterm" @@ -1700,7 +1714,7 @@ dependencies = [ "crossterm_winapi", "futures-core", "mio", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "rustix", "serde", "signal-hook", @@ -1910,9 +1924,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -1920,9 +1934,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", @@ -1934,9 +1948,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", @@ -2066,9 +2080,9 @@ dependencies = [ [[package]] name = "der_derive" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fe87ce4529967e0ba1dcf8450bab64d97dfd5010a6256187ffe2e43e6f0e049" +checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", @@ -2098,18 +2112,18 @@ dependencies = [ [[package]] name = "derive_builder" -version = "0.20.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" -version = "0.20.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" dependencies = [ "darling", "proc-macro2", @@ -2119,9 +2133,9 @@ dependencies = [ [[package]] name = "derive_builder_macro" -version = "0.20.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" +checksum = 
"ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", "syn 2.0.79", @@ -2209,7 +2223,7 @@ dependencies = [ [[package]] name = "diesel-dtrace" version = "0.3.0" -source = "git+https://github.com/oxidecomputer/diesel-dtrace?branch=main#8fcc2bb37c635598c39711d8034b14227c210096" +source = "git+https://github.com/oxidecomputer/diesel-dtrace?branch=main#f781d0dbc822adbb49fee509ccd9a5389c34e169" dependencies = [ "diesel", "serde", @@ -2220,9 +2234,9 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ff2be1e7312c858b2ef974f5c7089833ae57b5311b334b30923af58e5718d8" +checksum = "e7f2c3de51e2ba6bf2a648285696137aaf0f5f487bcbea93972fe8a364e131a4" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", @@ -2389,7 +2403,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "zerocopy 0.7.34", + "zerocopy 0.7.35", ] [[package]] @@ -2590,6 +2604,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + [[package]] name = "ena" version = "0.14.3" @@ -2645,7 +2665,7 @@ dependencies = [ "serde", "serde_json", "sled-agent-types", - "socket2 0.5.7", + "socket2", "tokio", "toml 0.8.19", "uuid", @@ -2671,11 +2691,11 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", 
"syn 2.0.79", @@ -2729,18 +2749,15 @@ dependencies = [ [[package]] name = "error-code" -version = "3.2.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0474425d51df81997e2f90a21591180b38eccf27292d755f3e30750225c175b" +checksum = "a5d9305ccc6942a704f4335694ecd3de2ea531b114ac2d51f5f843750787a92f" [[package]] name = "escape8259" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4911e3666fcd7826997b4745c8224295a6f3072f1418c3067b97a67557ee" -dependencies = [ - "rustversion", -] +checksum = "5692dd7b5a1978a5aeb0ce83b7655c58ca8efdcb79d21036ea249da95afec2c6" [[package]] name = "event-listener" @@ -2866,12 +2883,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "finl_unicode" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" - [[package]] name = "fixedbitset" version = "0.4.2" @@ -2880,9 +2891,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flagset" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdeb3aa5e95cf9aabc17f060cfa0ced7b83f042390760ca53bf09df9968acaa1" +checksum = "b3ea1ec5f8307826a5b71094dd91fc04d4ae75d5709b20ad351c7fb4815c86ec" [[package]] name = "flate2" @@ -2891,7 +2902,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -2927,6 +2938,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -3220,7 +3237,7 @@ dependencies = [ "serde-big-array", "slog", "slog-error-chain", - "socket2 0.5.7", + "socket2", "string_cache", "thiserror", "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git?branch=main)", @@ -3285,9 +3302,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -3308,9 +3325,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git2" @@ -3333,9 +3350,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" +checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19" dependencies = [ "aho-corasick", "bstr", @@ -3507,6 +3524,11 @@ name = "hashbrown" version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] [[package]] name = "hashlink" @@ -3649,7 +3671,7 @@ dependencies = [ "async-trait", "cfg-if", "data-encoding", - "enum-as-inner 0.6.0", + "enum-as-inner 0.6.1", "futures-channel", "futures-io", "futures-util", 
@@ -3676,7 +3698,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "rand", "resolv-conf", "smallvec 1.13.2", @@ -3694,7 +3716,7 @@ dependencies = [ "async-trait", "bytes", "cfg-if", - "enum-as-inner 0.6.0", + "enum-as-inner 0.6.1", "futures-util", "hickory-proto", "serde", @@ -3795,9 +3817,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http 1.1.0", @@ -3812,7 +3834,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -3824,9 +3846,9 @@ checksum = "21dec9db110f5f872ed9699c3ecf50cf16f423502706ba5c72462e28d3157573" [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -3918,7 +3940,7 @@ dependencies = [ [[package]] name = "hubtools" version = "0.4.6" -source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#943c4bbe6b50d1ab635d085d6204895fb4154e79" +source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#f48e2da029ba6552cff5c07ff8a2fc21cc56aa32" dependencies = [ "hex", "lpc55_areas", @@ -3958,7 +3980,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -3976,7 +3998,7 @@ dependencies = [ "futures-util", "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -4015,7 +4037,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", 
"hyper-util", - "rustls 0.23.10", + "rustls 0.23.14", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -4068,10 +4090,10 @@ dependencies = [ "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "hyper 1.4.1", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -4079,9 +4101,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4393,9 +4415,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", ] @@ -4487,7 +4509,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", "winreg", @@ -4511,11 +4533,11 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.4.0", "libc", "windows-sys 0.52.0", ] @@ -4528,9 +4550,9 @@ checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "ispf" @@ -4584,9 +4606,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -4718,7 +4740,7 @@ checksum = "b024e211b1b371da58cd69e4fb8fa4ed16915edcc0e2e1fb04ac4bad61959f25" [[package]] name = "libfalcon" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/falcon?branch=main#d8c38f890040f90cdc467e23f3f06b6372a9921c" +source = "git+https://github.com/oxidecomputer/falcon?branch=main#651fb5889d66be90ac1afa4a730c573b643aef1e" dependencies = [ "anstyle", "anyhow", @@ -4775,9 +4797,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -4805,16 +4827,16 @@ dependencies = [ "oxnet", "rand", "rusty-doors", - "socket2 0.5.7", + "socket2", "thiserror", "tracing", - "winnow 0.6.18", + "winnow 0.6.20", ] [[package]] name = "libnet" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/netadm-sys#4ceaf96e02acb8258ea4aa403326c08932324835" +source = "git+https://github.com/oxidecomputer/netadm-sys#e07ad76458eb50601e0da4f9da16dbc942bfc2ba" dependencies = [ "anyhow", "cfg-if", @@ -4824,10 +4846,13 @@ dependencies = [ "num_enum", "nvpair", "nvpair-sys", + "oxnet", + "rand", "rusty-doors", - "socket2 0.4.10", + 
"socket2", "thiserror", "tracing", + "winnow 0.6.20", ] [[package]] @@ -4856,7 +4881,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", - "redox_syscall 0.5.1", + "redox_syscall 0.5.7", ] [[package]] @@ -4903,9 +4928,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.16" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "libc", @@ -4997,11 +5022,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -5081,9 +5106,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap" @@ -5097,9 +5122,9 @@ dependencies = [ [[package]] name = "memmap2" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" dependencies = [ "libc", ] @@ -5153,9 +5178,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -5167,15 +5192,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -5261,11 +5277,10 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -6009,11 +6024,11 @@ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] name = "nu-ansi-term" -version = "0.50.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd2800e1520bdc966782168a627aa5d1ad92e33b984bf7c7615d31280c83ff14" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6031,9 +6046,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", @@ -6204,18 +6219,18 @@ 
dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "olpc-cjson" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d637c9c15b639ccff597da8f4fa968300651ad2f1e968aefc3b4927a6fb2027a" +checksum = "696183c9b5fe81a7715d074fd632e8bd46f4ccc0231a3ed7fc580a80de5f7083" dependencies = [ "serde", "serde_json", @@ -7022,6 +7037,7 @@ dependencies = [ "generic-array", "getrandom", "group", + "hashbrown 0.15.0", "hex", "hickory-proto", "hmac", @@ -7051,12 +7067,15 @@ dependencies = [ "peg-runtime", "pem-rfc7468", "petgraph", + "phf", + "phf_shared 0.11.2", "pkcs8", "postgres-types", "predicates", "proc-macro2", "qorb", "quote", + "rand", "regex", "regex-automata", "regex-syntax 0.8.5", @@ -7064,7 +7083,7 @@ dependencies = [ "ring 0.17.8", "rsa", "rustix", - "rustls 0.23.10", + "rustls 0.23.14", "schemars", "scopeguard", "semver 1.0.23", @@ -7099,7 +7118,7 @@ dependencies = [ "usdt-impl", "uuid", "x509-cert", - "zerocopy 0.7.34", + "zerocopy 0.7.35", "zeroize", ] @@ -7143,9 +7162,9 @@ checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "opaque-debug" @@ -7371,7 +7390,7 @@ dependencies = [ "smoltcp 0.11.0", "tabwriter", "uuid", - "zerocopy 0.7.34", + "zerocopy 0.7.35", ] [[package]] @@ -7474,6 +7493,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", + "chrono-tz", "clap", 
"clickward", "criterion", @@ -7484,9 +7504,11 @@ dependencies = [ "futures", "gethostname", "highway", + "iana-time-zone", "indexmap 2.6.0", "itertools 0.13.0", "libc", + "nom", "num", "omicron-common", "omicron-test-utils", @@ -7668,7 +7690,7 @@ dependencies = [ [[package]] name = "oxnet" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/oxnet#2612d2203effcfdcbf83778a77f1bfd03fe6ed24" +source = "git+https://github.com/oxidecomputer/oxnet#7dacd265f1bcd0f8b47bd4805250c4f0812da206" dependencies = [ "ipnetwork", "schemars", @@ -7780,9 +7802,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core 0.9.10", @@ -7810,7 +7832,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.1", + "redox_syscall 0.5.7", "smallvec 1.13.2", "windows-targets 0.52.6", ] @@ -7846,6 +7868,15 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "944553dd59c802559559161f9816429058b869003836120e262e8caec061b7ae" +[[package]] +name = "parse-zoneinfo" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24" +dependencies = [ + "regex", +] + [[package]] name = "partial-io" version = "0.5.4" @@ -7903,9 +7934,9 @@ dependencies = [ [[package]] name = "pathdiff" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +checksum = "d61c5ce1153ab5b689d0c074c4e7fc613e942dfb7dd9eea5ab202d2ad91fe361" dependencies = [ "camino", ] @@ 
-7986,9 +8017,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.10" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", "thiserror", @@ -7997,9 +8028,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.10" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" +checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" dependencies = [ "pest", "pest_generator", @@ -8007,9 +8038,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.10" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" +checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" dependencies = [ "pest", "pest_meta", @@ -8020,9 +8051,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.10" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" +checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" dependencies = [ "once_cell", "pest", @@ -8050,6 +8081,26 @@ dependencies = [ "phf_shared 0.11.2", ] +[[package]] +name = "phf_codegen" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" +dependencies = [ + "phf_generator", + "phf_shared 0.11.2", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared 0.11.2", + "rand", +] + [[package]] name = "phf_shared" version = "0.10.0" @@ -8070,18 +8121,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", @@ -8151,9 +8202,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "plain" @@ -8163,9 +8214,9 @@ checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -8176,15 +8227,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] @@ -8250,18 +8301,19 @@ source = "git+https://github.com/oxidecomputer/poptrie?branch=multipath#ca52bef3 [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "postcard" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55c51ee6c0db07e68448e336cf8ea4131a620edefebf9893e759b2d793420f8" +checksum = "5f7f0a8d620d71c457dd1d47df76bb18960378da56af4527aaa10f515eee732e" dependencies = [ "cobs", - "embedded-io", + "embedded-io 0.4.0", + "embedded-io 0.6.1", "serde", ] @@ -8306,9 +8358,12 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy 0.7.35", +] [[package]] name = "pq-sys" @@ -8340,15 +8395,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" 
+checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" [[package]] name = "predicates-tree" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" dependencies = [ "predicates-core", "termtree", @@ -8705,8 +8760,8 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.10", - "socket2 0.5.7", + "rustls 0.23.14", + "socket2", "thiserror", "tokio", "tracing", @@ -8722,7 +8777,7 @@ dependencies = [ "rand", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.10", + "rustls 0.23.14", "slab", "thiserror", "tinyvec", @@ -8737,7 +8792,7 @@ checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.59.0", ] @@ -8758,7 +8813,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "scheduled-thread-pool", ] @@ -8944,18 +8999,18 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.1" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", @@ -9117,7 +9172,7 @@ dependencies = [ 
"futures-util", "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "hyper 1.4.1", "hyper-rustls 0.27.3", @@ -9132,7 +9187,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.10", + "rustls 0.23.14", "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", @@ -9359,9 +9414,9 @@ dependencies = [ [[package]] name = "russh-cryptovec" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b077b6dd8d8c085dac62f7fcc5a83df60c7f7a22d49bfba994f2f4dbf60bc74" +checksum = "fadd2c0ab350e21c66556f94ee06f766d8bdae3213857ba7610bfd8e10e51880" dependencies = [ "libc", "winapi", @@ -9512,31 +9567,31 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "log", "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.7.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", "rustls-pemfile 2.2.0", @@ -9581,9 +9636,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" 
dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -9711,11 +9766,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -9724,7 +9779,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "parking_lot 0.12.2", + "parking_lot 0.12.3", ] [[package]] @@ -9826,9 +9881,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -9839,9 +9894,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -9976,9 +10031,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -10534,9 +10589,9 @@ dependencies = [ [[package]] name = "snafu" -version = "0.8.2" +version = "0.8.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "75976f4748ab44f6e5332102be424e7c2dc18daeaf7e725f2040c3ebb133512e" +checksum = "223891c85e2a29c3fe8fb900c1fae5e69c2e42415e3177752e8718475efa5019" dependencies = [ "futures-core", "pin-project", @@ -10545,26 +10600,16 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b19911debfb8c2fb1107bc6cb2d61868aaf53a988449213959bb1b5b1ed95f" +checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.79", ] -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -10636,7 +10681,7 @@ dependencies = [ "ed25519-dalek", "libipcc", "pem-rfc7468", - "rustls 0.23.10", + "rustls 0.23.14", "secrecy", "serde", "sha2", @@ -10776,7 +10821,7 @@ checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" dependencies = [ "new_debug_unreachable", "once_cell", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "phf_shared 0.10.0", "precomputed-hash", "serde", @@ -10790,13 +10835,13 @@ checksum = "7b3c8667cd96245cbb600b8dec5680a7319edd719c5aa2b5d23c6bff94f39765" [[package]] name = "stringprep" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" dependencies = [ - "finl_unicode", "unicode-bidi", "unicode-normalization", + "unicode-properties", ] [[package]] @@ -10920,9 +10965,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = 
"2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "supports-color" @@ -11215,9 +11260,9 @@ dependencies = [ [[package]] name = "thread-id" -version = "4.2.1" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0ec81c46e9eb50deaa257be2f148adf052d1fb7701cfd55ccfab2525280b70b" +checksum = "cfe8f25bbdd100db7e1d34acf7fd2dc59c4bf8f7483f505eaa7d4f12f76cc0ea" dependencies = [ "libc", "winapi", @@ -11296,9 +11341,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -11381,10 +11426,10 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] @@ -11423,14 +11468,14 @@ dependencies = [ "futures-channel", "futures-util", "log", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "percent-encoding", "phf", "pin-project-lite", "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.7", + "socket2", "tokio", "tokio-util", "whoami", @@ -11463,7 +11508,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.10", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -11573,7 +11618,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.18", + "winnow 0.6.20", ] [[package]] @@ -11631,9 +11676,9 @@ dependencies = [ [[package]] name = 
"tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -11905,9 +11950,9 @@ dependencies = [ [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "unarray" @@ -11926,9 +11971,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -11944,40 +11989,47 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" + [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" 
[[package]] name = "unicode-truncate" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5fbabedabe362c618c714dbefda9927b5afc8e2a8102f47f081089a9019226" +checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ - "itertools 0.12.1", + "itertools 0.13.0", + "unicode-segmentation", "unicode-width", ] [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unicode_categories" @@ -12165,9 +12217,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" @@ -12253,9 +12305,9 @@ dependencies = [ [[package]] name = "vte_generate_state_changes" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" +checksum = "2e369bee1b05d510a7b4ed645f5faa90619e05437111783ea5848f28d97d3c2e" dependencies = [ "proc-macro2", "quote", @@ -12312,19 +12364,20 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = 
"0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", @@ -12337,9 +12390,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -12349,9 +12402,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -12359,9 +12412,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", @@ -12372,15 +12425,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.95" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" dependencies = [ "futures-util", "js-sys", @@ -12391,9 +12444,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", @@ -12401,9 +12454,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -12426,7 +12479,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.5.1", + "redox_syscall 0.5.7", "wasite", "web-sys", ] @@ -12673,11 +12726,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - 
"windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -12894,9 +12947,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -13019,12 +13072,12 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive 0.7.34", + "zerocopy-derive 0.7.35", ] [[package]] @@ -13040,9 +13093,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 07b321b12f..10000d1238 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -312,6 +312,7 @@ cargo_metadata = "0.18.1" chacha20poly1305 = "0.10.1" cfg-if = "1.0" chrono = { version = "0.4", features = [ "serde" ] } +chrono-tz = "0.10.0" ciborium = "0.2.2" clap = { version = "4.5", features = ["cargo", "derive", "env", "wrap_help"] } clickhouse-admin-api = { path = "clickhouse-admin/api" } @@ -398,6 +399,7 @@ hyper-util = "0.1.9" hyper-rustls = "0.26.0" hyper-staticfile = "0.10.0" illumos-utils = { path = "illumos-utils" } +iana-time-zone = "0.1.61" indent_write = "2.2.0" indexmap = "2.6.0" indicatif = { version = "0.17.8", features = ["rayon"] } @@ -518,7 +520,7 @@ propolis_api_types = { git = 
"https://github.com/oxidecomputer/propolis", rev = propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } proptest = "1.5.0" -qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1" } +qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "b9bf692a0c56458512cced4b3414b297e61e7d8f" } quote = "1.0" rand = "0.8.5" rand_core = "0.6.4" diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 258e43f18c..7b388663ac 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -276,6 +276,11 @@ impl DataStore { Ok(datastore) } + /// Terminates the underlying pool, stopping it from connecting to backends. + pub async fn terminate(&self) { + self.pool.terminate().await + } + pub fn register_producers(&self, registry: &ProducerRegistry) { registry .register_producer( diff --git a/nexus/db-queries/src/db/pool.rs b/nexus/db-queries/src/db/pool.rs index ea669a419e..4fb9cf4e2f 100644 --- a/nexus/db-queries/src/db/pool.rs +++ b/nexus/db-queries/src/db/pool.rs @@ -140,4 +140,11 @@ impl Pool { ) -> anyhow::Result> { Ok(self.inner.claim().await?) 
} + + /// Stops the qorb background tasks, and causes all future claims to fail + pub async fn terminate( + &self, + ) { + self.inner.terminate().await + } } diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index a1750e3fc9..af66028ad3 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -15,13 +15,16 @@ async-trait.workspace = true bcs.workspace = true camino.workspace = true chrono.workspace = true +chrono-tz.workspace = true clap.workspace = true clickward.workspace = true dropshot.workspace = true futures.workspace = true gethostname.workspace = true highway.workspace = true +iana-time-zone.workspace = true libc.workspace = true +nom.workspace = true num.workspace = true omicron-common.workspace = true omicron-workspace-hack.workspace = true diff --git a/oximeter/db/src/bin/oxdb/main.rs b/oximeter/db/src/bin/oxdb/main.rs index 3ad8959e66..2f80518145 100644 --- a/oximeter/db/src/bin/oxdb/main.rs +++ b/oximeter/db/src/bin/oxdb/main.rs @@ -361,7 +361,9 @@ async fn main() -> anyhow::Result<()> { .await? } #[cfg(feature = "native-sql")] - Subcommand::NativeSql => oximeter_db::shells::native::shell().await?, + Subcommand::NativeSql => { + oximeter_db::shells::native::shell(args.address).await? + } } Ok(()) } diff --git a/oximeter/db/src/native/block.rs b/oximeter/db/src/native/block.rs index 026ee49744..a51b27352f 100644 --- a/oximeter/db/src/native/block.rs +++ b/oximeter/db/src/native/block.rs @@ -7,11 +7,20 @@ //! Types for working with actual blocks and columns of data. 
use super::Error; -use chrono::{DateTime, Utc}; +use chrono::{DateTime, NaiveDate}; +use chrono_tz::Tz; use indexmap::IndexMap; +use nom::{ + bytes::complete::{tag, take_while1}, + character::complete::u8 as nom_u8, + combinator::{eof, map, map_opt, opt}, + sequence::{delimited, preceded, tuple}, + IResult, +}; use std::{ fmt, net::{Ipv4Addr, Ipv6Addr}, + sync::LazyLock, }; use uuid::Uuid; @@ -78,11 +87,13 @@ impl Block { if !self.matches_structure(&block) { return Err(Error::MismatchedBlockStructure); } + let n_new_rows = block.n_rows; for (our_col, their_col) in self.columns.values_mut().zip(block.columns.into_values()) { our_col.concat(their_col).expect("Checked above"); } + self.n_rows += n_new_rows; Ok(()) } @@ -195,8 +206,9 @@ pub enum ValueArray { Uuid(Vec), Ipv4(Vec), Ipv6(Vec), - DateTime(Vec>), - DateTime64 { precision: Precision, values: Vec> }, + Date(Vec), + DateTime { tz: Tz, values: Vec> }, + DateTime64 { precision: Precision, tz: Tz, values: Vec> }, Nullable { is_null: Vec, values: Box }, Enum8 { variants: IndexMap, values: Vec }, Array { inner_type: DataType, values: Vec }, @@ -221,7 +233,8 @@ impl ValueArray { ValueArray::Uuid(inner) => inner.len(), ValueArray::Ipv4(inner) => inner.len(), ValueArray::Ipv6(inner) => inner.len(), - ValueArray::DateTime(inner) => inner.len(), + ValueArray::Date(inner) => inner.len(), + ValueArray::DateTime { values, .. } => values.len(), ValueArray::DateTime64 { values, .. } => values.len(), ValueArray::Nullable { values, .. } => values.len(), ValueArray::Enum8 { values, .. 
} => values.len(), @@ -248,10 +261,15 @@ impl ValueArray { DataType::Uuid => ValueArray::Uuid(vec![]), DataType::Ipv4 => ValueArray::Ipv4(vec![]), DataType::Ipv6 => ValueArray::Ipv6(vec![]), - DataType::DateTime => ValueArray::DateTime(vec![]), - DataType::DateTime64(precision) => { - ValueArray::DateTime64 { precision: *precision, values: vec![] } + DataType::Date => ValueArray::Date(vec![]), + DataType::DateTime(tz) => { + ValueArray::DateTime { tz: *tz, values: vec![] } } + DataType::DateTime64(precision, tz) => ValueArray::DateTime64 { + precision: *precision, + tz: *tz, + values: vec![], + }, DataType::Enum8(variants) => { ValueArray::Enum8 { variants: variants.clone(), values: vec![] } } @@ -321,9 +339,10 @@ impl ValueArray { (ValueArray::Ipv6(us), ValueArray::Ipv6(mut them)) => { us.append(&mut them) } - (ValueArray::DateTime(us), ValueArray::DateTime(mut them)) => { - us.append(&mut them) - } + ( + ValueArray::DateTime { values: us, .. }, + ValueArray::DateTime { values: mut them, .. }, + ) => us.append(&mut them), ( ValueArray::DateTime64 { values: us, .. }, ValueArray::DateTime64 { values: mut them, .. }, @@ -401,13 +420,13 @@ impl TryFrom for Precision { /// order to convert it to a number of seconds and nanoseconds. Those are then /// used to call `DateTime::from_timestamp()`. macro_rules! precision_conversion_func { - ($precision:literal) => {{ - |x| { + ($tz:expr, $precision:literal) => {{ + |tz, x| { const SCALE: i64 = 10i64.pow($precision); const FACTOR: i64 = 10i64.pow(Precision::MAX as u32 - $precision); let seconds = x.div_euclid(SCALE); let nanos = (FACTOR * x.rem_euclid(SCALE)).try_into().unwrap(); - DateTime::from_timestamp(seconds, nanos).unwrap() + tz.timestamp_opt(seconds, nanos).unwrap() } }}; } @@ -425,7 +444,10 @@ impl Precision { /// Return a conversion function that takes an i64 count and converts it to /// a DateTime. 
- pub(crate) fn as_conv(&self) -> fn(i64) -> DateTime { + pub(crate) fn as_conv( + &self, + _: &T, + ) -> fn(&T, i64) -> DateTime { // For the easy values, we'll convert to seconds or microseconds, and // then use a constructor. // @@ -433,33 +455,39 @@ impl Precision { // next-smallest sane unit, in this case milliseconds, and use the // appropriate constructor. match self.0 { - 0 => |x| DateTime::from_timestamp(x, 0).unwrap(), - 1 => precision_conversion_func!(1), - 2 => precision_conversion_func!(2), - 3 => |x| DateTime::from_timestamp_millis(x).unwrap(), - 4 => precision_conversion_func!(4), - 5 => precision_conversion_func!(5), - 6 => |x| DateTime::from_timestamp_micros(x).unwrap(), - 7 => precision_conversion_func!(7), - 8 => precision_conversion_func!(8), - 9 => |x| DateTime::from_timestamp_nanos(x), + 0 => |tz, x| tz.timestamp_opt(x, 0).unwrap(), + 1 => precision_conversion_func!(tz, 1), + 2 => precision_conversion_func!(tz, 2), + 3 => |tz, x| tz.timestamp_millis_opt(x).unwrap(), + 4 => precision_conversion_func!(tz, 4), + 5 => precision_conversion_func!(tz, 5), + 6 => |tz, x| tz.timestamp_nanos(x * 1000), + 7 => precision_conversion_func!(tz, 7), + 8 => precision_conversion_func!(tz, 8), + 9 => |tz, x| tz.timestamp_nanos(x), 10..=u8::MAX => unreachable!(), } } /// Convert the provided datetime into a timestamp in the right precision. - pub(crate) fn scale(&self, value: DateTime) -> i64 { + /// + /// This returns `None` if the timestamp cannot be converted to an `i64`, + /// which is how ClickHouse stores the values. 
+ pub(crate) fn scale( + &self, + value: DateTime, + ) -> Option { match self.0 { - 0 => value.timestamp(), - 1 => value.timestamp_millis() / 100, - 2 => value.timestamp_millis() / 10, - 3 => value.timestamp_millis(), - 4 => value.timestamp_micros() / 100, - 5 => value.timestamp_micros() / 10, - 6 => value.timestamp_micros(), - 7 => value.timestamp_nanos_opt().unwrap() / 100, - 8 => value.timestamp_nanos_opt().unwrap() / 10, - 9 => value.timestamp_nanos_opt().unwrap(), + 0 => Some(value.timestamp()), + 1 => Some(value.timestamp_millis() / 100), + 2 => Some(value.timestamp_millis() / 10), + 3 => Some(value.timestamp_millis()), + 4 => Some(value.timestamp_micros() / 100), + 5 => Some(value.timestamp_micros() / 10), + 6 => Some(value.timestamp_micros()), + 7 => value.timestamp_nanos_opt().map(|x| x / 100), + 8 => value.timestamp_nanos_opt().map(|x| x / 10), + 9 => value.timestamp_nanos_opt(), 10.. => unreachable!(), } } @@ -490,8 +518,9 @@ pub enum DataType { Uuid, Ipv4, Ipv6, - DateTime, - DateTime64(Precision), + Date, + DateTime(Tz), + DateTime64(Precision, Tz), Enum8(IndexMap), Nullable(Box), Array(Box), @@ -536,8 +565,11 @@ impl fmt::Display for DataType { DataType::Uuid => write!(f, "UUID"), DataType::Ipv4 => write!(f, "IPv4"), DataType::Ipv6 => write!(f, "IPv6"), - DataType::DateTime => write!(f, "DateTime"), - DataType::DateTime64(prec) => write!(f, "DateTime64({prec})"), + DataType::Date => write!(f, "Date"), + DataType::DateTime(tz) => write!(f, "DateTime('{tz}')"), + DataType::DateTime64(prec, tz) => { + write!(f, "DateTime64({prec}, '{tz}')") + } DataType::Enum8(map) => { write!(f, "Enum8(")?; for (i, (val, name)) in map.iter().enumerate() { @@ -554,6 +586,62 @@ impl fmt::Display for DataType { } } +// Parse a quoted timezone, like `'UTC'` or `'America/Los_Angeles'` +fn quoted_timezone(s: &str) -> IResult<&str, Tz> { + map( + delimited(tag("'"), take_while1(|c| c != '\''), tag("'")), + parse_timezone, + )(s) +} + +// Parse a quoted timezone, delimited by 
parentheses (). +fn parenthesized_timezone(s: &str) -> IResult<&str, Tz> { + delimited(tag("("), quoted_timezone, tag(")"))(s) +} + +/// Parse a `DateTime` data type from a string, optionally with a timezone in +/// it. +fn datetime(s: &str) -> IResult<&str, DataType> { + map( + tuple((tag("DateTime"), opt(parenthesized_timezone), eof)), + |(_, maybe_tz, _)| { + DataType::DateTime(maybe_tz.unwrap_or_else(|| *DEFAULT_TIMEZONE)) + }, + )(s) +} + +/// Parse a `DateTime64` data type from a string, with a precision and optional +/// timezone in it. +/// +/// Matches things like `DateTime64(1)` and `DateTime64(1, 'UTC')`. +fn datetime64(s: &str) -> IResult<&str, DataType> { + map( + tuple(( + tag("DateTime64("), + map_opt(nom_u8, Precision::new), + opt(preceded(tag(", "), quoted_timezone)), + tag(")"), + eof, + )), + |(_, precision, maybe_tz, _, _)| { + DataType::DateTime64( + precision, + maybe_tz.unwrap_or_else(|| *DEFAULT_TIMEZONE), + ) + }, + )(s) +} + +static DEFAULT_TIMEZONE: LazyLock = + LazyLock::new(|| match iana_time_zone::get_timezone() { + Ok(s) => s.parse().unwrap_or_else(|_| Tz::UTC), + Err(_) => Tz::UTC, + }); + +fn parse_timezone(s: &str) -> Tz { + s.parse().unwrap_or_else(|_| *DEFAULT_TIMEZONE) +} + impl std::str::FromStr for DataType { type Err = Error; @@ -591,20 +679,19 @@ impl std::str::FromStr for DataType { return Ok(DataType::Ipv4); } else if s == "IPv6" { return Ok(DataType::Ipv6); - } else if s == "DateTime" { - return Ok(DataType::DateTime); + } else if s == "Date" { + return Ok(DataType::Date); } - // Check for DateTime with precision. - if let Some(suffix) = s.strip_prefix("DateTime64(") { - let Some(inner) = suffix.strip_suffix(")") else { - return Err(Error::UnsupportedDataType(s.to_string())); - }; - return inner - .parse() - .map_err(|_| Error::UnsupportedDataType(s.to_string())) - .map(|p| DataType::DateTime64(Precision(p))); - } + // Check for datetime, possibly with a timezone. 
+ if let Ok((_, dt)) = datetime(s) { + return Ok(dt); + }; + + // Check for DateTime64 with precision, and possibly a timezone. + if let Ok((_, dt)) = datetime64(s) { + return Ok(dt); + }; // Check for Enum8s. // @@ -653,8 +740,14 @@ impl std::str::FromStr for DataType { #[cfg(test)] mod tests { - use super::{DataType, Precision}; + use super::{ + Block, BlockInfo, Column, DataType, Precision, ValueArray, + DEFAULT_TIMEZONE, + }; + use crate::native::block::{datetime, datetime64}; use chrono::{SubsecRound as _, Utc}; + use chrono_tz::Tz; + use indexmap::IndexMap; #[test] fn test_data_type_to_string() { @@ -677,8 +770,12 @@ mod tests { (DataType::Uuid, "UUID"), (DataType::Ipv4, "IPv4"), (DataType::Ipv6, "IPv6"), - (DataType::DateTime, "DateTime"), - (DataType::DateTime64(6.try_into().unwrap()), "DateTime64(6)"), + (DataType::Date, "Date"), + (DataType::DateTime(Tz::UTC), "DateTime('UTC')"), + ( + DataType::DateTime64(6.try_into().unwrap(), Tz::UTC), + "DateTime64(6, 'UTC')", + ), (DataType::Enum8(enum8), "Enum8('foo' = 0, 'bar' = 1)"), (DataType::Nullable(Box::new(DataType::UInt8)), "Nullable(UInt8)"), (DataType::Array(Box::new(DataType::UInt8)), "Array(UInt8)"), @@ -709,9 +806,10 @@ mod tests { let now = Utc::now(); for precision in 0..=Precision::MAX { let prec = Precision(precision); - let timestamp = prec.scale(now); - let conv = prec.as_conv(); - let recovered = conv(timestamp); + let timestamp = + prec.scale(now).expect("Current time should fit in an i64"); + let conv = prec.as_conv(&Utc); + let recovered = conv(&Utc, timestamp); let now_with_precision = now.trunc_subsecs(u16::from(prec.0)); assert_eq!( now_with_precision, recovered, @@ -724,4 +822,87 @@ mod tests { ); } } + + #[test] + fn datetime64_scale_checks_range() { + assert_eq!( + Precision(9).scale(chrono::DateTime::::MAX_UTC), + None, + "Should fail to scale a timestamp that doesn't fit in \ + the range of an i64" + ); + } + + #[test] + fn parse_date_time() { + for (type_, s) in [ + 
(DataType::DateTime(*DEFAULT_TIMEZONE), "DateTime"), + (DataType::DateTime(Tz::UTC), "DateTime('UTC')"), + ( + DataType::DateTime(Tz::America__Los_Angeles), + "DateTime('America/Los_Angeles')", + ), + ] { + let dt = datetime(s).unwrap().1; + assert_eq!(type_, dt, "Failed to parse '{}' into DateTime", s,); + } + + assert!(datetime("DateTim").is_err()); + assert!(datetime("DateTime()").is_err()); + assert!(datetime("DateTime()").is_err()); + assert!(datetime("DateTime('U)").is_err()); + assert!(datetime("DateTime(0)").is_err()); + } + + #[test] + fn parse_date_time64() { + for (type_, s) in [ + ( + DataType::DateTime64(Precision(3), *DEFAULT_TIMEZONE), + "DateTime64(3)", + ), + ( + DataType::DateTime64(Precision(3), Tz::UTC), + "DateTime64(3, 'UTC')", + ), + ( + DataType::DateTime64(Precision(6), Tz::America__Los_Angeles), + "DateTime64(6, 'America/Los_Angeles')", + ), + ] { + let dt = datetime64(s).unwrap().1; + assert_eq!(type_, dt, "Failed to parse '{}' into DateTime64", s,); + } + + assert!(datetime64("DateTime6").is_err()); + assert!(datetime64("DateTime64(").is_err()); + assert!(datetime64("DateTime64()").is_err()); + assert!(datetime64("DateTime64('U)").is_err()); + assert!(datetime64("DateTime64(0, )").is_err()); + assert!(datetime64("DateTime64('a', 'UTC')").is_err()); + assert!(datetime64("DateTime64(1,'UTC')").is_err()); + } + + #[test] + fn concat_blocks() { + let data = vec![0, 1]; + let values = ValueArray::UInt64(data.clone()); + let mut block = Block { + name: String::new(), + info: BlockInfo::default(), + n_columns: 1, + n_rows: values.len() as u64, + columns: IndexMap::from([( + String::from("a"), + Column { values: values.clone(), data_type: DataType::UInt64 }, + )]), + }; + block.concat(block.clone()).unwrap(); + assert_eq!(block.n_columns, 1); + assert_eq!(block.n_rows, values.len() as u64 * 2); + assert_eq!( + block.columns["a"].values, + ValueArray::UInt64([data.as_slice(), data.as_slice()].concat()) + ); + } } diff --git 
a/oximeter/db/src/native/connection.rs b/oximeter/db/src/native/connection.rs index 51bb71cd39..f6367ca126 100644 --- a/oximeter/db/src/native/connection.rs +++ b/oximeter/db/src/native/connection.rs @@ -170,6 +170,22 @@ impl Connection { break Err(Error::UnexpectedPacket("Hello")); } ServerPacket::Data(block) => { + probes::data__packet__received!(|| { + ( + block.n_columns, + block.n_rows, + block + .columns + .iter() + .map(|(name, col)| { + ( + name.clone(), + col.data_type.to_string(), + ) + }) + .collect::>(), + ) + }); // Empty blocks are sent twice: the beginning of the // query so that the client knows the table structure, // and then the end to signal the last data transfer. @@ -466,7 +482,7 @@ mod tests { assert_eq!(block.n_rows, 1); let (name, col) = block.columns.first().unwrap(); assert_eq!(name, "timestamp"); - assert_eq!(col.data_type, DataType::DateTime); + assert!(matches!(col.data_type, DataType::DateTime(_))); db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/oximeter/db/src/native/io/block.rs b/oximeter/db/src/native/io/block.rs index b7a13be1d8..e6ab6c8ba1 100644 --- a/oximeter/db/src/native/io/block.rs +++ b/oximeter/db/src/native/io/block.rs @@ -24,7 +24,7 @@ pub fn encode(block: Block, mut dst: &mut BytesMut) -> Result<(), Error> { io::varuint::encode(block.n_columns, &mut dst); io::varuint::encode(block.n_rows, &mut dst); for (name, col) in block.columns { - io::column::encode(&name, col, &mut dst); + io::column::encode(&name, col, &mut dst)?; } Ok(()) } diff --git a/oximeter/db/src/native/io/column.rs b/oximeter/db/src/native/io/column.rs index 0922a49aa2..649d6a044d 100644 --- a/oximeter/db/src/native/io/column.rs +++ b/oximeter/db/src/native/io/column.rs @@ -11,10 +11,33 @@ use crate::native::{ io, Error, }; use bytes::{Buf as _, BufMut as _, BytesMut}; -use chrono::DateTime; +use chrono::{NaiveDate, TimeDelta, TimeZone}; use std::net::{Ipv4Addr, Ipv6Addr}; use uuid::Uuid; +// ClickHouse `Date`s are represented as 
an unsigned 16-bit number of days from +// the UNIX epoch. +// +// This is deprecated, but we allow it so we can create a const. The fallible +// constructor requires unwrapping. +#[allow(deprecated)] +const EPOCH: NaiveDate = NaiveDate::from_ymd(1970, 1, 1); + +// Maximum supported Date in ClickHouse. +// +// See https://clickhouse.com/docs/en/sql-reference/data-types/date +const MAX_DATE: &str = "2149-06-06"; + +// Maximum supported DateTime in ClickHouse. +// +// See https://clickhouse.com/docs/en/sql-reference/data-types/datetime. +const MAX_DATETIME: &str = "2106-02-07 06:28:15"; + +// Maximum supported DateTime64 in ClickHouse +// +// See https://clickhouse.com/docs/en/sql-reference/data-types/datetime64. +const MAX_DATETIME64: &str = "2299-12-31 23:59:59.99999999"; + /// Helper macro to quickly and unsafely copy POD data from a message from the /// ClickHouse server into our own column data types. macro_rules! copyin_pod_values_raw { @@ -150,7 +173,17 @@ fn decode_value_array( // encoding of IPv4 addresses. copyin_pod_as_values!(Ipv6Addr, src, n_rows) } - DataType::DateTime => { + DataType::Date => { + // Dates are stored as 16-bit unsigned values, giving the number of + // days since the UNIX epoch. + let days = copyin_pod_values_raw!(u16, src, n_rows); + let mut out = Vec::with_capacity(days.len()); + for day in days.into_iter() { + out.push(EPOCH + TimeDelta::days(i64::from(day))); + } + ValueArray::Date(out) + } + DataType::DateTime(tz) => { // DateTimes are encoded as little-endian u32s, giving a traditional // UNIX timestamp with 1 second resolution. Similar to IPv4 // addresses, we'll iterate in chunks and then convert. @@ -164,16 +197,14 @@ fn decode_value_array( // this has exactly `n_rows` chunks of 4 bytes each. 
let timestamp = u32::from_le_bytes(chunk.try_into().unwrap()); - // Safey: This only panics if the timestamp is out of range, + // Safety: This only panics if the timestamp is out of range, // which is not possible as this is actually a u32. - values.push( - DateTime::from_timestamp(i64::from(timestamp), 0).unwrap(), - ); + values.push(tz.timestamp_opt(i64::from(timestamp), 0).unwrap()); } *src = rest; - ValueArray::DateTime(values) + ValueArray::DateTime { tz: *tz, values } } - DataType::DateTime64(precision) => { + DataType::DateTime64(precision, timezone) => { // DateTime64s are encoded as little-endian i64s, but their // precision is encoded in the argument, not the column itself. // We'll iterate over chunks of the provided data again, and convert @@ -186,18 +217,22 @@ fn decode_value_array( // The precision determines how to convert these values. Most things // should be 3, 6, or 9, for milliseconds, microseconds, or // nanoseconds. But technically any precision in [0, 9] is possible. - let conv = precision.as_conv(); + let conv = precision.as_conv(timezone); for chunk in data.chunks_exact(std::mem::size_of::()) { // Safety: Because we split this above on `n_bytes`, we know // this has exactly `n_rows` chunks of 8 bytes each. let timestamp = i64::from_le_bytes(chunk.try_into().unwrap()); - // Safey: This only panics if the timestamp is out of range, + // Safety: This only panics if the timestamp is out of range, // which is not possible as this is actually a u32. - values.push(conv(timestamp)); + values.push(conv(timezone, timestamp)); } *src = rest; - ValueArray::DateTime64 { precision: *precision, values } + ValueArray::DateTime64 { + precision: *precision, + tz: *timezone, + values, + } } DataType::Enum8(variants) => { // Copy the encoded variant indices themselves, and include the @@ -296,17 +331,24 @@ macro_rules! copyout_pod_values { /// /// This panics if the data type is unsupported. Use `DataType::is_supported()` /// to check that first. 
-pub fn encode(name: &str, column: Column, mut dst: &mut BytesMut) { +pub fn encode( + name: &str, + column: Column, + mut dst: &mut BytesMut, +) -> Result<(), Error> { assert!(column.data_type.is_supported()); io::string::encode(name, &mut dst); io::string::encode(column.data_type.to_string(), &mut dst); // Encode the "custom serialization tag". See `decode` for details. dst.put_u8(0); - encode_value_array(column.values, dst); + encode_value_array(column.values, dst) } /// Encode an array of values into a buffer. -fn encode_value_array(values: ValueArray, mut dst: &mut BytesMut) { +fn encode_value_array( + values: ValueArray, + mut dst: &mut BytesMut, +) -> Result<(), Error> { match values { ValueArray::UInt8(values) => dst.put(values.as_slice()), ValueArray::UInt16(values) => copyout_pod_values!(u16, values, dst), @@ -339,27 +381,62 @@ fn encode_value_array(values: ValueArray, mut dst: &mut BytesMut) { } } ValueArray::Ipv6(values) => copyout_pod_values!(Ipv6Addr, values, dst), - ValueArray::DateTime(values) => { + ValueArray::Date(values) => { + // Dates are represented in ClickHouse as a 16-bit unsigned number + // of days since the UNIX epoch. + // + // Since these can be constructed from any `NaiveDate`, they can + // have wider values than ClickHouse supports. Check that here + // during conversion to the `u16` format. + dst.reserve(values.len() * std::mem::size_of::()); + for value in values { + let days = value.signed_duration_since(EPOCH).num_days(); + let days = + u16::try_from(days).map_err(|_| Error::OutOfRange { + type_name: String::from("Date"), + min: EPOCH.to_string(), + max: MAX_DATE.to_string(), + value: value.to_string(), + })?; + dst.put_u16_le(days); + } + } + ValueArray::DateTime { values, .. } => { // DateTimes are always little-endian u32s giving the UNIX // timestamp. for value in values { - // Safety: We only construct these today from a u32 in the first - // place, so this must also be safe. 
- dst.put_u32_le(u32::try_from(value.timestamp()).unwrap()); + // DateTime's in ClickHouse must fit in a u32, so validate the + // range here. + let val = u32::try_from(value.timestamp()).map_err(|_| { + Error::OutOfRange { + type_name: String::from("DateTime"), + min: EPOCH.and_hms_opt(0, 0, 0).unwrap().to_string(), + max: MAX_DATETIME.to_string(), + value: value.to_string(), + } + })?; + dst.put_u32_le(val); } } - ValueArray::DateTime64 { precision, values } => { + ValueArray::DateTime64 { precision, values, .. } => { // DateTime64s are always encoded as i64s, in whatever // resolution is defined by the column type itself. dst.reserve(values.len() * std::mem::size_of::()); for value in values { - let timestamp = precision.scale(value); + let Some(timestamp) = precision.scale(value) else { + return Err(Error::OutOfRange { + type_name: String::from("DateTime64"), + min: EPOCH.to_string(), + max: MAX_DATETIME64.to_string(), + value: value.to_string(), + }); + }; dst.put_i64_le(timestamp); } } ValueArray::Nullable { is_null, values } => { copyout_pod_values!(bool, is_null, dst); - encode_value_array(*values, dst); + encode_value_array(*values, dst)?; } ValueArray::Enum8 { values, .. } => { copyout_pod_values!(i8, values, dst) @@ -369,10 +446,11 @@ fn encode_value_array(values: ValueArray, mut dst: &mut BytesMut) { // array, plus the flattened data itself. encode_array_offsets(&arrays, dst); for array in arrays { - encode_value_array(array, dst); + encode_value_array(array, dst)?; } } } + Ok(()) } // Encode the column offsets for an array column into the provided buffer. 
@@ -398,7 +476,9 @@ fn encode_array_offsets(arrays: &[ValueArray], dst: &mut BytesMut) { mod tests { use super::*; use crate::native::block::Precision; - use chrono::{SubsecRound as _, Utc}; + use chrono::SubsecRound as _; + use chrono::TimeZone; + use chrono_tz::Tz; #[test] fn test_decode_uint8_column() { @@ -517,7 +597,7 @@ mod tests { #[test] fn test_encode_decode_column() { - let now64 = Utc::now(); + let now64 = Tz::UTC.timestamp_opt(0, 0).unwrap(); let now = now64.trunc_subsecs(0); let precision = Precision::new(9).unwrap(); for (typ, values) in [ @@ -546,10 +626,18 @@ mod tests { ), (DataType::Ipv4, ValueArray::Ipv4(vec![Ipv4Addr::LOCALHOST])), (DataType::Ipv6, ValueArray::Ipv6(vec![Ipv6Addr::LOCALHOST])), - (DataType::DateTime, ValueArray::DateTime(vec![now])), + (DataType::Date, ValueArray::Date(vec![now.date_naive()])), ( - DataType::DateTime64(precision), - ValueArray::DateTime64 { precision, values: vec![now64] }, + DataType::DateTime(Tz::UTC), + ValueArray::DateTime { tz: Tz::UTC, values: vec![now] }, + ), + ( + DataType::DateTime64(precision, Tz::UTC), + ValueArray::DateTime64 { + precision, + tz: Tz::UTC, + values: vec![now64], + }, ), ( DataType::Nullable(Box::new(DataType::UInt8)), @@ -599,7 +687,7 @@ mod tests { let n_rows = values.len(); let col = Column { values, data_type: typ.clone() }; let mut buf = BytesMut::new(); - encode("foo", col.clone(), &mut buf); + encode("foo", col.clone(), &mut buf).unwrap(); let (name, decoded) = decode(&mut &buf[..], n_rows) .expect("Should have succeeded in decoding full column") .unwrap_or_else(|| { @@ -613,4 +701,36 @@ mod tests { ); } } + + #[test] + fn fail_to_encode_out_of_range_column() { + let max = Tz::from_utc_datetime( + &Tz::UTC, + &chrono::DateTime::::MAX_UTC.naive_utc(), + ); + let precision = Precision::new(9).unwrap(); + // See https://clickhouse.com/docs/en/sql-reference/data-types/datetime + // and related pages for the supported ranges of these types. 
+ for (typ, values) in [ + (DataType::Date, ValueArray::Date(vec![max.date_naive()])), + ( + DataType::DateTime(Tz::UTC), + ValueArray::DateTime { tz: Tz::UTC, values: vec![max] }, + ), + ( + DataType::DateTime64(precision, Tz::UTC), + ValueArray::DateTime64 { + precision, + tz: Tz::UTC, + values: vec![max], + }, + ), + ] { + let col = Column { values, data_type: typ.clone() }; + let mut buf = BytesMut::new(); + let err = encode("foo", col.clone(), &mut buf) + .expect_err("Should fail to encode date-like column with out of range value"); + assert!(matches!(err, Error::OutOfRange { .. })); + } + } } diff --git a/oximeter/db/src/native/mod.rs b/oximeter/db/src/native/mod.rs index f8deb22b8b..15d013fa35 100644 --- a/oximeter/db/src/native/mod.rs +++ b/oximeter/db/src/native/mod.rs @@ -136,6 +136,15 @@ mod probes { /// Emitted when we receive a packet from the server, with its kind. fn packet__received(kind: &str) {} + /// Emitted when we receive a data packet, with details about the size and + /// data types for each column. + fn data__packet__received( + n_cols: u64, + n_rows: u64, + columns: Vec<(String, String)>, + ) { + } + /// Emitted when we receive an unrecognized packet, with the kind and the /// length of the discarded buffer. 
fn unrecognized__server__packet(kind: u64, len: usize) {} @@ -194,4 +203,7 @@ pub enum Error { #[error("Cannot concatenate blocks with mismatched structure")] MismatchedBlockStructure, + + #[error("Value out of range for corresponding ClickHouse type")] + OutOfRange { type_name: String, min: String, max: String, value: String }, } diff --git a/oximeter/db/src/shells/native.rs b/oximeter/db/src/shells/native.rs index 289610b4f3..c0e86367da 100644 --- a/oximeter/db/src/shells/native.rs +++ b/oximeter/db/src/shells/native.rs @@ -12,12 +12,13 @@ use crossterm::style::Stylize; use display_error_chain::DisplayErrorChain; use omicron_common::address::CLICKHOUSE_TCP_PORT; use reedline::{DefaultPrompt, DefaultPromptSegment, Reedline, Signal}; -use std::net::{Ipv6Addr, SocketAddr}; +use std::net::{IpAddr, SocketAddr}; use tabled::{builder::Builder, settings::Style}; /// Run the native SQL shell. -pub async fn shell() -> anyhow::Result<()> { - let addr = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), CLICKHOUSE_TCP_PORT); +pub async fn shell(addr: IpAddr) -> anyhow::Result<()> { + usdt::register_probes()?; + let addr = SocketAddr::new(addr, CLICKHOUSE_TCP_PORT); let mut conn = native::Connection::new(addr) .await .context("Trying to connect to ClickHouse server")?; @@ -162,9 +163,12 @@ fn values_to_string<'a>( ValueArray::Ipv6(vals) => { Box::new(vals.iter().map(ToString::to_string)) } - ValueArray::DateTime(vals) => { + ValueArray::Date(vals) => { Box::new(vals.iter().map(ToString::to_string)) } + ValueArray::DateTime { values, .. } => { + Box::new(values.iter().map(ToString::to_string)) + } ValueArray::DateTime64 { values, .. 
} => { Box::new(values.iter().map(ToString::to_string)) } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 1c94facf33..4a7abc6e4c 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -27,7 +27,7 @@ bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["serde", "std"] } -bstr = { version = "1.9.1" } +bstr = { version = "1.10.0" } byteorder = { version = "1.5.0" } bytes = { version = "1.7.2", features = ["serde"] } chrono = { version = "0.4.38", features = ["serde"] } @@ -36,7 +36,7 @@ clap = { version = "4.5.20", features = ["cargo", "derive", "env", "wrap_help"] clap_builder = { version = "4.5.20", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } console = { version = "0.15.8" } crossbeam-epoch = { version = "0.9.18" } -crossbeam-utils = { version = "0.8.19" } +crossbeam-utils = { version = "0.8.20" } crossterm = { version = "0.28.1", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } curve25519-dalek = { version = "4.1.3", features = ["digest", "legacy_compatibility", "rand_core"] } @@ -58,8 +58,9 @@ futures-task = { version = "0.3.31", default-features = false, features = ["std" futures-util = { version = "0.3.31", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "9bbac475dcaac88286c07a20b6bd3e94fc81d7f0", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2.14", default-features = false, features = ["js", "rdrand", "std"] } +getrandom = { version = "0.2.15", default-features = false, features = ["js", 
"rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } +hashbrown = { version = "0.15.0" } hex = { version = "0.4.3", features = ["serde"] } hickory-proto = { version = "0.24.1", features = ["text-parsing"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } @@ -73,7 +74,7 @@ lazy_static = { version = "1.5.0", default-features = false, features = ["spin_n libc = { version = "0.2.159", features = ["extra_traits"] } log = { version = "0.4.22", default-features = false, features = ["kv_unstable", "std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2.7.2" } +memchr = { version = "2.7.4" } nom = { version = "7.1.3" } num-bigint-dig = { version = "0.8.4", default-features = false, features = ["i128", "prime", "serde", "u64_digit", "zeroize"] } num-integer = { version = "0.1.46", features = ["i128"] } @@ -83,12 +84,15 @@ openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_ser peg-runtime = { version = "0.8.3", default-features = false, features = ["std"] } pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } petgraph = { version = "0.6.5", features = ["serde-1"] } +phf = { version = "0.11.2" } +phf_shared = { version = "0.11.2" } pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption", "pem", "std"] } postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1", features = ["qtop"] } quote = { version = "1.0.37" } +rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } regex-automata = { version = "0.4.8", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", 
"unicode"] } regex-syntax = { version = "0.8.5" } @@ -107,7 +111,7 @@ slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "rele smallvec = { version = "1.13.2", default-features = false, features = ["const_new"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } -subtle = { version = "2.5.0" } +subtle = { version = "2.6.1" } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.79", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.36", features = ["formatting", "local-offset", "macros", "parsing"] } tokio = { version = "1.40.0", features = ["full", "test-util"] } @@ -118,13 +122,13 @@ toml = { version = "0.7.8" } toml_datetime = { version = "0.6.8", default-features = false, features = ["serde"] } toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.22", features = ["serde"] } tracing = { version = "0.1.40", features = ["log"] } -unicode-bidi = { version = "0.3.15" } -unicode-normalization = { version = "0.1.23" } +unicode-bidi = { version = "0.3.17" } +unicode-normalization = { version = "0.1.24" } usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.10.0", features = ["serde", "v4"] } x509-cert = { version = "0.2.5" } -zerocopy = { version = "0.7.34", features = ["derive", "simd"] } +zerocopy = { version = "0.7.35", features = ["derive", "simd"] } zeroize = { version = "1.8.1", features = ["std", "zeroize_derive"] } [build-dependencies] @@ -138,17 +142,17 @@ bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["serde", "std"] } -bstr = { version = "1.9.1" } +bstr = { version = "1.10.0" } byteorder = { version = "1.5.0" } bytes = { version = "1.7.2", features = ["serde"] } -cc = { version = 
"1.0.97", default-features = false, features = ["parallel"] } +cc = { version = "1.1.30", default-features = false, features = ["parallel"] } chrono = { version = "0.4.38", features = ["serde"] } cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } clap = { version = "4.5.20", features = ["cargo", "derive", "env", "wrap_help"] } clap_builder = { version = "4.5.20", default-features = false, features = ["cargo", "color", "env", "std", "suggestions", "usage", "wrap_help"] } console = { version = "0.15.8" } crossbeam-epoch = { version = "0.9.18" } -crossbeam-utils = { version = "0.8.19" } +crossbeam-utils = { version = "0.8.20" } crossterm = { version = "0.28.1", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } curve25519-dalek = { version = "4.1.3", features = ["digest", "legacy_compatibility", "rand_core"] } @@ -170,8 +174,9 @@ futures-task = { version = "0.3.31", default-features = false, features = ["std" futures-util = { version = "0.3.31", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "9bbac475dcaac88286c07a20b6bd3e94fc81d7f0", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2.14", default-features = false, features = ["js", "rdrand", "std"] } +getrandom = { version = "0.2.15", default-features = false, features = ["js", "rdrand", "std"] } group = { version = "0.13.0", default-features = false, features = ["alloc"] } +hashbrown = { version = "0.15.0" } hex = { version = "0.4.3", features = ["serde"] } hickory-proto = { version = "0.24.1", features = ["text-parsing"] } hmac = { version = "0.12.1", default-features = false, features = ["reset"] } @@ -185,7 +190,7 @@ lazy_static = { version = "1.5.0", default-features = false, features = 
["spin_n libc = { version = "0.2.159", features = ["extra_traits"] } log = { version = "0.4.22", default-features = false, features = ["kv_unstable", "std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2.7.2" } +memchr = { version = "2.7.4" } nom = { version = "7.1.3" } num-bigint-dig = { version = "0.8.4", default-features = false, features = ["i128", "prime", "serde", "u64_digit", "zeroize"] } num-integer = { version = "0.1.46", features = ["i128"] } @@ -195,12 +200,15 @@ openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_ser peg-runtime = { version = "0.8.3", default-features = false, features = ["std"] } pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } petgraph = { version = "0.6.5", features = ["serde-1"] } +phf = { version = "0.11.2" } +phf_shared = { version = "0.11.2" } pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption", "pem", "std"] } postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1", features = ["qtop"] } quote = { version = "1.0.37" } +rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } regex-automata = { version = "0.4.8", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.5" } @@ -219,7 +227,7 @@ slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "rele smallvec = { version = "1.13.2", default-features = false, features = ["const_new"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } -subtle = { version = "2.5.0" } +subtle = { version = "2.6.1" } syn-dff4ba8e3ae991db = { package = "syn", version = 
"1.0.109", features = ["extra-traits", "fold", "full", "visit"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.79", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.36", features = ["formatting", "local-offset", "macros", "parsing"] } @@ -232,14 +240,14 @@ toml = { version = "0.7.8" } toml_datetime = { version = "0.6.8", default-features = false, features = ["serde"] } toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.22", features = ["serde"] } tracing = { version = "0.1.40", features = ["log"] } -unicode-bidi = { version = "0.3.15" } -unicode-normalization = { version = "0.1.23" } -unicode-xid = { version = "0.2.4" } +unicode-bidi = { version = "0.3.17" } +unicode-normalization = { version = "0.1.24" } +unicode-xid = { version = "0.2.6" } usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.10.0", features = ["serde", "v4"] } x509-cert = { version = "0.2.5" } -zerocopy = { version = "0.7.34", features = ["derive", "simd"] } +zerocopy = { version = "0.7.35", features = ["derive", "simd"] } zeroize = { version = "1.8.1", features = ["std", "zeroize_derive"] } [target.x86_64-unknown-linux-gnu.dependencies] @@ -251,7 +259,7 @@ linux-raw-sys = { version = "0.4.14", default-features = false, features = ["elf mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.2" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } -rustls = { version = "0.23.10", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.x86_64-unknown-linux-gnu.build-dependencies] @@ -263,7 +271,7 @@ 
linux-raw-sys = { version = "0.4.14", default-features = false, features = ["elf mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.2" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } -rustls = { version = "0.23.10", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.x86_64-apple-darwin.dependencies] @@ -273,7 +281,7 @@ hyper-util = { version = "0.1.9", features = ["full"] } mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.2" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } -rustls = { version = "0.23.10", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.x86_64-apple-darwin.build-dependencies] @@ -283,7 +291,7 @@ hyper-util = { version = "0.1.9", features = ["full"] } mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.2" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } -rustls = { version = "0.23.10", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.aarch64-apple-darwin.dependencies] @@ -293,7 +301,7 
@@ hyper-util = { version = "0.1.9", features = ["full"] } mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.2" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } -rustls = { version = "0.23.10", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.aarch64-apple-darwin.build-dependencies] @@ -303,7 +311,7 @@ hyper-util = { version = "0.1.9", features = ["full"] } mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.2" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } -rustls = { version = "0.23.10", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.x86_64-unknown-illumos.dependencies] @@ -315,7 +323,7 @@ indicatif = { version = "0.17.8", features = ["rayon"] } mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.2" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } -rustls = { version = "0.23.10", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", 
features = ["serde"] } @@ -328,7 +336,7 @@ indicatif = { version = "0.17.8", features = ["rayon"] } mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.2" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } -rustls = { version = "0.23.10", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } From ffcdada624d7654986eb06edc697bfc434c2c19c Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 15 Oct 2024 13:25:40 -0700 Subject: [PATCH 04/16] Use explicit revs --- Cargo.lock | 6 ++++-- Cargo.toml | 4 ++-- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df2af29066..93e307242a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2223,7 +2223,8 @@ dependencies = [ [[package]] name = "diesel-dtrace" version = "0.3.0" -source = "git+https://github.com/oxidecomputer/diesel-dtrace?branch=main#f781d0dbc822adbb49fee509ccd9a5389c34e169" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5318329cce80f28564e585bb5ba4007bdf16865efa13d797a4f0fd4b1fed40f1" dependencies = [ "diesel", "serde", @@ -8713,7 +8714,8 @@ dependencies = [ [[package]] name = "qorb" version = "0.0.2" -source = "git+https://github.com/oxidecomputer/qorb?rev=88452176c7a9a573cf300d42a7c8147328224ce1#88452176c7a9a573cf300d42a7c8147328224ce1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "104066c20d7277d0af40a7333c579a2a71cc6b68c14982d1da2e5747a381a3ed" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 
502907eb6f..6168caee77 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -344,7 +344,7 @@ derive_more = "0.99.18" derive-where = "1.2.7" # Having the i-implement-... feature here makes diesel go away from the workspace-hack diesel = { version = "2.2.4", features = ["i-implement-a-third-party-backend-and-opt-into-breaking-changes", "postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } -diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", branch = "main" } +diesel-dtrace = "0.3.0" dns-server = { path = "dns-server" } dns-server-api = { path = "dns-server-api" } dns-service-client = { path = "clients/dns-service-client" } @@ -520,7 +520,7 @@ propolis_api_types = { git = "https://github.com/oxidecomputer/propolis", rev = propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } proptest = "1.5.0" -qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1" } +qorb = "0.0.2" quote = "1.0" rand = "0.8.5" rand_core = "0.6.4" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 4a7abc6e4c..b3ab1d5ed5 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -90,7 +90,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1", features = ["qtop"] } +qorb = { version = "0.0.2", features = ["qtop"] } quote = { version = "1.0.37" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } @@ -206,7 +206,7 @@ pkcs8 = { 
version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "88452176c7a9a573cf300d42a7c8147328224ce1", features = ["qtop"] } +qorb = { version = "0.0.2", features = ["qtop"] } quote = { version = "1.0.37" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } From 10a647c8d83267450139e2d251d9ec3c0aebef5e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 15 Oct 2024 18:16:11 -0700 Subject: [PATCH 05/16] Add timeout as tunable for initialization, qorb 0.1.0 --- Cargo.lock | 34 ++++--------------- Cargo.toml | 3 +- dev-tools/omdb/src/bin/omdb/db.rs | 4 +-- nexus-config/src/nexus_config.rs | 17 ++++++++-- nexus/db-queries/src/db/datastore/mod.rs | 17 ++++++++++ nexus/db-queries/src/db/pool.rs | 4 +-- nexus/src/app/mod.rs | 9 +++-- nexus/src/context.rs | 3 +- nexus/src/lib.rs | 17 ++++------ nexus/test-interface/src/lib.rs | 2 +- nexus/test-utils/src/lib.rs | 13 +++++-- .../tests/integration_tests/initialization.rs | 25 +++++++++----- nexus/tests/integration_tests/schema.rs | 6 ++-- workspace-hack/Cargo.toml | 4 +-- 14 files changed, 88 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2c20136a82..bd37df0fd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4457,7 +4457,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "progenitor", - "qorb 0.0.2", + "qorb", "reqwest 0.12.8", "serde", "serde_json", @@ -5532,7 +5532,7 @@ dependencies = [ "pq-sys", "predicates", "pretty_assertions", - "qorb 0.0.2", + "qorb", "rand", "rcgen", "ref-cast", @@ -6636,7 +6636,7 @@ dependencies = [ "pretty_assertions", "progenitor-client", "propolis-client 0.1.0 
(git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927)", - "qorb 0.0.2", + "qorb", "rand", "rcgen", "ref-cast", @@ -7074,7 +7074,7 @@ dependencies = [ "postgres-types", "predicates", "proc-macro2", - "qorb 0.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "qorb", "quote", "rand", "regex", @@ -8713,31 +8713,9 @@ dependencies = [ [[package]] name = "qorb" -version = "0.0.2" -dependencies = [ - "anyhow", - "async-trait", - "debug-ignore", - "derive-where", - "dropshot", - "futures", - "hickory-resolver", - "rand", - "schemars", - "serde", - "serde_json", - "thiserror", - "tokio", - "tokio-stream", - "tokio-tungstenite 0.23.1", - "tracing", -] - -[[package]] -name = "qorb" -version = "0.0.2" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "104066c20d7277d0af40a7333c579a2a71cc6b68c14982d1da2e5747a381a3ed" +checksum = "15601a1eaea70cb18e7a4f06c625b516d348a1b2d3dc1edbb93f0857636bcafe" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index dd580bf7c9..882d006337 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -520,8 +520,7 @@ propolis_api_types = { git = "https://github.com/oxidecomputer/propolis", rev = propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } proptest = "1.5.0" -# qorb = { git = "https://github.com/oxidecomputer/qorb", rev = "b9bf692a0c56458512cced4b3414b297e61e7d8f" } -qorb = { path = "../qorb" } +qorb = "0.1.0" quote = "1.0" rand = "0.8.5" rand_core = "0.6.4" diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 689a7501d0..e268cf200b 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -254,9 +254,7 @@ impl DbUrlOptions { // doesn't match what we expect. 
So we use `DataStore::new_unchecked()` // here. We will then check the schema version explicitly and warn the // user if it doesn't match. - let datastore = Arc::new( - DataStore::new_unchecked(log.clone(), pool) - ); + let datastore = Arc::new(DataStore::new_unchecked(log.clone(), pool)); check_schema_version(&datastore).await; Ok(datastore) } diff --git a/nexus-config/src/nexus_config.rs b/nexus-config/src/nexus_config.rs index 8c8c25b4c1..a0af65b6ec 100644 --- a/nexus-config/src/nexus_config.rs +++ b/nexus-config/src/nexus_config.rs @@ -270,6 +270,7 @@ pub struct MgdConfig { #[derive(Clone, Debug, Deserialize, PartialEq)] struct UnvalidatedTunables { max_vpc_ipv4_subnet_prefix: u8, + load_timeout: Option, } /// Tunable configuration parameters, intended for use in test environments or @@ -282,6 +283,11 @@ pub struct Tunables { /// Note that this is the maximum _prefix_ size, which sets the minimum size /// of the subnet. pub max_vpc_ipv4_subnet_prefix: u8, + + /// How long should we attempt to loop until the schema matches? + /// + /// If "None", nexus loops forever during initialization. + pub load_timeout: Option, } // Convert from the unvalidated tunables, verifying each parameter as needed. 
@@ -292,6 +298,7 @@ impl TryFrom for Tunables { Tunables::validate_ipv4_prefix(unvalidated.max_vpc_ipv4_subnet_prefix)?; Ok(Tunables { max_vpc_ipv4_subnet_prefix: unvalidated.max_vpc_ipv4_subnet_prefix, + load_timeout: unvalidated.load_timeout, }) } } @@ -341,7 +348,10 @@ pub const MAX_VPC_IPV4_SUBNET_PREFIX: u8 = 26; impl Default for Tunables { fn default() -> Self { - Tunables { max_vpc_ipv4_subnet_prefix: MAX_VPC_IPV4_SUBNET_PREFIX } + Tunables { + max_vpc_ipv4_subnet_prefix: MAX_VPC_IPV4_SUBNET_PREFIX, + load_timeout: None, + } } } @@ -1003,7 +1013,10 @@ mod test { trusted_root: Utf8PathBuf::from("/path/to/root.json"), }), schema: None, - tunables: Tunables { max_vpc_ipv4_subnet_prefix: 27 }, + tunables: Tunables { + max_vpc_ipv4_subnet_prefix: 27, + load_timeout: None + }, dendrite: HashMap::from([( SwitchLocation::Switch0, DpdConfig { diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index fa550b11a8..6516d35aca 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -215,15 +215,32 @@ impl DataStore { log: &Logger, pool: Arc, config: Option<&AllSchemaVersions>, + ) -> Result { + Self::new_with_timeout(log, pool, config, None).await + } + + pub async fn new_with_timeout( + log: &Logger, + pool: Arc, + config: Option<&AllSchemaVersions>, + try_for: Option, ) -> Result { let datastore = Self::new_unchecked(log.new(o!("component" => "datastore")), pool); + let start = std::time::Instant::now(); + // Keep looping until we find that the schema matches our expectation. 
const EXPECTED_VERSION: SemverVersion = nexus_db_model::SCHEMA_VERSION; retry_notify( retry_policy_internal_service(), || async { + if let Some(try_for) = try_for { + if std::time::Instant::now() > start + try_for { + return Err(BackoffError::permanent(())); + } + } + match datastore .ensure_schema(&log, EXPECTED_VERSION, config) .await diff --git a/nexus/db-queries/src/db/pool.rs b/nexus/db-queries/src/db/pool.rs index a0f1cd9086..bc6911ff9b 100644 --- a/nexus/db-queries/src/db/pool.rs +++ b/nexus/db-queries/src/db/pool.rs @@ -153,9 +153,7 @@ impl Pool { } /// Stops the qorb background tasks, and causes all future claims to fail - pub async fn terminate( - &self, - ) { + pub async fn terminate(&self) { let _termination_result = self.inner.terminate().await; self.terminated.store(true, std::sync::atomic::Ordering::SeqCst); } diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index cf28b9d4a0..41ab2f41da 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -240,8 +240,13 @@ impl Nexus { .transpose() .map_err(|error| format!("{error:#}"))?; let db_datastore = Arc::new( - db::DataStore::new(&log, Arc::clone(&pool), all_versions.as_ref()) - .await?, + db::DataStore::new_with_timeout( + &log, + Arc::clone(&pool), + all_versions.as_ref(), + config.pkg.tunables.load_timeout, + ) + .await?, ); db_datastore.register_producers(producer_registry); diff --git a/nexus/src/context.rs b/nexus/src/context.rs index 248246247b..ea3a071605 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -291,7 +291,8 @@ impl ServerContext { config, Arc::clone(&authz), ) - .await { + .await + { Ok(nexus) => nexus, Err(err) => { pool.terminate().await; diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 423d30fa3a..54e5ea022f 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -89,7 +89,8 @@ impl InternalServer { context.clone(), &log.new(o!("component" => "dropshot_internal")), ) - .map_err(|error| format!("initializing internal server: {}", error)) { + 
.map_err(|error| format!("initializing internal server: {}", error)) + { Ok(server) => server, Err(err) => { context.context.nexus.datastore().terminate().await; @@ -227,12 +228,11 @@ impl nexus_test_interface::NexusServer for Server { async fn start_internal( config: &NexusConfig, log: &Logger, - ) -> (InternalServer, SocketAddr) { - let internal_server = - InternalServer::start(config, &log).await.unwrap(); + ) -> Result<(InternalServer, SocketAddr), String> { + let internal_server = InternalServer::start(config, &log).await?; internal_server.apictx.context.nexus.wait_for_populate().await.unwrap(); let addr = internal_server.http_server_internal.local_addr(); - (internal_server, addr) + Ok((internal_server, addr)) } async fn start( @@ -401,12 +401,7 @@ impl nexus_test_interface::NexusServer for Server { .close_servers() .await .expect("failed to close servers during test cleanup"); - self.apictx - .context - .nexus - .datastore() - .terminate() - .await; + self.apictx.context.nexus.datastore().terminate().await; } } diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index c1e01cbc9e..190d89989e 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -50,7 +50,7 @@ pub trait NexusServer: Send + Sync + 'static { async fn start_internal( config: &NexusConfig, log: &Logger, - ) -> (Self::InternalServer, SocketAddr); + ) -> Result<(Self::InternalServer, SocketAddr), String>; #[allow(clippy::too_many_arguments)] async fn start( diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index d69abbd93e..bd1808f798 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -650,7 +650,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { } // Begin starting Nexus. 
- pub async fn start_nexus_internal(&mut self) { + pub async fn start_nexus_internal(&mut self) -> Result<(), String> { let log = &self.logctx.log; debug!(log, "Starting Nexus (internal API)"); @@ -672,7 +672,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { }; let (nexus_internal, nexus_internal_addr) = - N::start_internal(&self.config, &log).await; + N::start_internal(&self.config, &log).await?; let address = SocketAddrV6::new( match nexus_internal_addr.ip() { @@ -737,6 +737,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { self.nexus_internal = Some(nexus_internal); self.nexus_internal_addr = Some(nexus_internal_addr); + + Ok(()) } pub async fn populate_internal_dns(&mut self) { @@ -1361,7 +1363,12 @@ async fn setup_with_config_impl( ), ( "start_nexus_internal", - Box::new(|builder| builder.start_nexus_internal().boxed()), + Box::new(|builder| { + builder + .start_nexus_internal() + .map(|r| r.unwrap()) + .boxed() + }), ), ( "start_sled1", diff --git a/nexus/tests/integration_tests/initialization.rs b/nexus/tests/integration_tests/initialization.rs index a305a4178e..76ac75caa0 100644 --- a/nexus/tests/integration_tests/initialization.rs +++ b/nexus/tests/integration_tests/initialization.rs @@ -50,7 +50,9 @@ async fn test_nexus_boots_before_cockroach() { let nexus_log = log.clone(); let nexus_handle = tokio::task::spawn(async move { info!(nexus_log, "Test: Trying to start Nexus (internal)"); - omicron_nexus::Server::start_internal(&nexus_config, &nexus_log).await; + omicron_nexus::Server::start_internal(&nexus_config, &nexus_log) + .await + .unwrap(); info!(nexus_log, "Test: Started Nexus (internal)"); }); @@ -123,7 +125,9 @@ async fn test_nexus_boots_before_dendrite() { let nexus_log = log.clone(); let nexus_handle = tokio::task::spawn(async move { info!(nexus_log, "Test: Trying to start Nexus (internal)"); - omicron_nexus::Server::start_internal(&nexus_config, &nexus_log).await; + 
omicron_nexus::Server::start_internal(&nexus_config, &nexus_log) + .await + .unwrap(); info!(nexus_log, "Test: Started Nexus (internal)"); }); @@ -202,6 +206,9 @@ async fn test_nexus_does_not_boot_without_valid_schema() { for schema in schemas_to_test { let mut config = load_test_config(); + config.pkg.tunables.load_timeout = + Some(std::time::Duration::from_secs(5)); + let mut builder = ControlPlaneTestContextBuilder::::new( "test_nexus_does_not_boot_without_valid_schema", @@ -224,14 +231,14 @@ async fn test_nexus_does_not_boot_without_valid_schema() { .await .expect("Failed to update schema"); - assert!( - timeout( - std::time::Duration::from_secs(5), - builder.start_nexus_internal(), - ) + let err = builder + .start_nexus_internal() .await - .is_err(), - "Nexus should have failed to start" + .expect_err("Nexus should have failed to start"); + + assert!( + err.contains("Failed to read valid DB schema"), + "Saw error: {err}" ); builder.teardown().await; diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index c822ac686f..f852312de2 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -555,6 +555,8 @@ async fn dbinit_version_matches_version_known_to_nexus() { #[tokio::test] async fn nexus_cannot_apply_update_from_unknown_version() { let mut config = load_test_config(); + config.pkg.tunables.load_timeout = Some(std::time::Duration::from_secs(15)); + let mut builder = test_setup( &mut config, "nexus_cannot_apply_update_from_unknown_version", @@ -587,9 +589,7 @@ async fn nexus_cannot_apply_update_from_unknown_version() { .expect("Failed to update schema"); assert!( - timeout(Duration::from_secs(15), builder.start_nexus_internal()) - .await - .is_err(), + builder.start_nexus_internal().await.is_err(), "Nexus should not have started" ); diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index b3ab1d5ed5..2e00e1ea49 100644 --- a/workspace-hack/Cargo.toml +++ 
b/workspace-hack/Cargo.toml @@ -90,7 +90,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { version = "0.0.2", features = ["qtop"] } +qorb = { version = "0.1.0", features = ["qtop"] } quote = { version = "1.0.37" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } @@ -206,7 +206,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { version = "0.0.2", features = ["qtop"] } +qorb = { version = "0.1.0", features = ["qtop"] } quote = { version = "1.0.37" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } From fcedb8df80a70389ccc14fcd93af7b7fc5775305 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 15 Oct 2024 18:28:00 -0700 Subject: [PATCH 06/16] Stop internal server (and pool) during builder teardown --- nexus/src/lib.rs | 12 ++++++++++++ nexus/test-interface/src/lib.rs | 2 ++ nexus/test-utils/src/lib.rs | 3 +++ 3 files changed, 17 insertions(+) diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 54e5ea022f..a83b5eeae9 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -235,6 +235,18 @@ impl nexus_test_interface::NexusServer for Server { Ok((internal_server, addr)) } + // TODO: De-dup with the "fn close" below? 
+ async fn stop_internal(internal_server: InternalServer) { + internal_server + .apictx + .context + .nexus + .close_servers() + .await + .expect("failed to close servers during test cleanup"); + internal_server.apictx.context.nexus.datastore().terminate().await; + } + async fn start( internal_server: InternalServer, config: &NexusConfig, diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index 190d89989e..c89c4ce4c5 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -52,6 +52,8 @@ pub trait NexusServer: Send + Sync + 'static { log: &Logger, ) -> Result<(Self::InternalServer, SocketAddr), String>; + async fn stop_internal(internal_server: Self::InternalServer); + #[allow(clippy::too_many_arguments)] async fn start( internal_server: Self::InternalServer, diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index bd1808f798..53a9334eb4 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -1181,6 +1181,9 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { if let Some(server) = self.server { server.close().await; } + if let Some(nexus_internal) = self.nexus_internal { + N::stop_internal(nexus_internal).await; + } if let Some(mut database) = self.database { database.cleanup().await.unwrap(); } From 7f2b2bd852e2a11c86b088f6bb6e3a9d665efaf8 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 16 Oct 2024 12:05:54 -0700 Subject: [PATCH 07/16] De-duplicate Nexus termination code --- nexus/src/app/mod.rs | 14 ++++++++++++++ nexus/src/lib.rs | 11 ++++------- nexus/test-interface/src/lib.rs | 8 ++++++++ 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 41ab2f41da..2300dbce6a 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -653,6 +653,16 @@ impl Nexus { self.producer_server.lock().unwrap().replace(producer_server); } + /// Fully terminates Nexus. 
+ /// + /// Closes all running servers and the connection to the datastore. + pub(crate) async fn terminate(&self) -> Result<(), String> { + let mut res = Ok(()); + res = res.and(self.close_servers().await); + self.datastore().terminate().await; + res + } + /// Terminates all servers. /// /// This function also waits for the servers to shut down. @@ -682,6 +692,10 @@ impl Nexus { res } + /// Awaits termination without triggering it. + /// + /// To trigger termination, see: + /// - [Self::close_servers] or [Self::terminate] pub(crate) async fn wait_for_shutdown(&self) -> Result<(), String> { // The internal server is the last server to be closed. // diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index a83b5eeae9..634873ec84 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -235,16 +235,14 @@ impl nexus_test_interface::NexusServer for Server { Ok((internal_server, addr)) } - // TODO: De-dup with the "fn close" below? async fn stop_internal(internal_server: InternalServer) { internal_server .apictx .context .nexus - .close_servers() + .terminate() .await - .expect("failed to close servers during test cleanup"); - internal_server.apictx.context.nexus.datastore().terminate().await; + .expect("Failed to terminate Nexus"); } async fn start( @@ -410,10 +408,9 @@ impl nexus_test_interface::NexusServer for Server { self.apictx .context .nexus - .close_servers() + .terminate() .await - .expect("failed to close servers during test cleanup"); - self.apictx.context.nexus.datastore().terminate().await; + .expect("Failed to terminate Nexus"); } } diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index c89c4ce4c5..d011ceccf2 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -52,6 +52,14 @@ pub trait NexusServer: Send + Sync + 'static { log: &Logger, ) -> Result<(Self::InternalServer, SocketAddr), String>; + /// Stops the execution of a `Self::InternalServer`. 
+ /// + /// This is used to terminate a server which has been + /// partially created with `Self::start_internal`, but which + /// has not yet been passed to `Self::start`. + /// + /// Once `Self::start` has been called, the internal server + /// may be closed by invoking `Self::close`. async fn stop_internal(internal_server: Self::InternalServer); #[allow(clippy::too_many_arguments)] From 6326cdef59cd8ab91825b39f5219b3911e713f4e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 16 Oct 2024 13:01:13 -0700 Subject: [PATCH 08/16] Panic like a pro (thanks john) --- nexus/db-queries/src/db/pool.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/nexus/db-queries/src/db/pool.rs b/nexus/db-queries/src/db/pool.rs index bc6911ff9b..c42158a64f 100644 --- a/nexus/db-queries/src/db/pool.rs +++ b/nexus/db-queries/src/db/pool.rs @@ -171,7 +171,16 @@ impl Drop for Pool { // With this check, we'll reliably panic (rather than flake) if the pool // is dropped without terminating these worker tasks. if !self.terminated.load(std::sync::atomic::Ordering::SeqCst) { - panic!("Pool dropped without invoking `terminate`"); + // If we're already panicking, don't panic again. + // Doing so can ruin test handlers by aborting the process. + // + // Instead, just drop a message to stderr and carry on. 
+ let msg = "Pool dropped without invoking `terminate`"; + if std::thread::panicking() { + eprintln!("{msg}"); + } else { + panic!("{msg}"); + } } } } From 320960f3b1f8cf1d88c2d2484117596dd7435626 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 16 Oct 2024 17:48:14 -0700 Subject: [PATCH 09/16] Use a test wrapper for DB work in db-queries --- nexus/db-queries/src/db/collection_attach.rs | 105 ++++------ nexus/db-queries/src/db/collection_detach.rs | 70 +++---- .../src/db/collection_detach_many.rs | 92 ++++----- nexus/db-queries/src/db/collection_insert.rs | 26 +-- .../db-queries/src/db/datastore/allow_list.rs | 14 +- nexus/db-queries/src/db/datastore/bgp.rs | 10 +- .../src/db/datastore/cockroachdb_node_id.rs | 10 +- .../src/db/datastore/cockroachdb_settings.rs | 18 +- nexus/db-queries/src/db/datastore/dataset.rs | 11 +- .../src/db/datastore/db_metadata.rs | 32 +-- .../db-queries/src/db/datastore/deployment.rs | 45 ++--- .../deployment/external_networking.rs | 17 +- nexus/db-queries/src/db/datastore/disk.rs | 10 +- nexus/db-queries/src/db/datastore/dns.rs | 45 ++--- .../src/db/datastore/external_ip.rs | 10 +- nexus/db-queries/src/db/datastore/instance.rs | 66 +++---- .../db-queries/src/db/datastore/inventory.rs | 31 ++- nexus/db-queries/src/db/datastore/ip_pool.rs | 24 +-- .../src/db/datastore/ipv4_nat_entry.rs | 31 ++- .../db-queries/src/db/datastore/migration.rs | 10 +- nexus/db-queries/src/db/datastore/mod.rs | 139 ++++++------- .../src/db/datastore/network_interface.rs | 10 +- nexus/db-queries/src/db/datastore/oximeter.rs | 51 ++--- .../src/db/datastore/physical_disk.rs | 52 +++-- .../src/db/datastore/pub_test_utils.rs | 116 ++++++++++- nexus/db-queries/src/db/datastore/rack.rs | 52 +++-- .../src/db/datastore/region_replacement.rs | 24 +-- .../datastore/region_snapshot_replacement.rs | 52 +++-- nexus/db-queries/src/db/datastore/saga.rs | 46 ++--- nexus/db-queries/src/db/datastore/sled.rs | 56 +++--- .../src/db/datastore/switch_port.rs | 10 +- 
.../db-queries/src/db/datastore/test_utils.rs | 6 +- .../virtual_provisioning_collection.rs | 31 ++- nexus/db-queries/src/db/datastore/vmm.rs | 10 +- nexus/db-queries/src/db/datastore/volume.rs | 76 ++++---- .../src/db/datastore/volume_repair.rs | 10 +- nexus/db-queries/src/db/datastore/vpc.rs | 38 ++-- nexus/db-queries/src/db/explain.rs | 18 +- nexus/db-queries/src/db/lookup.rs | 17 +- nexus/db-queries/src/db/pagination.rs | 42 ++-- .../db-queries/src/db/queries/external_ip.rs | 172 +++++++++-------- .../src/db/queries/network_interface.rs | 182 +++++++++--------- nexus/db-queries/src/db/queries/next_item.rs | 60 ++---- nexus/db-queries/src/db/queries/oximeter.rs | 20 +- .../src/db/queries/region_allocation.rs | 11 +- .../virtual_provisioning_collection_update.rs | 38 ++-- nexus/db-queries/src/db/queries/vpc_subnet.rs | 44 +---- nexus/db-queries/src/policy_test/mod.rs | 19 +- nexus/db-queries/src/transaction_retry.rs | 17 +- nexus/metrics-producer-gc/src/lib.rs | 4 +- .../tasks/crdb_node_id_collector.rs | 6 +- 51 files changed, 929 insertions(+), 1177 deletions(-) diff --git a/nexus/db-queries/src/db/collection_attach.rs b/nexus/db-queries/src/db/collection_attach.rs index 066e42fae3..127861b2a8 100644 --- a/nexus/db-queries/src/db/collection_attach.rs +++ b/nexus/db-queries/src/db/collection_attach.rs @@ -577,7 +577,8 @@ where #[cfg(test)] mod test { use super::*; - use crate::db::{self, identity::Resource as IdentityResource}; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::identity::Resource as IdentityResource; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use chrono::Utc; use db_macros::Resource; @@ -585,7 +586,6 @@ mod test { use diesel::pg::Pg; use diesel::QueryDsl; use diesel::SelectableHelper; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_test_utils::dev; use uuid::Uuid; @@ -869,11 +869,9 @@ mod test { async fn 
test_attach_missing_collection_fails() { let logctx = dev::test_setup_log("test_attach_missing_collection_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -891,17 +889,15 @@ mod test { assert!(matches!(attach, Err(AttachError::CollectionNotFound))); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_missing_resource_fails() { let logctx = dev::test_setup_log("test_attach_missing_resource_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = setup_db(&pool).await; @@ -929,17 +925,15 @@ mod test { // The collection should remain unchanged. 
assert_eq!(collection, get_collection(collection_id, &conn).await); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_once() { let logctx = dev::test_setup_log("test_attach_once"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = setup_db(&pool).await; @@ -978,17 +972,15 @@ mod test { ); assert_eq!(returned_resource, get_resource(resource_id, &conn).await); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_once_synchronous() { let logctx = dev::test_setup_log("test_attach_once_synchronous"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = setup_db(&pool).await; @@ -1028,19 +1020,16 @@ mod test { ); assert_eq!(returned_resource, get_resource(resource_id, &conn).await); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_multiple_times() { let logctx = dev::test_setup_log("test_attach_multiple_times"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; const RESOURCE_COUNT: u32 = 5; @@ -1085,19 +1074,16 @@ mod test { ); } - pool.terminate().await; - db.cleanup().await.unwrap(); + 
db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_beyond_capacity_fails() { let logctx = dev::test_setup_log("test_attach_beyond_capacity_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1150,19 +1136,16 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_while_already_attached() { let logctx = dev::test_setup_log("test_attach_while_already_attached"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1258,19 +1241,16 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_with_filters() { let logctx = dev::test_setup_log("test_attach_once"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -1314,19 +1294,16 @@ mod test { 
assert_eq!(returned_resource, get_resource(resource_id, &conn).await); assert_eq!(returned_resource.description(), "new description"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_deleted_resource_fails() { let logctx = dev::test_setup_log("test_attach_deleted_resource_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -1360,19 +1337,16 @@ mod test { .await; assert!(matches!(attach, Err(AttachError::ResourceNotFound))); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_without_update_filter() { let logctx = dev::test_setup_log("test_attach_without_update_filter"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1417,8 +1391,7 @@ mod test { .collection_id .is_none()); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/collection_detach.rs b/nexus/db-queries/src/db/collection_detach.rs index ce41545487..cdf8e111c7 100644 --- a/nexus/db-queries/src/db/collection_detach.rs +++ b/nexus/db-queries/src/db/collection_detach.rs @@ -481,7 +481,8 @@ where mod test { use super::*; use 
crate::db::collection_attach::DatastoreAttachTarget; - use crate::db::{self, identity::Resource as IdentityResource}; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::identity::Resource as IdentityResource; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use chrono::Utc; use db_macros::Resource; @@ -489,7 +490,6 @@ mod test { use diesel::pg::Pg; use diesel::QueryDsl; use diesel::SelectableHelper; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_test_utils::dev; use uuid::Uuid; @@ -782,11 +782,9 @@ mod test { async fn test_detach_missing_collection_fails() { let logctx = dev::test_setup_log("test_detach_missing_collection_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -803,19 +801,16 @@ mod test { assert!(matches!(detach, Err(DetachError::CollectionNotFound))); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_missing_resource_fails() { let logctx = dev::test_setup_log("test_detach_missing_resource_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -840,19 +835,16 @@ mod test { // The collection should remain 
unchanged. assert_eq!(collection, get_collection(collection_id, &conn).await); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_once() { let logctx = dev::test_setup_log("test_detach_once"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -881,19 +873,16 @@ mod test { // The returned value should be the latest value in the DB. assert_eq!(returned_resource, get_resource(resource_id, &conn).await); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_while_already_detached() { let logctx = dev::test_setup_log("test_detach_while_already_detached"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -946,19 +935,16 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_deleted_resource_fails() { let logctx = dev::test_setup_log("test_detach_deleted_resource_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = 
setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -991,19 +977,16 @@ mod test { .await; assert!(matches!(detach, Err(DetachError::ResourceNotFound))); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_without_update_filter() { let logctx = dev::test_setup_log("test_detach_without_update_filter"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1049,8 +1032,7 @@ mod test { .collection_id .is_some()); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/collection_detach_many.rs b/nexus/db-queries/src/db/collection_detach_many.rs index eb1a67ffff..15267dd7ee 100644 --- a/nexus/db-queries/src/db/collection_detach_many.rs +++ b/nexus/db-queries/src/db/collection_detach_many.rs @@ -479,7 +479,8 @@ where mod test { use super::*; use crate::db::collection_attach::DatastoreAttachTarget; - use crate::db::{self, identity::Resource as IdentityResource}; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::identity::Resource as IdentityResource; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use chrono::Utc; use db_macros::Resource; @@ -487,7 +488,6 @@ mod test { use diesel::pg::Pg; use diesel::QueryDsl; use diesel::SelectableHelper; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; 
use omicron_test_utils::dev; use uuid::Uuid; @@ -774,11 +774,9 @@ mod test { async fn test_detach_missing_collection_fails() { let logctx = dev::test_setup_log("test_detach_missing_collection_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let _resource_id = uuid::Uuid::new_v4(); @@ -796,8 +794,7 @@ mod test { assert!(matches!(detach, Err(DetachManyError::CollectionNotFound))); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -805,11 +802,9 @@ mod test { async fn test_detach_missing_resource_succeeds() { let logctx = dev::test_setup_log("test_detach_missing_resource_succeeds"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let _resource_id = uuid::Uuid::new_v4(); @@ -839,19 +834,16 @@ mod test { get_collection(collection_id, &conn).await ); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_once() { let logctx = dev::test_setup_log("test_detach_once"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = 
setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -883,19 +875,16 @@ mod test { get_collection(collection_id, &conn).await ); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_once_synchronous() { let logctx = dev::test_setup_log("test_detach_once_synchronous"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -929,19 +918,16 @@ mod test { get_collection(collection_id, &conn).await ); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_while_already_detached() { let logctx = dev::test_setup_log("test_detach_while_already_detached"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -986,19 +972,16 @@ mod test { "... and again!" 
); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_filter_collection() { let logctx = dev::test_setup_log("test_detach_filter_collection"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1038,19 +1021,16 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_deleted_resource() { let logctx = dev::test_setup_log("test_detach_deleted_resource"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -1097,19 +1077,16 @@ mod test { &collection_id, ); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_many() { let logctx = dev::test_setup_log("test_detach_many"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; // Create the collection and some resources. 
let collection_id1 = uuid::Uuid::new_v4(); @@ -1173,8 +1150,7 @@ mod test { &collection_id2 ); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/collection_insert.rs b/nexus/db-queries/src/db/collection_insert.rs index fa1c17f789..7f8275e594 100644 --- a/nexus/db-queries/src/db/collection_insert.rs +++ b/nexus/db-queries/src/db/collection_insert.rs @@ -405,14 +405,14 @@ where #[cfg(test)] mod test { use super::*; - use crate::db::{self, identity::Resource as IdentityResource}; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::identity::Resource as IdentityResource; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use chrono::{DateTime, Utc}; use db_macros::Resource; use diesel::expression_methods::ExpressionMethods; use diesel::pg::Pg; use diesel::QueryDsl; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; table! { @@ -556,11 +556,9 @@ mod test { #[tokio::test] async fn test_collection_not_present() { let logctx = dev::test_setup_log("test_collection_not_present"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -579,19 +577,16 @@ mod test { .await; assert!(matches!(insert, Err(AsyncInsertError::CollectionNotFound))); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_collection_present() { let logctx = dev::test_setup_log("test_collection_present"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: 
db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -643,8 +638,7 @@ mod test { // Make sure rcgen got incremented assert_eq!(collection_rcgen, 2); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/allow_list.rs b/nexus/db-queries/src/db/datastore/allow_list.rs index dbe13aafaa..ce839ebcbc 100644 --- a/nexus/db-queries/src/db/datastore/allow_list.rs +++ b/nexus/db-queries/src/db/datastore/allow_list.rs @@ -83,19 +83,16 @@ impl super::DataStore { #[cfg(test)] mod tests { - use crate::db::{ - datastore::test_utils::datastore_test, - fixed_data::allow_list::USER_FACING_SERVICES_ALLOW_LIST_ID, - }; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::fixed_data::allow_list::USER_FACING_SERVICES_ALLOW_LIST_ID; use omicron_common::api::external; use omicron_test_utils::dev; #[tokio::test] async fn test_allowed_source_ip_database_ops() { let logctx = dev::test_setup_log("test_allowed_source_ip_database_ops"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Should have the default to start with. 
let record = datastore @@ -203,8 +200,7 @@ mod tests { "Updated allowed IPs are incorrect" ); - datastore.terminate().await; - db.cleanup().await.expect("failed to cleanup database"); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/bgp.rs b/nexus/db-queries/src/db/datastore/bgp.rs index eb7814ff9b..ccf5c6bb75 100644 --- a/nexus/db-queries/src/db/datastore/bgp.rs +++ b/nexus/db-queries/src/db/datastore/bgp.rs @@ -1000,8 +1000,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Name; use omicron_test_utils::dev; @@ -1011,8 +1010,8 @@ mod tests { let logctx = dev::test_setup_log( "test_delete_bgp_config_and_announce_set_by_name", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let config_name: Name = "testconfig47".parse().unwrap(); let announce_name: Name = "testannounce47".parse().unwrap(); @@ -1069,8 +1068,7 @@ mod tests { .await .expect("delete announce set by name"); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/cockroachdb_node_id.rs b/nexus/db-queries/src/db/datastore/cockroachdb_node_id.rs index 233194e0f4..1c1a699c26 100644 --- a/nexus/db-queries/src/db/datastore/cockroachdb_node_id.rs +++ b/nexus/db-queries/src/db/datastore/cockroachdb_node_id.rs @@ -82,16 +82,15 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + 
use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; #[tokio::test] async fn test_cockroachdb_node_id() { let logctx = dev::test_setup_log("test_service_network_interfaces_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Make up a CRDB zone id. let crdb_zone_id = OmicronZoneUuid::new_v4(); @@ -160,8 +159,7 @@ mod tests { .expect("looked up node ID"); assert_eq!(node_id.as_deref(), Some(fake_node_id)); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs b/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs index 5994abdd2e..ba7c302f83 100644 --- a/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs +++ b/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs @@ -133,24 +133,17 @@ impl DataStore { #[cfg(test)] mod test { - use super::{CockroachDbSettings, OpContext}; - use nexus_test_utils::db::test_setup_database; + use super::CockroachDbSettings; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_types::deployment::CockroachDbClusterVersion; use omicron_common::api::external::Error; use omicron_test_utils::dev; - use std::sync::Arc; #[tokio::test] async fn test_preserve_downgrade() { let logctx = dev::test_setup_log("test_preserve_downgrade"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = - crate::db::datastore::test_utils::datastore_test(&logctx, &db) - .await; - let opctx = OpContext::for_tests( - logctx.log.new(o!()), - Arc::clone(&datastore) as Arc, - ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let settings = 
datastore.cockroachdb_settings(&opctx).await.unwrap(); let version: CockroachDbClusterVersion = @@ -247,8 +240,7 @@ mod test { } } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index a764f8ca70..1843df0c7d 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -235,21 +235,19 @@ impl DataStore { #[cfg(test)] mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_db_model::Generation; use nexus_db_model::SledBaseboard; use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::internal::shared::DatasetKind as ApiDatasetKind; use omicron_test_utils::dev; #[tokio::test] async fn test_insert_if_not_exists() { let logctx = dev::test_setup_log("inventory_insert"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - let opctx = &opctx; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // There should be no datasets initially. 
assert_eq!( @@ -383,8 +381,7 @@ mod test { expected_datasets, ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index 1277de1983..fbb6cd35c8 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -496,32 +496,26 @@ impl DataStore { #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use camino::Utf8Path; use camino_tempfile::Utf8TempDir; use nexus_db_model::SCHEMA_VERSION; - use nexus_test_utils::db as test_db; use omicron_test_utils::dev; - use std::sync::Arc; // Confirms that calling the internal "ensure_schema" function can succeed // when the database is already at that version. #[tokio::test] async fn ensure_schema_is_current_version() { let logctx = dev::test_setup_log("ensure_schema_is_current_version"); - let mut crdb = test_db::test_setup_database(&logctx.log).await; - - let cfg = db::Config { url: crdb.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); - let datastore = - Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap()); + let db = TestDatabase::new_with_raw_datastore(&logctx.log).await; + let datastore = db.datastore(); datastore .ensure_schema(&logctx.log, SCHEMA_VERSION, None) .await .expect("Failed to ensure schema"); - datastore.terminate().await; - crdb.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -557,10 +551,8 @@ mod test { let logctx = dev::test_setup_log("concurrent_nexus_instances_only_move_forward"); let log = &logctx.log; - let mut crdb = test_db::test_setup_database(&logctx.log).await; - - let cfg = db::Config { url: crdb.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); + let db = 
TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // Mimic the layout of "schema/crdb". @@ -660,8 +652,7 @@ mod test { .collect::, _>>() .expect("Failed to create datastore"); - pool.terminate().await; - crdb.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -670,10 +661,8 @@ mod test { let logctx = dev::test_setup_log("schema_version_subcomponents_save_progress"); let log = &logctx.log; - let mut crdb = test_db::test_setup_database(&logctx.log).await; - - let cfg = db::Config { url: crdb.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // Mimic the layout of "schema/crdb". @@ -781,8 +770,7 @@ mod test { .expect("Failed to get data"); assert_eq!(data, "abcd"); - datastore.terminate().await; - crdb.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index af3c1d07cd..4398c4b13f 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1730,7 +1730,7 @@ impl RunQueryDsl for InsertTargetQuery {} mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::raw_query_builder::QueryBuilder; use nexus_inventory::now_db_precision; use nexus_inventory::CollectionBuilder; @@ -1738,7 +1738,6 @@ mod tests { use nexus_reconfigurator_planning::blueprint_builder::Ensure; use nexus_reconfigurator_planning::blueprint_builder::EnsureMultiple; use nexus_reconfigurator_planning::example::example; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::blueprint_zone_type; use 
nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; @@ -1906,8 +1905,8 @@ mod tests { async fn test_empty_blueprint() { // Setup let logctx = dev::test_setup_log("test_empty_blueprint"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create an empty blueprint from it let blueprint1 = BlueprintBuilder::build_empty_with_sleds( @@ -1956,8 +1955,7 @@ mod tests { // on other tests to check blueprint deletion. // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1966,8 +1964,8 @@ mod tests { const TEST_NAME: &str = "test_representative_blueprint"; // Setup let logctx = dev::test_setup_log(TEST_NAME); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a cohesive representative collection/policy/blueprint let (collection, planning_input, blueprint1) = @@ -2192,8 +2190,7 @@ mod tests { ); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2201,8 +2198,8 @@ mod tests { async fn test_set_target() { // Setup let logctx = dev::test_setup_log("test_set_target"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Trying to insert a target that doesn't reference a blueprint should // fail with a relevant error message. @@ -2379,8 +2376,7 @@ mod tests { ); // Clean up. 
- datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2388,8 +2384,8 @@ mod tests { async fn test_set_target_enabled() { // Setup let logctx = dev::test_setup_log("test_set_target_enabled"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create an initial empty collection let collection = CollectionBuilder::new("test").build(); @@ -2493,8 +2489,7 @@ mod tests { } // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2584,8 +2579,8 @@ mod tests { let logctx = dev::test_setup_log( "test_ensure_external_networking_works_with_good_target", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let blueprint = create_blueprint_with_external_ip(&datastore, &opctx).await; @@ -2611,8 +2606,7 @@ mod tests { .expect("Should be able to allocate external network resources"); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2622,8 +2616,8 @@ mod tests { let logctx = dev::test_setup_log( "test_ensure_external_networking_bails_on_bad_target", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create an initial empty collection let collection = CollectionBuilder::new("test").build(); @@ -2817,8 +2811,7 @@ mod tests { ); // Clean up. 
- datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs index 302a79160d..0f13050b98 100644 --- a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs +++ b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs @@ -408,7 +408,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use anyhow::Context as _; use async_bb8_diesel::AsyncSimpleConnection; @@ -417,7 +417,6 @@ mod tests { use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_db_model::SqlU16; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; @@ -876,8 +875,8 @@ mod tests { // Set up. usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_service_ip_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Generate the test values we care about. let mut harness = Harness::new(); @@ -1128,8 +1127,7 @@ mod tests { } // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1138,8 +1136,8 @@ mod tests { // Set up. 
usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_service_ip_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Generate the test values we care about. let harness = Harness::new(); @@ -1196,8 +1194,7 @@ mod tests { harness.assert_nics_are_deleted_in_datastore(&datastore).await; // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index d369cd17be..76f4055373 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -843,8 +843,7 @@ impl DataStore { mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_types::external_api::params; use omicron_common::api::external; use omicron_test_utils::dev; @@ -854,8 +853,8 @@ mod tests { let logctx = dev::test_setup_log("test_undelete_disk_set_faulted_idempotent"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, db_datastore) = (db.opctx(), db.datastore()); let silo_id = opctx.authn.actor().unwrap().silo_id().unwrap(); @@ -979,8 +978,7 @@ mod tests { ); } - db_datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/dns.rs b/nexus/db-queries/src/db/datastore/dns.rs index e86c61b8b9..9279933e47 100644 --- a/nexus/db-queries/src/db/datastore/dns.rs +++ 
b/nexus/db-queries/src/db/datastore/dns.rs @@ -729,7 +729,7 @@ impl DataStoreDnsTest for DataStore { #[cfg(test)] mod test { - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::DnsVersionUpdateBuilder; use crate::db::DataStore; use crate::db::TransactionError; @@ -744,7 +744,6 @@ mod test { use nexus_db_model::DnsZone; use nexus_db_model::Generation; use nexus_db_model::InitialDnsGroup; - use nexus_test_utils::db::test_setup_database; use nexus_types::internal_api::params::DnsRecord; use nexus_types::internal_api::params::Srv; use omicron_common::api::external::Error; @@ -758,8 +757,8 @@ mod test { #[tokio::test] async fn test_read_dns_config_uninitialized() { let logctx = dev::test_setup_log("test_read_dns_config_uninitialized"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // If we attempt to load the config when literally nothing related to // DNS has been initialized, we will get an InternalError because we @@ -834,8 +833,7 @@ mod test { version for DNS group External, found 0" ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -843,8 +841,8 @@ mod test { #[tokio::test] async fn test_read_dns_config_basic() { let logctx = dev::test_setup_log("test_read_dns_config_basic"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create exactly one zone with no names in it. // This will not show up in the read config. 
@@ -941,8 +939,7 @@ mod test { .expect("failed to read DNS config with batch size 1"); assert_eq!(dns_config_batch_1, dns_config); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -950,8 +947,8 @@ mod test { #[tokio::test] async fn test_read_dns_config_complex() { let logctx = dev::test_setup_log("test_read_dns_config_complex"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let batch_size = NonZeroU32::new(10).unwrap(); let now = Utc::now(); let log = &logctx.log; @@ -1312,8 +1309,7 @@ mod test { HashMap::from([("n1".to_string(), records_r2.clone())]) ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1321,8 +1317,8 @@ mod test { #[tokio::test] async fn test_dns_uniqueness() { let logctx = dev::test_setup_log("test_dns_uniqueness"); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let now = Utc::now(); // There cannot be two DNS zones in the same group with the same name. 
@@ -1418,8 +1414,7 @@ mod test { .contains("duplicate key value violates unique constraint")); } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1495,8 +1490,8 @@ mod test { #[tokio::test] async fn test_dns_update_incremental() { let logctx = dev::test_setup_log("test_dns_update_incremental"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let now = Utc::now(); // Create three DNS zones for testing: @@ -1866,16 +1861,15 @@ mod test { assert_eq!(dns_config.zones[1].zone_name, "oxide2.test"); assert_eq!(dns_config.zones[0].records, dns_config.zones[1].records,); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_dns_update_from_version() { let logctx = dev::test_setup_log("test_dns_update_from_version"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // The guts of `dns_update_from_version()` are shared with // `dns_update_incremental()`. 
The main cases worth testing here are @@ -1980,8 +1974,7 @@ mod test { assert!(!records.contains_key("krabappel")); assert!(records.contains_key("hoover")); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 2972c78e56..ef6716a43b 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -1132,8 +1132,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_types::deployment::OmicronZoneExternalFloatingIp; use nexus_types::deployment::OmicronZoneExternalSnatIp; use nexus_types::external_api::shared::IpRange; @@ -1164,8 +1163,8 @@ mod tests { async fn test_service_ip_list() { usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_service_ip_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // No IPs, to start let ips = read_all_service_ips(&datastore, &opctx).await; @@ -1246,8 +1245,7 @@ mod tests { let ips = read_all_service_ips(&datastore, &opctx).await; assert_eq!(ips, external_ips); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/instance.rs b/nexus/db-queries/src/db/datastore/instance.rs index c2f426ceb1..0698883891 100644 --- a/nexus/db-queries/src/db/datastore/instance.rs +++ b/nexus/db-queries/src/db/datastore/instance.rs @@ -1943,15 +1943,14 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; + use 
crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::sled; - use crate::db::datastore::test_utils::datastore_test; use crate::db::lookup::LookupPath; use crate::db::pagination::Paginator; use nexus_db_model::InstanceState; use nexus_db_model::Project; use nexus_db_model::VmmRuntimeState; use nexus_db_model::VmmState; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::identity::Asset; use nexus_types::silo::DEFAULT_SILO_ID; @@ -2034,8 +2033,8 @@ mod tests { async fn test_instance_updater_acquires_lock() { // Setup let logctx = dev::test_setup_log("test_instance_updater_acquires_lock"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let saga1 = Uuid::new_v4(); let saga2 = Uuid::new_v4(); let (authz_project, _) = create_test_project(&datastore, &opctx).await; @@ -2108,8 +2107,7 @@ mod tests { assert!(unlocked, "instance must actually be unlocked"); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2118,8 +2116,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_instance_updater_lock_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2173,8 +2171,7 @@ mod tests { assert!(!unlocked, "instance should already have been unlocked"); // Clean up. 
- datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2184,8 +2181,8 @@ mod tests { let logctx = dev::test_setup_log( "test_instance_updater_cant_unlock_someone_elses_instance_", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2267,8 +2264,7 @@ mod tests { assert!(!unlocked); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2277,8 +2273,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_unlocking_a_deleted_instance_is_okay"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2327,8 +2323,7 @@ mod tests { .expect("instance should unlock"); // Clean up. 
- datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2337,8 +2332,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_instance_commit_update_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2426,8 +2421,7 @@ mod tests { assert_eq!(instance.runtime().r#gen, new_runtime.r#gen); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2437,8 +2431,8 @@ mod tests { let logctx = dev::test_setup_log( "test_instance_update_invalidated_while_locked", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2517,8 +2511,7 @@ mod tests { assert_eq!(instance.runtime().nexus_state, new_runtime.nexus_state); // Clean up. 
- datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2526,8 +2519,8 @@ mod tests { async fn test_instance_fetch_all() { // Setup let logctx = dev::test_setup_log("test_instance_fetch_all"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2698,8 +2691,7 @@ mod tests { ); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2707,8 +2699,8 @@ mod tests { async fn test_instance_set_migration_ids() { // Setup let logctx = dev::test_setup_log("test_instance_set_migration_ids"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2966,8 +2958,7 @@ mod tests { assert_eq!(instance.runtime().dst_propolis_id, Some(vmm3.id)); // Clean up. 
- datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2977,8 +2968,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_instance_and_vmm_list_by_sled_agent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let mut expected_instances = BTreeSet::new(); @@ -3104,8 +3095,7 @@ mod tests { assert_eq!(expected_instances, found_instances); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 7dba515f05..4a2ab216a2 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -2444,7 +2444,7 @@ impl DataStoreInventoryTest for DataStore { #[cfg(test)] mod test { use crate::db::datastore::inventory::DataStoreInventoryTest; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::DataStoreConnection; use crate::db::raw_query_builder::{QueryBuilder, TrustedStr}; use crate::db::schema; @@ -2457,7 +2457,6 @@ mod test { use gateway_client::types::SpType; use nexus_inventory::examples::representative; use nexus_inventory::examples::Representative; - use nexus_test_utils::db::test_setup_database; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; use nexus_types::inventory::BaseboardId; use nexus_types::inventory::CabooseWhich; @@ -2511,8 +2510,8 @@ mod test { #[tokio::test] async fn test_find_hw_baseboard_id_missing_returns_not_found() { let logctx = dev::test_setup_log("inventory_insert"); - let mut db = 
test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let baseboard_id = BaseboardId { serial_number: "some-serial".into(), part_number: "some-part".into(), @@ -2522,8 +2521,7 @@ mod test { .await .unwrap_err(); assert!(matches!(err, Error::ObjectNotFound { .. })); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2533,8 +2531,8 @@ mod test { async fn test_inventory_insert() { // Setup let logctx = dev::test_setup_log("inventory_insert"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create an empty collection and write it to the database. let builder = nexus_inventory::CollectionBuilder::new("test"); @@ -3016,8 +3014,7 @@ mod test { assert_ne!(coll_counts.rot_pages, 0); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3100,8 +3097,8 @@ mod test { async fn test_inventory_deletion() { // Setup let logctx = dev::test_setup_log("inventory_deletion"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a representative collection and write it to the database. let Representative { builder, .. } = representative(); @@ -3138,8 +3135,7 @@ mod test { ); // Clean up. 
- datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3147,8 +3143,8 @@ mod test { async fn test_representative_collection_populates_database() { // Setup let logctx = dev::test_setup_log("inventory_deletion"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a representative collection and write it to the database. let Representative { builder, .. } = representative(); @@ -3164,8 +3160,7 @@ mod test { .expect("All inv_... tables should be populated by representative collection"); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index eb7d2aa9c3..9ea8f7b088 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -1121,12 +1121,11 @@ mod test { use std::num::NonZeroU32; use crate::authz; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::model::{ IpPool, IpPoolResource, IpPoolResourceType, Project, }; use assert_matches::assert_matches; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::address::{IpRange, Ipv4Range, Ipv6Range}; @@ -1139,8 +1138,8 @@ mod test { #[tokio::test] async fn test_default_ip_pools() { let logctx = dev::test_setup_log("test_default_ip_pools"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), 
db.datastore()); // we start out with no default pool, so we expect not found let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); @@ -1298,16 +1297,15 @@ mod test { .expect("Should list silo IP pools"); assert_eq!(silo_pools.len(), 0); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_internal_ip_pool() { let logctx = dev::test_setup_log("test_internal_ip_pool"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // confirm internal pool appears as internal let (authz_pool, _pool) = @@ -1353,16 +1351,15 @@ mod test { datastore.ip_pool_is_internal(&opctx, &authz_other_pool).await; assert_eq!(is_internal, Ok(false)); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_ip_pool_utilization() { let logctx = dev::test_setup_log("test_ip_utilization"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authz_silo = opctx.authn.silo_required().unwrap(); let project = Project::new( @@ -1505,8 +1502,7 @@ mod test { assert_eq!(max_ips.ipv4, 5); assert_eq!(max_ips.ipv6, 1208925819614629174706166); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs index 6c0676d4ee..80794f193a 100644 --- a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs +++ b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs @@ -379,10 +379,9 
@@ fn ipv4_nat_next_version() -> diesel::expression::SqlLiteral { mod test { use std::{net::Ipv4Addr, str::FromStr}; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use chrono::Utc; use nexus_db_model::{Ipv4NatEntry, Ipv4NatValues, MacAddr, Vni}; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external; use omicron_test_utils::dev; use rand::seq::IteratorRandom; @@ -391,8 +390,8 @@ mod test { #[tokio::test] async fn nat_version_tracking() { let logctx = dev::test_setup_log("test_nat_version_tracking"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = @@ -538,8 +537,7 @@ mod test { 3 ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -549,8 +547,8 @@ mod test { /// of properties. 
async fn table_allows_unique_active_multiple_deleted() { let logctx = dev::test_setup_log("test_nat_version_tracking"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = @@ -683,8 +681,7 @@ mod test { 4 ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -692,8 +689,8 @@ mod test { #[tokio::test] async fn ipv4_nat_sync_service_zones() { let logctx = dev::test_setup_log("ipv4_nat_sync_service_zones"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = @@ -806,8 +803,7 @@ mod test { && entry.version_removed.is_none() })); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -815,8 +811,8 @@ mod test { #[tokio::test] async fn ipv4_nat_changeset() { let logctx = dev::test_setup_log("test_nat_version_tracking"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = @@ -956,8 +952,7 @@ mod test { // did we see everything? 
assert_eq!(total_changes, db_records.len()); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/migration.rs b/nexus/db-queries/src/db/datastore/migration.rs index a1155f8bb7..e1d8c070e7 100644 --- a/nexus/db-queries/src/db/datastore/migration.rs +++ b/nexus/db-queries/src/db/datastore/migration.rs @@ -178,11 +178,10 @@ impl DataStore { mod tests { use super::*; use crate::authz; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::lookup::LookupPath; use crate::db::model::Instance; use nexus_db_model::Project; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::silo::DEFAULT_SILO_ID; use omicron_common::api::external::ByteCount; @@ -277,8 +276,8 @@ mod tests { async fn test_migration_query_by_instance() { // Setup let logctx = dev::test_setup_log("test_migration_query_by_instance"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authz_instance = create_test_instance(&datastore, &opctx).await; let instance_id = InstanceUuid::from_untyped_uuid(authz_instance.id()); @@ -356,8 +355,7 @@ mod tests { ); // Clean up. 
- datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 6516d35aca..d2d376b1f8 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -445,7 +445,7 @@ mod test { use crate::authn; use crate::authn::SiloAuthnPolicy; use crate::authz; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test_utils::{ IneligibleSledKind, IneligibleSleds, }; @@ -466,7 +466,6 @@ mod test { use nexus_db_fixed_data::silo::DEFAULT_SILO; use nexus_db_model::IpAttachState; use nexus_db_model::{to_db_typed_uuid, Generation}; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::silo::DEFAULT_SILO_ID; use omicron_common::api::external::{ @@ -508,8 +507,8 @@ mod test { #[tokio::test] async fn test_project_creation() { let logctx = dev::test_setup_log("test_project_creation"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authz_silo = opctx.authn.silo_required().unwrap(); @@ -538,16 +537,15 @@ mod test { .unwrap(); assert!(silo_after_project_create.rcgen > silo.rcgen); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_session_methods() { let logctx = dev::test_setup_log("test_session_methods"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authn_opctx = 
OpContext::for_background( logctx.log.new(o!("component" => "TestExternalAuthn")), Arc::new(authz::Authz::new(&logctx.log)), @@ -674,8 +672,7 @@ mod test { datastore.session_hard_delete(&opctx, &authz_session).await; assert_eq!(delete_again, Ok(())); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1005,8 +1002,8 @@ mod test { /// pool IDs should not matter. async fn test_region_allocation_strat_random() { let logctx = dev::test_setup_log("test_region_allocation_strat_random"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_datasets = TestDatasets::create( &opctx, datastore.clone(), @@ -1083,8 +1080,7 @@ mod test { } } - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1096,8 +1092,8 @@ mod test { let logctx = dev::test_setup_log( "test_region_allocation_strat_random_with_distinct_sleds", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a rack with enough sleds for a successful allocation when we // require 3 distinct eligible sleds. 
@@ -1173,8 +1169,7 @@ mod test { } } - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1185,8 +1180,8 @@ mod test { let logctx = dev::test_setup_log( "test_region_allocation_strat_random_with_distinct_sleds_fails", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a rack without enough sleds for a successful allocation when // we require 3 distinct provisionable sleds. @@ -1230,8 +1225,7 @@ mod test { assert!(matches!(err, Error::InsufficientCapacity { .. })); } - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1239,8 +1233,8 @@ mod test { async fn test_region_allocation_is_idempotent() { let logctx = dev::test_setup_log("test_region_allocation_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); TestDatasets::create( &opctx, datastore.clone(), @@ -1297,8 +1291,7 @@ mod test { assert_eq!(dataset_and_regions1[i], dataset_and_regions2[i],); } - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1307,8 +1300,8 @@ mod test { let logctx = dev::test_setup_log( "test_region_allocation_only_operates_on_zpools_in_inventory", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a sled... 
let sled_id = create_test_sled(&datastore).await; @@ -1398,8 +1391,7 @@ mod test { .await .expect("Allocation should have worked after adding zpools to inventory"); - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1407,8 +1399,8 @@ mod test { async fn test_region_allocation_not_enough_zpools() { let logctx = dev::test_setup_log("test_region_allocation_not_enough_zpools"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a sled... let sled_id = create_test_sled(&datastore).await; @@ -1484,8 +1476,7 @@ mod test { assert!(matches!(err, Error::InsufficientCapacity { .. })); - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1494,8 +1485,8 @@ mod test { let logctx = dev::test_setup_log( "test_region_allocation_only_considers_disks_in_service", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a sled... 
let sled_id = create_test_sled(&datastore).await; @@ -1603,8 +1594,7 @@ mod test { } } - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1612,8 +1602,8 @@ mod test { async fn test_region_allocation_out_of_space_fails() { let logctx = dev::test_setup_log("test_region_allocation_out_of_space_fails"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); TestDatasets::create( &opctx, @@ -1638,8 +1628,7 @@ mod test { .await .is_err()); - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1650,11 +1639,8 @@ mod test { use omicron_common::api::external; let logctx = dev::test_setup_log("test_queries_do_not_require_full_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - let datastore = - DataStore::new(&logctx.log, Arc::new(pool), None).await.unwrap(); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); let explanation = DataStore::get_allocated_regions_query(Uuid::nil()) .explain_async(&conn) @@ -1685,8 +1671,7 @@ mod test { explanation, ); - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1696,15 +1681,8 @@ mod test { use std::net::Ipv6Addr; let logctx = dev::test_setup_log("test_sled_ipv6_address_allocation"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); - let datastore = - Arc::new(DataStore::new(&logctx.log, 
pool, None).await.unwrap()); - let opctx = OpContext::for_tests( - logctx.log.new(o!()), - Arc::clone(&datastore) as Arc, - ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let rack_id = Uuid::new_v4(); let addr1 = "[fd00:1de::1]:12345".parse().unwrap(); @@ -1744,16 +1722,15 @@ mod test { let expected_ip = Ipv6Addr::new(0xfd00, 0x1df, 0, 0, 0, 0, 1, 0); assert_eq!(ip, expected_ip); - datastore.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_ssh_keys() { let logctx = dev::test_setup_log("test_ssh_keys"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a new Silo user so that we can lookup their keys. let authz_silo = authz::Silo::new( @@ -1829,16 +1806,15 @@ mod test { datastore.ssh_key_delete(&opctx, &authz_ssh_key).await.unwrap(); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_rack_initialize_is_idempotent() { let logctx = dev::test_setup_log("test_rack_initialize_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a Rack, insert it into the DB. 
let rack = Rack::new(Uuid::new_v4()); @@ -1871,16 +1847,15 @@ mod test { .unwrap(); assert!(result.initialized); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_table_scan() { let logctx = dev::test_setup_log("test_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let error = datastore.test_try_table_scan(&opctx).await; println!("error from attempted table scan: {:#}", error); @@ -1898,8 +1873,7 @@ mod test { } // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1911,8 +1885,8 @@ mod test { let logctx = dev::test_setup_log( "test_deallocate_external_ip_by_instance_id_is_idempotent", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let conn = datastore.pool_connection_for_tests().await.unwrap(); // Create a few records. 
@@ -1966,8 +1940,7 @@ mod test { .expect("Failed to delete instance external IPs"); assert_eq!(count, 0, "Expected to delete zero IPs for the instance"); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1978,8 +1951,8 @@ mod test { let logctx = dev::test_setup_log("test_deallocate_external_ip_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let conn = datastore.pool_connection_for_tests().await.unwrap(); // Create a record. @@ -2033,8 +2006,7 @@ mod test { .await .is_err()); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2047,8 +2019,8 @@ mod test { use diesel::result::Error::DatabaseError; let logctx = dev::test_setup_log("test_external_ip_check_constraints"); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); let now = Utc::now(); @@ -2282,8 +2254,7 @@ mod test { } } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index a652f6a5a5..b7f0622609 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -892,10 +892,9 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use 
nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_db_fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; - use nexus_test_utils::db::test_setup_database; use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; use omicron_test_utils::dev; use std::collections::BTreeSet; @@ -924,8 +923,8 @@ mod tests { usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_service_network_interfaces_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // No IPs, to start let nics = read_all_service_nics(&datastore, &opctx).await; @@ -991,8 +990,7 @@ mod tests { let nics = read_all_service_nics(&datastore, &opctx).await; assert_eq!(nics, service_nics); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/oximeter.rs b/nexus/db-queries/src/db/datastore/oximeter.rs index d8632b096a..be5ddb91bb 100644 --- a/nexus/db-queries/src/db/datastore/oximeter.rs +++ b/nexus/db-queries/src/db/datastore/oximeter.rs @@ -292,8 +292,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use db::datastore::pub_test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_types::internal_api::params; use omicron_common::api::internal::nexus; use omicron_test_utils::dev; @@ -345,9 +344,8 @@ mod tests { async fn test_oximeter_expunge() { // Setup let logctx = dev::test_setup_log("test_oximeter_expunge"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert a few Oximeter collectors. 
let mut collector_ids = @@ -447,8 +445,7 @@ mod tests { assert_eq!(expunged1a, expunged1b); // Cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -458,9 +455,8 @@ mod tests { let logctx = dev::test_setup_log( "test_producer_endpoint_reassigns_if_oximeter_expunged", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert an Oximeter collector. let oximeter1_id = Uuid::new_v4(); @@ -575,8 +571,7 @@ mod tests { } // Cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -586,9 +581,8 @@ mod tests { let logctx = dev::test_setup_log( "test_producer_endpoint_upsert_rejects_expunged_oximeters", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert a few Oximeter collectors. let collector_ids = (0..4).map(|_| Uuid::new_v4()).collect::>(); @@ -686,8 +680,7 @@ mod tests { ); // Cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -695,9 +688,8 @@ mod tests { async fn test_oximeter_reassigns_randomly() { // Setup let logctx = dev::test_setup_log("test_oximeter_reassigns_randomly"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert a few Oximeter collectors. 
let collector_ids = (0..4).map(|_| Uuid::new_v4()).collect::>(); @@ -791,8 +783,7 @@ mod tests { assert_eq!(producer_counts[1..].iter().sum::(), 1000); // Cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -802,9 +793,8 @@ mod tests { let logctx = dev::test_setup_log( "test_oximeter_reassign_fails_if_no_collectors", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert a few Oximeter collectors. let collector_ids = (0..4).map(|_| Uuid::new_v4()).collect::>(); @@ -899,8 +889,7 @@ mod tests { assert_eq!(nproducers, 100); // Cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -908,9 +897,8 @@ mod tests { async fn test_producers_list_expired() { // Setup let logctx = dev::test_setup_log("test_producers_list_expired"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert an Oximeter collector let collector_info = OximeterInfo::new(¶ms::OximeterInfo { @@ -977,8 +965,7 @@ mod tests { .await; assert_eq!(expired_producers.as_slice(), &[]); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index 3057954e13..73aa837af8 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -321,10 +321,10 @@ impl DataStore { #[cfg(test)] mod test { use super::*; + use 
crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; - use crate::db::datastore::test_utils::datastore_test; use crate::db::lookup::LookupPath; use crate::db::model::{PhysicalDiskKind, Sled, SledUpdate}; use dropshot::PaginationOrder; @@ -332,7 +332,6 @@ mod test { use nexus_sled_agent_shared::inventory::{ Baseboard, Inventory, InventoryDisk, OmicronZonesConfig, SledRole, }; - use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; use omicron_common::api::external::ByteCount; use omicron_common::disk::{DiskIdentity, DiskVariant}; @@ -372,8 +371,8 @@ mod test { async fn physical_disk_insert_same_uuid_collides() { let logctx = dev::test_setup_log("physical_disk_insert_same_uuid_collides"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore).await; let sled_id = sled.id(); @@ -405,8 +404,7 @@ mod test { "{err}" ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -414,8 +412,8 @@ mod test { async fn physical_disk_insert_different_disks() { let logctx = dev::test_setup_log("physical_disk_insert_different_disks"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore).await; let sled_id = sled.id(); @@ -455,16 +453,15 @@ mod test { .expect("Failed to list physical disks"); assert_eq!(disks.len(), 2); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn 
physical_disk_deletion_idempotency() { let logctx = dev::test_setup_log("physical_disk_deletion_idempotency"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore).await; @@ -506,8 +503,7 @@ mod test { .await .expect("Failed to delete disk"); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -521,8 +517,8 @@ mod test { let logctx = dev::test_setup_log( "physical_disk_insert_delete_reupsert_new_sled", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled_a = create_test_sled(&datastore).await; let sled_b = create_test_sled(&datastore).await; @@ -595,8 +591,7 @@ mod test { .expect("Failed to list physical disks"); assert_eq!(disks.len(), 1); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -611,8 +606,8 @@ mod test { async fn physical_disk_insert_reupsert_new_sled() { let logctx = dev::test_setup_log("physical_disk_insert_reupsert_new_sled"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled_a = create_test_sled(&datastore).await; let sled_b = create_test_sled(&datastore).await; @@ -674,8 +669,7 @@ mod test { .expect("Failed to list physical disks"); assert_eq!(disks.len(), 1); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -781,8 +775,8 @@ mod 
test { async fn physical_disk_cannot_insert_to_expunged_sled() { let logctx = dev::test_setup_log("physical_disk_cannot_insert_to_expunged_sled"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore).await; @@ -819,16 +813,15 @@ mod test { "Expected string: {expected} within actual error: {actual}", ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn physical_disk_uninitialized_list() { let logctx = dev::test_setup_log("physical_disk_uninitialized_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled_a = create_test_sled(&datastore).await; let sled_b = create_test_sled(&datastore).await; @@ -1006,8 +999,7 @@ mod test { .expect("Failed to list uninitialized disks"); assert_eq!(uninitialized_disks.len(), 0); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/pub_test_utils.rs b/nexus/db-queries/src/db/datastore/pub_test_utils.rs index bcf6a6c80f..233113ea83 100644 --- a/nexus/db-queries/src/db/datastore/pub_test_utils.rs +++ b/nexus/db-queries/src/db/datastore/pub_test_utils.rs @@ -12,32 +12,132 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::DataStore; -use dropshot::test_util::LogContext; use omicron_test_utils::dev::db::CockroachInstance; +use slog::Logger; use std::sync::Arc; use uuid::Uuid; +#[cfg(test)] +mod test { + use super::*; + use nexus_test_utils::db::test_setup_database; + + enum TestState { + 
Pool { pool: Arc }, + RawDatastore { datastore: Arc }, + Datastore { opctx: OpContext, datastore: Arc }, + } + + /// A test database with a pool connected to it. + pub struct TestDatabase { + db: CockroachInstance, + + state: TestState, + } + + impl TestDatabase { + /// Creates a new database for test usage, with a pool. + /// + /// [Self::terminate] should be called before the test finishes. + pub async fn new_with_pool(log: &Logger) -> Self { + let db = test_setup_database(log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = Arc::new(db::Pool::new_single_host(log, &cfg)); + Self { db, state: TestState::Pool { pool } } + } + + /// Creates a new database for test usage, with a pre-loaded datastore. + /// + /// [Self::terminate] should be called before the test finishes. + pub async fn new_with_datastore(log: &Logger) -> Self { + let db = test_setup_database(log).await; + let (opctx, datastore) = + crate::db::datastore::test_utils::datastore_test(log, &db) + .await; + + Self { db, state: TestState::Datastore { opctx, datastore } } + } + + /// Creates a new database for test usage, with a raw datastore. + /// + /// [Self::terminate] should be called before the test finishes. + pub async fn new_with_raw_datastore(log: &Logger) -> Self { + let db = test_setup_database(log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = Arc::new(db::Pool::new_single_host(log, &cfg)); + let datastore = + Arc::new(DataStore::new(&log, pool, None).await.unwrap()); + Self { db, state: TestState::RawDatastore { datastore } } + } + + pub fn pool(&self) -> &Arc { + match &self.state { + TestState::Pool { pool } => pool, + TestState::RawDatastore { .. } + | TestState::Datastore { .. } => { + panic!("Wrong test type; try using `TestDatabase::new_with_pool`"); + } + } + } + + pub fn opctx(&self) -> &OpContext { + match &self.state { + TestState::Pool { .. } | TestState::RawDatastore { .. 
} => { + panic!("Wrong test type; try using `TestDatabase::new_with_datastore`"); + } + TestState::Datastore { opctx, .. } => opctx, + } + } + + pub fn datastore(&self) -> &Arc { + match &self.state { + TestState::Pool { .. } => { + panic!("Wrong test type; try using `TestDatabase::new_with_datastore`"); + } + TestState::RawDatastore { datastore } => datastore, + TestState::Datastore { datastore, .. } => datastore, + } + } + + /// Shuts down both the database and the pool + pub async fn terminate(mut self) { + match self.state { + TestState::Pool { pool } => pool.terminate().await, + TestState::RawDatastore { datastore } => { + datastore.terminate().await + } + TestState::Datastore { datastore, .. } => { + datastore.terminate().await + } + } + self.db.cleanup().await.unwrap(); + } + } +} + +#[cfg(test)] +pub use test::TestDatabase; + /// Constructs a DataStore for use in test suites that has preloaded the /// built-in users, roles, and role assignments that are needed for basic /// operation #[cfg(any(test, feature = "testing"))] pub async fn datastore_test( - logctx: &LogContext, + log: &Logger, db: &CockroachInstance, rack_id: Uuid, ) -> (OpContext, Arc) { use crate::authn; let cfg = db::Config { url: db.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); - let datastore = - Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap()); + let pool = Arc::new(db::Pool::new_single_host(&log, &cfg)); + let datastore = Arc::new(DataStore::new(&log, pool, None).await.unwrap()); // Create an OpContext with the credentials of "db-init" just for the // purpose of loading the built-in users, roles, and assignments. 
let opctx = OpContext::for_background( - logctx.log.new(o!()), - Arc::new(authz::Authz::new(&logctx.log)), + log.new(o!()), + Arc::new(authz::Authz::new(&log)), authn::Context::internal_db_init(), Arc::clone(&datastore) as Arc, ); @@ -60,7 +160,7 @@ pub async fn datastore_test( // Create an OpContext with the credentials of "test-privileged" for general // testing. let opctx = OpContext::for_tests( - logctx.log.new(o!()), + log.new(o!()), Arc::clone(&datastore) as Arc, ); diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 4dc88cea93..ea0ef57908 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -997,10 +997,10 @@ impl DataStore { #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; - use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::Discoverability; use crate::db::model::ExternalIp; use crate::db::model::IpKind; @@ -1015,7 +1015,6 @@ mod test { SledBuilder, SystemDescription, }; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::BlueprintZonesConfig; use nexus_types::deployment::CockroachDbPreserveDowngrade; use nexus_types::deployment::{ @@ -1131,8 +1130,8 @@ mod test { #[tokio::test] async fn rack_set_initialized_empty() { let logctx = dev::test_setup_log("rack_set_initialized_empty"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let before = Utc::now(); let rack_init = RackInit::default(); @@ -1233,8 +1232,7 @@ mod test { .unwrap(); assert_eq!(dns_internal, dns_internal2); - datastore.terminate().await; - 
db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1318,8 +1316,8 @@ mod test { async fn rack_set_initialized_with_services() { let test_name = "rack_set_initialized_with_services"; let logctx = dev::test_setup_log(test_name); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled1 = create_test_sled(&datastore, Uuid::new_v4()).await; let sled2 = create_test_sled(&datastore, Uuid::new_v4()).await; @@ -1662,8 +1660,7 @@ mod test { let observed_datasets = get_all_datasets(&datastore).await; assert!(observed_datasets.is_empty()); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1671,8 +1668,8 @@ mod test { async fn rack_set_initialized_with_many_nexus_services() { let test_name = "rack_set_initialized_with_many_nexus_services"; let logctx = dev::test_setup_log(test_name); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore, Uuid::new_v4()).await; @@ -1948,8 +1945,7 @@ mod test { Some(&external_records) ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1958,8 +1954,8 @@ mod test { let test_name = "rack_set_initialized_missing_service_pool_ip_throws_error"; let logctx = dev::test_setup_log(test_name); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore, 
Uuid::new_v4()).await; @@ -2049,8 +2045,7 @@ mod test { assert!(get_all_datasets(&datastore).await.is_empty()); assert!(get_all_external_ips(&datastore).await.is_empty()); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2058,8 +2053,8 @@ mod test { async fn rack_set_initialized_overlapping_ips_throws_error() { let test_name = "rack_set_initialized_overlapping_ips_throws_error"; let logctx = dev::test_setup_log(test_name); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore, Uuid::new_v4()).await; @@ -2199,16 +2194,15 @@ mod test { assert!(get_all_datasets(&datastore).await.is_empty()); assert!(get_all_external_ips(&datastore).await.is_empty()); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn rack_sled_subnet_allocations() { let logctx = dev::test_setup_log("rack_sled_subnet_allocations"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let rack_id = Uuid::new_v4(); @@ -2293,16 +2287,15 @@ mod test { allocations.iter().map(|a| a.subnet_octet).collect::>() ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn allocate_sled_underlay_subnet_octets() { let logctx = dev::test_setup_log("rack_sled_subnet_allocations"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, 
datastore) = (db.opctx(), db.datastore()); let rack_id = Uuid::new_v4(); @@ -2488,8 +2481,7 @@ mod test { next_expected_octet += 1; } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/region_replacement.rs b/nexus/db-queries/src/db/datastore/region_replacement.rs index 922511ad49..508e80a63b 100644 --- a/nexus/db-queries/src/db/datastore/region_replacement.rs +++ b/nexus/db-queries/src/db/datastore/region_replacement.rs @@ -895,15 +895,14 @@ impl DataStore { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; #[tokio::test] async fn test_one_replacement_per_volume() { let logctx = dev::test_setup_log("test_one_replacement_per_volume"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let region_1_id = Uuid::new_v4(); let region_2_id = Uuid::new_v4(); @@ -921,8 +920,7 @@ mod test { .await .unwrap_err(); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -935,8 +933,8 @@ mod test { let logctx = dev::test_setup_log( "test_replacement_done_in_middle_of_drive_saga", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let region_id = Uuid::new_v4(); let volume_id = Uuid::new_v4(); @@ -1015,8 +1013,7 @@ mod test { ); assert_eq!(actual_request.operating_saga_id, None); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; 
logctx.cleanup_successful(); } @@ -1028,8 +1025,8 @@ mod test { let logctx = dev::test_setup_log( "test_replacement_done_in_middle_of_finish_saga", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let region_id = Uuid::new_v4(); let volume_id = Uuid::new_v4(); @@ -1083,8 +1080,7 @@ mod test { .await .unwrap(); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs b/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs index 43424b345e..d9c8a8b258 100644 --- a/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs +++ b/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs @@ -1059,16 +1059,15 @@ impl DataStore { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::model::RegionReplacement; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; #[tokio::test] async fn test_one_replacement_per_volume() { let logctx = dev::test_setup_log("test_one_replacement_per_volume"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let dataset_1_id = Uuid::new_v4(); let region_1_id = Uuid::new_v4(); @@ -1106,8 +1105,7 @@ mod test { .await .unwrap_err(); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1116,8 +1114,8 @@ mod test { let logctx = dev::test_setup_log( "test_one_replacement_per_volume_conflict_with_region", ); - let mut db = 
test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let dataset_1_id = Uuid::new_v4(); let region_1_id = Uuid::new_v4(); @@ -1147,16 +1145,15 @@ mod test { .await .unwrap_err(); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn count_replacement_steps() { let logctx = dev::test_setup_log("count_replacement_steps"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let dataset_id = Uuid::new_v4(); let region_id = Uuid::new_v4(); @@ -1302,8 +1299,7 @@ mod test { 1, ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1312,8 +1308,8 @@ mod test { let logctx = dev::test_setup_log( "unique_region_snapshot_replacement_step_per_volume", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Ensure that only one non-complete replacement step can be inserted // per volume. 
@@ -1404,16 +1400,15 @@ mod test { .await .unwrap(); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn region_snapshot_replacement_step_gc() { let logctx = dev::test_setup_log("region_snapshot_replacement_step_gc"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let mut request = RegionSnapshotReplacement::new( Uuid::new_v4(), @@ -1474,8 +1469,7 @@ mod test { .len(), ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1483,8 +1477,8 @@ mod test { async fn region_snapshot_replacement_step_conflict() { let logctx = dev::test_setup_log("region_snapshot_replacement_step_conflict"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Assert that a region snapshot replacement step cannot be created for // a volume that is the "old snapshot volume" for another snapshot @@ -1525,8 +1519,7 @@ mod test { } ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1536,8 +1529,8 @@ mod test { let logctx = dev::test_setup_log( "region_snapshot_replacement_step_conflict_with_region_replacement", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Assert that a region snapshot replacement step cannot be performed on // a volume if region replacement is occurring for that volume. 
@@ -1559,8 +1552,7 @@ mod test { .await .unwrap_err(); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/saga.rs b/nexus/db-queries/src/db/datastore/saga.rs index d59f5c4fe5..f1f0bd18cc 100644 --- a/nexus/db-queries/src/db/datastore/saga.rs +++ b/nexus/db-queries/src/db/datastore/saga.rs @@ -259,12 +259,11 @@ impl DataStore { #[cfg(test)] mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncSimpleConnection; use db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use nexus_db_model::{SagaNodeEvent, SecId}; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::Generation; use omicron_test_utils::dev; use rand::seq::SliceRandom; @@ -276,8 +275,8 @@ mod test { async fn test_list_candidate_sagas() { // Test setup let logctx = dev::test_setup_log("test_list_candidate_sagas"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sec_id = db::SecId(uuid::Uuid::new_v4()); let mut inserted_sagas = (0..SQL_BATCH_SIZE.get() * 2) .map(|_| SagaTestContext::new(sec_id).new_running_db_saga()) @@ -324,8 +323,7 @@ mod test { ); // Test cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -334,8 +332,8 @@ mod test { async fn test_list_unfinished_nodes() { // Test setup let logctx = dev::test_setup_log("test_list_unfinished_nodes"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = 
(db.opctx(), db.datastore()); let node_cx = SagaTestContext::new(SecId(Uuid::new_v4())); // Create a couple batches of saga events @@ -400,8 +398,7 @@ mod test { } // Test cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -410,8 +407,8 @@ mod test { async fn test_list_no_unfinished_nodes() { // Test setup let logctx = dev::test_setup_log("test_list_no_unfinished_nodes"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let saga_id = steno::SagaId(Uuid::new_v4()); // Test that this returns "no nodes" rather than throwing some "not @@ -426,8 +423,7 @@ mod test { assert_eq!(observed_nodes.len(), 0); // Test cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -435,8 +431,8 @@ mod test { async fn test_create_event_idempotent() { // Test setup let logctx = dev::test_setup_log("test_create_event_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let node_cx = SagaTestContext::new(SecId(Uuid::new_v4())); // Generate a bunch of events. 
@@ -469,8 +465,7 @@ mod test { } // Test cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -478,8 +473,8 @@ mod test { async fn test_update_state_idempotent() { // Test setup let logctx = dev::test_setup_log("test_create_event_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let node_cx = SagaTestContext::new(SecId(Uuid::new_v4())); // Create a saga in the running state. @@ -522,8 +517,7 @@ mod test { .expect("updating state to Done again"); // Test cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -568,9 +562,8 @@ mod test { async fn test_saga_reassignment() { // Test setup let logctx = dev::test_setup_log("test_saga_reassignment"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = datastore_test(&logctx, &db).await; - let opctx = OpContext::for_tests(logctx.log.clone(), datastore.clone()); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Populate the database with a few different sagas: // @@ -712,8 +705,7 @@ mod test { assert_eq!(nreassigned, 0); // Test cleanup - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index 75043a2115..8e37d7ae7f 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -824,12 +824,12 @@ impl TransitionError { #[cfg(test)] pub(in crate::db::datastore) mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test::{ sled_baseboard_for_test, 
sled_system_hardware_for_test, }; use crate::db::datastore::test_utils::{ - datastore_test, sled_set_policy, sled_set_state, Expected, - IneligibleSleds, + sled_set_policy, sled_set_state, Expected, IneligibleSleds, }; use crate::db::lookup::LookupPath; use crate::db::model::ByteCount; @@ -841,7 +841,6 @@ pub(in crate::db::datastore) mod test { use nexus_db_model::PhysicalDiskKind; use nexus_db_model::PhysicalDiskPolicy; use nexus_db_model::PhysicalDiskState; - use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; use omicron_common::api::external; use omicron_test_utils::dev; @@ -857,8 +856,8 @@ pub(in crate::db::datastore) mod test { #[tokio::test] async fn upsert_sled_updates_hardware() { let logctx = dev::test_setup_log("upsert_sled_updates_hardware"); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let mut sled_update = test_new_sled_update(); let (observed_sled, _) = @@ -908,8 +907,7 @@ pub(in crate::db::datastore) mod test { ); assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -918,8 +916,8 @@ pub(in crate::db::datastore) mod test { let logctx = dev::test_setup_log( "upsert_sled_updates_fails_with_stale_sled_agent_gen", ); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let mut sled_update = test_new_sled_update(); let (observed_sled, _) = @@ -973,8 +971,7 @@ pub(in crate::db::datastore) mod test { assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); assert_eq!(observed_sled.sled_agent_gen, sled_update.sled_agent_gen); - 
datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -982,8 +979,8 @@ pub(in crate::db::datastore) mod test { async fn upsert_sled_doesnt_update_decommissioned() { let logctx = dev::test_setup_log("upsert_sled_doesnt_update_decommissioned"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let mut sled_update = test_new_sled_update(); let (observed_sled, _) = @@ -1052,8 +1049,7 @@ pub(in crate::db::datastore) mod test { "reservoir_size should not have changed" ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1062,8 +1058,8 @@ pub(in crate::db::datastore) mod test { async fn sled_reservation_create_non_provisionable() { let logctx = dev::test_setup_log("sled_reservation_create_non_provisionable"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Define some sleds that resources cannot be provisioned on. 
let (non_provisionable_sled, _) = @@ -1140,8 +1136,7 @@ pub(in crate::db::datastore) mod test { .unwrap(); } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1168,9 +1163,8 @@ pub(in crate::db::datastore) mod test { async fn test_sled_expungement_also_expunges_disks() { let logctx = dev::test_setup_log("test_sled_expungement_also_expunges_disks"); - let mut db = test_setup_database(&logctx.log).await; - - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Set up a sled to test against. let (sled, _) = @@ -1266,8 +1260,7 @@ pub(in crate::db::datastore) mod test { lookup_physical_disk(&datastore, disk2.id()).await.disk_state ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1275,9 +1268,8 @@ pub(in crate::db::datastore) mod test { async fn test_sled_transitions() { // Test valid and invalid state and policy transitions. let logctx = dev::test_setup_log("test_sled_transitions"); - let mut db = test_setup_database(&logctx.log).await; - - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // This test generates all possible sets of transitions. Below, we list // the before and after predicates for valid transitions. 
@@ -1391,8 +1383,7 @@ pub(in crate::db::datastore) mod test { .unwrap(); } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1512,8 +1503,8 @@ pub(in crate::db::datastore) mod test { #[tokio::test] async fn sled_list_batch() { let logctx = dev::test_setup_log("sled_list_batch"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let size = usize::try_from(2 * SQL_BATCH_SIZE.get()).unwrap(); let mut new_sleds = Vec::with_capacity(size); @@ -1554,8 +1545,7 @@ pub(in crate::db::datastore) mod test { assert_eq!(expected_ids, found_ids); assert_eq!(found_ids.len(), size); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index 27a461328a..b332c57798 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -1619,9 +1619,8 @@ async fn do_switch_port_settings_delete( #[cfg(test)] mod test { - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::UpdatePrecondition; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params::{ BgpAnnounceSetCreate, BgpConfigCreate, BgpPeerConfig, SwitchPortConfigCreate, SwitchPortGeometry, SwitchPortSettingsCreate, @@ -1637,8 +1636,8 @@ mod test { #[tokio::test] async fn test_bgp_boundary_switches() { let logctx = dev::test_setup_log("test_bgp_boundary_switches"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = 
TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let rack_id: Uuid = nexus_test_utils::RACK_UUID.parse().expect("parse uuid"); @@ -1738,8 +1737,7 @@ mod test { assert_eq!(uplink_ports.len(), 1); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/test_utils.rs b/nexus/db-queries/src/db/datastore/test_utils.rs index 4678e07f47..75d8833873 100644 --- a/nexus/db-queries/src/db/datastore/test_utils.rs +++ b/nexus/db-queries/src/db/datastore/test_utils.rs @@ -13,7 +13,6 @@ use anyhow::bail; use anyhow::ensure; use anyhow::Context; use anyhow::Result; -use dropshot::test_util::LogContext; use futures::future::try_join_all; use nexus_db_model::SledState; use nexus_types::external_api::views::SledPolicy; @@ -21,16 +20,17 @@ use nexus_types::external_api::views::SledProvisionPolicy; use omicron_test_utils::dev::db::CockroachInstance; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; +use slog::Logger; use std::sync::Arc; use strum::EnumCount; use uuid::Uuid; pub(crate) async fn datastore_test( - logctx: &LogContext, + log: &Logger, db: &CockroachInstance, ) -> (OpContext, Arc) { let rack_id = Uuid::parse_str(nexus_test_utils::RACK_UUID).unwrap(); - super::pub_test_utils::datastore_test(logctx, db, rack_id).await + super::pub_test_utils::datastore_test(log, db, rack_id).await } /// Denotes a specific way in which a sled is ineligible. 
diff --git a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs index 08dbb13948..a72c032125 100644 --- a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs @@ -326,12 +326,11 @@ impl DataStore { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::lookup::LookupPath; use nexus_db_model::Instance; use nexus_db_model::Project; use nexus_db_model::SiloQuotasUpdate; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::silo::DEFAULT_SILO_ID; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -469,8 +468,8 @@ mod test { #[tokio::test] async fn test_instance_create_and_delete() { let logctx = dev::test_setup_log("test_instance_create_and_delete"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_data = setup_collections(&datastore, &opctx).await; let ids = test_data.ids(); @@ -531,8 +530,7 @@ mod test { verify_collection_usage(&datastore, &opctx, id, 0, 0, 0).await; } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -540,8 +538,8 @@ mod test { async fn test_instance_create_and_delete_twice() { let logctx = dev::test_setup_log("test_instance_create_and_delete_twice"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_data = setup_collections(&datastore, &opctx).await; 
let ids = test_data.ids(); @@ -645,16 +643,15 @@ mod test { verify_collection_usage(&datastore, &opctx, id, 0, 0, 0).await; } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_storage_create_and_delete() { let logctx = dev::test_setup_log("test_storage_create_and_delete"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_data = setup_collections(&datastore, &opctx).await; let ids = test_data.ids(); @@ -701,8 +698,7 @@ mod test { verify_collection_usage(&datastore, &opctx, id, 0, 0, 0).await; } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -710,8 +706,8 @@ mod test { async fn test_storage_create_and_delete_twice() { let logctx = dev::test_setup_log("test_storage_create_and_delete_twice"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_data = setup_collections(&datastore, &opctx).await; let ids = test_data.ids(); @@ -800,8 +796,7 @@ mod test { verify_collection_usage(&datastore, &opctx, id, 0, 0, 0).await; } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/vmm.rs b/nexus/db-queries/src/db/datastore/vmm.rs index 1d7120a3f1..e578bb1696 100644 --- a/nexus/db-queries/src/db/datastore/vmm.rs +++ b/nexus/db-queries/src/db/datastore/vmm.rs @@ -441,12 +441,11 @@ impl DataStore { mod tests { use super::*; use crate::db; - use crate::db::datastore::test_utils::datastore_test; + use 
crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::model::Generation; use crate::db::model::Migration; use crate::db::model::VmmRuntimeState; use crate::db::model::VmmState; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::internal::nexus; use omicron_test_utils::dev; use omicron_uuid_kinds::InstanceUuid; @@ -456,8 +455,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_vmm_and_migration_update_runtime"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let instance_id = InstanceUuid::from_untyped_uuid(Uuid::new_v4()); let vmm1 = datastore @@ -724,8 +723,7 @@ mod tests { ); // Clean up. - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 4b659f44b2..85c1401b82 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -2780,8 +2780,7 @@ impl DataStore { mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; use sled_agent_client::types::CrucibleOpts; @@ -2792,13 +2791,13 @@ mod tests { let logctx = dev::test_setup_log("test_deserialize_old_crucible_resources"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (_opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let datastore = db.datastore(); // Start with a fake volume, doesn't matter if it's empty let volume_id = Uuid::new_v4(); - let _volume = db_datastore + let _volume = datastore 
.volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -2819,8 +2818,7 @@ mod tests { { use db::schema::volume::dsl; - let conn = - db_datastore.pool_connection_unauthorized().await.unwrap(); + let conn = datastore.pool_connection_unauthorized().await.unwrap(); let resources_to_clean_up = r#"{ "V1": { @@ -2867,14 +2865,14 @@ mod tests { // Soft delete the volume - let cr = db_datastore.soft_delete_volume(volume_id).await.unwrap(); + let cr = datastore.soft_delete_volume(volume_id).await.unwrap(); // Assert the contents of the returned CrucibleResources let datasets_and_regions = - db_datastore.regions_to_delete(&cr).await.unwrap(); + datastore.regions_to_delete(&cr).await.unwrap(); let datasets_and_snapshots = - db_datastore.snapshots_to_delete(&cr).await.unwrap(); + datastore.snapshots_to_delete(&cr).await.unwrap(); assert!(datasets_and_regions.is_empty()); assert_eq!(datasets_and_snapshots.len(), 1); @@ -2887,8 +2885,7 @@ mod tests { ); assert_eq!(region_snapshot.deleting, false); - db_datastore.terminate().await; - db.cleanup().await.unwrap(); + datastore.terminate().await; logctx.cleanup_successful(); } @@ -2896,8 +2893,8 @@ mod tests { async fn test_volume_replace_region() { let logctx = dev::test_setup_log("test_volume_replace_region"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (_opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let datastore = db.datastore(); // Insert four Region records (three, plus one additionally allocated) @@ -2912,7 +2909,7 @@ mod tests { ]; { - let conn = db_datastore.pool_connection_for_tests().await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); for i in 0..4 { let (_, volume_id) = region_and_volume_ids[i]; @@ -2938,7 +2935,7 @@ mod tests { } } - let _volume = db_datastore + let _volume = datastore 
.volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -2978,7 +2975,7 @@ mod tests { let target = region_and_volume_ids[0]; let replacement = region_and_volume_ids[3]; - let volume_replace_region_result = db_datastore + let volume_replace_region_result = datastore .volume_replace_region( /* target */ db::datastore::VolumeReplacementParams { @@ -3003,7 +3000,7 @@ mod tests { assert_eq!(volume_replace_region_result, VolumeReplaceResult::Done); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + datastore.volume_get(volume_id).await.unwrap().unwrap().data(), ) .unwrap(); @@ -3040,7 +3037,7 @@ mod tests { ); // Now undo the replacement. Note volume ID is not swapped. - let volume_replace_region_result = db_datastore + let volume_replace_region_result = datastore .volume_replace_region( /* target */ db::datastore::VolumeReplacementParams { @@ -3065,7 +3062,7 @@ mod tests { assert_eq!(volume_replace_region_result, VolumeReplaceResult::Done); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + datastore.volume_get(volume_id).await.unwrap().unwrap().data(), ) .unwrap(); @@ -3101,8 +3098,7 @@ mod tests { }, ); - db_datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3110,8 +3106,8 @@ mod tests { async fn test_volume_replace_snapshot() { let logctx = dev::test_setup_log("test_volume_replace_snapshot"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (_opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let datastore = db.datastore(); // Insert two volumes: one with the target to replace, and one temporary // "volume to delete" that's blank. 
@@ -3120,7 +3116,7 @@ mod tests { let volume_to_delete_id = Uuid::new_v4(); let rop_id = Uuid::new_v4(); - db_datastore + datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -3179,7 +3175,7 @@ mod tests { .await .unwrap(); - db_datastore + datastore .volume_create(nexus_db_model::Volume::new( volume_to_delete_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -3195,7 +3191,7 @@ mod tests { // Do the replacement - let volume_replace_snapshot_result = db_datastore + let volume_replace_snapshot_result = datastore .volume_replace_snapshot( VolumeWithTarget(volume_id), ExistingTarget("[fd00:1122:3344:104::1]:400".parse().unwrap()), @@ -3212,7 +3208,7 @@ mod tests { // Ensure the shape of the resulting VCRs let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + datastore.volume_get(volume_id).await.unwrap().unwrap().data(), ) .unwrap(); @@ -3272,7 +3268,7 @@ mod tests { ); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore + datastore .volume_get(volume_to_delete_id) .await .unwrap() @@ -3313,7 +3309,7 @@ mod tests { // Now undo the replacement. Note volume ID is not swapped. 
- let volume_replace_snapshot_result = db_datastore + let volume_replace_snapshot_result = datastore .volume_replace_snapshot( VolumeWithTarget(volume_id), ExistingTarget("[fd55:1122:3344:101::1]:111".parse().unwrap()), @@ -3328,7 +3324,7 @@ mod tests { assert_eq!(volume_replace_snapshot_result, VolumeReplaceResult::Done,); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + datastore.volume_get(volume_id).await.unwrap().unwrap().data(), ) .unwrap(); @@ -3389,7 +3385,7 @@ mod tests { ); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore + datastore .volume_get(volume_to_delete_id) .await .unwrap() @@ -3428,8 +3424,7 @@ mod tests { }, ); - db_datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3438,14 +3433,14 @@ mod tests { let logctx = dev::test_setup_log("test_find_volumes_referencing_socket_addr"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let volume_id = Uuid::new_v4(); // case where the needle is found - db_datastore + datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -3482,7 +3477,7 @@ mod tests { .await .unwrap(); - let volumes = db_datastore + let volumes = datastore .find_volumes_referencing_socket_addr( &opctx, "[fd00:1122:3344:104::1]:400".parse().unwrap(), @@ -3495,7 +3490,7 @@ mod tests { // case where the needle is missing - let volumes = db_datastore + let volumes = datastore .find_volumes_referencing_socket_addr( &opctx, "[fd55:1122:3344:104::1]:400".parse().unwrap(), @@ -3505,8 +3500,7 @@ mod tests { assert!(volumes.is_empty()); - db_datastore.terminate().await; - db.cleanup().await.unwrap(); + 
db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/volume_repair.rs b/nexus/db-queries/src/db/datastore/volume_repair.rs index ba887048b4..115244f347 100644 --- a/nexus/db-queries/src/db/datastore/volume_repair.rs +++ b/nexus/db-queries/src/db/datastore/volume_repair.rs @@ -100,15 +100,14 @@ impl DataStore { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; #[tokio::test] async fn volume_lock_conflict_error_returned() { let logctx = dev::test_setup_log("volume_lock_conflict_error_returned"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let lock_1 = Uuid::new_v4(); let lock_2 = Uuid::new_v4(); @@ -123,8 +122,7 @@ mod test { assert!(matches!(err, Error::Conflict { .. 
})); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index 2763aadf39..e3bd33e0a4 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -2755,9 +2755,9 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test::sled_baseboard_for_test; use crate::db::datastore::test::sled_system_hardware_for_test; - use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::test_utils::IneligibleSleds; use crate::db::model::Project; use crate::db::queries::vpc::MAX_VNI_SEARCH_RANGE_SIZE; @@ -2768,7 +2768,6 @@ mod tests { use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::system::SledBuilder; use nexus_reconfigurator_planning::system::SystemDescription; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintZoneConfig; @@ -2798,8 +2797,8 @@ mod tests { "test_project_create_vpc_raw_returns_none_on_vni_exhaustion", ); let log = &logctx.log; - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a project. 
let project_params = params::ProjectCreate { @@ -2889,8 +2888,7 @@ mod tests { else { panic!("Expected Ok(None) when creating a VPC without any available VNIs"); }; - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2903,8 +2901,8 @@ mod tests { usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_project_create_vpc_retries"); let log = &logctx.log; - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a project. let project_params = params::ProjectCreate { @@ -3000,8 +2998,7 @@ mod tests { } Err(e) => panic!("Unexpected error when inserting VPC: {e}"), }; - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3049,8 +3046,8 @@ mod tests { let logctx = dev::test_setup_log( "test_vpc_resolve_to_sleds_uses_current_target_blueprint", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Set up our fake system with 5 sleds. 
let rack_id = Uuid::new_v4(); @@ -3286,8 +3283,7 @@ mod tests { ) .await; - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3412,8 +3408,8 @@ mod tests { let logctx = dev::test_setup_log("test_vpc_system_router_sync_to_subnets"); let log = &logctx.log; - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (_, authz_vpc, db_vpc, _, db_router) = create_initial_vpc(log, &opctx, &datastore).await; @@ -3547,8 +3543,7 @@ mod tests { ) .await; - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3640,8 +3635,8 @@ mod tests { let logctx = dev::test_setup_log("test_vpc_router_rule_instance_resolve"); let log = &logctx.log; - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, authz_vpc, db_vpc, authz_router, _) = create_initial_vpc(log, &opctx, &datastore).await; @@ -3777,8 +3772,7 @@ mod tests { _ => false, })); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/explain.rs b/nexus/db-queries/src/db/explain.rs index 823f064c91..284e96bc6e 100644 --- a/nexus/db-queries/src/db/explain.rs +++ b/nexus/db-queries/src/db/explain.rs @@ -94,10 +94,10 @@ mod test { use super::*; use crate::db; + use crate::db::datastore::pub_test_utils::TestDatabase; use async_bb8_diesel::AsyncSimpleConnection; use diesel::SelectableHelper; use expectorate::assert_contents; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use uuid::Uuid; @@ -142,9 +142,8 @@ mod 
test { #[tokio::test] async fn test_explain_async() { let logctx = dev::test_setup_log("test_explain_async"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); create_schema(&pool).await; @@ -158,8 +157,7 @@ mod test { .unwrap(); assert_contents("tests/output/test-explain-output", &explanation); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -168,9 +166,8 @@ mod test { #[tokio::test] async fn test_explain_full_table_scan() { let logctx = dev::test_setup_log("test_explain_full_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); create_schema(&pool).await; @@ -188,8 +185,7 @@ mod test { "Expected [{}] to contain 'FULL SCAN'", explanation ); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 6de950e935..43cd2a073f 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -958,24 +958,16 @@ mod test { use super::Instance; use super::LookupPath; use super::Project; - use crate::context::OpContext; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::model::Name; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; - use std::sync::Arc; /* This is a smoke test that things basically appear to work. 
*/ #[tokio::test] async fn test_lookup() { let logctx = dev::test_setup_log("test_lookup"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = - crate::db::datastore::test_utils::datastore_test(&logctx, &db) - .await; - let opctx = OpContext::for_tests( - logctx.log.new(o!()), - Arc::clone(&datastore) as Arc, - ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let project_name: Name = Name("my-project".parse().unwrap()); let instance_name: Name = Name("my-instance".parse().unwrap()); @@ -999,8 +991,7 @@ mod test { Project::PrimaryKey(_, p) if *p == project_id)); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/pagination.rs b/nexus/db-queries/src/db/pagination.rs index 785ac2ff54..a16591ad6c 100644 --- a/nexus/db-queries/src/db/pagination.rs +++ b/nexus/db-queries/src/db/pagination.rs @@ -343,11 +343,11 @@ mod test { use super::*; use crate::db; + use crate::db::datastore::pub_test_utils::TestDatabase; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use diesel::JoinOnDsl; use diesel::SelectableHelper; use dropshot::PaginationOrder; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::DataPageParams; use omicron_test_utils::dev; use std::num::NonZeroU32; @@ -489,9 +489,8 @@ mod test { async fn test_paginated_single_column_ascending() { let logctx = dev::test_setup_log("test_paginated_single_column_ascending"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_users::dsl; @@ -516,8 +515,7 @@ mod test { let observed = execute_query(&pool, query).await; assert_eq!(observed, vec![(2, 2), (3, 3)]); - 
pool.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -525,9 +523,8 @@ mod test { async fn test_paginated_single_column_descending() { let logctx = dev::test_setup_log("test_paginated_single_column_descending"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_users::dsl; @@ -552,8 +549,7 @@ mod test { let observed = execute_query(&pool, query).await; assert_eq!(observed, vec![(2, 2), (1, 1)]); - pool.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -561,9 +557,8 @@ mod test { async fn test_paginated_multicolumn_ascending() { let logctx = dev::test_setup_log("test_paginated_multicolumn_ascending"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_users::dsl; @@ -607,8 +602,7 @@ mod test { let observed = execute_query(&pool, query).await; assert_eq!(observed, vec![(1, 1), (2, 1), (3, 1), (1, 2), (2, 3)]); - pool.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -616,9 +610,8 @@ mod test { async fn test_paginated_multicolumn_descending() { let logctx = dev::test_setup_log("test_paginated_multicolumn_descending"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_users::dsl; @@ -662,8 +655,7 @@ mod test { let observed = execute_query(&pool, 
query).await; assert_eq!(observed, vec![(2, 3), (1, 2), (3, 1), (2, 1), (1, 1)]); - pool.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -673,9 +665,8 @@ mod test { let logctx = dev::test_setup_log("test_paginated_multicolumn_works_with_joins"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_phone_numbers::dsl as phone_numbers_dsl; use schema::test_users::dsl; @@ -764,8 +755,7 @@ mod test { &[((2, 3), 42), ((3, 1), 50), ((3, 1), 51), ((3, 1), 52)] ); - pool.terminate().await; - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index bd0b5be5ac..2f17a5e2b6 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -865,8 +865,7 @@ impl RunQueryDsl for NextExternalIp {} #[cfg(test)] mod tests { use crate::authz; - use crate::context::OpContext; - use crate::db::datastore::DataStore; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::SERVICE_IP_POOL_NAME; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; @@ -882,7 +881,6 @@ mod tests { use nexus_db_model::IpPoolResource; use nexus_db_model::IpPoolResourceType; use nexus_sled_agent_shared::inventory::ZoneKind; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::OmicronZoneExternalFloatingIp; use nexus_types::deployment::OmicronZoneExternalIp; use nexus_types::deployment::OmicronZoneExternalSnatIp; @@ -893,32 +891,25 @@ mod tests { use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_test_utils::dev; - use 
omicron_test_utils::dev::db::CockroachInstance; use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; use omicron_uuid_kinds::OmicronZoneUuid; use std::net::IpAddr; use std::net::Ipv4Addr; - use std::sync::Arc; use uuid::Uuid; struct TestContext { logctx: LogContext, - opctx: OpContext, - db: CockroachInstance, - db_datastore: Arc, + db: TestDatabase, } impl TestContext { async fn new(test_name: &str) -> Self { let logctx = dev::test_setup_log(test_name); let log = logctx.log.new(o!()); - let db = test_setup_database(&log).await; - let (opctx, db_datastore) = - crate::db::datastore::test_utils::datastore_test(&logctx, &db) - .await; - Self { logctx, opctx, db, db_datastore } + let db = TestDatabase::new_with_datastore(&log).await; + Self { logctx, db } } /// Create pool, associate with current silo @@ -933,26 +924,28 @@ mod tests { description: format!("ip pool {}", name), }); - self.db_datastore - .ip_pool_create(&self.opctx, pool.clone()) + self.db + .datastore() + .ip_pool_create(self.db.opctx(), pool.clone()) .await .expect("Failed to create IP pool"); - let silo_id = self.opctx.authn.silo_required().unwrap().id(); + let silo_id = self.db.opctx().authn.silo_required().unwrap().id(); let association = IpPoolResource { resource_id: silo_id, resource_type: IpPoolResourceType::Silo, ip_pool_id: pool.id(), is_default, }; - self.db_datastore - .ip_pool_link_silo(&self.opctx, association) + self.db + .datastore() + .ip_pool_link_silo(self.db.opctx(), association) .await .expect("Failed to associate IP pool with silo"); self.initialize_ip_pool(name, range).await; - LookupPath::new(&self.opctx, &self.db_datastore) + LookupPath::new(self.db.opctx(), &self.db.datastore()) .ip_pool_id(pool.id()) .lookup_for(authz::Action::Read) .await @@ -964,8 +957,9 @@ mod tests { // Find the target IP pool use crate::db::schema::ip_pool::dsl as ip_pool_dsl; let conn = self - .db_datastore - 
.pool_connection_authorized(&self.opctx) + .db + .datastore() + .pool_connection_authorized(self.db.opctx()) .await .unwrap(); let pool = ip_pool_dsl::ip_pool @@ -984,8 +978,9 @@ mod tests { .values(pool_range) .execute_async( &*self - .db_datastore - .pool_connection_authorized(&self.opctx) + .db + .datastore() + .pool_connection_authorized(self.db.opctx()) .await .unwrap(), ) @@ -1012,8 +1007,9 @@ mod tests { }); let conn = self - .db_datastore - .pool_connection_authorized(&self.opctx) + .db + .datastore() + .pool_connection_authorized(self.db.opctx()) .await .unwrap(); @@ -1029,16 +1025,16 @@ mod tests { async fn default_pool_id(&self) -> Uuid { let (.., pool) = self - .db_datastore - .ip_pools_fetch_default(&self.opctx) + .db + .datastore() + .ip_pools_fetch_default(self.db.opctx()) .await .expect("Failed to lookup default ip pool"); pool.identity.id } - async fn success(mut self) { - self.db_datastore.terminate().await; - self.db.cleanup().await.unwrap(); + async fn success(self) { + self.db.terminate().await; self.logctx.cleanup_successful(); } } @@ -1060,9 +1056,10 @@ mod tests { let id = Uuid::new_v4(); let instance_id = InstanceUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + &context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1077,9 +1074,10 @@ mod tests { // The next allocation should fail, due to IP exhaustion let instance_id = InstanceUuid::new_v4(); let err = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1114,9 +1112,10 @@ mod tests { // the only address in the pool. 
let instance_id = context.create_instance("for-eph").await; let ephemeral_ip = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, /* pool_name = */ None, @@ -1133,9 +1132,10 @@ mod tests { // nor any SNAT IPs. let instance_id = context.create_instance("for-snat").await; let res = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1156,9 +1156,10 @@ mod tests { ); let res = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, /* pool_name = */ None, @@ -1210,9 +1211,10 @@ mod tests { for (expected_ip, expected_first_port) in external_ips.clone().take(2) { let instance_id = InstanceUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1229,8 +1231,9 @@ mod tests { // Release the first context - .db_datastore - .deallocate_external_ip(&context.opctx, ips[0].id) + .db + .datastore() + .deallocate_external_ip(&context.db.opctx(), ips[0].id) .await .expect("Failed to release the first external IP address"); @@ -1238,9 +1241,10 @@ mod tests { // released. let instance_id = InstanceUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1265,9 +1269,10 @@ mod tests { // from the original loop. 
let instance_id = InstanceUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1302,9 +1307,10 @@ mod tests { let pool_name = None; let ip = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + &context.db.opctx(), id, instance_id, pool_name, @@ -1350,9 +1356,10 @@ mod tests { // service. let service_id = OmicronZoneUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_3, @@ -1367,9 +1374,10 @@ mod tests { // Try allocating the same service IP again. let ip_again = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_3, @@ -1383,9 +1391,9 @@ mod tests { // Try allocating the same service IP once more, but do it with a // different UUID. let err = context - .db_datastore + .db.datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), service_id, ZoneKind::Nexus, OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { @@ -1403,9 +1411,9 @@ mod tests { // Try allocating the same service IP once more, but do it with a // different input address. 
let err = context - .db_datastore + .db.datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), service_id, ZoneKind::Nexus, OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { @@ -1429,9 +1437,9 @@ mod tests { .unwrap(), }); let err = context - .db_datastore + .db.datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), service_id, ZoneKind::BoundaryNtp, ip_10_0_0_3_snat_0, @@ -1456,9 +1464,10 @@ mod tests { }); let snat_service_id = OmicronZoneUuid::new_v4(); let snat_ip = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_32768, @@ -1477,9 +1486,10 @@ mod tests { // Try allocating the same service IP again. let snat_ip_again = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_32768, @@ -1505,9 +1515,9 @@ mod tests { .unwrap(), }); let err = context - .db_datastore + .db.datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_49152, @@ -1544,9 +1554,10 @@ mod tests { let service_id = OmicronZoneUuid::new_v4(); let err = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + &context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_5, @@ -1578,9 +1589,10 @@ mod tests { let instance_id = InstanceUuid::new_v4(); let id = Uuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + &context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1595,9 +1607,10 @@ mod tests { // Create a new IP, with the _same_ ID, and ensure we get back the same // value. 
let new_ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + &context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1648,9 +1661,10 @@ mod tests { let id = Uuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + &context.db.opctx(), id, instance_id, Some(p1), @@ -1693,9 +1707,10 @@ mod tests { let instance_id = context.create_instance(&format!("o{octet}")).await; let ip = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, Some(p1.clone()), @@ -1715,9 +1730,10 @@ mod tests { // Allocating another address should _fail_, and not use the first pool. let instance_id = context.create_instance("final").await; context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + &context.db.opctx(), Uuid::new_v4(), instance_id, Some(p1), diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index 01aa8aafe2..39c799d223 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -1797,6 +1797,7 @@ mod tests { use super::NUM_INITIAL_RESERVED_IP_ADDRESSES; use crate::authz; use crate::context::OpContext; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::DataStore; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; @@ -1811,7 +1812,6 @@ mod tests { use async_bb8_diesel::AsyncRunQueryDsl; use dropshot::test_util::LogContext; use model::NetworkInterfaceKind; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::external_api::params::InstanceCreate; use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; @@ -1822,7 +1822,6 @@ mod tests { use 
omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::MacAddr; use omicron_test_utils::dev; - use omicron_test_utils::dev::db::CockroachInstance; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; use oxnet::Ipv4Net; @@ -1832,7 +1831,6 @@ mod tests { use std::net::IpAddr; use std::net::Ipv4Addr; use std::net::Ipv6Addr; - use std::sync::Arc; use uuid::Uuid; // Add an instance. We'll use this to verify that the instance must be @@ -1964,9 +1962,7 @@ mod tests { // Context for testing network interface queries. struct TestContext { logctx: LogContext, - opctx: OpContext, - db: CockroachInstance, - db_datastore: Arc, + db: TestDatabase, project_id: Uuid, net1: Network, net2: Network, @@ -1976,10 +1972,8 @@ mod tests { async fn new(test_name: &str, n_subnets: u8) -> Self { let logctx = dev::test_setup_log(test_name); let log = logctx.log.new(o!()); - let db = test_setup_database(&log).await; - let (opctx, db_datastore) = - crate::db::datastore::test_utils::datastore_test(&logctx, &db) - .await; + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authz_silo = opctx.authn.silo_required().unwrap(); @@ -1994,11 +1988,11 @@ mod tests { }, ); let (.., project) = - db_datastore.project_create(&opctx, project).await.unwrap(); + datastore.project_create(&opctx, project).await.unwrap(); use crate::db::schema::vpc_subnet::dsl::vpc_subnet; let conn = - db_datastore.pool_connection_authorized(&opctx).await.unwrap(); + datastore.pool_connection_authorized(&opctx).await.unwrap(); let net1 = Network::new(n_subnets); let net2 = Network::new(n_subnets); for subnet in net1.subnets.iter().chain(net2.subnets.iter()) { @@ -2009,30 +2003,29 @@ mod tests { .unwrap(); } drop(conn); - Self { - logctx, - opctx, - db, - db_datastore, - project_id: project.id(), - net1, - net2, - } + Self { logctx, db, project_id: project.id(), net1, net2 } + } + + fn opctx(&self) -> &OpContext { 
+ self.db.opctx() + } + + fn datastore(&self) -> &DataStore { + self.db.datastore() } - async fn success(mut self) { - self.db_datastore.terminate().await; - self.db.cleanup().await.unwrap(); + async fn success(self) { + self.db.terminate().await; self.logctx.cleanup_successful(); } async fn create_stopped_instance(&self) -> Instance { instance_set_state( - &self.db_datastore, + self.datastore(), create_instance( - &self.opctx, + self.opctx(), self.project_id, - &self.db_datastore, + self.datastore(), ) .await, InstanceState::NoVmm, @@ -2042,11 +2035,11 @@ mod tests { async fn create_running_instance(&self) -> Instance { instance_set_state( - &self.db_datastore, + self.datastore(), create_instance( - &self.opctx, + self.opctx(), self.project_id, - &self.db_datastore, + self.datastore(), ) .await, InstanceState::Vmm, @@ -2079,17 +2072,17 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); // We should be able to delete twice, and be told that the first delete // modified the row and the second did not. let first_deleted = context - .db_datastore + .datastore() .service_delete_network_interface( - &context.opctx, + context.opctx(), service_id, inserted_interface.id(), ) @@ -2098,9 +2091,9 @@ mod tests { assert!(first_deleted, "first delete removed interface"); let second_deleted = context - .db_datastore + .datastore() .service_delete_network_interface( - &context.opctx, + context.opctx(), service_id, inserted_interface.id(), ) @@ -2111,9 +2104,9 @@ mod tests { // Attempting to delete a nonexistent interface should fail. 
let bogus_id = Uuid::new_v4(); let err = context - .db_datastore + .datastore() .service_delete_network_interface( - &context.opctx, + context.opctx(), service_id, bogus_id, ) @@ -2148,8 +2141,8 @@ mod tests { Some(requested_ip), ) .unwrap(); - let err = context.db_datastore - .instance_create_network_interface_raw(&context.opctx, interface.clone()) + let err = context.datastore() + .instance_create_network_interface_raw(context.opctx(), interface.clone()) .await .expect_err("Should not be able to create an interface for a running instance"); assert!( @@ -2178,9 +2171,9 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await @@ -2209,8 +2202,8 @@ mod tests { None, ) .unwrap(); - let err = context.db_datastore - .instance_create_network_interface_raw(&context.opctx, interface.clone()) + let err = context.datastore() + .instance_create_network_interface_raw(context.opctx(), interface.clone()) .await .expect_err("Should not be able to insert an interface for an instance that doesn't exist"); assert!( @@ -2248,9 +2241,9 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await @@ -2292,8 +2285,8 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); @@ -2311,8 +2304,8 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!(result, Err(InsertError::IpAddressNotAvailable(_))), @@ -2348,8 +2341,8 
@@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); assert_eq!(inserted_interface.mac.0, mac); @@ -2388,8 +2381,11 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw( + context.opctx(), + interface, + ) .await .expect("Failed to insert interface"); assert_eq!(*inserted_interface.slot, slot); @@ -2425,8 +2421,8 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); assert_eq!(inserted_interface.mac.0, mac); @@ -2448,8 +2444,11 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, new_interface) + .datastore() + .service_create_network_interface_raw( + context.opctx(), + new_interface, + ) .await; assert!( matches!(result, Err(InsertError::MacAddressNotAvailable(_))), @@ -2501,8 +2500,8 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); assert_eq!(*inserted_interface.slot, 0); @@ -2522,8 +2521,11 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, new_interface) + .datastore() + .service_create_network_interface_raw( + context.opctx(), + new_interface, + ) .await; assert!( matches!(result, Err(InsertError::SlotNotAvailable(0))), @@ -2550,9 
+2552,9 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await @@ -2569,8 +2571,8 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!( @@ -2600,8 +2602,8 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); let interface = IncompleteNetworkInterface::new_instance( @@ -2616,8 +2618,8 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!(result, Err(InsertError::NonUniqueVpcSubnets)), @@ -2644,16 +2646,16 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await .expect("Failed to insert interface"); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!( @@ -2686,8 +2688,8 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); let expected_address = "172.30.0.5".parse().unwrap(); @@ -2704,9 +2706,9 @@ mod tests { ) .unwrap(); let result = context - .db_datastore + .datastore() 
.instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface, ) .await; @@ -2740,9 +2742,9 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface, ) .await @@ -2751,9 +2753,9 @@ mod tests { // Next one should fail let instance = create_stopped_instance( - &context.opctx, + context.opctx(), context.project_id, - &context.db_datastore, + context.datastore(), ) .await; let instance_id = InstanceUuid::from_untyped_uuid(instance.id()); @@ -2769,8 +2771,8 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!(result, Err(InsertError::NoAvailableIpAddresses)), @@ -2802,9 +2804,9 @@ mod tests { ) .unwrap(); let result = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface, ) .await; @@ -2870,9 +2872,9 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await @@ -2905,9 +2907,9 @@ mod tests { ) .unwrap(); let result = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await diff --git a/nexus/db-queries/src/db/queries/next_item.rs b/nexus/db-queries/src/db/queries/next_item.rs index 3a65546e7c..0ec0727737 100644 --- a/nexus/db-queries/src/db/queries/next_item.rs +++ b/nexus/db-queries/src/db/queries/next_item.rs @@ -924,6 +924,7 @@ mod tests { use super::NextItem; use super::ShiftIndices; use crate::db; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::explain::ExplainableAsync as _; use crate::db::queries::next_item::NextItemSelfJoined; use 
async_bb8_diesel::AsyncRunQueryDsl; @@ -937,9 +938,7 @@ mod tests { use diesel::Column; use diesel::Insertable; use diesel::SelectableHelper; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; - use std::sync::Arc; use uuid::Uuid; table! { @@ -1102,11 +1101,8 @@ mod tests { async fn test_wrapping_next_item_query() { // Setup the test database let logctx = dev::test_setup_log("test_wrapping_next_item_query"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. @@ -1156,8 +1152,7 @@ mod tests { .unwrap(); assert_eq!(it.value, 2); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1166,11 +1161,8 @@ mod tests { // Setup the test database let logctx = dev::test_setup_log("test_next_item_query_is_ordered_by_indices"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. 
@@ -1213,8 +1205,7 @@ mod tests { "The next item query should not have further items to generate", ); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1257,11 +1248,8 @@ mod tests { async fn test_explain_next_item_self_joined() { // Setup the test database let logctx = dev::test_setup_log("test_explain_next_item_self_joined"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. @@ -1276,8 +1264,7 @@ mod tests { >::new_scoped(Uuid::nil(), i32::MIN, i32::MAX); let out = query.explain_async(&conn).await.unwrap(); println!("{out}"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1285,11 +1272,8 @@ mod tests { async fn test_next_item_self_joined() { // Setup the test database let logctx = dev::test_setup_log("test_next_item_self_joined"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. 
@@ -1313,8 +1297,7 @@ mod tests { .get_result_async(&*conn) .await .expect_err("should not be able to insert after the query range is exhausted"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1323,11 +1306,8 @@ mod tests { // Setup the test database let logctx = dev::test_setup_log("test_next_item_self_joined_with_gaps"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. @@ -1368,8 +1348,7 @@ mod tests { "Should have inserted the next skipped value" ); } - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1378,11 +1357,8 @@ mod tests { async fn print_next_item_query_forms() { // Setup the test database let logctx = dev::test_setup_log("print_next_item_query_forms"); - let log = logctx.log.new(o!()); - let db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. 
diff --git a/nexus/db-queries/src/db/queries/oximeter.rs b/nexus/db-queries/src/db/queries/oximeter.rs index 4a4bf97235..eee7dd5669 100644 --- a/nexus/db-queries/src/db/queries/oximeter.rs +++ b/nexus/db-queries/src/db/queries/oximeter.rs @@ -221,9 +221,9 @@ pub fn reassign_producers_query(oximeter_id: Uuid) -> TypedSqlQuery<()> { #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::explain::ExplainableAsync; use crate::db::raw_query_builder::expectorate_query_contents; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use std::time::Duration; use uuid::Uuid; @@ -266,10 +266,8 @@ mod test { #[tokio::test] async fn explainable_upsert_producer() { let logctx = dev::test_setup_log("explainable_upsert_producer"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let producer = internal::nexus::ProducerEndpoint { @@ -285,18 +283,15 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn explainable_reassign_producers() { let logctx = dev::test_setup_log("explainable_reassign_producers"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let oximeter_id = Uuid::nil(); @@ -307,8 +302,7 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - 
pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index c7e23be9db..a531e726b3 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -405,10 +405,10 @@ UNION #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::REGION_REDUNDANCY_THRESHOLD; use crate::db::explain::ExplainableAsync; use crate::db::raw_query_builder::expectorate_query_contents; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use uuid::Uuid; @@ -504,10 +504,8 @@ mod test { #[tokio::test] async fn explainable() { let logctx = dev::test_setup_log("explainable"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let volume_id = Uuid::new_v4(); @@ -546,8 +544,7 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs b/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs index 2784afe9c8..8d3ef320ee 100644 --- a/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs +++ b/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs @@ -478,9 +478,9 @@ FROM #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::explain::ExplainableAsync; use 
crate::db::raw_query_builder::expectorate_query_contents; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use uuid::Uuid; @@ -565,10 +565,8 @@ mod test { #[tokio::test] async fn explain_insert_storage() { let logctx = dev::test_setup_log("explain_insert_storage"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let id = Uuid::nil(); @@ -587,18 +585,15 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn explain_delete_storage() { let logctx = dev::test_setup_log("explain_delete_storage"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let id = Uuid::nil(); @@ -615,18 +610,15 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn explain_insert_instance() { let logctx = dev::test_setup_log("explain_insert_instance"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let id = 
InstanceUuid::nil(); @@ -642,18 +634,15 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn explain_delete_instance() { let logctx = dev::test_setup_log("explain_delete_instance"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let id = InstanceUuid::nil(); @@ -669,8 +658,7 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/queries/vpc_subnet.rs b/nexus/db-queries/src/db/queries/vpc_subnet.rs index ff34c50e51..17a362880f 100644 --- a/nexus/db-queries/src/db/queries/vpc_subnet.rs +++ b/nexus/db-queries/src/db/queries/vpc_subnet.rs @@ -288,14 +288,13 @@ impl InsertVpcSubnetError { mod test { use super::InsertVpcSubnetError; use super::InsertVpcSubnetQuery; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::explain::ExplainableAsync as _; use crate::db::model::VpcSubnet; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Name; use omicron_test_utils::dev; use std::convert::TryInto; - use std::sync::Arc; #[tokio::test] async fn explain_insert_query() { @@ -310,16 +309,11 @@ mod test { VpcSubnet::new(subnet_id, vpc_id, identity, ipv4_block, ipv6_block); let query = InsertVpcSubnetQuery::new(row); let logctx = dev::test_setup_log("explain_insert_query"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let 
cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); - let conn = pool.claim().await.unwrap(); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let conn = db.pool().claim().await.unwrap(); let explain = query.explain_async(&conn).await.unwrap(); println!("{explain}"); - pool.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -351,16 +345,8 @@ mod test { // Setup the test database let logctx = dev::test_setup_log("test_insert_vpc_subnet_query"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); - let db_datastore = Arc::new( - crate::db::DataStore::new(&log, Arc::clone(&pool), None) - .await - .unwrap(), - ); + let db = TestDatabase::new_with_raw_datastore(&logctx.log).await; + let db_datastore = db.datastore(); // We should be able to insert anything into an empty table. assert!( @@ -471,8 +457,7 @@ mod test { "Should be able to insert new VPC Subnet with non-overlapping IP ranges" ); - db_datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -545,16 +530,8 @@ mod test { // Setup the test database let logctx = dev::test_setup_log("test_insert_vpc_subnet_query_is_idempotent"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); - let db_datastore = Arc::new( - crate::db::DataStore::new(&log, Arc::clone(&pool), None) - .await - .unwrap(), - ); + let db = TestDatabase::new_with_raw_datastore(&logctx.log).await; + let db_datastore = db.datastore(); // We should be able to insert anything into an empty table. 
let inserted = db_datastore @@ -573,8 +550,7 @@ mod test { "Must be able to insert the exact same VPC subnet more than once", ); assert_rows_eq(&inserted, &row); - db_datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/policy_test/mod.rs b/nexus/db-queries/src/policy_test/mod.rs index a14ffb943b..be81f58138 100644 --- a/nexus/db-queries/src/policy_test/mod.rs +++ b/nexus/db-queries/src/policy_test/mod.rs @@ -14,7 +14,7 @@ mod coverage; mod resource_builder; mod resources; -use crate::db; +use crate::db::datastore::pub_test_utils::TestDatabase; use coverage::Coverage; use futures::StreamExt; use nexus_auth::authn; @@ -22,7 +22,6 @@ use nexus_auth::authn::SiloAuthnPolicy; use nexus_auth::authn::USER_TEST_PRIVILEGED; use nexus_auth::authz; use nexus_auth::context::OpContext; -use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::shared; use nexus_types::external_api::shared::FleetRole; use nexus_types::external_api::shared::SiloRole; @@ -62,9 +61,8 @@ use uuid::Uuid; #[tokio::test(flavor = "multi_thread")] async fn test_iam_roles_behavior() { let logctx = dev::test_setup_log("test_iam_roles"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - db::datastore::test_utils::datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Before we can create the resources, users, and role assignments that we // need, we must grant the "test-privileged" user privileges to fetch and @@ -173,8 +171,7 @@ async fn test_iam_roles_behavior() { &std::str::from_utf8(buffer.as_ref()).expect("non-UTF8 output"), ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -330,9 +327,8 @@ impl Write for StdoutTee { async fn test_conferred_roles() { // To start, this test looks 
a lot like the test above. let logctx = dev::test_setup_log("test_conferred_roles"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - db::datastore::test_utils::datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Before we can create the resources, users, and role assignments that we // need, we must grant the "test-privileged" user privileges to fetch and @@ -465,7 +461,6 @@ async fn test_conferred_roles() { &std::str::from_utf8(buffer.as_ref()).expect("non-UTF8 output"), ); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/transaction_retry.rs b/nexus/db-queries/src/transaction_retry.rs index f8c59ee15b..9975d1cf3b 100644 --- a/nexus/db-queries/src/transaction_retry.rs +++ b/nexus/db-queries/src/transaction_retry.rs @@ -258,8 +258,7 @@ impl OptionalError { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; use oximeter::types::FieldValue; @@ -271,8 +270,8 @@ mod test { let logctx = dev::test_setup_log( "test_transaction_rollback_produces_no_samples", ); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); @@ -294,8 +293,7 @@ mod test { .clone(); assert_eq!(samples, vec![]); - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -305,8 +303,8 @@ mod test { async fn test_transaction_retry_produces_samples() { let logctx = 
dev::test_setup_log("test_transaction_retry_produces_samples"); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); datastore @@ -356,8 +354,7 @@ mod test { ); } - datastore.terminate().await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/metrics-producer-gc/src/lib.rs b/nexus/metrics-producer-gc/src/lib.rs index 5785af834f..32e2be5809 100644 --- a/nexus/metrics-producer-gc/src/lib.rs +++ b/nexus/metrics-producer-gc/src/lib.rs @@ -218,7 +218,7 @@ mod tests { let logctx = dev::test_setup_log("test_prune_expired_producers"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; // Insert an Oximeter collector let collector_info = OximeterInfo::new(¶ms::OximeterInfo { @@ -304,7 +304,7 @@ mod tests { ); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; let mut collector = httptest::Server::run(); diff --git a/nexus/src/app/background/tasks/crdb_node_id_collector.rs b/nexus/src/app/background/tasks/crdb_node_id_collector.rs index 0867aa9e17..ddb068226f 100644 --- a/nexus/src/app/background/tasks/crdb_node_id_collector.rs +++ b/nexus/src/app/background/tasks/crdb_node_id_collector.rs @@ -349,7 +349,7 @@ mod tests { let logctx = dev::test_setup_log("test_activate_fails_if_no_blueprint"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; let (_tx_blueprint, rx_blueprint) = 
watch::channel(None); let mut collector = @@ -381,7 +381,7 @@ mod tests { dev::test_setup_log("test_activate_with_no_unknown_node_ids"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; let blueprint = BlueprintBuilder::build_empty_with_sleds( iter::once(SledUuid::new_v4()), @@ -446,7 +446,7 @@ mod tests { let logctx = dev::test_setup_log("test_activate_with_unknown_node_ids"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; let blueprint = BlueprintBuilder::build_empty_with_sleds( iter::once(SledUuid::new_v4()), From 168fdfa31fc48c3221a1d9f9473c6cd689da5ce1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 16 Oct 2024 18:03:46 -0700 Subject: [PATCH 10/16] merge errors --- nexus/src/app/mod.rs | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 2300dbce6a..d6d0f22f94 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -673,21 +673,35 @@ impl Nexus { let external_server = self.external_server.lock().unwrap().take(); let mut res = Ok(()); + let extend_err = + |mut res: &mut Result<(), String>, mut new: Result<(), String>| { + match (&mut res, &mut new) { + (Err(s), Err(new_err)) => { + s.push_str(&format!(", {new_err}")) + } + (Ok(()), Err(_)) => *res = new, + (_, Ok(())) => (), + } + }; + if let Some(server) = external_server { - res = res.and(server.close().await); + extend_err(&mut res, server.close().await); } let techport_external_server = self.techport_external_server.lock().unwrap().take(); if let Some(server) = techport_external_server { - res = res.and(server.close().await); + extend_err(&mut res, server.close().await); } let internal_server = 
self.internal_server.lock().unwrap().take(); if let Some(server) = internal_server { - res = res.and(server.close().await); + extend_err(&mut res, server.close().await); } let producer_server = self.producer_server.lock().unwrap().take(); if let Some(server) = producer_server { - res = res.and(server.close().await.map_err(|e| e.to_string())); + extend_err( + &mut res, + server.close().await.map_err(|e| e.to_string()), + ); } res } From 144d5ea216b556f50e9a75474c43537083aa1e0f Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 17 Oct 2024 10:17:53 -0700 Subject: [PATCH 11/16] Stop CRDB in one of the volume tests --- nexus/db-queries/src/db/datastore/volume.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 85c1401b82..93ba737eb5 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -2885,7 +2885,7 @@ mod tests { ); assert_eq!(region_snapshot.deleting, false); - datastore.terminate().await; + db.terminate().await; logctx.cleanup_successful(); } From 95c68255ff52b8fc36544fe915226f9e7c07203b Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 17 Oct 2024 15:06:17 -0700 Subject: [PATCH 12/16] review feedback --- live-tests/tests/common/mod.rs | 5 +- .../src/db/datastore/external_ip.rs | 28 +++++------ .../src/db/datastore/pub_test_utils.rs | 9 ++-- .../db-queries/src/db/queries/external_ip.rs | 48 +++++++++---------- nexus/src/app/mod.rs | 2 +- 5 files changed, 47 insertions(+), 45 deletions(-) diff --git a/live-tests/tests/common/mod.rs b/live-tests/tests/common/mod.rs index 32c0bfb982..3e3e583870 100644 --- a/live-tests/tests/common/mod.rs +++ b/live-tests/tests/common/mod.rs @@ -46,9 +46,8 @@ impl LiveTestContext { /// Clean up this `LiveTestContext` /// - /// This mainly removes log files created by the test. 
We do this in this - /// explicit cleanup function rather than on `Drop` because we want the log - /// files preserved on test failure. + /// This removes log files and cleans up the [`DataStore`], which + /// must be terminated asynchronously. pub async fn cleanup_successful(self) { self.datastore.terminate().await; self.logctx.cleanup_successful(); diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index ef6716a43b..a03b6a6249 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -87,7 +87,7 @@ impl DataStore { probe_id: Uuid, pool: Option, ) -> CreateResult { - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = IncompleteExternalIp::for_ephemeral_probe( ip_id, probe_id, @@ -123,7 +123,7 @@ impl DataStore { // Naturally, we now *need* to destroy the ephemeral IP if the newly alloc'd // IP was not attached, including on idempotent success. - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = IncompleteExternalIp::for_ephemeral(ip_id, authz_pool.id()); // We might not be able to acquire a new IP, but in the event of an @@ -205,7 +205,7 @@ impl DataStore { // If no pool specified, use the default logic None => { let (authz_pool, ..)
= - self.ip_pools_fetch_default(&opctx).await?; + self.ip_pools_fetch_default(opctx).await?; authz_pool } }; @@ -224,7 +224,7 @@ impl DataStore { ) -> CreateResult { let ip_id = Uuid::new_v4(); - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = if let Some(ip) = ip { IncompleteExternalIp::for_floating_explicit( @@ -695,7 +695,7 @@ impl DataStore { ip_id: Uuid, instance_id: InstanceUuid, ) -> Result, Error> { - let _ = LookupPath::new(&opctx, self) + let _ = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -951,7 +951,7 @@ impl DataStore { instance_id: InstanceUuid, creating_instance: bool, ) -> UpdateResult<(ExternalIp, bool)> { - let (.., authz_instance) = LookupPath::new(&opctx, self) + let (.., authz_instance) = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -993,7 +993,7 @@ impl DataStore { instance_id: InstanceUuid, creating_instance: bool, ) -> UpdateResult<(ExternalIp, bool)> { - let (.., authz_instance) = LookupPath::new(&opctx, self) + let (.., authz_instance) = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -1167,7 +1167,7 @@ mod tests { let (opctx, datastore) = (db.opctx(), db.datastore()); // No IPs, to start - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, vec![]); // Set up service IP pool range @@ -1177,11 +1177,11 @@ mod tests { )) .unwrap(); let (service_ip_pool, _) = datastore - .ip_pools_service_lookup(&opctx) + .ip_pools_service_lookup(opctx) .await .expect("lookup service ip pool"); datastore - .ip_pool_add_range(&opctx, &service_ip_pool, &ip_range) + .ip_pool_add_range(opctx, &service_ip_pool, &ip_range) .await .expect("add range to 
service ip pool"); @@ -1207,7 +1207,7 @@ mod tests { }; let external_ip = datastore .external_ip_allocate_omicron_zone( - &opctx, + opctx, OmicronZoneUuid::new_v4(), ZoneKind::Nexus, external_ip, @@ -1220,7 +1220,7 @@ mod tests { external_ips.sort_by_key(|ip| ip.id); // Ensure we see them all. - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, external_ips); // Deallocate a few, and ensure we don't see them anymore. @@ -1229,7 +1229,7 @@ mod tests { if i % 3 == 0 { let id = external_ip.id; datastore - .deallocate_external_ip(&opctx, id) + .deallocate_external_ip(opctx, id) .await .expect("failed to deallocate IP"); removed_ip_ids.insert(id); @@ -1242,7 +1242,7 @@ mod tests { external_ips.retain(|ip| !removed_ip_ids.contains(&ip.id)); // Ensure we see them all remaining IPs. - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, external_ips); db.terminate().await; diff --git a/nexus/db-queries/src/db/datastore/pub_test_utils.rs b/nexus/db-queries/src/db/datastore/pub_test_utils.rs index 233113ea83..1572861f2e 100644 --- a/nexus/db-queries/src/db/datastore/pub_test_utils.rs +++ b/nexus/db-queries/src/db/datastore/pub_test_utils.rs @@ -38,7 +38,8 @@ mod test { impl TestDatabase { /// Creates a new database for test usage, with a pool. /// - /// [Self::terminate] should be called before the test finishes. + /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. pub async fn new_with_pool(log: &Logger) -> Self { let db = test_setup_database(log).await; let cfg = db::Config { url: db.pg_config().clone() }; @@ -48,7 +49,8 @@ mod test { /// Creates a new database for test usage, with a pre-loaded datastore. /// - /// [Self::terminate] should be called before the test finishes. 
+ /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. pub async fn new_with_datastore(log: &Logger) -> Self { let db = test_setup_database(log).await; let (opctx, datastore) = @@ -60,7 +62,8 @@ mod test { /// Creates a new database for test usage, with a raw datastore. /// - /// [Self::terminate] should be called before the test finishes. + /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. pub async fn new_with_raw_datastore(log: &Logger) -> Self { let db = test_setup_database(log).await; let cfg = db::Config { url: db.pg_config().clone() }; diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 2f17a5e2b6..d1028fbdb6 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -1059,7 +1059,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1077,7 +1077,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1115,7 +1115,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, /* pool_name = */ None, @@ -1135,7 +1135,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1159,7 +1159,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, /* pool_name = */ None, @@ -1214,7 +1214,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, 
context.default_pool_id().await, @@ -1233,7 +1233,7 @@ mod tests { context .db .datastore() - .deallocate_external_ip(&context.db.opctx(), ips[0].id) + .deallocate_external_ip(context.db.opctx(), ips[0].id) .await .expect("Failed to release the first external IP address"); @@ -1244,7 +1244,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1272,7 +1272,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1310,7 +1310,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, pool_name, @@ -1359,7 +1359,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_3, @@ -1377,7 +1377,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_3, @@ -1393,7 +1393,7 @@ mod tests { let err = context .db.datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { @@ -1413,7 +1413,7 @@ mod tests { let err = context .db.datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { @@ -1439,7 +1439,7 @@ mod tests { let err = context .db.datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::BoundaryNtp, ip_10_0_0_3_snat_0, @@ -1467,7 +1467,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), snat_service_id, 
ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_32768, @@ -1489,7 +1489,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_32768, @@ -1517,7 +1517,7 @@ mod tests { let err = context .db.datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_49152, @@ -1557,7 +1557,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_5, @@ -1592,7 +1592,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1610,7 +1610,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1664,7 +1664,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, Some(p1), @@ -1710,7 +1710,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, Some(p1.clone()), @@ -1733,7 +1733,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, Some(p1), diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index d6d0f22f94..0691ecf863 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -709,7 +709,7 @@ impl Nexus { /// Awaits termination without triggering it. /// /// To trigger termination, see: - /// - [Self::close_servers] or [Self::terminate] + /// - [`Self::close_servers`] or [`Self::terminate`] pub(crate) async fn wait_for_shutdown(&self) -> Result<(), String> { // The internal server is the last server to be closed. 
// From 6d2673b157081a837f2c7d31edbcc3dab106ea17 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 17 Oct 2024 17:27:52 -0700 Subject: [PATCH 13/16] update qorb revision --- Cargo.lock | 36 +++++++++++++++++++++++++++++++++--- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e20ecc218..c840b90506 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8718,9 +8718,9 @@ dependencies = [ [[package]] name = "qorb" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15601a1eaea70cb18e7a4f06c625b516d348a1b2d3dc1edbb93f0857636bcafe" +checksum = "d25f71eb7c5ba56a99f0721fd771b2503aa6de4ec73f0891f9b7ac115ca34723" dependencies = [ "anyhow", "async-trait", @@ -8736,7 +8736,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", - "tokio-tungstenite 0.23.1", + "tokio-tungstenite 0.24.0", "tracing", ] @@ -11556,6 +11556,18 @@ dependencies = [ "tungstenite 0.23.0", ] +[[package]] +name = "tokio-tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite 0.24.0", +] + [[package]] name = "tokio-util" version = "0.7.12" @@ -11873,6 +11885,24 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log", + "rand", + "sha1", + "thiserror", + "utf-8", +] + [[package]] name = "twox-hash" version = "1.6.3" diff --git a/Cargo.toml b/Cargo.toml index 882d006337..7a1cd418df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -520,7 +520,7 @@ propolis_api_types = { git = 
"https://github.com/oxidecomputer/propolis", rev = propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } proptest = "1.5.0" -qorb = "0.1.0" +qorb = "0.1.2" quote = "1.0" rand = "0.8.5" rand_core = "0.6.4" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 2e00e1ea49..6d0cb9ec90 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -90,7 +90,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { version = "0.1.0", features = ["qtop"] } +qorb = { version = "0.1.2", features = ["qtop"] } quote = { version = "1.0.37" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } @@ -206,7 +206,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { version = "0.1.0", features = ["qtop"] } +qorb = { version = "0.1.2", features = ["qtop"] } quote = { version = "1.0.37" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } From 0d59b623a97fc87c383f5fe4e745bddda019ef3f Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 17 Oct 2024 17:42:23 -0700 Subject: [PATCH 14/16] state -> kind --- .../src/db/datastore/pub_test_utils.rs | 39 +++++++++---------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/pub_test_utils.rs 
b/nexus/db-queries/src/db/datastore/pub_test_utils.rs index 1572861f2e..1e3343a165 100644 --- a/nexus/db-queries/src/db/datastore/pub_test_utils.rs +++ b/nexus/db-queries/src/db/datastore/pub_test_utils.rs @@ -22,7 +22,7 @@ mod test { use super::*; use nexus_test_utils::db::test_setup_database; - enum TestState { + enum TestKind { Pool { pool: Arc }, RawDatastore { datastore: Arc }, Datastore { opctx: OpContext, datastore: Arc }, @@ -32,7 +32,7 @@ mod test { pub struct TestDatabase { db: CockroachInstance, - state: TestState, + kind: TestKind, } impl TestDatabase { @@ -44,7 +44,7 @@ mod test { let db = test_setup_database(log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = Arc::new(db::Pool::new_single_host(log, &cfg)); - Self { db, state: TestState::Pool { pool } } + Self { db, kind: TestKind::Pool { pool } } } /// Creates a new database for test usage, with a pre-loaded datastore. @@ -57,7 +57,7 @@ mod test { crate::db::datastore::test_utils::datastore_test(log, &db) .await; - Self { db, state: TestState::Datastore { opctx, datastore } } + Self { db, kind: TestKind::Datastore { opctx, datastore } } } /// Creates a new database for test usage, with a raw datastore. @@ -70,46 +70,45 @@ mod test { let pool = Arc::new(db::Pool::new_single_host(log, &cfg)); let datastore = Arc::new(DataStore::new(&log, pool, None).await.unwrap()); - Self { db, state: TestState::RawDatastore { datastore } } + Self { db, kind: TestKind::RawDatastore { datastore } } } pub fn pool(&self) -> &Arc { - match &self.state { - TestState::Pool { pool } => pool, - TestState::RawDatastore { .. } - | TestState::Datastore { .. } => { + match &self.kind { + TestKind::Pool { pool } => pool, + TestKind::RawDatastore { .. } | TestKind::Datastore { .. } => { panic!("Wrong test type; try using `TestDatabase::new_with_pool`"); } } } pub fn opctx(&self) -> &OpContext { - match &self.state { - TestState::Pool { .. } | TestState::RawDatastore { .. 
} => { + match &self.kind { + TestKind::Pool { .. } | TestKind::RawDatastore { .. } => { panic!("Wrong test type; try using `TestDatabase::new_with_datastore`"); } - TestState::Datastore { opctx, .. } => opctx, + TestKind::Datastore { opctx, .. } => opctx, } } pub fn datastore(&self) -> &Arc { - match &self.state { - TestState::Pool { .. } => { + match &self.kind { + TestKind::Pool { .. } => { panic!("Wrong test type; try using `TestDatabase::new_with_datastore`"); } - TestState::RawDatastore { datastore } => datastore, - TestState::Datastore { datastore, .. } => datastore, + TestKind::RawDatastore { datastore } => datastore, + TestKind::Datastore { datastore, .. } => datastore, } } /// Shuts down both the database and the pool pub async fn terminate(mut self) { - match self.state { - TestState::Pool { pool } => pool.terminate().await, - TestState::RawDatastore { datastore } => { + match self.kind { + TestKind::Pool { pool } => pool.terminate().await, + TestKind::RawDatastore { datastore } => { datastore.terminate().await } - TestState::Datastore { datastore, .. } => { + TestKind::Datastore { datastore, .. 
} => { datastore.terminate().await } } From 350d70e378240dce69cf6027845d04b3549b1803 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 18 Oct 2024 12:10:59 -0700 Subject: [PATCH 15/16] Update datastore_test api (mergin') --- nexus/db-queries/src/db/datastore/clickhouse_policy.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/db-queries/src/db/datastore/clickhouse_policy.rs b/nexus/db-queries/src/db/datastore/clickhouse_policy.rs index d433bb9b60..fdbc1853c2 100644 --- a/nexus/db-queries/src/db/datastore/clickhouse_policy.rs +++ b/nexus/db-queries/src/db/datastore/clickhouse_policy.rs @@ -186,7 +186,7 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_clickhouse_policy_basic"); let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let (opctx, datastore) = datastore_test(&logctx.log, &db).await; // Listing an empty table should return an empty vec From 74d0fdba409309e31374fb0d356dc78a5a248c13 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 18 Oct 2024 13:34:45 -0700 Subject: [PATCH 16/16] Convert a new test --- .../src/db/datastore/clickhouse_policy.rs | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/clickhouse_policy.rs b/nexus/db-queries/src/db/datastore/clickhouse_policy.rs index fdbc1853c2..cdd0e4127b 100644 --- a/nexus/db-queries/src/db/datastore/clickhouse_policy.rs +++ b/nexus/db-queries/src/db/datastore/clickhouse_policy.rs @@ -175,9 +175,8 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_inventory::now_db_precision; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::ClickhouseMode; use omicron_test_utils::dev; @@ -185,13 +184,13 @@ mod tests { async fn test_clickhouse_policy_basic() { // Setup let logctx = 
dev::test_setup_log("test_clickhouse_policy_basic"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx.log, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Listing an empty table should return an empty vec assert!(datastore - .clickhouse_policy_list(&opctx, &DataPageParams::max_page()) + .clickhouse_policy_list(opctx, &DataPageParams::max_page()) .await .unwrap() .is_empty()); @@ -204,7 +203,7 @@ mod tests { }; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .unwrap_err() .to_string() @@ -213,7 +212,7 @@ mod tests { // Inserting version 2 before version 1 should not work policy.version = 2; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .unwrap_err() .to_string() @@ -222,21 +221,21 @@ mod tests { // Inserting version 1 should work policy.version = 1; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .is_ok()); // Inserting version 2 should work policy.version = 2; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .is_ok()); // Inserting version 4 should not work, since the prior version is 2 policy.version = 4; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .unwrap_err() .to_string() @@ -245,7 +244,7 @@ mod tests { // Inserting version 3 should work policy.version = 3; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .is_ok()); @@ -254,12 +253,12 @@ mod tests { 
policy.mode = ClickhouseMode::Both { target_servers: 3, target_keepers: 5 }; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .is_ok()); let history = datastore - .clickhouse_policy_list(&opctx, &DataPageParams::max_page()) + .clickhouse_policy_list(opctx, &DataPageParams::max_page()) .await .unwrap(); @@ -278,7 +277,7 @@ mod tests { } // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } }